1 diff -urN linux-2.6.16.original/block/ll_rw_blk.c linux-2.6.16.hdaps/block/ll_rw_blk.c
2 --- linux-2.6.16.original/block/ll_rw_blk.c 2006-03-20 05:53:29.000000000 +0000
3 +++ linux-2.6.16.hdaps/block/ll_rw_blk.c 2006-03-28 20:39:03.000000000 +0100
5 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
6 static void init_request_from_bio(struct request *req, struct bio *bio);
7 static int __make_request(request_queue_t *q, struct bio *bio);
8 +static int blk_protect_register(request_queue_t *q);
9 +static void blk_protect_unregister(request_queue_t *q);
12 * For the allocated request tables
15 EXPORT_SYMBOL(blk_queue_issue_flush_fn);
17 +void blk_queue_issue_protect_fn(request_queue_t *q, issue_protect_fn *ipf)
19 + q->issue_protect_fn = ipf;
21 +EXPORT_SYMBOL(blk_queue_issue_protect_fn);
23 +void blk_queue_issue_unprotect_fn(request_queue_t *q, issue_unprotect_fn *iuf)
25 + q->issue_unprotect_fn = iuf;
27 +EXPORT_SYMBOL(blk_queue_issue_unprotect_fn);
30 * Cache flushing for ordered writes handling
36 + blk_protect_register(q);
40 @@ -3825,10 +3840,120 @@
41 request_queue_t *q = disk->queue;
43 if (q && q->request_fn) {
44 + blk_protect_unregister(q);
45 elv_unregister_queue(q);
47 kobject_uevent(&q->kobj, KOBJ_REMOVE);
48 kobject_del(&q->kobj);
49 kobject_put(&disk->kobj);
54 + * Restore the unplugging timer that we re-used
55 + * to implement the queue freeze timeout...
57 +static void blk_unfreeze_work(void *data)
59 + request_queue_t *q = (request_queue_t *) data;
61 + INIT_WORK(&q->unplug_work, blk_unplug_work, q);
62 + q->unplug_timer.function = blk_unplug_timeout;
64 + q->issue_unprotect_fn(q);
68 + * Called when the queue freeze timeout expires...
70 +static void blk_unfreeze_timeout(unsigned long data)
72 + request_queue_t *q = (request_queue_t *) data;
73 + kblockd_schedule_work(&q->unplug_work);
77 + * The lower level driver parks and freezes the queue, and this block layer
78 + * function sets up the freeze timeout timer on return. If the queue is
79 + * already frozen then this is called to extend the timer...
81 +void blk_freeze_queue(request_queue_t *q, int seconds)
83 + /* set/reset the timer */
84 + mod_timer(&q->unplug_timer, msecs_to_jiffies(seconds*1000) + jiffies);
86 + /* we do this every iteration - is this sane? */
87 + INIT_WORK(&q->unplug_work, blk_unfreeze_work, q);
88 + q->unplug_timer.function = blk_unfreeze_timeout;
92 + * When reading the 'protect' attribute, we return boolean frozen or active
94 + * - maybe we should return seconds remaining instead?
96 +static ssize_t queue_protect_show(struct request_queue *q, char *page)
98 + return queue_var_show(blk_queue_stopped(q), (page));
102 + * When writing the 'protect' attribute, input is the number of seconds
103 + * to freeze the queue for. We call a lower level helper function to
104 + * park the heads and freeze/block the queue, then we make a block layer
105 + * call to setup the thaw timeout. If input is 0, then we thaw the queue.
107 +static ssize_t queue_protect_store(struct request_queue *q, const char *page, size_t count)
109 + unsigned long freeze = 0;
110 + queue_var_store(&freeze, page, count);
113 + /* Park and freeze */
114 + if (!blk_queue_stopped(q))
115 + q->issue_protect_fn(q);
116 + /* set / reset the thaw timer */
117 + blk_freeze_queue(q, freeze);
120 + blk_unfreeze_timeout((unsigned long) q);
125 +static struct queue_sysfs_entry queue_protect_entry = {
126 + .attr = {.name = "protect", .mode = S_IRUGO | S_IWUSR },
127 + .show = queue_protect_show,
128 + .store = queue_protect_store,
131 +static int blk_protect_register(request_queue_t *q)
135 + /* check that the lower level driver has a protect handler */
136 + if (!q->issue_protect_fn)
139 + /* create the attribute */
140 + error = sysfs_create_file(&q->kobj, &queue_protect_entry.attr);
143 + "blk_protect_register(): failed to create protect queue attribute!\n");
147 + kobject_get(&q->kobj);
151 +static void blk_protect_unregister(request_queue_t *q)
153 + /* check that the lower level driver has a protect handler */
154 + if (!q->issue_protect_fn)
157 + /* remove the attribute */
158 + sysfs_remove_file(&q->kobj,&queue_protect_entry.attr);
159 + kobject_put(&q->kobj);
161 diff -urN linux-2.6.16.original/drivers/ide/ide-disk.c linux-2.6.16.hdaps/drivers/ide/ide-disk.c
162 --- linux-2.6.16.original/drivers/ide/ide-disk.c 2006-03-20 05:53:29.000000000 +0000
163 +++ linux-2.6.16.hdaps/drivers/ide/ide-disk.c 2006-04-30 13:24:35.000000000 +0100
166 #include <asm/div64.h>
168 +int idedisk_protect_method = 0;
169 +module_param_named(protect_method, idedisk_protect_method, int, 0444);
170 +MODULE_PARM_DESC(protect_method, "hdaps disk protection method (0=autodetect, 1=unload, 2=standby)");
172 struct ide_disk_obj {
174 ide_driver_t *driver;
175 @@ -727,6 +731,154 @@
180 + * - we freeze the queue regardless of success and rely on the
181 + * ide_protect_queue function to thaw immediately if the command
182 + * failed (to be consistent with the libata handler)... should
183 + * we also inspect here?
185 +void ide_end_protect_rq(struct request *rq, int error)
187 + struct completion *waiting = rq->waiting;
189 + /* spin lock already acquired */
190 + if (!blk_queue_stopped(rq->q))
191 + blk_stop_queue(rq->q);
196 +int ide_unprotect_queue(request_queue_t *q)
198 + struct request rq;
199 + unsigned long flags;
200 + int pending = 0, rc = 0;
201 + ide_drive_t *drive = q->queuedata;
202 + u8 args[7], *argbuf = args;
204 + if (!blk_queue_stopped(q))
207 + /* Are there any pending jobs on the queue? */
208 + pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
210 + spin_lock_irqsave(q->queue_lock, flags);
211 + blk_start_queue(q);
212 + spin_unlock_irqrestore(q->queue_lock, flags);
214 + /* The unload feature of the IDLE_IMMEDIATE command
215 + temporarily disables HD power management from spinning down
216 + the disk. Any other command will reenable HD pm, so, if
217 + there are no pending jobs on the queue, another
218 + CHECK_POWER_MODE1 command without the unload feature should do
221 + printk(KERN_DEBUG "ide_unprotect_queue(): No pending I/O, re-enabling power management..\n");
222 + memset(args, 0, sizeof(args));
223 + argbuf[0] = 0xe5; /* CHECK_POWER_MODE1 */
224 + ide_init_drive_cmd(&rq);
225 + rq.flags = REQ_DRIVE_TASK;
226 + rq.buffer = argbuf;
227 + rc = ide_do_drive_cmd(drive, &rq, ide_head_wait);
233 +int ide_protect_queue(request_queue_t *q, int unload)
235 + ide_drive_t *drive = q->queuedata;
237 + u8 args[7], *argbuf = args;
239 + DECLARE_COMPLETION(wait);
241 + memset(&rq, 0, sizeof(rq));
242 + memset(args, 0, sizeof(args));
244 + if (blk_queue_stopped(q))
256 + /* Issue the park command & freeze */
257 + ide_init_drive_cmd(&rq);
259 + rq.flags = REQ_DRIVE_TASK;
260 + rq.buffer = argbuf;
261 + rq.waiting = &wait;
262 + rq.end_io = ide_end_protect_rq;
264 + ret = ide_do_drive_cmd(drive, &rq, ide_next);
265 + wait_for_completion(&wait);
270 + printk(KERN_DEBUG "ide_protect_queue(): Warning: head NOT parked!..\n");
271 + ide_unprotect_queue(q);
276 + if (args[3] == 0xc4)
277 + printk(KERN_DEBUG "ide_protect_queue(): head parked..\n");
279 + /* error parking the head */
280 + printk(KERN_DEBUG "ide_protect_queue(): head NOT parked!..\n");
282 + ide_unprotect_queue(q);
285 + printk(KERN_DEBUG "ide_protect_queue(): head park not requested, used standby!..\n");
290 +int idedisk_issue_protect_fn(request_queue_t *q)
292 + ide_drive_t *drive = q->queuedata;
296 + * Check capability of the device -
297 + * - if "idle immediate with unload" is supported we use that, else
298 + * we use "standby immediate" and live with spinning down the drive..
299 + * (Word 84, bit 13 of IDENTIFY DEVICE data)
301 + if (idedisk_protect_method == 1) {
303 + printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload method requested, overriding drive capability check..\n");
305 + else if (idedisk_protect_method == 2) {
307 + printk(KERN_DEBUG "idedisk_issue_protect_fn(): standby method requested, overriding drive capability check..\n");
309 + else if (drive->id->cfsse & (1 << 13)) {
311 + printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload support reported by drive..\n");
315 + printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload support NOT reported by drive!..\n");
318 + return ide_protect_queue(q, unload);
321 +int idedisk_issue_unprotect_fn(request_queue_t *q)
323 + return ide_unprotect_queue(q);
327 * This is tightly woven into the driver->do_special can not touch.
328 * DON'T do it again until a total personality rewrite is committed.
333 write_cache(drive, 1);
335 + blk_queue_issue_protect_fn(drive->queue, idedisk_issue_protect_fn);
336 + blk_queue_issue_unprotect_fn(drive->queue, idedisk_issue_unprotect_fn);
339 static void ide_cacheflush_p(ide_drive_t *drive)
340 diff -urN linux-2.6.16.original/drivers/ide/ide-io.c linux-2.6.16.hdaps/drivers/ide/ide-io.c
341 --- linux-2.6.16.original/drivers/ide/ide-io.c 2006-03-20 05:53:29.000000000 +0000
342 +++ linux-2.6.16.hdaps/drivers/ide/ide-io.c 2006-03-26 15:10:44.000000000 +0100
343 @@ -1180,6 +1180,17 @@
347 + * Don't accept a request when the queue is stopped (unless we
348 + * are resuming from suspend). Prevents existing queue entries
349 + * being processed after queue is stopped by the hard disk
350 + * protection mechanism...
352 + if (test_bit(QUEUE_FLAG_STOPPED, &drive->queue->queue_flags) && !blk_pm_resume_request(rq)) {
358 * Sanity: don't accept a request that isn't a PM request
359 * if we are currently power managed. This is very important as
360 * blk_stop_queue() doesn't prevent the elv_next_request()
361 @@ -1660,6 +1671,9 @@
362 where = ELEVATOR_INSERT_FRONT;
363 rq->flags |= REQ_PREEMPT;
365 + if (action == ide_next)
366 + where = ELEVATOR_INSERT_FRONT;
368 __elv_add_request(drive->queue, rq, where, 0);
369 ide_do_request(hwgroup, IDE_NO_IRQ);
370 spin_unlock_irqrestore(&ide_lock, flags);
371 diff -urN linux-2.6.16.original/drivers/scsi/libata-core.c linux-2.6.16.hdaps/drivers/scsi/libata-core.c
372 --- linux-2.6.16.original/drivers/scsi/libata-core.c 2006-03-20 05:53:29.000000000 +0000
373 +++ linux-2.6.16.hdaps/drivers/scsi/libata-core.c 2006-04-30 13:25:24.000000000 +0100
375 static unsigned int ata_unique_id = 1;
376 static struct workqueue_struct *ata_wq;
378 +int libata_protect_method = 0;
379 +module_param_named(protect_method, libata_protect_method, int, 0444);
380 +MODULE_PARM_DESC(protect_method, "hdaps disk protection method (0=autodetect, 1=unload, 2=standby)");
382 int atapi_enabled = 1;
383 module_param(atapi_enabled, int, 0444);
384 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
385 diff -urN linux-2.6.16.original/drivers/scsi/libata.h linux-2.6.16.hdaps/drivers/scsi/libata.h
386 --- linux-2.6.16.original/drivers/scsi/libata.h 2006-03-20 05:53:29.000000000 +0000
387 +++ linux-2.6.16.hdaps/drivers/scsi/libata.h 2006-04-30 13:25:33.000000000 +0100
392 +extern int libata_protect_method;
393 extern int atapi_enabled;
394 extern int libata_fua;
395 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
396 diff -urN linux-2.6.16.original/drivers/scsi/libata-scsi.c linux-2.6.16.hdaps/drivers/scsi/libata-scsi.c
397 --- linux-2.6.16.original/drivers/scsi/libata-scsi.c 2006-03-20 05:53:29.000000000 +0000
398 +++ linux-2.6.16.hdaps/drivers/scsi/libata-scsi.c 2006-04-30 13:24:56.000000000 +0100
403 +extern int scsi_protect_queue(request_queue_t *q, int unload);
404 +extern int scsi_unprotect_queue(request_queue_t *q);
406 +static int ata_scsi_issue_protect_fn(request_queue_t *q)
408 + struct scsi_device *sdev = q->queuedata;
409 + struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
410 + struct ata_device *dev = &ap->device[sdev->id];
413 + if (libata_protect_method == 1) {
415 + printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload method requested, overriding drive capability check..\n");
417 + else if (libata_protect_method == 2) {
419 + printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): standby method requested, overriding drive capability check..\n");
421 + else if (ata_id_has_unload(dev->id)) {
423 + printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload support reported by drive..\n");
427 + printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload support NOT reported by drive!..\n");
430 + /* call scsi_protect_queue, requesting either unload or standby */
431 + return scsi_protect_queue(q, unload);
434 +static int ata_scsi_issue_unprotect_fn(request_queue_t *q)
436 + return scsi_unprotect_queue(q);
440 * ata_scsi_slave_config - Set SCSI device attributes
441 * @sdev: SCSI device to examine
443 blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
446 + blk_queue_issue_protect_fn(sdev->request_queue, ata_scsi_issue_protect_fn);
447 + blk_queue_issue_unprotect_fn(sdev->request_queue, ata_scsi_issue_unprotect_fn);
449 return 0; /* scsi layer doesn't check return value, sigh */
451 diff -urN linux-2.6.16.original/drivers/scsi/scsi_lib.c linux-2.6.16.hdaps/drivers/scsi/scsi_lib.c
452 --- linux-2.6.16.original/drivers/scsi/scsi_lib.c 2006-03-20 05:53:29.000000000 +0000
453 +++ linux-2.6.16.hdaps/drivers/scsi/scsi_lib.c 2006-03-26 15:10:44.000000000 +0100
454 @@ -2307,2 +2307,188 @@
456 EXPORT_SYMBOL_GPL(scsi_target_unblock);
459 + * As per scsi_wait_req_end_io(), which was removed in 2.6.15
461 +static void scsi_protect_wait_req_end_io(struct request *req, int error)
463 + BUG_ON(!req->waiting);
465 + complete(req->waiting);
469 + * As per scsi_wait_done(), except calls scsi_device_block
470 + * to block the queue at command completion. Only called by
471 + * scsi_protect_wait().
473 + * - we block the queue regardless of success and rely on the
474 + * scsi_protect_queue function to unblock if the command
475 + * failed... should we also inspect here?
477 +static void scsi_protect_wait_done(struct scsi_cmnd *cmd)
479 + struct request *req = cmd->request;
480 + struct request_queue *q = cmd->device->request_queue;
481 + struct scsi_device *sdev = cmd->device;
482 + unsigned long flags;
484 + req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
486 + spin_lock_irqsave(q->queue_lock, flags);
487 + if (blk_rq_tagged(req))
488 + blk_queue_end_tag(q, req);
489 + spin_unlock_irqrestore(q->queue_lock, flags);
491 + scsi_internal_device_block(sdev);
494 + complete(req->waiting);
498 + * As per scsi_wait_req(), except sets the completion function
499 + * as scsi_protect_wait_done().
501 +void scsi_protect_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
502 + unsigned bufflen, int timeout, int retries)
504 + DECLARE_COMPLETION(wait);
506 + sreq->sr_request->waiting = &wait;
507 + sreq->sr_request->rq_status = RQ_SCSI_BUSY;
508 + sreq->sr_request->end_io = scsi_protect_wait_req_end_io;
509 + scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_protect_wait_done,
511 + wait_for_completion(&wait);
512 + sreq->sr_request->waiting = NULL;
513 + if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
514 + sreq->sr_result |= (DRIVER_ERROR << 24);
516 + __scsi_release_request(sreq);
520 + * scsi_unprotect_queue()
521 + * - release the queue that was previously blocked
523 +int scsi_unprotect_queue(request_queue_t *q){
525 + struct scsi_device *sdev = q->queuedata;
526 + int rc = 0, pending = 0;
527 + u8 scsi_cmd[MAX_COMMAND_SIZE];
528 + struct scsi_sense_hdr sshdr;
530 + if (sdev->sdev_state != SDEV_BLOCK)
533 + /* Are there any pending jobs on the queue? */
534 + pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
536 + rc = scsi_internal_device_unblock(sdev);
541 + printk(KERN_DEBUG "scsi_unprotect_queue(): No pending I/O, re-enabling power management..\n");
543 + memset(scsi_cmd, 0, sizeof(scsi_cmd));
544 + scsi_cmd[0] = ATA_16;
545 + scsi_cmd[1] = (3 << 1); /* Non-data */
546 + /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
547 + scsi_cmd[14] = 0xe5; /* CHECK_POWER_MODE1 */
549 + /* Good values for timeout and retries? Values below
550 + from scsi_ioctl_send_command() for default case... */
551 + if (scsi_execute_req(sdev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
557 +EXPORT_SYMBOL_GPL(scsi_unprotect_queue);
560 + * scsi_protect_queue()
561 + * - build and issue the park/standby command..
562 + * - queue is blocked during command completion handler
564 +int scsi_protect_queue(request_queue_t *q, int unload)
566 + struct scsi_device *sdev = q->queuedata;
568 + u8 scsi_cmd[MAX_COMMAND_SIZE];
570 + struct scsi_request *sreq;
571 + unsigned char *sb, *desc;
573 + if (sdev->sdev_state != SDEV_RUNNING)
576 + memset(args, 0, sizeof(args));
587 + memset(scsi_cmd, 0, sizeof(scsi_cmd));
588 + scsi_cmd[0] = ATA_16;
589 + scsi_cmd[1] = (3 << 1); /* Non-data */
590 + scsi_cmd[2] = 0x20; /* no off.line, or data xfer, request cc */
591 + scsi_cmd[4] = args[1];
592 + scsi_cmd[6] = args[2];
593 + scsi_cmd[8] = args[3];
594 + scsi_cmd[10] = args[4];
595 + scsi_cmd[12] = args[5];
596 + scsi_cmd[14] = args[0];
598 + sreq = scsi_allocate_request(sdev, GFP_KERNEL);
604 + sreq->sr_data_direction = DMA_NONE;
606 + scsi_protect_wait_req(sreq, scsi_cmd, NULL, 0, (10*HZ), 5);
608 + if (sreq->sr_result != ((DRIVER_SENSE << 24) + SAM_STAT_CHECK_CONDITION)) {
609 + printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
610 + scsi_unprotect_queue(q); /* just in case we still managed to block */
615 + sb = sreq->sr_sense_buffer;
618 + /* Retrieve data from check condition */
623 + args[5] = desc[11];
624 + args[0] = desc[13];
627 + if (args[3] == 0xc4)
628 + printk(KERN_DEBUG "scsi_protect_queue(): head parked..\n");
630 + /* error parking the head */
631 + printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
633 + scsi_unprotect_queue(q);
636 + printk(KERN_DEBUG "scsi_protect_queue(): head park not requested, used standby!..\n");
639 + scsi_release_request(sreq);
642 +EXPORT_SYMBOL_GPL(scsi_protect_queue);
643 diff -urN linux-2.6.16.original/include/linux/ata.h linux-2.6.16.hdaps/include/linux/ata.h
644 --- linux-2.6.16.original/include/linux/ata.h 2006-03-20 05:53:29.000000000 +0000
645 +++ linux-2.6.16.hdaps/include/linux/ata.h 2006-03-26 14:26:24.000000000 +0100
647 #define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
648 #define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
649 #define ata_id_hpa_enabled(id) ((id)[85] & (1 << 10))
650 +#define ata_id_has_unload(id) ((id)[84] & (1 << 13))
651 #define ata_id_has_fua(id) ((id)[84] & (1 << 6))
652 #define ata_id_has_flush(id) ((id)[83] & (1 << 12))
653 #define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13))
654 diff -urN linux-2.6.16.original/include/linux/blkdev.h linux-2.6.16.hdaps/include/linux/blkdev.h
655 --- linux-2.6.16.original/include/linux/blkdev.h 2006-03-20 05:53:29.000000000 +0000
656 +++ linux-2.6.16.hdaps/include/linux/blkdev.h 2006-03-26 14:26:24.000000000 +0100
658 typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
659 typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
660 typedef void (softirq_done_fn)(struct request *);
661 +typedef int (issue_protect_fn) (request_queue_t *);
662 +typedef int (issue_unprotect_fn) (request_queue_t *);
664 enum blk_queue_state {
667 issue_flush_fn *issue_flush_fn;
668 prepare_flush_fn *prepare_flush_fn;
669 softirq_done_fn *softirq_done_fn;
670 + issue_protect_fn *issue_protect_fn;
671 + issue_unprotect_fn *issue_unprotect_fn;
674 * Dispatch queue sorting
676 extern unsigned blk_ordered_cur_seq(request_queue_t *);
677 extern unsigned blk_ordered_req_seq(struct request *);
678 extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
679 +extern void blk_queue_issue_protect_fn(request_queue_t *, issue_protect_fn *);
680 +extern void blk_queue_issue_unprotect_fn(request_queue_t *, issue_unprotect_fn *);
682 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
683 extern void blk_dump_rq_flags(struct request *, char *);
684 diff -urN linux-2.6.16.original/include/linux/ide.h linux-2.6.16.hdaps/include/linux/ide.h
685 --- linux-2.6.16.original/include/linux/ide.h 2006-03-20 05:53:29.000000000 +0000
686 +++ linux-2.6.16.hdaps/include/linux/ide.h 2006-03-26 14:26:24.000000000 +0100
687 @@ -1081,6 +1081,7 @@
690 ide_wait, /* insert rq at end of list, and wait for it */
691 + ide_next, /* insert rq immediately after current request */
692 ide_preempt, /* insert rq in front of current request */
693 ide_head_wait, /* insert rq in front of current request and wait for it */
694 ide_end /* insert rq at end of list, but don't wait for it */