1 diff -urN linux-2.6.16.original/block/ll_rw_blk.c linux-2.6.16.hdaps/block/ll_rw_blk.c
2 --- linux-2.6.16.original/block/ll_rw_blk.c     2006-03-20 05:53:29.000000000 +0000
3 +++ linux-2.6.16.hdaps/block/ll_rw_blk.c        2006-03-28 20:39:03.000000000 +0100
4 @@ -39,6 +39,8 @@
5  static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
6  static void init_request_from_bio(struct request *req, struct bio *bio);
7  static int __make_request(request_queue_t *q, struct bio *bio);
8 +static int blk_protect_register(request_queue_t *q);
9 +static void blk_protect_unregister(request_queue_t *q);
10  
11  /*
12   * For the allocated request tables
13 @@ -359,6 +361,18 @@
14  
15  EXPORT_SYMBOL(blk_queue_issue_flush_fn);
16  
17 +void blk_queue_issue_protect_fn(request_queue_t *q, issue_protect_fn *ipf)
18 +{
19 +       q->issue_protect_fn = ipf;
20 +}
21 +EXPORT_SYMBOL(blk_queue_issue_protect_fn);
22 +
23 +void blk_queue_issue_unprotect_fn(request_queue_t *q, issue_unprotect_fn *iuf)
24 +{
25 +       q->issue_unprotect_fn = iuf;
26 +}
27 +EXPORT_SYMBOL(blk_queue_issue_unprotect_fn);
28 +
29  /*
30   * Cache flushing for ordered writes handling
31   */
32 @@ -3817,6 +3831,7 @@
33                 return ret;
34         }
35  
36 +       blk_protect_register(q);
37         return 0;
38  }
39  
40 @@ -3825,10 +3840,120 @@
41         request_queue_t *q = disk->queue;
42  
43         if (q && q->request_fn) {
44 +               blk_protect_unregister(q);
45                 elv_unregister_queue(q);
46  
47                 kobject_uevent(&q->kobj, KOBJ_REMOVE);
48                 kobject_del(&q->kobj);
49                 kobject_put(&disk->kobj);
50         }
51  }
52 +
53 +/*
54 + * Restore the unplugging timer that we re-used
55 + * to implement the queue freeze timeout...
56 + */
57 +static void blk_unfreeze_work(void *data)
58 +{
59 +       request_queue_t *q = (request_queue_t *) data;
60 +
61 +       INIT_WORK(&q->unplug_work, blk_unplug_work, q);
62 +       q->unplug_timer.function = blk_unplug_timeout;
63 +
64 +       q->issue_unprotect_fn(q);
65 +}
66 +
67 +/*
68 + * Called when the queue freeze timeout expires...
69 + */
70 +static void blk_unfreeze_timeout(unsigned long data)
71 +{
72 +       request_queue_t *q = (request_queue_t *) data;
73 +       kblockd_schedule_work(&q->unplug_work);
74 +}
75 +
76 +/* 
77 + * The lower level driver parks and freezes the queue, and this block layer
78 + *  function sets up the freeze timeout timer on return. If the queue is
79 + *  already frozen then this is called to extend the timer...
80 + */
81 +void blk_freeze_queue(request_queue_t *q, int seconds)
82 +{
83 +       /* set/reset the timer */
84 +       mod_timer(&q->unplug_timer, msecs_to_jiffies(seconds*1000) + jiffies);
85 +
86 +       /* we do this every iteration - is this sane? */
87 +       INIT_WORK(&q->unplug_work, blk_unfreeze_work, q);
88 +       q->unplug_timer.function = blk_unfreeze_timeout;
89 +}
90 +
91 +/* 
92 + * When reading the 'protect' attribute, we return boolean frozen or active
93 + * todo:
94 + * - maybe we should return seconds remaining instead?
95 + */
96 +static ssize_t queue_protect_show(struct request_queue *q, char *page)
97 +{
98 +       return queue_var_show(blk_queue_stopped(q), (page));
99 +}
100 +
101 +/* 
102 + * When writing the 'protect' attribute, input is the number of seconds
103 + * to freeze the queue for. We call a lower level helper function to 
104 + * park the heads and freeze/block the queue, then we make a block layer
105 + * call to setup the thaw timeout. If input is 0, then we thaw the queue.
106 + */
107 +static ssize_t queue_protect_store(struct request_queue *q, const char *page, size_t count)
108 +{
109 +       unsigned long freeze = 0;
110 +       queue_var_store(&freeze, page, count);
111 +
112 +       if(freeze>0) {
113 +               /* Park and freeze */
114 +               if (!blk_queue_stopped(q))
115 +                       q->issue_protect_fn(q);
116 +               /* set / reset the thaw timer */
117 +               blk_freeze_queue(q, freeze);
118 +       }
119 +       else
120 +               blk_unfreeze_timeout((unsigned long) q);
121 +
122 +       return count;
123 +}
124 +
125 +static struct queue_sysfs_entry queue_protect_entry = {
126 +       .attr = {.name = "protect", .mode = S_IRUGO | S_IWUSR },
127 +       .show = queue_protect_show,
128 +       .store = queue_protect_store,
129 +};
130 +
131 +static int blk_protect_register(request_queue_t *q)
132 +{
133 +       int error = 0;
134 +
135 +       /* check that the lower level driver has a protect handler */   
136 +       if (!q->issue_protect_fn)
137 +               return 1;
138 +       
139 +       /* create the attribute */
140 +       error = sysfs_create_file(&q->kobj, &queue_protect_entry.attr);
141 +       if(error){
142 +               printk(KERN_ERR 
143 +                       "blk_protect_register(): failed to create protect queue attribute!\n");
144 +               return error;
145 +       }
146 +       
147 +       kobject_get(&q->kobj);
148 +       return 0;               
149 +}
150 +
151 +static void blk_protect_unregister(request_queue_t *q)
152 +{
153 +       /* check that the lower level driver has a protect handler */   
154 +       if (!q->issue_protect_fn)
155 +               return;
156 +
157 +       /* remove the attribute */
158 +       sysfs_remove_file(&q->kobj,&queue_protect_entry.attr);
159 +       kobject_put(&q->kobj);
160 +}
161 diff -urN linux-2.6.16.original/drivers/ide/ide-disk.c linux-2.6.16.hdaps/drivers/ide/ide-disk.c
162 --- linux-2.6.16.original/drivers/ide/ide-disk.c        2006-03-20 05:53:29.000000000 +0000
163 +++ linux-2.6.16.hdaps/drivers/ide/ide-disk.c   2006-04-30 13:24:35.000000000 +0100
164 @@ -71,6 +71,10 @@
165  #include <asm/io.h>
166  #include <asm/div64.h>
167  
168 +int idedisk_protect_method = 0;
169 +module_param_named(protect_method, idedisk_protect_method, int, 0444);
170 +MODULE_PARM_DESC(protect_method, "hdaps disk protection method (0=autodetect, 1=unload, 2=standby)");
171 +
172  struct ide_disk_obj {
173         ide_drive_t     *drive;
174         ide_driver_t    *driver;
175 @@ -727,6 +731,154 @@
176  }
177  
178  /*
179 + * todo:
180 + *  - we freeze the queue regardless of success and rely on the 
181 + *    ide_protect_queue function to thaw immediately if the command
182 + *    failed (to be consistent with the libata handler)... should 
183 + *    we also inspect here?
184 + */
185 +void ide_end_protect_rq(struct request *rq, int error)
186 +{
187 +       struct completion *waiting = rq->waiting;
188 +
189 +       /* spin lock already acquired */
190 +       if (!blk_queue_stopped(rq->q))
191 +               blk_stop_queue(rq->q);
192 +
193 +       complete(waiting);
194 +}
195 +
196 +int ide_unprotect_queue(request_queue_t *q)
197 +{
198 +       struct request  rq;
199 +       unsigned long flags;
200 +       int             pending = 0, rc = 0;
201 +       ide_drive_t     *drive = q->queuedata;
202 +       u8              args[7], *argbuf = args;
203 +
204 +       if (!blk_queue_stopped(q))
205 +               return -EIO;
206 +
207 +       /* Are there any pending jobs on the queue? */
208 +       pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
209 +       
210 +       spin_lock_irqsave(q->queue_lock, flags);
211 +       blk_start_queue(q);
212 +       spin_unlock_irqrestore(q->queue_lock, flags);
213 +
214 +       /* The unload feature of the IDLE_IMMEDIATE command
215 +          temporarily disables HD power management from spinning down
216 +          the disk. Any other command will reenable HD pm, so, if
217 +          there are no pending jobs on the queue, another
218 +          CHECK_POWER_MODE1 command without the unload feature should do
219 +          just fine. */
220 +       if (!pending) {
221 +               printk(KERN_DEBUG "ide_unprotect_queue(): No pending I/O, re-enabling power management..\n");
222 +               memset(args, 0, sizeof(args));
223 +               argbuf[0] = 0xe5; /* CHECK_POWER_MODE1 */
224 +               ide_init_drive_cmd(&rq);
225 +               rq.flags = REQ_DRIVE_TASK;
226 +               rq.buffer = argbuf;
227 +               rc = ide_do_drive_cmd(drive, &rq, ide_head_wait);
228 +       }
229 +
230 +       return rc;
231 +}
232 +
233 +int ide_protect_queue(request_queue_t *q, int unload)
234 +{
235 +       ide_drive_t     *drive = q->queuedata;
236 +       struct request  rq;
237 +       u8              args[7], *argbuf = args;
238 +       int             ret = 0;
239 +       DECLARE_COMPLETION(wait);
240 +
241 +       memset(&rq, 0, sizeof(rq));
242 +       memset(args, 0, sizeof(args));
243 +
244 +       if (blk_queue_stopped(q))
245 +               return -EIO;
246 +
247 +       if (unload) {
248 +               argbuf[0] = 0xe1;
249 +               argbuf[1] = 0x44;
250 +               argbuf[3] = 0x4c;
251 +               argbuf[4] = 0x4e;
252 +               argbuf[5] = 0x55;
253 +       } else
254 +               argbuf[0] = 0xe0;
255 +
256 +       /* Issue the park command & freeze */
257 +       ide_init_drive_cmd(&rq);
258 +
259 +       rq.flags = REQ_DRIVE_TASK;
260 +       rq.buffer = argbuf;
261 +       rq.waiting = &wait;
262 +       rq.end_io = ide_end_protect_rq;
263 +
264 +       ret = ide_do_drive_cmd(drive, &rq, ide_next);
265 +       wait_for_completion(&wait);
266 +       rq.waiting = NULL;
267 +
268 +       if (ret)
269 +       {
270 +               printk(KERN_DEBUG "ide_protect_queue(): Warning: head NOT parked!..\n");
271 +               ide_unprotect_queue(q);
272 +               return ret;
273 +       }
274 +
275 +       if (unload) {
276 +               if (args[3] == 0xc4)
277 +                       printk(KERN_DEBUG "ide_protect_queue(): head parked..\n");
278 +               else {
279 +                       /* error parking the head */
280 +                       printk(KERN_DEBUG "ide_protect_queue(): head NOT parked!..\n");
281 +                       ret = -EIO;
282 +                       ide_unprotect_queue(q);
283 +               }
284 +       } else
285 +               printk(KERN_DEBUG "ide_protect_queue(): head park not requested, used standby!..\n");
286 +
287 +       return ret;
288 +}      
289 +
290 +int idedisk_issue_protect_fn(request_queue_t *q)
291 +{
292 +       ide_drive_t             *drive = q->queuedata;
293 +       int unload;
294 +
295 +       /*
296 +        * Check capability of the device -
297 +        *  - if "idle immediate with unload" is supported we use that, else
298 +        *    we use "standby immediate" and live with spinning down the drive..
299 +        *    (Word 84, bit 13 of IDENTIFY DEVICE data)
300 +        */
301 +       if (idedisk_protect_method == 1) {
302 +               unload = 1;     
303 +               printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload method requested, overriding drive capability check..\n");
304 +       }
305 +       else if (idedisk_protect_method == 2) {
306 +               unload = 0;     
307 +               printk(KERN_DEBUG "idedisk_issue_protect_fn(): standby method requested, overriding drive capability check..\n");
308 +       }
309 +       else if (drive->id->cfsse & (1 << 13)) {
310 +               unload = 1;
311 +               printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload support reported by drive..\n");
312 +       }
313 +       else {
314 +               unload = 0;
315 +               printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload support NOT reported by drive!..\n");
316 +       }
317 +
318 +       return ide_protect_queue(q, unload);
319 +}
320 +
321 +int idedisk_issue_unprotect_fn(request_queue_t *q)
322 +{
323 +       return ide_unprotect_queue(q);
324 +}
325 +
326 +/*
327   * This is tightly woven into the driver->do_special can not touch.
328   * DON'T do it again until a total personality rewrite is committed.
329   */
330 @@ -984,6 +1136,9 @@
331                 drive->wcache = 1;
332  
333         write_cache(drive, 1);
334 +
335 +       blk_queue_issue_protect_fn(drive->queue, idedisk_issue_protect_fn);     
336 +       blk_queue_issue_unprotect_fn(drive->queue, idedisk_issue_unprotect_fn); 
337  }
338  
339  static void ide_cacheflush_p(ide_drive_t *drive)
340 diff -urN linux-2.6.16.original/drivers/ide/ide-io.c linux-2.6.16.hdaps/drivers/ide/ide-io.c
341 --- linux-2.6.16.original/drivers/ide/ide-io.c  2006-03-20 05:53:29.000000000 +0000
342 +++ linux-2.6.16.hdaps/drivers/ide/ide-io.c     2006-03-26 15:10:44.000000000 +0100
343 @@ -1180,6 +1180,17 @@
344                 }
345  
346                 /*
347 +                * Don't accept a request when the queue is stopped (unless we
348 +                * are resuming from suspend). This prevents existing queue
349 +                * entries from being processed after the queue has been stopped
350 +                * by the hard disk protection mechanism...
351 +                */
352 +               if (test_bit(QUEUE_FLAG_STOPPED, &drive->queue->queue_flags) && !blk_pm_resume_request(rq)) {
353 +                       hwgroup->busy = 0;
354 +                       break;
355 +               }
356 +
357 +               /*
358                  * Sanity: don't accept a request that isn't a PM request
359                  * if we are currently power managed. This is very important as
360                  * blk_stop_queue() doesn't prevent the elv_next_request()
361 @@ -1660,6 +1671,9 @@
362                 where = ELEVATOR_INSERT_FRONT;
363                 rq->flags |= REQ_PREEMPT;
364         }
365 +       if (action == ide_next)
366 +               where = ELEVATOR_INSERT_FRONT;
367 +
368         __elv_add_request(drive->queue, rq, where, 0);
369         ide_do_request(hwgroup, IDE_NO_IRQ);
370         spin_unlock_irqrestore(&ide_lock, flags);
371 diff -urN linux-2.6.16.original/drivers/scsi/libata-core.c linux-2.6.16.hdaps/drivers/scsi/libata-core.c
372 --- linux-2.6.16.original/drivers/scsi/libata-core.c    2006-03-20 05:53:29.000000000 +0000
373 +++ linux-2.6.16.hdaps/drivers/scsi/libata-core.c       2006-04-30 13:25:24.000000000 +0100
374 @@ -73,6 +73,10 @@
375  static unsigned int ata_unique_id = 1;
376  static struct workqueue_struct *ata_wq;
377  
378 +int libata_protect_method = 0;
379 +module_param_named(protect_method, libata_protect_method, int, 0444);
380 +MODULE_PARM_DESC(protect_method, "hdaps disk protection method (0=autodetect, 1=unload, 2=standby)");
381 +
382  int atapi_enabled = 1;
383  module_param(atapi_enabled, int, 0444);
384  MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
385 diff -urN linux-2.6.16.original/drivers/scsi/libata.h linux-2.6.16.hdaps/drivers/scsi/libata.h
386 --- linux-2.6.16.original/drivers/scsi/libata.h 2006-03-20 05:53:29.000000000 +0000
387 +++ linux-2.6.16.hdaps/drivers/scsi/libata.h    2006-04-30 13:25:33.000000000 +0100
388 @@ -40,6 +40,7 @@
389  };
390  
391  /* libata-core.c */
392 +extern int libata_protect_method;
393  extern int atapi_enabled;
394  extern int libata_fua;
395  extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
396 diff -urN linux-2.6.16.original/drivers/scsi/libata-scsi.c linux-2.6.16.hdaps/drivers/scsi/libata-scsi.c
397 --- linux-2.6.16.original/drivers/scsi/libata-scsi.c    2006-03-20 05:53:29.000000000 +0000
398 +++ linux-2.6.16.hdaps/drivers/scsi/libata-scsi.c       2006-04-30 13:24:56.000000000 +0100
399 @@ -662,6 +662,42 @@
400         }
401  }
402  
403 +extern int scsi_protect_queue(request_queue_t *q, int unload);
404 +extern int scsi_unprotect_queue(request_queue_t *q);
405 +
406 +static int ata_scsi_issue_protect_fn(request_queue_t *q)
407 +{
408 +       struct scsi_device *sdev = q->queuedata;
409 +       struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
410 +       struct ata_device *dev = &ap->device[sdev->id];
411 +       int unload;
412 +
413 +       if (libata_protect_method == 1) {
414 +               unload = 1;     
415 +               printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload method requested, overriding drive capability check..\n");
416 +       }
417 +       else if (libata_protect_method == 2) {
418 +               unload = 0;     
419 +               printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): standby method requested, overriding drive capability check..\n");
420 +       }
421 +       else if (ata_id_has_unload(dev->id)) {
422 +               unload = 1;
423 +               printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload support reported by drive..\n");
424 +       }
425 +       else {
426 +               unload = 0;
427 +               printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload support NOT reported by drive!..\n");
428 +       }
429 +
430 +       /* call scsi_protect_queue, requesting either unload or standby */
431 +       return scsi_protect_queue(q, unload);
432 +}
433 +
434 +static int ata_scsi_issue_unprotect_fn(request_queue_t *q)
435 +{
436 +       return scsi_unprotect_queue(q);
437 +}
438 +
439  /**
440   *     ata_scsi_slave_config - Set SCSI device attributes
441   *     @sdev: SCSI device to examine
442 @@ -712,6 +748,8 @@
443                         blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
444                 }
445         }
446 +       blk_queue_issue_protect_fn(sdev->request_queue, ata_scsi_issue_protect_fn);     
447 +       blk_queue_issue_unprotect_fn(sdev->request_queue, ata_scsi_issue_unprotect_fn); 
448  
449         return 0;       /* scsi layer doesn't check return value, sigh */
450  }
451 diff -urN linux-2.6.16.original/drivers/scsi/scsi_lib.c linux-2.6.16.hdaps/drivers/scsi/scsi_lib.c
452 --- linux-2.6.16.original/drivers/scsi/scsi_lib.c       2006-03-20 05:53:29.000000000 +0000
453 +++ linux-2.6.16.hdaps/drivers/scsi/scsi_lib.c  2006-03-26 15:10:44.000000000 +0100
454 @@ -2307,2 +2307,188 @@
455  }
456  EXPORT_SYMBOL_GPL(scsi_target_unblock);
457 +
458 +/*
459 + * As per scsi_wait_req_end_io(), which was removed in 2.6.15
460 + */
461 +static void scsi_protect_wait_req_end_io(struct request *req, int error)
462 +{
463 +       BUG_ON(!req->waiting);
464 +
465 +       complete(req->waiting);
466 +}
467 +
468 +/*
469 + * As per scsi_wait_done(), except calls scsi_device_block
470 + * to block the queue at command completion. Only called by
471 + * scsi_protect_wait().
472 + * todo:
473 + *  - we block the queue regardless of success and rely on the 
474 + *    scsi_protect_queue function to unblock if the command
475 + *    failed... should we also inspect here?
476 + */
477 +static void scsi_protect_wait_done(struct scsi_cmnd *cmd)
478 +{
479 +       struct request *req = cmd->request;
480 +       struct request_queue *q = cmd->device->request_queue;
481 +       struct scsi_device *sdev = cmd->device;
482 +       unsigned long flags;
483 +
484 +       req->rq_status = RQ_SCSI_DONE;  /* Busy, but indicate request done */
485 +
486 +       spin_lock_irqsave(q->queue_lock, flags);
487 +       if (blk_rq_tagged(req))
488 +               blk_queue_end_tag(q, req);
489 +       spin_unlock_irqrestore(q->queue_lock, flags);
490 +
491 +       scsi_internal_device_block(sdev);
492 +
493 +       if (req->waiting)
494 +               complete(req->waiting);
495 +}
496 +
497 +/*
498 + * As per scsi_wait_req(), except sets the completion function
499 + * as scsi_protect_wait_done().
500 + */
501 +void scsi_protect_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
502 +                  unsigned bufflen, int timeout, int retries)
503 +{
504 +       DECLARE_COMPLETION(wait);
505 +       
506 +       sreq->sr_request->waiting = &wait;
507 +       sreq->sr_request->rq_status = RQ_SCSI_BUSY;
508 +       sreq->sr_request->end_io = scsi_protect_wait_req_end_io;
509 +       scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_protect_wait_done,
510 +                       timeout, retries);
511 +       wait_for_completion(&wait);
512 +       sreq->sr_request->waiting = NULL;
513 +       if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
514 +               sreq->sr_result |= (DRIVER_ERROR << 24);
515 +
516 +       __scsi_release_request(sreq);
517 +}
518 +
519 +/*
520 + * scsi_unprotect_queue()
521 + *  - release the queue that was previously blocked
522 + */
523 +int scsi_unprotect_queue(request_queue_t *q){
524 +
525 +       struct scsi_device *sdev = q->queuedata;
526 +       int rc = 0, pending = 0;
527 +       u8 scsi_cmd[MAX_COMMAND_SIZE];
528 +       struct scsi_sense_hdr sshdr;
529 +
530 +       if (sdev->sdev_state != SDEV_BLOCK)
531 +               return -ENXIO;
532 +
533 +       /* Are there any pending jobs on the queue? */
534 +       pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
535 +
536 +       rc = scsi_internal_device_unblock(sdev);
537 +       if (rc)
538 +               return rc;
539 +
540 +       if (!pending) {
541 +               printk(KERN_DEBUG "scsi_unprotect_queue(): No pending I/O, re-enabling power management..\n");
542 +
543 +               memset(scsi_cmd, 0, sizeof(scsi_cmd));
544 +               scsi_cmd[0]  = ATA_16;
545 +               scsi_cmd[1]  = (3 << 1); /* Non-data */
546 +               /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
547 +               scsi_cmd[14] = 0xe5; /* CHECK_POWER_MODE1 */
548 +
549 +               /* Good values for timeout and retries?  Values below
550 +                  from scsi_ioctl_send_command() for default case... */        
551 +               if (scsi_execute_req(sdev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
552 +                            (10*HZ), 5))
553 +                       rc = -EIO;
554 +       }
555 +       return rc;
556 +}
557 +EXPORT_SYMBOL_GPL(scsi_unprotect_queue);
558 +
559 +/*
560 + * scsi_protect_queue()
561 + *  - build and issue the park/standby command.. 
562 + *  - queue is blocked during command completion handler
563 + */
564 +int scsi_protect_queue(request_queue_t *q, int unload)
565 +{
566 +       struct scsi_device *sdev = q->queuedata;
567 +       int rc = 0;
568 +       u8 scsi_cmd[MAX_COMMAND_SIZE];
569 +       u8 args[7];
570 +       struct scsi_request *sreq;
571 +       unsigned char *sb, *desc;
572 +
573 +       if (sdev->sdev_state != SDEV_RUNNING)
574 +               return -ENXIO;
575 +
576 +       memset(args, 0, sizeof(args));
577 +
578 +       if (unload) {
579 +               args[0] = 0xe1;
580 +               args[1] = 0x44;
581 +               args[3] = 0x4c;
582 +               args[4] = 0x4e;
583 +               args[5] = 0x55;
584 +       } else
585 +               args[0] = 0xe0;
586 +
587 +       memset(scsi_cmd, 0, sizeof(scsi_cmd));
588 +       scsi_cmd[0]  = ATA_16;
589 +       scsi_cmd[1]  = (3 << 1); /* Non-data */
590 +       scsi_cmd[2]  = 0x20;     /* no off.line, or data xfer, request cc */
591 +       scsi_cmd[4]  = args[1];
592 +       scsi_cmd[6]  = args[2];
593 +       scsi_cmd[8]  = args[3];
594 +       scsi_cmd[10] = args[4];
595 +       scsi_cmd[12] = args[5];
596 +       scsi_cmd[14] = args[0];
597 +
598 +       sreq = scsi_allocate_request(sdev, GFP_KERNEL);
599 +       if (!sreq) {
600 +               rc = -EINTR;
601 +               goto error;
602 +       }
603 +
604 +       sreq->sr_data_direction = DMA_NONE;
605 +
606 +       scsi_protect_wait_req(sreq, scsi_cmd, NULL, 0, (10*HZ), 5);
607 +
608 +       if (sreq->sr_result != ((DRIVER_SENSE << 24) + SAM_STAT_CHECK_CONDITION)) {
609 +               printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
610 +               scsi_unprotect_queue(q);                /* just in case we still managed to block */
611 +               rc = -EIO;
612 +               goto error;
613 +       }
614 +
615 +       sb = sreq->sr_sense_buffer;
616 +       desc = sb + 8;
617 +
618 +       /* Retrieve data from check condition */
619 +       args[1] = desc[3];
620 +       args[2] = desc[5];
621 +       args[3] = desc[7];
622 +       args[4] = desc[9];
623 +       args[5] = desc[11];
624 +       args[0] = desc[13];
625 +
626 +       if (unload) {
627 +               if (args[3] == 0xc4)
628 +                       printk(KERN_DEBUG "scsi_protect_queue(): head parked..\n");
629 +               else {
630 +                       /* error parking the head */
631 +                       printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
632 +                       rc = -EIO;
633 +                       scsi_unprotect_queue(q);
634 +               }
635 +       } else
636 +               printk(KERN_DEBUG "scsi_protect_queue(): head park not requested, used standby!..\n");
637 +
638 +error:
639 +       scsi_release_request(sreq);
640 +       return rc;
641 +}
642 +EXPORT_SYMBOL_GPL(scsi_protect_queue);
643 diff -urN linux-2.6.16.original/include/linux/ata.h linux-2.6.16.hdaps/include/linux/ata.h
644 --- linux-2.6.16.original/include/linux/ata.h   2006-03-20 05:53:29.000000000 +0000
645 +++ linux-2.6.16.hdaps/include/linux/ata.h      2006-03-26 14:26:24.000000000 +0100
646 @@ -253,6 +253,7 @@
647  #define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
648  #define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
649  #define ata_id_hpa_enabled(id) ((id)[85] & (1 << 10))
650 +#define ata_id_has_unload(id)  ((id)[84] & (1 << 13))
651  #define ata_id_has_fua(id)     ((id)[84] & (1 << 6))
652  #define ata_id_has_flush(id)   ((id)[83] & (1 << 12))
653  #define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13))
654 diff -urN linux-2.6.16.original/include/linux/blkdev.h linux-2.6.16.hdaps/include/linux/blkdev.h
655 --- linux-2.6.16.original/include/linux/blkdev.h        2006-03-20 05:53:29.000000000 +0000
656 +++ linux-2.6.16.hdaps/include/linux/blkdev.h   2006-03-26 14:26:24.000000000 +0100
657 @@ -293,6 +293,8 @@
658  typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
659  typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
660  typedef void (softirq_done_fn)(struct request *);
661 +typedef int (issue_protect_fn) (request_queue_t *);
662 +typedef int (issue_unprotect_fn) (request_queue_t *);
663  
664  enum blk_queue_state {
665         Queue_down,
666 @@ -335,6 +337,8 @@
667         issue_flush_fn          *issue_flush_fn;
668         prepare_flush_fn        *prepare_flush_fn;
669         softirq_done_fn         *softirq_done_fn;
670 +       issue_protect_fn        *issue_protect_fn;
671 +       issue_unprotect_fn      *issue_unprotect_fn;
672  
673         /*
674          * Dispatch queue sorting
675 @@ -715,6 +719,8 @@
676  extern unsigned blk_ordered_cur_seq(request_queue_t *);
677  extern unsigned blk_ordered_req_seq(struct request *);
678  extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
679 +extern void blk_queue_issue_protect_fn(request_queue_t *, issue_protect_fn *);
680 +extern void blk_queue_issue_unprotect_fn(request_queue_t *, issue_unprotect_fn *);
681  
682  extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
683  extern void blk_dump_rq_flags(struct request *, char *);
684 diff -urN linux-2.6.16.original/include/linux/ide.h linux-2.6.16.hdaps/include/linux/ide.h
685 --- linux-2.6.16.original/include/linux/ide.h   2006-03-20 05:53:29.000000000 +0000
686 +++ linux-2.6.16.hdaps/include/linux/ide.h      2006-03-26 14:26:24.000000000 +0100
687 @@ -1081,6 +1081,7 @@
688   */
689  typedef enum {
690         ide_wait,       /* insert rq at end of list, and wait for it */
691 +       ide_next,       /* insert rq immediately after current request */
692         ide_preempt,    /* insert rq in front of current request */
693         ide_head_wait,  /* insert rq in front of current request and wait for it */
694         ide_end         /* insert rq at end of list, but don't wait for it */
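A minimal userspace sketch of how an accelerometer daemon (hdapsd, for example) might drive the 'protect' attribute that blk_protect_register() creates above. The /sys/block/hda/queue/protect path and the 8-second timeout are illustrative assumptions, not values taken from the patch; per queue_protect_store(), writing a positive number of seconds parks the heads and freezes the queue for that long, writing 0 thaws it, and reading the attribute reports whether the queue is currently frozen.

/*
 * Hypothetical consumer of the sysfs interface added by this patch.
 * Run as root on a kernel carrying the patch; the device name is illustrative.
 */
#include <stdio.h>

static int write_protect(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *attr = "/sys/block/hda/queue/protect";	/* assumed path */

	/* Shock detected: park the heads and freeze the queue for 8 seconds. */
	if (write_protect(attr, "8\n"))
		return 1;

	/* ... accelerometer reports the machine is stable again ... */

	/* Thaw early; otherwise the block-layer timer unfreezes the queue. */
	return write_protect(attr, "0\n") ? 1 : 0;
}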