]> git.pld-linux.org Git - packages/kernel.git/blame - kernel-cdrw-packet.patch
* ver 12 - 26072002 Krzysiek Taraszka
[packages/kernel.git] / kernel-cdrw-packet.patch
CommitLineData
e5ff5fa6 1diff -uNr linux-2.4.18/Documentation/Configure.help pkt/Documentation/Configure.help
2--- linux-2.4.18/Documentation/Configure.help Mon Jul 8 23:47:08 2002
3+++ pkt/Documentation/Configure.help Mon Jul 29 10:01:15 2002
4@@ -677,6 +677,27 @@
5 say M here and read <file:Documentation/modules.txt>. The module
6 will be called ide-cd.o.
7
8+
9+Packet writing on CD/DVD media (EXPERIMENTAL)
10+CONFIG_CDROM_PKTCDVD
11+ If you have a CDROM drive that supports packet writing, say Y to
12+ include preliminary support. It should work with any MMC/Mt Fuji
 13+  compliant ATAPI or SCSI drive, which is just about any newer CD
14+ writer.
15+
16+ Currently only writing to CD-RW discs is possible.
17+
18+ If you want to compile the driver as a module ( = code which can be
19+ inserted in and removed from the running kernel whenever you want),
20+ say M here and read Documentation/modules.txt. The module will be
21+ called pktcdvd.o
22+
23+Write caching
24+CONFIG_CDROM_PKTCDVD_WCACHE
25+ If enabled, write caching will be set for the CD-R/W device. For now
26+ this option is dangerous unless the CD-RW media is known good, as we
27+ don't do deferred write error handling yet.
28+
29 Include IDE/ATAPI TAPE support
30 CONFIG_BLK_DEV_IDETAPE
31 If you have an IDE tape drive using the ATAPI protocol, say Y.
32diff -uNr linux-2.4.18/arch/sparc64/kernel/ioctl32.c pkt/arch/sparc64/kernel/ioctl32.c
33--- linux-2.4.18/arch/sparc64/kernel/ioctl32.c Mon Feb 25 20:37:56 2002
34+++ pkt/arch/sparc64/kernel/ioctl32.c Mon Jul 29 09:10:29 2002
35@@ -88,6 +88,7 @@
36 #include <linux/atm_tcp.h>
37 #include <linux/sonet.h>
38 #include <linux/atm_suni.h>
39+#include <linux/pktcdvd.h>
40 #include <linux/mtd/mtd.h>
41
42 #include <net/bluetooth/bluetooth.h>
43@@ -846,6 +847,41 @@
44 return ret;
45 }
46
47+struct packet_stats32 {
48+ u32 bh_s;
49+ u32 bh_e;
50+ u32 bh_cache_hits;
51+ u32 page_cache_hits;
52+ u32 bh_w;
53+ u32 bh_r;
54+};
55+
56+static inline int pkt_getstats(unsigned int fd, unsigned int cmd, unsigned long arg)
57+{
58+ struct packet_stats p;
59+ struct packet_stats32 p32;
60+ mm_segment_t old_fs = get_fs();
61+ int ret;
62+
63+ ret = copy_from_user (&p32, (struct packet_stats32 *)arg, sizeof(struct packet_stats32));
64+ if (ret)
65+ return -EFAULT;
66+#define P(x) (p.x = (unsigned long)p32.x)
67+ P(bh_s);
68+ P(bh_e);
69+ P(bh_cache_hits);
70+ P(page_cache_hits);
71+ P(bh_w);
72+ P(bh_r);
73+#undef P
74+
75+ set_fs (KERNEL_DS);
76+ ret = sys_ioctl (fd, cmd, (long)&p);
77+ set_fs (old_fs);
78+
79+ return ret;
80+}
81+
82 struct hd_geometry32 {
83 unsigned char heads;
84 unsigned char sectors;
85@@ -4529,6 +4565,12 @@
86 COMPATIBLE_IOCTL(WIOCSTART)
87 COMPATIBLE_IOCTL(WIOCSTOP)
88 COMPATIBLE_IOCTL(WIOCGSTAT)
89+/* Big X, CDRW Packet Driver */
90+#if defined(CONFIG_CDROM_PKTCDVD)
91+COMPATIBLE_IOCTL(PACKET_SETUP_DEV)
92+COMPATIBLE_IOCTL(PACKET_TEARDOWN_DEV)
93+HANDLE_IOCTL(PACKET_GET_STATS, pkt_getstats)
94+#endif /* CONFIG_CDROM_PKTCDVD */
95 /* Bluetooth ioctls */
96 COMPATIBLE_IOCTL(HCIDEVUP)
97 COMPATIBLE_IOCTL(HCIDEVDOWN)
98diff -uNr linux-2.4.18/drivers/block/Config.in pkt/drivers/block/Config.in
99--- linux-2.4.18/drivers/block/Config.in Fri Sep 14 23:04:06 2001
100+++ pkt/drivers/block/Config.in Mon Jul 29 09:10:29 2002
101@@ -37,6 +37,12 @@
102 dep_tristate 'Compaq Smart Array 5xxx support' CONFIG_BLK_CPQ_CISS_DA $CONFIG_PCI
103 dep_tristate 'Mylex DAC960/DAC1100 PCI RAID Controller support' CONFIG_BLK_DEV_DAC960 $CONFIG_PCI
104
105+tristate 'Packet writing on CD/DVD media' CONFIG_CDROM_PKTCDVD
106+if [ "$CONFIG_CDROM_PKTCDVD" != "n" ]; then
107+ comment 'Use write caching at YOUR OWN RISK'
108+ bool ' Enable write caching' CONFIG_CDROM_PKTCDVD_WCACHE n
109+fi
110+
111 tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
112 dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
113
114diff -uNr linux-2.4.18/drivers/block/Makefile pkt/drivers/block/Makefile
115--- linux-2.4.18/drivers/block/Makefile Mon Feb 25 20:37:57 2002
116+++ pkt/drivers/block/Makefile Mon Jul 29 09:10:29 2002
117@@ -10,7 +10,7 @@
118
119 O_TARGET := block.o
120
121-export-objs := ll_rw_blk.o blkpg.o loop.o DAC960.o genhd.o
122+export-objs := ll_rw_blk.o blkpg.o loop.o DAC960.o genhd.o elevator.o
123
124 obj-y := ll_rw_blk.o blkpg.o genhd.o elevator.o
125
126@@ -29,6 +29,7 @@
127 obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o
128 obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
129 obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
130+obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
131
132 obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
133
134diff -uNr linux-2.4.18/drivers/block/elevator.c pkt/drivers/block/elevator.c
135--- linux-2.4.18/drivers/block/elevator.c Fri Jul 20 05:59:41 2001
136+++ pkt/drivers/block/elevator.c Mon Jul 29 09:10:29 2002
137@@ -220,3 +220,5 @@
138 *elevator = type;
139 elevator->queue_ID = queue_ID++;
140 }
141+
142+EXPORT_SYMBOL(elevator_init);
143diff -uNr linux-2.4.18/drivers/block/ll_rw_blk.c pkt/drivers/block/ll_rw_blk.c
144--- linux-2.4.18/drivers/block/ll_rw_blk.c Mon Jul 8 23:42:05 2002
145+++ pkt/drivers/block/ll_rw_blk.c Mon Jul 29 09:10:29 2002
146@@ -834,6 +834,7 @@
147 /* Test device size, when known. */
148 if (blk_size[major])
149 minorsize = blk_size[major][MINOR(bh->b_rdev)];
150+#if 0
151 if (minorsize) {
152 unsigned long maxsector = (minorsize << 1) + 1;
153 unsigned long sector = bh->b_rsector;
154@@ -857,6 +858,7 @@
155 return;
156 }
157 }
158+#endif
159
160 /*
161 * Resolve the mapping until finished. (drivers are
162diff -uNr linux-2.4.18/drivers/block/pktcdvd.c pkt/drivers/block/pktcdvd.c
163--- linux-2.4.18/drivers/block/pktcdvd.c Thu Jan 1 01:00:00 1970
164+++ pkt/drivers/block/pktcdvd.c Mon Jul 29 09:10:29 2002
165@@ -0,0 +1,2570 @@
166+/*
167+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
168+ *
169+ * May be copied or modified under the terms of the GNU General Public
170+ * License. See linux/COPYING for more information.
171+ *
172+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
173+ * DVD-RW devices (aka an exercise in block layer masturbation)
174+ *
175+ *
176+ * TODO: (circa order of when I will fix it)
177+ * - Only able to write on CD-RW media right now.
178+ * - check host application code on media and set it in write page
179+ * - Generic interface for UDF to submit large packets for variable length
180+ * packet writing
181+ * - interface for UDF <-> packet to negotiate a new location when a write
182+ * fails.
183+ * - handle OPC, especially for -RW media
184+ *
185+ * ------------------------------------------------------------------------
186+ *
187+ * Newer changes -- see ChangeLog
188+ *
189+ * 0.0.2d (26/10/2000)
190+ * - (scsi) use implicit segment recounting for all hba's
 191+ *	- fix speed setting, was consistently off on most drives
192+ * - only print capacity when opening for write
193+ * - fix off-by-two error in getting/setting write+read speed (affected
194+ * reporting as well as actual speed used)
195+ * - possible to enable write caching on drive
196+ * - do ioctl marshalling on sparc64 from Ben Collins <bcollins@debian.org>
197+ * - avoid unaligned access on flags, should have been unsigned long of course
198+ * - fixed missed wakeup in kpacketd
199+ * - b_dev error (two places)
200+ * - fix buffer head b_count bugs
201+ * - fix hole merge bug, where tail could be added twice
202+ * - fsync and invalidate buffers on close
203+ * - check hash table for buffers first before using our own
204+ * - add read-ahead
205+ * - fixed several list races
206+ * - fix proc reporting for more than one device
207+ * - change to O_CREAT for creating devices
208+ * - added media_change hook
209+ * - added free buffers config option
210+ * - pkt_lock_tray fails on failed open (and oopses), remove it. unlock
211+ * is done explicitly in pkt_remove dev anyway.
212+ * - added proper elevator insertion (should probably be part of elevator.c)
213+ * - moved kernel thread info to private device, spawn one for each writer
214+ * - added separate buffer list for dirty packet buffers
215+ * - fixed nasty data corruption bug
216+ * - remember to account request even when we don't gather data for it
217+ * - add ioctl to force wakeup of kernel thread (for debug)
218+ * - fixed packet size setting bug on zero detected
219+ * - changed a lot of the proc reporting to be more readable to "humans"
220+ * - set full speed for read-only opens
221+ *
222+ * 0.0.2c (08/09/2000)
223+ * - inc usage count of buffer heads
224+ * - add internal buffer pool to avoid deadlock on oom
225+ * - gather data for as many buffers as we have, before initiating write. this
226+ * allows the laser to stay on longer, giving better performance.
227+ * - fix always busy when tray can't be locked
228+ * - remove request duplication nastiness, inject directly into the target
229+ * - adapted to devfs and elevator changes
230+ * - added proc interface
231+ *
232+ * 0.0.2b (21/06/2000)
233+ * - fix io_request_lock typos (missing '&')
234+ * - grab pkt_sem before invoking pkt_handle_queue
235+ * - SCSI uses queuedata too, mirror that in pd->queuedata (hack)
236+ * - remove SCSI sr debug messages
237+ * - really activate empty block querying (requires cvs UDF, CDRW branch)
238+ * - make sure sync_buffers doesn't consider us, or we can deadlock
239+ * - make sure people don't swap on us (for now ;)
240+ *
241+ * 0.0.2a (19/06/2000)
242+ * - add kpacketd kernel thread to handle actual data gathering
243+ * - pd->pkt_dev is now real device, not just minor
244+ * - add support for super_operations block_empty fn, to query fs for
245+ * unused blocks that don't need reading
246+ * - "cache" blocks that are contained in the UDF file/dir packet
247+ * - rewrite pkt_gather_data to a one-step solution
248+ * - add private pktcdvd elevator
249+ * - shutdown write access to device upon write failure
250+ * - fix off-by-one bug in capacity
251+ * - setup sourceforge project (packet-cd.sourceforge.net)
252+ * - add more blk ioctls to pkt_ioctl
253+ * - set inactive request queue head
254+ * - change panic calls to BUG, better with kdb
255+ * - have pkt_gather_data check correct block size and kill rq if wrong
256+ * - rework locking
257+ * - introduce per-pd queues, simplifies pkt_request
258+ * - store pd in queuedata
259+ *
260+ *************************************************************************/
261+
262+#define VERSION_CODE "v0.0.2p 03/03/2002 Jens Axboe (axboe@suse.de)"
263+
264+#include <linux/config.h>
265+#include <linux/module.h>
266+#include <linux/types.h>
267+#include <linux/kernel.h>
268+#include <linux/slab.h>
269+#include <linux/errno.h>
270+#include <linux/delay.h>
271+#include <linux/locks.h>
272+#include <linux/spinlock.h>
273+#include <linux/interrupt.h>
274+#include <linux/file.h>
275+#include <linux/blk.h>
276+#include <linux/blkpg.h>
277+#include <linux/cdrom.h>
278+#include <linux/ide.h>
279+#include <linux/smp_lock.h>
280+#include <linux/pktcdvd.h>
281+#include <linux/kernel_stat.h>
282+#include <linux/sysrq.h>
283+
284+#include <asm/unaligned.h>
285+#include <asm/uaccess.h>
286+
287+/*
288+ * remove for next version -- for now, disable the mention option in the
289+ * SCSI section
290+ */
291+#if defined(CONFIG_SCSI_DEBUG_QUEUES)
292+#error "Don't compile with 'Enable extra checks in new queueing code' enabled"
293+#endif
294+
295+#define SCSI_IOCTL_SEND_COMMAND 1
296+
297+/*
298+ * 32 buffers of 2048 bytes
299+ */
300+#define PACKET_MAX_SIZE 32
301+
302+#define NEXT_BH(bh, nbh) \
303+ (((bh)->b_rsector + ((bh)->b_size >> 9)) == (nbh)->b_rsector)
304+
305+#define BH_IN_ORDER(b1, b2) \
306+ ((b1)->b_rsector < (b2)->b_rsector)
307+
308+#define CONTIG_BH(b1, b2) \
309+ ((b1)->b_data + (b1)->b_size == (b2)->b_data)
310+
311+#define ZONE(sector, pd) \
312+ (((sector) + ((pd)->offset)) - (((sector) + ((pd)->offset)) & (((pd)->settings.size - 1))))
313+
314+static int *pkt_sizes;
315+static int *pkt_blksize;
316+static int *pkt_readahead;
317+static struct pktcdvd_device *pkt_devs;
318+static struct proc_dir_entry *pkt_proc;
319+static DECLARE_WAIT_QUEUE_HEAD(pd_bh_wait);
320+
321+/*
322+ * a bit of a kludge, but we want to be able to pass both real and packet
323+ * dev and get the right one.
324+ */
325+static inline struct pktcdvd_device *pkt_find_dev(kdev_t dev)
326+{
327+ int i;
328+
329+ for (i = 0; i < MAX_WRITERS; i++)
330+ if (pkt_devs[i].dev == dev || pkt_devs[i].pkt_dev == dev)
331+ return &pkt_devs[i];
332+
333+ return NULL;
334+}
335+
336+/*
337+ * The following functions are the plugins to the ll_rw_blk
338+ * layer and decides whether a given request / buffer head can be
339+ * merged. We differ in a couple of ways from "normal" block
340+ * devices:
341+ *
342+ * - don't merge when the buffer / request crosses a packet block
343+ * boundary
344+ * - merge buffer head even though it can't be added directly to the
345+ * front or back of the list. this gives us better performance, since
346+ * what would otherwise require multiple requests can now be handled
347+ * in one (hole merging)
348+ * - at this point its just writes, reads have already been remapped
349+ *
350+ * The device original merge_ functions are stored in the packet device
351+ * queue (pd->q)
352+ *
353+ */
354+static inline int pkt_do_merge(request_queue_t *q, struct request *rq,
355+ struct buffer_head *bh, int max_segs,
356+ merge_request_fn *merge_fn,
357+ struct pktcdvd_device *pd)
358+{
359+ void *ptr = q->queuedata;
360+ int ret;
361+
362+ if (rq->cmd != WRITE)
363+ BUG();
364+
365+ if (ZONE(rq->sector, pd) != ZONE(bh->b_rsector, pd))
366+ return ELEVATOR_NO_MERGE;
367+
368+ /*
369+ * NOTE: this is done under the io_request_lock/queue_lock, hence
370+ * it is safe
371+ */
372+ q->queuedata = pd->cdrw.queuedata;
373+ ret = merge_fn(q, rq, bh, max_segs);
374+ q->queuedata = ptr;
375+ return ret;
376+}
377+
378+static int pkt_front_merge_fn(request_queue_t *q, struct request *rq,
379+ struct buffer_head *bh, int max_segs)
380+{
381+ struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
382+
383+ return pkt_do_merge(q, rq, bh, max_segs, pd->cdrw.front_merge_fn, pd);
384+}
385+
386+static int pkt_back_merge_fn(request_queue_t *q, struct request *rq,
387+ struct buffer_head *bh, int max_segs)
388+{
389+ struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
390+
391+ return pkt_do_merge(q, rq, bh, max_segs, pd->cdrw.back_merge_fn, pd);
392+}
393+
394+/*
395+ * rules similar to above
396+ */
397+static int pkt_merge_requests_fn(request_queue_t *q, struct request *rq,
398+ struct request *nxt, int max_segs)
399+{
400+ struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev);
401+ struct packet_cdrw *cdrw = &pd->cdrw;
402+ void *ptr = q->queuedata;
403+ int ret;
404+
405+ if (ZONE(rq->sector, pd) != ZONE(nxt->sector + nxt->nr_sectors - 1, pd))
406+ return 0;
407+
408+ q->queuedata = cdrw->queuedata;
409+ ret = cdrw->merge_requests_fn(q, rq, nxt, max_segs);
410+ q->queuedata = ptr;
411+ return ret;
412+}
413+
414+static int pkt_grow_bhlist(struct pktcdvd_device *pd, int count)
415+{
416+ struct packet_cdrw *cdrw = &pd->cdrw;
417+ struct buffer_head *bh;
418+ int i = 0;
419+
420+ VPRINTK("grow_bhlist: count=%d\n", count);
421+
422+ while (i < count) {
423+ bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL);
424+ if (!bh)
425+ break;
426+
427+ bh->b_data = kmalloc(CD_FRAMESIZE, GFP_KERNEL);
428+ if (!bh->b_data) {
429+ kmem_cache_free(bh_cachep, bh);
430+ break;
431+ }
432+ bh->b_page = virt_to_page(bh->b_data);
433+
434+ spin_lock_irq(&pd->lock);
435+ bh->b_pprev = &cdrw->bhlist;
436+ bh->b_next = cdrw->bhlist;
437+ cdrw->bhlist = bh;
438+ spin_unlock_irq(&pd->lock);
439+
440+ bh->b_size = CD_FRAMESIZE;
441+ bh->b_list = PKT_BUF_LIST;
442+ atomic_inc(&cdrw->free_bh);
443+ i++;
444+ }
445+
446+ return i;
447+}
448+
449+static int pkt_shrink_bhlist(struct pktcdvd_device *pd, int count)
450+{
451+ struct packet_cdrw *cdrw = &pd->cdrw;
452+ struct buffer_head *bh;
453+ int i = 0;
454+
455+ VPRINTK("shrink_bhlist: count=%d\n", count);
456+
457+ while ((i < count) && cdrw->bhlist) {
458+ spin_lock_irq(&pd->lock);
459+ bh = cdrw->bhlist;
460+ cdrw->bhlist = bh->b_next;
461+ spin_unlock_irq(&pd->lock);
462+ if (bh->b_list != PKT_BUF_LIST)
463+ BUG();
464+ kfree(bh->b_data);
465+ kmem_cache_free(bh_cachep, bh);
466+ atomic_dec(&cdrw->free_bh);
467+ i++;
468+ }
469+
470+ return i;
471+}
472+
473+static request_queue_t *pkt_get_queue(kdev_t dev)
474+{
475+ struct pktcdvd_device *pd = pkt_find_dev(dev);
476+ if (!pd)
477+ return NULL;
478+ return &pd->cdrw.r_queue;
479+}
480+
481+static void pkt_put_buffer(struct buffer_head *bh)
482+{
483+ struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_dev)];
484+ unsigned long flags;
485+
486+ if (bh->b_list != PKT_BUF_LIST)
487+ return;
488+
489+ bh->b_state = 0;
490+ bh->b_reqnext = NULL;
491+ bh->b_end_io = NULL;
492+
493+ spin_lock_irqsave(&pd->lock, flags);
494+ bh->b_next = pd->cdrw.bhlist;
495+ pd->cdrw.bhlist = bh;
496+ spin_unlock_irqrestore(&pd->lock, flags);
497+ atomic_inc(&pd->cdrw.free_bh);
498+}
499+
500+static inline void __pkt_inject_request(request_queue_t *q, struct request *rq)
501+{
502+ struct list_head *head = &q->queue_head;
503+
504+ VPRINTK("__pkt_inject_request: list_empty == %d, size=%d, cmd=%d\n",
505+ list_empty(&q->queue_head), rq->bh->b_size >> 9, rq->cmd);
506+
507+ if (list_empty(&q->queue_head))
508+ q->plug_device_fn(q, rq->rq_dev);
509+
510+ list_add_tail(&rq->queue, head);
511+}
512+
513+static void pkt_inject_request(request_queue_t *q, struct request *rq)
514+{
515+ spin_lock_irq(&io_request_lock);
516+ __pkt_inject_request(q, rq);
517+ spin_unlock_irq(&io_request_lock);
518+}
519+
520+static inline void __pkt_end_request(struct pktcdvd_device *pd)
521+{
522+ pd->rq = NULL;
523+ clear_bit(PACKET_RQ, &pd->flags);
524+ clear_bit(PACKET_BUSY, &pd->flags);
525+}
526+
527+/*
528+ * io_request_lock must be held and interrupts disabled
529+ */
530+static void pkt_end_request(struct pktcdvd_device *pd)
531+{
532+ unsigned long flags;
533+
534+ spin_lock_irqsave(&pd->lock, flags);
535+ __pkt_end_request(pd);
536+ spin_unlock_irqrestore(&pd->lock, flags);
537+}
538+
539+
540+static inline void __pkt_kill_request(struct request *rq, int uptodate, char *name)
541+{
542+ struct buffer_head *bh = rq->bh, *nbh;
543+
544+ while (bh) {
545+ nbh = bh->b_reqnext;
546+ bh->b_reqnext = NULL;
547+
548+ if (bh->b_end_io) {
549+ bh->b_end_io(bh, uptodate);
550+ } else {
551+ mark_buffer_clean(bh);
552+ mark_buffer_uptodate(bh, uptodate);
553+ unlock_buffer(bh);
554+ }
555+
556+ bh = nbh;
557+ }
558+
559+ end_that_request_last(rq);
560+}
561+
562+
563+void pkt_kill_request(struct pktcdvd_device *pd, struct request *rq, int ok)
564+{
565+ printk("pktcdvd: killing request\n");
566+ spin_lock_irq(&io_request_lock);
567+ __pkt_kill_request(rq, ok, pd->name);
568+ spin_unlock_irq(&io_request_lock);
569+ pkt_end_request(pd);
570+}
571+
572+static void pkt_end_io_read(struct buffer_head *bh, int uptodate)
573+{
574+ if (!uptodate) {
575+ /* Obviously not correct, but it avoids locking up the kernel */
576+ printk("Ignoring read error on sector:%ld\n", bh->b_rsector);
577+ uptodate = 1;
578+ }
579+
580+ mark_buffer_uptodate(bh, uptodate);
581+ unlock_buffer(bh);
582+}
583+
584+#if 0
585+static struct buffer_head *pkt_get_page_hash(unsigned long block)
586+{
587+ struct inode *inode = pkt->pkt_dentry->d_inode;
588+ struct address_space *mapping;
589+ struct buffer_head *bh;
590+ unsigned long index, b;
591+ struct page *page;
592+ int i;
593+
594+ if (!inode) {
595+ printk("pktcdvd: sorry, no inode\n");
596+ return NULL;
597+ }
598+ if (!inode->i_sb) {
599+ printk("pktcdvd: sorry no i_sb\n");
600+ return NULL;
601+ }
602+
603+ mapping = inode->i_mapping;
604+ if (!mapping) {
605+ printk("pktcdvd: sorry no mapping\n");
606+ return NULL;
607+ }
608+
609+ index = block >> (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
610+ page = grab_cache_page(mapping, index);
611+ if (!page) {
612+ printk("pktcdvd: sorry no page found\n");
613+ return NULL;
614+ }
615+
616+ if (!PageLocked(page))
617+ BUG();
618+
619+ if (!page->buffers)
620+ create_empty_buffer(page, inode->i_dev, inode->i_sb->s_blocksize);
621+
622+ i = 0;
623+ b = block;
624+ bh = page->buffers;
625+ do {
626+ if (!buffer_mapped(bh)) {
627+ if (get_block(inode, b, bh, 1))
628+ goto err;
629+
630+ if (buffer_new(bh))
631+ unmap_underlying_metadata(bh);
632+ }
633+ bh = bh->b_this_page;
634+ b++;
635+ } while (bh != page->buffers);
636+
637+ lock_buffer(bh);
638+ bh->b_end_io = pkt_end_io_write_page;
639+ atomic_inc(&bh->b_count);
640+ set_bit(BH_Uptodate, &bh->b_state);
641+ clear_bit(BH_Dirty, &bh->b_state);
642+ return bh;
643+err:
644+ ClearPageUptodate(page);
645+ UnlockPage(page);
646+ return NULL;
647+}
648+#endif
649+
650+/*
651+ * if the buffer is already in the buffer cache, grab it if we can lock
652+ * it down
653+ */
654+static struct buffer_head *pkt_get_hash(kdev_t dev, unsigned long block, int size)
655+{
656+ struct buffer_head *bh = NULL;
657+
658+ bh = get_hash_table(dev, block, size);
659+ if (bh) {
660+ if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
661+ brelse(bh);
662+ if (atomic_set_buffer_clean(bh))
663+ refile_buffer(bh);
664+ SetPageReferenced(bh->b_page);
665+ } else {
666+ brelse(bh);
667+ bh = NULL;
668+ }
669+ }
670+
671+ return bh;
672+}
673+
674+static inline struct buffer_head *__pkt_get_buffer(struct pktcdvd_device *pd,
675+ unsigned long sector)
676+{
677+ struct buffer_head *bh;
678+
679+ if (!atomic_read(&pd->cdrw.free_bh))
680+ BUG();
681+
682+ atomic_dec(&pd->cdrw.free_bh);
683+
684+ spin_lock_irq(&pd->lock);
685+ bh = pd->cdrw.bhlist;
686+ pd->cdrw.bhlist = bh->b_next;
687+ bh->b_next = NULL;
688+ spin_unlock_irq(&pd->lock);
689+
690+ bh->b_next_free = NULL;
691+ bh->b_prev_free = NULL;
692+ bh->b_this_page = NULL;
693+ bh->b_pprev = NULL;
694+ bh->b_reqnext = NULL;
695+
696+ init_waitqueue_head(&bh->b_wait);
697+ atomic_set(&bh->b_count, 1);
698+ bh->b_list = PKT_BUF_LIST;
699+ bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) | (1 << BH_Req);
700+ bh->b_dev = pd->pkt_dev;
701+
702+ return bh;
703+}
704+
705+static void pkt_end_io_write(struct buffer_head *, int);
706+
707+static struct buffer_head *pkt_get_buffer(struct pktcdvd_device *pd,
708+ unsigned long sector, int size)
709+{
710+ unsigned long block = sector / (size >> 9);
711+ struct buffer_head *bh;
712+
713+ VPRINTK("get_buffer: sector=%ld, size=%d\n", sector, size);
714+
715+ bh = pkt_get_hash(pd->pkt_dev, block, size);
716+ if (bh)
717+ pd->stats.bh_cache_hits += (size >> 9);
718+ else
719+ bh = __pkt_get_buffer(pd, sector);
720+
721+ blk_started_io(bh->b_size >> 9);
722+ bh->b_blocknr = block;
723+ bh->b_end_io = pkt_end_io_write;
724+ bh->b_rsector = sector;
725+ bh->b_rdev = pd->dev;
726+ return bh;
727+}
728+
729+/*
730+ * this rq is done -- io_request_lock must be held and interrupts disabled
731+ */
732+static void pkt_rq_end_io(struct pktcdvd_device *pd)
733+{
734+ unsigned long flags;
735+
736+ VPRINTK("pkt_rq_end_io: rq=%p, cmd=%d, q=%p\n", pd->rq, pd->rq->cmd, pd->rq->q);
737+
738+ spin_lock_irqsave(&pd->lock, flags);
739+
740+ /*
741+ * debug checks
742+ */
743+ if (!test_bit(PACKET_RQ, &pd->flags))
744+ printk("pktcdvd: rq_end_io: RQ not set\n");
745+ if (!test_bit(PACKET_BUSY, &pd->flags))
746+ printk("pktcdvd: rq_end_io: BUSY not set\n");
747+
748+ __pkt_end_request(pd);
749+ wake_up(&pd->wqueue);
750+ spin_unlock_irqrestore(&pd->lock, flags);
751+}
752+
753+static inline void pkt_mark_readonly(struct pktcdvd_device *pd, int on)
754+{
755+ if (on)
756+ set_bit(PACKET_READONLY, &pd->flags);
757+ else
758+ clear_bit(PACKET_READONLY, &pd->flags);
759+}
760+
761+static inline void __pkt_end_io_write(struct pktcdvd_device *pd,
762+ struct buffer_head *bh, int uptodate)
763+{
764+ VPRINTK("end_io_write: bh=%ld, uptodate=%d\n", bh->b_blocknr, uptodate);
765+
766+ /*
 767+	 * general Linux bug, no one should clear the BH_Uptodate flag for
768+ * a failed write...
769+ */
770+ if (uptodate)
771+ mark_buffer_uptodate(bh, uptodate);
772+ else {
773+ printk("pktcdvd: %s: WRITE error sector %lu\n", pd->name, bh->b_rsector);
774+#if 0
775+ set_bit(PACKET_RECOVERY, &pd->flags);
776+ wake_up(&pd->wqueue);
777+#endif
778+ }
779+
780+ pd->stats.bh_e++;
781+
782+ atomic_dec(&pd->wrqcnt);
783+ if (atomic_read(&pd->wrqcnt) == 0) {
784+ pkt_rq_end_io(pd);
785+ }
786+
787+ unlock_buffer(bh);
788+}
789+
790+/*
791+ * we use this as our default b_end_io handler, since we need to take
792+ * the entire request off the list if just one of the clusters fail.
793+ * later on this should also talk to UDF about relocating blocks -- for
794+ * now we just drop the rq entirely. when doing the relocating we must also
795+ * lock the bh down to ensure that we can easily reconstruct the write should
796+ * it fail.
797+ */
798+static void pkt_end_io_write(struct buffer_head *bh, int uptodate)
799+{
800+ struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
801+
802+ __pkt_end_io_write(pd, bh, uptodate);
803+ pkt_put_buffer(bh);
804+}
805+
806+static void pkt_end_io_write_stacked(struct buffer_head *bh, int uptodate)
807+{
808+ struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
809+ struct buffer_head *rbh = bh->b_private;
810+
811+ __pkt_end_io_write(pd, bh, uptodate);
812+ rbh->b_end_io(rbh, uptodate);
813+ kmem_cache_free(bh_cachep, bh);
814+ atomic_dec(&pd->bhcnt);
815+}
816+
817+static int pkt_init_rq(struct pktcdvd_device *pd, struct request *rq)
818+{
819+ struct buffer_head *bh;
820+ unsigned int cnt, nr_segments;
821+
822+ cnt = 0;
823+ nr_segments = 1;
824+ bh = rq->bh;
825+ while (bh) {
826+ struct buffer_head *nbh = bh->b_reqnext;
827+
828+ bh->b_rdev = pd->pkt_dev;
829+
830+ /*
831+ * the buffer better be uptodate, mapped, and locked!
832+ */
833+ if (!buffer_uptodate(bh)) {
834+ printk("%lu not uptodate\n", bh->b_rsector);
835+ /*
 836+			 * It is not really the pktcdvd driver's problem if
837+ * someone wants to write stale data.
838+ */
839+ }
840+
841+ if (!buffer_locked(bh) || !buffer_mapped(bh)) {
842+ printk("%lu, state %lx\n", bh->b_rsector, bh->b_state);
843+ BUG();
844+ }
845+
846+ if (nbh) {
847+ if (!CONTIG_BH(bh, nbh))
848+ nr_segments++;
849+
850+ /*
851+ * if this happens, do report
852+ */
853+ if ((bh->b_rsector + (bh->b_size >> 9))!=nbh->b_rsector) {
854+ printk("%lu (%p)-> %lu (%p) (%lu in all)\n",
855+ bh->b_rsector, bh, nbh->b_rsector, nbh,
856+ rq->nr_sectors);
857+ return 1;
858+ }
859+ }
860+
861+ cnt += bh->b_size >> 9;
862+ bh = nbh;
863+ }
864+
865+ rq->nr_segments = rq->nr_hw_segments = nr_segments;
866+
867+ if (cnt != rq->nr_sectors) {
868+ printk("botched request %u (%lu)\n", cnt, rq->nr_sectors);
869+ return 1;
870+ }
871+
872+ return 0;
873+}
874+
875+/*
876+ * really crude stats for now...
877+ */
878+static void pkt_account_rq(struct pktcdvd_device *pd, int read, int written,
879+ int bs)
880+{
881+ pd->stats.bh_s += (written / bs);
882+ pd->stats.secs_w += written;
883+ pd->stats.secs_r += read;
884+}
885+
886+/*
887+ * does request span two packets? 0 == yes, 1 == no
888+ */
889+static int pkt_one_zone(struct pktcdvd_device *pd, struct request *rq)
890+{
891+ if (!pd->settings.size)
892+ return 0;
893+
894+ if (!(rq->cmd & WRITE))
895+ return 1;
896+
897+ return ZONE(rq->sector, pd) == ZONE(rq->sector + rq->nr_sectors -1, pd);
898+}
899+
900+#if defined(CONFIG_CDROM_PKTCDVD_BEMPTY)
901+static void pkt_init_buffer(struct buffer_head *bh)
902+{
903+ set_bit(BH_Uptodate, &bh->b_state);
904+ set_bit(BH_Dirty, &bh->b_state);
905+ memset(bh->b_data, 0, bh->b_size);
906+}
907+
908+static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh)
909+{
910+ struct super_block *sb;
911+ struct super_operations *sop;
912+ unsigned long packet;
913+ int ret;
914+
915+ ret = 0;
916+ if ((sb = get_super(pd->pkt_dev)) == NULL)
917+ goto out;
918+ if ((sop = sb->s_op) == NULL)
919+ goto out;
920+ if (sop->block_empty == NULL)
921+ goto out;
922+
923+ packet = 0;
924+ if (sop->block_empty(sb, bh->b_blocknr, &packet)) {
925+ pkt_init_buffer(pd, bh);
926+ ret = 1;
927+ }
928+
929+out:
930+ return ret;
931+}
932+
933+#else /* defined(CONFIG_CDROM_PKTCDVD_BEMPTY) */
934+
935+static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh)
936+{
937+ return 0;
938+}
939+
940+#endif /* defined(CONFIG_CDROM_PKTCDVD_BEMPTY) */
941+
942+static int pkt_flush_cache(struct pktcdvd_device *pd);
943+
944+static void pkt_flush_writes(struct pktcdvd_device *pd)
945+{
946+ if (pd->unflushed_writes) {
947+ pd->unflushed_writes = 0;
948+ pkt_flush_cache(pd);
949+ }
950+}
951+
952+/*
953+ * basically just does a ll_rw_block for the bhs given to use, but we
954+ * don't return until we have them.
955+ */
956+static void pkt_read_bh(struct pktcdvd_device *pd, struct buffer_head *bh)
957+{
958+ /*
959+ * UDF says it's empty, woohoo
960+ */
961+ if (pkt_sb_empty(pd, bh))
962+ return;
963+
964+ down(&pd->cache_sync_mutex);
965+ pkt_flush_writes(pd);
966+ generic_make_request(READ, bh);
967+ up(&pd->cache_sync_mutex);
968+}
969+
970+static int pkt_index_bhs(struct buffer_head **bhs)
971+{
972+ struct buffer_head *bh;
973+ int index;
974+ int error = 0;
975+
976+ /*
977+ * now finish pending reads and connect the chain of buffers
978+ */
979+ index = 0;
980+ while (index < PACKET_MAX_SIZE) {
981+ bh = bhs[index];
982+
983+ /*
984+ * pin down private buffers (ie, force I/O to complete)
985+ */
986+ if (bh->b_end_io == pkt_end_io_read) {
987+ lock_buffer(bh);
988+ bh->b_end_io = pkt_end_io_write;
989+ }
990+
991+ if (!buffer_locked(bh))
992+ BUG();
993+
994+ if (!buffer_uptodate(bh)) {
995+ printk("pktcdvd: read failure (%s, sec %lu)\n",
996+ kdevname(bh->b_rdev), bh->b_rsector);
997+ error = 1;
998+ }
999+
1000+ /*
1001+ * attach previous
1002+ */
1003+ if (index) {
1004+ struct buffer_head *pbh = bhs[index - 1];
1005+
1006+ if ((pbh->b_rsector + (pbh->b_size >> 9)) != bh->b_rsector) {
1007+ printk("%lu -> %lu\n", pbh->b_rsector, bh->b_rsector);
1008+ index = 0;
1009+ break;
1010+ }
1011+ pbh->b_reqnext = bh;
1012+ }
1013+ index++;
1014+ }
1015+
1016+ if (error)
1017+ return 0;
1018+
1019+ if (index) {
1020+ index--;
1021+ bhs[index]->b_reqnext = NULL;
1022+ }
1023+
1024+ return index;
1025+}
1026+
1027+/*
1028+ * fill in the holes of a request
1029+ *
1030+ * Returns: 0, keep 'em coming -- 1, stop queueing
1031+ */
1032+static int pkt_gather_data(struct pktcdvd_device *pd, struct request *rq)
1033+{
1034+ unsigned long start_s, end_s, sector;
1035+ struct buffer_head *bh;
1036+ unsigned int sectors, index;
1037+ struct buffer_head *bhs[PACKET_MAX_SIZE];
1038+
1039+ memset(bhs, 0, sizeof(bhs));
1040+
1041+ /*
1042+ * all calculations are done with 512 byte sectors
1043+ */
1044+ sectors = pd->settings.size - rq->nr_sectors;
1045+ start_s = rq->sector - (rq->sector & (pd->settings.size - 1));
1046+ end_s = start_s + pd->settings.size;
1047+
1048+ VPRINTK("pkt_gather_data: cmd=%d\n", rq->cmd);
1049+ VPRINTK("need %d sectors for %s\n", sectors, kdevname(pd->dev));
1050+ VPRINTK("from %lu to %lu ", start_s, end_s);
1051+ VPRINTK("(%lu - %lu)\n", rq->bh->b_rsector, rq->bhtail->b_rsector +
1052+ rq->current_nr_sectors);
1053+
1054+ /*
1055+ * first fill-out map of the buffers we have
1056+ */
1057+ bh = rq->bh;
1058+ while (bh) {
1059+ index = (bh->b_rsector & (pd->settings.size - 1)) / (bh->b_size >> 9);
1060+
1061+ bhs[index] = bh;
1062+ bh = bh->b_reqnext;
1063+
1064+ /*
1065+ * make sure to detach from list!
1066+ */
1067+ bhs[index]->b_reqnext = NULL;
1068+ }
1069+
1070+ /*
1071+ * now get buffers for missing blocks, and schedule reads for them
1072+ */
1073+ for (index = 0, sector = start_s; sector < end_s; index++) {
1074+ if (bhs[index]) {
1075+ bh = bhs[index];
1076+ goto next;
1077+ }
1078+
1079+ bh = pkt_get_buffer(pd, sector, CD_FRAMESIZE);
1080+
1081+ bhs[index] = bh;
1082+ rq->nr_sectors += bh->b_size >> 9;
1083+ rq->nr_segments++;
1084+
1085+ if (!buffer_uptodate(bh)) {
1086+ bh->b_end_io = pkt_end_io_read;
1087+ pkt_read_bh(pd, bh);
1088+ }
1089+
1090+ next:
1091+ sector += bh->b_size >> 9;
1092+ }
1093+
1094+ index = pkt_index_bhs(bhs);
1095+#if 0
1096+ if (!index)
1097+ goto kill_it;
1098+#endif
1099+
1100+ rq->bh = bhs[0];
1101+ rq->bhtail = bhs[index];
1102+ rq->buffer = rq->bh->b_data;
1103+ rq->current_nr_sectors = rq->bh->b_size >> 9;
1104+ rq->hard_nr_sectors = rq->nr_sectors;
1105+ rq->sector = rq->hard_sector = start_s;
1106+
1107+ VPRINTK("unlocked last %lu\n", rq->bhtail->b_rsector);
1108+ if (pkt_init_rq(pd, rq)) {
1109+ for (index = 0; index < PACKET_MAX_SIZE; index++) {
1110+ bh = bhs[index];
1111+ printk("[%d] %lu %d (%p -> %p)\n", index, bh->b_rsector,
1112+ bh->b_size, bh, bh->b_reqnext);
1113+ }
1114+ goto kill_it;
1115+ }
1116+
1117+ pkt_account_rq(pd, sectors, rq->nr_sectors, rq->current_nr_sectors);
1118+
1119+ /*
1120+ * sanity check
1121+ */
1122+ if (rq->nr_sectors != pd->settings.size) {
1123+ printk("pktcdvd: request mismatch %lu (should be %u)\n",
1124+ rq->nr_sectors, pd->settings.size);
1125+ BUG();
1126+ }
1127+
1128+ return 0;
1129+
1130+ /*
1131+ * for now, just kill entire request and hope for the best...
1132+ */
1133+kill_it:
1134+ for (index = 0; index < PACKET_MAX_SIZE; index++) {
1135+ bh = bhs[index];
1136+ buffer_IO_error(bh);
1137+ if (bh->b_list == PKT_BUF_LIST)
1138+ pkt_put_buffer(bh);
1139+ }
1140+ end_that_request_last(pd->rq);
1141+ return 1;
1142+}
1143+
1144+/*
1145+ * Returns: 1, keep 'em coming -- 0, wait for wakeup
1146+ */
1147+static int pkt_do_request(struct pktcdvd_device *pd, struct request *rq)
1148+{
1149+ VPRINTK("do_request: bh=%ld, nr_sectors=%ld, size=%d, cmd=%d\n", rq->bh->b_blocknr, rq->nr_sectors, pd->settings.size, rq->cmd);
1150+
1151+ /*
1152+ * perfect match. the merge_* functions have already made sure that
1153+ * a request doesn't cross a packet boundary, so if the sector
1154+ * count matches it's good.
1155+ */
1156+ if (rq->nr_sectors == pd->settings.size) {
1157+ if (pkt_init_rq(pd, rq)) {
1158+ pkt_kill_request(pd, rq, 0);
1159+ return 1;
1160+ }
1161+
1162+ pkt_account_rq(pd, 0, rq->nr_sectors, rq->current_nr_sectors);
1163+ return 0;
1164+ }
1165+
1166+ /*
1167+ * paranoia...
1168+ */
1169+ if (rq->nr_sectors > pd->settings.size) {
1170+ printk("pktcdvd: request too big! BUG! %lu\n", rq->nr_sectors);
1171+ BUG();
1172+ }
1173+
1174+ return pkt_gather_data(pd, rq);
1175+}
1176+
1177+/*
1178+ * recover a failed write, query for relocation if possible
1179+ */
1180+static int pkt_start_recovery(struct pktcdvd_device *pd, struct request *rq)
1181+{
1182+ struct super_block *sb = get_super(pd->pkt_dev);
1183+ struct buffer_head *bhs[PACKET_MAX_SIZE], *bh, *obh;
1184+ unsigned long old_block, new_block, sector;
1185+ int i, sectors;
1186+
1187+ if (!sb || !sb->s_op || !sb->s_op->relocate_blocks)
1188+ goto fail;
1189+
1190+ old_block = (rq->sector & ~(pd->settings.size - 1)) / (rq->bh->b_size >> 9);
1191+ if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
1192+ goto fail;
1193+
1194+ memset(bhs, 0, sizeof(bhs));
1195+ bh = rq->bh;
1196+ while (bh) {
1197+ i = (bh->b_rsector & (pd->settings.size - 1)) / (bh->b_size >> 9);
1198+
1199+ bhs[i] = bh;
1200+ bh = bh->b_reqnext;
1201+ bhs[i]->b_reqnext = NULL;
1202+ }
1203+
1204+ sectors = 0;
1205+ sector = new_block * (rq->bh->b_size >> 9);
1206+ for (i = 0; i < PACKET_MAX_SIZE; i++) {
1207+ bh = bhs[i];
1208+
1209+ /*
1210+ * three cases -->
1211+ * 1) bh is not there at all
1212+ * 2) bh is there and not ours, get a new one and
1213+ * invalidate this block for the future
1214+ * 3) bh is there and ours, just change the sector
1215+ */
1216+ if (!bh) {
1217+ obh = pkt_get_hash(pd->pkt_dev, new_block,CD_FRAMESIZE);
1218+ bh = __pkt_get_buffer(pd, sector);
1219+ if (obh) {
1220+ if (buffer_uptodate(obh)) {
1221+ memcpy(bh->b_data, obh->b_data, obh->b_size);
1222+ set_bit(BH_Uptodate, &bh->b_state);
1223+ }
1224+ unlock_buffer(obh);
1225+ bforget(obh);
1226+ }
1227+ bhs[i] = bh;
1228+ } else if (bh->b_list != PKT_BUF_LIST) {
1229+ bhs[i] = pkt_get_buffer(pd, sector, CD_FRAMESIZE);
1230+ memcpy(bhs[i]->b_data, bh->b_data, CD_FRAMESIZE);
1231+ unlock_buffer(bh);
1232+ bforget(bh);
1233+ bh = bhs[i];
1234+ set_bit(BH_Uptodate, &bh->b_state);
1235+ } else {
1236+ bh->b_rsector = sector;
1237+ bh->b_blocknr = new_block;
1238+ }
1239+
1240+ sector += (bh->b_size >> 9);
1241+ new_block++;
1242+ sectors += (bh->b_size >> 9);
1243+ }
1244+
1245+ i = pkt_index_bhs(bhs);
1246+ if (!i)
1247+ goto fail;
1248+
1249+ rq->bh = bhs[0];
1250+ rq->bhtail = bhs[i];
1251+ rq->buffer = rq->bh->b_data;
1252+ rq->current_nr_sectors = rq->bh->b_size >> 9;
1253+ rq->hard_nr_sectors = rq->nr_sectors = sectors;
1254+ rq->sector = rq->hard_sector = rq->bh->b_rsector;
1255+ rq->errors = 0;
1256+ clear_bit(PACKET_RECOVERY, &pd->flags);
1257+ clear_bit(PACKET_BUSY, &pd->flags);
1258+ return 0;
1259+
1260+fail:
1261+ printk("pktcdvd: rq recovery not possible\n");
1262+ pkt_kill_request(pd, rq, 0);
1263+ clear_bit(PACKET_RECOVERY, &pd->flags);
1264+ return 1;
1265+}
1266+
1267+/*
1268+ * handle the requests that got queued for this writer
1269+ *
1270+ * returns 0 for busy (already doing something), or 1 for queue new one
1271+ *
1272+ */
1273+static int pkt_handle_queue(struct pktcdvd_device *pd, request_queue_t *q)
1274+{
1275+ struct request *rq;
1276+ int ret;
1277+
1278+ VPRINTK("handle_queue\n");
1279+
1280+ /*
1281+ * nothing for us to do
1282+ */
1283+ if (!test_bit(PACKET_RQ, &pd->flags))
1284+ return 1;
1285+
1286+ spin_lock_irq(&pd->lock);
1287+ rq = pd->rq;
1288+ spin_unlock_irq(&pd->lock);
1289+
1290+ if (test_bit(PACKET_RECOVERY, &pd->flags))
1291+ if (pkt_start_recovery(pd, rq))
1292+ return 1;
1293+
1294+ /*
1295+ * already being processed
1296+ */
1297+ if (test_and_set_bit(PACKET_BUSY, &pd->flags))
1298+ return 0;
1299+
1300+ /*
1301+ * nothing to do
1302+ */
1303+ ret = 1;
1304+ if (rq == NULL) {
1305+ printk("handle_queue: pd BUSY+RQ, but no rq\n");
1306+ clear_bit(PACKET_RQ, &pd->flags);
1307+ goto out;
1308+ }
1309+
1310+ /*
1311+ * reads are shipped directly to cd-rom, so they should not
1312+ * pop up here
1313+ */
1314+ if (rq->cmd == READ)
1315+ BUG();
1316+
1317+ if ((rq->current_nr_sectors << 9) != CD_FRAMESIZE) {
1318+ pkt_kill_request(pd, rq, 0);
1319+ goto out;
1320+ }
1321+
1322+ if (!pkt_do_request(pd, rq)) {
1323+ atomic_add(PACKET_MAX_SIZE, &pd->wrqcnt);
1324+ down(&pd->cache_sync_mutex);
1325+ pkt_inject_request(q, rq);
1326+ pd->unflushed_writes = 1;
1327+ up(&pd->cache_sync_mutex);
1328+ return 0;
1329+ }
1330+
1331+out:
1332+ clear_bit(PACKET_BUSY, &pd->flags);
1333+ return ret;
1334+}
1335+
1336+/*
1337+ * kpacketd is woken up, when writes have been queued for one of our
1338+ * registered devices
1339+ */
1340+static int kcdrwd(void *foobar)
1341+{
1342+ struct pktcdvd_device *pd = foobar;
1343+ request_queue_t *q, *my_queue;
1344+
1345+ /*
1346+ * exit_files, mm (move to lazy-tlb, so context switches are come
1347+ * extremely cheap) etc
1348+ */
1349+ daemonize();
1350+
1351+ current->policy = SCHED_OTHER;
1352+ current->nice = -20;
1353+ sprintf(current->comm, pd->name);
1354+
1355+ spin_lock_irq(&current->sigmask_lock);
1356+ siginitsetinv(&current->blocked, sigmask(SIGKILL));
1357+ flush_signals(current);
1358+ spin_unlock_irq(&current->sigmask_lock);
1359+
1360+ q = blk_get_queue(pd->dev);
1361+ my_queue = blk_get_queue(pd->pkt_dev);
1362+
1363+ for (;;) {
1364+ DECLARE_WAITQUEUE(wait, current);
1365+
1366+ add_wait_queue(&pd->wqueue, &wait);
1367+
1368+ /*
1369+ * if PACKET_BUSY is cleared, we can queue
1370+ * another request. otherwise we need to unplug the
1371+ * cd-rom queue and wait for buffers to be flushed
1372+ * (which will then wake us up again when done).
1373+ */
1374+ do {
1375+ pkt_handle_queue(pd, q);
1376+
1377+ set_current_state(TASK_INTERRUPTIBLE);
1378+
1379+ if (test_bit(PACKET_BUSY, &pd->flags))
1380+ break;
1381+
1382+ spin_lock_irq(&io_request_lock);
1383+ if (list_empty(&my_queue->queue_head)) {
1384+ spin_unlock_irq(&io_request_lock);
1385+ break;
1386+ }
1387+ set_current_state(TASK_RUNNING);
1388+
1389+ my_queue->request_fn(my_queue);
1390+ spin_unlock_irq(&io_request_lock);
1391+ } while (1);
1392+
1393+ generic_unplug_device(q);
1394+
1395+ schedule();
1396+ remove_wait_queue(&pd->wqueue, &wait);
1397+
1398+ /*
1399+ * got SIGKILL
1400+ */
1401+ if (signal_pending(current))
1402+ break;
1403+
1404+ }
1405+
1406+ complete_and_exit(&pd->cdrw.thr_compl, 0);
1407+ return 0;
1408+}
1409+
1410+static void pkt_attempt_remerge(struct pktcdvd_device *pd, request_queue_t *q,
1411+ struct request *rq)
1412+{
1413+ struct request *nxt;
1414+
1415+ while (!list_empty(&q->queue_head)) {
1416+ if (rq->nr_sectors == pd->settings.size)
1417+ break;
1418+
1419+ nxt = blkdev_entry_next_request(&q->queue_head);
1420+
1421+ if (ZONE(rq->sector, pd) != ZONE(nxt->sector, pd))
1422+ break;
1423+ else if (rq->sector + rq->nr_sectors > nxt->sector)
1424+ break;
1425+
1426+ rq->nr_sectors = rq->hard_nr_sectors += nxt->nr_sectors;
1427+ rq->bhtail->b_reqnext = nxt->bh;
1428+ rq->bhtail = nxt->bhtail;
1429+ list_del(&nxt->queue);
1430+ blkdev_release_request(nxt);
1431+ }
1432+}
1433+
1434+/*
1435+ * our request function.
1436+ *
1437+ * - reads are just tossed directly to the device, we don't care.
1438+ * - writes, regardless of size, are added as the current pd rq and
1439+ * kcdrwd is woken up to handle it. kcdrwd will also make sure to
1440+ * reinvoke this request handler, once the given request has been
1441+ * processed.
1442+ *
1443+ * Locks: io_request_lock held
1444+ *
1445+ * Notes: all writers have their own queue, so all requests are for the
1446+ * the same device
1447+ */
1448+static void pkt_request(request_queue_t *q)
1449+{
1450+ struct pktcdvd_device *pd = (struct pktcdvd_device *) q->queuedata;
1451+ unsigned long flags;
1452+
1453+ if (list_empty(&q->queue_head))
1454+ return;
1455+
1456+ while (!list_empty(&q->queue_head)) {
1457+ struct request *rq = blkdev_entry_next_request(&q->queue_head);
1458+
1459+ VPRINTK("pkt_request: cmd=%d, rq=%p, rq->sector=%ld, rq->nr_sectors=%ld\n", rq->cmd, rq, rq->sector, rq->nr_sectors);
1460+
1461+ blkdev_dequeue_request(rq);
1462+
1463+ rq->rq_dev = pd->dev;
1464+
1465+ if (rq->cmd == READ)
1466+ BUG();
1467+
1468+ if (test_bit(PACKET_RECOVERY, &pd->flags))
1469+ break;
1470+
1471+ /*
1472+ * paranoia, shouldn't trigger...
1473+ */
1474+ if (!pkt_one_zone(pd, rq)) {
1475+ printk("rq->cmd=%d, rq->sector=%ld, rq->nr_sectors=%ld\n",
1476+ rq->cmd, rq->sector, rq->nr_sectors);
1477+ BUG();
1478+ }
1479+
1480+ pkt_attempt_remerge(pd, q, rq);
1481+
1482+ spin_lock_irqsave(&pd->lock, flags);
1483+
1484+ /*
1485+ * already gathering data for another read. the
1486+ * rfn will be reinvoked once that is done
1487+ */
1488+ if (test_and_set_bit(PACKET_RQ, &pd->flags)) {
1489+ list_add(&rq->queue, &q->queue_head);
1490+ spin_unlock_irqrestore(&pd->lock, flags);
1491+ break;
1492+ }
1493+
1494+ if (pd->rq)
1495+ BUG();
1496+
1497+ pd->rq = rq;
1498+ spin_unlock_irqrestore(&pd->lock, flags);
1499+ break;
1500+ }
1501+ VPRINTK("wake up wait queue\n");
1502+ wake_up(&pd->wqueue);
1503+}
1504+
1505+static void pkt_print_settings(struct pktcdvd_device *pd)
1506+{
1507+ printk("pktcdvd: %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
1508+ printk("%u blocks, ", pd->settings.size >> 2);
1509+ printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
1510+}
1511+
1512+/*
1513+ * A generic sense dump / resolve mechanism should be implemented across
1514+ * all ATAPI + SCSI devices.
1515+ */
1516+static void pkt_dump_sense(struct request_sense *sense)
1517+{
1518+ char *info[9] = { "No sense", "Recovered error", "Not ready",
1519+ "Medium error", "Hardware error", "Illegal request",
1520+ "Unit attention", "Data protect", "Blank check" };
1521+
1522+ if (sense == NULL)
1523+ return;
1524+
1525+ if (sense->sense_key > 8) {
1526+ printk("pktcdvd: sense invalid\n");
1527+ return;
1528+ }
1529+
1530+ printk("pktcdvd: sense category %s ", info[sense->sense_key]);
1531+ printk("asc(%02x), ascq(%02x)\n", sense->asc, sense->ascq);
1532+}
1533+
1534+/*
1535+ * write mode select package based on pd->settings
1536+ */
1537+static int pkt_set_write_settings(struct pktcdvd_device *pd)
1538+{
1539+ struct cdrom_device_info *cdi = pd->cdi;
1540+ struct cdrom_generic_command cgc;
1541+ write_param_page *wp;
1542+ char buffer[128];
1543+ int ret, size;
1544+
1545+ memset(buffer, 0, sizeof(buffer));
1546+ init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
1547+ if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_WRITE_PARMS_PAGE, 0)))
1548+ return ret;
1549+
1550+ size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
1551+ pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
1552+ if (size > sizeof(buffer))
1553+ size = sizeof(buffer);
1554+
1555+ /*
1556+ * now get it all
1557+ */
1558+ init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
1559+ if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_WRITE_PARMS_PAGE, 0)))
1560+ return ret;
1561+
1562+ /*
1563+ * write page is offset header + block descriptor length
1564+ */
1565+ wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
1566+
1567+ wp->fp = pd->settings.fp;
1568+ wp->track_mode = pd->settings.track_mode;
1569+ wp->write_type = pd->settings.write_type;
1570+ wp->data_block_type = pd->settings.block_mode;
1571+
1572+ wp->multi_session = 0;
1573+
1574+#ifdef PACKET_USE_LS
1575+ wp->link_size = 7;
1576+ wp->ls_v = 1;
1577+#endif
1578+
1579+ if (wp->data_block_type == PACKET_BLOCK_MODE1) {
1580+ wp->session_format = 0;
1581+ wp->subhdr2 = 0x20;
1582+ } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
1583+ wp->session_format = 0x20;
1584+ wp->subhdr2 = 8;
1585+#if 0
1586+ wp->mcn[0] = 0x80;
1587+ memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
1588+#endif
1589+ } else {
1590+ /*
1591+ * paranoia
1592+ */
1593+ printk("pktcdvd: write mode wrong %d\n", wp->data_block_type);
1594+ return 1;
1595+ }
1596+ wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
1597+
1598+ cgc.buflen = cgc.cmd[8] = size;
1599+ if ((ret = cdrom_mode_select(cdi, &cgc))) {
1600+ pkt_dump_sense(cgc.sense);
1601+ return ret;
1602+ }
1603+
1604+ pkt_print_settings(pd);
1605+ return 0;
1606+}
1607+
1608+/*
1609+ * 0 -- we can write to this track, 1 -- we can't
1610+ */
1611+static int pkt_good_track(track_information *ti)
1612+{
1613+ /*
1614+ * only good for CD-RW at the moment, not DVD-RW
1615+ */
1616+
1617+ /*
1618+ * FIXME: only for FP
1619+ */
1620+ if (ti->fp == 0)
1621+ return 0;
1622+
1623+ /*
1624+ * "good" settings as per Mt Fuji.
1625+ */
1626+ if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1)
1627+ return 0;
1628+
1629+ if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1)
1630+ return 0;
1631+
1632+ if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1)
1633+ return 0;
1634+
1635+ printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
1636+ return 1;
1637+}
1638+
1639+/*
1640+ * 0 -- we can write to this disc, 1 -- we can't
1641+ */
1642+static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
1643+{
1644+ /*
1645+ * for disc type 0xff we should probably reserve a new track.
1646+ * but i'm not sure, should we leave this to user apps? probably.
1647+ */
1648+ if (di->disc_type == 0xff) {
1649+ printk("pktcdvd: Unknown disc. No track?\n");
1650+ return 1;
1651+ }
1652+
1653+ if (di->disc_type != 0x20 && di->disc_type != 0) {
1654+ printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
1655+ return 1;
1656+ }
1657+
1658+ if (di->erasable == 0) {
1659+ printk("pktcdvd: Disc not erasable\n");
1660+ return 1;
1661+ }
1662+
1663+ if (pd->track_status == PACKET_SESSION_RESERVED) {
1664+ printk("pktcdvd: Can't write to last track (reserved)\n");
1665+ return 1;
1666+ }
1667+
1668+ return 0;
1669+}
1670+
1671+static int pkt_probe_settings(struct pktcdvd_device *pd)
1672+{
1673+ disc_information di;
1674+ track_information ti;
1675+ int ret, track;
1676+
1677+ memset(&di, 0, sizeof(disc_information));
1678+ memset(&ti, 0, sizeof(track_information));
1679+
1680+ if ((ret = cdrom_get_disc_info(pd->dev, &di))) {
1681+ printk("failed get_disc\n");
1682+ return ret;
1683+ }
1684+
1685+ pd->disc_status = di.disc_status;
1686+ pd->track_status = di.border_status;
1687+
1688+ if (pkt_good_disc(pd, &di))
1689+ return -ENXIO;
1690+
1691+ printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
1692+ pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
1693+
1694+ track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
1695+ if ((ret = cdrom_get_track_info(pd->dev, track, 1, &ti))) {
1696+ printk("pktcdvd: failed get_track\n");
1697+ return ret;
1698+ }
1699+
1700+ if (pkt_good_track(&ti)) {
1701+ printk("pktcdvd: can't write to this track\n");
1702+ return -ENXIO;
1703+ }
1704+
1705+ /*
1706+ * we keep packet size in 512 byte units, makes it easier to
1707+ * deal with request calculations.
1708+ */
1709+ pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1710+ if (pd->settings.size == 0) {
1711+ printk("pktcdvd: detected zero packet size!\n");
1712+ pd->settings.size = 128;
1713+ }
1714+ pd->settings.fp = ti.fp;
1715+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
1716+
1717+ if (ti.nwa_v) {
1718+ pd->nwa = be32_to_cpu(ti.next_writable);
1719+ set_bit(PACKET_NWA_VALID, &pd->flags);
1720+ }
1721+
1722+ /*
1723+ * in theory we could use lra on -RW media as well and just zero
1724+ * blocks that haven't been written yet, but in practice that
1725+ * is just a no-go. we'll use that for -R, naturally.
1726+ */
1727+ if (ti.lra_v) {
1728+ pd->lra = be32_to_cpu(ti.last_rec_address);
1729+ set_bit(PACKET_LRA_VALID, &pd->flags);
1730+ } else {
1731+ pd->lra = 0xffffffff;
1732+ set_bit(PACKET_LRA_VALID, &pd->flags);
1733+ }
1734+
1735+ /*
1736+ * fine for now
1737+ */
1738+ pd->settings.link_loss = 7;
1739+ pd->settings.write_type = 0; /* packet */
1740+ pd->settings.track_mode = ti.track_mode;
1741+
1742+ /*
1743+ * mode1 or mode2 disc
1744+ */
1745+ switch (ti.data_mode) {
1746+ case PACKET_MODE1:
1747+ pd->settings.block_mode = PACKET_BLOCK_MODE1;
1748+ break;
1749+ case PACKET_MODE2:
1750+ pd->settings.block_mode = PACKET_BLOCK_MODE2;
1751+ break;
1752+ default:
1753+ printk("pktcdvd: unknown data mode\n");
1754+ return 1;
1755+ }
1756+ return 0;
1757+}
1758+
1759+/*
1760+ * enable/disable write caching on drive
1761+ */
1762+static int pkt_write_caching(struct pktcdvd_device *pd, int set)
1763+{
1764+ struct cdrom_generic_command cgc;
1765+ unsigned char buf[64];
1766+ int ret;
1767+
1768+ memset(buf, 0, sizeof(buf));
1769+ init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1770+ cgc.buflen = pd->mode_offset + 12;
1771+
1772+ /*
1773+ * caching mode page might not be there, so quiet this command
1774+ */
1775+ cgc.quiet = 1;
1776+
1777+ if ((ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_WCACHING_PAGE, 0)))
1778+ return ret;
1779+
1780+ buf[pd->mode_offset + 10] |= (!!set << 2);
1781+
1782+ cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
1783+ ret = cdrom_mode_select(pd->cdi, &cgc);
1784+ if (ret)
1785+ printk("pktcdvd: write caching control failed\n");
1786+ else if (!ret && set)
1787+ printk("pktcdvd: enabled write caching on %s\n", pd->name);
1788+ return ret;
1789+}
1790+
1791+/*
1792+ * flush the drive cache to media
1793+ */
1794+static int pkt_flush_cache(struct pktcdvd_device *pd)
1795+{
1796+ struct cdrom_generic_command cgc;
1797+
1798+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1799+ cgc.cmd[0] = GPCMD_FLUSH_CACHE;
1800+ cgc.quiet = 1;
1801+ cgc.timeout = 60*HZ;
1802+
1803+ /*
1804+ * the IMMED bit -- we default to not setting it, although that
1805+ * would allow a much faster close, this is safer
1806+ */
1807+#if 0
1808+ cgc.cmd[1] = 1 << 1;
1809+#endif
1810+ return pd->cdi->ops->generic_packet(pd->cdi, &cgc);
1811+}
1812+
1813+/*
1814+ * Returns drive current write speed
1815+ */
1816+static int pkt_get_speed(struct pktcdvd_device *pd)
1817+{
1818+ struct cdrom_generic_command cgc;
1819+ unsigned char buf[64];
1820+ int ret, offset;
1821+
1822+ memset(buf, 0, sizeof(buf));
1823+ init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
1824+
1825+ ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1826+ if (ret) {
1827+ cgc.buflen = pd->mode_offset + buf[pd->mode_offset + 9] + 2 +
1828+ sizeof(struct mode_page_header);
1829+ ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1830+ if (ret)
1831+ return ret;
1832+ }
1833+
1834+ offset = pd->mode_offset + 26;
1835+ pd->speed = ((buf[offset] << 8) | buf[offset + 1]) / 0xb0;
1836+ return 0;
1837+}
1838+
1839+/*
1840+ * speed is given as the normal factor, e.g. 4 for 4x
1841+ */
1842+static int pkt_set_speed(struct pktcdvd_device *pd, unsigned speed)
1843+{
1844+ struct cdrom_generic_command cgc;
1845+ unsigned read_speed;
1846+
1847+ /*
1848+ * we set read and write time so that read spindle speed is one and
1849+ * a half as fast as write. although a drive can typically read much
1850+ * faster than write, this minimizes the spin up/down when we write
1851+ * and gather data. maybe 1/1 factor is faster, needs a bit of testing.
1852+ */
1853+ speed = speed * 0xb0;
1854+ read_speed = (speed * 3) >> 1;
1855+ read_speed = min_t(unsigned, read_speed, 0xffff);
1856+
1857+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1858+ cgc.cmd[0] = 0xbb;
1859+ cgc.cmd[2] = (read_speed >> 8) & 0xff;
1860+ cgc.cmd[3] = read_speed & 0xff;
1861+ cgc.cmd[4] = (speed >> 8) & 0xff;
1862+ cgc.cmd[5] = speed & 0xff;
1863+
1864+ return pd->cdi->ops->generic_packet(pd->cdi, &cgc);
1865+}
1866+
1867+/*
1868+ * Give me full power, Captain
1869+ */
1870+static int pkt_adjust_speed(struct pktcdvd_device *pd, int speed)
1871+{
1872+ disc_information dummy;
1873+ int ret;
1874+
1875+ /*
1876+ * FIXME: do proper unified cap page, also, this isn't proper
1877+ * Mt Fuji, but I think we can safely assume all drives support
1878+ * it. A hell of a lot more than support the GET_PERFORMANCE
1879+ * command (besides, we also use the old set speed command,
1880+ * not the streaming feature).
1881+ */
1882+ if ((ret = pkt_set_speed(pd, speed)))
1883+ return ret;
1884+
1885+ /*
1886+ * just do something with the disc -- next read will contain the
1887+ * maximum speed with this media
1888+ */
1889+ if ((ret = cdrom_get_disc_info(pd->dev, &dummy)))
1890+ return ret;
1891+
1892+ if ((ret = pkt_get_speed(pd))) {
1893+ printk("pktcdvd: failed get speed\n");
1894+ return ret;
1895+ }
1896+
1897+ DPRINTK("pktcdvd: speed (R/W) %u/%u\n", (pd->speed * 3) / 2, pd->speed);
1898+ return 0;
1899+}
1900+
1901+#if 0
1902+static int pkt_track_capacity(struct pktcdvd_device *pd)
1903+{
1904+ disc_information di;
1905+ track_information ti;
1906+ int l_track, i, ret;
1907+ unsigned long size = 0;
1908+
1909+ memset(&di, 0, sizeof(disc_information));
1910+ memset(&ti, 0, sizeof(track_information));
1911+
1912+ if ((ret = cdrom_get_disc_info(pd->dev, &di))) {
1913+ DPRINTK("failed get_disc\n");
1914+ return ret;
1915+ }
1916+
1917+ l_track = di.last_track_lsb | di.last_track_msb >> 8;
1918+ DPRINTK("pktcdvd: last track %d\n", l_track);
1919+ for (i = di.n_first_track; i <= l_track; i++) {
1920+ if ((ret = cdrom_get_track_info(pd->dev, i, 1, &ti))) {
1921+ DPRINTK("pktcdvd: failed get_track\n");
1922+ return ret;
1923+ }
1924+ size += be32_to_cpu(ti.track_size);
1925+ }
1926+ pkt_sizes[MINOR(pd->pkt_dev)] = size << 1;
1927+ return 0;
1928+}
1929+
1930+static int pkt_set_capacity(struct pktcdvd_device *pd)
1931+{
1932+ struct cdrom_generic_command cgc;
1933+ struct cdrom_device_info *cdi = pd->cdi;
1934+ struct cdvd_capacity cap;
1935+ int ret;
1936+
1937+ init_cdrom_command(&cgc, &cap, sizeof(cap));
1938+ cgc.cmd[0] = GPCMD_READ_CDVD_CAPACITY;
1939+ if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
1940+ return ret;
1941+
1942+ /*
1943+ * We should probably give up if read capacity fails, since then
1944+ * then disc is not ready to be written to -- for now I use
1945+ * raw devices and this is fine.
1946+ */
1947+ pkt_sizes[MINOR(pd->pkt_dev)] = be32_to_cpu(cap.lba) << 1;
1948+ return 0;
1949+}
1950+#endif
1951+
1952+static int pkt_open_write(struct pktcdvd_device *pd)
1953+{
1954+ int ret;
1955+
1956+ if ((ret = pkt_probe_settings(pd))) {
1957+ DPRINTK("pktcdvd: %s failed probe\n", pd->name);
1958+ return -EIO;
1959+ }
1960+
1961+ if ((ret = pkt_set_write_settings(pd))) {
1962+ DPRINTK("pktcdvd: %s failed saving write settings\n", pd->name);
1963+ return -EIO;
1964+ }
1965+
1966+ (void) pkt_write_caching(pd, USE_WCACHING);
1967+
1968+ if ((ret = pkt_adjust_speed(pd, 16))) {
1969+ DPRINTK("pktcdvd: %s couldn't set write speed\n", pd->name);
1970+ return -EIO;
1971+ }
1972+ return 0;
1973+}
1974+
1975+/*
1976+ * called at open time.
1977+ */
1978+static int pkt_open_dev(struct pktcdvd_device *pd, int write)
1979+{
1980+ int ret;
1981+ long lba;
1982+
1983+ if (!pd->dev)
1984+ return -ENXIO;
1985+
1986+ pd->bdev = bdget(kdev_t_to_nr(pd->dev));
1987+ if (!pd->bdev) {
1988+ printk("pktcdvd: can't find cdrom block device\n");
1989+ return -ENXIO;
1990+ }
1991+
1992+ if ((ret = blkdev_get(pd->bdev, FMODE_READ, 0, BDEV_FILE))) {
1993+ pd->bdev = NULL;
1994+ return ret;
1995+ }
1996+
1997+ if ((ret = cdrom_get_last_written(pd->dev, &lba))) {
1998+ printk("pktcdvd: cdrom_get_last_written failed\n");
1999+ return ret;
2000+ }
2001+
2002+ pkt_sizes[MINOR(pd->pkt_dev)] = lba << 1;
2003+
2004+ if (write) {
2005+ if ((ret = pkt_open_write(pd)))
2006+ return ret;
2007+ pkt_mark_readonly(pd, 0);
2008+ } else {
2009+ (void) pkt_adjust_speed(pd, 0xff);
2010+ pkt_mark_readonly(pd, 1);
2011+ }
2012+
2013+ if (write)
2014+ printk("pktcdvd: %lukB available on disc\n", lba << 1);
2015+
2016+ return 0;
2017+}
2018+
2019+/*
2020+ * called when the device is closed. makes sure that the device flushes
2021+ * the internal cache before we close.
2022+ */
2023+static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2024+{
2025+ atomic_dec(&pd->refcnt);
2026+ if (atomic_read(&pd->refcnt) > 0)
2027+ return;
2028+
2029+ fsync_dev(pd->pkt_dev);
2030+
2031+ if (flush && pkt_flush_cache(pd))
2032+ DPRINTK("pktcdvd: %s not flushing cache\n", pd->name);
2033+
2034+ if (pd->bdev) {
2035+ blkdev_put(pd->bdev, BDEV_FILE);
2036+ pd->bdev = NULL;
2037+ }
2038+}
2039+
2040+static int pkt_open(struct inode *inode, struct file *file)
2041+{
2042+ struct pktcdvd_device *pd = NULL;
2043+ int ret;
2044+
2045+ VPRINTK("pktcdvd: entering open\n");
2046+
2047+ if (MINOR(inode->i_rdev) >= MAX_WRITERS) {
2048+ printk("pktcdvd: max %d writers supported\n", MAX_WRITERS);
2049+ ret = -ENODEV;
2050+ goto out;
2051+ }
2052+
2053+ /*
2054+ * either device is not configured, or pktsetup is old and doesn't
2055+ * use O_CREAT to create device
2056+ */
2057+ pd = &pkt_devs[MINOR(inode->i_rdev)];
2058+ if (!pd->dev && !(file->f_flags & O_CREAT)) {
2059+ VPRINTK("pktcdvd: not configured and O_CREAT not set\n");
2060+ ret = -ENXIO;
2061+ goto out;
2062+ }
2063+
2064+ atomic_inc(&pd->refcnt);
2065+ if (atomic_read(&pd->refcnt) > 1) {
2066+ if (file->f_mode & FMODE_WRITE) {
2067+ VPRINTK("pktcdvd: busy open for write\n");
2068+ ret = -EBUSY;
2069+ goto out_dec;
2070+ }
2071+
2072+ /*
2073+ * Not first open, everything is already set up
2074+ */
2075+ return 0;
2076+ }
2077+
2078+ if (((file->f_flags & O_ACCMODE) != O_RDONLY) || !(file->f_flags & O_CREAT)) {
2079+ if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) {
2080+ ret = -EIO;
2081+ goto out_dec;
2082+ }
2083+ }
2084+
2085+ /*
2086+ * needed here as well, since ext2 (among others) may change
2087+ * the blocksize at mount time
2088+ */
2089+ set_blocksize(pd->pkt_dev, CD_FRAMESIZE);
2090+ return 0;
2091+
2092+out_dec:
2093+ atomic_dec(&pd->refcnt);
2094+ if (atomic_read(&pd->refcnt) == 0) {
2095+ if (pd->bdev) {
2096+ blkdev_put(pd->bdev, BDEV_FILE);
2097+ pd->bdev = NULL;
2098+ }
2099+ }
2100+out:
2101+ VPRINTK("pktcdvd: failed open (%d)\n", ret);
2102+ return ret;
2103+}
2104+
2105+static int pkt_close(struct inode *inode, struct file *file)
2106+{
2107+ struct pktcdvd_device *pd = &pkt_devs[MINOR(inode->i_rdev)];
2108+ int ret = 0;
2109+
2110+ if (pd->dev) {
2111+ int flush = !test_bit(PACKET_READONLY, &pd->flags);
2112+ pkt_release_dev(pd, flush);
2113+ }
2114+
2115+ return ret;
2116+}
2117+
2118+/*
2119+ * pktcdvd i/o elevator parts
2120+ */
2121+static inline int pkt_bh_rq_ordered(struct buffer_head *bh, struct request *rq,
2122+ struct list_head *head)
2123+{
2124+ struct list_head *next;
2125+ struct request *next_rq;
2126+
2127+ next = rq->queue.next;
2128+ if (next == head)
2129+ return 0;
2130+
2131+ next_rq = blkdev_entry_to_request(next);
2132+ if (next_rq->rq_dev != rq->rq_dev)
2133+ return bh->b_rsector > rq->sector;
2134+
2135+ if (bh->b_rsector < next_rq->sector && bh->b_rsector > rq->sector)
2136+ return 1;
2137+
2138+ if (next_rq->sector > rq->sector)
2139+ return 0;
2140+
2141+ if (bh->b_rsector > rq->sector || bh->b_rsector < next_rq->sector)
2142+ return 1;
2143+
2144+ return 0;
2145+}
2146+
2147+static int pkt_elevator_merge(request_queue_t *q, struct request **req,
2148+ struct list_head *head,
2149+ struct buffer_head *bh, int rw,
2150+ int max_sectors)
2151+{
2152+ struct list_head *entry = &q->queue_head;
2153+ unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;
2154+
2155+ if (bh->b_reqnext)
2156+ BUG();
2157+
2158+ VPRINTK("pkt_elevator_merge: rw=%d, ms=%d, bh=%lu, dev=%d\n", rw, max_sectors, bh->b_rsector, bh->b_rdev);
2159+
2160+ while ((entry = entry->prev) != head) {
2161+ struct request *__rq = blkdev_entry_to_request(entry);
2162+ if (__rq->waiting)
2163+ continue;
2164+ if (__rq->rq_dev != bh->b_rdev)
2165+ continue;
2166+ if (!*req && pkt_bh_rq_ordered(bh, __rq, &q->queue_head))
2167+ *req = __rq;
2168+ if (__rq->cmd != rw)
2169+ continue;
2170+ if (__rq->nr_sectors + count > max_sectors)
2171+ continue;
2172+ if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
2173+ ret = ELEVATOR_BACK_MERGE;
2174+ *req = __rq;
2175+ break;
2176+ } else if (__rq->sector - count == bh->b_rsector) {
2177+ ret = ELEVATOR_FRONT_MERGE;
2178+ *req = __rq;
2179+ break;
2180+ }
2181+#if 0 /* makes sense, chance of two matches probably slim */
2182+ else if (*req)
2183+ break;
2184+#endif
2185+ }
2186+ VPRINTK("*req=%p, ret=%d\n", *req, ret);
2187+
2188+ return ret;
2189+}
2190+
2191+static int pkt_make_request(request_queue_t *q, int rw, struct buffer_head *bh)
2192+{
2193+ struct pktcdvd_device *pd;
2194+ struct buffer_head *new_bh;
2195+ int pending;
2196+
2197+ if (MINOR(bh->b_rdev) >= MAX_WRITERS) {
2198+ printk("pktcdvd: %s out of range\n", kdevname(bh->b_rdev));
2199+ goto end_io;
2200+ }
2201+
2202+ pd = &pkt_devs[MINOR(bh->b_rdev)];
2203+ if (!pd->dev) {
2204+ printk("pktcdvd: request received for non-active pd\n");
2205+ goto end_io;
2206+ }
2207+
2208+ /*
2209+ * quick remap a READ
2210+ */
2211+ if (rw == READ || rw == READA) {
2212+ down(&pd->cache_sync_mutex);
2213+ pkt_flush_writes(pd);
2214+ bh->b_rdev = pd->dev;
2215+ generic_make_request(rw, bh);
2216+ up(&pd->cache_sync_mutex);
2217+ return 0;
2218+ }
2219+
2220+ if (!(rw & WRITE))
2221+ BUG();
2222+
2223+ if (test_bit(PACKET_READONLY, &pd->flags)) {
2224+ printk("pktcdvd: WRITE for ro device %s (%lu)\n",
2225+ pd->name, bh->b_rsector);
2226+ goto end_io;
2227+ }
2228+
2229+ VPRINTK("pkt_make_request: bh:%p block:%ld size:%d\n",
2230+ bh, bh->b_blocknr, bh->b_size);
2231+
2232+ if (bh->b_size != CD_FRAMESIZE) {
2233+ printk("pktcdvd: wrong bh size\n");
2234+ goto end_io;
2235+ }
2236+
2237+ /*
2238+ * should be deadlock safe, since if we end up sleeping progress
2239+ * is guaranteed (there are pd->bhcnt buffers in flight)
2240+ */
2241+ do {
2242+ new_bh = kmem_cache_alloc(bh_cachep, GFP_NOIO);
2243+ if (new_bh)
2244+ break;
2245+
2246+ pending = atomic_read(&pd->bhcnt) - 8 * PACKET_MAX_SIZE;
2247+ if (pending < 0)
2248+ pending = 0;
2249+
2250+ wait_event(pd_bh_wait, atomic_read(&pd->bhcnt) <= pending);
2251+ } while (1);
2252+
2253+ atomic_inc(&pd->bhcnt);
2254+ new_bh->b_size = bh->b_size;
2255+ new_bh->b_list = PKT_BUF_LIST + 1;
2256+ new_bh->b_dev = bh->b_dev;
2257+ atomic_set(&new_bh->b_count, 1);
2258+ new_bh->b_rdev = bh->b_rdev;
2259+ new_bh->b_state = bh->b_state;
2260+ new_bh->b_page = bh->b_page;
2261+ new_bh->b_data = bh->b_data;
2262+ new_bh->b_private = bh;
2263+ new_bh->b_end_io = pkt_end_io_write_stacked;
2264+ new_bh->b_rsector = bh->b_rsector;
2265+
2266+ return pd->make_request_fn(q, rw, new_bh);
2267+
2268+end_io:
2269+ buffer_IO_error(bh);
2270+ return 0;
2271+}
2272+
2273+static void show_requests(request_queue_t *q)
2274+{
2275+ struct list_head *entry;
2276+
2277+ spin_lock_irq(&io_request_lock);
2278+
2279+ list_for_each(entry, &q->queue_head) {
2280+ struct request *rq = blkdev_entry_to_request(entry);
2281+ int zone = rq->sector & ~127;
2282+ int hole;
2283+
2284+ hole = 0;
2285+ if ((rq->sector + rq->nr_sectors - (rq->bhtail->b_size >> 9))
2286+ != rq->bhtail->b_rsector)
2287+ hole = 1;
2288+
2289+ printk("rq: cmd %d, sector %lu (-> %lu), zone %u, hole %d, nr_sectors %lu\n", rq->cmd, rq->sector, rq->sector + rq->nr_sectors - 1, zone, hole, rq->nr_sectors);
2290+ }
2291+
2292+ spin_unlock_irq(&io_request_lock);
2293+}
2294+
2295+static void sysrq_handle_show_requests(int key, struct pt_regs *pt_regs,
2296+ struct kbd_struct *kbd, struct tty_struct *tty)
2297+{
2298+ /*
2299+ * quick hack to show pending requests in /dev/pktcdvd0 queue
2300+ */
2301+ queue_proc *qp = blk_dev[PACKET_MAJOR].queue;
2302+ if (qp) {
2303+ request_queue_t *q = qp(MKDEV(PACKET_MAJOR, 0));
2304+ if (q)
2305+ show_requests(q);
2306+ }
2307+}
2308+static struct sysrq_key_op sysrq_show_requests_op = {
2309+ handler: sysrq_handle_show_requests,
2310+ help_msg: "showreQuests",
2311+ action_msg: "Show requests",
2312+};
2313+
2314+static void pkt_init_queue(struct pktcdvd_device *pd)
2315+{
2316+ request_queue_t *q = &pd->cdrw.r_queue;
2317+
2318+ blk_init_queue(q, pkt_request);
2319+ elevator_init(&q->elevator, ELEVATOR_PKTCDVD);
2320+ pd->make_request_fn = q->make_request_fn;
2321+ blk_queue_make_request(q, pkt_make_request);
2322+ blk_queue_headactive(q, 0);
2323+ q->front_merge_fn = pkt_front_merge_fn;
2324+ q->back_merge_fn = pkt_back_merge_fn;
2325+ q->merge_requests_fn = pkt_merge_requests_fn;
2326+ q->queuedata = pd;
2327+}
2328+
2329+static int pkt_proc_device(struct pktcdvd_device *pd, char *buf)
2330+{
2331+ char *b = buf, *msg;
2332+ struct list_head *foo;
2333+ int i;
2334+
2335+ b += sprintf(b, "\nWriter %s (%s):\n", pd->name, kdevname(pd->dev));
2336+
2337+ b += sprintf(b, "\nSettings:\n");
2338+ b += sprintf(b, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
2339+
2340+ if (pd->settings.write_type == 0)
2341+ msg = "Packet";
2342+ else
2343+ msg = "Unknown";
2344+ b += sprintf(b, "\twrite type:\t\t%s\n", msg);
2345+
2346+ b += sprintf(b, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
2347+ b += sprintf(b, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
2348+
2349+ b += sprintf(b, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
2350+
2351+ if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
2352+ msg = "Mode 1";
2353+ else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
2354+ msg = "Mode 2";
2355+ else
2356+ msg = "Unknown";
2357+ b += sprintf(b, "\tblock mode:\t\t%s\n", msg);
2358+
2359+ b += sprintf(b, "\nStatistics:\n");
2360+ b += sprintf(b, "\tbuffers started:\t%lu\n", pd->stats.bh_s);
2361+ b += sprintf(b, "\tbuffers ended:\t\t%lu\n", pd->stats.bh_e);
2362+ b += sprintf(b, "\tsectors written:\t%lu\n", pd->stats.secs_w);
2363+ b += sprintf(b, "\tsectors read:\t\t%lu\n", pd->stats.secs_r);
2364+ b += sprintf(b, "\tbuffer cache hits:\t%lu\n", pd->stats.bh_cache_hits);
2365+ b += sprintf(b, "\tpage cache hits:\t%lu\n", pd->stats.page_cache_hits);
2366+
2367+ b += sprintf(b, "\nMisc:\n");
2368+ b += sprintf(b, "\treference count:\t%d\n", atomic_read(&pd->refcnt));
2369+ b += sprintf(b, "\tflags:\t\t\t0x%lx\n", pd->flags);
2370+ b += sprintf(b, "\twrite speed:\t\t%ukB/s\n", pd->speed * 150);
2371+ b += sprintf(b, "\tstart offset:\t\t%lu\n", pd->offset);
2372+ b += sprintf(b, "\tmode page offset:\t%u\n", pd->mode_offset);
2373+
2374+ b += sprintf(b, "\nQueue state:\n");
2375+ b += sprintf(b, "\tfree buffers:\t\t%u\n", atomic_read(&pd->cdrw.free_bh));
2376+ b += sprintf(b, "\trequest active:\t\t%s\n", pd->rq ? "yes" : "no");
2377+ b += sprintf(b, "\twrite rq depth:\t\t%d\n", atomic_read(&pd->wrqcnt));
2378+
2379+ spin_lock_irq(&io_request_lock);
2380+ i = 0;
2381+ list_for_each(foo, &pd->cdrw.r_queue.queue_head)
2382+ i++;
2383+ spin_unlock_irq(&io_request_lock);
2384+ b += sprintf(b, "\tqueue requests:\t\t%u\n", i);
2385+
2386+ return b - buf;
2387+}
2388+
2389+static int pkt_read_proc(char *page, char **start, off_t off, int count,
2390+ int *eof, void *data)
2391+{
2392+ struct pktcdvd_device *pd = data;
2393+ char *buf = page;
2394+ int len;
2395+
2396+ len = pkt_proc_device(pd, buf);
2397+ buf += len;
2398+
2399+ if (len <= off + count)
2400+ *eof = 1;
2401+
2402+ *start = page + off;
2403+ len -= off;
2404+ if (len > count)
2405+ len = count;
2406+ if (len < 0)
2407+ len = 0;
2408+
2409+ return len;
2410+}
2411+
2412+static int pkt_new_dev(struct pktcdvd_device *pd, kdev_t dev)
2413+{
2414+ struct cdrom_device_info *cdi;
2415+ request_queue_t *q;
2416+ int i;
2417+
2418+ for (i = 0; i < MAX_WRITERS; i++) {
2419+ if (pkt_devs[i].dev == dev) {
2420+ printk("pktcdvd: %s already setup\n", kdevname(dev));
2421+ return -EBUSY;
2422+ }
2423+ }
2424+
2425+ for (i = 0; i < MAX_WRITERS; i++)
2426+ if (pd == &pkt_devs[i])
2427+ break;
2428+
2429+ if (i == MAX_WRITERS) {
2430+ printk("pktcdvd: max %d writers supported\n", MAX_WRITERS);
2431+ return -ENXIO;
2432+ }
2433+
2434+ cdi = cdrom_find_device(dev);
2435+ if (cdi == NULL) {
2436+ printk("pktcdvd: %s is not a CD-ROM\n", kdevname(dev));
2437+ return -ENXIO;
2438+ }
2439+
2440+ MOD_INC_USE_COUNT;
2441+
2442+ memset(pd, 0, sizeof(struct pktcdvd_device));
2443+ atomic_set(&pd->cdrw.free_bh, 0);
2444+
2445+ spin_lock_init(&pd->lock);
2446+ if (pkt_grow_bhlist(pd, PACKET_MAX_SIZE) < PACKET_MAX_SIZE) {
2447+ MOD_DEC_USE_COUNT;
2448+ printk("pktcdvd: not enough memory for buffers\n");
2449+ return -ENOMEM;
2450+ }
2451+
2452+ set_blocksize(dev, CD_FRAMESIZE);
2453+ pd->cdi = cdi;
2454+ pd->dev = dev;
2455+ pd->bdev = NULL;
2456+ pd->pkt_dev = MKDEV(PACKET_MAJOR, i);
2457+ sprintf(pd->name, "pktcdvd%d", i);
2458+ atomic_set(&pd->refcnt, 0);
2459+ atomic_set(&pd->wrqcnt, 0);
2460+ atomic_set(&pd->bhcnt, 0);
2461+ init_MUTEX(&pd->cache_sync_mutex);
2462+ pd->unflushed_writes = 0;
2463+ init_waitqueue_head(&pd->wqueue);
2464+ init_completion(&pd->cdrw.thr_compl);
2465+
2466+ /*
2467+ * store device merge functions (SCSI uses their own to build
2468+ * scatter-gather tables)
2469+ */
2470+ q = blk_get_queue(dev);
2471+ spin_lock_irq(&io_request_lock);
2472+ pkt_init_queue(pd);
2473+ pd->cdrw.front_merge_fn = q->front_merge_fn;
2474+ pd->cdrw.back_merge_fn = q->back_merge_fn;
2475+ pd->cdrw.merge_requests_fn = q->merge_requests_fn;
2476+ pd->cdrw.queuedata = q->queuedata;
2477+ spin_unlock_irq(&io_request_lock);
2478+
2479+ pd->cdrw.pid = kernel_thread(kcdrwd, pd, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
2480+ if (pd->cdrw.pid < 0) {
2481+ MOD_DEC_USE_COUNT;
2482+ printk("pktcdvd: can't start kernel thread\n");
2483+ blk_cleanup_queue(&pd->cdrw.r_queue);
2484+ pkt_shrink_bhlist(pd, PACKET_MAX_SIZE);
2485+ memset(pd, 0, sizeof(*pd));
2486+ return -EBUSY;
2487+ }
2488+
2489+ create_proc_read_entry(pd->name, 0, pkt_proc, pkt_read_proc, pd);
2490+	DPRINTK("pktcdvd: writer %s successfully registered\n", cdi->name);
2491+ return 0;
2492+}
2493+
2494+/*
2495+ * arg contains file descriptor of CD-ROM device.
2496+ */
2497+static int pkt_setup_dev(struct pktcdvd_device *pd, unsigned int arg)
2498+{
2499+ struct inode *inode;
2500+ struct file *file;
2501+ int ret;
2502+
2503+ if ((file = fget(arg)) == NULL) {
2504+ printk("pktcdvd: bad file descriptor passed\n");
2505+ return -EBADF;
2506+ }
2507+
2508+ ret = -EINVAL;
2509+ if ((inode = file->f_dentry->d_inode) == NULL) {
2510+ printk("pktcdvd: huh? file descriptor contains no inode?\n");
2511+ goto out;
2512+ }
2513+ ret = -ENOTBLK;
2514+ if (!S_ISBLK(inode->i_mode)) {
2515+ printk("pktcdvd: device is not a block device (duh)\n");
2516+ goto out;
2517+ }
2518+ ret = -EROFS;
2519+ if (IS_RDONLY(inode)) {
2520+ printk("pktcdvd: Can't write to read-only dev\n");
2521+ goto out;
2522+ }
2523+ if ((ret = pkt_new_dev(pd, inode->i_rdev))) {
2524+ printk("pktcdvd: all booked up\n");
2525+ goto out;
2526+ }
2527+
2528+ atomic_inc(&pd->refcnt);
2529+
2530+out:
2531+ fput(file);
2532+ return ret;
2533+}
2534+
2535+static int pkt_remove_dev(struct pktcdvd_device *pd)
2536+{
2537+ int ret;
2538+
2539+ if (pd->cdrw.pid >= 0) {
2540+ ret = kill_proc(pd->cdrw.pid, SIGKILL, 1);
2541+ if (ret) {
2542+ printk("pkt_exit: can't kill kernel thread\n");
2543+ return ret;
2544+ }
2545+ wait_for_completion(&pd->cdrw.thr_compl);
2546+ }
2547+
2548+ /*
2549+ * will also invalidate buffers for CD-ROM
2550+ */
2551+ invalidate_device(pd->pkt_dev, 1);
2552+
2553+ if ((ret = pkt_shrink_bhlist(pd, PACKET_MAX_SIZE)) != PACKET_MAX_SIZE)
2554+ printk("pktcdvd: leaked %d buffers\n", PACKET_MAX_SIZE - ret);
2555+
2556+ blk_cleanup_queue(&pd->cdrw.r_queue);
2557+ remove_proc_entry(pd->name, pkt_proc);
2558+ DPRINTK("pktcdvd: writer %s unregistered\n", pd->cdi->name);
2559+ memset(pd, 0, sizeof(struct pktcdvd_device));
2560+ MOD_DEC_USE_COUNT;
2561+ return 0;
2562+}
2563+
2564+static int pkt_media_change(kdev_t dev)
2565+{
2566+ struct pktcdvd_device *pd = pkt_find_dev(dev);
2567+ if (!pd)
2568+ return 0;
2569+ return cdrom_media_changed(pd->dev);
2570+}
2571+
2572+static int pkt_ioctl(struct inode *inode, struct file *file,
2573+ unsigned int cmd, unsigned long arg)
2574+{
2575+ struct pktcdvd_device *pd = &pkt_devs[MINOR(inode->i_rdev)];
2576+
2577+ VPRINTK("pkt_ioctl: cmd %d, dev %x\n", cmd, inode->i_rdev);
2578+
2579+ if ((cmd != PACKET_SETUP_DEV) && !pd->dev) {
2580+ DPRINTK("pktcdvd: dev not setup\n");
2581+ return -ENXIO;
2582+ }
2583+
2584+ switch (cmd) {
2585+ case PACKET_GET_STATS:
2586+ if (copy_to_user(&arg, &pd->stats, sizeof(struct packet_stats)))
2587+ return -EFAULT;
2588+ break;
2589+
2590+ case PACKET_SETUP_DEV:
2591+ if (pd->dev) {
2592+ printk("pktcdvd: dev already setup\n");
2593+ return -EBUSY;
2594+ }
2595+ if (!capable(CAP_SYS_ADMIN))
2596+ return -EPERM;
2597+ return pkt_setup_dev(pd, arg);
2598+
2599+ case PACKET_TEARDOWN_DEV:
2600+ if (!capable(CAP_SYS_ADMIN))
2601+ return -EPERM;
2602+ if (atomic_read(&pd->refcnt) != 1)
2603+ return -EBUSY;
2604+ return pkt_remove_dev(pd);
2605+
2606+ case BLKGETSIZE:
2607+ return put_user(blk_size[PACKET_MAJOR][MINOR(inode->i_rdev)] << 1, (unsigned long *)arg);
2608+
2609+ case BLKGETSIZE64:
2610+ return put_user((u64)blk_size[PACKET_MAJOR][MINOR(inode->i_rdev)] << 10,
2611+ (u64 *)arg);
2612+
2613+ case BLKROSET:
2614+ if (capable(CAP_SYS_ADMIN))
2615+ set_bit(PACKET_READONLY, &pd->flags);
2616+ case BLKROGET:
2617+ case BLKSSZGET:
2618+ case BLKRASET:
2619+ case BLKRAGET:
2620+ case BLKFLSBUF:
2621+ if (!pd->bdev)
2622+ return -ENXIO;
2623+ return blk_ioctl(inode->i_rdev, cmd, arg);
2624+
2625+ /*
2626+ * forward selected CDROM ioctls to CD-ROM, for UDF
2627+ */
2628+ case CDROMMULTISESSION:
2629+ case CDROMREADTOCENTRY:
2630+ case CDROM_LAST_WRITTEN:
2631+ case CDROM_SEND_PACKET:
2632+ case SCSI_IOCTL_SEND_COMMAND:
2633+ if (!pd->bdev)
2634+ return -ENXIO;
2635+ return ioctl_by_bdev(pd->bdev, cmd, arg);
2636+
2637+ default:
2638+ printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd);
2639+ return -ENOTTY;
2640+ }
2641+
2642+ return 0;
2643+}
2644+
2645+static struct block_device_operations pktcdvd_ops = {
2646+ owner: THIS_MODULE,
2647+ open: pkt_open,
2648+ release: pkt_close,
2649+ ioctl: pkt_ioctl,
2650+ check_media_change: pkt_media_change,
2651+};
2652+
2653+int pkt_init(void)
2654+{
2655+ int i;
2656+
2657+ devfs_register(NULL, "pktcdvd", DEVFS_FL_DEFAULT, PACKET_MAJOR, 0,
2658+ S_IFBLK | S_IRUSR | S_IWUSR, &pktcdvd_ops, NULL);
2659+ if (devfs_register_blkdev(PACKET_MAJOR, "pktcdvd", &pktcdvd_ops)) {
2660+ printk("unable to register pktcdvd device\n");
2661+ return -EIO;
2662+ }
2663+
2664+ pkt_sizes = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
2665+ if (pkt_sizes == NULL)
2666+ goto err;
2667+
2668+ pkt_blksize = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
2669+ if (pkt_blksize == NULL)
2670+ goto err;
2671+
2672+ pkt_readahead = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
2673+ if (pkt_readahead == NULL)
2674+ goto err;
2675+
2676+ pkt_devs = kmalloc(MAX_WRITERS * sizeof(struct pktcdvd_device), GFP_KERNEL);
2677+ if (pkt_devs == NULL)
2678+ goto err;
2679+
2680+ memset(pkt_devs, 0, MAX_WRITERS * sizeof(struct pktcdvd_device));
2681+ memset(pkt_sizes, 0, MAX_WRITERS * sizeof(int));
2682+ memset(pkt_blksize, 0, MAX_WRITERS * sizeof(int));
2683+
2684+ for (i = 0; i < MAX_WRITERS; i++)
2685+ pkt_readahead[i] = vm_max_readahead;
2686+
2687+ blk_size[PACKET_MAJOR] = pkt_sizes;
2688+ blksize_size[PACKET_MAJOR] = pkt_blksize;
2689+ max_readahead[PACKET_MAJOR] = pkt_readahead;
2690+ read_ahead[PACKET_MAJOR] = 128;
2691+ set_blocksize(MKDEV(PACKET_MAJOR, 0), CD_FRAMESIZE);
2692+
2693+ blk_dev[PACKET_MAJOR].queue = pkt_get_queue;
2694+
2695+ pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
2696+
2697+ register_sysrq_key('q', &sysrq_show_requests_op);
2698+
2699+ DPRINTK("pktcdvd: %s\n", VERSION_CODE);
2700+ return 0;
2701+
2702+err:
2703+ printk("pktcdvd: out of memory\n");
2704+ devfs_unregister(devfs_find_handle(NULL, "pktcdvd", 0, 0,
2705+ DEVFS_SPECIAL_BLK, 0));
2706+ devfs_unregister_blkdev(PACKET_MAJOR, "pktcdvd");
2707+ kfree(pkt_devs);
2708+ kfree(pkt_sizes);
2709+ kfree(pkt_blksize);
2710+ kfree(pkt_readahead);
2711+ return -ENOMEM;
2712+}
2713+
2714+void pkt_exit(void)
2715+{
2716+ unregister_sysrq_key('q', &sysrq_show_requests_op);
2717+
2718+ devfs_unregister(devfs_find_handle(NULL, "pktcdvd", 0, 0,
2719+ DEVFS_SPECIAL_BLK, 0));
2720+ devfs_unregister_blkdev(PACKET_MAJOR, "pktcdvd");
2721+ blk_dev[PACKET_MAJOR].queue = NULL;
2722+
2723+ remove_proc_entry("pktcdvd", proc_root_driver);
2724+ kfree(pkt_sizes);
2725+ kfree(pkt_blksize);
2726+ kfree(pkt_devs);
2727+ kfree(pkt_readahead);
2728+}
2729+
2730+MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
2731+MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
2732+MODULE_LICENSE("GPL");
2733+
2734+module_init(pkt_init);
2735+module_exit(pkt_exit);
2736diff -uNr linux-2.4.18/drivers/cdrom/Makefile pkt/drivers/cdrom/Makefile
2737--- linux-2.4.18/drivers/cdrom/Makefile Fri Dec 29 23:07:21 2000
2738+++ pkt/drivers/cdrom/Makefile Mon Jul 29 09:10:29 2002
2739@@ -27,6 +27,7 @@
2740 obj-$(CONFIG_BLK_DEV_IDECD) += cdrom.o
2741 obj-$(CONFIG_BLK_DEV_SR) += cdrom.o
2742 obj-$(CONFIG_PARIDE_PCD) += cdrom.o
2743+obj-$(CONFIG_CDROM_PKTCDVD) += cdrom.o
2744
2745 obj-$(CONFIG_AZTCD) += aztcd.o
2746 obj-$(CONFIG_CDU31A) += cdu31a.o cdrom.o
2747diff -uNr linux-2.4.18/drivers/ide/ide-cd.c pkt/drivers/ide/ide-cd.c
2748--- linux-2.4.18/drivers/ide/ide-cd.c Mon Jul 8 23:42:05 2002
2749+++ pkt/drivers/ide/ide-cd.c Mon Jul 29 09:10:30 2002
2750@@ -292,9 +292,11 @@
2751 * correctly reporting tray status -- from
2752 * Michael D Johnson <johnsom@orst.edu>
2753 *
2754+ * 4.99 - Added write support for packet writing.
2755+ *
2756 *************************************************************************/
2757
2758-#define IDECD_VERSION "4.59"
2759+#define IDECD_VERSION "4.99"
2760
2761 #include <linux/config.h>
2762 #include <linux/module.h>
2763@@ -526,7 +528,7 @@
2764
2765 memset(pc, 0, sizeof(struct packet_command));
2766 pc->c[0] = GPCMD_REQUEST_SENSE;
2767- pc->c[4] = pc->buflen = 18;
2768+ pc->c[4] = pc->buflen = 14;
2769 pc->buffer = (char *) sense;
2770 pc->sense = (struct request_sense *) failed_command;
2771
2772@@ -640,7 +642,7 @@
2773 cdrom_saw_media_change (drive);
2774
2775 /* Fail the request. */
2776- printk ("%s: tray open\n", drive->name);
2777+ /* printk ("%s: tray open\n", drive->name); */
2778 cdrom_end_request (0, drive);
2779 } else if (sense_key == UNIT_ATTENTION) {
2780 /* Media change. */
2781@@ -1200,6 +1202,8 @@
2782 * partitions not really working, but better check anyway...
2783 */
2784 if (rq->cmd == nxt->cmd && rq->rq_dev == nxt->rq_dev) {
2785+ if (rq->cmd == WRITE)
2786+ printk("merged write\n");
2787 rq->nr_sectors += nxt->nr_sectors;
2788 rq->hard_nr_sectors += nxt->nr_sectors;
2789 rq->bhtail->b_reqnext = nxt->bh;
2790@@ -2497,6 +2501,12 @@
2791 static
2792 void ide_cdrom_release_real (struct cdrom_device_info *cdi)
2793 {
2794+ struct cdrom_generic_command cgc;
2795+
2796+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
2797+ cgc.cmd[0] = GPCMD_FLUSH_CACHE;
2798+ cgc.quiet = 1;
2799+ (void) ide_cdrom_packet(cdi, &cgc);
2800 }
2801
2802
2803@@ -2683,15 +2693,10 @@
2804 printk(" %dX", CDROM_CONFIG_FLAGS(drive)->max_speed);
2805 printk(" %s", CDROM_CONFIG_FLAGS(drive)->dvd ? "DVD-ROM" : "CD-ROM");
2806
2807- if (CDROM_CONFIG_FLAGS (drive)->dvd_r|CDROM_CONFIG_FLAGS (drive)->dvd_ram)
2808- printk (" DVD%s%s",
2809- (CDROM_CONFIG_FLAGS (drive)->dvd_r)? "-R" : "",
2810- (CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "-RAM" : "");
2811-
2812- if (CDROM_CONFIG_FLAGS (drive)->cd_r|CDROM_CONFIG_FLAGS (drive)->cd_rw)
2813- printk (" CD%s%s",
2814- (CDROM_CONFIG_FLAGS (drive)->cd_r)? "-R" : "",
2815- (CDROM_CONFIG_FLAGS (drive)->cd_rw)? "/RW" : "");
2816+ if (CDROM_CONFIG_FLAGS(drive)->dvd_r || CDROM_CONFIG_FLAGS(drive)->dvd_ram)
2817+ printk (" DVD-R%s", (CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "AM" : "");
2818+ if (CDROM_CONFIG_FLAGS(drive)->cd_r ||CDROM_CONFIG_FLAGS(drive)->cd_rw)
2819+ printk (" CD-R%s", (CDROM_CONFIG_FLAGS (drive)->cd_rw)? "/RW" : "");
2820
2821 if (CDROM_CONFIG_FLAGS (drive)->is_changer)
2822 printk (" changer w/%d slots", nslots);
2823@@ -2714,7 +2719,7 @@
2824 int major = HWIF(drive)->major;
2825 int minor = drive->select.b.unit << PARTN_BITS;
2826
2827- ide_add_setting(drive, "breada_readahead", SETTING_RW, BLKRAGET, BLKRASET, TYPE_INT, 0, 255, 1, 2, &read_ahead[major], NULL);
2828+ ide_add_setting(drive, "breada_readahead", SETTING_RW, BLKRAGET, BLKRASET, TYPE_INT, 0, 255, 1, 1024, &read_ahead[major], NULL);
2829 ide_add_setting(drive, "file_readahead", SETTING_RW, BLKFRAGET, BLKFRASET, TYPE_INTA, 0, INT_MAX, 1, 1024, &max_readahead[major][minor], NULL);
2830 ide_add_setting(drive, "max_kb_per_request", SETTING_RW, BLKSECTGET, BLKSECTSET, TYPE_INTA, 1, 255, 1, 2, &max_sectors[major][minor], NULL);
2831 ide_add_setting(drive, "dsc_overlap", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
2832@@ -2731,7 +2736,7 @@
2833 /*
2834 * default to read-only always and fix latter at the bottom
2835 */
2836- set_device_ro(MKDEV(HWIF(drive)->major, minor), 1);
2837+ set_device_ro(MKDEV(HWIF(drive)->major, minor), 0);
2838 set_blocksize(MKDEV(HWIF(drive)->major, minor), CD_FRAMESIZE);
2839
2840 drive->special.all = 0;
2841diff -uNr linux-2.4.18/drivers/scsi/Config.in pkt/drivers/scsi/Config.in
2842--- linux-2.4.18/drivers/scsi/Config.in Mon Jul 8 23:42:06 2002
2843+++ pkt/drivers/scsi/Config.in Mon Jul 29 09:10:30 2002
2844@@ -20,10 +20,6 @@
2845
2846 comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs'
2847
2848-#if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
2849- bool ' Enable extra checks in new queueing code' CONFIG_SCSI_DEBUG_QUEUES
2850-#fi
2851-
2852 bool ' Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN
2853
2854 bool ' Verbose SCSI error reporting (kernel size +=12K)' CONFIG_SCSI_CONSTANTS
2855diff -uNr linux-2.4.18/drivers/scsi/scsi_merge.c pkt/drivers/scsi/scsi_merge.c
2856--- linux-2.4.18/drivers/scsi/scsi_merge.c Mon Jul 8 23:42:04 2002
2857+++ pkt/drivers/scsi/scsi_merge.c Mon Jul 29 09:10:30 2002
2858@@ -71,11 +71,6 @@
2859 */
2860 #define DMA_SEGMENT_SIZE_LIMITED
2861
2862-#ifdef CONFIG_SCSI_DEBUG_QUEUES
2863-/*
2864- * Enable a bunch of additional consistency checking. Turn this off
2865- * if you are benchmarking.
2866- */
2867 static int dump_stats(struct request *req,
2868 int use_clustering,
2869 int dma_host,
2870@@ -100,22 +95,6 @@
2871 panic("Ththththaats all folks. Too dangerous to continue.\n");
2872 }
2873
2874-
2875-/*
2876- * Simple sanity check that we will use for the first go around
2877- * in order to ensure that we are doing the counting correctly.
2878- * This can be removed for optimization.
2879- */
2880-#define SANITY_CHECK(req, _CLUSTER, _DMA) \
2881- if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA, NULL) ) \
2882- { \
2883- printk("Incorrect segment count at 0x%p", current_text_addr()); \
2884- dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA, NULL)); \
2885- }
2886-#else
2887-#define SANITY_CHECK(req, _CLUSTER, _DMA)
2888-#endif
2889-
2890 static void dma_exhausted(Scsi_Cmnd * SCpnt, int i)
2891 {
2892 int jj;
2893@@ -532,7 +511,6 @@
2894 int max_segments) \
2895 { \
2896 int ret; \
2897- SANITY_CHECK(req, _CLUSTER, _DMA); \
2898 ret = __scsi_ ## _BACK_FRONT ## _merge_fn(q, \
2899 req, \
2900 bh, \
2901@@ -742,7 +720,6 @@
2902 int max_segments) \
2903 { \
2904 int ret; \
2905- SANITY_CHECK(req, _CLUSTER, _DMA); \
2906 ret = __scsi_merge_requests_fn(q, req, next, max_segments, _CLUSTER, _DMA); \
2907 return ret; \
2908 }
2909@@ -829,11 +806,7 @@
2910 /*
2911 * First we need to know how many scatter gather segments are needed.
2912 */
2913- if (!sg_count_valid) {
2914- count = __count_segments(req, use_clustering, dma_host, NULL);
2915- } else {
2916- count = req->nr_segments;
2917- }
2918+ count = __count_segments(req, use_clustering, dma_host, NULL);
2919
2920 /*
2921 * If the dma pool is nearly empty, then queue a minimal request
2922@@ -949,9 +922,7 @@
2923 */
2924 if (count != SCpnt->use_sg) {
2925 printk("Incorrect number of segments after building list\n");
2926-#ifdef CONFIG_SCSI_DEBUG_QUEUES
2927 dump_stats(req, use_clustering, dma_host, count);
2928-#endif
2929 }
2930 if (!dma_host) {
2931 return 1;
2932diff -uNr linux-2.4.18/drivers/scsi/sr.c pkt/drivers/scsi/sr.c
2933--- linux-2.4.18/drivers/scsi/sr.c Mon Feb 25 20:38:04 2002
2934+++ pkt/drivers/scsi/sr.c Mon Jul 29 09:10:30 2002
2935@@ -28,12 +28,16 @@
2936 * Modified by Jens Axboe <axboe@suse.de> - support DVD-RAM
2937 * transparently and loose the GHOST hack
2938 *
2939+ * Modified by Jens Axboe <axboe@suse.de> - support packet writing
2940+ * through generic packet layer.
2941+ *
2942 * Modified by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
2943 * check resource allocation in sr_init and some cleanups
2944 *
2945 */
2946
2947 #include <linux/module.h>
2948+#include <linux/config.h>
2949
2950 #include <linux/fs.h>
2951 #include <linux/kernel.h>
2952@@ -696,7 +700,7 @@
2953 cmd[2] = 0x2a;
2954 cmd[4] = 128;
2955 cmd[3] = cmd[5] = 0;
2956- rc = sr_do_ioctl(i, cmd, buffer, 128, 1, SCSI_DATA_READ, NULL);
2957+ rc = sr_do_ioctl(i, cmd, buffer, 128, 1, SCSI_DATA_READ, NULL, SR_TIMEOUT);
2958
2959 if (rc) {
2960 /* failed, drive doesn't have capabilities mode page */
2961@@ -728,16 +732,13 @@
2962 if ((buffer[n + 2] & 0x8) == 0)
2963 /* not a DVD drive */
2964 scsi_CDs[i].cdi.mask |= CDC_DVD;
2965- if ((buffer[n + 3] & 0x20) == 0) {
2966+ if ((buffer[n + 3] & 0x20) == 0)
2967 /* can't write DVD-RAM media */
2968 scsi_CDs[i].cdi.mask |= CDC_DVD_RAM;
2969- } else {
2970- scsi_CDs[i].device->writeable = 1;
2971- }
2972 if ((buffer[n + 3] & 0x10) == 0)
2973 /* can't write DVD-R media */
2974 scsi_CDs[i].cdi.mask |= CDC_DVD_R;
2975- if ((buffer[n + 3] & 0x2) == 0)
2976+ if ((buffer[n + 3] & 0x02) == 0)
2977 /* can't write CD-RW media */
2978 scsi_CDs[i].cdi.mask |= CDC_CD_RW;
2979 if ((buffer[n + 3] & 0x1) == 0)
2980@@ -757,6 +758,10 @@
2981 /*else I don't think it can close its tray
2982 scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */
2983
2984+ if (~scsi_CDs[i].cdi.mask & (CDC_DVD_RAM | CDC_CD_RW))
2985+ /* can write to DVD-RAM or CD-RW */
2986+ scsi_CDs[i].device->writeable = 1;
2987+
2988 scsi_free(buffer, 512);
2989 }
2990
2991@@ -772,7 +777,10 @@
2992 if (device->scsi_level <= SCSI_2)
2993 cgc->cmd[1] |= device->lun << 5;
2994
2995- cgc->stat = sr_do_ioctl(MINOR(cdi->dev), cgc->cmd, cgc->buffer, cgc->buflen, cgc->quiet, cgc->data_direction, cgc->sense);
2996+ if (cgc->timeout <= 0)
2997+ cgc->timeout = 5 * HZ;
2998+
2999+ cgc->stat = sr_do_ioctl(MINOR(cdi->dev), cgc->cmd, cgc->buffer, cgc->buflen, cgc->quiet, cgc->data_direction, cgc->sense, cgc->timeout);
3000
3001 return cgc->stat;
3002 }
3003diff -uNr linux-2.4.18/drivers/scsi/sr.h pkt/drivers/scsi/sr.h
3004--- linux-2.4.18/drivers/scsi/sr.h Tue Jul 9 00:38:01 2002
3005+++ pkt/drivers/scsi/sr.h Mon Jul 29 09:10:30 2002
3006@@ -36,7 +36,7 @@
3007
3008 extern Scsi_CD *scsi_CDs;
3009
3010-int sr_do_ioctl(int, unsigned char *, void *, unsigned, int, int, struct request_sense *);
3011+int sr_do_ioctl(int, unsigned char *, void *, unsigned, int, int, struct request_sense *, int);
3012
3013 int sr_lock_door(struct cdrom_device_info *, int);
3014 int sr_tray_move(struct cdrom_device_info *, int);
3015diff -uNr linux-2.4.18/drivers/scsi/sr_ioctl.c pkt/drivers/scsi/sr_ioctl.c
3016--- linux-2.4.18/drivers/scsi/sr_ioctl.c Mon Oct 15 22:27:42 2001
3017+++ pkt/drivers/scsi/sr_ioctl.c Mon Jul 29 09:10:30 2002
3018@@ -68,14 +68,14 @@
3019 sr_cmd[6] = trk1_te.cdte_addr.msf.minute;
3020 sr_cmd[7] = trk1_te.cdte_addr.msf.second;
3021 sr_cmd[8] = trk1_te.cdte_addr.msf.frame;
3022- return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL);
3023+ return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
3024 }
3025
3026 /* We do our own retries because we want to know what the specific
3027 error code is. Normally the UNIT_ATTENTION code will automatically
3028 clear after one error */
3029
3030-int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet, int readwrite, struct request_sense *sense)
3031+int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet, int readwrite, struct request_sense *sense, int timeout)
3032 {
3033 Scsi_Request *SRpnt;
3034 Scsi_Device *SDev;
3035@@ -109,7 +109,7 @@
3036
3037
3038 scsi_wait_req(SRpnt, (void *) sr_cmd, (void *) buffer, buflength,
3039- IOCTL_TIMEOUT, IOCTL_RETRIES);
3040+ timeout, IOCTL_RETRIES);
3041
3042 req = &SRpnt->sr_request;
3043 if (SRpnt->sr_buffer && req->buffer && SRpnt->sr_buffer != req->buffer) {
3044@@ -198,7 +198,7 @@
3045 sr_cmd[1] = (scsi_CDs[minor].device->scsi_level <= SCSI_2) ?
3046 ((scsi_CDs[minor].device->lun) << 5) : 0;
3047 sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
3048- return sr_do_ioctl(minor, sr_cmd, NULL, 0, 1, SCSI_DATA_NONE, NULL);
3049+ return sr_do_ioctl(minor, sr_cmd, NULL, 0, 1, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
3050 }
3051
3052 int sr_tray_move(struct cdrom_device_info *cdi, int pos)
3053@@ -211,7 +211,7 @@
3054 sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
3055 sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */ ;
3056
3057- return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL);
3058+ return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
3059 }
3060
3061 int sr_lock_door(struct cdrom_device_info *cdi, int lock)
3062@@ -289,7 +289,7 @@
3063 sr_cmd[8] = 24;
3064 sr_cmd[9] = 0;
3065
3066- result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0, SCSI_DATA_READ, NULL);
3067+ result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3068
3069 memcpy(mcn->medium_catalog_number, buffer + 9, 13);
3070 mcn->medium_catalog_number[13] = 0;
3071@@ -319,7 +319,7 @@
3072 sr_cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */
3073 sr_cmd[3] = speed & 0xff; /* LSB */
3074
3075- if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL))
3076+ if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT))
3077 return -EIO;
3078 return 0;
3079 }
3080@@ -349,7 +349,7 @@
3081 sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
3082 sr_cmd[8] = 12; /* LSB of length */
3083
3084- result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1, SCSI_DATA_READ, NULL);
3085+ result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3086
3087 tochdr->cdth_trk0 = buffer[2];
3088 tochdr->cdth_trk1 = buffer[3];
3089@@ -369,7 +369,7 @@
3090 sr_cmd[6] = tocentry->cdte_track;
3091 sr_cmd[8] = 12; /* LSB of length */
3092
3093- result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0, SCSI_DATA_READ, NULL);
3094+ result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3095
3096 tocentry->cdte_ctrl = buffer[5] & 0xf;
3097 tocentry->cdte_adr = buffer[5] >> 4;
3098@@ -396,7 +396,7 @@
3099 sr_cmd[7] = ti->cdti_trk1;
3100 sr_cmd[8] = ti->cdti_ind1;
3101
3102- result = sr_do_ioctl(target, sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL);
3103+ result = sr_do_ioctl(target, sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
3104 if (result == -EDRIVE_CANT_DO_THIS)
3105 result = sr_fake_playtrkind(cdi, ti);
3106
3107@@ -462,7 +462,7 @@
3108 cmd[9] = 0x10;
3109 break;
3110 }
3111- return sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL);
3112+ return sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3113 }
3114
3115 /*
3116@@ -501,7 +501,7 @@
3117 cmd[4] = (unsigned char) (lba >> 8) & 0xff;
3118 cmd[5] = (unsigned char) lba & 0xff;
3119 cmd[8] = 1;
3120- rc = sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL);
3121+ rc = sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3122
3123 return rc;
3124 }
3125diff -uNr linux-2.4.18/drivers/scsi/sr_vendor.c pkt/drivers/scsi/sr_vendor.c
3126--- linux-2.4.18/drivers/scsi/sr_vendor.c Thu Jul 5 20:28:17 2001
3127+++ pkt/drivers/scsi/sr_vendor.c Mon Jul 29 09:10:30 2002
3128@@ -60,6 +60,8 @@
3129
3130 #define VENDOR_ID (scsi_CDs[minor].vendor)
3131
3132+#define VENDOR_TIMEOUT 30*HZ
3133+
3134 void sr_vendor_init(int minor)
3135 {
3136 #ifndef CONFIG_BLK_DEV_SR_VENDOR
3137@@ -134,7 +136,7 @@
3138 modesel->density = density;
3139 modesel->block_length_med = (blocklength >> 8) & 0xff;
3140 modesel->block_length_lo = blocklength & 0xff;
3141- if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0, SCSI_DATA_WRITE, NULL))) {
3142+ if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0, SCSI_DATA_WRITE, NULL, VENDOR_TIMEOUT))) {
3143 scsi_CDs[minor].device->sector_size = blocklength;
3144 }
3145 #ifdef DEBUG
3146@@ -179,7 +181,7 @@
3147 (scsi_CDs[minor].device->lun << 5) : 0;
3148 cmd[8] = 12;
3149 cmd[9] = 0x40;
3150- rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL);
3151+ rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3152 if (rc != 0)
3153 break;
3154 if ((buffer[0] << 8) + buffer[1] < 0x0a) {
3155@@ -205,7 +207,7 @@
3156 (scsi_CDs[minor].device->lun << 5) : 0;
3157 cmd[1] |= 0x03;
3158 cmd[2] = 0xb0;
3159- rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1, SCSI_DATA_READ, NULL);
3160+ rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3161 if (rc != 0)
3162 break;
3163 if (buffer[14] != 0 && buffer[14] != 0xb0) {
3164@@ -231,7 +233,7 @@
3165 cmd[1] = (scsi_CDs[minor].device->scsi_level <= SCSI_2) ?
3166 (scsi_CDs[minor].device->lun << 5) : 0;
3167 cmd[1] |= 0x03;
3168- rc = sr_do_ioctl(minor, cmd, buffer, 4, 1, SCSI_DATA_READ, NULL);
3169+ rc = sr_do_ioctl(minor, cmd, buffer, 4, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3170 if (rc == -EINVAL) {
3171 printk(KERN_INFO "sr%d: Hmm, seems the drive "
3172 "doesn't support multisession CD's\n", minor);
3173@@ -257,7 +259,7 @@
3174 (scsi_CDs[minor].device->lun << 5) : 0;
3175 cmd[8] = 0x04;
3176 cmd[9] = 0x40;
3177- rc = sr_do_ioctl(minor, cmd, buffer, 0x04, 1, SCSI_DATA_READ, NULL);
3178+ rc = sr_do_ioctl(minor, cmd, buffer, 0x04, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3179 if (rc != 0) {
3180 break;
3181 }
3182@@ -272,7 +274,7 @@
3183 cmd[6] = rc & 0x7f; /* number of last session */
3184 cmd[8] = 0x0c;
3185 cmd[9] = 0x40;
3186- rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL);
3187+ rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3188 if (rc != 0) {
3189 break;
3190 }
3191diff -uNr linux-2.4.18/include/linux/cdrom.h pkt/include/linux/cdrom.h
3192--- linux-2.4.18/include/linux/cdrom.h Tue Jul 9 00:40:52 2002
3193+++ pkt/include/linux/cdrom.h Mon Jul 29 09:03:07 2002
3194@@ -494,6 +494,7 @@
3195 /* Mode page codes for mode sense/set */
3196 #define GPMODE_R_W_ERROR_PAGE 0x01
3197 #define GPMODE_WRITE_PARMS_PAGE 0x05
3198+#define GPMODE_WCACHING_PAGE 0x08
3199 #define GPMODE_AUDIO_CTL_PAGE 0x0e
3200 #define GPMODE_POWER_PAGE 0x1a
3201 #define GPMODE_FAULT_FAIL_PAGE 0x1c
3202@@ -504,6 +505,11 @@
3203 * of MODE_SENSE_POWER_PAGE */
3204 #define GPMODE_CDROM_PAGE 0x0d
3205
3206+#define GPMODE_PAGE_CURRENT 0
3207+#define GPMODE_PAGE_CHANGE 1
3208+#define GPMODE_PAGE_DEFAULT 2
3209+#define GPMODE_PAGE_SAVE 3
3210+
3211
3212
3213 /* DVD struct types */
3214diff -uNr linux-2.4.18/include/linux/fs.h pkt/include/linux/fs.h
3215--- linux-2.4.18/include/linux/fs.h Tue Jul 9 00:51:20 2002
3216+++ pkt/include/linux/fs.h Mon Jul 29 10:02:28 2002
3217@@ -931,6 +931,7 @@
3218 int (*remount_fs) (struct super_block *, int *, char *);
3219 void (*clear_inode) (struct inode *);
3220 void (*umount_begin) (struct super_block *);
3221+ int (*relocate_blocks) (struct super_block *, unsigned long, unsigned long *);
3222
3223 /* Following are for knfsd to interact with "interesting" filesystems
3224 * Currently just reiserfs, but possibly FAT and others later
3225diff -uNr linux-2.4.18/include/linux/major.h pkt/include/linux/major.h
3226--- linux-2.4.18/include/linux/major.h Tue Sep 18 08:23:40 2001
3227+++ pkt/include/linux/major.h Mon Jul 29 10:02:28 2002
3228@@ -108,6 +108,8 @@
3229 #define SPECIALIX_NORMAL_MAJOR 75
3230 #define SPECIALIX_CALLOUT_MAJOR 76
3231
3232+#define PACKET_MAJOR 97
3233+
3234 #define COMPAQ_CISS_MAJOR 104
3235 #define COMPAQ_CISS_MAJOR1 105
3236 #define COMPAQ_CISS_MAJOR2 106
3237diff -uNr linux-2.4.18/include/linux/pktcdvd.h pkt/include/linux/pktcdvd.h
3238--- linux-2.4.18/include/linux/pktcdvd.h Thu Jan 1 01:00:00 1970
3239+++ pkt/include/linux/pktcdvd.h Mon Jul 29 09:11:41 2002
3240@@ -0,0 +1,212 @@
3241+/*
3242+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
3243+ *
3244+ * May be copied or modified under the terms of the GNU General Public
3245+ * License. See linux/COPYING for more information.
3246+ *
3247+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
3248+ * DVD-RW devices.
3249+ *
3250+ */
3251+#ifndef __PKTCDVD_H
3252+#define __PKTCDVD_H
3253+
3254+/*
3255+ * 1 for normal debug messages, 2 is very verbose. 0 to turn it off.
3256+ */
3257+#define PACKET_DEBUG 1
3258+
3259+#define MAX_WRITERS 8
3260+
3261+/*
3262+ * use drive write caching -- we need deferred error handling to be
3263+ * able to successfully recover with this option (drive will return good
3264+ * status as soon as the cdb is validated).
3265+ */
3266+#if defined(CONFIG_CDROM_PKTCDVD_WCACHE)
3267+#warning Enabling write caching, use at your own risk
3268+#define USE_WCACHING 1
3269+#else
3270+#define USE_WCACHING 0
3271+#endif
3272+
3273+/*
3274+ * No user-serviceable parts beyond this point ->
3275+ */
3276+
3277+#if PACKET_DEBUG
3278+#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
3279+#else
3280+#define DPRINTK(fmt, args...)
3281+#endif
3282+
3283+#if PACKET_DEBUG > 1
3284+#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
3285+#else
3286+#define VPRINTK(fmt, args...)
3287+#endif
3288+
3289+#define PKT_BUF_LIST 0x89
3290+
3291+/*
3292+ * device types
3293+ */
3294+#define PACKET_CDR 1
3295+#define PACKET_CDRW 2
3296+#define PACKET_DVDR 3
3297+#define PACKET_DVDRW 4
3298+
3299+/*
3300+ * flags
3301+ */
3302+#define PACKET_WRITEABLE 1 /* pd is writeable */
3303+#define PACKET_NWA_VALID 2 /* next writeable address valid */
3304+#define PACKET_LRA_VALID 3 /* last recorded address valid */
3305+#define PACKET_READONLY 4 /* read only pd */
3306+#define PACKET_RECOVERY 5 /* rq recovery in progress */
3307+#define PACKET_RQ 6 /* current rq is set */
3308+#define PACKET_BUSY 7 /* current rq is being processed */
3309+
3310+/*
3311+ * Disc status -- from READ_DISC_INFO
3312+ */
3313+#define PACKET_DISC_EMPTY 0
3314+#define PACKET_DISC_INCOMPLETE 1
3315+#define PACKET_DISC_COMPLETE 2
3316+#define PACKET_DISC_OTHER 3
3317+
3318+/*
3319+ * write type, and corresponding data block type
3320+ */
3321+#define PACKET_MODE1 1
3322+#define PACKET_MODE2 2
3323+#define PACKET_BLOCK_MODE1 8
3324+#define PACKET_BLOCK_MODE2 10
3325+
3326+/*
3327+ * Last session/border status
3328+ */
3329+#define PACKET_SESSION_EMPTY 0
3330+#define PACKET_SESSION_INCOMPLETE 1
3331+#define PACKET_SESSION_RESERVED 2
3332+#define PACKET_SESSION_COMPLETE 3
3333+
3334+#define PACKET_MCN "4a656e734178626f65323030300000"
3335+
3336+#undef PACKET_USE_LS
3337+
3338+/*
3339+ * special requests
3340+ */
3341+#define PKT_THROTTLE_SPEED 1
3342+
3343+#define PKT_TRAY_UNLOCK 0
3344+#define PKT_TRAY_LOCK 1
3345+
3346+/*
3347+ * Very crude stats for now
3348+ */
3349+struct packet_stats
3350+{
3351+ unsigned long bh_s;
3352+ unsigned long bh_e;
3353+ unsigned long bh_cache_hits;
3354+ unsigned long page_cache_hits;
3355+ unsigned long secs_w;
3356+ unsigned long secs_r;
3357+};
3358+
3359+/*
3360+ * packet ioctls
3361+ */
3362+#define PACKET_IOCTL_MAGIC ('X')
3363+#define PACKET_GET_STATS _IOR(PACKET_IOCTL_MAGIC, 0, struct packet_stats)
3364+#define PACKET_SETUP_DEV _IOW(PACKET_IOCTL_MAGIC, 1, unsigned int)
3365+#define PACKET_TEARDOWN_DEV _IOW(PACKET_IOCTL_MAGIC, 2, unsigned int)
3366+
3367+#ifdef __KERNEL__
3368+#include <linux/blkdev.h>
3369+#include <linux/completion.h>
3370+
3371+struct packet_settings
3372+{
3373+ __u8 size; /* packet size in frames */
3374+ __u8 fp; /* fixed packets */
3375+ __u8 link_loss; /* the rest is specified
3376+ * as per Mt Fuji */
3377+ __u8 write_type;
3378+ __u8 track_mode;
3379+ __u8 block_mode;
3380+};
3381+
3382+struct packet_cdrw
3383+{
3384+ struct buffer_head *bhlist; /* string of bhs */
3385+ atomic_t free_bh;
3386+ merge_request_fn *front_merge_fn;
3387+ merge_request_fn *back_merge_fn;
3388+ merge_requests_fn *merge_requests_fn;
3389+ request_queue_t r_queue;
3390+ void *queuedata;
3391+ pid_t pid;
3392+ struct completion thr_compl;
3393+};
3394+
3395+struct pktcdvd_device
3396+{
3397+ struct block_device *bdev;
3398+ kdev_t dev; /* dev attached */
3399+ kdev_t pkt_dev; /* our dev */
3400+ char name[20];
3401+ struct cdrom_device_info *cdi; /* cdrom matching dev */
3402+ struct packet_settings settings;
3403+ struct packet_stats stats;
3404+ atomic_t refcnt;
3405+ __u8 speed; /* cur write speed */
3406+ unsigned long offset; /* start offset */
3407+ __u8 mode_offset; /* 0 / 8 */
3408+ __u8 type;
3409+ unsigned long flags;
3410+ __u8 disc_status;
3411+ __u8 track_status; /* last one */
3412+ __u32 nwa; /* next writable address */
3413+ __u32 lra; /* last recorded address */
3414+ spinlock_t lock;
3415+ struct packet_cdrw cdrw;
3416+ wait_queue_head_t wqueue;
3417+ struct request *rq;
3418+ atomic_t wrqcnt;
3419+ atomic_t bhcnt;
3420+
3421+ struct semaphore cache_sync_mutex;
3422+ int unflushed_writes;
3423+
3424+ make_request_fn *make_request_fn;
3425+};
3426+
3427+/*
3428+ * following possibly belongs in cdrom.h
3429+ */
3430+
3431+struct cdvd_capacity
3432+{
3433+ __u32 lba;
3434+ __u32 block_length;
3435+};
3436+
3437+void pkt_elevator_merge_req(struct request *rq, struct request *nxt) {}
3438+void pkt_elevator_cleanup(request_queue_t *q, struct request *rq, int count) {}
3439+
3440+#define ELEVATOR_PKTCDVD \
3441+((elevator_t) { \
3442+ 0, /* not used */ \
3443+ 0, /* not used */ \
3444+ \
3445+ pkt_elevator_merge, /* elevator_merge_fn */ \
3446+ pkt_elevator_cleanup, \
3447+ pkt_elevator_merge_req, \
3448+ })
3449+
3450+#endif /* __KERNEL__ */
3451+
3452+#endif /* __PKTCDVD_H */
This page took 0.74136 seconds and 4 git commands to generate.