kernel-cdrw-packet.patch
f87f0d90 1diff -u -r -N ../../linus/2.4/linux/Documentation/Configure.help linux/Documentation/Configure.help
2--- ../../linus/2.4/linux/Documentation/Configure.help Tue Aug 6 21:13:55 2002
3+++ linux/Documentation/Configure.help Tue Aug 6 21:21:50 2002
4@@ -694,6 +694,27 @@
e5ff5fa6 5 say M here and read <file:Documentation/modules.txt>. The module
6 will be called ide-cd.o.
7
8+
9+Packet writing on CD/DVD media (EXPERIMENTAL)
10+CONFIG_CDROM_PKTCDVD
11+ If you have a CDROM drive that supports packet writing, say Y to
12+ include preliminary support. It should work with any MMC/Mt Fuji
13+ compliant ATAPI or SCSI drive, which is just about any newer CD
14+ writer.
15+
16+ Currently only writing to CD-RW discs is possible.
17+
18+ If you want to compile the driver as a module ( = code which can be
19+ inserted in and removed from the running kernel whenever you want),
20+ say M here and read Documentation/modules.txt. The module will be
21+ called pktcdvd.o
22+
23+Write caching
24+CONFIG_CDROM_PKTCDVD_WCACHE
25+ If enabled, write caching will be set for the CD-R/W device. For now
26+ this option is dangerous unless the CD-RW media is known good, as we
27+ don't do deferred write error handling yet.
28+
29 Include IDE/ATAPI TAPE support
30 CONFIG_BLK_DEV_IDETAPE
31 If you have an IDE tape drive using the ATAPI protocol, say Y.
f87f0d90 32diff -u -r -N ../../linus/2.4/linux/arch/sparc64/kernel/ioctl32.c linux/arch/sparc64/kernel/ioctl32.c
33--- ../../linus/2.4/linux/arch/sparc64/kernel/ioctl32.c Tue Aug 6 21:14:27 2002
34+++ linux/arch/sparc64/kernel/ioctl32.c Tue Aug 6 21:22:04 2002
35@@ -90,6 +90,7 @@
e5ff5fa6 36 #include <linux/atm_tcp.h>
37 #include <linux/sonet.h>
38 #include <linux/atm_suni.h>
39+#include <linux/pktcdvd.h>
40 #include <linux/mtd/mtd.h>
41
42 #include <net/bluetooth/bluetooth.h>
f87f0d90 43@@ -849,6 +850,41 @@
e5ff5fa6 44 return ret;
45 }
46
47+struct packet_stats32 {
48+ u32 bh_s;
49+ u32 bh_e;
50+ u32 bh_cache_hits;
51+ u32 page_cache_hits;
52+ u32 bh_w;
53+ u32 bh_r;
54+};
55+
56+static inline int pkt_getstats(unsigned int fd, unsigned int cmd, unsigned long arg)
57+{
58+ struct packet_stats p;
59+ struct packet_stats32 p32;
60+ mm_segment_t old_fs = get_fs();
61+ int ret;
62+
63+ ret = copy_from_user (&p32, (struct packet_stats32 *)arg, sizeof(struct packet_stats32));
64+ if (ret)
65+ return -EFAULT;
66+#define P(x) (p.x = (unsigned long)p32.x)
67+ P(bh_s);
68+ P(bh_e);
69+ P(bh_cache_hits);
70+ P(page_cache_hits);
71+ P(bh_w);
72+ P(bh_r);
73+#undef P
74+
75+ set_fs (KERNEL_DS);
76+ ret = sys_ioctl (fd, cmd, (long)&p);
77+ set_fs (old_fs);
78+
79+ return ret;
80+}
81+
82 struct hd_geometry32 {
83 unsigned char heads;
84 unsigned char sectors;
f87f0d90 85@@ -4553,6 +4589,12 @@
86 COMPATIBLE_IOCTL(RNDADDENTROPY)
87 COMPATIBLE_IOCTL(RNDZAPENTCNT)
88 COMPATIBLE_IOCTL(RNDCLEARPOOL)
e5ff5fa6 89+/* Big X, CDRW Packet Driver */
90+#if defined(CONFIG_CDROM_PKTCDVD)
91+COMPATIBLE_IOCTL(PACKET_SETUP_DEV)
92+COMPATIBLE_IOCTL(PACKET_TEARDOWN_DEV)
93+HANDLE_IOCTL(PACKET_GET_STATS, pkt_getstats)
94+#endif /* CONFIG_CDROM_PKTCDVD */
95 /* Bluetooth ioctls */
96 COMPATIBLE_IOCTL(HCIDEVUP)
97 COMPATIBLE_IOCTL(HCIDEVDOWN)
f87f0d90 98diff -u -r -N ../../linus/2.4/linux/drivers/block/Config.in linux/drivers/block/Config.in
99--- ../../linus/2.4/linux/drivers/block/Config.in Tue Aug 6 21:14:34 2002
100+++ linux/drivers/block/Config.in Tue Aug 6 21:22:08 2002
101@@ -39,6 +39,11 @@
e5ff5fa6 102 dep_tristate 'Mylex DAC960/DAC1100 PCI RAID Controller support' CONFIG_BLK_DEV_DAC960 $CONFIG_PCI
f87f0d90 103 dep_tristate 'Micro Memory MM5415 Battery Backed RAM support' CONFIG_BLK_DEV_UMEM $CONFIG_PCI $CONFIG_EXPERIMENTAL
e5ff5fa6 104
105+tristate 'Packet writing on CD/DVD media' CONFIG_CDROM_PKTCDVD
106+if [ "$CONFIG_CDROM_PKTCDVD" != "n" ]; then
e5ff5fa6 107+ bool ' Enable write caching' CONFIG_CDROM_PKTCDVD_WCACHE n
108+fi
109+
110 tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
111 dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
112
f87f0d90 113diff -u -r -N ../../linus/2.4/linux/drivers/block/Makefile linux/drivers/block/Makefile
114--- ../../linus/2.4/linux/drivers/block/Makefile Tue Aug 6 21:14:34 2002
115+++ linux/drivers/block/Makefile Tue Aug 6 21:22:08 2002
116@@ -31,6 +31,7 @@
e5ff5fa6 117 obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
f87f0d90 118 obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
119 obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
e5ff5fa6 120+obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
121
f87f0d90 122 subdir-$(CONFIG_PARIDE) += paride
e5ff5fa6 123
f87f0d90 124diff -u -r -N ../../linus/2.4/linux/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c
125--- ../../linus/2.4/linux/drivers/block/ll_rw_blk.c Tue Aug 6 21:14:34 2002
126+++ linux/drivers/block/ll_rw_blk.c Tue Aug 6 21:22:08 2002
127@@ -1046,6 +1046,7 @@
e5ff5fa6 128 /* Test device size, when known. */
129 if (blk_size[major])
130 minorsize = blk_size[major][MINOR(bh->b_rdev)];
131+#if 0
132 if (minorsize) {
133 unsigned long maxsector = (minorsize << 1) + 1;
134 unsigned long sector = bh->b_rsector;
f87f0d90 135@@ -1069,6 +1070,7 @@
e5ff5fa6 136 return;
137 }
138 }
139+#endif
140
141 /*
142 * Resolve the mapping until finished. (drivers are
f87f0d90 143@@ -1270,8 +1272,8 @@
144
145 req->errors = 0;
146 if (!uptodate)
147- printk("end_request: I/O error, dev %s (%s), sector %lu\n",
148- kdevname(req->rq_dev), name, req->sector);
149+ printk("end_request: I/O error, cmd %d dev %s (%s), sector %lu\n",
150+ req->cmd, kdevname(req->rq_dev), name, req->sector);
151
152 if ((bh = req->bh) != NULL) {
153 nsect = bh->b_size >> 9;
154diff -u -r -N ../../linus/2.4/linux/drivers/block/pktcdvd.c linux/drivers/block/pktcdvd.c
155--- ../../linus/2.4/linux/drivers/block/pktcdvd.c Thu Jan 1 01:00:00 1970
156+++ linux/drivers/block/pktcdvd.c Thu Aug 8 20:44:32 2002
157@@ -0,0 +1,2524 @@
e5ff5fa6 158+/*
159+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
160+ *
161+ * May be copied or modified under the terms of the GNU General Public
162+ * License. See linux/COPYING for more information.
163+ *
164+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
165+ * DVD-RW devices (aka an exercise in block layer masturbation)
166+ *
167+ *
168+ * TODO: (circa order of when I will fix it)
169+ * - Only able to write on CD-RW media right now.
170+ * - check host application code on media and set it in write page
171+ * - Generic interface for UDF to submit large packets for variable length
172+ * packet writing
173+ * - interface for UDF <-> packet to negotiate a new location when a write
174+ * fails.
175+ * - handle OPC, especially for -RW media
176+ *
177+ * ------------------------------------------------------------------------
178+ *
179+ * Newer changes -- see ChangeLog
180+ *
181+ * 0.0.2d (26/10/2000)
182+ * - (scsi) use implicit segment recounting for all hba's
183+ * - fix speed setting, was consistently off on most drives
184+ * - only print capacity when opening for write
185+ * - fix off-by-two error in getting/setting write+read speed (affected
186+ * reporting as well as actual speed used)
187+ * - possible to enable write caching on drive
188+ * - do ioctl marshalling on sparc64 from Ben Collins <bcollins@debian.org>
189+ * - avoid unaligned access on flags, should have been unsigned long of course
190+ * - fixed missed wakeup in kpacketd
191+ * - b_dev error (two places)
192+ * - fix buffer head b_count bugs
193+ * - fix hole merge bug, where tail could be added twice
194+ * - fsync and invalidate buffers on close
195+ * - check hash table for buffers first before using our own
196+ * - add read-ahead
197+ * - fixed several list races
198+ * - fix proc reporting for more than one device
199+ * - change to O_CREAT for creating devices
200+ * - added media_change hook
201+ * - added free buffers config option
202+ * - pkt_lock_tray fails on failed open (and oopses), remove it. unlock
203+ * is done explicitly in pkt_remove dev anyway.
204+ * - added proper elevator insertion (should probably be part of elevator.c)
205+ * - moved kernel thread info to private device, spawn one for each writer
206+ * - added separate buffer list for dirty packet buffers
207+ * - fixed nasty data corruption bug
208+ * - remember to account request even when we don't gather data for it
209+ * - add ioctl to force wakeup of kernel thread (for debug)
210+ * - fixed packet size setting bug on zero detected
211+ * - changed a lot of the proc reporting to be more readable to "humans"
212+ * - set full speed for read-only opens
213+ *
214+ * 0.0.2c (08/09/2000)
215+ * - inc usage count of buffer heads
216+ * - add internal buffer pool to avoid deadlock on oom
217+ * - gather data for as many buffers as we have, before initiating write. this
218+ * allows the laser to stay on longer, giving better performance.
219+ * - fix always busy when tray can't be locked
220+ * - remove request duplication nastiness, inject directly into the target
221+ * - adapted to devfs and elevator changes
222+ * - added proc interface
223+ *
224+ * 0.0.2b (21/06/2000)
225+ * - fix io_request_lock typos (missing '&')
226+ * - grab pkt_sem before invoking pkt_handle_queue
227+ * - SCSI uses queuedata too, mirror that in pd->queuedata (hack)
228+ * - remove SCSI sr debug messages
229+ * - really activate empty block querying (requires cvs UDF, CDRW branch)
230+ * - make sure sync_buffers doesn't consider us, or we can deadlock
231+ * - make sure people don't swap on us (for now ;)
232+ *
233+ * 0.0.2a (19/06/2000)
234+ * - add kpacketd kernel thread to handle actual data gathering
235+ * - pd->pkt_dev is now real device, not just minor
236+ * - add support for super_operations block_empty fn, to query fs for
237+ * unused blocks that don't need reading
238+ * - "cache" blocks that are contained in the UDF file/dir packet
239+ * - rewrite pkt_gather_data to a one-step solution
240+ * - add private pktcdvd elevator
241+ * - shutdown write access to device upon write failure
242+ * - fix off-by-one bug in capacity
243+ * - setup sourceforge project (packet-cd.sourceforge.net)
244+ * - add more blk ioctls to pkt_ioctl
245+ * - set inactive request queue head
246+ * - change panic calls to BUG, better with kdb
247+ * - have pkt_gather_data check correct block size and kill rq if wrong
248+ * - rework locking
249+ * - introduce per-pd queues, simplifies pkt_request
250+ * - store pd in queuedata
251+ *
252+ *************************************************************************/
253+
254+#define VERSION_CODE "v0.0.2p 03/03/2002 Jens Axboe (axboe@suse.de)"
255+
256+#include <linux/config.h>
257+#include <linux/module.h>
258+#include <linux/types.h>
259+#include <linux/kernel.h>
260+#include <linux/slab.h>
261+#include <linux/errno.h>
262+#include <linux/delay.h>
263+#include <linux/locks.h>
264+#include <linux/spinlock.h>
265+#include <linux/interrupt.h>
266+#include <linux/file.h>
267+#include <linux/blk.h>
268+#include <linux/blkpg.h>
269+#include <linux/cdrom.h>
270+#include <linux/ide.h>
271+#include <linux/smp_lock.h>
272+#include <linux/pktcdvd.h>
273+#include <linux/kernel_stat.h>
274+#include <linux/sysrq.h>
275+
276+#include <asm/unaligned.h>
277+#include <asm/uaccess.h>
278+
279+/*
280+ * remove for next version -- for now, disable the mentioned option in the
281+ * SCSI section
282+ */
283+#if defined(CONFIG_SCSI_DEBUG_QUEUES)
284+#error "Don't compile with 'Enable extra checks in new queueing code' enabled"
285+#endif
286+
287+#define SCSI_IOCTL_SEND_COMMAND 1
288+
289+/*
290+ * 32 buffers of 2048 bytes
291+ */
292+#define PACKET_MAX_SIZE 32
293+
294+#define NEXT_BH(bh, nbh) \
295+ (((bh)->b_rsector + ((bh)->b_size >> 9)) == (nbh)->b_rsector)
296+
297+#define BH_IN_ORDER(b1, b2) \
298+ ((b1)->b_rsector < (b2)->b_rsector)
299+
300+#define CONTIG_BH(b1, b2) \
301+ ((b1)->b_data + (b1)->b_size == (b2)->b_data)
302+
303+#define ZONE(sector, pd) \
304+ (((sector) + ((pd)->offset)) - (((sector) + ((pd)->offset)) & (((pd)->settings.size - 1))))
305+
306+static int *pkt_sizes;
307+static int *pkt_blksize;
308+static int *pkt_readahead;
309+static struct pktcdvd_device *pkt_devs;
310+static struct proc_dir_entry *pkt_proc;
311+static DECLARE_WAIT_QUEUE_HEAD(pd_bh_wait);
312+
313+/*
314+ * a bit of a kludge, but we want to be able to pass both real and packet
315+ * dev and get the right one.
316+ */
317+static inline struct pktcdvd_device *pkt_find_dev(kdev_t dev)
318+{
319+ int i;
320+
321+ for (i = 0; i < MAX_WRITERS; i++)
322+ if (pkt_devs[i].dev == dev || pkt_devs[i].pkt_dev == dev)
323+ return &pkt_devs[i];
324+
325+ return NULL;
326+}
327+
328+/*
329+ * The following functions are the plugins to the ll_rw_blk
330+ * layer and decide whether a given request / buffer head can be
331+ * merged. We differ in a couple of ways from "normal" block
332+ * devices:
333+ *
334+ * - don't merge when the buffer / request crosses a packet block
335+ * boundary
336+ * - merge buffer head even though it can't be added directly to the
337+ * front or back of the list. this gives us better performance, since
338+ * what would otherwise require multiple requests can now be handled
339+ * in one (hole merging)
340+ * - at this point it's just writes, reads have already been remapped
341+ *
342+ * The device's original merge_ functions are stored in the packet device
343+ * queue (pd->q)
344+ *
345+ */
346+static inline int pkt_do_merge(request_queue_t *q, struct request *rq,
347+ struct buffer_head *bh, int max_segs,
348+ merge_request_fn *merge_fn,
349+ struct pktcdvd_device *pd)
350+{
351+ void *ptr = q->queuedata;
352+ int ret;
353+
354+ if (rq->cmd != WRITE)
355+ BUG();
356+
357+ if (ZONE(rq->sector, pd) != ZONE(bh->b_rsector, pd))
358+ return ELEVATOR_NO_MERGE;
359+
360+ /*
361+ * NOTE: this is done under the io_request_lock/queue_lock, hence
362+ * it is safe
363+ */
364+ q->queuedata = pd->cdrw.queuedata;
365+ ret = merge_fn(q, rq, bh, max_segs);
366+ q->queuedata = ptr;
367+ return ret;
368+}
369+
370+static int pkt_front_merge_fn(request_queue_t *q, struct request *rq,
371+ struct buffer_head *bh, int max_segs)
372+{
373+ struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
374+
375+ return pkt_do_merge(q, rq, bh, max_segs, pd->cdrw.front_merge_fn, pd);
376+}
377+
378+static int pkt_back_merge_fn(request_queue_t *q, struct request *rq,
379+ struct buffer_head *bh, int max_segs)
380+{
381+ struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
382+
383+ return pkt_do_merge(q, rq, bh, max_segs, pd->cdrw.back_merge_fn, pd);
384+}
385+
386+/*
387+ * rules similar to above
388+ */
389+static int pkt_merge_requests_fn(request_queue_t *q, struct request *rq,
390+ struct request *nxt, int max_segs)
391+{
392+ struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev);
393+ struct packet_cdrw *cdrw = &pd->cdrw;
394+ void *ptr = q->queuedata;
395+ int ret;
396+
397+ if (ZONE(rq->sector, pd) != ZONE(nxt->sector + nxt->nr_sectors - 1, pd))
398+ return 0;
399+
400+ q->queuedata = cdrw->queuedata;
401+ ret = cdrw->merge_requests_fn(q, rq, nxt, max_segs);
402+ q->queuedata = ptr;
403+ return ret;
404+}
405+
406+static int pkt_grow_bhlist(struct pktcdvd_device *pd, int count)
407+{
408+ struct packet_cdrw *cdrw = &pd->cdrw;
409+ struct buffer_head *bh;
410+ int i = 0;
411+
412+ VPRINTK("grow_bhlist: count=%d\n", count);
413+
414+ while (i < count) {
415+ bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL);
416+ if (!bh)
417+ break;
418+
419+ bh->b_data = kmalloc(CD_FRAMESIZE, GFP_KERNEL);
420+ if (!bh->b_data) {
421+ kmem_cache_free(bh_cachep, bh);
422+ break;
423+ }
424+ bh->b_page = virt_to_page(bh->b_data);
425+
426+ spin_lock_irq(&pd->lock);
427+ bh->b_pprev = &cdrw->bhlist;
428+ bh->b_next = cdrw->bhlist;
429+ cdrw->bhlist = bh;
430+ spin_unlock_irq(&pd->lock);
431+
432+ bh->b_size = CD_FRAMESIZE;
433+ bh->b_list = PKT_BUF_LIST;
434+ atomic_inc(&cdrw->free_bh);
435+ i++;
436+ }
437+
438+ return i;
439+}
440+
441+static int pkt_shrink_bhlist(struct pktcdvd_device *pd, int count)
442+{
443+ struct packet_cdrw *cdrw = &pd->cdrw;
444+ struct buffer_head *bh;
445+ int i = 0;
446+
447+ VPRINTK("shrink_bhlist: count=%d\n", count);
448+
449+ while ((i < count) && cdrw->bhlist) {
450+ spin_lock_irq(&pd->lock);
451+ bh = cdrw->bhlist;
452+ cdrw->bhlist = bh->b_next;
453+ spin_unlock_irq(&pd->lock);
454+ if (bh->b_list != PKT_BUF_LIST)
455+ BUG();
456+ kfree(bh->b_data);
457+ kmem_cache_free(bh_cachep, bh);
458+ atomic_dec(&cdrw->free_bh);
459+ i++;
460+ }
461+
462+ return i;
463+}
464+
f87f0d90 465+/*
466+ * These functions manage a simple pool of buffer_heads.
467+ */
468+static struct buffer_head *pkt_get_stacked_bh(struct pktcdvd_device *pd)
469+{
470+ unsigned long flags;
471+ struct buffer_head *bh;
472+
473+ spin_lock_irqsave(&pd->lock, flags);
474+ bh = pd->stacked_bhlist;
475+ if (bh) {
476+ pd->stacked_bhlist = bh->b_next;
477+ bh->b_next = NULL;
478+ pd->stacked_bhcnt--;
479+ BUG_ON(pd->stacked_bhcnt < 0);
480+ }
481+ spin_unlock_irqrestore(&pd->lock, flags);
482+
483+ return bh;
484+}
485+
486+static void pkt_put_stacked_bh(struct pktcdvd_device *pd, struct buffer_head *bh)
487+{
488+ unsigned long flags;
489+
490+ spin_lock_irqsave(&pd->lock, flags);
491+ if (pd->stacked_bhcnt < STACKED_BH_POOL_SIZE) {
492+ bh->b_next = pd->stacked_bhlist;
493+ pd->stacked_bhlist = bh;
494+ pd->stacked_bhcnt++;
495+ bh = NULL;
496+ }
497+ spin_unlock_irqrestore(&pd->lock, flags);
498+ if (bh) {
499+ kmem_cache_free(bh_cachep, bh);
500+ }
501+}
502+
503+static void pkt_shrink_stacked_bhlist(struct pktcdvd_device *pd)
504+{
505+ struct buffer_head *bh;
506+
507+ while ((bh = pkt_get_stacked_bh(pd)) != NULL) {
508+ kmem_cache_free(bh_cachep, bh);
509+ }
510+}
511+
512+static int pkt_grow_stacked_bhlist(struct pktcdvd_device *pd)
513+{
514+ struct buffer_head *bh;
515+ int i;
516+
517+ for (i = 0; i < STACKED_BH_POOL_SIZE; i++) {
518+ bh = kmem_cache_alloc(bh_cachep, GFP_KERNEL);
519+ if (!bh) {
520+ pkt_shrink_stacked_bhlist(pd);
521+ return 0;
522+ }
523+ pkt_put_stacked_bh(pd, bh);
524+ }
525+ return 1;
526+}
527+
528+
e5ff5fa6 529+static request_queue_t *pkt_get_queue(kdev_t dev)
530+{
531+ struct pktcdvd_device *pd = pkt_find_dev(dev);
532+ if (!pd)
533+ return NULL;
534+ return &pd->cdrw.r_queue;
535+}
536+
537+static void pkt_put_buffer(struct buffer_head *bh)
538+{
539+ struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_dev)];
540+ unsigned long flags;
541+
542+ if (bh->b_list != PKT_BUF_LIST)
543+ return;
544+
545+ bh->b_state = 0;
546+ bh->b_reqnext = NULL;
547+ bh->b_end_io = NULL;
548+
549+ spin_lock_irqsave(&pd->lock, flags);
550+ bh->b_next = pd->cdrw.bhlist;
551+ pd->cdrw.bhlist = bh;
552+ spin_unlock_irqrestore(&pd->lock, flags);
553+ atomic_inc(&pd->cdrw.free_bh);
554+}
555+
556+static inline void __pkt_inject_request(request_queue_t *q, struct request *rq)
557+{
558+ struct list_head *head = &q->queue_head;
559+
560+ VPRINTK("__pkt_inject_request: list_empty == %d, size=%d, cmd=%d\n",
561+ list_empty(&q->queue_head), rq->bh->b_size >> 9, rq->cmd);
562+
563+ if (list_empty(&q->queue_head))
564+ q->plug_device_fn(q, rq->rq_dev);
565+
566+ list_add_tail(&rq->queue, head);
567+}
568+
569+static void pkt_inject_request(request_queue_t *q, struct request *rq)
570+{
571+ spin_lock_irq(&io_request_lock);
572+ __pkt_inject_request(q, rq);
573+ spin_unlock_irq(&io_request_lock);
574+}
575+
576+static inline void __pkt_end_request(struct pktcdvd_device *pd)
577+{
578+ pd->rq = NULL;
579+ clear_bit(PACKET_RQ, &pd->flags);
580+ clear_bit(PACKET_BUSY, &pd->flags);
581+}
582+
583+/*
584+ * io_request_lock must be held and interrupts disabled
585+ */
586+static void pkt_end_request(struct pktcdvd_device *pd)
587+{
588+ unsigned long flags;
589+
590+ spin_lock_irqsave(&pd->lock, flags);
591+ __pkt_end_request(pd);
592+ spin_unlock_irqrestore(&pd->lock, flags);
593+}
594+
595+
596+static inline void __pkt_kill_request(struct request *rq, int uptodate, char *name)
597+{
598+ struct buffer_head *bh = rq->bh, *nbh;
599+
600+ while (bh) {
601+ nbh = bh->b_reqnext;
602+ bh->b_reqnext = NULL;
603+
604+ if (bh->b_end_io) {
605+ bh->b_end_io(bh, uptodate);
606+ } else {
607+ mark_buffer_clean(bh);
608+ mark_buffer_uptodate(bh, uptodate);
609+ unlock_buffer(bh);
610+ }
611+
612+ bh = nbh;
613+ }
614+
615+ end_that_request_last(rq);
616+}
617+
618+
619+void pkt_kill_request(struct pktcdvd_device *pd, struct request *rq, int ok)
620+{
621+ printk("pktcdvd: killing request\n");
622+ spin_lock_irq(&io_request_lock);
623+ __pkt_kill_request(rq, ok, pd->name);
624+ spin_unlock_irq(&io_request_lock);
625+ pkt_end_request(pd);
626+}
627+
628+static void pkt_end_io_read(struct buffer_head *bh, int uptodate)
629+{
630+ if (!uptodate) {
631+ /* Obviously not correct, but it avoids locking up the kernel */
632+ printk("Ignoring read error on sector:%ld\n", bh->b_rsector);
633+ uptodate = 1;
634+ }
635+
636+ mark_buffer_uptodate(bh, uptodate);
637+ unlock_buffer(bh);
638+}
639+
e5ff5fa6 640+/*
641+ * if the buffer is already in the buffer cache, grab it if we can lock
642+ * it down
643+ */
644+static struct buffer_head *pkt_get_hash(kdev_t dev, unsigned long block, int size)
645+{
646+ struct buffer_head *bh = NULL;
647+
648+ bh = get_hash_table(dev, block, size);
649+ if (bh) {
650+ if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
651+ brelse(bh);
652+ if (atomic_set_buffer_clean(bh))
653+ refile_buffer(bh);
654+ SetPageReferenced(bh->b_page);
655+ } else {
656+ brelse(bh);
657+ bh = NULL;
658+ }
659+ }
660+
661+ return bh;
662+}
663+
664+static inline struct buffer_head *__pkt_get_buffer(struct pktcdvd_device *pd,
665+ unsigned long sector)
666+{
667+ struct buffer_head *bh;
668+
669+ if (!atomic_read(&pd->cdrw.free_bh))
670+ BUG();
671+
672+ atomic_dec(&pd->cdrw.free_bh);
673+
674+ spin_lock_irq(&pd->lock);
675+ bh = pd->cdrw.bhlist;
676+ pd->cdrw.bhlist = bh->b_next;
677+ bh->b_next = NULL;
678+ spin_unlock_irq(&pd->lock);
679+
680+ bh->b_next_free = NULL;
681+ bh->b_prev_free = NULL;
682+ bh->b_this_page = NULL;
683+ bh->b_pprev = NULL;
684+ bh->b_reqnext = NULL;
685+
686+ init_waitqueue_head(&bh->b_wait);
687+ atomic_set(&bh->b_count, 1);
688+ bh->b_list = PKT_BUF_LIST;
689+ bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) | (1 << BH_Req);
690+ bh->b_dev = pd->pkt_dev;
691+
692+ return bh;
693+}
694+
695+static void pkt_end_io_write(struct buffer_head *, int);
696+
697+static struct buffer_head *pkt_get_buffer(struct pktcdvd_device *pd,
698+ unsigned long sector, int size)
699+{
700+ unsigned long block = sector / (size >> 9);
701+ struct buffer_head *bh;
702+
703+ VPRINTK("get_buffer: sector=%ld, size=%d\n", sector, size);
704+
705+ bh = pkt_get_hash(pd->pkt_dev, block, size);
706+ if (bh)
707+ pd->stats.bh_cache_hits += (size >> 9);
708+ else
709+ bh = __pkt_get_buffer(pd, sector);
710+
711+ blk_started_io(bh->b_size >> 9);
712+ bh->b_blocknr = block;
713+ bh->b_end_io = pkt_end_io_write;
714+ bh->b_rsector = sector;
715+ bh->b_rdev = pd->dev;
716+ return bh;
717+}
718+
719+/*
720+ * this rq is done -- io_request_lock must be held and interrupts disabled
721+ */
722+static void pkt_rq_end_io(struct pktcdvd_device *pd)
723+{
724+ unsigned long flags;
725+
726+ VPRINTK("pkt_rq_end_io: rq=%p, cmd=%d, q=%p\n", pd->rq, pd->rq->cmd, pd->rq->q);
727+
728+ spin_lock_irqsave(&pd->lock, flags);
729+
730+ /*
731+ * debug checks
732+ */
733+ if (!test_bit(PACKET_RQ, &pd->flags))
734+ printk("pktcdvd: rq_end_io: RQ not set\n");
735+ if (!test_bit(PACKET_BUSY, &pd->flags))
736+ printk("pktcdvd: rq_end_io: BUSY not set\n");
737+
738+ __pkt_end_request(pd);
739+ wake_up(&pd->wqueue);
740+ spin_unlock_irqrestore(&pd->lock, flags);
741+}
742+
743+static inline void pkt_mark_readonly(struct pktcdvd_device *pd, int on)
744+{
745+ if (on)
746+ set_bit(PACKET_READONLY, &pd->flags);
747+ else
748+ clear_bit(PACKET_READONLY, &pd->flags);
749+}
750+
751+static inline void __pkt_end_io_write(struct pktcdvd_device *pd,
752+ struct buffer_head *bh, int uptodate)
753+{
754+ VPRINTK("end_io_write: bh=%ld, uptodate=%d\n", bh->b_blocknr, uptodate);
755+
756+ /*
757+ * general Linux bug, no one should clear the BH_Uptodate flag for
758+ * a failed write...
759+ */
760+ if (uptodate)
761+ mark_buffer_uptodate(bh, uptodate);
762+ else {
763+ printk("pktcdvd: %s: WRITE error sector %lu\n", pd->name, bh->b_rsector);
764+#if 0
765+ set_bit(PACKET_RECOVERY, &pd->flags);
766+ wake_up(&pd->wqueue);
767+#endif
768+ }
769+
770+ pd->stats.bh_e++;
771+
772+ atomic_dec(&pd->wrqcnt);
773+ if (atomic_read(&pd->wrqcnt) == 0) {
774+ pkt_rq_end_io(pd);
775+ }
776+
777+ unlock_buffer(bh);
778+}
779+
780+/*
781+ * we use this as our default b_end_io handler, since we need to take
782+ * the entire request off the list if just one of the clusters fails.
783+ * later on this should also talk to UDF about relocating blocks -- for
784+ * now we just drop the rq entirely. when doing the relocating we must also
785+ * lock the bh down to ensure that we can easily reconstruct the write should
786+ * it fail.
787+ */
788+static void pkt_end_io_write(struct buffer_head *bh, int uptodate)
789+{
790+ struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
791+
792+ __pkt_end_io_write(pd, bh, uptodate);
793+ pkt_put_buffer(bh);
794+}
795+
796+static void pkt_end_io_write_stacked(struct buffer_head *bh, int uptodate)
797+{
798+ struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
799+ struct buffer_head *rbh = bh->b_private;
800+
801+ __pkt_end_io_write(pd, bh, uptodate);
802+ rbh->b_end_io(rbh, uptodate);
f87f0d90 803+ pkt_put_stacked_bh(pd, bh);
804+ wake_up(&pd_bh_wait);
e5ff5fa6 805+}
806+
807+static int pkt_init_rq(struct pktcdvd_device *pd, struct request *rq)
808+{
809+ struct buffer_head *bh;
810+ unsigned int cnt, nr_segments;
811+
812+ cnt = 0;
813+ nr_segments = 1;
814+ bh = rq->bh;
815+ while (bh) {
816+ struct buffer_head *nbh = bh->b_reqnext;
817+
818+ bh->b_rdev = pd->pkt_dev;
819+
820+ /*
821+ * the buffer better be uptodate, mapped, and locked!
822+ */
823+ if (!buffer_uptodate(bh)) {
824+ printk("%lu not uptodate\n", bh->b_rsector);
825+ /*
826+ * It is not really the pktcdvd driver's problem if
827+ * someone wants to write stale data.
828+ */
829+ }
830+
831+ if (!buffer_locked(bh) || !buffer_mapped(bh)) {
832+ printk("%lu, state %lx\n", bh->b_rsector, bh->b_state);
833+ BUG();
834+ }
835+
836+ if (nbh) {
837+ if (!CONTIG_BH(bh, nbh))
838+ nr_segments++;
839+
840+ /*
841+ * if this happens, do report
842+ */
843+ if ((bh->b_rsector + (bh->b_size >> 9))!=nbh->b_rsector) {
844+ printk("%lu (%p)-> %lu (%p) (%lu in all)\n",
845+ bh->b_rsector, bh, nbh->b_rsector, nbh,
846+ rq->nr_sectors);
847+ return 1;
848+ }
849+ }
850+
851+ cnt += bh->b_size >> 9;
852+ bh = nbh;
853+ }
854+
855+ rq->nr_segments = rq->nr_hw_segments = nr_segments;
856+
857+ if (cnt != rq->nr_sectors) {
858+ printk("botched request %u (%lu)\n", cnt, rq->nr_sectors);
859+ return 1;
860+ }
861+
862+ return 0;
863+}
864+
865+/*
866+ * really crude stats for now...
867+ */
868+static void pkt_account_rq(struct pktcdvd_device *pd, int read, int written,
869+ int bs)
870+{
871+ pd->stats.bh_s += (written / bs);
872+ pd->stats.secs_w += written;
873+ pd->stats.secs_r += read;
874+}
875+
876+/*
877+ * does request span two packets? 0 == yes, 1 == no
878+ */
879+static int pkt_one_zone(struct pktcdvd_device *pd, struct request *rq)
880+{
881+ if (!pd->settings.size)
882+ return 0;
883+
884+ if (!(rq->cmd & WRITE))
885+ return 1;
886+
887+ return ZONE(rq->sector, pd) == ZONE(rq->sector + rq->nr_sectors -1, pd);
888+}
889+
890+#if defined(CONFIG_CDROM_PKTCDVD_BEMPTY)
891+static void pkt_init_buffer(struct buffer_head *bh)
892+{
893+ set_bit(BH_Uptodate, &bh->b_state);
894+ set_bit(BH_Dirty, &bh->b_state);
895+ memset(bh->b_data, 0, bh->b_size);
896+}
897+
898+static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh)
899+{
900+ struct super_block *sb;
901+ struct super_operations *sop;
902+ unsigned long packet;
903+ int ret;
904+
905+ ret = 0;
906+ if ((sb = get_super(pd->pkt_dev)) == NULL)
907+ goto out;
908+ if ((sop = sb->s_op) == NULL)
909+ goto out;
910+ if (sop->block_empty == NULL)
911+ goto out;
912+
913+ packet = 0;
914+ if (sop->block_empty(sb, bh->b_blocknr, &packet)) {
915+ pkt_init_buffer(bh);
916+ ret = 1;
917+ }
918+
919+out:
920+ return ret;
921+}
922+
923+#else /* defined(CONFIG_CDROM_PKTCDVD_BEMPTY) */
924+
925+static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh)
926+{
927+ return 0;
928+}
929+
930+#endif /* defined(CONFIG_CDROM_PKTCDVD_BEMPTY) */
931+
932+static int pkt_flush_cache(struct pktcdvd_device *pd);
933+
934+static void pkt_flush_writes(struct pktcdvd_device *pd)
935+{
936+ if (pd->unflushed_writes) {
937+ pd->unflushed_writes = 0;
938+ pkt_flush_cache(pd);
939+ }
940+}
941+
942+/*
943+ * basically just does a ll_rw_block for the bhs given to us, but we
944+ * don't return until we have them.
945+ */
946+static void pkt_read_bh(struct pktcdvd_device *pd, struct buffer_head *bh)
947+{
948+ /*
949+ * UDF says it's empty, woohoo
950+ */
951+ if (pkt_sb_empty(pd, bh))
952+ return;
953+
954+ down(&pd->cache_sync_mutex);
955+ pkt_flush_writes(pd);
956+ generic_make_request(READ, bh);
957+ up(&pd->cache_sync_mutex);
958+}
959+
960+static int pkt_index_bhs(struct buffer_head **bhs)
961+{
962+ struct buffer_head *bh;
963+ int index;
964+ int error = 0;
965+
966+ /*
967+ * now finish pending reads and connect the chain of buffers
968+ */
969+ index = 0;
970+ while (index < PACKET_MAX_SIZE) {
971+ bh = bhs[index];
972+
973+ /*
974+ * pin down private buffers (ie, force I/O to complete)
975+ */
976+ if (bh->b_end_io == pkt_end_io_read) {
977+ lock_buffer(bh);
978+ bh->b_end_io = pkt_end_io_write;
979+ }
980+
981+ if (!buffer_locked(bh))
982+ BUG();
983+
984+ if (!buffer_uptodate(bh)) {
985+ printk("pktcdvd: read failure (%s, sec %lu)\n",
986+ kdevname(bh->b_rdev), bh->b_rsector);
987+ error = 1;
988+ }
989+
990+ /*
991+ * attach previous
992+ */
993+ if (index) {
994+ struct buffer_head *pbh = bhs[index - 1];
995+
996+ if ((pbh->b_rsector + (pbh->b_size >> 9)) != bh->b_rsector) {
997+ printk("%lu -> %lu\n", pbh->b_rsector, bh->b_rsector);
998+ index = 0;
999+ break;
1000+ }
1001+ pbh->b_reqnext = bh;
1002+ }
1003+ index++;
1004+ }
1005+
1006+ if (error)
1007+ return 0;
1008+
1009+ if (index) {
1010+ index--;
1011+ bhs[index]->b_reqnext = NULL;
1012+ }
1013+
1014+ return index;
1015+}
1016+
1017+/*
1018+ * fill in the holes of a request
1019+ *
1020+ * Returns: 0, keep 'em coming -- 1, stop queueing
1021+ */
1022+static int pkt_gather_data(struct pktcdvd_device *pd, struct request *rq)
1023+{
1024+ unsigned long start_s, end_s, sector;
1025+ struct buffer_head *bh;
1026+ unsigned int sectors, index;
1027+ struct buffer_head *bhs[PACKET_MAX_SIZE];
1028+
1029+ memset(bhs, 0, sizeof(bhs));
1030+
1031+ /*
1032+ * all calculations are done with 512 byte sectors
1033+ */
1034+ sectors = pd->settings.size - rq->nr_sectors;
1035+ start_s = rq->sector - (rq->sector & (pd->settings.size - 1));
1036+ end_s = start_s + pd->settings.size;
1037+
1038+ VPRINTK("pkt_gather_data: cmd=%d\n", rq->cmd);
1039+ VPRINTK("need %d sectors for %s\n", sectors, kdevname(pd->dev));
1040+ VPRINTK("from %lu to %lu ", start_s, end_s);
1041+ VPRINTK("(%lu - %lu)\n", rq->bh->b_rsector, rq->bhtail->b_rsector +
1042+ rq->current_nr_sectors);
1043+
1044+ /*
1045+ * first fill-out map of the buffers we have
1046+ */
1047+ bh = rq->bh;
1048+ while (bh) {
1049+ index = (bh->b_rsector & (pd->settings.size - 1)) / (bh->b_size >> 9);
1050+
1051+ bhs[index] = bh;
1052+ bh = bh->b_reqnext;
1053+
1054+ /*
1055+ * make sure to detach from list!
1056+ */
1057+ bhs[index]->b_reqnext = NULL;
1058+ }
1059+
1060+ /*
1061+ * now get buffers for missing blocks, and schedule reads for them
1062+ */
1063+ for (index = 0, sector = start_s; sector < end_s; index++) {
1064+ if (bhs[index]) {
1065+ bh = bhs[index];
1066+ goto next;
1067+ }
1068+
1069+ bh = pkt_get_buffer(pd, sector, CD_FRAMESIZE);
1070+
1071+ bhs[index] = bh;
1072+ rq->nr_sectors += bh->b_size >> 9;
1073+ rq->nr_segments++;
1074+
1075+ if (!buffer_uptodate(bh)) {
1076+ bh->b_end_io = pkt_end_io_read;
1077+ pkt_read_bh(pd, bh);
1078+ }
1079+
1080+ next:
1081+ sector += bh->b_size >> 9;
1082+ }
1083+
1084+ index = pkt_index_bhs(bhs);
1085+#if 0
1086+ if (!index)
1087+ goto kill_it;
1088+#endif
1089+
1090+ rq->bh = bhs[0];
1091+ rq->bhtail = bhs[index];
1092+ rq->buffer = rq->bh->b_data;
1093+ rq->current_nr_sectors = rq->bh->b_size >> 9;
1094+ rq->hard_nr_sectors = rq->nr_sectors;
1095+ rq->sector = rq->hard_sector = start_s;
1096+
1097+ VPRINTK("unlocked last %lu\n", rq->bhtail->b_rsector);
1098+ if (pkt_init_rq(pd, rq)) {
1099+ for (index = 0; index < PACKET_MAX_SIZE; index++) {
1100+ bh = bhs[index];
1101+ printk("[%d] %lu %d (%p -> %p)\n", index, bh->b_rsector,
1102+ bh->b_size, bh, bh->b_reqnext);
1103+ }
1104+ goto kill_it;
1105+ }
1106+
1107+ pkt_account_rq(pd, sectors, rq->nr_sectors, rq->current_nr_sectors);
1108+
1109+ /*
1110+ * sanity check
1111+ */
1112+ if (rq->nr_sectors != pd->settings.size) {
1113+ printk("pktcdvd: request mismatch %lu (should be %u)\n",
1114+ rq->nr_sectors, pd->settings.size);
1115+ BUG();
1116+ }
1117+
1118+ return 0;
1119+
1120+ /*
1121+ * for now, just kill entire request and hope for the best...
1122+ */
1123+kill_it:
1124+ for (index = 0; index < PACKET_MAX_SIZE; index++) {
1125+ bh = bhs[index];
1126+ buffer_IO_error(bh);
1127+ if (bh->b_list == PKT_BUF_LIST)
1128+ pkt_put_buffer(bh);
1129+ }
1130+ end_that_request_last(pd->rq);
1131+ return 1;
1132+}
1133+
1134+/*
1135+ * Returns: 1, keep 'em coming -- 0, wait for wakeup
1136+ */
1137+static int pkt_do_request(struct pktcdvd_device *pd, struct request *rq)
1138+{
1139+ VPRINTK("do_request: bh=%ld, nr_sectors=%ld, size=%d, cmd=%d\n", rq->bh->b_blocknr, rq->nr_sectors, pd->settings.size, rq->cmd);
1140+
1141+ /*
1142+ * perfect match. the merge_* functions have already made sure that
1143+ * a request doesn't cross a packet boundary, so if the sector
1144+ * count matches it's good.
1145+ */
1146+ if (rq->nr_sectors == pd->settings.size) {
1147+ if (pkt_init_rq(pd, rq)) {
1148+ pkt_kill_request(pd, rq, 0);
1149+ return 1;
1150+ }
1151+
1152+ pkt_account_rq(pd, 0, rq->nr_sectors, rq->current_nr_sectors);
1153+ return 0;
1154+ }
1155+
1156+ /*
1157+ * paranoia...
1158+ */
1159+ if (rq->nr_sectors > pd->settings.size) {
1160+ printk("pktcdvd: request too big! BUG! %lu\n", rq->nr_sectors);
1161+ BUG();
1162+ }
1163+
1164+ return pkt_gather_data(pd, rq);
1165+}
1166+
1167+/*
1168+ * recover a failed write, query for relocation if possible
1169+ */
1170+static int pkt_start_recovery(struct pktcdvd_device *pd, struct request *rq)
1171+{
1172+ struct super_block *sb = get_super(pd->pkt_dev);
1173+ struct buffer_head *bhs[PACKET_MAX_SIZE], *bh, *obh;
1174+ unsigned long old_block, new_block, sector;
1175+ int i, sectors;
1176+
1177+ if (!sb || !sb->s_op || !sb->s_op->relocate_blocks)
1178+ goto fail;
1179+
1180+ old_block = (rq->sector & ~(pd->settings.size - 1)) / (rq->bh->b_size >> 9);
1181+ if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
1182+ goto fail;
1183+
1184+ memset(bhs, 0, sizeof(bhs));
1185+ bh = rq->bh;
1186+ while (bh) {
1187+ i = (bh->b_rsector & (pd->settings.size - 1)) / (bh->b_size >> 9);
1188+
1189+ bhs[i] = bh;
1190+ bh = bh->b_reqnext;
1191+ bhs[i]->b_reqnext = NULL;
1192+ }
1193+
1194+ sectors = 0;
1195+ sector = new_block * (rq->bh->b_size >> 9);
1196+ for (i = 0; i < PACKET_MAX_SIZE; i++) {
1197+ bh = bhs[i];
1198+
1199+ /*
1200+ * three cases -->
1201+ * 1) bh is not there at all
1202+ * 2) bh is there and not ours, get a new one and
1203+ * invalidate this block for the future
1204+ * 3) bh is there and ours, just change the sector
1205+ */
1206+ if (!bh) {
1207+ obh = pkt_get_hash(pd->pkt_dev, new_block,CD_FRAMESIZE);
1208+ bh = __pkt_get_buffer(pd, sector);
1209+ if (obh) {
1210+ if (buffer_uptodate(obh)) {
1211+ memcpy(bh->b_data, obh->b_data, obh->b_size);
1212+ set_bit(BH_Uptodate, &bh->b_state);
1213+ }
1214+ unlock_buffer(obh);
1215+ bforget(obh);
1216+ }
1217+ bhs[i] = bh;
1218+ } else if (bh->b_list != PKT_BUF_LIST) {
1219+ bhs[i] = pkt_get_buffer(pd, sector, CD_FRAMESIZE);
1220+ memcpy(bhs[i]->b_data, bh->b_data, CD_FRAMESIZE);
1221+ unlock_buffer(bh);
1222+ bforget(bh);
1223+ bh = bhs[i];
1224+ set_bit(BH_Uptodate, &bh->b_state);
1225+ } else {
1226+ bh->b_rsector = sector;
1227+ bh->b_blocknr = new_block;
1228+ }
1229+
1230+ sector += (bh->b_size >> 9);
1231+ new_block++;
1232+ sectors += (bh->b_size >> 9);
1233+ }
1234+
1235+ i = pkt_index_bhs(bhs);
1236+ if (!i)
1237+ goto fail;
1238+
1239+ rq->bh = bhs[0];
1240+ rq->bhtail = bhs[i];
1241+ rq->buffer = rq->bh->b_data;
1242+ rq->current_nr_sectors = rq->bh->b_size >> 9;
1243+ rq->hard_nr_sectors = rq->nr_sectors = sectors;
1244+ rq->sector = rq->hard_sector = rq->bh->b_rsector;
1245+ rq->errors = 0;
1246+ clear_bit(PACKET_RECOVERY, &pd->flags);
1247+ clear_bit(PACKET_BUSY, &pd->flags);
1248+ return 0;
1249+
1250+fail:
1251+ printk("pktcdvd: rq recovery not possible\n");
1252+ pkt_kill_request(pd, rq, 0);
1253+ clear_bit(PACKET_RECOVERY, &pd->flags);
1254+ return 1;
1255+}
1256+
1257+/*
1258+ * handle the requests that got queued for this writer
1259+ *
1260+ * returns 0 for busy (already doing something), or 1 to queue a new one
1261+ *
1262+ */
1263+static int pkt_handle_queue(struct pktcdvd_device *pd, request_queue_t *q)
1264+{
1265+ struct request *rq;
1266+ int ret;
1267+
1268+ VPRINTK("handle_queue\n");
1269+
1270+ /*
1271+ * nothing for us to do
1272+ */
1273+ if (!test_bit(PACKET_RQ, &pd->flags))
1274+ return 1;
1275+
1276+ spin_lock_irq(&pd->lock);
1277+ rq = pd->rq;
1278+ spin_unlock_irq(&pd->lock);
1279+
1280+ if (test_bit(PACKET_RECOVERY, &pd->flags))
1281+ if (pkt_start_recovery(pd, rq))
1282+ return 1;
1283+
1284+ /*
1285+ * already being processed
1286+ */
1287+ if (test_and_set_bit(PACKET_BUSY, &pd->flags))
1288+ return 0;
1289+
1290+ /*
1291+ * nothing to do
1292+ */
1293+ ret = 1;
1294+ if (rq == NULL) {
1295+ printk("handle_queue: pd BUSY+RQ, but no rq\n");
1296+ clear_bit(PACKET_RQ, &pd->flags);
1297+ goto out;
1298+ }
1299+
1300+ /*
1301+ * reads are shipped directly to cd-rom, so they should not
1302+ * pop up here
1303+ */
1304+ if (rq->cmd == READ)
1305+ BUG();
1306+
1307+ if ((rq->current_nr_sectors << 9) != CD_FRAMESIZE) {
1308+ pkt_kill_request(pd, rq, 0);
1309+ goto out;
1310+ }
1311+
1312+ if (!pkt_do_request(pd, rq)) {
1313+ atomic_add(PACKET_MAX_SIZE, &pd->wrqcnt);
1314+ down(&pd->cache_sync_mutex);
1315+ pkt_inject_request(q, rq);
1316+ pd->unflushed_writes = 1;
1317+ up(&pd->cache_sync_mutex);
1318+ return 0;
1319+ }
1320+
1321+out:
1322+ clear_bit(PACKET_BUSY, &pd->flags);
1323+ return ret;
1324+}
1325+
1326+/*
1327+ * kpacketd is woken up when writes have been queued for one of our
1328+ * registered devices
1329+ */
1330+static int kcdrwd(void *foobar)
1331+{
1332+ struct pktcdvd_device *pd = foobar;
1333+ request_queue_t *q, *my_queue;
1334+
1335+ /*
1336+ * exit_files, mm (move to lazy-tlb, so context switches become
1337+ * extremely cheap) etc
1338+ */
1339+ daemonize();
1340+
1341+ current->policy = SCHED_OTHER;
0db0975e 1342+ current->static_prio = -20;
e5ff5fa6 1343+ sprintf(current->comm, pd->name);
1344+
1345+ spin_lock_irq(&current->sigmask_lock);
1346+ siginitsetinv(&current->blocked, sigmask(SIGKILL));
1347+ flush_signals(current);
1348+ spin_unlock_irq(&current->sigmask_lock);
1349+
1350+ q = blk_get_queue(pd->dev);
1351+ my_queue = blk_get_queue(pd->pkt_dev);
1352+
1353+ for (;;) {
1354+ DECLARE_WAITQUEUE(wait, current);
1355+
1356+ add_wait_queue(&pd->wqueue, &wait);
1357+
1358+ /*
1359+ * if PACKET_BUSY is cleared, we can queue
1360+ * another request. otherwise we need to unplug the
1361+ * cd-rom queue and wait for buffers to be flushed
1362+ * (which will then wake us up again when done).
1363+ */
1364+ do {
1365+ pkt_handle_queue(pd, q);
1366+
1367+ set_current_state(TASK_INTERRUPTIBLE);
1368+
1369+ if (test_bit(PACKET_BUSY, &pd->flags))
1370+ break;
1371+
1372+ spin_lock_irq(&io_request_lock);
1373+ if (list_empty(&my_queue->queue_head)) {
1374+ spin_unlock_irq(&io_request_lock);
1375+ break;
1376+ }
1377+ set_current_state(TASK_RUNNING);
1378+
1379+ my_queue->request_fn(my_queue);
1380+ spin_unlock_irq(&io_request_lock);
1381+ } while (1);
1382+
1383+ generic_unplug_device(q);
1384+
1385+ schedule();
1386+ remove_wait_queue(&pd->wqueue, &wait);
1387+
1388+ /*
1389+ * got SIGKILL
1390+ */
1391+ if (signal_pending(current))
1392+ break;
1393+
1394+ }
1395+
1396+ complete_and_exit(&pd->cdrw.thr_compl, 0);
1397+ return 0;
1398+}
1399+
1400+static void pkt_attempt_remerge(struct pktcdvd_device *pd, request_queue_t *q,
1401+ struct request *rq)
1402+{
1403+ struct request *nxt;
1404+
1405+ while (!list_empty(&q->queue_head)) {
1406+ if (rq->nr_sectors == pd->settings.size)
1407+ break;
1408+
1409+ nxt = blkdev_entry_next_request(&q->queue_head);
1410+
1411+ if (ZONE(rq->sector, pd) != ZONE(nxt->sector, pd))
1412+ break;
1413+ else if (rq->sector + rq->nr_sectors > nxt->sector)
1414+ break;
1415+
1416+ rq->nr_sectors = rq->hard_nr_sectors += nxt->nr_sectors;
1417+ rq->bhtail->b_reqnext = nxt->bh;
1418+ rq->bhtail = nxt->bhtail;
1419+ list_del(&nxt->queue);
1420+ blkdev_release_request(nxt);
1421+ }
1422+}
1423+
1424+/*
1425+ * our request function.
1426+ *
1427+ * - reads are just tossed directly to the device, we don't care.
1428+ * - writes, regardless of size, are added as the current pd rq and
1429+ * kcdrwd is woken up to handle it. kcdrwd will also make sure to
1430+ * reinvoke this request handler, once the given request has been
1431+ * processed.
1432+ *
1433+ * Locks: io_request_lock held
1434+ *
1435+ * Notes: all writers have their own queue, so all requests are for
1436+ * the same device
1437+ */
1438+static void pkt_request(request_queue_t *q)
1439+{
1440+ struct pktcdvd_device *pd = (struct pktcdvd_device *) q->queuedata;
1441+ unsigned long flags;
1442+
1443+ if (list_empty(&q->queue_head))
1444+ return;
1445+
1446+ while (!list_empty(&q->queue_head)) {
1447+ struct request *rq = blkdev_entry_next_request(&q->queue_head);
1448+
1449+ VPRINTK("pkt_request: cmd=%d, rq=%p, rq->sector=%ld, rq->nr_sectors=%ld\n", rq->cmd, rq, rq->sector, rq->nr_sectors);
1450+
1451+ blkdev_dequeue_request(rq);
1452+
1453+ rq->rq_dev = pd->dev;
1454+
1455+ if (rq->cmd == READ)
1456+ BUG();
1457+
1458+ if (test_bit(PACKET_RECOVERY, &pd->flags))
1459+ break;
1460+
1461+ /*
1462+ * paranoia, shouldn't trigger...
1463+ */
1464+ if (!pkt_one_zone(pd, rq)) {
1465+ printk("rq->cmd=%d, rq->sector=%ld, rq->nr_sectors=%ld\n",
1466+ rq->cmd, rq->sector, rq->nr_sectors);
1467+ BUG();
1468+ }
1469+
1470+ pkt_attempt_remerge(pd, q, rq);
1471+
1472+ spin_lock_irqsave(&pd->lock, flags);
1473+
1474+ /*
1475+ * already gathering data for another read. the
1476+ * rfn will be reinvoked once that is done
1477+ */
1478+ if (test_and_set_bit(PACKET_RQ, &pd->flags)) {
1479+ list_add(&rq->queue, &q->queue_head);
1480+ spin_unlock_irqrestore(&pd->lock, flags);
1481+ break;
1482+ }
1483+
1484+ if (pd->rq)
1485+ BUG();
1486+
1487+ pd->rq = rq;
1488+ spin_unlock_irqrestore(&pd->lock, flags);
1489+ break;
1490+ }
1491+ VPRINTK("wake up wait queue\n");
1492+ wake_up(&pd->wqueue);
1493+}
1494+
1495+static void pkt_print_settings(struct pktcdvd_device *pd)
1496+{
1497+ printk("pktcdvd: %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
1498+ printk("%u blocks, ", pd->settings.size >> 2);
1499+ printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
1500+}
1501+
1502+/*
1503+ * A generic sense dump / resolve mechanism should be implemented across
1504+ * all ATAPI + SCSI devices.
1505+ */
1506+static void pkt_dump_sense(struct request_sense *sense)
1507+{
1508+ char *info[9] = { "No sense", "Recovered error", "Not ready",
1509+ "Medium error", "Hardware error", "Illegal request",
1510+ "Unit attention", "Data protect", "Blank check" };
1511+
1512+ if (sense == NULL)
1513+ return;
1514+
1515+ if (sense->sense_key > 8) {
1516+ printk("pktcdvd: sense invalid\n");
1517+ return;
1518+ }
1519+
1520+ printk("pktcdvd: sense category %s ", info[sense->sense_key]);
1521+ printk("asc(%02x), ascq(%02x)\n", sense->asc, sense->ascq);
1522+}
1523+
1524+/*
1525+ * write mode select package based on pd->settings
1526+ */
1527+static int pkt_set_write_settings(struct pktcdvd_device *pd)
1528+{
1529+ struct cdrom_device_info *cdi = pd->cdi;
1530+ struct cdrom_generic_command cgc;
1531+ write_param_page *wp;
1532+ char buffer[128];
1533+ int ret, size;
1534+
1535+ memset(buffer, 0, sizeof(buffer));
1536+ init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
1537+ if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_WRITE_PARMS_PAGE, 0)))
1538+ return ret;
1539+
1540+ size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
1541+ pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
1542+ if (size > sizeof(buffer))
1543+ size = sizeof(buffer);
1544+
1545+ /*
1546+ * now get it all
1547+ */
1548+ init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
1549+ if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_WRITE_PARMS_PAGE, 0)))
1550+ return ret;
1551+
1552+ /*
1553+ * write page is offset header + block descriptor length
1554+ */
1555+ wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
1556+
1557+ wp->fp = pd->settings.fp;
1558+ wp->track_mode = pd->settings.track_mode;
1559+ wp->write_type = pd->settings.write_type;
1560+ wp->data_block_type = pd->settings.block_mode;
1561+
1562+ wp->multi_session = 0;
1563+
1564+#ifdef PACKET_USE_LS
1565+ wp->link_size = 7;
1566+ wp->ls_v = 1;
1567+#endif
1568+
1569+ if (wp->data_block_type == PACKET_BLOCK_MODE1) {
1570+ wp->session_format = 0;
1571+ wp->subhdr2 = 0x20;
1572+ } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
1573+ wp->session_format = 0x20;
1574+ wp->subhdr2 = 8;
1575+#if 0
1576+ wp->mcn[0] = 0x80;
1577+ memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
1578+#endif
1579+ } else {
1580+ /*
1581+ * paranoia
1582+ */
1583+ printk("pktcdvd: write mode wrong %d\n", wp->data_block_type);
1584+ return 1;
1585+ }
1586+ wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
1587+
1588+ cgc.buflen = cgc.cmd[8] = size;
1589+ if ((ret = cdrom_mode_select(cdi, &cgc))) {
1590+ pkt_dump_sense(cgc.sense);
1591+ return ret;
1592+ }
1593+
1594+ pkt_print_settings(pd);
1595+ return 0;
1596+}
1597+
1598+/*
1599+ * 0 -- we can write to this track, 1 -- we can't
1600+ */
1601+static int pkt_good_track(track_information *ti)
1602+{
1603+ /*
1604+ * only good for CD-RW at the moment, not DVD-RW
1605+ */
1606+
1607+ /*
1608+ * FIXME: only for FP
1609+ */
1610+ if (ti->fp == 0)
1611+ return 0;
1612+
1613+ /*
1614+ * "good" settings as per Mt Fuji.
1615+ */
1616+ if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1)
1617+ return 0;
1618+
1619+ if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1)
1620+ return 0;
1621+
1622+ if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1)
1623+ return 0;
1624+
1625+ printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
1626+ return 1;
1627+}
1628+
1629+/*
1630+ * 0 -- we can write to this disc, 1 -- we can't
1631+ */
1632+static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
1633+{
1634+ /*
1635+ * for disc type 0xff we should probably reserve a new track.
1636+ * but i'm not sure, should we leave this to user apps? probably.
1637+ */
1638+ if (di->disc_type == 0xff) {
1639+ printk("pktcdvd: Unknown disc. No track?\n");
1640+ return 1;
1641+ }
1642+
1643+ if (di->disc_type != 0x20 && di->disc_type != 0) {
1644+ printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
1645+ return 1;
1646+ }
1647+
1648+ if (di->erasable == 0) {
1649+ printk("pktcdvd: Disc not erasable\n");
1650+ return 1;
1651+ }
1652+
1653+ if (pd->track_status == PACKET_SESSION_RESERVED) {
1654+ printk("pktcdvd: Can't write to last track (reserved)\n");
1655+ return 1;
1656+ }
1657+
1658+ return 0;
1659+}
1660+
1661+static int pkt_probe_settings(struct pktcdvd_device *pd)
1662+{
1663+ disc_information di;
1664+ track_information ti;
1665+ int ret, track;
1666+
1667+ memset(&di, 0, sizeof(disc_information));
1668+ memset(&ti, 0, sizeof(track_information));
1669+
1670+ if ((ret = cdrom_get_disc_info(pd->dev, &di))) {
1671+ printk("failed get_disc\n");
1672+ return ret;
1673+ }
1674+
1675+ pd->disc_status = di.disc_status;
1676+ pd->track_status = di.border_status;
1677+
1678+ if (pkt_good_disc(pd, &di))
1679+ return -ENXIO;
1680+
1681+ printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
1682+ pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
1683+
1684+ track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
1685+ if ((ret = cdrom_get_track_info(pd->dev, track, 1, &ti))) {
1686+ printk("pktcdvd: failed get_track\n");
1687+ return ret;
1688+ }
1689+
1690+ if (pkt_good_track(&ti)) {
1691+ printk("pktcdvd: can't write to this track\n");
1692+ return -ENXIO;
1693+ }
1694+
1695+ /*
1696+ * we keep packet size in 512 byte units, makes it easier to
1697+ * deal with request calculations.
1698+ */
1699+ pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1700+ if (pd->settings.size == 0) {
1701+ printk("pktcdvd: detected zero packet size!\n");
1702+ pd->settings.size = 128;
1703+ }
1704+ pd->settings.fp = ti.fp;
1705+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
1706+
1707+ if (ti.nwa_v) {
1708+ pd->nwa = be32_to_cpu(ti.next_writable);
1709+ set_bit(PACKET_NWA_VALID, &pd->flags);
1710+ }
1711+
1712+ /*
1713+ * in theory we could use lra on -RW media as well and just zero
1714+ * blocks that haven't been written yet, but in practice that
1715+ * is just a no-go. we'll use that for -R, naturally.
1716+ */
1717+ if (ti.lra_v) {
1718+ pd->lra = be32_to_cpu(ti.last_rec_address);
1719+ set_bit(PACKET_LRA_VALID, &pd->flags);
1720+ } else {
1721+ pd->lra = 0xffffffff;
1722+ set_bit(PACKET_LRA_VALID, &pd->flags);
1723+ }
1724+
1725+ /*
1726+ * fine for now
1727+ */
1728+ pd->settings.link_loss = 7;
1729+ pd->settings.write_type = 0; /* packet */
1730+ pd->settings.track_mode = ti.track_mode;
1731+
1732+ /*
1733+ * mode1 or mode2 disc
1734+ */
1735+ switch (ti.data_mode) {
1736+ case PACKET_MODE1:
1737+ pd->settings.block_mode = PACKET_BLOCK_MODE1;
1738+ break;
1739+ case PACKET_MODE2:
1740+ pd->settings.block_mode = PACKET_BLOCK_MODE2;
1741+ break;
1742+ default:
1743+ printk("pktcdvd: unknown data mode\n");
1744+ return 1;
1745+ }
1746+ return 0;
1747+}
1748+
1749+/*
1750+ * enable/disable write caching on drive
1751+ */
1752+static int pkt_write_caching(struct pktcdvd_device *pd, int set)
1753+{
1754+ struct cdrom_generic_command cgc;
1755+ unsigned char buf[64];
1756+ int ret;
1757+
1758+ memset(buf, 0, sizeof(buf));
1759+ init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1760+ cgc.buflen = pd->mode_offset + 12;
1761+
1762+ /*
1763+ * caching mode page might not be there, so quiet this command
1764+ */
1765+ cgc.quiet = 1;
1766+
1767+ if ((ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_WCACHING_PAGE, 0)))
1768+ return ret;
1769+
1770+ buf[pd->mode_offset + 10] |= (!!set << 2);
1771+
1772+ cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
1773+ ret = cdrom_mode_select(pd->cdi, &cgc);
1774+ if (ret)
1775+ printk("pktcdvd: write caching control failed\n");
1776+ else if (!ret && set)
1777+ printk("pktcdvd: enabled write caching on %s\n", pd->name);
1778+ return ret;
1779+}
1780+
1781+/*
1782+ * flush the drive cache to media
1783+ */
1784+static int pkt_flush_cache(struct pktcdvd_device *pd)
1785+{
1786+ struct cdrom_generic_command cgc;
1787+
1788+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1789+ cgc.cmd[0] = GPCMD_FLUSH_CACHE;
1790+ cgc.quiet = 1;
1791+ cgc.timeout = 60*HZ;
1792+
1793+ /*
1794+ * the IMMED bit -- we default to not setting it, although that
1795+ * would allow a much faster close, this is safer
1796+ */
1797+#if 0
1798+ cgc.cmd[1] = 1 << 1;
1799+#endif
1800+ return pd->cdi->ops->generic_packet(pd->cdi, &cgc);
1801+}
1802+
1803+/*
1804+ * Returns drive current write speed
1805+ */
1806+static int pkt_get_speed(struct pktcdvd_device *pd)
1807+{
1808+ struct cdrom_generic_command cgc;
1809+ unsigned char buf[64];
1810+ int ret, offset;
1811+
1812+ memset(buf, 0, sizeof(buf));
1813+ init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
1814+
1815+ ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1816+ if (ret) {
1817+ cgc.buflen = pd->mode_offset + buf[pd->mode_offset + 9] + 2 +
1818+ sizeof(struct mode_page_header);
1819+ ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1820+ if (ret)
1821+ return ret;
1822+ }
1823+
1824+ offset = pd->mode_offset + 26;
1825+ pd->speed = ((buf[offset] << 8) | buf[offset + 1]) / 0xb0;
1826+ return 0;
1827+}
1828+
1829+/*
1830+ * speed is given as the normal factor, e.g. 4 for 4x
1831+ */
1832+static int pkt_set_speed(struct pktcdvd_device *pd, unsigned speed)
1833+{
1834+ struct cdrom_generic_command cgc;
1835+ unsigned read_speed;
1836+
1837+ /*
1838+ * we set read and write time so that read spindle speed is one and
1839+ * a half times as fast as write. although a drive can typically read much
1840+ * faster than write, this minimizes the spin up/down when we write
1841+ * and gather data. maybe 1/1 factor is faster, needs a bit of testing.
1842+ */
1843+ speed = speed * 0xb0;
1844+ read_speed = (speed * 3) >> 1;
1845+ read_speed = min_t(unsigned, read_speed, 0xffff);
1846+
1847+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1848+ cgc.cmd[0] = 0xbb;
1849+ cgc.cmd[2] = (read_speed >> 8) & 0xff;
1850+ cgc.cmd[3] = read_speed & 0xff;
1851+ cgc.cmd[4] = (speed >> 8) & 0xff;
1852+ cgc.cmd[5] = speed & 0xff;
1853+
1854+ return pd->cdi->ops->generic_packet(pd->cdi, &cgc);
1855+}
1856+
1857+/*
1858+ * Give me full power, Captain
1859+ */
1860+static int pkt_adjust_speed(struct pktcdvd_device *pd, int speed)
1861+{
1862+ disc_information dummy;
1863+ int ret;
1864+
1865+ /*
1866+ * FIXME: do proper unified cap page, also, this isn't proper
1867+ * Mt Fuji, but I think we can safely assume all drives support
1868+ * it. A hell of a lot more than support the GET_PERFORMANCE
1869+ * command (besides, we also use the old set speed command,
1870+ * not the streaming feature).
1871+ */
1872+ if ((ret = pkt_set_speed(pd, speed)))
1873+ return ret;
1874+
1875+ /*
1876+ * just do something with the disc -- next read will contain the
1877+ * maximum speed with this media
1878+ */
1879+ if ((ret = cdrom_get_disc_info(pd->dev, &dummy)))
1880+ return ret;
1881+
1882+ if ((ret = pkt_get_speed(pd))) {
1883+ printk("pktcdvd: failed get speed\n");
1884+ return ret;
1885+ }
1886+
1887+ DPRINTK("pktcdvd: speed (R/W) %u/%u\n", (pd->speed * 3) / 2, pd->speed);
1888+ return 0;
1889+}
1890+
e5ff5fa6 1891+static int pkt_open_write(struct pktcdvd_device *pd)
1892+{
1893+ int ret;
1894+
1895+ if ((ret = pkt_probe_settings(pd))) {
1896+ DPRINTK("pktcdvd: %s failed probe\n", pd->name);
1897+ return -EIO;
1898+ }
1899+
1900+ if ((ret = pkt_set_write_settings(pd))) {
1901+ DPRINTK("pktcdvd: %s failed saving write settings\n", pd->name);
1902+ return -EIO;
1903+ }
1904+
1905+ (void) pkt_write_caching(pd, USE_WCACHING);
1906+
1907+ if ((ret = pkt_adjust_speed(pd, 16))) {
1908+ DPRINTK("pktcdvd: %s couldn't set write speed\n", pd->name);
1909+ return -EIO;
1910+ }
1911+ return 0;
1912+}
1913+
1914+/*
1915+ * called at open time.
1916+ */
1917+static int pkt_open_dev(struct pktcdvd_device *pd, int write)
1918+{
1919+ int ret;
1920+ long lba;
1921+
1922+ if (!pd->dev)
1923+ return -ENXIO;
1924+
1925+ pd->bdev = bdget(kdev_t_to_nr(pd->dev));
1926+ if (!pd->bdev) {
1927+ printk("pktcdvd: can't find cdrom block device\n");
1928+ return -ENXIO;
1929+ }
1930+
1931+ if ((ret = blkdev_get(pd->bdev, FMODE_READ, 0, BDEV_FILE))) {
1932+ pd->bdev = NULL;
1933+ return ret;
1934+ }
1935+
1936+ if ((ret = cdrom_get_last_written(pd->dev, &lba))) {
1937+ printk("pktcdvd: cdrom_get_last_written failed\n");
1938+ return ret;
1939+ }
1940+
1941+ pkt_sizes[MINOR(pd->pkt_dev)] = lba << 1;
1942+
1943+ if (write) {
1944+ if ((ret = pkt_open_write(pd)))
1945+ return ret;
1946+ pkt_mark_readonly(pd, 0);
1947+ } else {
1948+ (void) pkt_adjust_speed(pd, 0xff);
1949+ pkt_mark_readonly(pd, 1);
1950+ }
1951+
1952+ if (write)
1953+ printk("pktcdvd: %lukB available on disc\n", lba << 1);
1954+
1955+ return 0;
1956+}
1957+
1958+/*
1959+ * called when the device is closed. makes sure that the device flushes
1960+ * the internal cache before we close.
1961+ */
1962+static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
1963+{
1964+ atomic_dec(&pd->refcnt);
1965+ if (atomic_read(&pd->refcnt) > 0)
1966+ return;
1967+
1968+ fsync_dev(pd->pkt_dev);
1969+
1970+ if (flush && pkt_flush_cache(pd))
1971+ DPRINTK("pktcdvd: %s not flushing cache\n", pd->name);
1972+
1973+ if (pd->bdev) {
1974+ blkdev_put(pd->bdev, BDEV_FILE);
1975+ pd->bdev = NULL;
1976+ }
1977+}
1978+
1979+static int pkt_open(struct inode *inode, struct file *file)
1980+{
1981+ struct pktcdvd_device *pd = NULL;
1982+ int ret;
1983+
1984+ VPRINTK("pktcdvd: entering open\n");
1985+
1986+ if (MINOR(inode->i_rdev) >= MAX_WRITERS) {
1987+ printk("pktcdvd: max %d writers supported\n", MAX_WRITERS);
1988+ ret = -ENODEV;
1989+ goto out;
1990+ }
1991+
1992+ /*
1993+ * either device is not configured, or pktsetup is old and doesn't
1994+ * use O_CREAT to create device
1995+ */
1996+ pd = &pkt_devs[MINOR(inode->i_rdev)];
1997+ if (!pd->dev && !(file->f_flags & O_CREAT)) {
1998+ VPRINTK("pktcdvd: not configured and O_CREAT not set\n");
1999+ ret = -ENXIO;
2000+ goto out;
2001+ }
2002+
2003+ atomic_inc(&pd->refcnt);
2004+ if (atomic_read(&pd->refcnt) > 1) {
2005+ if (file->f_mode & FMODE_WRITE) {
2006+ VPRINTK("pktcdvd: busy open for write\n");
2007+ ret = -EBUSY;
2008+ goto out_dec;
2009+ }
2010+
2011+ /*
2012+ * Not first open, everything is already set up
2013+ */
2014+ return 0;
2015+ }
2016+
2017+ if (((file->f_flags & O_ACCMODE) != O_RDONLY) || !(file->f_flags & O_CREAT)) {
2018+ if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) {
2019+ ret = -EIO;
2020+ goto out_dec;
2021+ }
2022+ }
2023+
2024+ /*
2025+ * needed here as well, since ext2 (among others) may change
2026+ * the blocksize at mount time
2027+ */
2028+ set_blocksize(pd->pkt_dev, CD_FRAMESIZE);
2029+ return 0;
2030+
2031+out_dec:
2032+ atomic_dec(&pd->refcnt);
2033+ if (atomic_read(&pd->refcnt) == 0) {
2034+ if (pd->bdev) {
2035+ blkdev_put(pd->bdev, BDEV_FILE);
2036+ pd->bdev = NULL;
2037+ }
2038+ }
2039+out:
2040+ VPRINTK("pktcdvd: failed open (%d)\n", ret);
2041+ return ret;
2042+}
2043+
2044+static int pkt_close(struct inode *inode, struct file *file)
2045+{
2046+ struct pktcdvd_device *pd = &pkt_devs[MINOR(inode->i_rdev)];
2047+ int ret = 0;
2048+
2049+ if (pd->dev) {
2050+ int flush = !test_bit(PACKET_READONLY, &pd->flags);
2051+ pkt_release_dev(pd, flush);
2052+ }
2053+
2054+ return ret;
2055+}
2056+
2057+/*
2058+ * pktcdvd i/o elevator parts
2059+ */
2060+static inline int pkt_bh_rq_ordered(struct buffer_head *bh, struct request *rq,
2061+ struct list_head *head)
2062+{
2063+ struct list_head *next;
2064+ struct request *next_rq;
2065+
2066+ next = rq->queue.next;
2067+ if (next == head)
2068+ return 0;
2069+
2070+ next_rq = blkdev_entry_to_request(next);
2071+ if (next_rq->rq_dev != rq->rq_dev)
2072+ return bh->b_rsector > rq->sector;
2073+
2074+ if (bh->b_rsector < next_rq->sector && bh->b_rsector > rq->sector)
2075+ return 1;
2076+
2077+ if (next_rq->sector > rq->sector)
2078+ return 0;
2079+
2080+ if (bh->b_rsector > rq->sector || bh->b_rsector < next_rq->sector)
2081+ return 1;
2082+
2083+ return 0;
2084+}
2085+
2086+static int pkt_elevator_merge(request_queue_t *q, struct request **req,
2087+ struct list_head *head,
2088+ struct buffer_head *bh, int rw,
2089+ int max_sectors)
2090+{
2091+ struct list_head *entry = &q->queue_head;
2092+ unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;
2093+
2094+ if (bh->b_reqnext)
2095+ BUG();
2096+
2097+ VPRINTK("pkt_elevator_merge: rw=%d, ms=%d, bh=%lu, dev=%d\n", rw, max_sectors, bh->b_rsector, bh->b_rdev);
2098+
2099+ while ((entry = entry->prev) != head) {
2100+ struct request *__rq = blkdev_entry_to_request(entry);
2101+ if (__rq->waiting)
2102+ continue;
2103+ if (__rq->rq_dev != bh->b_rdev)
2104+ continue;
2105+ if (!*req && pkt_bh_rq_ordered(bh, __rq, &q->queue_head))
2106+ *req = __rq;
2107+ if (__rq->cmd != rw)
2108+ continue;
2109+ if (__rq->nr_sectors + count > max_sectors)
2110+ continue;
2111+ if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
2112+ ret = ELEVATOR_BACK_MERGE;
2113+ *req = __rq;
2114+ break;
2115+ } else if (__rq->sector - count == bh->b_rsector) {
2116+ ret = ELEVATOR_FRONT_MERGE;
2117+ *req = __rq;
2118+ break;
2119+ }
2120+#if 0 /* makes sense, chance of two matches probably slim */
2121+ else if (*req)
2122+ break;
2123+#endif
2124+ }
2125+ VPRINTK("*req=%p, ret=%d\n", *req, ret);
2126+
2127+ return ret;
2128+}
2129+
2130+static int pkt_make_request(request_queue_t *q, int rw, struct buffer_head *bh)
2131+{
2132+ struct pktcdvd_device *pd;
2133+ struct buffer_head *new_bh;
e5ff5fa6 2134+
2135+ if (MINOR(bh->b_rdev) >= MAX_WRITERS) {
2136+ printk("pktcdvd: %s out of range\n", kdevname(bh->b_rdev));
2137+ goto end_io;
2138+ }
2139+
2140+ pd = &pkt_devs[MINOR(bh->b_rdev)];
2141+ if (!pd->dev) {
2142+ printk("pktcdvd: request received for non-active pd\n");
2143+ goto end_io;
2144+ }
2145+
2146+ /*
2147+ * quick remap a READ
2148+ */
2149+ if (rw == READ || rw == READA) {
2150+ down(&pd->cache_sync_mutex);
2151+ pkt_flush_writes(pd);
2152+ bh->b_rdev = pd->dev;
2153+ generic_make_request(rw, bh);
2154+ up(&pd->cache_sync_mutex);
2155+ return 0;
2156+ }
2157+
2158+ if (!(rw & WRITE))
2159+ BUG();
2160+
2161+ if (test_bit(PACKET_READONLY, &pd->flags)) {
2162+ printk("pktcdvd: WRITE for ro device %s (%lu)\n",
2163+ pd->name, bh->b_rsector);
2164+ goto end_io;
2165+ }
2166+
2167+ VPRINTK("pkt_make_request: bh:%p block:%ld size:%d\n",
2168+ bh, bh->b_blocknr, bh->b_size);
2169+
2170+ if (bh->b_size != CD_FRAMESIZE) {
2171+ printk("pktcdvd: wrong bh size\n");
2172+ goto end_io;
2173+ }
2174+
2175+ /*
f87f0d90 2176+ * This is deadlock safe, since pkt_get_stacked_bh can only
2177+ * fail if there are already buffers in flight for this
2178+ * packet device. When the in-flight buffers finish, we
2179+ * will be woken up and try again.
e5ff5fa6 2180+ */
f87f0d90 2181+ new_bh = kmem_cache_alloc(bh_cachep, GFP_ATOMIC);
2182+ while (!new_bh) {
2183+ DECLARE_WAITQUEUE(wait, current);
e5ff5fa6 2184+
f87f0d90 2185+ generic_unplug_device(q);
2186+
2187+ add_wait_queue(&pd_bh_wait, &wait);
2188+ set_current_state(TASK_UNINTERRUPTIBLE);
e5ff5fa6 2189+
f87f0d90 2190+ new_bh = pkt_get_stacked_bh(pd);
2191+ if (!new_bh)
2192+ schedule();
2193+
2194+ set_current_state(TASK_RUNNING);
2195+ remove_wait_queue(&pd_bh_wait, &wait);
2196+ }
e5ff5fa6 2197+
e5ff5fa6 2198+ new_bh->b_size = bh->b_size;
2199+ new_bh->b_list = PKT_BUF_LIST + 1;
2200+ new_bh->b_dev = bh->b_dev;
2201+ atomic_set(&new_bh->b_count, 1);
2202+ new_bh->b_rdev = bh->b_rdev;
2203+ new_bh->b_state = bh->b_state;
2204+ new_bh->b_page = bh->b_page;
2205+ new_bh->b_data = bh->b_data;
2206+ new_bh->b_private = bh;
2207+ new_bh->b_end_io = pkt_end_io_write_stacked;
2208+ new_bh->b_rsector = bh->b_rsector;
2209+
2210+ return pd->make_request_fn(q, rw, new_bh);
2211+
2212+end_io:
2213+ buffer_IO_error(bh);
2214+ return 0;
2215+}
2216+
2217+static void show_requests(request_queue_t *q)
2218+{
2219+ struct list_head *entry;
2220+
2221+ spin_lock_irq(&io_request_lock);
2222+
2223+ list_for_each(entry, &q->queue_head) {
2224+ struct request *rq = blkdev_entry_to_request(entry);
2225+ int zone = rq->sector & ~127;
2226+ int hole;
2227+
2228+ hole = 0;
2229+ if ((rq->sector + rq->nr_sectors - (rq->bhtail->b_size >> 9))
2230+ != rq->bhtail->b_rsector)
2231+ hole = 1;
2232+
2233+ printk("rq: cmd %d, sector %lu (-> %lu), zone %u, hole %d, nr_sectors %lu\n", rq->cmd, rq->sector, rq->sector + rq->nr_sectors - 1, zone, hole, rq->nr_sectors);
2234+ }
2235+
2236+ spin_unlock_irq(&io_request_lock);
2237+}
2238+
2239+static void sysrq_handle_show_requests(int key, struct pt_regs *pt_regs,
2240+ struct kbd_struct *kbd, struct tty_struct *tty)
2241+{
2242+ /*
2243+ * quick hack to show pending requests in /dev/pktcdvd0 queue
2244+ */
2245+ queue_proc *qp = blk_dev[PACKET_MAJOR].queue;
2246+ if (qp) {
2247+ request_queue_t *q = qp(MKDEV(PACKET_MAJOR, 0));
2248+ if (q)
2249+ show_requests(q);
2250+ }
2251+}
2252+static struct sysrq_key_op sysrq_show_requests_op = {
2253+ handler: sysrq_handle_show_requests,
2254+ help_msg: "showreQuests",
2255+ action_msg: "Show requests",
2256+};
2257+
2258+static void pkt_init_queue(struct pktcdvd_device *pd)
2259+{
2260+ request_queue_t *q = &pd->cdrw.r_queue;
2261+
2262+ blk_init_queue(q, pkt_request);
2263+ elevator_init(&q->elevator, ELEVATOR_PKTCDVD);
2264+ pd->make_request_fn = q->make_request_fn;
2265+ blk_queue_make_request(q, pkt_make_request);
2266+ blk_queue_headactive(q, 0);
2267+ q->front_merge_fn = pkt_front_merge_fn;
2268+ q->back_merge_fn = pkt_back_merge_fn;
2269+ q->merge_requests_fn = pkt_merge_requests_fn;
2270+ q->queuedata = pd;
2271+}
2272+
2273+static int pkt_proc_device(struct pktcdvd_device *pd, char *buf)
2274+{
2275+ char *b = buf, *msg;
2276+ struct list_head *foo;
2277+ int i;
2278+
2279+ b += sprintf(b, "\nWriter %s (%s):\n", pd->name, kdevname(pd->dev));
2280+
2281+ b += sprintf(b, "\nSettings:\n");
2282+ b += sprintf(b, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
2283+
2284+ if (pd->settings.write_type == 0)
2285+ msg = "Packet";
2286+ else
2287+ msg = "Unknown";
2288+ b += sprintf(b, "\twrite type:\t\t%s\n", msg);
2289+
2290+ b += sprintf(b, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
2291+ b += sprintf(b, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
2292+
2293+ b += sprintf(b, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
2294+
2295+ if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
2296+ msg = "Mode 1";
2297+ else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
2298+ msg = "Mode 2";
2299+ else
2300+ msg = "Unknown";
2301+ b += sprintf(b, "\tblock mode:\t\t%s\n", msg);
2302+
2303+ b += sprintf(b, "\nStatistics:\n");
2304+ b += sprintf(b, "\tbuffers started:\t%lu\n", pd->stats.bh_s);
2305+ b += sprintf(b, "\tbuffers ended:\t\t%lu\n", pd->stats.bh_e);
2306+ b += sprintf(b, "\tsectors written:\t%lu\n", pd->stats.secs_w);
2307+ b += sprintf(b, "\tsectors read:\t\t%lu\n", pd->stats.secs_r);
2308+ b += sprintf(b, "\tbuffer cache hits:\t%lu\n", pd->stats.bh_cache_hits);
2309+ b += sprintf(b, "\tpage cache hits:\t%lu\n", pd->stats.page_cache_hits);
2310+
2311+ b += sprintf(b, "\nMisc:\n");
2312+ b += sprintf(b, "\treference count:\t%d\n", atomic_read(&pd->refcnt));
2313+ b += sprintf(b, "\tflags:\t\t\t0x%lx\n", pd->flags);
2314+ b += sprintf(b, "\twrite speed:\t\t%ukB/s\n", pd->speed * 150);
2315+ b += sprintf(b, "\tstart offset:\t\t%lu\n", pd->offset);
2316+ b += sprintf(b, "\tmode page offset:\t%u\n", pd->mode_offset);
2317+
2318+ b += sprintf(b, "\nQueue state:\n");
2319+ b += sprintf(b, "\tfree buffers:\t\t%u\n", atomic_read(&pd->cdrw.free_bh));
2320+ b += sprintf(b, "\trequest active:\t\t%s\n", pd->rq ? "yes" : "no");
2321+ b += sprintf(b, "\twrite rq depth:\t\t%d\n", atomic_read(&pd->wrqcnt));
2322+
2323+ spin_lock_irq(&io_request_lock);
2324+ i = 0;
2325+ list_for_each(foo, &pd->cdrw.r_queue.queue_head)
2326+ i++;
2327+ spin_unlock_irq(&io_request_lock);
2328+ b += sprintf(b, "\tqueue requests:\t\t%u\n", i);
2329+
2330+ return b - buf;
2331+}
2332+
2333+static int pkt_read_proc(char *page, char **start, off_t off, int count,
2334+ int *eof, void *data)
2335+{
2336+ struct pktcdvd_device *pd = data;
2337+ char *buf = page;
2338+ int len;
2339+
2340+ len = pkt_proc_device(pd, buf);
2341+ buf += len;
2342+
2343+ if (len <= off + count)
2344+ *eof = 1;
2345+
2346+ *start = page + off;
2347+ len -= off;
2348+ if (len > count)
2349+ len = count;
2350+ if (len < 0)
2351+ len = 0;
2352+
2353+ return len;
2354+}
2355+
2356+static int pkt_new_dev(struct pktcdvd_device *pd, kdev_t dev)
2357+{
2358+ struct cdrom_device_info *cdi;
2359+ request_queue_t *q;
2360+ int i;
2361+
2362+ for (i = 0; i < MAX_WRITERS; i++) {
2363+ if (pkt_devs[i].dev == dev) {
2364+ printk("pktcdvd: %s already setup\n", kdevname(dev));
2365+ return -EBUSY;
2366+ }
2367+ }
2368+
2369+ for (i = 0; i < MAX_WRITERS; i++)
2370+ if (pd == &pkt_devs[i])
2371+ break;
f87f0d90 2372+ BUG_ON(i == MAX_WRITERS);
e5ff5fa6 2373+
2374+ cdi = cdrom_find_device(dev);
2375+ if (cdi == NULL) {
2376+ printk("pktcdvd: %s is not a CD-ROM\n", kdevname(dev));
2377+ return -ENXIO;
2378+ }
2379+
2380+ MOD_INC_USE_COUNT;
2381+
2382+ memset(pd, 0, sizeof(struct pktcdvd_device));
2383+ atomic_set(&pd->cdrw.free_bh, 0);
2384+
2385+ spin_lock_init(&pd->lock);
2386+ if (pkt_grow_bhlist(pd, PACKET_MAX_SIZE) < PACKET_MAX_SIZE) {
2387+ MOD_DEC_USE_COUNT;
2388+ printk("pktcdvd: not enough memory for buffers\n");
2389+ return -ENOMEM;
2390+ }
2391+
f87f0d90 2392+ pd->stacked_bhcnt = 0;
2393+ if (!pkt_grow_stacked_bhlist(pd)) {
2394+ MOD_DEC_USE_COUNT;
2395+ printk("pktcdvd: not enough memory for buffer heads\n");
2396+ return -ENOMEM;
2397+ }
2398+
e5ff5fa6 2399+ set_blocksize(dev, CD_FRAMESIZE);
2400+ pd->cdi = cdi;
2401+ pd->dev = dev;
2402+ pd->bdev = NULL;
2403+ pd->pkt_dev = MKDEV(PACKET_MAJOR, i);
2404+ sprintf(pd->name, "pktcdvd%d", i);
2405+ atomic_set(&pd->refcnt, 0);
2406+ atomic_set(&pd->wrqcnt, 0);
e5ff5fa6 2407+ init_MUTEX(&pd->cache_sync_mutex);
2408+ pd->unflushed_writes = 0;
2409+ init_waitqueue_head(&pd->wqueue);
2410+ init_completion(&pd->cdrw.thr_compl);
2411+
2412+ /*
2413+ * store device merge functions (SCSI uses their own to build
2414+ * scatter-gather tables)
2415+ */
2416+ q = blk_get_queue(dev);
e5ff5fa6 2417+ pkt_init_queue(pd);
2418+ pd->cdrw.front_merge_fn = q->front_merge_fn;
2419+ pd->cdrw.back_merge_fn = q->back_merge_fn;
2420+ pd->cdrw.merge_requests_fn = q->merge_requests_fn;
2421+ pd->cdrw.queuedata = q->queuedata;
e5ff5fa6 2422+
2423+ pd->cdrw.pid = kernel_thread(kcdrwd, pd, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
2424+ if (pd->cdrw.pid < 0) {
2425+ MOD_DEC_USE_COUNT;
2426+ printk("pktcdvd: can't start kernel thread\n");
2427+ blk_cleanup_queue(&pd->cdrw.r_queue);
f87f0d90 2428+ pkt_shrink_stacked_bhlist(pd);
e5ff5fa6 2429+ pkt_shrink_bhlist(pd, PACKET_MAX_SIZE);
2430+ memset(pd, 0, sizeof(*pd));
2431+ return -EBUSY;
2432+ }
2433+
2434+ create_proc_read_entry(pd->name, 0, pkt_proc, pkt_read_proc, pd);
2435+	DPRINTK("pktcdvd: writer %s successfully registered\n", cdi->name);
2436+ return 0;
2437+}
2438+
2439+/*
2440+ * arg contains file descriptor of CD-ROM device.
2441+ */
2442+static int pkt_setup_dev(struct pktcdvd_device *pd, unsigned int arg)
2443+{
2444+ struct inode *inode;
2445+ struct file *file;
2446+ int ret;
2447+
2448+ if ((file = fget(arg)) == NULL) {
2449+ printk("pktcdvd: bad file descriptor passed\n");
2450+ return -EBADF;
2451+ }
2452+
2453+ ret = -EINVAL;
2454+ if ((inode = file->f_dentry->d_inode) == NULL) {
2455+ printk("pktcdvd: huh? file descriptor contains no inode?\n");
2456+ goto out;
2457+ }
2458+ ret = -ENOTBLK;
2459+ if (!S_ISBLK(inode->i_mode)) {
2460+ printk("pktcdvd: device is not a block device (duh)\n");
2461+ goto out;
2462+ }
2463+ ret = -EROFS;
2464+ if (IS_RDONLY(inode)) {
2465+ printk("pktcdvd: Can't write to read-only dev\n");
2466+ goto out;
2467+ }
2468+ if ((ret = pkt_new_dev(pd, inode->i_rdev))) {
2469+ printk("pktcdvd: all booked up\n");
2470+ goto out;
2471+ }
2472+
2473+ atomic_inc(&pd->refcnt);
2474+
2475+out:
2476+ fput(file);
2477+ return ret;
2478+}
2479+
2480+static int pkt_remove_dev(struct pktcdvd_device *pd)
2481+{
2482+ int ret;
2483+
2484+ if (pd->cdrw.pid >= 0) {
2485+ ret = kill_proc(pd->cdrw.pid, SIGKILL, 1);
2486+ if (ret) {
2487+ printk("pkt_exit: can't kill kernel thread\n");
2488+ return ret;
2489+ }
2490+ wait_for_completion(&pd->cdrw.thr_compl);
2491+ }
2492+
2493+ /*
2494+ * will also invalidate buffers for CD-ROM
2495+ */
2496+ invalidate_device(pd->pkt_dev, 1);
2497+
f87f0d90 2498+ pkt_shrink_stacked_bhlist(pd);
e5ff5fa6 2499+ if ((ret = pkt_shrink_bhlist(pd, PACKET_MAX_SIZE)) != PACKET_MAX_SIZE)
2500+ printk("pktcdvd: leaked %d buffers\n", PACKET_MAX_SIZE - ret);
2501+
2502+ blk_cleanup_queue(&pd->cdrw.r_queue);
2503+ remove_proc_entry(pd->name, pkt_proc);
2504+ DPRINTK("pktcdvd: writer %s unregistered\n", pd->cdi->name);
2505+ memset(pd, 0, sizeof(struct pktcdvd_device));
2506+ MOD_DEC_USE_COUNT;
2507+ return 0;
2508+}
2509+
2510+static int pkt_media_change(kdev_t dev)
2511+{
2512+ struct pktcdvd_device *pd = pkt_find_dev(dev);
2513+ if (!pd)
2514+ return 0;
2515+ return cdrom_media_changed(pd->dev);
2516+}
2517+
2518+static int pkt_ioctl(struct inode *inode, struct file *file,
2519+ unsigned int cmd, unsigned long arg)
2520+{
2521+ struct pktcdvd_device *pd = &pkt_devs[MINOR(inode->i_rdev)];
2522+
2523+ VPRINTK("pkt_ioctl: cmd %d, dev %x\n", cmd, inode->i_rdev);
2524+
2525+ if ((cmd != PACKET_SETUP_DEV) && !pd->dev) {
2526+ DPRINTK("pktcdvd: dev not setup\n");
2527+ return -ENXIO;
2528+ }
2529+
2530+ switch (cmd) {
2531+ case PACKET_GET_STATS:
2532+		if (copy_to_user((struct packet_stats *)arg, &pd->stats, sizeof(struct packet_stats)))
2533+ return -EFAULT;
2534+ break;
2535+
2536+ case PACKET_SETUP_DEV:
f87f0d90 2537+ if (!capable(CAP_SYS_ADMIN))
2538+ return -EPERM;
e5ff5fa6 2539+ if (pd->dev) {
2540+ printk("pktcdvd: dev already setup\n");
2541+ return -EBUSY;
2542+ }
e5ff5fa6 2543+ return pkt_setup_dev(pd, arg);
2544+
2545+ case PACKET_TEARDOWN_DEV:
2546+ if (!capable(CAP_SYS_ADMIN))
2547+ return -EPERM;
2548+ if (atomic_read(&pd->refcnt) != 1)
2549+ return -EBUSY;
2550+ return pkt_remove_dev(pd);
2551+
2552+ case BLKGETSIZE:
2553+ return put_user(blk_size[PACKET_MAJOR][MINOR(inode->i_rdev)] << 1, (unsigned long *)arg);
2554+
2555+ case BLKGETSIZE64:
2556+ return put_user((u64)blk_size[PACKET_MAJOR][MINOR(inode->i_rdev)] << 10,
2557+ (u64 *)arg);
2558+
2559+ case BLKROSET:
2560+ if (capable(CAP_SYS_ADMIN))
2561+ set_bit(PACKET_READONLY, &pd->flags);
2562+ case BLKROGET:
2563+ case BLKSSZGET:
2564+ case BLKRASET:
2565+ case BLKRAGET:
2566+ case BLKFLSBUF:
2567+ if (!pd->bdev)
2568+ return -ENXIO;
2569+ return blk_ioctl(inode->i_rdev, cmd, arg);
2570+
2571+ /*
2572+ * forward selected CDROM ioctls to CD-ROM, for UDF
2573+ */
2574+ case CDROMMULTISESSION:
2575+ case CDROMREADTOCENTRY:
2576+ case CDROM_LAST_WRITTEN:
2577+ case CDROM_SEND_PACKET:
2578+ case SCSI_IOCTL_SEND_COMMAND:
2579+ if (!pd->bdev)
2580+ return -ENXIO;
2581+ return ioctl_by_bdev(pd->bdev, cmd, arg);
2582+
2583+ default:
2584+ printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd);
2585+ return -ENOTTY;
2586+ }
2587+
2588+ return 0;
2589+}
2590+
2591+static struct block_device_operations pktcdvd_ops = {
2592+ owner: THIS_MODULE,
2593+ open: pkt_open,
2594+ release: pkt_close,
2595+ ioctl: pkt_ioctl,
2596+ check_media_change: pkt_media_change,
2597+};
2598+
2599+int pkt_init(void)
2600+{
2601+ int i;
2602+
2603+ devfs_register(NULL, "pktcdvd", DEVFS_FL_DEFAULT, PACKET_MAJOR, 0,
2604+ S_IFBLK | S_IRUSR | S_IWUSR, &pktcdvd_ops, NULL);
2605+ if (devfs_register_blkdev(PACKET_MAJOR, "pktcdvd", &pktcdvd_ops)) {
2606+ printk("unable to register pktcdvd device\n");
2607+ return -EIO;
2608+ }
2609+
2610+ pkt_sizes = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
2611+ if (pkt_sizes == NULL)
2612+ goto err;
2613+
2614+ pkt_blksize = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
2615+ if (pkt_blksize == NULL)
2616+ goto err;
2617+
2618+ pkt_readahead = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
2619+ if (pkt_readahead == NULL)
2620+ goto err;
2621+
2622+ pkt_devs = kmalloc(MAX_WRITERS * sizeof(struct pktcdvd_device), GFP_KERNEL);
2623+ if (pkt_devs == NULL)
2624+ goto err;
2625+
2626+ memset(pkt_devs, 0, MAX_WRITERS * sizeof(struct pktcdvd_device));
2627+ memset(pkt_sizes, 0, MAX_WRITERS * sizeof(int));
2628+ memset(pkt_blksize, 0, MAX_WRITERS * sizeof(int));
2629+
2630+ for (i = 0; i < MAX_WRITERS; i++)
2631+ pkt_readahead[i] = vm_max_readahead;
2632+
2633+ blk_size[PACKET_MAJOR] = pkt_sizes;
2634+ blksize_size[PACKET_MAJOR] = pkt_blksize;
2635+ max_readahead[PACKET_MAJOR] = pkt_readahead;
2636+ read_ahead[PACKET_MAJOR] = 128;
2637+ set_blocksize(MKDEV(PACKET_MAJOR, 0), CD_FRAMESIZE);
2638+
2639+ blk_dev[PACKET_MAJOR].queue = pkt_get_queue;
2640+
2641+ pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
2642+
2643+ register_sysrq_key('q', &sysrq_show_requests_op);
2644+
2645+ DPRINTK("pktcdvd: %s\n", VERSION_CODE);
2646+ return 0;
2647+
2648+err:
2649+ printk("pktcdvd: out of memory\n");
2650+ devfs_unregister(devfs_find_handle(NULL, "pktcdvd", 0, 0,
2651+ DEVFS_SPECIAL_BLK, 0));
2652+ devfs_unregister_blkdev(PACKET_MAJOR, "pktcdvd");
2653+ kfree(pkt_devs);
2654+ kfree(pkt_sizes);
2655+ kfree(pkt_blksize);
2656+ kfree(pkt_readahead);
2657+ return -ENOMEM;
2658+}
2659+
2660+void pkt_exit(void)
2661+{
2662+ unregister_sysrq_key('q', &sysrq_show_requests_op);
2663+
2664+ devfs_unregister(devfs_find_handle(NULL, "pktcdvd", 0, 0,
2665+ DEVFS_SPECIAL_BLK, 0));
2666+ devfs_unregister_blkdev(PACKET_MAJOR, "pktcdvd");
2667+ blk_dev[PACKET_MAJOR].queue = NULL;
2668+
2669+ remove_proc_entry("pktcdvd", proc_root_driver);
2670+ kfree(pkt_sizes);
2671+ kfree(pkt_blksize);
2672+ kfree(pkt_devs);
2673+ kfree(pkt_readahead);
2674+}
2675+
2676+MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
2677+MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
2678+MODULE_LICENSE("GPL");
2679+
2680+module_init(pkt_init);
2681+module_exit(pkt_exit);
f87f0d90 2682diff -u -r -N ../../linus/2.4/linux/drivers/cdrom/Makefile linux/drivers/cdrom/Makefile
2683--- ../../linus/2.4/linux/drivers/cdrom/Makefile Tue Aug 6 21:14:34 2002
2684+++ linux/drivers/cdrom/Makefile Tue Aug 6 21:22:09 2002
e5ff5fa6 2685@@ -27,6 +27,7 @@
2686 obj-$(CONFIG_BLK_DEV_IDECD) += cdrom.o
2687 obj-$(CONFIG_BLK_DEV_SR) += cdrom.o
2688 obj-$(CONFIG_PARIDE_PCD) += cdrom.o
2689+obj-$(CONFIG_CDROM_PKTCDVD) += cdrom.o
2690
2691 obj-$(CONFIG_AZTCD) += aztcd.o
2692 obj-$(CONFIG_CDU31A) += cdu31a.o cdrom.o
f87f0d90 2693diff -u -r -N ../../linus/2.4/linux/drivers/ide/ide-cd.c linux/drivers/ide/ide-cd.c
2694--- ../../linus/2.4/linux/drivers/ide/ide-cd.c Tue Aug 6 21:14:39 2002
2695+++ linux/drivers/ide/ide-cd.c Tue Aug 6 21:22:36 2002
e5ff5fa6 2696@@ -292,9 +292,11 @@
2697 * correctly reporting tray status -- from
2698 * Michael D Johnson <johnsom@orst.edu>
2699 *
2700+ * 4.99 - Added write support for packet writing.
2701+ *
2702 *************************************************************************/
2703
2704-#define IDECD_VERSION "4.59"
2705+#define IDECD_VERSION "4.99"
2706
2707 #include <linux/config.h>
2708 #include <linux/module.h>
2709@@ -526,7 +528,7 @@
2710
2711 memset(pc, 0, sizeof(struct packet_command));
2712 pc->c[0] = GPCMD_REQUEST_SENSE;
2713- pc->c[4] = pc->buflen = 18;
2714+ pc->c[4] = pc->buflen = 14;
2715 pc->buffer = (char *) sense;
2716 pc->sense = (struct request_sense *) failed_command;
2717
2718@@ -640,7 +642,7 @@
2719 cdrom_saw_media_change (drive);
2720
2721 /* Fail the request. */
2722- printk ("%s: tray open\n", drive->name);
2723+ /* printk ("%s: tray open\n", drive->name); */
2724 cdrom_end_request (0, drive);
2725 } else if (sense_key == UNIT_ATTENTION) {
2726 /* Media change. */
2727@@ -1200,6 +1202,8 @@
2728 * partitions not really working, but better check anyway...
2729 */
2730 if (rq->cmd == nxt->cmd && rq->rq_dev == nxt->rq_dev) {
2731+ if (rq->cmd == WRITE)
2732+ printk("merged write\n");
2733 rq->nr_sectors += nxt->nr_sectors;
2734 rq->hard_nr_sectors += nxt->nr_sectors;
2735 rq->bhtail->b_reqnext = nxt->bh;
2736@@ -2497,6 +2501,12 @@
2737 static
2738 void ide_cdrom_release_real (struct cdrom_device_info *cdi)
2739 {
2740+ struct cdrom_generic_command cgc;
2741+
2742+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
2743+ cgc.cmd[0] = GPCMD_FLUSH_CACHE;
2744+ cgc.quiet = 1;
2745+ (void) ide_cdrom_packet(cdi, &cgc);
2746 }
2747
2748
f87f0d90 2749@@ -2685,15 +2695,10 @@
e5ff5fa6 2750 printk(" %dX", CDROM_CONFIG_FLAGS(drive)->max_speed);
2751 printk(" %s", CDROM_CONFIG_FLAGS(drive)->dvd ? "DVD-ROM" : "CD-ROM");
2752
2753- if (CDROM_CONFIG_FLAGS (drive)->dvd_r|CDROM_CONFIG_FLAGS (drive)->dvd_ram)
2754- printk (" DVD%s%s",
2755- (CDROM_CONFIG_FLAGS (drive)->dvd_r)? "-R" : "",
2756- (CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "-RAM" : "");
2757-
2758- if (CDROM_CONFIG_FLAGS (drive)->cd_r|CDROM_CONFIG_FLAGS (drive)->cd_rw)
2759- printk (" CD%s%s",
2760- (CDROM_CONFIG_FLAGS (drive)->cd_r)? "-R" : "",
2761- (CDROM_CONFIG_FLAGS (drive)->cd_rw)? "/RW" : "");
2762+ if (CDROM_CONFIG_FLAGS(drive)->dvd_r || CDROM_CONFIG_FLAGS(drive)->dvd_ram)
2763+ printk (" DVD-R%s", (CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "AM" : "");
2764+	if (CDROM_CONFIG_FLAGS(drive)->cd_r || CDROM_CONFIG_FLAGS(drive)->cd_rw)
2765+ printk (" CD-R%s", (CDROM_CONFIG_FLAGS (drive)->cd_rw)? "/RW" : "");
2766
2767 if (CDROM_CONFIG_FLAGS (drive)->is_changer)
2768 printk (" changer w/%d slots", nslots);
f87f0d90 2769@@ -2716,7 +2721,7 @@
e5ff5fa6 2770 int major = HWIF(drive)->major;
2771 int minor = drive->select.b.unit << PARTN_BITS;
2772
2773- ide_add_setting(drive, "breada_readahead", SETTING_RW, BLKRAGET, BLKRASET, TYPE_INT, 0, 255, 1, 2, &read_ahead[major], NULL);
2774+ ide_add_setting(drive, "breada_readahead", SETTING_RW, BLKRAGET, BLKRASET, TYPE_INT, 0, 255, 1, 1024, &read_ahead[major], NULL);
2775 ide_add_setting(drive, "file_readahead", SETTING_RW, BLKFRAGET, BLKFRASET, TYPE_INTA, 0, INT_MAX, 1, 1024, &max_readahead[major][minor], NULL);
2776 ide_add_setting(drive, "max_kb_per_request", SETTING_RW, BLKSECTGET, BLKSECTSET, TYPE_INTA, 1, 255, 1, 2, &max_sectors[major][minor], NULL);
2777 ide_add_setting(drive, "dsc_overlap", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
f87f0d90 2778@@ -2733,7 +2738,7 @@
e5ff5fa6 2779 /*
2780 * default to read-only always and fix latter at the bottom
2781 */
2782- set_device_ro(MKDEV(HWIF(drive)->major, minor), 1);
2783+ set_device_ro(MKDEV(HWIF(drive)->major, minor), 0);
2784 set_blocksize(MKDEV(HWIF(drive)->major, minor), CD_FRAMESIZE);
2785
2786 drive->special.all = 0;
f87f0d90 2787diff -u -r -N ../../linus/2.4/linux/drivers/scsi/Config.in linux/drivers/scsi/Config.in
2788--- ../../linus/2.4/linux/drivers/scsi/Config.in Tue Aug 6 21:15:02 2002
2789+++ linux/drivers/scsi/Config.in Tue Aug 6 21:23:16 2002
e5ff5fa6 2790@@ -20,10 +20,6 @@
2791
2792 comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs'
2793
2794-#if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
2795- bool ' Enable extra checks in new queueing code' CONFIG_SCSI_DEBUG_QUEUES
2796-#fi
2797-
2798 bool ' Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN
2799
2800 bool ' Verbose SCSI error reporting (kernel size +=12K)' CONFIG_SCSI_CONSTANTS
f87f0d90 2801diff -u -r -N ../../linus/2.4/linux/drivers/scsi/scsi_merge.c linux/drivers/scsi/scsi_merge.c
2802--- ../../linus/2.4/linux/drivers/scsi/scsi_merge.c Tue Aug 6 21:15:21 2002
2803+++ linux/drivers/scsi/scsi_merge.c Tue Aug 6 21:23:18 2002
e5ff5fa6 2804@@ -71,11 +71,6 @@
2805 */
2806 #define DMA_SEGMENT_SIZE_LIMITED
2807
2808-#ifdef CONFIG_SCSI_DEBUG_QUEUES
2809-/*
2810- * Enable a bunch of additional consistency checking. Turn this off
2811- * if you are benchmarking.
2812- */
2813 static int dump_stats(struct request *req,
2814 int use_clustering,
2815 int dma_host,
2816@@ -100,22 +95,6 @@
2817 panic("Ththththaats all folks. Too dangerous to continue.\n");
2818 }
2819
2820-
2821-/*
2822- * Simple sanity check that we will use for the first go around
2823- * in order to ensure that we are doing the counting correctly.
2824- * This can be removed for optimization.
2825- */
2826-#define SANITY_CHECK(req, _CLUSTER, _DMA) \
2827- if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA, NULL) ) \
2828- { \
2829- printk("Incorrect segment count at 0x%p", current_text_addr()); \
2830- dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA, NULL)); \
2831- }
2832-#else
2833-#define SANITY_CHECK(req, _CLUSTER, _DMA)
2834-#endif
2835-
2836 static void dma_exhausted(Scsi_Cmnd * SCpnt, int i)
2837 {
2838 int jj;
2839@@ -532,7 +511,6 @@
2840 int max_segments) \
2841 { \
2842 int ret; \
2843- SANITY_CHECK(req, _CLUSTER, _DMA); \
2844 ret = __scsi_ ## _BACK_FRONT ## _merge_fn(q, \
2845 req, \
2846 bh, \
2847@@ -742,7 +720,6 @@
2848 int max_segments) \
2849 { \
2850 int ret; \
2851- SANITY_CHECK(req, _CLUSTER, _DMA); \
2852 ret = __scsi_merge_requests_fn(q, req, next, max_segments, _CLUSTER, _DMA); \
2853 return ret; \
2854 }
2855@@ -829,11 +806,7 @@
2856 /*
2857 * First we need to know how many scatter gather segments are needed.
2858 */
2859- if (!sg_count_valid) {
2860- count = __count_segments(req, use_clustering, dma_host, NULL);
2861- } else {
2862- count = req->nr_segments;
2863- }
2864+ count = __count_segments(req, use_clustering, dma_host, NULL);
2865
2866 /*
2867 * If the dma pool is nearly empty, then queue a minimal request
2868@@ -949,9 +922,7 @@
2869 */
2870 if (count != SCpnt->use_sg) {
2871 printk("Incorrect number of segments after building list\n");
2872-#ifdef CONFIG_SCSI_DEBUG_QUEUES
2873 dump_stats(req, use_clustering, dma_host, count);
2874-#endif
2875 }
2876 if (!dma_host) {
2877 return 1;
f87f0d90 2878diff -u -r -N ../../linus/2.4/linux/drivers/scsi/sr.c linux/drivers/scsi/sr.c
2879--- ../../linus/2.4/linux/drivers/scsi/sr.c Tue Aug 6 21:15:22 2002
2880+++ linux/drivers/scsi/sr.c Tue Aug 6 21:23:18 2002
e5ff5fa6 2881@@ -28,12 +28,16 @@
2882 * Modified by Jens Axboe <axboe@suse.de> - support DVD-RAM
2883 * transparently and loose the GHOST hack
2884 *
2885+ * Modified by Jens Axboe <axboe@suse.de> - support packet writing
2886+ * through generic packet layer.
2887+ *
2888 * Modified by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
2889 * check resource allocation in sr_init and some cleanups
2890 *
2891 */
2892
2893 #include <linux/module.h>
2894+#include <linux/config.h>
2895
2896 #include <linux/fs.h>
2897 #include <linux/kernel.h>
f87f0d90 2898@@ -716,7 +720,7 @@
e5ff5fa6 2899 cmd[2] = 0x2a;
2900 cmd[4] = 128;
2901 cmd[3] = cmd[5] = 0;
2902- rc = sr_do_ioctl(i, cmd, buffer, 128, 1, SCSI_DATA_READ, NULL);
2903+ rc = sr_do_ioctl(i, cmd, buffer, 128, 1, SCSI_DATA_READ, NULL, SR_TIMEOUT);
2904
2905 if (rc) {
2906 /* failed, drive doesn't have capabilities mode page */
f87f0d90 2907@@ -748,16 +752,13 @@
e5ff5fa6 2908 if ((buffer[n + 2] & 0x8) == 0)
2909 /* not a DVD drive */
2910 scsi_CDs[i].cdi.mask |= CDC_DVD;
2911- if ((buffer[n + 3] & 0x20) == 0) {
2912+ if ((buffer[n + 3] & 0x20) == 0)
2913 /* can't write DVD-RAM media */
2914 scsi_CDs[i].cdi.mask |= CDC_DVD_RAM;
2915- } else {
2916- scsi_CDs[i].device->writeable = 1;
2917- }
2918 if ((buffer[n + 3] & 0x10) == 0)
2919 /* can't write DVD-R media */
2920 scsi_CDs[i].cdi.mask |= CDC_DVD_R;
2921- if ((buffer[n + 3] & 0x2) == 0)
2922+ if ((buffer[n + 3] & 0x02) == 0)
2923 /* can't write CD-RW media */
2924 scsi_CDs[i].cdi.mask |= CDC_CD_RW;
2925 if ((buffer[n + 3] & 0x1) == 0)
f87f0d90 2926@@ -777,6 +778,10 @@
e5ff5fa6 2927 /*else I don't think it can close its tray
2928 scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */
2929
2930+ if (~scsi_CDs[i].cdi.mask & (CDC_DVD_RAM | CDC_CD_RW))
2931+ /* can write to DVD-RAM or CD-RW */
2932+ scsi_CDs[i].device->writeable = 1;
2933+
2934 scsi_free(buffer, 512);
2935 }
2936
f87f0d90 2937@@ -792,7 +797,10 @@
e5ff5fa6 2938 if (device->scsi_level <= SCSI_2)
2939 cgc->cmd[1] |= device->lun << 5;
2940
2941- cgc->stat = sr_do_ioctl(MINOR(cdi->dev), cgc->cmd, cgc->buffer, cgc->buflen, cgc->quiet, cgc->data_direction, cgc->sense);
2942+ if (cgc->timeout <= 0)
2943+ cgc->timeout = 5 * HZ;
2944+
2945+ cgc->stat = sr_do_ioctl(MINOR(cdi->dev), cgc->cmd, cgc->buffer, cgc->buflen, cgc->quiet, cgc->data_direction, cgc->sense, cgc->timeout);
2946
2947 return cgc->stat;
2948 }
f87f0d90 2949diff -u -r -N ../../linus/2.4/linux/drivers/scsi/sr.h linux/drivers/scsi/sr.h
2950--- ../../linus/2.4/linux/drivers/scsi/sr.h Tue Aug 6 21:15:22 2002
2951+++ linux/drivers/scsi/sr.h Tue Aug 6 21:23:18 2002
e5ff5fa6 2952@@ -36,7 +36,7 @@
2953
2954 extern Scsi_CD *scsi_CDs;
2955
2956-int sr_do_ioctl(int, unsigned char *, void *, unsigned, int, int, struct request_sense *);
2957+int sr_do_ioctl(int, unsigned char *, void *, unsigned, int, int, struct request_sense *, int);
2958
2959 int sr_lock_door(struct cdrom_device_info *, int);
2960 int sr_tray_move(struct cdrom_device_info *, int);
f87f0d90 2961diff -u -r -N ../../linus/2.4/linux/drivers/scsi/sr_ioctl.c linux/drivers/scsi/sr_ioctl.c
2962--- ../../linus/2.4/linux/drivers/scsi/sr_ioctl.c Tue Aug 6 21:15:22 2002
2963+++ linux/drivers/scsi/sr_ioctl.c Tue Aug 6 21:23:18 2002
e5ff5fa6 2964@@ -68,14 +68,14 @@
2965 sr_cmd[6] = trk1_te.cdte_addr.msf.minute;
2966 sr_cmd[7] = trk1_te.cdte_addr.msf.second;
2967 sr_cmd[8] = trk1_te.cdte_addr.msf.frame;
2968- return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL);
2969+ return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
2970 }
2971
2972 /* We do our own retries because we want to know what the specific
2973 error code is. Normally the UNIT_ATTENTION code will automatically
2974 clear after one error */
2975
2976-int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet, int readwrite, struct request_sense *sense)
2977+int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet, int readwrite, struct request_sense *sense, int timeout)
2978 {
2979 Scsi_Request *SRpnt;
2980 Scsi_Device *SDev;
2981@@ -109,7 +109,7 @@
2982
2983
2984 scsi_wait_req(SRpnt, (void *) sr_cmd, (void *) buffer, buflength,
2985- IOCTL_TIMEOUT, IOCTL_RETRIES);
2986+ timeout, IOCTL_RETRIES);
2987
2988 req = &SRpnt->sr_request;
2989 if (SRpnt->sr_buffer && req->buffer && SRpnt->sr_buffer != req->buffer) {
2990@@ -198,7 +198,7 @@
2991 sr_cmd[1] = (scsi_CDs[minor].device->scsi_level <= SCSI_2) ?
2992 ((scsi_CDs[minor].device->lun) << 5) : 0;
2993 sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
2994- return sr_do_ioctl(minor, sr_cmd, NULL, 0, 1, SCSI_DATA_NONE, NULL);
2995+ return sr_do_ioctl(minor, sr_cmd, NULL, 0, 1, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
2996 }
2997
2998 int sr_tray_move(struct cdrom_device_info *cdi, int pos)
2999@@ -211,7 +211,7 @@
3000 sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
3001 sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */ ;
3002
3003- return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL);
3004+ return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
3005 }
3006
3007 int sr_lock_door(struct cdrom_device_info *cdi, int lock)
3008@@ -289,7 +289,7 @@
3009 sr_cmd[8] = 24;
3010 sr_cmd[9] = 0;
3011
3012- result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0, SCSI_DATA_READ, NULL);
3013+ result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3014
3015 memcpy(mcn->medium_catalog_number, buffer + 9, 13);
3016 mcn->medium_catalog_number[13] = 0;
3017@@ -319,7 +319,7 @@
3018 sr_cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */
3019 sr_cmd[3] = speed & 0xff; /* LSB */
3020
3021- if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL))
3022+ if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT))
3023 return -EIO;
3024 return 0;
3025 }
3026@@ -349,7 +349,7 @@
3027 sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
3028 sr_cmd[8] = 12; /* LSB of length */
3029
3030- result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1, SCSI_DATA_READ, NULL);
3031+ result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3032
3033 tochdr->cdth_trk0 = buffer[2];
3034 tochdr->cdth_trk1 = buffer[3];
3035@@ -369,7 +369,7 @@
3036 sr_cmd[6] = tocentry->cdte_track;
3037 sr_cmd[8] = 12; /* LSB of length */
3038
3039- result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0, SCSI_DATA_READ, NULL);
3040+ result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3041
3042 tocentry->cdte_ctrl = buffer[5] & 0xf;
3043 tocentry->cdte_adr = buffer[5] >> 4;
3044@@ -396,7 +396,7 @@
3045 sr_cmd[7] = ti->cdti_trk1;
3046 sr_cmd[8] = ti->cdti_ind1;
3047
3048- result = sr_do_ioctl(target, sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL);
3049+ result = sr_do_ioctl(target, sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
3050 if (result == -EDRIVE_CANT_DO_THIS)
3051 result = sr_fake_playtrkind(cdi, ti);
3052
3053@@ -462,7 +462,7 @@
3054 cmd[9] = 0x10;
3055 break;
3056 }
3057- return sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL);
3058+ return sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3059 }
3060
3061 /*
3062@@ -501,7 +501,7 @@
3063 cmd[4] = (unsigned char) (lba >> 8) & 0xff;
3064 cmd[5] = (unsigned char) lba & 0xff;
3065 cmd[8] = 1;
3066- rc = sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL);
3067+ rc = sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3068
3069 return rc;
3070 }
f87f0d90 3071diff -u -r -N ../../linus/2.4/linux/drivers/scsi/sr_vendor.c linux/drivers/scsi/sr_vendor.c
3072--- ../../linus/2.4/linux/drivers/scsi/sr_vendor.c Tue Aug 6 21:15:22 2002
3073+++ linux/drivers/scsi/sr_vendor.c Tue Aug 6 21:23:18 2002
e5ff5fa6 3074@@ -60,6 +60,8 @@
3075
3076 #define VENDOR_ID (scsi_CDs[minor].vendor)
3077
3078+#define VENDOR_TIMEOUT 30*HZ
3079+
3080 void sr_vendor_init(int minor)
3081 {
3082 #ifndef CONFIG_BLK_DEV_SR_VENDOR
3083@@ -134,7 +136,7 @@
3084 modesel->density = density;
3085 modesel->block_length_med = (blocklength >> 8) & 0xff;
3086 modesel->block_length_lo = blocklength & 0xff;
3087- if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0, SCSI_DATA_WRITE, NULL))) {
3088+ if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0, SCSI_DATA_WRITE, NULL, VENDOR_TIMEOUT))) {
3089 scsi_CDs[minor].device->sector_size = blocklength;
3090 }
3091 #ifdef DEBUG
3092@@ -179,7 +181,7 @@
3093 (scsi_CDs[minor].device->lun << 5) : 0;
3094 cmd[8] = 12;
3095 cmd[9] = 0x40;
3096- rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL);
3097+ rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3098 if (rc != 0)
3099 break;
3100 if ((buffer[0] << 8) + buffer[1] < 0x0a) {
3101@@ -205,7 +207,7 @@
3102 (scsi_CDs[minor].device->lun << 5) : 0;
3103 cmd[1] |= 0x03;
3104 cmd[2] = 0xb0;
3105- rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1, SCSI_DATA_READ, NULL);
3106+ rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3107 if (rc != 0)
3108 break;
3109 if (buffer[14] != 0 && buffer[14] != 0xb0) {
3110@@ -231,7 +233,7 @@
3111 cmd[1] = (scsi_CDs[minor].device->scsi_level <= SCSI_2) ?
3112 (scsi_CDs[minor].device->lun << 5) : 0;
3113 cmd[1] |= 0x03;
3114- rc = sr_do_ioctl(minor, cmd, buffer, 4, 1, SCSI_DATA_READ, NULL);
3115+ rc = sr_do_ioctl(minor, cmd, buffer, 4, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3116 if (rc == -EINVAL) {
3117 printk(KERN_INFO "sr%d: Hmm, seems the drive "
3118 "doesn't support multisession CD's\n", minor);
3119@@ -257,7 +259,7 @@
3120 (scsi_CDs[minor].device->lun << 5) : 0;
3121 cmd[8] = 0x04;
3122 cmd[9] = 0x40;
3123- rc = sr_do_ioctl(minor, cmd, buffer, 0x04, 1, SCSI_DATA_READ, NULL);
3124+ rc = sr_do_ioctl(minor, cmd, buffer, 0x04, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3125 if (rc != 0) {
3126 break;
3127 }
3128@@ -272,7 +274,7 @@
3129 cmd[6] = rc & 0x7f; /* number of last session */
3130 cmd[8] = 0x0c;
3131 cmd[9] = 0x40;
3132- rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL);
3133+ rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3134 if (rc != 0) {
3135 break;
3136 }
f87f0d90 3137diff -u -r -N ../../linus/2.4/linux/fs/udf/balloc.c linux/fs/udf/balloc.c
3138--- ../../linus/2.4/linux/fs/udf/balloc.c Tue Aug 6 21:16:21 2002
3139+++ linux/fs/udf/balloc.c Thu Aug 8 20:44:32 2002
3140@@ -461,8 +461,7 @@
3141 elen = 0;
3142 obloc = nbloc = UDF_I_LOCATION(table);
3143
3144- obh = nbh = udf_tread(sb, udf_get_lb_pblock(sb, nbloc, 0));
3145- atomic_inc(&nbh->b_count);
3146+ obh = nbh = NULL;
3147
3148 while (count && (etype =
3149 udf_next_aext(table, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
3150@@ -506,7 +505,7 @@
3151 udf_write_aext(table, obloc, &oextoffset, eloc, elen, obh, 1);
3152 }
3153
3154- if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
3155+ if (nbh != obh)
3156 {
3157 i = -1;
3158 obloc = nbloc;
3159@@ -580,7 +579,10 @@
3160 {
3161 loffset = nextoffset;
3162 aed->lengthAllocDescs = cpu_to_le32(adsize);
3163- sptr = (obh)->b_data + nextoffset - adsize;
3164+			if (!obh)
3165+ sptr = UDF_I_DATA(inode) + nextoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode) - adsize;
3166+ else
3167+ sptr = obh->b_data + nextoffset - adsize;
3168 dptr = nbh->b_data + sizeof(struct allocExtDesc);
3169 memcpy(dptr, sptr, adsize);
3170 nextoffset = sizeof(struct allocExtDesc) + adsize;
3171@@ -591,8 +593,8 @@
3172 aed->lengthAllocDescs = cpu_to_le32(0);
3173 sptr = (obh)->b_data + nextoffset;
3174 nextoffset = sizeof(struct allocExtDesc);
3175-
3176- if (memcmp(&UDF_I_LOCATION(table), &obloc, sizeof(lb_addr)))
3177+
3178+ if (obh)
3179 {
3180 aed = (struct allocExtDesc *)(obh)->b_data;
3181 aed->lengthAllocDescs =
3182@@ -631,15 +633,20 @@
3183 break;
3184 }
3185 }
3186- udf_update_tag(obh->b_data, loffset);
3187- mark_buffer_dirty(obh);
3188+ if (obh)
3189+ {
3190+ udf_update_tag(obh->b_data, loffset);
3191+ mark_buffer_dirty(obh);
3192+ }
3193+ else
3194+ mark_inode_dirty(table);
3195 }
3196
3197 if (elen) /* It's possible that stealing the block emptied the extent */
3198 {
3199 udf_write_aext(table, nbloc, &nextoffset, eloc, elen, nbh, 1);
3200
3201- if (!memcmp(&UDF_I_LOCATION(table), &nbloc, sizeof(lb_addr)))
3202+ if (!nbh)
3203 {
3204 UDF_I_LENALLOC(table) += adsize;
3205 mark_inode_dirty(table);
3206@@ -690,7 +697,7 @@
3207 extoffset = sizeof(struct unallocSpaceEntry);
3208 bloc = UDF_I_LOCATION(table);
3209
3210- bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0));
3211+ bh = NULL;
3212 eloc.logicalBlockNum = 0xFFFFFFFF;
3213
3214 while (first_block != eloc.logicalBlockNum && (etype =
3215@@ -768,8 +775,7 @@
3216 extoffset = sizeof(struct unallocSpaceEntry);
3217 bloc = UDF_I_LOCATION(table);
3218
3219- goal_bh = bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0));
3220- atomic_inc(&goal_bh->b_count);
3221+ goal_bh = bh = NULL;
3222
3223 while (spread && (etype =
3224 udf_next_aext(table, &bloc, &extoffset, &eloc, &elen, &bh, 1)) != -1)
3225diff -u -r -N ../../linus/2.4/linux/fs/udf/dir.c linux/fs/udf/dir.c
3226--- ../../linus/2.4/linux/fs/udf/dir.c Tue Aug 6 21:16:21 2002
3227+++ linux/fs/udf/dir.c Thu Aug 8 20:44:32 2002
3228@@ -122,7 +122,9 @@
3229 nf_pos = (udf_ext0_offset(dir) >> 2);
3230
3231 fibh.soffset = fibh.eoffset = (nf_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
3232- if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2),
3233+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
3234+ fibh.sbh = fibh.ebh = NULL;
3235+ else if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2),
3236 &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
3237 {
3238 offset >>= dir->i_sb->s_blocksize_bits;
3239@@ -136,40 +138,40 @@
3240 }
3241 else
3242 offset = 0;
3243- }
3244- else
3245- {
3246- udf_release_data(bh);
3247- return -ENOENT;
3248- }
3249-
3250- if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
3251- {
3252- udf_release_data(bh);
3253- return -EIO;
3254- }
3255
3256- if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9))-1)))
3257- {
3258- i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
3259- if (i+offset > (elen >> dir->i_sb->s_blocksize_bits))
3260- i = (elen >> dir->i_sb->s_blocksize_bits)-offset;
3261- for (num=0; i>0; i--)
3262+ if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
3263 {
3264- block = udf_get_lb_pblock(dir->i_sb, eloc, offset+i);
3265- tmp = udf_tgetblk(dir->i_sb, block);
3266- if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
3267- bha[num++] = tmp;
3268- else
3269- brelse(tmp);
3270+ udf_release_data(bh);
3271+ return -EIO;
3272 }
3273- if (num)
3274+
3275+ if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9))-1)))
3276 {
3277- ll_rw_block(READA, num, bha);
3278- for (i=0; i<num; i++)
3279- brelse(bha[i]);
3280+ i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
3281+ if (i+offset > (elen >> dir->i_sb->s_blocksize_bits))
3282+ i = (elen >> dir->i_sb->s_blocksize_bits)-offset;
3283+ for (num=0; i>0; i--)
3284+ {
3285+ block = udf_get_lb_pblock(dir->i_sb, eloc, offset+i);
3286+ tmp = udf_tgetblk(dir->i_sb, block);
3287+ if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
3288+ bha[num++] = tmp;
3289+ else
3290+ brelse(tmp);
3291+ }
3292+ if (num)
3293+ {
3294+ ll_rw_block(READA, num, bha);
3295+ for (i=0; i<num; i++)
3296+ brelse(bha[i]);
3297+ }
3298 }
3299 }
3300+ else
3301+ {
3302+ udf_release_data(bh);
3303+ return -ENOENT;
3304+ }
3305
3306 while ( nf_pos < size )
3307 {
3308diff -u -r -N ../../linus/2.4/linux/fs/udf/directory.c linux/fs/udf/directory.c
3309--- ../../linus/2.4/linux/fs/udf/directory.c Tue Aug 6 21:16:21 2002
3310+++ linux/fs/udf/directory.c Thu Aug 8 20:44:32 2002
3311@@ -17,6 +17,7 @@
3312 */
3313
3314 #include "udfdecl.h"
3315+#include "udf_i.h"
3316
3317 #include <linux/fs.h>
3318 #include <linux/string.h>
3319@@ -84,6 +85,21 @@
3320
3321 fibh->soffset = fibh->eoffset;
3322
3323+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
3324+ {
3325+ fi = udf_get_fileident(UDF_I_DATA(dir) - udf_file_entry_alloc_offset(dir),
3326+ dir->i_sb->s_blocksize, &(fibh->eoffset));
3327+
3328+ if (!fi)
3329+ return NULL;
3330+
3331+ *nf_pos += ((fibh->eoffset - fibh->soffset) >> 2);
3332+
3333+ memcpy((uint8_t *)cfi, (uint8_t *)fi, sizeof(struct fileIdentDesc));
3334+
3335+ return fi;
3336+ }
3337+
3338 if (fibh->eoffset == dir->i_sb->s_blocksize)
3339 {
3340 int lextoffset = *extoffset;
3341@@ -275,53 +291,43 @@
3342 }
3343
3344 short_ad *
3345-udf_get_fileshortad(void * buffer, int maxoffset, int *offset, int inc)
3346+udf_get_fileshortad(uint8_t *ptr, int maxoffset, int *offset, int inc)
3347 {
3348- short_ad * sa;
3349- uint8_t * ptr;
3350+ short_ad *sa;
3351
3352- if ( (!buffer) || (!offset) )
3353+ if ( (!ptr) || (!offset) )
3354 {
3355 printk(KERN_ERR "udf: udf_get_fileshortad() invalidparms\n");
3356 return NULL;
3357 }
3358
3359- ptr = (uint8_t *)buffer;
3360-
3361- if ( (*offset > 0) && (*offset < maxoffset) )
3362- ptr += *offset;
3363- else
3364+ if ( (*offset < 0) || ((*offset + sizeof(short_ad)) > maxoffset) )
3365 return NULL;
3366-
3367- if ((sa = (short_ad *)ptr)->extLength == 0)
3368+ else if ((sa = (short_ad *)ptr)->extLength == 0)
3369 return NULL;
3370- else if (inc)
3371- (*offset) += sizeof(short_ad);
3372+
3373+ if (inc)
3374+ *offset += sizeof(short_ad);
3375 return sa;
3376 }
3377
3378 long_ad *
3379-udf_get_filelongad(void * buffer, int maxoffset, int * offset, int inc)
3380+udf_get_filelongad(uint8_t *ptr, int maxoffset, int * offset, int inc)
3381 {
3382- long_ad * la;
3383- uint8_t * ptr;
3384+ long_ad *la;
3385
3386- if ( (!buffer) || !(offset) )
3387+ if ( (!ptr) || (!offset) )
3388 {
3389 printk(KERN_ERR "udf: udf_get_filelongad() invalidparms\n");
3390 return NULL;
3391 }
3392
3393- ptr = (uint8_t *)buffer;
3394-
3395- if ( (*offset > 0) && (*offset < maxoffset) )
3396- ptr += *offset;
3397- else
3398+ if ( (*offset < 0) || ((*offset + sizeof(long_ad)) > maxoffset) )
3399 return NULL;
3400-
3401- if ((la = (long_ad *)ptr)->extLength == 0)
3402+ else if ((la = (long_ad *)ptr)->extLength == 0)
3403 return NULL;
3404- else if (inc)
3405- (*offset) += sizeof(long_ad);
3406+
3407+ if (inc)
3408+ *offset += sizeof(long_ad);
3409 return la;
3410 }
3411diff -u -r -N ../../linus/2.4/linux/fs/udf/ecma_167.h linux/fs/udf/ecma_167.h
3412--- ../../linus/2.4/linux/fs/udf/ecma_167.h Tue Aug 6 21:16:21 2002
3413+++ linux/fs/udf/ecma_167.h Tue Aug 6 21:23:58 2002
3414@@ -606,7 +606,7 @@
3415 #define FE_RECORD_FMT_CRLF 0x0A
3416 #define FE_RECORD_FMT_LFCR 0x0B
3417
3418-#define Record Display Attributes (ECMA 167r3 4/14.9.8) */
3419+/* Record Display Attributes (ECMA 167r3 4/14.9.8) */
3420 #define FE_RECORD_DISPLAY_ATTR_UNDEF 0x00
3421 #define FE_RECORD_DISPLAY_ATTR_1 0x01
3422 #define FE_RECORD_DISPLAY_ATTR_2 0x02
3423diff -u -r -N ../../linus/2.4/linux/fs/udf/file.c linux/fs/udf/file.c
3424--- ../../linus/2.4/linux/fs/udf/file.c Tue Aug 6 21:16:21 2002
3425+++ linux/fs/udf/file.c Thu Aug 8 20:44:32 2002
3426@@ -46,64 +46,36 @@
3427 static int udf_adinicb_readpage(struct file *file, struct page * page)
3428 {
3429 struct inode *inode = page->mapping->host;
3430-
3431- struct buffer_head *bh;
3432- int block;
3433 char *kaddr;
3434- int err = 0;
3435
3436 if (!PageLocked(page))
3437 PAGE_BUG(page);
3438
3439 kaddr = kmap(page);
3440 memset(kaddr, 0, PAGE_CACHE_SIZE);
3441- block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
3442- bh = sb_bread(inode->i_sb, block);
3443- if (!bh)
3444- {
3445- SetPageError(page);
3446- err = -EIO;
3447- goto out;
3448- }
3449- memcpy(kaddr, bh->b_data + udf_ext0_offset(inode), inode->i_size);
3450- brelse(bh);
3451+ memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), inode->i_size);
3452 flush_dcache_page(page);
3453 SetPageUptodate(page);
3454-out:
3455 kunmap(page);
3456 UnlockPage(page);
3457- return err;
3458+ return 0;
3459 }
3460
3461 static int udf_adinicb_writepage(struct page *page)
3462 {
3463 struct inode *inode = page->mapping->host;
3464-
3465- struct buffer_head *bh;
3466- int block;
3467 char *kaddr;
3468- int err = 0;
3469
3470 if (!PageLocked(page))
3471 PAGE_BUG(page);
3472
3473 kaddr = kmap(page);
3474- block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
3475- bh = sb_bread(inode->i_sb, block);
3476- if (!bh)
3477- {
3478- SetPageError(page);
3479- err = -EIO;
3480- goto out;
3481- }
3482- memcpy(bh->b_data + udf_ext0_offset(inode), kaddr, inode->i_size);
3483- mark_buffer_dirty(bh);
3484- brelse(bh);
3485+ memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), kaddr, inode->i_size);
3486+ mark_inode_dirty(inode);
3487 SetPageUptodate(page);
3488-out:
3489 kunmap(page);
3490 UnlockPage(page);
3491- return err;
3492+ return 0;
3493 }
3494
3495 static int udf_adinicb_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
3496@@ -115,31 +87,17 @@
3497 static int udf_adinicb_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
3498 {
3499 struct inode *inode = page->mapping->host;
3500-
3501- struct buffer_head *bh;
3502- int block;
3503 char *kaddr = page_address(page);
3504- int err = 0;
3505
3506- block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
3507- bh = sb_bread(inode->i_sb, block);
3508- if (!bh)
3509- {
3510- SetPageError(page);
3511- err = -EIO;
3512- goto out;
3513- }
3514- memcpy(bh->b_data + udf_file_entry_alloc_offset(inode) + offset,
3515+ memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset,
3516 kaddr + offset, to - offset);
3517- mark_buffer_dirty(bh);
3518- brelse(bh);
3519+ mark_inode_dirty(inode);
3520 SetPageUptodate(page);
3521-out:
3522 kunmap(page);
3523 /* only one page here */
3524 if (to > inode->i_size)
3525 inode->i_size = to;
3526- return err;
3527+ return 0;
3528 }
3529
3530 struct address_space_operations udf_adinicb_aops = {
3531@@ -231,9 +189,6 @@
3532 unsigned long arg)
3533 {
3534 int result = -EINVAL;
3535- struct buffer_head *bh = NULL;
3536- long_ad eaicb;
3537- uint8_t *ea = NULL;
3538
3539 if ( permission(inode, MAY_READ) != 0 )
3540 {
3541@@ -248,7 +203,6 @@
3542 return -EINVAL;
3543 }
3544
3545- /* first, do ioctls that don't need to udf_read */
3546 switch (cmd)
3547 {
3548 case UDF_GETVOLIDENT:
3549@@ -266,50 +220,16 @@
3550
3551 return result;
3552 }
3553- }
3554-
3555- /* ok, we need to read the inode */
3556- bh = udf_tread(inode->i_sb,
3557- udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
3558-
3559- if (!bh)
3560- {
3561- udf_debug("bread failed (inode=%ld)\n", inode->i_ino);
3562- return -EIO;
3563- }
3564-
3565- if (UDF_I_EXTENDED_FE(inode) == 0)
3566- {
3567- struct fileEntry *fe;
3568-
3569- fe = (struct fileEntry *)bh->b_data;
3570- eaicb = lela_to_cpu(fe->extendedAttrICB);
3571- if (UDF_I_LENEATTR(inode))
3572- ea = fe->extendedAttr;
3573- }
3574- else
3575- {
3576- struct extendedFileEntry *efe;
3577-
3578- efe = (struct extendedFileEntry *)bh->b_data;
3579- eaicb = lela_to_cpu(efe->extendedAttrICB);
3580- if (UDF_I_LENEATTR(inode))
3581- ea = efe->extendedAttr;
3582- }
3583-
3584- switch (cmd)
3585- {
3586 case UDF_GETEASIZE:
3587 result = put_user(UDF_I_LENEATTR(inode), (int *)arg);
3588 break;
3589
3590 case UDF_GETEABLOCK:
3591- result = copy_to_user((char *)arg, ea,
3592+ result = copy_to_user((char *)arg, UDF_I_DATA(inode),
3593 UDF_I_LENEATTR(inode)) ? -EFAULT : 0;
3594 break;
3595 }
3596
3597- udf_release_data(bh);
3598 return result;
3599 }
3600
3601diff -u -r -N ../../linus/2.4/linux/fs/udf/ialloc.c linux/fs/udf/ialloc.c
3602--- ../../linus/2.4/linux/fs/udf/ialloc.c Tue Aug 6 21:16:21 2002
3603+++ linux/fs/udf/ialloc.c Thu Aug 8 20:44:32 2002
3604@@ -28,6 +28,7 @@
3605 #include <linux/locks.h>
3606 #include <linux/quotaops.h>
3607 #include <linux/udf_fs.h>
3608+#include <linux/slab.h>
3609
3610 #include "udf_i.h"
3611 #include "udf_sb.h"
3612@@ -130,13 +131,20 @@
3613 inode->i_blocks = 0;
3614 UDF_I_LENEATTR(inode) = 0;
3615 UDF_I_LENALLOC(inode) = 0;
3616+ UDF_I_USE(inode) = 0;
3617 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE))
3618 {
3619- UDF_I_EXTENDED_FE(inode) = 1;
3620+ UDF_I_EFE(inode) = 1;
3621 UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
3622+ UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
3623+ memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
3624 }
3625 else
3626- UDF_I_EXTENDED_FE(inode) = 0;
3627+ {
3628+ UDF_I_EFE(inode) = 0;
3629+ UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
3630+ memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct fileEntry));
3631+ }
3632 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
3633 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
3634 else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
3635@@ -147,7 +155,6 @@
3636 UDF_I_CRTIME(inode) = CURRENT_TIME;
3637 UDF_I_UMTIME(inode) = UDF_I_UCTIME(inode) =
3638 UDF_I_UCRTIME(inode) = CURRENT_UTIME;
3639- UDF_I_NEW_INODE(inode) = 1;
3640 insert_inode_hash(inode);
3641 mark_inode_dirty(inode);
3642
3643diff -u -r -N ../../linus/2.4/linux/fs/udf/inode.c linux/fs/udf/inode.c
3644--- ../../linus/2.4/linux/fs/udf/inode.c Tue Aug 6 21:16:21 2002
3645+++ linux/fs/udf/inode.c Thu Aug 8 20:44:32 2002
3646@@ -38,6 +38,7 @@
3647 #include <linux/mm.h>
3648 #include <linux/smp_lock.h>
3649 #include <linux/module.h>
3650+#include <linux/slab.h>
3651
3652 #include "udf_i.h"
3653 #include "udf_sb.h"
3654@@ -122,6 +123,11 @@
3655 clear_inode(inode);
3656 }
3657
3658+void udf_clear_inode(struct inode *inode)
3659+{
3660+ kfree(UDF_I_DATA(inode));
3661+}
3662+
3663 void udf_discard_prealloc(struct inode * inode)
3664 {
3665 if (inode->i_size && inode->i_size != UDF_I_LENEXTENTS(inode) &&
3666@@ -162,10 +168,8 @@
3667
3668 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
3669 {
3670- struct buffer_head *bh = NULL;
3671 struct page *page;
3672 char *kaddr;
3673- int block;
3674
3675 /* from now on we have normal address_space methods */
3676 inode->i_data.a_ops = &udf_aops;
3677@@ -180,10 +184,6 @@
3678 return;
3679 }
3680
3681- block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
3682- bh = udf_tread(inode->i_sb, block);
3683- if (!bh)
3684- return;
3685 page = grab_cache_page(inode->i_mapping, 0);
3686 if (!PageLocked(page))
3687 PAGE_BUG(page);
3688@@ -192,21 +192,19 @@
3689 kaddr = kmap(page);
3690 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
3691 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
3692- memcpy(kaddr, bh->b_data + udf_file_entry_alloc_offset(inode),
3693+ memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
3694 UDF_I_LENALLOC(inode));
3695 flush_dcache_page(page);
3696 SetPageUptodate(page);
3697 kunmap(page);
3698 }
3699- memset(bh->b_data + udf_file_entry_alloc_offset(inode),
3700- 0, UDF_I_LENALLOC(inode));
3701+ memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
3702+ UDF_I_LENALLOC(inode));
3703 UDF_I_LENALLOC(inode) = 0;
3704 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
3705 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
3706 else
3707 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
3708- mark_buffer_dirty_inode(bh, inode);
3709- udf_release_data(bh);
3710
3711 inode->i_data.a_ops->writepage(page);
3712 page_cache_release(page);
3713@@ -221,18 +219,21 @@
3714 struct buffer_head *sbh = NULL, *dbh = NULL;
3715 lb_addr bloc, eloc;
3716 uint32_t elen, extoffset;
3717+ uint8_t alloctype;
3718
3719 struct udf_fileident_bh sfibh, dfibh;
3720 loff_t f_pos = udf_ext0_offset(inode) >> 2;
3721 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
3722 struct fileIdentDesc cfi, *sfi, *dfi;
3723
3724+ if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
3725+ alloctype = ICBTAG_FLAG_AD_SHORT;
3726+ else
3727+ alloctype = ICBTAG_FLAG_AD_LONG;
3728+
3729 if (!inode->i_size)
3730 {
3731- if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
3732- UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
3733- else
3734- UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
3735+ UDF_I_ALLOCTYPE(inode) = alloctype;
3736 mark_inode_dirty(inode);
3737 return NULL;
3738 }
3739@@ -248,9 +249,6 @@
3740 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
3741 if (!newblock)
3742 return NULL;
3743- sbh = udf_tread(inode->i_sb, inode->i_ino);
3744- if (!sbh)
3745- return NULL;
3746 dbh = udf_tgetblk(inode->i_sb, newblock);
3747 if (!dbh)
3748 return NULL;
3749@@ -261,18 +259,19 @@
3750 mark_buffer_dirty_inode(dbh, inode);
3751
3752 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
3753- sfibh.sbh = sfibh.ebh = sbh;
3754+ sbh = sfibh.sbh = sfibh.ebh = NULL;
3755 dfibh.soffset = dfibh.eoffset = 0;
3756 dfibh.sbh = dfibh.ebh = dbh;
3757 while ( (f_pos < size) )
3758 {
3759+ UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
3760 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
3761 if (!sfi)
3762 {
3763- udf_release_data(sbh);
3764 udf_release_data(dbh);
3765 return NULL;
3766 }
3767+ UDF_I_ALLOCTYPE(inode) = alloctype;
3768 sfi->descTag.tagLocation = *block;
3769 dfibh.soffset = dfibh.eoffset;
3770 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
3771@@ -280,21 +279,15 @@
3772 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
3773 sfi->fileIdent + sfi->lengthOfImpUse))
3774 {
3775- udf_release_data(sbh);
3776+ UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
3777 udf_release_data(dbh);
3778 return NULL;
3779 }
3780 }
3781 mark_buffer_dirty_inode(dbh, inode);
3782
3783- memset(sbh->b_data + udf_file_entry_alloc_offset(inode),
3784- 0, UDF_I_LENALLOC(inode));
3785-
3786+ memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
3787 UDF_I_LENALLOC(inode) = 0;
3788- if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
3789- UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
3790- else
3791- UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
3792 bloc = UDF_I_LOCATION(inode);
3793 eloc.logicalBlockNum = *block;
3794 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
3795@@ -304,7 +297,6 @@
3796 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
3797 /* UniqueID stuff */
3798
3799- mark_buffer_dirty(sbh);
3800 udf_release_data(sbh);
3801 mark_inode_dirty(inode);
3802 inode->i_version ++;
3803@@ -732,7 +724,7 @@
3804
3805 if (elen > numalloc)
3806 {
3807- laarr[c].extLength -=
3808+ laarr[i].extLength -=
3809 (numalloc << inode->i_sb->s_blocksize_bits);
3810 numalloc = 0;
3811 }
3812@@ -854,7 +846,6 @@
3813 void udf_truncate(struct inode * inode)
3814 {
3815 int offset;
3816- struct buffer_head *bh;
3817 int err;
3818
3819 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3820@@ -879,16 +870,8 @@
3821 }
3822 else
3823 {
3824- offset = (inode->i_size & (inode->i_sb->s_blocksize - 1)) +
3825- udf_file_entry_alloc_offset(inode);
3826-
3827- if ((bh = udf_tread(inode->i_sb,
3828- udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0))))
3829- {
3830- memset(bh->b_data + offset, 0x00, inode->i_sb->s_blocksize - offset);
3831- mark_buffer_dirty(bh);
3832- udf_release_data(bh);
3833- }
3834+ offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
3835+ memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
3836 UDF_I_LENALLOC(inode) = inode->i_size;
3837 }
3838 }
3839@@ -1037,7 +1020,6 @@
3840 int offset, alen;
3841
3842 inode->i_version = ++event;
3843- UDF_I_NEW_INODE(inode) = 0;
3844
3845 fe = (struct fileEntry *)bh->b_data;
3846 efe = (struct extendedFileEntry *)bh->b_data;
3847@@ -1049,14 +1031,28 @@
3848
3849 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
3850 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
3851- UDF_I_EXTENDED_FE(inode) = 1;
3852+ {
3853+ UDF_I_EFE(inode) = 1;
3854+ UDF_I_USE(inode) = 0;
3855+ UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
3856+ memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
3857+ }
3858 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
3859- UDF_I_EXTENDED_FE(inode) = 0;
3860+ {
3861+ UDF_I_EFE(inode) = 0;
3862+ UDF_I_USE(inode) = 0;
3863+ UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
3864+ memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
3865+ }
3866 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
3867 {
3868+ UDF_I_EFE(inode) = 0;
3869+ UDF_I_USE(inode) = 1;
3870 UDF_I_LENALLOC(inode) =
3871 le32_to_cpu(
3872 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
3873+ UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
3874+ memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
3875 return;
3876 }
3877
3878@@ -1079,7 +1075,7 @@
3879 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
3880 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
3881
3882- if (UDF_I_EXTENDED_FE(inode) == 0)
3883+ if (UDF_I_EFE(inode) == 0)
3884 {
3885 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
3886 (inode->i_sb->s_blocksize_bits - 9);
3887@@ -1325,19 +1321,11 @@
3888 udf_debug("bread failure\n");
3889 return -EIO;
3890 }
3891+
3892+ memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
3893+
3894 fe = (struct fileEntry *)bh->b_data;
3895 efe = (struct extendedFileEntry *)bh->b_data;
3896- if (UDF_I_NEW_INODE(inode) == 1)
3897- {
3898- if (UDF_I_EXTENDED_FE(inode) == 0)
3899- memset(bh->b_data, 0x00, sizeof(struct fileEntry));
3900- else
3901- memset(bh->b_data, 0x00, sizeof(struct extendedFileEntry));
3902- memset(bh->b_data + udf_file_entry_alloc_offset(inode) +
3903- UDF_I_LENALLOC(inode), 0x0, inode->i_sb->s_blocksize -
3904- udf_file_entry_alloc_offset(inode) - UDF_I_LENALLOC(inode));
3905- UDF_I_NEW_INODE(inode) = 0;
3906- }
3907
3908 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
3909 {
3910@@ -1345,6 +1333,7 @@
3911 (struct unallocSpaceEntry *)bh->b_data;
3912
3913 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
3914+ memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
3915 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
3916 sizeof(tag);
3917 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
3918@@ -1415,8 +1404,9 @@
3919 udf_release_data(tbh);
3920 }
3921
3922- if (UDF_I_EXTENDED_FE(inode) == 0)
3923+ if (UDF_I_EFE(inode) == 0)
3924 {
3925+ memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
3926 fe->logicalBlocksRecorded = cpu_to_le64(
3927 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
3928 (inode->i_sb->s_blocksize_bits - 9));
3929@@ -1439,6 +1429,7 @@
3930 }
3931 else
3932 {
3933+ memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
3934 efe->objectSize = cpu_to_le64(inode->i_size);
3935 efe->logicalBlocksRecorded = cpu_to_le64(
3936 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
3937@@ -1619,17 +1610,12 @@
3938 long_ad *lad = NULL;
3939 struct allocExtDesc *aed;
3940 int8_t etype;
3941+ uint8_t *ptr;
3942
3943- if (!(*bh))
3944- {
3945- if (!(*bh = udf_tread(inode->i_sb,
3946- udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
3947- {
3948- udf_debug("reading block %d failed!\n",
3949- udf_get_lb_pblock(inode->i_sb, *bloc, 0));
3950- return -1;
3951- }
3952- }
3953+ if (!*bh)
3954+ ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
3955+ else
3956+ ptr = (*bh)->b_data + *extoffset;
3957
3958 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
3959 adsize = sizeof(short_ad);
3960@@ -1668,7 +1654,7 @@
3961 {
3962 loffset = *extoffset;
3963 aed->lengthAllocDescs = cpu_to_le32(adsize);
3964- sptr = (*bh)->b_data + *extoffset - adsize;
3965+ sptr = ptr - adsize;
3966 dptr = nbh->b_data + sizeof(struct allocExtDesc);
3967 memcpy(dptr, sptr, adsize);
3968 *extoffset = sizeof(struct allocExtDesc) + adsize;
3969@@ -1677,10 +1663,10 @@
3970 {
3971 loffset = *extoffset + adsize;
3972 aed->lengthAllocDescs = cpu_to_le32(0);
3973- sptr = (*bh)->b_data + *extoffset;
3974+ sptr = ptr;
3975 *extoffset = sizeof(struct allocExtDesc);
3976
3977- if (memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
3978+ if (*bh)
3979 {
3980 aed = (struct allocExtDesc *)(*bh)->b_data;
3981 aed->lengthAllocDescs =
3982@@ -1720,18 +1706,23 @@
3983 break;
3984 }
3985 }
3986- if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
3987- udf_update_tag((*bh)->b_data, loffset);
3988+ if (*bh)
3989+ {
3990+ if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
3991+ udf_update_tag((*bh)->b_data, loffset);
3992+ else
3993+ udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
3994+ mark_buffer_dirty_inode(*bh, inode);
3995+ udf_release_data(*bh);
3996+ }
3997 else
3998- udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
3999- mark_buffer_dirty_inode(*bh, inode);
4000- udf_release_data(*bh);
4001+ mark_inode_dirty(inode);
4002 *bh = nbh;
4003 }
4004
4005 etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);
4006
4007- if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
4008+ if (!*bh)
4009 {
4010 UDF_I_LENALLOC(inode) += adsize;
4011 mark_inode_dirty(inode);
4012@@ -1755,49 +1746,40 @@
4013 lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
4014 {
4015 int adsize;
4016- short_ad *sad = NULL;
4017- long_ad *lad = NULL;
4018+ uint8_t *ptr;
4019
4020- if (!(bh))
4021- {
4022- if (!(bh = udf_tread(inode->i_sb,
4023- udf_get_lb_pblock(inode->i_sb, bloc, 0))))
4024- {
4025- udf_debug("reading block %d failed!\n",
4026- udf_get_lb_pblock(inode->i_sb, bloc, 0));
4027- return -1;
4028- }
4029- }
4030+ if (!bh)
4031+ ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
4032 else
4033+ {
4034+ ptr = bh->b_data + *extoffset;
4035 atomic_inc(&bh->b_count);
4036-
4037- if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
4038- adsize = sizeof(short_ad);
4039- else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
4040- adsize = sizeof(long_ad);
4041- else
4042- return -1;
4043+ }
4044
4045 switch (UDF_I_ALLOCTYPE(inode))
4046 {
4047 case ICBTAG_FLAG_AD_SHORT:
4048 {
4049- sad = (short_ad *)((bh)->b_data + *extoffset);
4050+ short_ad *sad = (short_ad *)ptr;
4051 sad->extLength = cpu_to_le32(elen);
4052 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
4053+ adsize = sizeof(short_ad);
4054 break;
4055 }
4056 case ICBTAG_FLAG_AD_LONG:
4057 {
4058- lad = (long_ad *)((bh)->b_data + *extoffset);
4059+ long_ad *lad = (long_ad *)ptr;
4060 lad->extLength = cpu_to_le32(elen);
4061 lad->extLocation = cpu_to_lelb(eloc);
4062 memset(lad->impUse, 0x00, sizeof(lad->impUse));
4063+ adsize = sizeof(long_ad);
4064 break;
4065 }
4066+ default:
4067+ return -1;
4068 }
4069
4070- if (memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
4071+ if (bh)
4072 {
4073 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
4074 {
4075@@ -1806,30 +1788,28 @@
4076 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
4077 }
4078 mark_buffer_dirty_inode(bh, inode);
4079+ udf_release_data(bh);
4080 }
4081 else
4082- {
4083 mark_inode_dirty(inode);
4084- mark_buffer_dirty(bh);
4085- }
4086
4087 if (inc)
4088 *extoffset += adsize;
4089- udf_release_data(bh);
4090 return (elen >> 30);
4091 }
4092
4093 int8_t udf_next_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
4094 lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
4095 {
4096- uint16_t tagIdent;
4097- int pos, alen;
4098 int8_t etype;
4099
4100- if (!(*bh))
4101+ while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
4102+ (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
4103 {
4104- if (!(*bh = udf_tread(inode->i_sb,
4105- udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
4106+ *bloc = *eloc;
4107+ *extoffset = sizeof(struct allocExtDesc);
4108+ udf_release_data(*bh);
4109+ if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
4110 {
4111 udf_debug("reading block %d failed!\n",
4112 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
4113@@ -1837,154 +1817,38 @@
4114 }
4115 }
4116
4117- tagIdent = le16_to_cpu(((tag *)(*bh)->b_data)->tagIdent);
4118-
4119- if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
4120- {
4121- if (tagIdent == TAG_IDENT_FE || tagIdent == TAG_IDENT_EFE ||
4122- UDF_I_NEW_INODE(inode))
4123- {
4124- pos = udf_file_entry_alloc_offset(inode);
4125- alen = UDF_I_LENALLOC(inode) + pos;
4126- }
4127- else if (tagIdent == TAG_IDENT_USE)
4128- {
4129- pos = sizeof(struct unallocSpaceEntry);
4130- alen = UDF_I_LENALLOC(inode) + pos;
4131- }
4132- else
4133- return -1;
4134- }
4135- else if (tagIdent == TAG_IDENT_AED)
4136- {
4137- struct allocExtDesc *aed = (struct allocExtDesc *)(*bh)->b_data;
4138-
4139- pos = sizeof(struct allocExtDesc);
4140- alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
4141- }
4142- else
4143- return -1;
4144-
4145- if (!(*extoffset))
4146- *extoffset = pos;
4147-
4148- switch (UDF_I_ALLOCTYPE(inode))
4149- {
4150- case ICBTAG_FLAG_AD_SHORT:
4151- {
4152- short_ad *sad;
4153-
4154- if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
4155- return -1;
4156-
4157- if ((etype = le32_to_cpu(sad->extLength) >> 30) == (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
4158- {
4159- bloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
4160- *extoffset = 0;
4161- udf_release_data(*bh);
4162- *bh = NULL;
4163- return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
4164- }
4165- else
4166- {
4167- eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
4168- eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
4169- *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
4170- }
4171- break;
4172- }
4173- case ICBTAG_FLAG_AD_LONG:
4174- {
4175- long_ad *lad;
4176-
4177- if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
4178- return -1;
4179-
4180- if ((etype = le32_to_cpu(lad->extLength) >> 30) == (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
4181- {
4182- *bloc = lelb_to_cpu(lad->extLocation);
4183- *extoffset = 0;
4184- udf_release_data(*bh);
4185- *bh = NULL;
4186- return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
4187- }
4188- else
4189- {
4190- *eloc = lelb_to_cpu(lad->extLocation);
4191- *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
4192- }
4193- break;
4194- }
4195- case ICBTAG_FLAG_AD_IN_ICB:
4196- {
4197- if (UDF_I_LENALLOC(inode) == 0)
4198- return -1;
4199- etype = (EXT_RECORDED_ALLOCATED >> 30);
4200- *eloc = UDF_I_LOCATION(inode);
4201- *elen = UDF_I_LENALLOC(inode);
4202- break;
4203- }
4204- default:
4205- {
4206- udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
4207- return -1;
4208- }
4209- }
4210- if (*elen)
4211- return etype;
4212-
4213- udf_debug("Empty Extent, inode=%ld, alloctype=%d, eloc=%d, elen=%d, etype=%d, extoffset=%d\n",
4214- inode->i_ino, UDF_I_ALLOCTYPE(inode), eloc->logicalBlockNum, *elen, etype, *extoffset);
4215- if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
4216- *extoffset -= sizeof(short_ad);
4217- else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
4218- *extoffset -= sizeof(long_ad);
4219- return -1;
4220+ return etype;
4221 }
4222
4223 int8_t udf_current_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
4224 lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
4225 {
4226- int pos, alen;
4227+ int alen;
4228 int8_t etype;
4229+ uint8_t *ptr;
4230
4231- if (!(*bh))
4232+ if (!*bh)
4233 {
4234- if (!(*bh = udf_tread(inode->i_sb,
4235- udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
4236- {
4237- udf_debug("reading block %d failed!\n",
4238- udf_get_lb_pblock(inode->i_sb, *bloc, 0));
4239- return -1;
4240- }
4241- }
4242-
4243- if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
4244- {
4245- if (!(UDF_I_EXTENDED_FE(inode)))
4246- pos = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
4247- else
4248- pos = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
4249- alen = UDF_I_LENALLOC(inode) + pos;
4250+ if (!(*extoffset))
4251+ *extoffset = udf_file_entry_alloc_offset(inode);
4252+ ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
4253+ alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
4254 }
4255 else
4256 {
4257- struct allocExtDesc *aed = (struct allocExtDesc *)(*bh)->b_data;
4258-
4259- pos = sizeof(struct allocExtDesc);
4260- alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
4261+ if (!(*extoffset))
4262+ *extoffset = sizeof(struct allocExtDesc);
4263+ ptr = (*bh)->b_data + *extoffset;
4264+ alen = le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
4265 }
4266
4267- if (!(*extoffset))
4268- *extoffset = pos;
4269-
4270 switch (UDF_I_ALLOCTYPE(inode))
4271 {
4272 case ICBTAG_FLAG_AD_SHORT:
4273 {
4274 short_ad *sad;
4275
4276- if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
4277+ if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
4278 return -1;
4279
4280 etype = le32_to_cpu(sad->extLength) >> 30;
4281@@ -1997,7 +1861,7 @@
4282 {
4283 long_ad *lad;
4284
4285- if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
4286+ if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
4287 return -1;
4288
4289 etype = le32_to_cpu(lad->extLength) >> 30;
4290@@ -2011,15 +1875,8 @@
4291 return -1;
4292 }
4293 }
4294- if (*elen)
4295- return etype;
4296
4297- udf_debug("Empty Extent!\n");
4298- if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
4299- *extoffset -= sizeof(short_ad);
4300- else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
4301- *extoffset -= sizeof(long_ad);
4302- return -1;
4303+ return etype;
4304 }
4305
4306 int8_t udf_insert_aext(struct inode *inode, lb_addr bloc, int extoffset,
4307@@ -2029,17 +1886,7 @@
4308 uint32_t oelen;
4309 int8_t etype;
4310
4311- if (!bh)
4312- {
4313- if (!(bh = udf_tread(inode->i_sb,
4314- udf_get_lb_pblock(inode->i_sb, bloc, 0))))
4315- {
4316- udf_debug("reading block %d failed!\n",
4317- udf_get_lb_pblock(inode->i_sb, bloc, 0));
4318- return -1;
4319- }
4320- }
4321- else
4322+ if (bh)
4323 atomic_inc(&bh->b_count);
4324
4325 while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
4326@@ -2063,19 +1910,11 @@
4327 int8_t etype;
4328 struct allocExtDesc *aed;
4329
4330- if (!(nbh))
4331+ if (nbh)
4332 {
4333- if (!(nbh = udf_tread(inode->i_sb,
4334- udf_get_lb_pblock(inode->i_sb, nbloc, 0))))
4335- {
4336- udf_debug("reading block %d failed!\n",
4337- udf_get_lb_pblock(inode->i_sb, nbloc, 0));
4338- return -1;
4339- }
4340- }
4341- else
4342 atomic_inc(&nbh->b_count);
4343- atomic_inc(&nbh->b_count);
4344+ atomic_inc(&nbh->b_count);
4345+ }
4346
4347 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
4348 adsize = sizeof(short_ad);
4349@@ -2094,7 +1933,7 @@
4350 while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
4351 {
4352 udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
4353- if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
4354+ if (obh != nbh)
4355 {
4356 obloc = nbloc;
4357 udf_release_data(obh);
4358@@ -2106,12 +1945,12 @@
4359 memset(&eloc, 0x00, sizeof(lb_addr));
4360 elen = 0;
4361
4362- if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
4363+ if (nbh != obh)
4364 {
4365 udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
4366 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
4367 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
4368- if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
4369+ if (!obh)
4370 {
4371 UDF_I_LENALLOC(inode) -= (adsize * 2);
4372 mark_inode_dirty(inode);
4373@@ -2131,7 +1970,7 @@
4374 else
4375 {
4376 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
4377- if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
4378+ if (!obh)
4379 {
4380 UDF_I_LENALLOC(inode) -= adsize;
4381 mark_inode_dirty(inode);
4382@@ -2206,9 +2045,7 @@
4383 ret = 0;
4384
4385 unlock_kernel();
4386-
4387- if (bh)
4388- udf_release_data(bh);
4389+ udf_release_data(bh);
4390
4391 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
4392 return udf_fixed_to_variable(ret);
4393diff -u -r -N ../../linus/2.4/linux/fs/udf/misc.c linux/fs/udf/misc.c
4394--- ../../linus/2.4/linux/fs/udf/misc.c Tue Aug 6 21:16:21 2002
4395+++ linux/fs/udf/misc.c Thu Aug 8 20:44:32 2002
4396@@ -73,7 +73,7 @@
4397
4398 *bh = udf_tread(inode->i_sb, inode->i_ino);
4399
4400- if (UDF_I_EXTENDED_FE(inode) == 0)
4401+ if (UDF_I_EFE(inode) == 0)
4402 {
4403 struct fileEntry *fe;
4404
4405@@ -189,7 +189,7 @@
4406
4407 *bh = udf_tread(inode->i_sb, inode->i_ino);
4408
4409- if (UDF_I_EXTENDED_FE(inode) == 0)
4410+ if (UDF_I_EFE(inode) == 0)
4411 {
4412 struct fileEntry *fe;
4413
4414diff -u -r -N ../../linus/2.4/linux/fs/udf/namei.c linux/fs/udf/namei.c
4415--- ../../linus/2.4/linux/fs/udf/namei.c Tue Aug 6 21:16:21 2002
4416+++ linux/fs/udf/namei.c Thu Aug 8 20:44:32 2002
4417@@ -56,12 +56,16 @@
4418 uint8_t lfi = cfi->lengthFileIdent;
4419 int padlen = fibh->eoffset - fibh->soffset - liu - lfi -
4420 sizeof(struct fileIdentDesc);
4421+ int adinicb = 0;
4422+
4423+ if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
4424+ adinicb = 1;
4425
4426 offset = fibh->soffset + sizeof(struct fileIdentDesc);
4427
4428 if (impuse)
4429 {
4430- if (offset + liu < 0)
4431+ if (adinicb || (offset + liu < 0))
4432 memcpy((uint8_t *)sfi->impUse, impuse, liu);
4433 else if (offset >= 0)
4434 memcpy(fibh->ebh->b_data + offset, impuse, liu);
4435@@ -76,7 +80,7 @@
4436
4437 if (fileident)
4438 {
4439- if (offset + lfi < 0)
4440+ if (adinicb || (offset + lfi < 0))
4441 memcpy((uint8_t *)sfi->fileIdent + liu, fileident, lfi);
4442 else if (offset >= 0)
4443 memcpy(fibh->ebh->b_data + offset, fileident, lfi);
4444@@ -89,7 +93,7 @@
4445
4446 offset += lfi;
4447
4448- if (offset + padlen < 0)
4449+ if (adinicb || (offset + padlen < 0))
4450 memset((uint8_t *)sfi->padding + liu + lfi, 0x00, padlen);
4451 else if (offset >= 0)
4452 memset(fibh->ebh->b_data + offset, 0x00, padlen);
4453@@ -123,7 +127,7 @@
4454 checksum += ((uint8_t *)&cfi->descTag)[i];
4455
4456 cfi->descTag.tagChecksum = checksum;
4457- if (sizeof(struct fileIdentDesc) <= -fibh->soffset)
4458+ if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset))
4459 memcpy((uint8_t *)sfi, (uint8_t *)cfi, sizeof(struct fileIdentDesc));
4460 else
4461 {
4462@@ -132,9 +136,14 @@
4463 sizeof(struct fileIdentDesc) + fibh->soffset);
4464 }
4465
4466- if (fibh->sbh != fibh->ebh)
4467- mark_buffer_dirty_inode(fibh->ebh, inode);
4468- mark_buffer_dirty_inode(fibh->sbh, inode);
4469+ if (adinicb)
4470+ mark_inode_dirty(inode);
4471+ else
4472+ {
4473+ if (fibh->sbh != fibh->ebh)
4474+ mark_buffer_dirty_inode(fibh->ebh, inode);
4475+ mark_buffer_dirty_inode(fibh->sbh, inode);
4476+ }
4477 return 0;
4478 }
4479
4480@@ -161,7 +170,9 @@
4481 f_pos = (udf_ext0_offset(dir) >> 2);
4482
4483 fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
4484- if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
4485+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
4486+ fibh->sbh = fibh->ebh = NULL;
4487+ else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
4488 &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
4489 {
4490 offset >>= dir->i_sb->s_blocksize_bits;
4491@@ -175,6 +186,12 @@
4492 }
4493 else
4494 offset = 0;
4495+
4496+ if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
4497+ {
4498+ udf_release_data(bh);
4499+ return NULL;
4500+ }
4501 }
4502 else
4503 {
4504@@ -182,12 +199,6 @@
4505 return NULL;
4506 }
4507
4508- if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
4509- {
4510- udf_release_data(bh);
4511- return NULL;
4512- }
4513-
4514 while ( (f_pos < size) )
4515 {
4516 fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
4517@@ -388,7 +399,9 @@
4518 f_pos = (udf_ext0_offset(dir) >> 2);
4519
4520 fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
4521- if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
4522+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
4523+ fibh->sbh = fibh->ebh = NULL;
4524+ else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
4525 &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
4526 {
4527 offset >>= dir->i_sb->s_blocksize_bits;
4528@@ -409,94 +422,89 @@
4529 *err = -EIO;
4530 return NULL;
4531 }
4532-
4533+
4534 block = UDF_I_LOCATION(dir).logicalBlockNum;
4535-
4536- while ( (f_pos < size) )
4537+
4538+ }
4539+ else
4540+ {
4541+ block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0);
4542+ fibh->sbh = fibh->ebh = NULL;
4543+ fibh->soffset = fibh->eoffset = sb->s_blocksize;
4544+ goto add;
4545+ }
4546+
4547+ while ( (f_pos < size) )
4548+ {
4549+ fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
4550+
4551+ if (!fi)
4552 {
4553- fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
4554-
4555- if (!fi)
4556- {
4557- if (fibh->sbh != fibh->ebh)
4558- udf_release_data(fibh->ebh);
4559- udf_release_data(fibh->sbh);
4560- udf_release_data(bh);
4561- *err = -EIO;
4562- return NULL;
4563- }
4564-
4565- liu = le16_to_cpu(cfi->lengthOfImpUse);
4566- lfi = cfi->lengthFileIdent;
4567-
4568- if (fibh->sbh == fibh->ebh)
4569- nameptr = fi->fileIdent + liu;
4570+ if (fibh->sbh != fibh->ebh)
4571+ udf_release_data(fibh->ebh);
4572+ udf_release_data(fibh->sbh);
4573+ udf_release_data(bh);
4574+ *err = -EIO;
4575+ return NULL;
4576+ }
4577+
4578+ liu = le16_to_cpu(cfi->lengthOfImpUse);
4579+ lfi = cfi->lengthFileIdent;
4580+
4581+ if (fibh->sbh == fibh->ebh)
4582+ nameptr = fi->fileIdent + liu;
4583+ else
4584+ {
4585+ int poffset; /* Unpaded ending offset */
4586+
4587+ poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
4588+
4589+ if (poffset >= lfi)
4590+ nameptr = (char *)(fibh->ebh->b_data + poffset - lfi);
4591 else
4592 {
4593- int poffset; /* Unpaded ending offset */
4594-
4595- poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
4596-
4597- if (poffset >= lfi)
4598- nameptr = (char *)(fibh->ebh->b_data + poffset - lfi);
4599- else
4600- {
4601- nameptr = fname;
4602- memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
4603- memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
4604- }
4605+ nameptr = fname;
4606+ memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
4607+ memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
4608 }
4609-
4610- if ( (cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
4611+ }
4612+
4613+ if ( (cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
4614+ {
4615+ if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen)
4616 {
4617- if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen)
4618+ udf_release_data(bh);
4619+ cfi->descTag.tagSerialNum = cpu_to_le16(1);
4620+ cfi->fileVersionNum = cpu_to_le16(1);
4621+ cfi->fileCharacteristics = 0;
4622+ cfi->lengthFileIdent = namelen;
4623+ cfi->lengthOfImpUse = cpu_to_le16(0);
4624+ if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name))
4625+ return fi;
4626+ else
4627 {
4628- udf_release_data(bh);
4629- cfi->descTag.tagSerialNum = cpu_to_le16(1);
4630- cfi->fileVersionNum = cpu_to_le16(1);
4631- cfi->fileCharacteristics = 0;
4632- cfi->lengthFileIdent = namelen;
4633- cfi->lengthOfImpUse = cpu_to_le16(0);
4634- if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name))
4635- return fi;
4636- else
4637- {
4638- *err = -EIO;
4639- return NULL;
4640- }
4641+ *err = -EIO;
4642+ return NULL;
4643 }
4644 }
4645+ }
4646
4647- if (!lfi || !dentry)
4648- continue;
4649+ if (!lfi || !dentry)
4650+ continue;
4651
4652- if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)) &&
4653- udf_match(flen, fname, &(dentry->d_name)))
4654- {
4655- if (fibh->sbh != fibh->ebh)
4656- udf_release_data(fibh->ebh);
4657- udf_release_data(fibh->sbh);
4658- udf_release_data(bh);
4659- *err = -EEXIST;
4660- return NULL;
4661- }
4662- }
4663- }
4664- else
4665- {
4666- block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0);
4667- if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
4668+ if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)) &&
4669+ udf_match(flen, fname, &(dentry->d_name)))
4670 {
4671- fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
4672- fibh->soffset = fibh->eoffset = udf_file_entry_alloc_offset(dir);
4673- }
4674- else
4675- {
4676- fibh->sbh = fibh->ebh = NULL;
4677- fibh->soffset = fibh->eoffset = sb->s_blocksize;
4678+ if (fibh->sbh != fibh->ebh)
4679+ udf_release_data(fibh->ebh);
4680+ udf_release_data(fibh->sbh);
4681+ udf_release_data(bh);
4682+ *err = -EEXIST;
4683+ return NULL;
4684 }
4685 }
4686
4687+add:
4688 f_pos += nfidlen;
4689
4690 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB &&
4691@@ -533,13 +541,17 @@
4692 fibh->sbh = fibh->ebh;
4693 }
4694
4695- if (UDF_I_ALLOCTYPE(dir) != ICBTAG_FLAG_AD_IN_ICB)
4696+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
4697+ {
4698+ block = UDF_I_LOCATION(dir).logicalBlockNum;
4699+ fi = (struct fileIdentDesc *)(UDF_I_DATA(dir) + fibh->soffset - udf_ext0_offset(dir));
4700+ }
4701+ else
4702+ {
4703 block = eloc.logicalBlockNum + ((elen - 1) >>
4704 dir->i_sb->s_blocksize_bits);
4705- else
4706- block = UDF_I_LOCATION(dir).logicalBlockNum;
4707-
4708- fi = (struct fileIdentDesc *)(fibh->sbh->b_data + fibh->soffset);
4709+ fi = (struct fileIdentDesc *)(fibh->sbh->b_data + fibh->soffset);
4710+ }
4711 }
4712 else
4713 {
4714@@ -784,7 +796,10 @@
4715 f_pos = (udf_ext0_offset(dir) >> 2);
4716
4717 fibh.soffset = fibh.eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
4718- if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
4719+
4720+ if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
4721+ fibh.sbh = fibh.ebh = NULL;
4722+ else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
4723 &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
4724 {
4725 offset >>= dir->i_sb->s_blocksize_bits;
4726@@ -798,6 +813,12 @@
4727 }
4728 else
4729 offset = 0;
4730+
4731+ if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
4732+ {
4733+ udf_release_data(bh);
4734+ return 0;
4735+ }
4736 }
4737 else
4738 {
4739@@ -805,8 +826,6 @@
4740 return 0;
4741 }
4742
4743- if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
4744- return 0;
4745
4746 while ( (f_pos < size) )
4747 {
4748@@ -823,6 +842,9 @@
4749
4750 if (cfi.lengthFileIdent && (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) == 0)
4751 {
4752+ if (fibh.sbh != fibh.ebh)
4753+ udf_release_data(fibh.ebh);
4754+ udf_release_data(fibh.sbh);
4755 udf_release_data(bh);
4756 return 0;
4757 }
4758diff -u -r -N ../../linus/2.4/linux/fs/udf/super.c linux/fs/udf/super.c
4759--- ../../linus/2.4/linux/fs/udf/super.c Tue Aug 6 21:16:21 2002
4760+++ linux/fs/udf/super.c Thu Aug 8 20:44:32 2002
4761@@ -104,6 +104,7 @@
4762 write_inode: udf_write_inode,
4763 put_inode: udf_put_inode,
4764 delete_inode: udf_delete_inode,
4765+ clear_inode: udf_clear_inode,
4766 put_super: udf_put_super,
4767 write_super: udf_write_super,
4768 statfs: udf_statfs,
4769@@ -313,10 +314,6 @@
4770 UDF_SB(sb)->s_gid = uopt.gid;
4771 UDF_SB(sb)->s_umask = uopt.umask;
4772
4773-#if UDFFS_RW != 1
4774- *flags |= MS_RDONLY;
4775-#endif
4776-
4777 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
4778 return 0;
4779 if (*flags & MS_RDONLY)
4780@@ -1373,10 +1370,6 @@
4781
4782 memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info));
4783
4784-#if UDFFS_RW != 1
4785- sb->s_flags |= MS_RDONLY;
4786-#endif
4787-
4788 if (!udf_parse_options((char *)options, &uopt))
4789 goto error_out;
4790
4791@@ -1488,8 +1481,8 @@
4792 {
4793 timestamp ts;
4794 udf_time_to_stamp(&ts, UDF_SB_RECORDTIME(sb), 0);
4795- udf_info("UDF %s-%s (%s) Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
4796- UDFFS_VERSION, UDFFS_RW ? "rw" : "ro", UDFFS_DATE,
4797+ udf_info("UDF %s (%s) Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
4798+ UDFFS_VERSION, UDFFS_DATE,
4799 UDF_SB_VOLIDENT(sb), ts.year, ts.month, ts.day, ts.hour, ts.minute,
4800 ts.typeAndTimezone);
4801 }
4802diff -u -r -N ../../linus/2.4/linux/fs/udf/symlink.c linux/fs/udf/symlink.c
4803--- ../../linus/2.4/linux/fs/udf/symlink.c Tue Aug 6 21:16:21 2002
4804+++ linux/fs/udf/symlink.c Thu Aug 8 20:44:32 2002
4805@@ -87,14 +87,7 @@
4806
4807 lock_kernel();
4808 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
4809- {
4810- bh = udf_tread(inode->i_sb, inode->i_ino);
4811-
4812- if (!bh)
4813- goto out;
4814-
4815- symlink = bh->b_data + udf_file_entry_alloc_offset(inode);
4816- }
4817+ symlink = UDF_I_DATA(inode) + UDF_I_LENALLOC(inode);
4818 else
4819 {
4820 bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
4821diff -u -r -N ../../linus/2.4/linux/fs/udf/truncate.c linux/fs/udf/truncate.c
4822--- ../../linus/2.4/linux/fs/udf/truncate.c Tue Aug 6 21:16:21 2002
4823+++ linux/fs/udf/truncate.c Thu Aug 8 20:44:32 2002
4824@@ -57,7 +57,9 @@
4825 if (last_block - first_block > 0)
4826 {
4827 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
4828+ {
4829 mark_inode_dirty(inode);
4830+ }
4831
4832 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
4833 udf_free_blocks(inode->i_sb, inode, eloc, first_block, last_block - first_block);
4834@@ -94,7 +96,7 @@
4835 else
4836 lenalloc = extoffset - adsize;
4837
4838- if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
4839+ if (!bh)
4840 lenalloc -= udf_file_entry_alloc_offset(inode);
4841 else
4842 lenalloc -= sizeof(struct allocExtDesc);
4843@@ -107,15 +109,15 @@
4844 extoffset = 0;
4845 if (lelen)
4846 {
4847- if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
4848- memset(bh->b_data, 0x00, udf_file_entry_alloc_offset(inode));
4849+ if (!bh)
4850+ BUG();
4851 else
4852 memset(bh->b_data, 0x00, sizeof(struct allocExtDesc));
4853 udf_free_blocks(inode->i_sb, inode, bloc, 0, lelen);
4854 }
4855 else
4856 {
4857- if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
4858+ if (!bh)
4859 {
4860 UDF_I_LENALLOC(inode) = lenalloc;
4861 mark_inode_dirty(inode);
4862@@ -134,9 +136,9 @@
4863 }
4864
4865 udf_release_data(bh);
4866- bh = NULL;
4867-
4868+ extoffset = sizeof(struct allocExtDesc);
4869 bloc = eloc;
4870+ bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, bloc, 0));
4871 if (elen)
4872 lelen = (elen + inode->i_sb->s_blocksize - 1) >>
4873 inode->i_sb->s_blocksize_bits;
4874@@ -152,15 +154,15 @@
4875
4876 if (lelen)
4877 {
4878- if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
4879- memset(bh->b_data, 0x00, udf_file_entry_alloc_offset(inode));
4880+ if (!bh)
4881+ BUG();
4882 else
4883 memset(bh->b_data, 0x00, sizeof(struct allocExtDesc));
4884 udf_free_blocks(inode->i_sb, inode, bloc, 0, lelen);
4885 }
4886 else
4887 {
4888- if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
4889+ if (!bh)
4890 {
4891 UDF_I_LENALLOC(inode) = lenalloc;
4892 mark_inode_dirty(inode);
4893diff -u -r -N ../../linus/2.4/linux/fs/udf/udf_i.h linux/fs/udf/udf_i.h
4894--- ../../linus/2.4/linux/fs/udf/udf_i.h Tue Aug 6 21:16:21 2002
4895+++ linux/fs/udf/udf_i.h Thu Aug 8 20:44:32 2002
4896@@ -9,14 +9,17 @@
4897 #define UDF_I_LENEXTENTS(X) ( UDF_I(X)->i_lenExtents )
4898 #define UDF_I_UNIQUE(X) ( UDF_I(X)->i_unique )
4899 #define UDF_I_ALLOCTYPE(X) ( UDF_I(X)->i_alloc_type )
4900-#define UDF_I_EXTENDED_FE(X) ( UDF_I(X)->i_extended_fe )
4901-#define UDF_I_STRAT4096(X) ( UDF_I(X)->i_strat_4096 )
4902-#define UDF_I_NEW_INODE(X) ( UDF_I(X)->i_new_inode )
4903+#define UDF_I_EFE(X) ( UDF_I(X)->i_efe )
4904+#define UDF_I_USE(X) ( UDF_I(X)->i_use )
4905+#define UDF_I_STRAT4096(X) ( UDF_I(X)->i_strat4096 )
4906 #define UDF_I_NEXT_ALLOC_BLOCK(X) ( UDF_I(X)->i_next_alloc_block )
4907 #define UDF_I_NEXT_ALLOC_GOAL(X) ( UDF_I(X)->i_next_alloc_goal )
4908 #define UDF_I_UMTIME(X) ( UDF_I(X)->i_umtime )
4909 #define UDF_I_UCTIME(X) ( UDF_I(X)->i_uctime )
4910 #define UDF_I_CRTIME(X) ( UDF_I(X)->i_crtime )
4911 #define UDF_I_UCRTIME(X) ( UDF_I(X)->i_ucrtime )
4912+#define UDF_I_SAD(X) ( UDF_I(X)->i_ext.i_sad )
4913+#define UDF_I_LAD(X) ( UDF_I(X)->i_ext.i_lad )
4914+#define UDF_I_DATA(X) ( UDF_I(X)->i_ext.i_data )
4915
4916 #endif /* !defined(_LINUX_UDF_I_H) */
4917diff -u -r -N ../../linus/2.4/linux/fs/udf/udfdecl.h linux/fs/udf/udfdecl.h
4918--- ../../linus/2.4/linux/fs/udf/udfdecl.h Tue Aug 6 21:16:21 2002
4919+++ linux/fs/udf/udfdecl.h Thu Aug 8 20:44:32 2002
4920@@ -34,9 +34,11 @@
4921 #define CURRENT_UTIME (xtime.tv_usec)
4922
4923 #define udf_file_entry_alloc_offset(inode)\
4924- ((UDF_I_EXTENDED_FE(inode) ?\
4925- sizeof(struct extendedFileEntry) :\
4926- sizeof(struct fileEntry)) + UDF_I_LENEATTR(inode))
4927+ (UDF_I_USE(inode) ?\
4928+ sizeof(struct unallocSpaceEntry) :\
4929+ ((UDF_I_EFE(inode) ?\
4930+ sizeof(struct extendedFileEntry) :\
4931+ sizeof(struct fileEntry)) + UDF_I_LENEATTR(inode)))
4932
4933 #define udf_ext0_offset(inode)\
4934 (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB ?\
4935@@ -113,6 +115,7 @@
4936 extern void udf_read_inode(struct inode *);
4937 extern void udf_put_inode(struct inode *);
4938 extern void udf_delete_inode(struct inode *);
4939+extern void udf_clear_inode(struct inode *);
4940 extern void udf_write_inode(struct inode *, int);
4941 extern long udf_block_map(struct inode *, long);
4942 extern int8_t inode_bmap(struct inode *, int, lb_addr *, uint32_t *, lb_addr *, uint32_t *, uint32_t *, struct buffer_head **);
4943@@ -199,8 +202,8 @@
4944 /* directory.c */
4945 extern struct fileIdentDesc * udf_get_fileident(void * buffer, int bufsize, int * offset);
4946 extern extent_ad * udf_get_fileextent(void * buffer, int bufsize, int * offset);
4947-extern long_ad * udf_get_filelongad(void * buffer, int bufsize, int * offset, int);
4948-extern short_ad * udf_get_fileshortad(void * buffer, int bufsize, int * offset, int);
4949+extern long_ad * udf_get_filelongad(uint8_t *, int, int *, int);
4950+extern short_ad * udf_get_fileshortad(uint8_t *, int, int *, int);
4951 extern uint8_t * udf_get_filead(struct fileEntry *, uint8_t *, int, int, int, int *);
4952
4953 #endif /* __UDF_DECL_H */
4954diff -u -r -N ../../linus/2.4/linux/include/linux/cdrom.h linux/include/linux/cdrom.h
4955--- ../../linus/2.4/linux/include/linux/cdrom.h Tue Aug 6 21:17:10 2002
4956+++ linux/include/linux/cdrom.h Tue Aug 6 21:24:33 2002
e5ff5fa6 4957@@ -494,6 +494,7 @@
4958 /* Mode page codes for mode sense/set */
4959 #define GPMODE_R_W_ERROR_PAGE 0x01
4960 #define GPMODE_WRITE_PARMS_PAGE 0x05
4961+#define GPMODE_WCACHING_PAGE 0x08
4962 #define GPMODE_AUDIO_CTL_PAGE 0x0e
4963 #define GPMODE_POWER_PAGE 0x1a
4964 #define GPMODE_FAULT_FAIL_PAGE 0x1c
4965@@ -504,6 +505,11 @@
4966 * of MODE_SENSE_POWER_PAGE */
4967 #define GPMODE_CDROM_PAGE 0x0d
4968
4969+#define GPMODE_PAGE_CURRENT 0
4970+#define GPMODE_PAGE_CHANGE 1
4971+#define GPMODE_PAGE_DEFAULT 2
4972+#define GPMODE_PAGE_SAVE 3
4973+
4974
4975
4976 /* DVD struct types */
f87f0d90 4977diff -u -r -N ../../linus/2.4/linux/include/linux/fs.h linux/include/linux/fs.h
4978--- ../../linus/2.4/linux/include/linux/fs.h Tue Aug 6 21:17:11 2002
4979+++ linux/include/linux/fs.h Tue Aug 6 21:24:33 2002
4980@@ -894,6 +894,7 @@
e5ff5fa6 4981 int (*remount_fs) (struct super_block *, int *, char *);
4982 void (*clear_inode) (struct inode *);
4983 void (*umount_begin) (struct super_block *);
4984+ int (*relocate_blocks) (struct super_block *, unsigned long, unsigned long *);
4985
4986 /* Following are for knfsd to interact with "interesting" filesystems
4987 * Currently just reiserfs, but possibly FAT and others later
f87f0d90 4988diff -u -r -N ../../linus/2.4/linux/include/linux/major.h linux/include/linux/major.h
4989--- ../../linus/2.4/linux/include/linux/major.h Tue Aug 6 21:17:15 2002
4990+++ linux/include/linux/major.h Tue Aug 6 21:24:45 2002
e5ff5fa6 4991@@ -108,6 +108,8 @@
4992 #define SPECIALIX_NORMAL_MAJOR 75
4993 #define SPECIALIX_CALLOUT_MAJOR 76
4994
4995+#define PACKET_MAJOR 97
4996+
4997 #define COMPAQ_CISS_MAJOR 104
4998 #define COMPAQ_CISS_MAJOR1 105
4999 #define COMPAQ_CISS_MAJOR2 106
f87f0d90 5000diff -u -r -N ../../linus/2.4/linux/include/linux/pktcdvd.h linux/include/linux/pktcdvd.h
5001--- ../../linus/2.4/linux/include/linux/pktcdvd.h Thu Jan 1 01:00:00 1970
5002+++ linux/include/linux/pktcdvd.h Wed Aug 7 21:55:25 2002
5003@@ -0,0 +1,215 @@
e5ff5fa6 5004+/*
5005+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
5006+ *
5007+ * May be copied or modified under the terms of the GNU General Public
5008+ * License. See linux/COPYING for more information.
5009+ *
5010+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
5011+ * DVD-RW devices.
5012+ *
5013+ */
5014+#ifndef __PKTCDVD_H
5015+#define __PKTCDVD_H
5016+
5017+/*
5018+ * 1 for normal debug messages, 2 is very verbose. 0 to turn it off.
5019+ */
5020+#define PACKET_DEBUG 1
5021+
5022+#define MAX_WRITERS 8
5023+
f87f0d90 5024+#define STACKED_BH_POOL_SIZE 64
5025+
e5ff5fa6 5026+/*
5027+ * use drive write caching -- we need deferred error handling to be
5028+ * able to successfully recover with this option (drive will return good
5029+ * status as soon as the cdb is validated).
5030+ */
5031+#if defined(CONFIG_CDROM_PKTCDVD_WCACHE)
5032+#warning Enabling write caching, use at your own risk
5033+#define USE_WCACHING 1
5034+#else
5035+#define USE_WCACHING 0
5036+#endif
5037+
5038+/*
5039+ * No user-serviceable parts beyond this point ->
5040+ */
5041+
5042+#if PACKET_DEBUG
5043+#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
5044+#else
5045+#define DPRINTK(fmt, args...)
5046+#endif
5047+
5048+#if PACKET_DEBUG > 1
5049+#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
5050+#else
5051+#define VPRINTK(fmt, args...)
5052+#endif
5053+
5054+#define PKT_BUF_LIST 0x89
5055+
5056+/*
5057+ * device types
5058+ */
5059+#define PACKET_CDR 1
5060+#define PACKET_CDRW 2
5061+#define PACKET_DVDR 3
5062+#define PACKET_DVDRW 4
5063+
5064+/*
5065+ * flags
5066+ */
f87f0d90 5067+#define PACKET_WRITEABLE 1 /* pd is writeable */
e5ff5fa6 5068+#define PACKET_NWA_VALID 2 /* next writeable address valid */
5069+#define PACKET_LRA_VALID 3 /* last recorded address valid */
5070+#define PACKET_READONLY 4 /* read only pd */
5071+#define PACKET_RECOVERY 5 /* rq recovery in progress */
5072+#define PACKET_RQ 6 /* current rq is set */
5073+#define PACKET_BUSY 7 /* current rq is being processed */
5074+
5075+/*
5076+ * Disc status -- from READ_DISC_INFO
5077+ */
5078+#define PACKET_DISC_EMPTY 0
5079+#define PACKET_DISC_INCOMPLETE 1
5080+#define PACKET_DISC_COMPLETE 2
5081+#define PACKET_DISC_OTHER 3
5082+
5083+/*
5084+ * write type, and corresponding data block type
5085+ */
5086+#define PACKET_MODE1 1
5087+#define PACKET_MODE2 2
5088+#define PACKET_BLOCK_MODE1 8
5089+#define PACKET_BLOCK_MODE2 10
5090+
5091+/*
5092+ * Last session/border status
5093+ */
5094+#define PACKET_SESSION_EMPTY 0
5095+#define PACKET_SESSION_INCOMPLETE 1
5096+#define PACKET_SESSION_RESERVED 2
5097+#define PACKET_SESSION_COMPLETE 3
5098+
5099+#define PACKET_MCN "4a656e734178626f65323030300000"
5100+
5101+#undef PACKET_USE_LS
5102+
5103+/*
5104+ * special requests
5105+ */
5106+#define PKT_THROTTLE_SPEED 1
5107+
5108+#define PKT_TRAY_UNLOCK 0
5109+#define PKT_TRAY_LOCK 1
5110+
5111+/*
5112+ * Very crude stats for now
5113+ */
5114+struct packet_stats
5115+{
5116+ unsigned long bh_s;
5117+ unsigned long bh_e;
5118+ unsigned long bh_cache_hits;
5119+ unsigned long page_cache_hits;
5120+ unsigned long secs_w;
5121+ unsigned long secs_r;
5122+};
5123+
5124+/*
5125+ * packet ioctls
5126+ */
5127+#define PACKET_IOCTL_MAGIC ('X')
5128+#define PACKET_GET_STATS _IOR(PACKET_IOCTL_MAGIC, 0, struct packet_stats)
5129+#define PACKET_SETUP_DEV _IOW(PACKET_IOCTL_MAGIC, 1, unsigned int)
5130+#define PACKET_TEARDOWN_DEV _IOW(PACKET_IOCTL_MAGIC, 2, unsigned int)
5131+
5132+#ifdef __KERNEL__
5133+#include <linux/blkdev.h>
5134+#include <linux/completion.h>
5135+
5136+struct packet_settings
5137+{
5138+ __u8 size; /* packet size in frames */
5139+ __u8 fp; /* fixed packets */
5140+ __u8 link_loss; /* the rest is specified
5141+ * as per Mt Fuji */
5142+ __u8 write_type;
5143+ __u8 track_mode;
5144+ __u8 block_mode;
5145+};
5146+
5147+struct packet_cdrw
5148+{
5149+ struct buffer_head *bhlist; /* string of bhs */
5150+ atomic_t free_bh;
5151+ merge_request_fn *front_merge_fn;
5152+ merge_request_fn *back_merge_fn;
5153+ merge_requests_fn *merge_requests_fn;
5154+ request_queue_t r_queue;
5155+ void *queuedata;
5156+ pid_t pid;
5157+ struct completion thr_compl;
5158+};
5159+
5160+struct pktcdvd_device
5161+{
5162+ struct block_device *bdev;
5163+ kdev_t dev; /* dev attached */
5164+ kdev_t pkt_dev; /* our dev */
5165+ char name[20];
5166+ struct cdrom_device_info *cdi; /* cdrom matching dev */
5167+ struct packet_settings settings;
5168+ struct packet_stats stats;
5169+ atomic_t refcnt;
5170+ __u8 speed; /* cur write speed */
5171+ unsigned long offset; /* start offset */
5172+ __u8 mode_offset; /* 0 / 8 */
5173+ __u8 type;
5174+ unsigned long flags;
5175+ __u8 disc_status;
5176+ __u8 track_status; /* last one */
5177+ __u32 nwa; /* next writable address */
5178+ __u32 lra; /* last recorded address */
5179+ spinlock_t lock;
5180+ struct packet_cdrw cdrw;
5181+ wait_queue_head_t wqueue;
5182+ struct request *rq;
5183+ atomic_t wrqcnt;
f87f0d90 5184+ struct buffer_head *stacked_bhlist;
5185+ int stacked_bhcnt;
e5ff5fa6 5186+
5187+ struct semaphore cache_sync_mutex;
5188+ int unflushed_writes;
5189+
5190+ make_request_fn *make_request_fn;
5191+};
5192+
5193+/*
5194+ * following possibly belongs in cdrom.h
5195+ */
5196+
5197+struct cdvd_capacity
5198+{
5199+ __u32 lba;
5200+ __u32 block_length;
5201+};
5202+
5203+void pkt_elevator_merge_req(struct request *rq, struct request *nxt) {}
5204+void pkt_elevator_cleanup(request_queue_t *q, struct request *rq, int count) {}
5205+
5206+#define ELEVATOR_PKTCDVD \
5207+((elevator_t) { \
5208+ 0, /* not used */ \
5209+ 0, /* not used */ \
5210+ \
5211+ pkt_elevator_merge, /* elevator_merge_fn */ \
5212+ pkt_elevator_cleanup, \
5213+ pkt_elevator_merge_req, \
5214+ })
5215+
5216+#endif /* __KERNEL__ */
5217+
5218+#endif /* __PKTCDVD_H */
f87f0d90 5219diff -u -r -N ../../linus/2.4/linux/include/linux/udf_fs.h linux/include/linux/udf_fs.h
5220--- ../../linus/2.4/linux/include/linux/udf_fs.h Tue Aug 6 21:17:20 2002
5221+++ linux/include/linux/udf_fs.h Thu Aug 8 20:44:32 2002
5222@@ -30,7 +30,6 @@
5223 * HISTORY
5224 *
5225 */
5226-#include <linux/config.h>
5227
5228 #ifndef _UDF_FS_H
5229 #define _UDF_FS_H 1
5230@@ -38,19 +37,9 @@
5231 #define UDF_PREALLOCATE
5232 #define UDF_DEFAULT_PREALLOC_BLOCKS 8
5233
5234-#define UDFFS_DATE "2002/03/11"
5235+#define UDFFS_DATE "2002/03/14"
5236 #define UDFFS_VERSION "0.9.6"
5237
5238-#if !defined(UDFFS_RW)
5239-
5240-#if defined(CONFIG_UDF_RW)
5241-#define UDFFS_RW 1
5242-#else /* !defined(CONFIG_UDF_RW) */
5243-#define UDFFS_RW 0
5244-#endif /* defined(CONFIG_UDF_RW) */
5245-
5246-#endif /* !defined(UDFFS_RW) */
5247-
5248 #define UDFFS_DEBUG
5249
5250 #ifdef UDFFS_DEBUG
5251@@ -67,4 +56,12 @@
5252 #define udf_info(f, a...) \
5253 printk (KERN_INFO "UDF-fs INFO " f, ##a);
5254
5255+#ifdef __KERNEL__
5256+
5257+#ifndef LINUX_VERSION_CODE
5258+#include <linux/version.h>
5259+#endif
5260+
5261+#endif /* __KERNEL__ */
5262+
5263 #endif /* _UDF_FS_H */
5264diff -u -r -N ../../linus/2.4/linux/include/linux/udf_fs_i.h linux/include/linux/udf_fs_i.h
5265--- ../../linus/2.4/linux/include/linux/udf_fs_i.h Tue Aug 6 21:17:20 2002
5266+++ linux/include/linux/udf_fs_i.h Thu Aug 8 20:44:32 2002
5267@@ -23,30 +23,49 @@
5268 #ifndef _ECMA_167_H
5269 typedef struct
5270 {
5271- __u32 logicalBlockNum;
5272- __u16 partitionReferenceNum;
5273+ __u32 logicalBlockNum;
5274+ __u16 partitionReferenceNum;
5275 } __attribute__ ((packed)) lb_addr;
5276+
5277+typedef struct
5278+{
5279+ __u32 extLength;
5280+ __u32 extPosition;
5281+} __attribute__ ((packed)) short_ad;
5282+
5283+typedef struct
5284+{
5285+ __u32 extLength;
5286+ lb_addr extLocation;
5287+ __u8 impUse[6];
5288+} __attribute__ ((packed)) long_ad;
5289 #endif
5290
5291 struct udf_inode_info
5292 {
5293- long i_umtime;
5294- long i_uctime;
5295- long i_crtime;
5296- long i_ucrtime;
5297+ long i_umtime;
5298+ long i_uctime;
5299+ long i_crtime;
5300+ long i_ucrtime;
5301 /* Physical address of inode */
5302- lb_addr i_location;
5303- __u64 i_unique;
5304- __u32 i_lenEAttr;
5305- __u32 i_lenAlloc;
5306- __u64 i_lenExtents;
5307- __u32 i_next_alloc_block;
5308- __u32 i_next_alloc_goal;
5309- unsigned i_alloc_type : 3;
5310- unsigned i_extended_fe : 1;
5311- unsigned i_strat_4096 : 1;
5312- unsigned i_new_inode : 1;
5313- unsigned reserved : 26;
5314+ lb_addr i_location;
5315+ __u64 i_unique;
5316+ __u32 i_lenEAttr;
5317+ __u32 i_lenAlloc;
5318+ __u64 i_lenExtents;
5319+ __u32 i_next_alloc_block;
5320+ __u32 i_next_alloc_goal;
5321+ unsigned i_alloc_type : 3;
5322+ unsigned i_efe : 1;
5323+ unsigned i_use : 1;
5324+ unsigned i_strat4096 : 1;
5325+ unsigned reserved : 26;
5326+ union
5327+ {
5328+ short_ad *i_sad;
5329+ long_ad *i_lad;
5330+ __u8 *i_data;
5331+ } i_ext;
5332 };
5333
5334 #endif
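
For reference, below is a minimal user-space sketch of how the packet ioctls declared in the new include/linux/pktcdvd.h hunk above (PACKET_SETUP_DEV, PACKET_GET_STATS, and struct packet_stats) might be exercised. It is not part of the patch: the control node name /dev/pktcdvd0, the 16-bit major/minor encoding of the CD device argument, and the availability of the header from user space are assumptions made for illustration only.

/*
 * Hypothetical sketch, not part of the patch: bind a packet device to a
 * CD writer and read back the driver's statistics.  Device node names
 * and the kdev_t encoding below are assumptions.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <linux/pktcdvd.h>

int main(int argc, char **argv)
{
	struct stat st;
	struct packet_stats stats;
	unsigned int cd_dev;
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s /dev/pktcdvd0 /dev/scd0\n", argv[0]);
		return 1;
	}

	/* Resolve the writer's device number (assumed 2.4-style 16-bit kdev_t). */
	if (stat(argv[2], &st) < 0 || !S_ISBLK(st.st_mode)) {
		perror("stat cd device");
		return 1;
	}
	cd_dev = (major(st.st_rdev) << 8) | minor(st.st_rdev);

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open packet device");
		return 1;
	}

	/* Bind the packet device to the CD writer ... */
	if (ioctl(fd, PACKET_SETUP_DEV, cd_dev) < 0)
		perror("PACKET_SETUP_DEV");

	/* ... and fetch the "very crude stats" exposed by the driver. */
	memset(&stats, 0, sizeof(stats));
	if (ioctl(fd, PACKET_GET_STATS, &stats) == 0)
		printf("bh_s=%lu bh_e=%lu secs_w=%lu secs_r=%lu\n",
		       stats.bh_s, stats.bh_e, stats.secs_w, stats.secs_r);

	close(fd);
	return 0;
}

Undoing the mapping would presumably go through PACKET_TEARDOWN_DEV with the same unsigned int argument, per the _IOW declaration in the header.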