1 diff -u -r -N ../../linus/2.4/linux/Documentation/Configure.help linux/Documentation/Configure.help
2 --- ../../linus/2.4/linux/Documentation/Configure.help Tue Aug 6 21:13:55 2002
3 +++ linux/Documentation/Configure.help Tue Aug 6 21:21:50 2002
5 say M here and read <file:Documentation/modules.txt>. The module
6 will be called ide-cd.o.
9 +Packet writing on CD/DVD media (EXPERIMENTAL)
11 + If you have a CDROM drive that supports packet writing, say Y to
12 + include preliminary support. It should work with any MMC/Mt Fuji
13 +	  compliant ATAPI or SCSI drive, which is just about any newer CD
16 + Currently only writing to CD-RW discs is possible.
18 + If you want to compile the driver as a module ( = code which can be
19 + inserted in and removed from the running kernel whenever you want),
20 + say M here and read Documentation/modules.txt. The module will be
24 +CONFIG_CDROM_PKTCDVD_WCACHE
25 + If enabled, write caching will be set for the CD-R/W device. For now
26 + this option is dangerous unless the CD-RW media is known good, as we
27 + don't do deferred write error handling yet.
29 Include IDE/ATAPI TAPE support
30 CONFIG_BLK_DEV_IDETAPE
31 If you have an IDE tape drive using the ATAPI protocol, say Y.
32 diff -u -r -N ../../linus/2.4/linux/arch/sparc64/kernel/ioctl32.c linux/arch/sparc64/kernel/ioctl32.c
33 --- ../../linus/2.4/linux/arch/sparc64/kernel/ioctl32.c Tue Aug 6 21:14:27 2002
34 +++ linux/arch/sparc64/kernel/ioctl32.c Tue Aug 6 21:22:04 2002
36 #include <linux/atm_tcp.h>
37 #include <linux/sonet.h>
38 #include <linux/atm_suni.h>
39 +#include <linux/pktcdvd.h>
40 #include <linux/mtd/mtd.h>
42 #include <net/bluetooth/bluetooth.h>
47 +struct packet_stats32 {
51 + u32 page_cache_hits;
56 +static inline int pkt_getstats(unsigned int fd, unsigned int cmd, unsigned long arg)
58 + struct packet_stats p;
59 + struct packet_stats32 p32;
60 + mm_segment_t old_fs = get_fs();
63 + ret = copy_from_user (&p32, (struct packet_stats32 *)arg, sizeof(struct packet_stats32));
66 +#define P(x) (p.x = (unsigned long)p32.x)
76 + ret = sys_ioctl (fd, cmd, (long)&p);
82 struct hd_geometry32 {
84 unsigned char sectors;
85 @@ -4553,6 +4589,12 @@
86 COMPATIBLE_IOCTL(RNDADDENTROPY)
87 COMPATIBLE_IOCTL(RNDZAPENTCNT)
88 COMPATIBLE_IOCTL(RNDCLEARPOOL)
89 +/* Big X, CDRW Packet Driver */
90 +#if defined(CONFIG_CDROM_PKTCDVD)
91 +COMPATIBLE_IOCTL(PACKET_SETUP_DEV)
92 +COMPATIBLE_IOCTL(PACKET_TEARDOWN_DEV)
93 +HANDLE_IOCTL(PACKET_GET_STATS, pkt_getstats)
94 +#endif /* CONFIG_CDROM_PKTCDVD */
95 /* Bluetooth ioctls */
96 COMPATIBLE_IOCTL(HCIDEVUP)
97 COMPATIBLE_IOCTL(HCIDEVDOWN)
98 diff -u -r -N ../../linus/2.4/linux/drivers/block/Config.in linux/drivers/block/Config.in
99 --- ../../linus/2.4/linux/drivers/block/Config.in Tue Aug 6 21:14:34 2002
100 +++ linux/drivers/block/Config.in Tue Aug 6 21:22:08 2002
102 dep_tristate 'Mylex DAC960/DAC1100 PCI RAID Controller support' CONFIG_BLK_DEV_DAC960 $CONFIG_PCI
103 dep_tristate 'Micro Memory MM5415 Battery Backed RAM support' CONFIG_BLK_DEV_UMEM $CONFIG_PCI $CONFIG_EXPERIMENTAL
105 +tristate 'Packet writing on CD/DVD media' CONFIG_CDROM_PKTCDVD
106 +if [ "$CONFIG_CDROM_PKTCDVD" != "n" ]; then
107 + bool ' Enable write caching' CONFIG_CDROM_PKTCDVD_WCACHE n
110 tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
111 dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
113 diff -u -r -N ../../linus/2.4/linux/drivers/block/Makefile linux/drivers/block/Makefile
114 --- ../../linus/2.4/linux/drivers/block/Makefile Tue Aug 6 21:14:34 2002
115 +++ linux/drivers/block/Makefile Tue Aug 6 21:22:08 2002
117 obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
118 obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
119 obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
120 +obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
122 subdir-$(CONFIG_PARIDE) += paride
124 diff -u -r -N ../../linus/2.4/linux/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c
125 --- ../../linus/2.4/linux/drivers/block/ll_rw_blk.c Tue Aug 6 21:14:34 2002
126 +++ linux/drivers/block/ll_rw_blk.c Tue Aug 6 21:22:08 2002
127 @@ -1046,6 +1046,7 @@
128 /* Test device size, when known. */
130 minorsize = blk_size[major][MINOR(bh->b_rdev)];
133 unsigned long maxsector = (minorsize << 1) + 1;
134 unsigned long sector = bh->b_rsector;
135 @@ -1069,6 +1070,7 @@
142 * Resolve the mapping until finished. (drivers are
143 @@ -1270,8 +1272,8 @@
147 - printk("end_request: I/O error, dev %s (%s), sector %lu\n",
148 - kdevname(req->rq_dev), name, req->sector);
149 + printk("end_request: I/O error, cmd %d dev %s (%s), sector %lu\n",
150 + req->cmd, kdevname(req->rq_dev), name, req->sector);
152 if ((bh = req->bh) != NULL) {
153 nsect = bh->b_size >> 9;
154 diff -u -r -N ../../linus/2.4/linux/drivers/block/pktcdvd.c linux/drivers/block/pktcdvd.c
155 --- ../../linus/2.4/linux/drivers/block/pktcdvd.c Thu Jan 1 01:00:00 1970
156 +++ linux/drivers/block/pktcdvd.c Thu Aug 8 20:44:32 2002
159 + * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
161 + * May be copied or modified under the terms of the GNU General Public
162 + * License. See linux/COPYING for more information.
164 + * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
165 + * DVD-RW devices (aka an exercise in block layer masturbation)
168 + * TODO: (circa order of when I will fix it)
169 + * - Only able to write on CD-RW media right now.
170 + * - check host application code on media and set it in write page
171 + * - Generic interface for UDF to submit large packets for variable length
173 + * - interface for UDF <-> packet to negotiate a new location when a write
175 + * - handle OPC, especially for -RW media
177 + * ------------------------------------------------------------------------
179 + * Newer changes -- see ChangeLog
181 + * 0.0.2d (26/10/2000)
182 + * - (scsi) use implicit segment recounting for all hba's
183 + *	- fix speed setting, was consistently off on most drives
184 + * - only print capacity when opening for write
185 + * - fix off-by-two error in getting/setting write+read speed (affected
186 + * reporting as well as actual speed used)
187 + * - possible to enable write caching on drive
188 + * - do ioctl marshalling on sparc64 from Ben Collins <bcollins@debian.org>
189 + * - avoid unaligned access on flags, should have been unsigned long of course
190 + * - fixed missed wakeup in kpacketd
191 + * - b_dev error (two places)
192 + * - fix buffer head b_count bugs
193 + * - fix hole merge bug, where tail could be added twice
194 + * - fsync and invalidate buffers on close
195 + * - check hash table for buffers first before using our own
197 + * - fixed several list races
198 + * - fix proc reporting for more than one device
199 + * - change to O_CREAT for creating devices
200 + * - added media_change hook
201 + * - added free buffers config option
202 + * - pkt_lock_tray fails on failed open (and oopses), remove it. unlock
203 + * is done explicitly in pkt_remove dev anyway.
204 + * - added proper elevator insertion (should probably be part of elevator.c)
205 + * - moved kernel thread info to private device, spawn one for each writer
206 + * - added separate buffer list for dirty packet buffers
207 + * - fixed nasty data corruption bug
208 + * - remember to account request even when we don't gather data for it
209 + * - add ioctl to force wakeup of kernel thread (for debug)
210 + * - fixed packet size setting bug on zero detected
211 + * - changed a lot of the proc reporting to be more readable to "humans"
212 + * - set full speed for read-only opens
214 + * 0.0.2c (08/09/2000)
215 + * - inc usage count of buffer heads
216 + * - add internal buffer pool to avoid deadlock on oom
217 + * - gather data for as many buffers as we have, before initiating write. this
218 + * allows the laser to stay on longer, giving better performance.
219 + * - fix always busy when tray can't be locked
220 + * - remove request duplication nastiness, inject directly into the target
221 + * - adapted to devfs and elevator changes
222 + * - added proc interface
224 + * 0.0.2b (21/06/2000)
225 + * - fix io_request_lock typos (missing '&')
226 + * - grab pkt_sem before invoking pkt_handle_queue
227 + * - SCSI uses queuedata too, mirror that in pd->queuedata (hack)
228 + * - remove SCSI sr debug messages
229 + * - really activate empty block querying (requires cvs UDF, CDRW branch)
230 + * - make sure sync_buffers doesn't consider us, or we can deadlock
231 + * - make sure people don't swap on us (for now ;)
233 + * 0.0.2a (19/06/2000)
234 + * - add kpacketd kernel thread to handle actual data gathering
235 + * - pd->pkt_dev is now real device, not just minor
236 + * - add support for super_operations block_empty fn, to query fs for
237 + * unused blocks that don't need reading
238 + * - "cache" blocks that are contained in the UDF file/dir packet
239 + * - rewrite pkt_gather_data to a one-step solution
240 + * - add private pktcdvd elevator
241 + * - shutdown write access to device upon write failure
242 + * - fix off-by-one bug in capacity
243 + * - setup sourceforge project (packet-cd.sourceforge.net)
244 + * - add more blk ioctls to pkt_ioctl
245 + * - set inactive request queue head
246 + * - change panic calls to BUG, better with kdb
247 + * - have pkt_gather_data check correct block size and kill rq if wrong
249 + * - introduce per-pd queues, simplifies pkt_request
250 + * - store pd in queuedata
252 + *************************************************************************/
254 +#define VERSION_CODE "v0.0.2p 03/03/2002 Jens Axboe (axboe@suse.de)"
256 +#include <linux/config.h>
257 +#include <linux/module.h>
258 +#include <linux/types.h>
259 +#include <linux/kernel.h>
260 +#include <linux/slab.h>
261 +#include <linux/errno.h>
262 +#include <linux/delay.h>
263 +#include <linux/locks.h>
264 +#include <linux/spinlock.h>
265 +#include <linux/interrupt.h>
266 +#include <linux/file.h>
267 +#include <linux/blk.h>
268 +#include <linux/blkpg.h>
269 +#include <linux/cdrom.h>
270 +#include <linux/ide.h>
271 +#include <linux/smp_lock.h>
272 +#include <linux/pktcdvd.h>
273 +#include <linux/kernel_stat.h>
274 +#include <linux/sysrq.h>
276 +#include <asm/unaligned.h>
277 +#include <asm/uaccess.h>
280 + * remove for next version -- for now, disable the mention option in the
283 +#if defined(CONFIG_SCSI_DEBUG_QUEUES)
284 +#error "Don't compile with 'Enable extra checks in new queueing code' enabled"
287 +#define SCSI_IOCTL_SEND_COMMAND 1
290 + * 32 buffers of 2048 bytes
292 +#define PACKET_MAX_SIZE 32
294 +#define NEXT_BH(bh, nbh) \
295 + (((bh)->b_rsector + ((bh)->b_size >> 9)) == (nbh)->b_rsector)
297 +#define BH_IN_ORDER(b1, b2) \
298 + ((b1)->b_rsector < (b2)->b_rsector)
300 +#define CONTIG_BH(b1, b2) \
301 + ((b1)->b_data + (b1)->b_size == (b2)->b_data)
303 +#define ZONE(sector, pd) \
304 + (((sector) + ((pd)->offset)) - (((sector) + ((pd)->offset)) & (((pd)->settings.size - 1))))
306 +static int *pkt_sizes;
307 +static int *pkt_blksize;
308 +static int *pkt_readahead;
309 +static struct pktcdvd_device *pkt_devs;
310 +static struct proc_dir_entry *pkt_proc;
311 +static DECLARE_WAIT_QUEUE_HEAD(pd_bh_wait);
314 + * a bit of a kludge, but we want to be able to pass both real and packet
315 + * dev and get the right one.
317 +static inline struct pktcdvd_device *pkt_find_dev(kdev_t dev)
321 + for (i = 0; i < MAX_WRITERS; i++)
322 + if (pkt_devs[i].dev == dev || pkt_devs[i].pkt_dev == dev)
323 + return &pkt_devs[i];
329 + * The following functions are the plugins to the ll_rw_blk
330 + * layer and decides whether a given request / buffer head can be
331 + * merged. We differ in a couple of ways from "normal" block
334 + * - don't merge when the buffer / request crosses a packet block
336 + * - merge buffer head even though it can't be added directly to the
337 + * front or back of the list. this gives us better performance, since
338 + * what would otherwise require multiple requests can now be handled
339 + * in one (hole merging)
340 + * - at this point its just writes, reads have already been remapped
342 + * The device original merge_ functions are stored in the packet device
346 +static inline int pkt_do_merge(request_queue_t *q, struct request *rq,
347 + struct buffer_head *bh, int max_segs,
348 + merge_request_fn *merge_fn,
349 + struct pktcdvd_device *pd)
351 + void *ptr = q->queuedata;
354 + if (rq->cmd != WRITE)
357 + if (ZONE(rq->sector, pd) != ZONE(bh->b_rsector, pd))
358 + return ELEVATOR_NO_MERGE;
361 + * NOTE: this is done under the io_request_lock/queue_lock, hence
364 + q->queuedata = pd->cdrw.queuedata;
365 + ret = merge_fn(q, rq, bh, max_segs);
366 + q->queuedata = ptr;
370 +static int pkt_front_merge_fn(request_queue_t *q, struct request *rq,
371 + struct buffer_head *bh, int max_segs)
373 + struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
375 + return pkt_do_merge(q, rq, bh, max_segs, pd->cdrw.front_merge_fn, pd);
378 +static int pkt_back_merge_fn(request_queue_t *q, struct request *rq,
379 + struct buffer_head *bh, int max_segs)
381 + struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
383 + return pkt_do_merge(q, rq, bh, max_segs, pd->cdrw.back_merge_fn, pd);
387 + * rules similar to above
389 +static int pkt_merge_requests_fn(request_queue_t *q, struct request *rq,
390 + struct request *nxt, int max_segs)
392 + struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev);
393 + struct packet_cdrw *cdrw = &pd->cdrw;
394 + void *ptr = q->queuedata;
397 + if (ZONE(rq->sector, pd) != ZONE(nxt->sector + nxt->nr_sectors - 1, pd))
400 + q->queuedata = cdrw->queuedata;
401 + ret = cdrw->merge_requests_fn(q, rq, nxt, max_segs);
402 + q->queuedata = ptr;
406 +static int pkt_grow_bhlist(struct pktcdvd_device *pd, int count)
408 + struct packet_cdrw *cdrw = &pd->cdrw;
409 + struct buffer_head *bh;
412 + VPRINTK("grow_bhlist: count=%d\n", count);
414 + while (i < count) {
415 + bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL);
419 + bh->b_data = kmalloc(CD_FRAMESIZE, GFP_KERNEL);
421 + kmem_cache_free(bh_cachep, bh);
424 + bh->b_page = virt_to_page(bh->b_data);
426 + spin_lock_irq(&pd->lock);
427 + bh->b_pprev = &cdrw->bhlist;
428 + bh->b_next = cdrw->bhlist;
430 + spin_unlock_irq(&pd->lock);
432 + bh->b_size = CD_FRAMESIZE;
433 + bh->b_list = PKT_BUF_LIST;
434 + atomic_inc(&cdrw->free_bh);
441 +static int pkt_shrink_bhlist(struct pktcdvd_device *pd, int count)
443 + struct packet_cdrw *cdrw = &pd->cdrw;
444 + struct buffer_head *bh;
447 + VPRINTK("shrink_bhlist: count=%d\n", count);
449 + while ((i < count) && cdrw->bhlist) {
450 + spin_lock_irq(&pd->lock);
452 + cdrw->bhlist = bh->b_next;
453 + spin_unlock_irq(&pd->lock);
454 + if (bh->b_list != PKT_BUF_LIST)
457 + kmem_cache_free(bh_cachep, bh);
458 + atomic_dec(&cdrw->free_bh);
466 + * These functions manage a simple pool of buffer_heads.
468 +static struct buffer_head *pkt_get_stacked_bh(struct pktcdvd_device *pd)
470 + unsigned long flags;
471 + struct buffer_head *bh;
473 + spin_lock_irqsave(&pd->lock, flags);
474 + bh = pd->stacked_bhlist;
476 + pd->stacked_bhlist = bh->b_next;
478 + pd->stacked_bhcnt--;
479 + BUG_ON(pd->stacked_bhcnt < 0);
481 + spin_unlock_irqrestore(&pd->lock, flags);
486 +static void pkt_put_stacked_bh(struct pktcdvd_device *pd, struct buffer_head *bh)
488 + unsigned long flags;
490 + spin_lock_irqsave(&pd->lock, flags);
491 + if (pd->stacked_bhcnt < STACKED_BH_POOL_SIZE) {
492 + bh->b_next = pd->stacked_bhlist;
493 + pd->stacked_bhlist = bh;
494 + pd->stacked_bhcnt++;
497 + spin_unlock_irqrestore(&pd->lock, flags);
499 + kmem_cache_free(bh_cachep, bh);
503 +static void pkt_shrink_stacked_bhlist(struct pktcdvd_device *pd)
505 + struct buffer_head *bh;
507 + while ((bh = pkt_get_stacked_bh(pd)) != NULL) {
508 + kmem_cache_free(bh_cachep, bh);
512 +static int pkt_grow_stacked_bhlist(struct pktcdvd_device *pd)
514 + struct buffer_head *bh;
517 + for (i = 0; i < STACKED_BH_POOL_SIZE; i++) {
518 + bh = kmem_cache_alloc(bh_cachep, GFP_KERNEL);
520 + pkt_shrink_stacked_bhlist(pd);
523 + pkt_put_stacked_bh(pd, bh);
529 +static request_queue_t *pkt_get_queue(kdev_t dev)
531 + struct pktcdvd_device *pd = pkt_find_dev(dev);
534 + return &pd->cdrw.r_queue;
537 +static void pkt_put_buffer(struct buffer_head *bh)
539 + struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_dev)];
540 + unsigned long flags;
542 + if (bh->b_list != PKT_BUF_LIST)
546 + bh->b_reqnext = NULL;
547 + bh->b_end_io = NULL;
549 + spin_lock_irqsave(&pd->lock, flags);
550 + bh->b_next = pd->cdrw.bhlist;
551 + pd->cdrw.bhlist = bh;
552 + spin_unlock_irqrestore(&pd->lock, flags);
553 + atomic_inc(&pd->cdrw.free_bh);
556 +static inline void __pkt_inject_request(request_queue_t *q, struct request *rq)
558 + struct list_head *head = &q->queue_head;
560 + VPRINTK("__pkt_inject_request: list_empty == %d, size=%d, cmd=%d\n",
561 + list_empty(&q->queue_head), rq->bh->b_size >> 9, rq->cmd);
563 + if (list_empty(&q->queue_head))
564 + q->plug_device_fn(q, rq->rq_dev);
566 + list_add_tail(&rq->queue, head);
569 +static void pkt_inject_request(request_queue_t *q, struct request *rq)
571 + spin_lock_irq(&io_request_lock);
572 + __pkt_inject_request(q, rq);
573 + spin_unlock_irq(&io_request_lock);
576 +static inline void __pkt_end_request(struct pktcdvd_device *pd)
579 + clear_bit(PACKET_RQ, &pd->flags);
580 + clear_bit(PACKET_BUSY, &pd->flags);
584 + * io_request_lock must be held and interrupts disabled
586 +static void pkt_end_request(struct pktcdvd_device *pd)
588 + unsigned long flags;
590 + spin_lock_irqsave(&pd->lock, flags);
591 + __pkt_end_request(pd);
592 + spin_unlock_irqrestore(&pd->lock, flags);
596 +static inline void __pkt_kill_request(struct request *rq, int uptodate, char *name)
598 + struct buffer_head *bh = rq->bh, *nbh;
601 + nbh = bh->b_reqnext;
602 + bh->b_reqnext = NULL;
604 + if (bh->b_end_io) {
605 + bh->b_end_io(bh, uptodate);
607 + mark_buffer_clean(bh);
608 + mark_buffer_uptodate(bh, uptodate);
615 + end_that_request_last(rq);
619 +void pkt_kill_request(struct pktcdvd_device *pd, struct request *rq, int ok)
621 + printk("pktcdvd: killing request\n");
622 + spin_lock_irq(&io_request_lock);
623 + __pkt_kill_request(rq, ok, pd->name);
624 + spin_unlock_irq(&io_request_lock);
625 + pkt_end_request(pd);
628 +static void pkt_end_io_read(struct buffer_head *bh, int uptodate)
631 + /* Obviously not correct, but it avoids locking up the kernel */
632 + printk("Ignoring read error on sector:%ld\n", bh->b_rsector);
636 + mark_buffer_uptodate(bh, uptodate);
641 + * if the buffer is already in the buffer cache, grab it if we can lock
644 +static struct buffer_head *pkt_get_hash(kdev_t dev, unsigned long block, int size)
646 + struct buffer_head *bh = NULL;
648 + bh = get_hash_table(dev, block, size);
650 + if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
652 + if (atomic_set_buffer_clean(bh))
654 + SetPageReferenced(bh->b_page);
664 +static inline struct buffer_head *__pkt_get_buffer(struct pktcdvd_device *pd,
665 + unsigned long sector)
667 + struct buffer_head *bh;
669 + if (!atomic_read(&pd->cdrw.free_bh))
672 + atomic_dec(&pd->cdrw.free_bh);
674 + spin_lock_irq(&pd->lock);
675 + bh = pd->cdrw.bhlist;
676 + pd->cdrw.bhlist = bh->b_next;
678 + spin_unlock_irq(&pd->lock);
680 + bh->b_next_free = NULL;
681 + bh->b_prev_free = NULL;
682 + bh->b_this_page = NULL;
683 + bh->b_pprev = NULL;
684 + bh->b_reqnext = NULL;
686 + init_waitqueue_head(&bh->b_wait);
687 + atomic_set(&bh->b_count, 1);
688 + bh->b_list = PKT_BUF_LIST;
689 + bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) | (1 << BH_Req);
690 + bh->b_dev = pd->pkt_dev;
695 +static void pkt_end_io_write(struct buffer_head *, int);
697 +static struct buffer_head *pkt_get_buffer(struct pktcdvd_device *pd,
698 + unsigned long sector, int size)
700 + unsigned long block = sector / (size >> 9);
701 + struct buffer_head *bh;
703 + VPRINTK("get_buffer: sector=%ld, size=%d\n", sector, size);
705 + bh = pkt_get_hash(pd->pkt_dev, block, size);
707 + pd->stats.bh_cache_hits += (size >> 9);
709 + bh = __pkt_get_buffer(pd, sector);
711 + blk_started_io(bh->b_size >> 9);
712 + bh->b_blocknr = block;
713 + bh->b_end_io = pkt_end_io_write;
714 + bh->b_rsector = sector;
715 + bh->b_rdev = pd->dev;
720 + * this rq is done -- io_request_lock must be held and interrupts disabled
722 +static void pkt_rq_end_io(struct pktcdvd_device *pd)
724 + unsigned long flags;
726 + VPRINTK("pkt_rq_end_io: rq=%p, cmd=%d, q=%p\n", pd->rq, pd->rq->cmd, pd->rq->q);
728 + spin_lock_irqsave(&pd->lock, flags);
733 + if (!test_bit(PACKET_RQ, &pd->flags))
734 + printk("pktcdvd: rq_end_io: RQ not set\n");
735 + if (!test_bit(PACKET_BUSY, &pd->flags))
736 + printk("pktcdvd: rq_end_io: BUSY not set\n");
738 + __pkt_end_request(pd);
739 + wake_up(&pd->wqueue);
740 + spin_unlock_irqrestore(&pd->lock, flags);
743 +static inline void pkt_mark_readonly(struct pktcdvd_device *pd, int on)
746 + set_bit(PACKET_READONLY, &pd->flags);
748 + clear_bit(PACKET_READONLY, &pd->flags);
751 +static inline void __pkt_end_io_write(struct pktcdvd_device *pd,
752 + struct buffer_head *bh, int uptodate)
754 + VPRINTK("end_io_write: bh=%ld, uptodate=%d\n", bh->b_blocknr, uptodate);
757 +	 * general Linux bug, no one should clear the BH_Uptodate flag for
758 + * a failed write...
761 + mark_buffer_uptodate(bh, uptodate);
763 + printk("pktcdvd: %s: WRITE error sector %lu\n", pd->name, bh->b_rsector);
765 + set_bit(PACKET_RECOVERY, &pd->flags);
766 + wake_up(&pd->wqueue);
772 + atomic_dec(&pd->wrqcnt);
773 + if (atomic_read(&pd->wrqcnt) == 0) {
781 + * we use this as our default b_end_io handler, since we need to take
782 + * the entire request off the list if just one of the clusters fail.
783 + * later on this should also talk to UDF about relocating blocks -- for
784 + * now we just drop the rq entirely. when doing the relocating we must also
785 + * lock the bh down to ensure that we can easily reconstruct the write should
788 +static void pkt_end_io_write(struct buffer_head *bh, int uptodate)
790 + struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
792 + __pkt_end_io_write(pd, bh, uptodate);
793 + pkt_put_buffer(bh);
796 +static void pkt_end_io_write_stacked(struct buffer_head *bh, int uptodate)
798 + struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
799 + struct buffer_head *rbh = bh->b_private;
801 + __pkt_end_io_write(pd, bh, uptodate);
802 + rbh->b_end_io(rbh, uptodate);
803 + pkt_put_stacked_bh(pd, bh);
804 + wake_up(&pd_bh_wait);
807 +static int pkt_init_rq(struct pktcdvd_device *pd, struct request *rq)
809 + struct buffer_head *bh;
810 + unsigned int cnt, nr_segments;
816 + struct buffer_head *nbh = bh->b_reqnext;
818 + bh->b_rdev = pd->pkt_dev;
821 + * the buffer better be uptodate, mapped, and locked!
823 + if (!buffer_uptodate(bh)) {
824 + printk("%lu not uptodate\n", bh->b_rsector);
826 + * It is not really the pktcdvd drivers problem if
827 + * someone wants to write stale data.
831 + if (!buffer_locked(bh) || !buffer_mapped(bh)) {
832 + printk("%lu, state %lx\n", bh->b_rsector, bh->b_state);
837 + if (!CONTIG_BH(bh, nbh))
841 + * if this happens, do report
843 + if ((bh->b_rsector + (bh->b_size >> 9))!=nbh->b_rsector) {
844 + printk("%lu (%p)-> %lu (%p) (%lu in all)\n",
845 + bh->b_rsector, bh, nbh->b_rsector, nbh,
851 + cnt += bh->b_size >> 9;
855 + rq->nr_segments = rq->nr_hw_segments = nr_segments;
857 + if (cnt != rq->nr_sectors) {
858 + printk("botched request %u (%lu)\n", cnt, rq->nr_sectors);
866 + * really crude stats for now...
868 +static void pkt_account_rq(struct pktcdvd_device *pd, int read, int written,
871 + pd->stats.bh_s += (written / bs);
872 + pd->stats.secs_w += written;
873 + pd->stats.secs_r += read;
877 + * does request span two packets? 0 == yes, 1 == no
879 +static int pkt_one_zone(struct pktcdvd_device *pd, struct request *rq)
881 + if (!pd->settings.size)
884 + if (!(rq->cmd & WRITE))
887 + return ZONE(rq->sector, pd) == ZONE(rq->sector + rq->nr_sectors -1, pd);
890 +#if defined(CONFIG_CDROM_PKTCDVD_BEMPTY)
891 +static void pkt_init_buffer(struct buffer_head *bh)
893 + set_bit(BH_Uptodate, &bh->b_state);
894 + set_bit(BH_Dirty, &bh->b_state);
895 + memset(bh->b_data, 0, bh->b_size);
898 +static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh)
900 + struct super_block *sb;
901 + struct super_operations *sop;
902 + unsigned long packet;
906 + if ((sb = get_super(pd->pkt_dev)) == NULL)
908 + if ((sop = sb->s_op) == NULL)
910 + if (sop->block_empty == NULL)
914 + if (sop->block_empty(sb, bh->b_blocknr, &packet)) {
915 + pkt_init_buffer(pd, bh);
923 +#else /* defined(CONFIG_CDROM_PKTCDVD_BEMPTY) */
925 +static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh)
930 +#endif /* defined(CONFIG_CDROM_PKTCDVD_BEMPTY) */
932 +static int pkt_flush_cache(struct pktcdvd_device *pd);
934 +static void pkt_flush_writes(struct pktcdvd_device *pd)
936 + if (pd->unflushed_writes) {
937 + pd->unflushed_writes = 0;
938 + pkt_flush_cache(pd);
943 + * basically just does a ll_rw_block for the bhs given to use, but we
944 + * don't return until we have them.
946 +static void pkt_read_bh(struct pktcdvd_device *pd, struct buffer_head *bh)
949 + * UDF says it's empty, woohoo
951 + if (pkt_sb_empty(pd, bh))
954 + down(&pd->cache_sync_mutex);
955 + pkt_flush_writes(pd);
956 + generic_make_request(READ, bh);
957 + up(&pd->cache_sync_mutex);
960 +static int pkt_index_bhs(struct buffer_head **bhs)
962 + struct buffer_head *bh;
967 + * now finish pending reads and connect the chain of buffers
970 + while (index < PACKET_MAX_SIZE) {
974 + * pin down private buffers (ie, force I/O to complete)
976 + if (bh->b_end_io == pkt_end_io_read) {
978 + bh->b_end_io = pkt_end_io_write;
981 + if (!buffer_locked(bh))
984 + if (!buffer_uptodate(bh)) {
985 + printk("pktcdvd: read failure (%s, sec %lu)\n",
986 + kdevname(bh->b_rdev), bh->b_rsector);
994 + struct buffer_head *pbh = bhs[index - 1];
996 + if ((pbh->b_rsector + (pbh->b_size >> 9)) != bh->b_rsector) {
997 + printk("%lu -> %lu\n", pbh->b_rsector, bh->b_rsector);
1001 + pbh->b_reqnext = bh;
1011 + bhs[index]->b_reqnext = NULL;
1018 + * fill in the holes of a request
1020 + * Returns: 0, keep 'em coming -- 1, stop queueing
1022 +static int pkt_gather_data(struct pktcdvd_device *pd, struct request *rq)
1024 + unsigned long start_s, end_s, sector;
1025 + struct buffer_head *bh;
1026 + unsigned int sectors, index;
1027 + struct buffer_head *bhs[PACKET_MAX_SIZE];
1029 + memset(bhs, 0, sizeof(bhs));
1032 + * all calculations are done with 512 byte sectors
1034 + sectors = pd->settings.size - rq->nr_sectors;
1035 + start_s = rq->sector - (rq->sector & (pd->settings.size - 1));
1036 + end_s = start_s + pd->settings.size;
1038 + VPRINTK("pkt_gather_data: cmd=%d\n", rq->cmd);
1039 + VPRINTK("need %d sectors for %s\n", sectors, kdevname(pd->dev));
1040 + VPRINTK("from %lu to %lu ", start_s, end_s);
1041 + VPRINTK("(%lu - %lu)\n", rq->bh->b_rsector, rq->bhtail->b_rsector +
1042 + rq->current_nr_sectors);
1045 + * first fill-out map of the buffers we have
1049 + index = (bh->b_rsector & (pd->settings.size - 1)) / (bh->b_size >> 9);
1052 + bh = bh->b_reqnext;
1055 + * make sure to detach from list!
1057 + bhs[index]->b_reqnext = NULL;
1061 + * now get buffers for missing blocks, and schedule reads for them
1063 + for (index = 0, sector = start_s; sector < end_s; index++) {
1069 + bh = pkt_get_buffer(pd, sector, CD_FRAMESIZE);
1072 + rq->nr_sectors += bh->b_size >> 9;
1073 + rq->nr_segments++;
1075 + if (!buffer_uptodate(bh)) {
1076 + bh->b_end_io = pkt_end_io_read;
1077 + pkt_read_bh(pd, bh);
1081 + sector += bh->b_size >> 9;
1084 + index = pkt_index_bhs(bhs);
1091 + rq->bhtail = bhs[index];
1092 + rq->buffer = rq->bh->b_data;
1093 + rq->current_nr_sectors = rq->bh->b_size >> 9;
1094 + rq->hard_nr_sectors = rq->nr_sectors;
1095 + rq->sector = rq->hard_sector = start_s;
1097 + VPRINTK("unlocked last %lu\n", rq->bhtail->b_rsector);
1098 + if (pkt_init_rq(pd, rq)) {
1099 + for (index = 0; index < PACKET_MAX_SIZE; index++) {
1101 + printk("[%d] %lu %d (%p -> %p)\n", index, bh->b_rsector,
1102 + bh->b_size, bh, bh->b_reqnext);
1107 + pkt_account_rq(pd, sectors, rq->nr_sectors, rq->current_nr_sectors);
1112 + if (rq->nr_sectors != pd->settings.size) {
1113 + printk("pktcdvd: request mismatch %lu (should be %u)\n",
1114 + rq->nr_sectors, pd->settings.size);
1121 + * for now, just kill entire request and hope for the best...
1124 + for (index = 0; index < PACKET_MAX_SIZE; index++) {
1126 + buffer_IO_error(bh);
1127 + if (bh->b_list == PKT_BUF_LIST)
1128 + pkt_put_buffer(bh);
1130 + end_that_request_last(pd->rq);
1135 + * Returns: 1, keep 'em coming -- 0, wait for wakeup
1137 +static int pkt_do_request(struct pktcdvd_device *pd, struct request *rq)
1139 + VPRINTK("do_request: bh=%ld, nr_sectors=%ld, size=%d, cmd=%d\n", rq->bh->b_blocknr, rq->nr_sectors, pd->settings.size, rq->cmd);
1142 + * perfect match. the merge_* functions have already made sure that
1143 + * a request doesn't cross a packet boundary, so if the sector
1144 + * count matches it's good.
1146 + if (rq->nr_sectors == pd->settings.size) {
1147 + if (pkt_init_rq(pd, rq)) {
1148 + pkt_kill_request(pd, rq, 0);
1152 + pkt_account_rq(pd, 0, rq->nr_sectors, rq->current_nr_sectors);
1159 + if (rq->nr_sectors > pd->settings.size) {
1160 + printk("pktcdvd: request too big! BUG! %lu\n", rq->nr_sectors);
1164 + return pkt_gather_data(pd, rq);
1168 + * recover a failed write, query for relocation if possible
1170 +static int pkt_start_recovery(struct pktcdvd_device *pd, struct request *rq)
1172 + struct super_block *sb = get_super(pd->pkt_dev);
1173 + struct buffer_head *bhs[PACKET_MAX_SIZE], *bh, *obh;
1174 + unsigned long old_block, new_block, sector;
1177 + if (!sb || !sb->s_op || !sb->s_op->relocate_blocks)
1180 + old_block = (rq->sector & ~(pd->settings.size - 1)) / (rq->bh->b_size >> 9);
1181 + if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
1184 + memset(bhs, 0, sizeof(bhs));
1187 + i = (bh->b_rsector & (pd->settings.size - 1)) / (bh->b_size >> 9);
1190 + bh = bh->b_reqnext;
1191 + bhs[i]->b_reqnext = NULL;
1195 + sector = new_block * (rq->bh->b_size >> 9);
1196 + for (i = 0; i < PACKET_MAX_SIZE; i++) {
1201 + * 1) bh is not there at all
1202 + * 2) bh is there and not ours, get a new one and
1203 + * invalidate this block for the future
1204 + * 3) bh is there and ours, just change the sector
1207 + obh = pkt_get_hash(pd->pkt_dev, new_block,CD_FRAMESIZE);
1208 + bh = __pkt_get_buffer(pd, sector);
1210 + if (buffer_uptodate(obh)) {
1211 + memcpy(bh->b_data, obh->b_data, obh->b_size);
1212 + set_bit(BH_Uptodate, &bh->b_state);
1214 + unlock_buffer(obh);
1218 + } else if (bh->b_list != PKT_BUF_LIST) {
1219 + bhs[i] = pkt_get_buffer(pd, sector, CD_FRAMESIZE);
1220 + memcpy(bhs[i]->b_data, bh->b_data, CD_FRAMESIZE);
1221 + unlock_buffer(bh);
1224 + set_bit(BH_Uptodate, &bh->b_state);
1226 + bh->b_rsector = sector;
1227 + bh->b_blocknr = new_block;
1230 + sector += (bh->b_size >> 9);
1232 + sectors += (bh->b_size >> 9);
1235 + i = pkt_index_bhs(bhs);
1240 + rq->bhtail = bhs[i];
1241 + rq->buffer = rq->bh->b_data;
1242 + rq->current_nr_sectors = rq->bh->b_size >> 9;
1243 + rq->hard_nr_sectors = rq->nr_sectors = sectors;
1244 + rq->sector = rq->hard_sector = rq->bh->b_rsector;
1246 + clear_bit(PACKET_RECOVERY, &pd->flags);
1247 + clear_bit(PACKET_BUSY, &pd->flags);
1251 + printk("pktcdvd: rq recovery not possible\n");
1252 + pkt_kill_request(pd, rq, 0);
1253 + clear_bit(PACKET_RECOVERY, &pd->flags);
1258 + * handle the requests that got queued for this writer
1260 + * returns 0 for busy (already doing something), or 1 for queue new one
1263 +static int pkt_handle_queue(struct pktcdvd_device *pd, request_queue_t *q)
1265 + struct request *rq;
1268 + VPRINTK("handle_queue\n");
1271 + * nothing for us to do
1273 + if (!test_bit(PACKET_RQ, &pd->flags))
1276 + spin_lock_irq(&pd->lock);
1278 + spin_unlock_irq(&pd->lock);
1280 + if (test_bit(PACKET_RECOVERY, &pd->flags))
1281 + if (pkt_start_recovery(pd, rq))
1285 + * already being processed
1287 + if (test_and_set_bit(PACKET_BUSY, &pd->flags))
1295 + printk("handle_queue: pd BUSY+RQ, but no rq\n");
1296 + clear_bit(PACKET_RQ, &pd->flags);
1301 + * reads are shipped directly to cd-rom, so they should not
1304 + if (rq->cmd == READ)
1307 + if ((rq->current_nr_sectors << 9) != CD_FRAMESIZE) {
1308 + pkt_kill_request(pd, rq, 0);
1312 + if (!pkt_do_request(pd, rq)) {
1313 + atomic_add(PACKET_MAX_SIZE, &pd->wrqcnt);
1314 + down(&pd->cache_sync_mutex);
1315 + pkt_inject_request(q, rq);
1316 + pd->unflushed_writes = 1;
1317 + up(&pd->cache_sync_mutex);
1322 + clear_bit(PACKET_BUSY, &pd->flags);
1327 + * kpacketd is woken up, when writes have been queued for one of our
1328 + * registered devices
1330 +static int kcdrwd(void *foobar)
1332 + struct pktcdvd_device *pd = foobar;
1333 + request_queue_t *q, *my_queue;
1336 + * exit_files, mm (move to lazy-tlb, so context switches become
1337 + * extremely cheap) etc
1341 + current->policy = SCHED_OTHER;
1342 + current->static_prio = -20;
1343 + sprintf(current->comm, pd->name);
1345 + spin_lock_irq(&current->sigmask_lock);
1346 + siginitsetinv(&current->blocked, sigmask(SIGKILL));
1347 + flush_signals(current);
1348 + spin_unlock_irq(&current->sigmask_lock);
1350 + q = blk_get_queue(pd->dev);
1351 + my_queue = blk_get_queue(pd->pkt_dev);
1354 + DECLARE_WAITQUEUE(wait, current);
1356 + add_wait_queue(&pd->wqueue, &wait);
1359 + * if PACKET_BUSY is cleared, we can queue
1360 + * another request. otherwise we need to unplug the
1361 + * cd-rom queue and wait for buffers to be flushed
1362 + * (which will then wake us up again when done).
1365 + pkt_handle_queue(pd, q);
1367 + set_current_state(TASK_INTERRUPTIBLE);
1369 + if (test_bit(PACKET_BUSY, &pd->flags))
1372 + spin_lock_irq(&io_request_lock);
1373 + if (list_empty(&my_queue->queue_head)) {
1374 + spin_unlock_irq(&io_request_lock);
1377 + set_current_state(TASK_RUNNING);
1379 + my_queue->request_fn(my_queue);
1380 + spin_unlock_irq(&io_request_lock);
1383 + generic_unplug_device(q);
1386 + remove_wait_queue(&pd->wqueue, &wait);
1391 + if (signal_pending(current))
1396 + complete_and_exit(&pd->cdrw.thr_compl, 0);
1400 +static void pkt_attempt_remerge(struct pktcdvd_device *pd, request_queue_t *q,
1401 + struct request *rq)
1403 + struct request *nxt;
1405 + while (!list_empty(&q->queue_head)) {
1406 + if (rq->nr_sectors == pd->settings.size)
1409 + nxt = blkdev_entry_next_request(&q->queue_head);
1411 + if (ZONE(rq->sector, pd) != ZONE(nxt->sector, pd))
1413 + else if (rq->sector + rq->nr_sectors > nxt->sector)
1416 + rq->nr_sectors = rq->hard_nr_sectors += nxt->nr_sectors;
1417 + rq->bhtail->b_reqnext = nxt->bh;
1418 + rq->bhtail = nxt->bhtail;
1419 + list_del(&nxt->queue);
1420 + blkdev_release_request(nxt);
1425 + * our request function.
1427 + * - reads are just tossed directly to the device, we don't care.
1428 + * - writes, regardless of size, are added as the current pd rq and
1429 + * kcdrwd is woken up to handle it. kcdrwd will also make sure to
1430 + * reinvoke this request handler, once the given request has been
1433 + * Locks: io_request_lock held
1435 + * Notes: all writers have their own queue, so all requests are for the
1438 +static void pkt_request(request_queue_t *q)
1440 + struct pktcdvd_device *pd = (struct pktcdvd_device *) q->queuedata;
1441 + unsigned long flags;
1443 + if (list_empty(&q->queue_head))
1446 + while (!list_empty(&q->queue_head)) {
1447 + struct request *rq = blkdev_entry_next_request(&q->queue_head);
1449 + VPRINTK("pkt_request: cmd=%d, rq=%p, rq->sector=%ld, rq->nr_sectors=%ld\n", rq->cmd, rq, rq->sector, rq->nr_sectors);
1451 + blkdev_dequeue_request(rq);
1453 + rq->rq_dev = pd->dev;
1455 + if (rq->cmd == READ)
1458 + if (test_bit(PACKET_RECOVERY, &pd->flags))
1462 + * paranoia, shouldn't trigger...
1464 + if (!pkt_one_zone(pd, rq)) {
1465 + printk("rq->cmd=%d, rq->sector=%ld, rq->nr_sectors=%ld\n",
1466 + rq->cmd, rq->sector, rq->nr_sectors);
1470 + pkt_attempt_remerge(pd, q, rq);
1472 + spin_lock_irqsave(&pd->lock, flags);
1475 + * already gathering data for another read. the
1476 + * rfn will be reinvoked once that is done
1478 + if (test_and_set_bit(PACKET_RQ, &pd->flags)) {
1479 + list_add(&rq->queue, &q->queue_head);
1480 + spin_unlock_irqrestore(&pd->lock, flags);
1488 + spin_unlock_irqrestore(&pd->lock, flags);
1491 + VPRINTK("wake up wait queue\n");
1492 + wake_up(&pd->wqueue);
1495 +static void pkt_print_settings(struct pktcdvd_device *pd)
1497 + printk("pktcdvd: %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
1498 + printk("%u blocks, ", pd->settings.size >> 2);
1499 + printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
1503 + * A generic sense dump / resolve mechanism should be implemented across
1504 + * all ATAPI + SCSI devices.
1506 +static void pkt_dump_sense(struct request_sense *sense)
1508 + char *info[9] = { "No sense", "Recovered error", "Not ready",
1509 + "Medium error", "Hardware error", "Illegal request",
1510 + "Unit attention", "Data protect", "Blank check" };
1512 + if (sense == NULL)
1515 + if (sense->sense_key > 8) {
1516 + printk("pktcdvd: sense invalid\n");
1520 + printk("pktcdvd: sense category %s ", info[sense->sense_key]);
1521 + printk("asc(%02x), ascq(%02x)\n", sense->asc, sense->ascq);
1525 + * write mode select package based on pd->settings
1527 +static int pkt_set_write_settings(struct pktcdvd_device *pd)
1529 + struct cdrom_device_info *cdi = pd->cdi;
1530 + struct cdrom_generic_command cgc;
1531 + write_param_page *wp;
1535 + memset(buffer, 0, sizeof(buffer));
1536 + init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
1537 + if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_WRITE_PARMS_PAGE, 0)))
1540 + size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
1541 + pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
1542 + if (size > sizeof(buffer))
1543 + size = sizeof(buffer);
1548 + init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
1549 + if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_WRITE_PARMS_PAGE, 0)))
1553 + * write page is offset header + block descriptor length
1555 + wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
1557 + wp->fp = pd->settings.fp;
1558 + wp->track_mode = pd->settings.track_mode;
1559 + wp->write_type = pd->settings.write_type;
1560 + wp->data_block_type = pd->settings.block_mode;
1562 + wp->multi_session = 0;
1564 +#ifdef PACKET_USE_LS
1565 + wp->link_size = 7;
1569 + if (wp->data_block_type == PACKET_BLOCK_MODE1) {
1570 + wp->session_format = 0;
1571 + wp->subhdr2 = 0x20;
1572 + } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
1573 + wp->session_format = 0x20;
1576 + wp->mcn[0] = 0x80;
1577 + memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
1583 + printk("pktcdvd: write mode wrong %d\n", wp->data_block_type);
1586 + wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
1588 + cgc.buflen = cgc.cmd[8] = size;
1589 + if ((ret = cdrom_mode_select(cdi, &cgc))) {
1590 + pkt_dump_sense(cgc.sense);
1594 + pkt_print_settings(pd);
1599 + * 0 -- we can write to this track, 1 -- we can't
1601 +static int pkt_good_track(track_information *ti)
1604 + * only good for CD-RW at the moment, not DVD-RW
1608 + * FIXME: only for FP
1614 + * "good" settings as per Mt Fuji.
1616 + if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1)
1619 + if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1)
1622 + if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1)
1625 + printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
1630 + * 0 -- we can write to this disc, 1 -- we can't
1632 +static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
1635 + * for disc type 0xff we should probably reserve a new track.
1636 + * but i'm not sure, should we leave this to user apps? probably.
1638 + if (di->disc_type == 0xff) {
1639 + printk("pktcdvd: Unknown disc. No track?\n");
1643 + if (di->disc_type != 0x20 && di->disc_type != 0) {
1644 + printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
1648 + if (di->erasable == 0) {
1649 + printk("pktcdvd: Disc not erasable\n");
1653 + if (pd->track_status == PACKET_SESSION_RESERVED) {
1654 + printk("pktcdvd: Can't write to last track (reserved)\n");
1661 +static int pkt_probe_settings(struct pktcdvd_device *pd)
1663 + disc_information di;
1664 + track_information ti;
1667 + memset(&di, 0, sizeof(disc_information));
1668 + memset(&ti, 0, sizeof(track_information));
1670 + if ((ret = cdrom_get_disc_info(pd->dev, &di))) {
1671 + printk("failed get_disc\n");
1675 + pd->disc_status = di.disc_status;
1676 + pd->track_status = di.border_status;
1678 + if (pkt_good_disc(pd, &di))
1681 + printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
1682 + pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
1684 + track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
1685 + if ((ret = cdrom_get_track_info(pd->dev, track, 1, &ti))) {
1686 + printk("pktcdvd: failed get_track\n");
1690 + if (pkt_good_track(&ti)) {
1691 + printk("pktcdvd: can't write to this track\n");
1696 + * we keep packet size in 512 byte units, makes it easier to
1697 + * deal with request calculations.
1699 + pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1700 + if (pd->settings.size == 0) {
1701 + printk("pktcdvd: detected zero packet size!\n");
1702 + pd->settings.size = 128;
1704 + pd->settings.fp = ti.fp;
1705 + pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
1708 + pd->nwa = be32_to_cpu(ti.next_writable);
1709 + set_bit(PACKET_NWA_VALID, &pd->flags);
1713 + * in theory we could use lra on -RW media as well and just zero
1714 + * blocks that haven't been written yet, but in practice that
1715 + * is just a no-go. we'll use that for -R, naturally.
1718 + pd->lra = be32_to_cpu(ti.last_rec_address);
1719 + set_bit(PACKET_LRA_VALID, &pd->flags);
1721 + pd->lra = 0xffffffff;
1722 + set_bit(PACKET_LRA_VALID, &pd->flags);
1728 + pd->settings.link_loss = 7;
1729 + pd->settings.write_type = 0; /* packet */
1730 + pd->settings.track_mode = ti.track_mode;
1733 + * mode1 or mode2 disc
1735 + switch (ti.data_mode) {
1736 + case PACKET_MODE1:
1737 + pd->settings.block_mode = PACKET_BLOCK_MODE1;
1739 + case PACKET_MODE2:
1740 + pd->settings.block_mode = PACKET_BLOCK_MODE2;
1743 + printk("pktcdvd: unknown data mode\n");
1750 + * enable/disable write caching on drive
1752 +static int pkt_write_caching(struct pktcdvd_device *pd, int set)
1754 + struct cdrom_generic_command cgc;
1755 + unsigned char buf[64];
1758 + memset(buf, 0, sizeof(buf));
1759 + init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1760 + cgc.buflen = pd->mode_offset + 12;
1763 + * caching mode page might not be there, so quiet this command
1767 + if ((ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_WCACHING_PAGE, 0)))
1770 + buf[pd->mode_offset + 10] |= (!!set << 2);
1772 + cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
1773 + ret = cdrom_mode_select(pd->cdi, &cgc);
1775 + printk("pktcdvd: write caching control failed\n");
1776 + else if (!ret && set)
1777 + printk("pktcdvd: enabled write caching on %s\n", pd->name);
1782 + * flush the drive cache to media
1784 +static int pkt_flush_cache(struct pktcdvd_device *pd)
1786 + struct cdrom_generic_command cgc;
1788 + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1789 + cgc.cmd[0] = GPCMD_FLUSH_CACHE;
1791 + cgc.timeout = 60*HZ;
1794 + * the IMMED bit -- we default to not setting it, although that
1795 + * would allow a much faster close, this is safer
1798 + cgc.cmd[1] = 1 << 1;
1800 + return pd->cdi->ops->generic_packet(pd->cdi, &cgc);
1804 + * Returns drive current write speed
1806 +static int pkt_get_speed(struct pktcdvd_device *pd)
1808 + struct cdrom_generic_command cgc;
1809 + unsigned char buf[64];
1812 + memset(buf, 0, sizeof(buf));
1813 + init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
1815 + ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1817 + cgc.buflen = pd->mode_offset + buf[pd->mode_offset + 9] + 2 +
1818 + sizeof(struct mode_page_header);
1819 + ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1824 + offset = pd->mode_offset + 26;
1825 + pd->speed = ((buf[offset] << 8) | buf[offset + 1]) / 0xb0;
1830 + * speed is given as the normal factor, e.g. 4 for 4x
1832 +static int pkt_set_speed(struct pktcdvd_device *pd, unsigned speed)
1834 + struct cdrom_generic_command cgc;
1835 + unsigned read_speed;
1838 + * we set read and write time so that read spindle speed is one and
1839 + * a half as fast as write. although a drive can typically read much
1840 + * faster than write, this minimizes the spin up/down when we write
1841 + * and gather data. maybe 1/1 factor is faster, needs a bit of testing.
1843 + speed = speed * 0xb0;
1844 + read_speed = (speed * 3) >> 1;
1845 + read_speed = min_t(unsigned, read_speed, 0xffff);
1847 + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1848 + cgc.cmd[0] = 0xbb;
1849 + cgc.cmd[2] = (read_speed >> 8) & 0xff;
1850 + cgc.cmd[3] = read_speed & 0xff;
1851 + cgc.cmd[4] = (speed >> 8) & 0xff;
1852 + cgc.cmd[5] = speed & 0xff;
1854 + return pd->cdi->ops->generic_packet(pd->cdi, &cgc);
1858 + * Give me full power, Captain
1860 +static int pkt_adjust_speed(struct pktcdvd_device *pd, int speed)
1862 + disc_information dummy;
1866 + * FIXME: do proper unified cap page, also, this isn't proper
1867 + * Mt Fuji, but I think we can safely assume all drives support
1868 + * it. A hell of a lot more than support the GET_PERFORMANCE
1869 + * command (besides, we also use the old set speed command,
1870 + * not the streaming feature).
1872 + if ((ret = pkt_set_speed(pd, speed)))
1876 + * just do something with the disc -- next read will contain the
1877 + * maximum speed with this media
1879 + if ((ret = cdrom_get_disc_info(pd->dev, &dummy)))
1882 + if ((ret = pkt_get_speed(pd))) {
1883 + printk("pktcdvd: failed get speed\n");
1887 + DPRINTK("pktcdvd: speed (R/W) %u/%u\n", (pd->speed * 3) / 2, pd->speed);
1891 +static int pkt_open_write(struct pktcdvd_device *pd)
1895 + if ((ret = pkt_probe_settings(pd))) {
1896 + DPRINTK("pktcdvd: %s failed probe\n", pd->name);
1900 + if ((ret = pkt_set_write_settings(pd))) {
1901 + DPRINTK("pktcdvd: %s failed saving write settings\n", pd->name);
1905 + (void) pkt_write_caching(pd, USE_WCACHING);
1907 + if ((ret = pkt_adjust_speed(pd, 16))) {
1908 + DPRINTK("pktcdvd: %s couldn't set write speed\n", pd->name);
1915 + * called at open time.
1917 +static int pkt_open_dev(struct pktcdvd_device *pd, int write)
1925 + pd->bdev = bdget(kdev_t_to_nr(pd->dev));
1927 + printk("pktcdvd: can't find cdrom block device\n");
1931 + if ((ret = blkdev_get(pd->bdev, FMODE_READ, 0, BDEV_FILE))) {
1936 + if ((ret = cdrom_get_last_written(pd->dev, &lba))) {
1937 + printk("pktcdvd: cdrom_get_last_written failed\n");
1941 + pkt_sizes[MINOR(pd->pkt_dev)] = lba << 1;
1944 + if ((ret = pkt_open_write(pd)))
1946 + pkt_mark_readonly(pd, 0);
1948 + (void) pkt_adjust_speed(pd, 0xff);
1949 + pkt_mark_readonly(pd, 1);
1953 + printk("pktcdvd: %lukB available on disc\n", lba << 1);
1959 + * called when the device is closed. makes sure that the device flushes
1960 + * the internal cache before we close.
1962 +static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
1964 + atomic_dec(&pd->refcnt);
1965 + if (atomic_read(&pd->refcnt) > 0)
1968 + fsync_dev(pd->pkt_dev);
1970 + if (flush && pkt_flush_cache(pd))
1971 + DPRINTK("pktcdvd: %s not flushing cache\n", pd->name);
1974 + blkdev_put(pd->bdev, BDEV_FILE);
1979 +static int pkt_open(struct inode *inode, struct file *file)
1981 + struct pktcdvd_device *pd = NULL;
1984 + VPRINTK("pktcdvd: entering open\n");
1986 + if (MINOR(inode->i_rdev) >= MAX_WRITERS) {
1987 + printk("pktcdvd: max %d writers supported\n", MAX_WRITERS);
1993 + * either device is not configured, or pktsetup is old and doesn't
1994 + * use O_CREAT to create device
1996 + pd = &pkt_devs[MINOR(inode->i_rdev)];
1997 + if (!pd->dev && !(file->f_flags & O_CREAT)) {
1998 + VPRINTK("pktcdvd: not configured and O_CREAT not set\n");
2003 + atomic_inc(&pd->refcnt);
2004 + if (atomic_read(&pd->refcnt) > 1) {
2005 + if (file->f_mode & FMODE_WRITE) {
2006 + VPRINTK("pktcdvd: busy open for write\n");
2012 + * Not first open, everything is already set up
2017 + if (((file->f_flags & O_ACCMODE) != O_RDONLY) || !(file->f_flags & O_CREAT)) {
2018 + if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) {
2025 + * needed here as well, since ext2 (among others) may change
2026 + * the blocksize at mount time
2028 + set_blocksize(pd->pkt_dev, CD_FRAMESIZE);
2032 + atomic_dec(&pd->refcnt);
2033 + if (atomic_read(&pd->refcnt) == 0) {
2035 + blkdev_put(pd->bdev, BDEV_FILE);
2040 + VPRINTK("pktcdvd: failed open (%d)\n", ret);
2044 +static int pkt_close(struct inode *inode, struct file *file)
2046 + struct pktcdvd_device *pd = &pkt_devs[MINOR(inode->i_rdev)];
2050 + int flush = !test_bit(PACKET_READONLY, &pd->flags);
2051 + pkt_release_dev(pd, flush);
2058 + * pktcdvd i/o elevator parts
2060 +static inline int pkt_bh_rq_ordered(struct buffer_head *bh, struct request *rq,
2061 + struct list_head *head)
2063 + struct list_head *next;
2064 + struct request *next_rq;
2066 + next = rq->queue.next;
2070 + next_rq = blkdev_entry_to_request(next);
2071 + if (next_rq->rq_dev != rq->rq_dev)
2072 + return bh->b_rsector > rq->sector;
2074 + if (bh->b_rsector < next_rq->sector && bh->b_rsector > rq->sector)
2077 + if (next_rq->sector > rq->sector)
2080 + if (bh->b_rsector > rq->sector || bh->b_rsector < next_rq->sector)
2086 +static int pkt_elevator_merge(request_queue_t *q, struct request **req,
2087 + struct list_head *head,
2088 + struct buffer_head *bh, int rw,
2091 + struct list_head *entry = &q->queue_head;
2092 + unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;
2094 + if (bh->b_reqnext)
2097 + VPRINTK("pkt_elevator_merge: rw=%d, ms=%d, bh=%lu, dev=%d\n", rw, max_sectors, bh->b_rsector, bh->b_rdev);
2099 + while ((entry = entry->prev) != head) {
2100 + struct request *__rq = blkdev_entry_to_request(entry);
2101 + if (__rq->waiting)
2103 + if (__rq->rq_dev != bh->b_rdev)
2105 + if (!*req && pkt_bh_rq_ordered(bh, __rq, &q->queue_head))
2107 + if (__rq->cmd != rw)
2109 + if (__rq->nr_sectors + count > max_sectors)
2111 + if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
2112 + ret = ELEVATOR_BACK_MERGE;
2115 + } else if (__rq->sector - count == bh->b_rsector) {
2116 + ret = ELEVATOR_FRONT_MERGE;
2120 +#if 0 /* makes sense, chance of two matches probably slim */
2125 + VPRINTK("*req=%p, ret=%d\n", *req, ret);
2130 +static int pkt_make_request(request_queue_t *q, int rw, struct buffer_head *bh)
2132 + struct pktcdvd_device *pd;
2133 + struct buffer_head *new_bh;
2135 + if (MINOR(bh->b_rdev) >= MAX_WRITERS) {
2136 + printk("pktcdvd: %s out of range\n", kdevname(bh->b_rdev));
2140 + pd = &pkt_devs[MINOR(bh->b_rdev)];
2142 + printk("pktcdvd: request received for non-active pd\n");
2147 + * quick remap a READ
2149 + if (rw == READ || rw == READA) {
2150 + down(&pd->cache_sync_mutex);
2151 + pkt_flush_writes(pd);
2152 + bh->b_rdev = pd->dev;
2153 + generic_make_request(rw, bh);
2154 + up(&pd->cache_sync_mutex);
2158 + if (!(rw & WRITE))
2161 + if (test_bit(PACKET_READONLY, &pd->flags)) {
2162 + printk("pktcdvd: WRITE for ro device %s (%lu)\n",
2163 + pd->name, bh->b_rsector);
2167 + VPRINTK("pkt_make_request: bh:%p block:%ld size:%d\n",
2168 + bh, bh->b_blocknr, bh->b_size);
2170 + if (bh->b_size != CD_FRAMESIZE) {
2171 + printk("pktcdvd: wrong bh size\n");
2176 + * This is deadlock safe, since pkt_get_stacked_bh can only
2177 + * fail if there are already buffers in flight for this
2178 + * packet device. When the in-flight buffers finish, we
2179 + * will be woken up and try again.
2181 + new_bh = kmem_cache_alloc(bh_cachep, GFP_ATOMIC);
2183 + DECLARE_WAITQUEUE(wait, current);
2185 + generic_unplug_device(q);
2187 + add_wait_queue(&pd_bh_wait, &wait);
2188 + set_current_state(TASK_UNINTERRUPTIBLE);
2190 + new_bh = pkt_get_stacked_bh(pd);
2194 + set_current_state(TASK_RUNNING);
2195 + remove_wait_queue(&pd_bh_wait, &wait);
2198 + new_bh->b_size = bh->b_size;
2199 + new_bh->b_list = PKT_BUF_LIST + 1;
2200 + new_bh->b_dev = bh->b_dev;
2201 + atomic_set(&new_bh->b_count, 1);
2202 + new_bh->b_rdev = bh->b_rdev;
2203 + new_bh->b_state = bh->b_state;
2204 + new_bh->b_page = bh->b_page;
2205 + new_bh->b_data = bh->b_data;
2206 + new_bh->b_private = bh;
2207 + new_bh->b_end_io = pkt_end_io_write_stacked;
2208 + new_bh->b_rsector = bh->b_rsector;
2210 + return pd->make_request_fn(q, rw, new_bh);
2213 + buffer_IO_error(bh);
2217 +static void show_requests(request_queue_t *q)
2219 + struct list_head *entry;
2221 + spin_lock_irq(&io_request_lock);
2223 + list_for_each(entry, &q->queue_head) {
2224 + struct request *rq = blkdev_entry_to_request(entry);
2225 + int zone = rq->sector & ~127;
2229 + if ((rq->sector + rq->nr_sectors - (rq->bhtail->b_size >> 9))
2230 + != rq->bhtail->b_rsector)
2233 + printk("rq: cmd %d, sector %lu (-> %lu), zone %u, hole %d, nr_sectors %lu\n", rq->cmd, rq->sector, rq->sector + rq->nr_sectors - 1, zone, hole, rq->nr_sectors);
2236 + spin_unlock_irq(&io_request_lock);
2239 +static void sysrq_handle_show_requests(int key, struct pt_regs *pt_regs,
2240 + struct kbd_struct *kbd, struct tty_struct *tty)
2243 + * quick hack to show pending requests in /dev/pktcdvd0 queue
2245 + queue_proc *qp = blk_dev[PACKET_MAJOR].queue;
2247 + request_queue_t *q = qp(MKDEV(PACKET_MAJOR, 0));
2252 +static struct sysrq_key_op sysrq_show_requests_op = {
2253 + handler: sysrq_handle_show_requests,
2254 + help_msg: "showreQuests",
2255 + action_msg: "Show requests",
2258 +static void pkt_init_queue(struct pktcdvd_device *pd)
2260 + request_queue_t *q = &pd->cdrw.r_queue;
2262 + blk_init_queue(q, pkt_request);
2263 + elevator_init(&q->elevator, ELEVATOR_PKTCDVD);
2264 + pd->make_request_fn = q->make_request_fn;
2265 + blk_queue_make_request(q, pkt_make_request);
2266 + blk_queue_headactive(q, 0);
2267 + q->front_merge_fn = pkt_front_merge_fn;
2268 + q->back_merge_fn = pkt_back_merge_fn;
2269 + q->merge_requests_fn = pkt_merge_requests_fn;
2270 + q->queuedata = pd;
2273 +static int pkt_proc_device(struct pktcdvd_device *pd, char *buf)
2275 + char *b = buf, *msg;
2276 + struct list_head *foo;
2279 + b += sprintf(b, "\nWriter %s (%s):\n", pd->name, kdevname(pd->dev));
2281 + b += sprintf(b, "\nSettings:\n");
2282 + b += sprintf(b, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
2284 + if (pd->settings.write_type == 0)
2288 + b += sprintf(b, "\twrite type:\t\t%s\n", msg);
2290 + b += sprintf(b, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
2291 + b += sprintf(b, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
2293 + b += sprintf(b, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
2295 + if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
2297 + else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
2301 + b += sprintf(b, "\tblock mode:\t\t%s\n", msg);
2303 + b += sprintf(b, "\nStatistics:\n");
2304 + b += sprintf(b, "\tbuffers started:\t%lu\n", pd->stats.bh_s);
2305 + b += sprintf(b, "\tbuffers ended:\t\t%lu\n", pd->stats.bh_e);
2306 + b += sprintf(b, "\tsectors written:\t%lu\n", pd->stats.secs_w);
2307 + b += sprintf(b, "\tsectors read:\t\t%lu\n", pd->stats.secs_r);
2308 + b += sprintf(b, "\tbuffer cache hits:\t%lu\n", pd->stats.bh_cache_hits);
2309 + b += sprintf(b, "\tpage cache hits:\t%lu\n", pd->stats.page_cache_hits);
2311 + b += sprintf(b, "\nMisc:\n");
2312 + b += sprintf(b, "\treference count:\t%d\n", atomic_read(&pd->refcnt));
2313 + b += sprintf(b, "\tflags:\t\t\t0x%lx\n", pd->flags);
2314 + b += sprintf(b, "\twrite speed:\t\t%ukB/s\n", pd->speed * 150);
2315 + b += sprintf(b, "\tstart offset:\t\t%lu\n", pd->offset);
2316 + b += sprintf(b, "\tmode page offset:\t%u\n", pd->mode_offset);
2318 + b += sprintf(b, "\nQueue state:\n");
2319 + b += sprintf(b, "\tfree buffers:\t\t%u\n", atomic_read(&pd->cdrw.free_bh));
2320 + b += sprintf(b, "\trequest active:\t\t%s\n", pd->rq ? "yes" : "no");
2321 + b += sprintf(b, "\twrite rq depth:\t\t%d\n", atomic_read(&pd->wrqcnt));
2323 + spin_lock_irq(&io_request_lock);
2325 + list_for_each(foo, &pd->cdrw.r_queue.queue_head)
2327 + spin_unlock_irq(&io_request_lock);
2328 + b += sprintf(b, "\tqueue requests:\t\t%u\n", i);
2333 +static int pkt_read_proc(char *page, char **start, off_t off, int count,
2334 + int *eof, void *data)
2336 + struct pktcdvd_device *pd = data;
2340 + len = pkt_proc_device(pd, buf);
2343 + if (len <= off + count)
2346 + *start = page + off;
2356 +static int pkt_new_dev(struct pktcdvd_device *pd, kdev_t dev)
2358 + struct cdrom_device_info *cdi;
2359 + request_queue_t *q;
2362 + for (i = 0; i < MAX_WRITERS; i++) {
2363 + if (pkt_devs[i].dev == dev) {
2364 + printk("pktcdvd: %s already setup\n", kdevname(dev));
2369 + for (i = 0; i < MAX_WRITERS; i++)
2370 + if (pd == &pkt_devs[i])
2372 + BUG_ON(i == MAX_WRITERS);
2374 + cdi = cdrom_find_device(dev);
2375 + if (cdi == NULL) {
2376 + printk("pktcdvd: %s is not a CD-ROM\n", kdevname(dev));
2380 + MOD_INC_USE_COUNT;
2382 + memset(pd, 0, sizeof(struct pktcdvd_device));
2383 + atomic_set(&pd->cdrw.free_bh, 0);
2385 + spin_lock_init(&pd->lock);
2386 + if (pkt_grow_bhlist(pd, PACKET_MAX_SIZE) < PACKET_MAX_SIZE) {
2387 + MOD_DEC_USE_COUNT;
2388 + printk("pktcdvd: not enough memory for buffers\n");
2392 + pd->stacked_bhcnt = 0;
2393 + if (!pkt_grow_stacked_bhlist(pd)) {
2394 + MOD_DEC_USE_COUNT;
2395 + printk("pktcdvd: not enough memory for buffer heads\n");
2399 + set_blocksize(dev, CD_FRAMESIZE);
2403 + pd->pkt_dev = MKDEV(PACKET_MAJOR, i);
2404 + sprintf(pd->name, "pktcdvd%d", i);
2405 + atomic_set(&pd->refcnt, 0);
2406 + atomic_set(&pd->wrqcnt, 0);
2407 + init_MUTEX(&pd->cache_sync_mutex);
2408 + pd->unflushed_writes = 0;
2409 + init_waitqueue_head(&pd->wqueue);
2410 + init_completion(&pd->cdrw.thr_compl);
2413 + * store device merge functions (SCSI uses their own to build
2414 + * scatter-gather tables)
2416 + q = blk_get_queue(dev);
2417 + pkt_init_queue(pd);
2418 + pd->cdrw.front_merge_fn = q->front_merge_fn;
2419 + pd->cdrw.back_merge_fn = q->back_merge_fn;
2420 + pd->cdrw.merge_requests_fn = q->merge_requests_fn;
2421 + pd->cdrw.queuedata = q->queuedata;
2423 + pd->cdrw.pid = kernel_thread(kcdrwd, pd, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
2424 + if (pd->cdrw.pid < 0) {
2425 + MOD_DEC_USE_COUNT;
2426 + printk("pktcdvd: can't start kernel thread\n");
2427 + blk_cleanup_queue(&pd->cdrw.r_queue);
2428 + pkt_shrink_stacked_bhlist(pd);
2429 + pkt_shrink_bhlist(pd, PACKET_MAX_SIZE);
2430 + memset(pd, 0, sizeof(*pd));
2434 + create_proc_read_entry(pd->name, 0, pkt_proc, pkt_read_proc, pd);
2435 + DPRINTK("pktcdvd: writer %s successfully registered\n", cdi->name);
2440 + * arg contains file descriptor of CD-ROM device.
2442 +static int pkt_setup_dev(struct pktcdvd_device *pd, unsigned int arg)
2444 + struct inode *inode;
2445 + struct file *file;
2448 + if ((file = fget(arg)) == NULL) {
2449 + printk("pktcdvd: bad file descriptor passed\n");
2454 + if ((inode = file->f_dentry->d_inode) == NULL) {
2455 + printk("pktcdvd: huh? file descriptor contains no inode?\n");
2459 + if (!S_ISBLK(inode->i_mode)) {
2460 + printk("pktcdvd: device is not a block device (duh)\n");
2464 + if (IS_RDONLY(inode)) {
2465 + printk("pktcdvd: Can't write to read-only dev\n");
2468 + if ((ret = pkt_new_dev(pd, inode->i_rdev))) {
2469 + printk("pktcdvd: all booked up\n");
2473 + atomic_inc(&pd->refcnt);
2480 +static int pkt_remove_dev(struct pktcdvd_device *pd)
2484 + if (pd->cdrw.pid >= 0) {
2485 + ret = kill_proc(pd->cdrw.pid, SIGKILL, 1);
2487 + printk("pkt_exit: can't kill kernel thread\n");
2490 + wait_for_completion(&pd->cdrw.thr_compl);
2494 + * will also invalidate buffers for CD-ROM
2496 + invalidate_device(pd->pkt_dev, 1);
2498 + pkt_shrink_stacked_bhlist(pd);
2499 + if ((ret = pkt_shrink_bhlist(pd, PACKET_MAX_SIZE)) != PACKET_MAX_SIZE)
2500 + printk("pktcdvd: leaked %d buffers\n", PACKET_MAX_SIZE - ret);
2502 + blk_cleanup_queue(&pd->cdrw.r_queue);
2503 + remove_proc_entry(pd->name, pkt_proc);
2504 + DPRINTK("pktcdvd: writer %s unregistered\n", pd->cdi->name);
2505 + memset(pd, 0, sizeof(struct pktcdvd_device));
2506 + MOD_DEC_USE_COUNT;
2510 +static int pkt_media_change(kdev_t dev)
2512 + struct pktcdvd_device *pd = pkt_find_dev(dev);
2515 + return cdrom_media_changed(pd->dev);
2518 +static int pkt_ioctl(struct inode *inode, struct file *file,
2519 + unsigned int cmd, unsigned long arg)
2521 + struct pktcdvd_device *pd = &pkt_devs[MINOR(inode->i_rdev)];
2523 + VPRINTK("pkt_ioctl: cmd %d, dev %x\n", cmd, inode->i_rdev);
2525 + if ((cmd != PACKET_SETUP_DEV) && !pd->dev) {
2526 + DPRINTK("pktcdvd: dev not setup\n");
2531 + case PACKET_GET_STATS:
2532 + if (copy_to_user(&arg, &pd->stats, sizeof(struct packet_stats)))
2536 + case PACKET_SETUP_DEV:
2537 + if (!capable(CAP_SYS_ADMIN))
2540 + printk("pktcdvd: dev already setup\n");
2543 + return pkt_setup_dev(pd, arg);
2545 + case PACKET_TEARDOWN_DEV:
2546 + if (!capable(CAP_SYS_ADMIN))
2548 + if (atomic_read(&pd->refcnt) != 1)
2550 + return pkt_remove_dev(pd);
2553 + return put_user(blk_size[PACKET_MAJOR][MINOR(inode->i_rdev)] << 1, (unsigned long *)arg);
2555 + case BLKGETSIZE64:
2556 + return put_user((u64)blk_size[PACKET_MAJOR][MINOR(inode->i_rdev)] << 10,
2560 + if (capable(CAP_SYS_ADMIN))
2561 + set_bit(PACKET_READONLY, &pd->flags);
2569 + return blk_ioctl(inode->i_rdev, cmd, arg);
2572 + * forward selected CDROM ioctls to CD-ROM, for UDF
2574 + case CDROMMULTISESSION:
2575 + case CDROMREADTOCENTRY:
2576 + case CDROM_LAST_WRITTEN:
2577 + case CDROM_SEND_PACKET:
2578 + case SCSI_IOCTL_SEND_COMMAND:
2581 + return ioctl_by_bdev(pd->bdev, cmd, arg);
2584 + printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd);
2591 +static struct block_device_operations pktcdvd_ops = {
2592 + owner: THIS_MODULE,
2594 + release: pkt_close,
2596 + check_media_change: pkt_media_change,
2603 + devfs_register(NULL, "pktcdvd", DEVFS_FL_DEFAULT, PACKET_MAJOR, 0,
2604 + S_IFBLK | S_IRUSR | S_IWUSR, &pktcdvd_ops, NULL);
2605 + if (devfs_register_blkdev(PACKET_MAJOR, "pktcdvd", &pktcdvd_ops)) {
2606 + printk("unable to register pktcdvd device\n");
2610 + pkt_sizes = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
2611 + if (pkt_sizes == NULL)
2614 + pkt_blksize = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
2615 + if (pkt_blksize == NULL)
2618 + pkt_readahead = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
2619 + if (pkt_readahead == NULL)
2622 + pkt_devs = kmalloc(MAX_WRITERS * sizeof(struct pktcdvd_device), GFP_KERNEL);
2623 + if (pkt_devs == NULL)
2626 + memset(pkt_devs, 0, MAX_WRITERS * sizeof(struct pktcdvd_device));
2627 + memset(pkt_sizes, 0, MAX_WRITERS * sizeof(int));
2628 + memset(pkt_blksize, 0, MAX_WRITERS * sizeof(int));
2630 + for (i = 0; i < MAX_WRITERS; i++)
2631 + pkt_readahead[i] = vm_max_readahead;
2633 + blk_size[PACKET_MAJOR] = pkt_sizes;
2634 + blksize_size[PACKET_MAJOR] = pkt_blksize;
2635 + max_readahead[PACKET_MAJOR] = pkt_readahead;
2636 + read_ahead[PACKET_MAJOR] = 128;
2637 + set_blocksize(MKDEV(PACKET_MAJOR, 0), CD_FRAMESIZE);
2639 + blk_dev[PACKET_MAJOR].queue = pkt_get_queue;
2641 + pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
2643 + register_sysrq_key('q', &sysrq_show_requests_op);
2645 + DPRINTK("pktcdvd: %s\n", VERSION_CODE);
2649 + printk("pktcdvd: out of memory\n");
2650 + devfs_unregister(devfs_find_handle(NULL, "pktcdvd", 0, 0,
2651 + DEVFS_SPECIAL_BLK, 0));
2652 + devfs_unregister_blkdev(PACKET_MAJOR, "pktcdvd");
2655 + kfree(pkt_blksize);
2656 + kfree(pkt_readahead);
2660 +void pkt_exit(void)
2662 + unregister_sysrq_key('q', &sysrq_show_requests_op);
2664 + devfs_unregister(devfs_find_handle(NULL, "pktcdvd", 0, 0,
2665 + DEVFS_SPECIAL_BLK, 0));
2666 + devfs_unregister_blkdev(PACKET_MAJOR, "pktcdvd");
2667 + blk_dev[PACKET_MAJOR].queue = NULL;
2669 + remove_proc_entry("pktcdvd", proc_root_driver);
2671 + kfree(pkt_blksize);
2673 + kfree(pkt_readahead);
2676 +MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
2677 +MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
2678 +MODULE_LICENSE("GPL");
2680 +module_init(pkt_init);
2681 +module_exit(pkt_exit);
2682 diff -u -r -N ../../linus/2.4/linux/drivers/cdrom/Makefile linux/drivers/cdrom/Makefile
2683 --- ../../linus/2.4/linux/drivers/cdrom/Makefile Tue Aug 6 21:14:34 2002
2684 +++ linux/drivers/cdrom/Makefile Tue Aug 6 21:22:09 2002
2686 obj-$(CONFIG_BLK_DEV_IDECD) += cdrom.o
2687 obj-$(CONFIG_BLK_DEV_SR) += cdrom.o
2688 obj-$(CONFIG_PARIDE_PCD) += cdrom.o
2689 +obj-$(CONFIG_CDROM_PKTCDVD) += cdrom.o
2691 obj-$(CONFIG_AZTCD) += aztcd.o
2692 obj-$(CONFIG_CDU31A) += cdu31a.o cdrom.o
2693 diff -u -r -N ../../linus/2.4/linux/drivers/ide/ide-cd.c linux/drivers/ide/ide-cd.c
2694 --- ../../linus/2.4/linux/drivers/ide/ide-cd.c Tue Aug 6 21:14:39 2002
2695 +++ linux/drivers/ide/ide-cd.c Tue Aug 6 21:22:36 2002
2696 @@ -292,9 +292,11 @@
2697 * correctly reporting tray status -- from
2698 * Michael D Johnson <johnsom@orst.edu>
2700 + * 4.99 - Added write support for packet writing.
2702 *************************************************************************/
2704 -#define IDECD_VERSION "4.59"
2705 +#define IDECD_VERSION "4.99"
2707 #include <linux/config.h>
2708 #include <linux/module.h>
2711 memset(pc, 0, sizeof(struct packet_command));
2712 pc->c[0] = GPCMD_REQUEST_SENSE;
2713 - pc->c[4] = pc->buflen = 18;
2714 + pc->c[4] = pc->buflen = 14;
2715 pc->buffer = (char *) sense;
2716 pc->sense = (struct request_sense *) failed_command;
2719 cdrom_saw_media_change (drive);
2721 /* Fail the request. */
2722 - printk ("%s: tray open\n", drive->name);
2723 + /* printk ("%s: tray open\n", drive->name); */
2724 cdrom_end_request (0, drive);
2725 } else if (sense_key == UNIT_ATTENTION) {
2727 @@ -1200,6 +1202,8 @@
2728 * partitions not really working, but better check anyway...
2730 if (rq->cmd == nxt->cmd && rq->rq_dev == nxt->rq_dev) {
2731 + if (rq->cmd == WRITE)
2732 + printk("merged write\n");
2733 rq->nr_sectors += nxt->nr_sectors;
2734 rq->hard_nr_sectors += nxt->nr_sectors;
2735 rq->bhtail->b_reqnext = nxt->bh;
2736 @@ -2497,6 +2501,12 @@
2738 void ide_cdrom_release_real (struct cdrom_device_info *cdi)
2740 + struct cdrom_generic_command cgc;
2742 + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
2743 + cgc.cmd[0] = GPCMD_FLUSH_CACHE;
2745 + (void) ide_cdrom_packet(cdi, &cgc);
2749 @@ -2685,15 +2695,10 @@
2750 printk(" %dX", CDROM_CONFIG_FLAGS(drive)->max_speed);
2751 printk(" %s", CDROM_CONFIG_FLAGS(drive)->dvd ? "DVD-ROM" : "CD-ROM");
2753 - if (CDROM_CONFIG_FLAGS (drive)->dvd_r|CDROM_CONFIG_FLAGS (drive)->dvd_ram)
2754 - printk (" DVD%s%s",
2755 - (CDROM_CONFIG_FLAGS (drive)->dvd_r)? "-R" : "",
2756 - (CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "-RAM" : "");
2758 - if (CDROM_CONFIG_FLAGS (drive)->cd_r|CDROM_CONFIG_FLAGS (drive)->cd_rw)
2759 - printk (" CD%s%s",
2760 - (CDROM_CONFIG_FLAGS (drive)->cd_r)? "-R" : "",
2761 - (CDROM_CONFIG_FLAGS (drive)->cd_rw)? "/RW" : "");
2762 + if (CDROM_CONFIG_FLAGS(drive)->dvd_r || CDROM_CONFIG_FLAGS(drive)->dvd_ram)
2763 + printk (" DVD-R%s", (CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "AM" : "");
2764 + if (CDROM_CONFIG_FLAGS(drive)->cd_r ||CDROM_CONFIG_FLAGS(drive)->cd_rw)
2765 + printk (" CD-R%s", (CDROM_CONFIG_FLAGS (drive)->cd_rw)? "/RW" : "");
2767 if (CDROM_CONFIG_FLAGS (drive)->is_changer)
2768 printk (" changer w/%d slots", nslots);
2769 @@ -2716,7 +2721,7 @@
2770 int major = HWIF(drive)->major;
2771 int minor = drive->select.b.unit << PARTN_BITS;
2773 - ide_add_setting(drive, "breada_readahead", SETTING_RW, BLKRAGET, BLKRASET, TYPE_INT, 0, 255, 1, 2, &read_ahead[major], NULL);
2774 + ide_add_setting(drive, "breada_readahead", SETTING_RW, BLKRAGET, BLKRASET, TYPE_INT, 0, 255, 1, 1024, &read_ahead[major], NULL);
2775 ide_add_setting(drive, "file_readahead", SETTING_RW, BLKFRAGET, BLKFRASET, TYPE_INTA, 0, INT_MAX, 1, 1024, &max_readahead[major][minor], NULL);
2776 ide_add_setting(drive, "max_kb_per_request", SETTING_RW, BLKSECTGET, BLKSECTSET, TYPE_INTA, 1, 255, 1, 2, &max_sectors[major][minor], NULL);
2777 ide_add_setting(drive, "dsc_overlap", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
2778 @@ -2733,7 +2738,7 @@
2780 * default to read-only always and fix latter at the bottom
2782 - set_device_ro(MKDEV(HWIF(drive)->major, minor), 1);
2783 + set_device_ro(MKDEV(HWIF(drive)->major, minor), 0);
2784 set_blocksize(MKDEV(HWIF(drive)->major, minor), CD_FRAMESIZE);
2786 drive->special.all = 0;
2787 diff -u -r -N ../../linus/2.4/linux/drivers/scsi/Config.in linux/drivers/scsi/Config.in
2788 --- ../../linus/2.4/linux/drivers/scsi/Config.in Tue Aug 6 21:15:02 2002
2789 +++ linux/drivers/scsi/Config.in Tue Aug 6 21:23:16 2002
2792 comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs'
2794 -#if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
2795 - bool ' Enable extra checks in new queueing code' CONFIG_SCSI_DEBUG_QUEUES
2798 bool ' Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN
2800 bool ' Verbose SCSI error reporting (kernel size +=12K)' CONFIG_SCSI_CONSTANTS
2801 diff -u -r -N ../../linus/2.4/linux/drivers/scsi/scsi_merge.c linux/drivers/scsi/scsi_merge.c
2802 --- ../../linus/2.4/linux/drivers/scsi/scsi_merge.c Tue Aug 6 21:15:21 2002
2803 +++ linux/drivers/scsi/scsi_merge.c Tue Aug 6 21:23:18 2002
2806 #define DMA_SEGMENT_SIZE_LIMITED
2808 -#ifdef CONFIG_SCSI_DEBUG_QUEUES
2810 - * Enable a bunch of additional consistency checking. Turn this off
2811 - * if you are benchmarking.
2813 static int dump_stats(struct request *req,
2817 panic("Ththththaats all folks. Too dangerous to continue.\n");
2822 - * Simple sanity check that we will use for the first go around
2823 - * in order to ensure that we are doing the counting correctly.
2824 - * This can be removed for optimization.
2826 -#define SANITY_CHECK(req, _CLUSTER, _DMA) \
2827 - if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA, NULL) ) \
2829 - printk("Incorrect segment count at 0x%p", current_text_addr()); \
2830 - dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA, NULL)); \
2833 -#define SANITY_CHECK(req, _CLUSTER, _DMA)
2836 static void dma_exhausted(Scsi_Cmnd * SCpnt, int i)
2843 - SANITY_CHECK(req, _CLUSTER, _DMA); \
2844 ret = __scsi_ ## _BACK_FRONT ## _merge_fn(q, \
2851 - SANITY_CHECK(req, _CLUSTER, _DMA); \
2852 ret = __scsi_merge_requests_fn(q, req, next, max_segments, _CLUSTER, _DMA); \
2855 @@ -829,11 +806,7 @@
2857 * First we need to know how many scatter gather segments are needed.
2859 - if (!sg_count_valid) {
2860 - count = __count_segments(req, use_clustering, dma_host, NULL);
2862 - count = req->nr_segments;
2864 + count = __count_segments(req, use_clustering, dma_host, NULL);
2867 * If the dma pool is nearly empty, then queue a minimal request
2870 if (count != SCpnt->use_sg) {
2871 printk("Incorrect number of segments after building list\n");
2872 -#ifdef CONFIG_SCSI_DEBUG_QUEUES
2873 dump_stats(req, use_clustering, dma_host, count);
2878 diff -u -r -N ../../linus/2.4/linux/drivers/scsi/sr.c linux/drivers/scsi/sr.c
2879 --- ../../linus/2.4/linux/drivers/scsi/sr.c Tue Aug 6 21:15:22 2002
2880 +++ linux/drivers/scsi/sr.c Tue Aug 6 21:23:18 2002
2882 * Modified by Jens Axboe <axboe@suse.de> - support DVD-RAM
2883 * transparently and loose the GHOST hack
2885 + * Modified by Jens Axboe <axboe@suse.de> - support packet writing
2886 + * through generic packet layer.
2888 * Modified by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
2889 * check resource allocation in sr_init and some cleanups
2893 #include <linux/module.h>
2894 +#include <linux/config.h>
2896 #include <linux/fs.h>
2897 #include <linux/kernel.h>
2901 cmd[3] = cmd[5] = 0;
2902 - rc = sr_do_ioctl(i, cmd, buffer, 128, 1, SCSI_DATA_READ, NULL);
2903 + rc = sr_do_ioctl(i, cmd, buffer, 128, 1, SCSI_DATA_READ, NULL, SR_TIMEOUT);
2906 /* failed, drive doesn't have capabilities mode page */
2907 @@ -748,16 +752,13 @@
2908 if ((buffer[n + 2] & 0x8) == 0)
2909 /* not a DVD drive */
2910 scsi_CDs[i].cdi.mask |= CDC_DVD;
2911 - if ((buffer[n + 3] & 0x20) == 0) {
2912 + if ((buffer[n + 3] & 0x20) == 0)
2913 /* can't write DVD-RAM media */
2914 scsi_CDs[i].cdi.mask |= CDC_DVD_RAM;
2916 - scsi_CDs[i].device->writeable = 1;
2918 if ((buffer[n + 3] & 0x10) == 0)
2919 /* can't write DVD-R media */
2920 scsi_CDs[i].cdi.mask |= CDC_DVD_R;
2921 - if ((buffer[n + 3] & 0x2) == 0)
2922 + if ((buffer[n + 3] & 0x02) == 0)
2923 /* can't write CD-RW media */
2924 scsi_CDs[i].cdi.mask |= CDC_CD_RW;
2925 if ((buffer[n + 3] & 0x1) == 0)
2926 @@ -777,6 +778,10 @@
2927 /*else I don't think it can close its tray
2928 scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */
2930 + if (~scsi_CDs[i].cdi.mask & (CDC_DVD_RAM | CDC_CD_RW))
2931 + /* can write to DVD-RAM or CD-RW */
2932 + scsi_CDs[i].device->writeable = 1;
2934 scsi_free(buffer, 512);
2937 @@ -792,7 +797,10 @@
2938 if (device->scsi_level <= SCSI_2)
2939 cgc->cmd[1] |= device->lun << 5;
2941 - cgc->stat = sr_do_ioctl(MINOR(cdi->dev), cgc->cmd, cgc->buffer, cgc->buflen, cgc->quiet, cgc->data_direction, cgc->sense);
2942 + if (cgc->timeout <= 0)
2943 + cgc->timeout = 5 * HZ;
2945 + cgc->stat = sr_do_ioctl(MINOR(cdi->dev), cgc->cmd, cgc->buffer, cgc->buflen, cgc->quiet, cgc->data_direction, cgc->sense, cgc->timeout);
2949 diff -u -r -N ../../linus/2.4/linux/drivers/scsi/sr.h linux/drivers/scsi/sr.h
2950 --- ../../linus/2.4/linux/drivers/scsi/sr.h Tue Aug 6 21:15:22 2002
2951 +++ linux/drivers/scsi/sr.h Tue Aug 6 21:23:18 2002
2954 extern Scsi_CD *scsi_CDs;
2956 -int sr_do_ioctl(int, unsigned char *, void *, unsigned, int, int, struct request_sense *);
2957 +int sr_do_ioctl(int, unsigned char *, void *, unsigned, int, int, struct request_sense *, int);
2959 int sr_lock_door(struct cdrom_device_info *, int);
2960 int sr_tray_move(struct cdrom_device_info *, int);
2961 diff -u -r -N ../../linus/2.4/linux/drivers/scsi/sr_ioctl.c linux/drivers/scsi/sr_ioctl.c
2962 --- ../../linus/2.4/linux/drivers/scsi/sr_ioctl.c Tue Aug 6 21:15:22 2002
2963 +++ linux/drivers/scsi/sr_ioctl.c Tue Aug 6 21:23:18 2002
2965 sr_cmd[6] = trk1_te.cdte_addr.msf.minute;
2966 sr_cmd[7] = trk1_te.cdte_addr.msf.second;
2967 sr_cmd[8] = trk1_te.cdte_addr.msf.frame;
2968 - return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL);
2969 + return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
2972 /* We do our own retries because we want to know what the specific
2973 error code is. Normally the UNIT_ATTENTION code will automatically
2974 clear after one error */
2976 -int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet, int readwrite, struct request_sense *sense)
2977 +int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet, int readwrite, struct request_sense *sense, int timeout)
2979 Scsi_Request *SRpnt;
2984 scsi_wait_req(SRpnt, (void *) sr_cmd, (void *) buffer, buflength,
2985 - IOCTL_TIMEOUT, IOCTL_RETRIES);
2986 + timeout, IOCTL_RETRIES);
2988 req = &SRpnt->sr_request;
2989 if (SRpnt->sr_buffer && req->buffer && SRpnt->sr_buffer != req->buffer) {
2991 sr_cmd[1] = (scsi_CDs[minor].device->scsi_level <= SCSI_2) ?
2992 ((scsi_CDs[minor].device->lun) << 5) : 0;
2993 sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
2994 - return sr_do_ioctl(minor, sr_cmd, NULL, 0, 1, SCSI_DATA_NONE, NULL);
2995 + return sr_do_ioctl(minor, sr_cmd, NULL, 0, 1, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
2998 int sr_tray_move(struct cdrom_device_info *cdi, int pos)
3000 sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
3001 sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */ ;
3003 - return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL);
3004 + return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
3007 int sr_lock_door(struct cdrom_device_info *cdi, int lock)
3012 - result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0, SCSI_DATA_READ, NULL);
3013 + result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3015 memcpy(mcn->medium_catalog_number, buffer + 9, 13);
3016 mcn->medium_catalog_number[13] = 0;
3018 sr_cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */
3019 sr_cmd[3] = speed & 0xff; /* LSB */
3021 - if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL))
3022 + if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT))
3027 sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
3028 sr_cmd[8] = 12; /* LSB of length */
3030 - result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1, SCSI_DATA_READ, NULL);
3031 + result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3033 tochdr->cdth_trk0 = buffer[2];
3034 tochdr->cdth_trk1 = buffer[3];
3036 sr_cmd[6] = tocentry->cdte_track;
3037 sr_cmd[8] = 12; /* LSB of length */
3039 - result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0, SCSI_DATA_READ, NULL);
3040 + result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3042 tocentry->cdte_ctrl = buffer[5] & 0xf;
3043 tocentry->cdte_adr = buffer[5] >> 4;
3045 sr_cmd[7] = ti->cdti_trk1;
3046 sr_cmd[8] = ti->cdti_ind1;
3048 - result = sr_do_ioctl(target, sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL);
3049 + result = sr_do_ioctl(target, sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
3050 if (result == -EDRIVE_CANT_DO_THIS)
3051 result = sr_fake_playtrkind(cdi, ti);
3057 - return sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL);
3058 + return sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3063 cmd[4] = (unsigned char) (lba >> 8) & 0xff;
3064 cmd[5] = (unsigned char) lba & 0xff;
3066 - rc = sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL);
3067 + rc = sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
3071 diff -u -r -N ../../linus/2.4/linux/drivers/scsi/sr_vendor.c linux/drivers/scsi/sr_vendor.c
3072 --- ../../linus/2.4/linux/drivers/scsi/sr_vendor.c Tue Aug 6 21:15:22 2002
3073 +++ linux/drivers/scsi/sr_vendor.c Tue Aug 6 21:23:18 2002
3076 #define VENDOR_ID (scsi_CDs[minor].vendor)
3078 +#define VENDOR_TIMEOUT 30*HZ
3080 void sr_vendor_init(int minor)
3082 #ifndef CONFIG_BLK_DEV_SR_VENDOR
3084 modesel->density = density;
3085 modesel->block_length_med = (blocklength >> 8) & 0xff;
3086 modesel->block_length_lo = blocklength & 0xff;
3087 - if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0, SCSI_DATA_WRITE, NULL))) {
3088 + if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0, SCSI_DATA_WRITE, NULL, VENDOR_TIMEOUT))) {
3089 scsi_CDs[minor].device->sector_size = blocklength;
3093 (scsi_CDs[minor].device->lun << 5) : 0;
3096 - rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL);
3097 + rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3100 if ((buffer[0] << 8) + buffer[1] < 0x0a) {
3102 (scsi_CDs[minor].device->lun << 5) : 0;
3105 - rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1, SCSI_DATA_READ, NULL);
3106 + rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3109 if (buffer[14] != 0 && buffer[14] != 0xb0) {
3111 cmd[1] = (scsi_CDs[minor].device->scsi_level <= SCSI_2) ?
3112 (scsi_CDs[minor].device->lun << 5) : 0;
3114 - rc = sr_do_ioctl(minor, cmd, buffer, 4, 1, SCSI_DATA_READ, NULL);
3115 + rc = sr_do_ioctl(minor, cmd, buffer, 4, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3116 if (rc == -EINVAL) {
3117 printk(KERN_INFO "sr%d: Hmm, seems the drive "
3118 "doesn't support multisession CD's\n", minor);
3120 (scsi_CDs[minor].device->lun << 5) : 0;
3123 - rc = sr_do_ioctl(minor, cmd, buffer, 0x04, 1, SCSI_DATA_READ, NULL);
3124 + rc = sr_do_ioctl(minor, cmd, buffer, 0x04, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3129 cmd[6] = rc & 0x7f; /* number of last session */
3132 - rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL);
3133 + rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
3137 diff -u -r -N ../../linus/2.4/linux/fs/udf/balloc.c linux/fs/udf/balloc.c
3138 --- ../../linus/2.4/linux/fs/udf/balloc.c Tue Aug 6 21:16:21 2002
3139 +++ linux/fs/udf/balloc.c Thu Aug 8 20:44:32 2002
3142 obloc = nbloc = UDF_I_LOCATION(table);
3144 - obh = nbh = udf_tread(sb, udf_get_lb_pblock(sb, nbloc, 0));
3145 - atomic_inc(&nbh->b_count);
3148 while (count && (etype =
3149 udf_next_aext(table, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
3151 udf_write_aext(table, obloc, &oextoffset, eloc, elen, obh, 1);
3154 - if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
3159 @@ -580,7 +579,10 @@
3161 loffset = nextoffset;
3162 aed->lengthAllocDescs = cpu_to_le32(adsize);
3163 - sptr = (obh)->b_data + nextoffset - adsize;
3165 + sptr = UDF_I_DATA(inode) + nextoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode) - adsize;
3167 + sptr = obh->b_data + nextoffset - adsize;
3168 dptr = nbh->b_data + sizeof(struct allocExtDesc);
3169 memcpy(dptr, sptr, adsize);
3170 nextoffset = sizeof(struct allocExtDesc) + adsize;
3172 aed->lengthAllocDescs = cpu_to_le32(0);
3173 sptr = (obh)->b_data + nextoffset;
3174 nextoffset = sizeof(struct allocExtDesc);
3176 - if (memcmp(&UDF_I_LOCATION(table), &obloc, sizeof(lb_addr)))
3180 aed = (struct allocExtDesc *)(obh)->b_data;
3181 aed->lengthAllocDescs =
3182 @@ -631,15 +633,20 @@
3186 - udf_update_tag(obh->b_data, loffset);
3187 - mark_buffer_dirty(obh);
3190 + udf_update_tag(obh->b_data, loffset);
3191 + mark_buffer_dirty(obh);
3194 + mark_inode_dirty(table);
3197 if (elen) /* It's possible that stealing the block emptied the extent */
3199 udf_write_aext(table, nbloc, &nextoffset, eloc, elen, nbh, 1);
3201 - if (!memcmp(&UDF_I_LOCATION(table), &nbloc, sizeof(lb_addr)))
3204 UDF_I_LENALLOC(table) += adsize;
3205 mark_inode_dirty(table);
3207 extoffset = sizeof(struct unallocSpaceEntry);
3208 bloc = UDF_I_LOCATION(table);
3210 - bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0));
3212 eloc.logicalBlockNum = 0xFFFFFFFF;
3214 while (first_block != eloc.logicalBlockNum && (etype =
3216 extoffset = sizeof(struct unallocSpaceEntry);
3217 bloc = UDF_I_LOCATION(table);
3219 - goal_bh = bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0));
3220 - atomic_inc(&goal_bh->b_count);
3221 + goal_bh = bh = NULL;
3223 while (spread && (etype =
3224 udf_next_aext(table, &bloc, &extoffset, &eloc, &elen, &bh, 1)) != -1)
3225 diff -u -r -N ../../linus/2.4/linux/fs/udf/dir.c linux/fs/udf/dir.c
3226 --- ../../linus/2.4/linux/fs/udf/dir.c Tue Aug 6 21:16:21 2002
3227 +++ linux/fs/udf/dir.c Thu Aug 8 20:44:32 2002
3229 nf_pos = (udf_ext0_offset(dir) >> 2);
3231 fibh.soffset = fibh.eoffset = (nf_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
3232 - if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2),
3233 + if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
3234 + fibh.sbh = fibh.ebh = NULL;
3235 + else if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2),
3236 &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
3238 offset >>= dir->i_sb->s_blocksize_bits;
3239 @@ -136,40 +138,40 @@
3246 - udf_release_data(bh);
3250 - if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
3252 - udf_release_data(bh);
3256 - if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9))-1)))
3258 - i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
3259 - if (i+offset > (elen >> dir->i_sb->s_blocksize_bits))
3260 - i = (elen >> dir->i_sb->s_blocksize_bits)-offset;
3261 - for (num=0; i>0; i--)
3262 + if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
3264 - block = udf_get_lb_pblock(dir->i_sb, eloc, offset+i);
3265 - tmp = udf_tgetblk(dir->i_sb, block);
3266 - if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
3270 + udf_release_data(bh);
3275 + if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9))-1)))
3277 - ll_rw_block(READA, num, bha);
3278 - for (i=0; i<num; i++)
3280 + i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
3281 + if (i+offset > (elen >> dir->i_sb->s_blocksize_bits))
3282 + i = (elen >> dir->i_sb->s_blocksize_bits)-offset;
3283 + for (num=0; i>0; i--)
3285 + block = udf_get_lb_pblock(dir->i_sb, eloc, offset+i);
3286 + tmp = udf_tgetblk(dir->i_sb, block);
3287 + if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
3294 + ll_rw_block(READA, num, bha);
3295 + for (i=0; i<num; i++)
3302 + udf_release_data(bh);
3306 while ( nf_pos < size )
3308 diff -u -r -N ../../linus/2.4/linux/fs/udf/directory.c linux/fs/udf/directory.c
3309 --- ../../linus/2.4/linux/fs/udf/directory.c Tue Aug 6 21:16:21 2002
3310 +++ linux/fs/udf/directory.c Thu Aug 8 20:44:32 2002
3314 #include "udfdecl.h"
3317 #include <linux/fs.h>
3318 #include <linux/string.h>
3321 fibh->soffset = fibh->eoffset;
3323 + if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
3325 + fi = udf_get_fileident(UDF_I_DATA(dir) - udf_file_entry_alloc_offset(dir),
3326 + dir->i_sb->s_blocksize, &(fibh->eoffset));
3331 + *nf_pos += ((fibh->eoffset - fibh->soffset) >> 2);
3333 + memcpy((uint8_t *)cfi, (uint8_t *)fi, sizeof(struct fileIdentDesc));
3338 if (fibh->eoffset == dir->i_sb->s_blocksize)
3340 int lextoffset = *extoffset;
3341 @@ -275,53 +291,43 @@
3345 -udf_get_fileshortad(void * buffer, int maxoffset, int *offset, int inc)
3346 +udf_get_fileshortad(uint8_t *ptr, int maxoffset, int *offset, int inc)
3352 - if ( (!buffer) || (!offset) )
3353 + if ( (!ptr) || (!offset) )
3355 printk(KERN_ERR "udf: udf_get_fileshortad() invalidparms\n");
3359 - ptr = (uint8_t *)buffer;
3361 - if ( (*offset > 0) && (*offset < maxoffset) )
3364 + if ( (*offset < 0) || ((*offset + sizeof(short_ad)) > maxoffset) )
3367 - if ((sa = (short_ad *)ptr)->extLength == 0)
3368 + else if ((sa = (short_ad *)ptr)->extLength == 0)
3371 - (*offset) += sizeof(short_ad);
3374 + *offset += sizeof(short_ad);
3379 -udf_get_filelongad(void * buffer, int maxoffset, int * offset, int inc)
3380 +udf_get_filelongad(uint8_t *ptr, int maxoffset, int * offset, int inc)
3386 - if ( (!buffer) || !(offset) )
3387 + if ( (!ptr) || (!offset) )
3389 printk(KERN_ERR "udf: udf_get_filelongad() invalidparms\n");
3393 - ptr = (uint8_t *)buffer;
3395 - if ( (*offset > 0) && (*offset < maxoffset) )
3398 + if ( (*offset < 0) || ((*offset + sizeof(long_ad)) > maxoffset) )
3401 - if ((la = (long_ad *)ptr)->extLength == 0)
3402 + else if ((la = (long_ad *)ptr)->extLength == 0)
3405 - (*offset) += sizeof(long_ad);
3408 + *offset += sizeof(long_ad);
3411 diff -u -r -N ../../linus/2.4/linux/fs/udf/ecma_167.h linux/fs/udf/ecma_167.h
3412 --- ../../linus/2.4/linux/fs/udf/ecma_167.h Tue Aug 6 21:16:21 2002
3413 +++ linux/fs/udf/ecma_167.h Tue Aug 6 21:23:58 2002
3415 #define FE_RECORD_FMT_CRLF 0x0A
3416 #define FE_RECORD_FMT_LFCR 0x0B
3418 -#define Record Display Attributes (ECMA 167r3 4/14.9.8) */
3419 +/* Record Display Attributes (ECMA 167r3 4/14.9.8) */
3420 #define FE_RECORD_DISPLAY_ATTR_UNDEF 0x00
3421 #define FE_RECORD_DISPLAY_ATTR_1 0x01
3422 #define FE_RECORD_DISPLAY_ATTR_2 0x02
3423 diff -u -r -N ../../linus/2.4/linux/fs/udf/file.c linux/fs/udf/file.c
3424 --- ../../linus/2.4/linux/fs/udf/file.c Tue Aug 6 21:16:21 2002
3425 +++ linux/fs/udf/file.c Thu Aug 8 20:44:32 2002
3427 static int udf_adinicb_readpage(struct file *file, struct page * page)
3429 struct inode *inode = page->mapping->host;
3431 - struct buffer_head *bh;
3436 if (!PageLocked(page))
3440 memset(kaddr, 0, PAGE_CACHE_SIZE);
3441 - block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
3442 - bh = sb_bread(inode->i_sb, block);
3445 - SetPageError(page);
3449 - memcpy(kaddr, bh->b_data + udf_ext0_offset(inode), inode->i_size);
3451 + memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), inode->i_size);
3452 flush_dcache_page(page);
3453 SetPageUptodate(page);
3461 static int udf_adinicb_writepage(struct page *page)
3463 struct inode *inode = page->mapping->host;
3465 - struct buffer_head *bh;
3470 if (!PageLocked(page))
3474 - block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
3475 - bh = sb_bread(inode->i_sb, block);
3478 - SetPageError(page);
3482 - memcpy(bh->b_data + udf_ext0_offset(inode), kaddr, inode->i_size);
3483 - mark_buffer_dirty(bh);
3485 + memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), kaddr, inode->i_size);
3486 + mark_inode_dirty(inode);
3487 SetPageUptodate(page);
3495 static int udf_adinicb_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
3496 @@ -115,31 +87,17 @@
3497 static int udf_adinicb_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
3499 struct inode *inode = page->mapping->host;
3501 - struct buffer_head *bh;
3503 char *kaddr = page_address(page);
3506 - block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
3507 - bh = sb_bread(inode->i_sb, block);
3510 - SetPageError(page);
3514 - memcpy(bh->b_data + udf_file_entry_alloc_offset(inode) + offset,
3515 + memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset,
3516 kaddr + offset, to - offset);
3517 - mark_buffer_dirty(bh);
3519 + mark_inode_dirty(inode);
3520 SetPageUptodate(page);
3523 /* only one page here */
3524 if (to > inode->i_size)
3530 struct address_space_operations udf_adinicb_aops = {
3534 int result = -EINVAL;
3535 - struct buffer_head *bh = NULL;
3537 - uint8_t *ea = NULL;
3539 if ( permission(inode, MAY_READ) != 0 )
3545 - /* first, do ioctls that don't need to udf_read */
3548 case UDF_GETVOLIDENT:
3549 @@ -266,50 +220,16 @@
3555 - /* ok, we need to read the inode */
3556 - bh = udf_tread(inode->i_sb,
3557 - udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
3561 - udf_debug("bread failed (inode=%ld)\n", inode->i_ino);
3565 - if (UDF_I_EXTENDED_FE(inode) == 0)
3567 - struct fileEntry *fe;
3569 - fe = (struct fileEntry *)bh->b_data;
3570 - eaicb = lela_to_cpu(fe->extendedAttrICB);
3571 - if (UDF_I_LENEATTR(inode))
3572 - ea = fe->extendedAttr;
3576 - struct extendedFileEntry *efe;
3578 - efe = (struct extendedFileEntry *)bh->b_data;
3579 - eaicb = lela_to_cpu(efe->extendedAttrICB);
3580 - if (UDF_I_LENEATTR(inode))
3581 - ea = efe->extendedAttr;
3587 result = put_user(UDF_I_LENEATTR(inode), (int *)arg);
3590 case UDF_GETEABLOCK:
3591 - result = copy_to_user((char *)arg, ea,
3592 + result = copy_to_user((char *)arg, UDF_I_DATA(inode),
3593 UDF_I_LENEATTR(inode)) ? -EFAULT : 0;
3597 - udf_release_data(bh);
3601 diff -u -r -N ../../linus/2.4/linux/fs/udf/ialloc.c linux/fs/udf/ialloc.c
3602 --- ../../linus/2.4/linux/fs/udf/ialloc.c Tue Aug 6 21:16:21 2002
3603 +++ linux/fs/udf/ialloc.c Thu Aug 8 20:44:32 2002
3605 #include <linux/locks.h>
3606 #include <linux/quotaops.h>
3607 #include <linux/udf_fs.h>
3608 +#include <linux/slab.h>
3612 @@ -130,13 +131,20 @@
3613 inode->i_blocks = 0;
3614 UDF_I_LENEATTR(inode) = 0;
3615 UDF_I_LENALLOC(inode) = 0;
3616 + UDF_I_USE(inode) = 0;
3617 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE))
3619 - UDF_I_EXTENDED_FE(inode) = 1;
3620 + UDF_I_EFE(inode) = 1;
3621 UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
3622 + UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
3623 + memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
3626 - UDF_I_EXTENDED_FE(inode) = 0;
3628 + UDF_I_EFE(inode) = 0;
3629 + UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
3630 + memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct fileEntry));
3632 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
3633 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
3634 else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
3636 UDF_I_CRTIME(inode) = CURRENT_TIME;
3637 UDF_I_UMTIME(inode) = UDF_I_UCTIME(inode) =
3638 UDF_I_UCRTIME(inode) = CURRENT_UTIME;
3639 - UDF_I_NEW_INODE(inode) = 1;
3640 insert_inode_hash(inode);
3641 mark_inode_dirty(inode);
3643 diff -u -r -N ../../linus/2.4/linux/fs/udf/inode.c linux/fs/udf/inode.c
3644 --- ../../linus/2.4/linux/fs/udf/inode.c Tue Aug 6 21:16:21 2002
3645 +++ linux/fs/udf/inode.c Thu Aug 8 20:44:32 2002
3647 #include <linux/mm.h>
3648 #include <linux/smp_lock.h>
3649 #include <linux/module.h>
3650 +#include <linux/slab.h>
3654 @@ -122,6 +123,11 @@
3658 +void udf_clear_inode(struct inode *inode)
3660 + kfree(UDF_I_DATA(inode));
3663 void udf_discard_prealloc(struct inode * inode)
3665 if (inode->i_size && inode->i_size != UDF_I_LENEXTENTS(inode) &&
3666 @@ -162,10 +168,8 @@
3668 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
3670 - struct buffer_head *bh = NULL;
3675 /* from now on we have normal address_space methods */
3676 inode->i_data.a_ops = &udf_aops;
3677 @@ -180,10 +184,6 @@
3681 - block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
3682 - bh = udf_tread(inode->i_sb, block);
3685 page = grab_cache_page(inode->i_mapping, 0);
3686 if (!PageLocked(page))
3688 @@ -192,21 +192,19 @@
3690 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
3691 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
3692 - memcpy(kaddr, bh->b_data + udf_file_entry_alloc_offset(inode),
3693 + memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
3694 UDF_I_LENALLOC(inode));
3695 flush_dcache_page(page);
3696 SetPageUptodate(page);
3699 - memset(bh->b_data + udf_file_entry_alloc_offset(inode),
3700 - 0, UDF_I_LENALLOC(inode));
3701 + memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
3702 + UDF_I_LENALLOC(inode));
3703 UDF_I_LENALLOC(inode) = 0;
3704 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
3705 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
3707 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
3708 - mark_buffer_dirty_inode(bh, inode);
3709 - udf_release_data(bh);
3711 inode->i_data.a_ops->writepage(page);
3712 page_cache_release(page);
3713 @@ -221,18 +219,21 @@
3714 struct buffer_head *sbh = NULL, *dbh = NULL;
3716 uint32_t elen, extoffset;
3717 + uint8_t alloctype;
3719 struct udf_fileident_bh sfibh, dfibh;
3720 loff_t f_pos = udf_ext0_offset(inode) >> 2;
3721 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
3722 struct fileIdentDesc cfi, *sfi, *dfi;
3724 + if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
3725 + alloctype = ICBTAG_FLAG_AD_SHORT;
3727 + alloctype = ICBTAG_FLAG_AD_LONG;
3731 - if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
3732 - UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
3734 - UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
3735 + UDF_I_ALLOCTYPE(inode) = alloctype;
3736 mark_inode_dirty(inode);
3740 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
3743 - sbh = udf_tread(inode->i_sb, inode->i_ino);
3746 dbh = udf_tgetblk(inode->i_sb, newblock);
3749 @@ -261,18 +259,19 @@
3750 mark_buffer_dirty_inode(dbh, inode);
3752 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
3753 - sfibh.sbh = sfibh.ebh = sbh;
3754 + sbh = sfibh.sbh = sfibh.ebh = NULL;
3755 dfibh.soffset = dfibh.eoffset = 0;
3756 dfibh.sbh = dfibh.ebh = dbh;
3757 while ( (f_pos < size) )
3759 + UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
3760 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
3763 - udf_release_data(sbh);
3764 udf_release_data(dbh);
3767 + UDF_I_ALLOCTYPE(inode) = alloctype;
3768 sfi->descTag.tagLocation = *block;
3769 dfibh.soffset = dfibh.eoffset;
3770 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
3771 @@ -280,21 +279,15 @@
3772 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
3773 sfi->fileIdent + sfi->lengthOfImpUse))
3775 - udf_release_data(sbh);
3776 + UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
3777 udf_release_data(dbh);
3781 mark_buffer_dirty_inode(dbh, inode);
3783 - memset(sbh->b_data + udf_file_entry_alloc_offset(inode),
3784 - 0, UDF_I_LENALLOC(inode));
3786 + memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
3787 UDF_I_LENALLOC(inode) = 0;
3788 - if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
3789 - UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
3791 - UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
3792 bloc = UDF_I_LOCATION(inode);
3793 eloc.logicalBlockNum = *block;
3794 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
3796 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
3797 /* UniqueID stuff */
3799 - mark_buffer_dirty(sbh);
3800 udf_release_data(sbh);
3801 mark_inode_dirty(inode);
3802 inode->i_version ++;
3805 if (elen > numalloc)
3807 - laarr[c].extLength -=
3808 + laarr[i].extLength -=
3809 (numalloc << inode->i_sb->s_blocksize_bits);
3813 void udf_truncate(struct inode * inode)
3816 - struct buffer_head *bh;
3819 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3820 @@ -879,16 +870,8 @@
3824 - offset = (inode->i_size & (inode->i_sb->s_blocksize - 1)) +
3825 - udf_file_entry_alloc_offset(inode);
3827 - if ((bh = udf_tread(inode->i_sb,
3828 - udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0))))
3830 - memset(bh->b_data + offset, 0x00, inode->i_sb->s_blocksize - offset);
3831 - mark_buffer_dirty(bh);
3832 - udf_release_data(bh);
3834 + offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
3835 + memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
3836 UDF_I_LENALLOC(inode) = inode->i_size;
3839 @@ -1037,7 +1020,6 @@
3842 inode->i_version = ++event;
3843 - UDF_I_NEW_INODE(inode) = 0;
3845 fe = (struct fileEntry *)bh->b_data;
3846 efe = (struct extendedFileEntry *)bh->b_data;
3847 @@ -1049,14 +1031,28 @@
3849 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
3850 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
3851 - UDF_I_EXTENDED_FE(inode) = 1;
3853 + UDF_I_EFE(inode) = 1;
3854 + UDF_I_USE(inode) = 0;
3855 + UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
3856 + memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
3858 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
3859 - UDF_I_EXTENDED_FE(inode) = 0;
3861 + UDF_I_EFE(inode) = 0;
3862 + UDF_I_USE(inode) = 0;
3863 + UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
3864 + memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
3866 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
3868 + UDF_I_EFE(inode) = 0;
3869 + UDF_I_USE(inode) = 1;
3870 UDF_I_LENALLOC(inode) =
3872 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
3873 + UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
3874 + memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
3878 @@ -1079,7 +1075,7 @@
3879 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
3880 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
3882 - if (UDF_I_EXTENDED_FE(inode) == 0)
3883 + if (UDF_I_EFE(inode) == 0)
3885 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
3886 (inode->i_sb->s_blocksize_bits - 9);
3887 @@ -1325,19 +1321,11 @@
3888 udf_debug("bread failure\n");
3892 + memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
3894 fe = (struct fileEntry *)bh->b_data;
3895 efe = (struct extendedFileEntry *)bh->b_data;
3896 - if (UDF_I_NEW_INODE(inode) == 1)
3898 - if (UDF_I_EXTENDED_FE(inode) == 0)
3899 - memset(bh->b_data, 0x00, sizeof(struct fileEntry));
3901 - memset(bh->b_data, 0x00, sizeof(struct extendedFileEntry));
3902 - memset(bh->b_data + udf_file_entry_alloc_offset(inode) +
3903 - UDF_I_LENALLOC(inode), 0x0, inode->i_sb->s_blocksize -
3904 - udf_file_entry_alloc_offset(inode) - UDF_I_LENALLOC(inode));
3905 - UDF_I_NEW_INODE(inode) = 0;
3908 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
3910 @@ -1345,6 +1333,7 @@
3911 (struct unallocSpaceEntry *)bh->b_data;
3913 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
3914 + memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
3915 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
3917 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
3918 @@ -1415,8 +1404,9 @@
3919 udf_release_data(tbh);
3922 - if (UDF_I_EXTENDED_FE(inode) == 0)
3923 + if (UDF_I_EFE(inode) == 0)
3925 + memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
3926 fe->logicalBlocksRecorded = cpu_to_le64(
3927 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
3928 (inode->i_sb->s_blocksize_bits - 9));
3929 @@ -1439,6 +1429,7 @@
3933 + memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
3934 efe->objectSize = cpu_to_le64(inode->i_size);
3935 efe->logicalBlocksRecorded = cpu_to_le64(
3936 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
3937 @@ -1619,17 +1610,12 @@
3938 long_ad *lad = NULL;
3939 struct allocExtDesc *aed;
3945 - if (!(*bh = udf_tread(inode->i_sb,
3946 - udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
3948 - udf_debug("reading block %d failed!\n",
3949 - udf_get_lb_pblock(inode->i_sb, *bloc, 0));
3954 + ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
3956 + ptr = (*bh)->b_data + *extoffset;
3958 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
3959 adsize = sizeof(short_ad);
3960 @@ -1668,7 +1654,7 @@
3962 loffset = *extoffset;
3963 aed->lengthAllocDescs = cpu_to_le32(adsize);
3964 - sptr = (*bh)->b_data + *extoffset - adsize;
3965 + sptr = ptr - adsize;
3966 dptr = nbh->b_data + sizeof(struct allocExtDesc);
3967 memcpy(dptr, sptr, adsize);
3968 *extoffset = sizeof(struct allocExtDesc) + adsize;
3969 @@ -1677,10 +1663,10 @@
3971 loffset = *extoffset + adsize;
3972 aed->lengthAllocDescs = cpu_to_le32(0);
3973 - sptr = (*bh)->b_data + *extoffset;
3975 *extoffset = sizeof(struct allocExtDesc);
3977 - if (memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
3980 aed = (struct allocExtDesc *)(*bh)->b_data;
3981 aed->lengthAllocDescs =
3982 @@ -1720,18 +1706,23 @@
3986 - if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
3987 - udf_update_tag((*bh)->b_data, loffset);
3990 + if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
3991 + udf_update_tag((*bh)->b_data, loffset);
3993 + udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
3994 + mark_buffer_dirty_inode(*bh, inode);
3995 + udf_release_data(*bh);
3998 - udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
3999 - mark_buffer_dirty_inode(*bh, inode);
4000 - udf_release_data(*bh);
4001 + mark_inode_dirty(inode);
4005 etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);
4007 - if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
4010 UDF_I_LENALLOC(inode) += adsize;
4011 mark_inode_dirty(inode);
4012 @@ -1755,49 +1746,40 @@
4013 lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
4016 - short_ad *sad = NULL;
4017 - long_ad *lad = NULL;
4022 - if (!(bh = udf_tread(inode->i_sb,
4023 - udf_get_lb_pblock(inode->i_sb, bloc, 0))))
4025 - udf_debug("reading block %d failed!\n",
4026 - udf_get_lb_pblock(inode->i_sb, bloc, 0));
4031 + ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
4034 + ptr = bh->b_data + *extoffset;
4035 atomic_inc(&bh->b_count);
4037 - if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
4038 - adsize = sizeof(short_ad);
4039 - else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
4040 - adsize = sizeof(long_ad);
4045 switch (UDF_I_ALLOCTYPE(inode))
4047 case ICBTAG_FLAG_AD_SHORT:
4049 - sad = (short_ad *)((bh)->b_data + *extoffset);
4050 + short_ad *sad = (short_ad *)ptr;
4051 sad->extLength = cpu_to_le32(elen);
4052 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
4053 + adsize = sizeof(short_ad);
4056 case ICBTAG_FLAG_AD_LONG:
4058 - lad = (long_ad *)((bh)->b_data + *extoffset);
4059 + long_ad *lad = (long_ad *)ptr;
4060 lad->extLength = cpu_to_le32(elen);
4061 lad->extLocation = cpu_to_lelb(eloc);
4062 memset(lad->impUse, 0x00, sizeof(lad->impUse));
4063 + adsize = sizeof(long_ad);
4070 - if (memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
4073 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
4075 @@ -1806,30 +1788,28 @@
4076 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
4078 mark_buffer_dirty_inode(bh, inode);
4079 + udf_release_data(bh);
4083 mark_inode_dirty(inode);
4084 - mark_buffer_dirty(bh);
4088 *extoffset += adsize;
4089 - udf_release_data(bh);
4090 return (elen >> 30);
4093 int8_t udf_next_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
4094 lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
4096 - uint16_t tagIdent;
4101 + while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
4102 + (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
4104 - if (!(*bh = udf_tread(inode->i_sb,
4105 - udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
4107 + *extoffset = sizeof(struct allocExtDesc);
4108 + udf_release_data(*bh);
4109 + if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
4111 udf_debug("reading block %d failed!\n",
4112 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
4113 @@ -1837,154 +1817,38 @@
4117 - tagIdent = le16_to_cpu(((tag *)(*bh)->b_data)->tagIdent);
4119 - if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
4121 - if (tagIdent == TAG_IDENT_FE || tagIdent == TAG_IDENT_EFE ||
4122 - UDF_I_NEW_INODE(inode))
4124 - pos = udf_file_entry_alloc_offset(inode);
4125 - alen = UDF_I_LENALLOC(inode) + pos;
4127 - else if (tagIdent == TAG_IDENT_USE)
4129 - pos = sizeof(struct unallocSpaceEntry);
4130 - alen = UDF_I_LENALLOC(inode) + pos;
4135 - else if (tagIdent == TAG_IDENT_AED)
4137 - struct allocExtDesc *aed = (struct allocExtDesc *)(*bh)->b_data;
4139 - pos = sizeof(struct allocExtDesc);
4140 - alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
4145 - if (!(*extoffset))
4148 - switch (UDF_I_ALLOCTYPE(inode))
4150 - case ICBTAG_FLAG_AD_SHORT:
4154 - if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
4157 - if ((etype = le32_to_cpu(sad->extLength) >> 30) == (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
4159 - bloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
4161 - udf_release_data(*bh);
4163 - return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
4167 - eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
4168 - eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
4169 - *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
4173 - case ICBTAG_FLAG_AD_LONG:
4177 - if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
4180 - if ((etype = le32_to_cpu(lad->extLength) >> 30) == (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
4182 - *bloc = lelb_to_cpu(lad->extLocation);
4184 - udf_release_data(*bh);
4186 - return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
4190 - *eloc = lelb_to_cpu(lad->extLocation);
4191 - *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
4195 - case ICBTAG_FLAG_AD_IN_ICB:
4197 - if (UDF_I_LENALLOC(inode) == 0)
4199 - etype = (EXT_RECORDED_ALLOCATED >> 30);
4200 - *eloc = UDF_I_LOCATION(inode);
4201 - *elen = UDF_I_LENALLOC(inode);
4206 - udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
4213 - udf_debug("Empty Extent, inode=%ld, alloctype=%d, eloc=%d, elen=%d, etype=%d, extoffset=%d\n",
4214 - inode->i_ino, UDF_I_ALLOCTYPE(inode), eloc->logicalBlockNum, *elen, etype, *extoffset);
4215 - if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
4216 - *extoffset -= sizeof(short_ad);
4217 - else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
4218 - *extoffset -= sizeof(long_ad);
4223 int8_t udf_current_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
4224 lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
4234 - if (!(*bh = udf_tread(inode->i_sb,
4235 - udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
4237 - udf_debug("reading block %d failed!\n",
4238 - udf_get_lb_pblock(inode->i_sb, *bloc, 0));
4243 - if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
4245 - if (!(UDF_I_EXTENDED_FE(inode)))
4246 - pos = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
4248 - pos = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
4249 - alen = UDF_I_LENALLOC(inode) + pos;
4250 + if (!(*extoffset))
4251 + *extoffset = udf_file_entry_alloc_offset(inode);
4252 + ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
4253 + alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
4257 - struct allocExtDesc *aed = (struct allocExtDesc *)(*bh)->b_data;
4259 - pos = sizeof(struct allocExtDesc);
4260 - alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
4261 + if (!(*extoffset))
4262 + *extoffset = sizeof(struct allocExtDesc);
4263 + ptr = (*bh)->b_data + *extoffset;
4264 + alen = le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
4267 - if (!(*extoffset))
4270 switch (UDF_I_ALLOCTYPE(inode))
4272 case ICBTAG_FLAG_AD_SHORT:
4276 - if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
4277 + if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
4280 etype = le32_to_cpu(sad->extLength) >> 30;
4281 @@ -1997,7 +1861,7 @@
4285 - if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
4286 + if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
4289 etype = le32_to_cpu(lad->extLength) >> 30;
4290 @@ -2011,15 +1875,8 @@
4297 - udf_debug("Empty Extent!\n");
4298 - if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
4299 - *extoffset -= sizeof(short_ad);
4300 - else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
4301 - *extoffset -= sizeof(long_ad);
4306 int8_t udf_insert_aext(struct inode *inode, lb_addr bloc, int extoffset,
4307 @@ -2029,17 +1886,7 @@
4313 - if (!(bh = udf_tread(inode->i_sb,
4314 - udf_get_lb_pblock(inode->i_sb, bloc, 0))))
4316 - udf_debug("reading block %d failed!\n",
4317 - udf_get_lb_pblock(inode->i_sb, bloc, 0));
4323 atomic_inc(&bh->b_count);
4325 while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
4326 @@ -2063,19 +1910,11 @@
4328 struct allocExtDesc *aed;
4333 - if (!(nbh = udf_tread(inode->i_sb,
4334 - udf_get_lb_pblock(inode->i_sb, nbloc, 0))))
4336 - udf_debug("reading block %d failed!\n",
4337 - udf_get_lb_pblock(inode->i_sb, nbloc, 0));
4342 atomic_inc(&nbh->b_count);
4343 - atomic_inc(&nbh->b_count);
4344 + atomic_inc(&nbh->b_count);
4347 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
4348 adsize = sizeof(short_ad);
4349 @@ -2094,7 +1933,7 @@
4350 while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
4352 udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
4353 - if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
4357 udf_release_data(obh);
4358 @@ -2106,12 +1945,12 @@
4359 memset(&eloc, 0x00, sizeof(lb_addr));
4362 - if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
4365 udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
4366 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
4367 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
4368 - if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
4371 UDF_I_LENALLOC(inode) -= (adsize * 2);
4372 mark_inode_dirty(inode);
4373 @@ -2131,7 +1970,7 @@
4376 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
4377 - if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
4380 UDF_I_LENALLOC(inode) -= adsize;
4381 mark_inode_dirty(inode);
4382 @@ -2206,9 +2045,7 @@
4388 - udf_release_data(bh);
4389 + udf_release_data(bh);
4391 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
4392 return udf_fixed_to_variable(ret);
4393 diff -u -r -N ../../linus/2.4/linux/fs/udf/misc.c linux/fs/udf/misc.c
4394 --- ../../linus/2.4/linux/fs/udf/misc.c Tue Aug 6 21:16:21 2002
4395 +++ linux/fs/udf/misc.c Thu Aug 8 20:44:32 2002
4398 *bh = udf_tread(inode->i_sb, inode->i_ino);
4400 - if (UDF_I_EXTENDED_FE(inode) == 0)
4401 + if (UDF_I_EFE(inode) == 0)
4403 struct fileEntry *fe;
4407 *bh = udf_tread(inode->i_sb, inode->i_ino);
4409 - if (UDF_I_EXTENDED_FE(inode) == 0)
4410 + if (UDF_I_EFE(inode) == 0)
4412 struct fileEntry *fe;
4414 diff -u -r -N ../../linus/2.4/linux/fs/udf/namei.c linux/fs/udf/namei.c
4415 --- ../../linus/2.4/linux/fs/udf/namei.c Tue Aug 6 21:16:21 2002
4416 +++ linux/fs/udf/namei.c Thu Aug 8 20:44:32 2002
4418 uint8_t lfi = cfi->lengthFileIdent;
4419 int padlen = fibh->eoffset - fibh->soffset - liu - lfi -
4420 sizeof(struct fileIdentDesc);
4423 + if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
4426 offset = fibh->soffset + sizeof(struct fileIdentDesc);
4430 - if (offset + liu < 0)
4431 + if (adinicb || (offset + liu < 0))
4432 memcpy((uint8_t *)sfi->impUse, impuse, liu);
4433 else if (offset >= 0)
4434 memcpy(fibh->ebh->b_data + offset, impuse, liu);
4439 - if (offset + lfi < 0)
4440 + if (adinicb || (offset + lfi < 0))
4441 memcpy((uint8_t *)sfi->fileIdent + liu, fileident, lfi);
4442 else if (offset >= 0)
4443 memcpy(fibh->ebh->b_data + offset, fileident, lfi);
4448 - if (offset + padlen < 0)
4449 + if (adinicb || (offset + padlen < 0))
4450 memset((uint8_t *)sfi->padding + liu + lfi, 0x00, padlen);
4451 else if (offset >= 0)
4452 memset(fibh->ebh->b_data + offset, 0x00, padlen);
4454 checksum += ((uint8_t *)&cfi->descTag)[i];
4456 cfi->descTag.tagChecksum = checksum;
4457 - if (sizeof(struct fileIdentDesc) <= -fibh->soffset)
4458 + if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset))
4459 memcpy((uint8_t *)sfi, (uint8_t *)cfi, sizeof(struct fileIdentDesc));
4462 @@ -132,9 +136,14 @@
4463 sizeof(struct fileIdentDesc) + fibh->soffset);
4466 - if (fibh->sbh != fibh->ebh)
4467 - mark_buffer_dirty_inode(fibh->ebh, inode);
4468 - mark_buffer_dirty_inode(fibh->sbh, inode);
4470 + mark_inode_dirty(inode);
4473 + if (fibh->sbh != fibh->ebh)
4474 + mark_buffer_dirty_inode(fibh->ebh, inode);
4475 + mark_buffer_dirty_inode(fibh->sbh, inode);
4481 f_pos = (udf_ext0_offset(dir) >> 2);
4483 fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
4484 - if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
4485 + if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
4486 + fibh->sbh = fibh->ebh = NULL;
4487 + else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
4488 &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
4490 offset >>= dir->i_sb->s_blocksize_bits;
4491 @@ -175,6 +186,12 @@
4496 + if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
4498 + udf_release_data(bh);
4504 @@ -182,12 +199,6 @@
4508 - if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
4510 - udf_release_data(bh);
4514 while ( (f_pos < size) )
4516 fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
4518 f_pos = (udf_ext0_offset(dir) >> 2);
4520 fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
4521 - if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
4522 + if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
4523 + fibh->sbh = fibh->ebh = NULL;
4524 + else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
4525 &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
4527 offset >>= dir->i_sb->s_blocksize_bits;
4528 @@ -409,94 +422,89 @@
4534 block = UDF_I_LOCATION(dir).logicalBlockNum;
4536 - while ( (f_pos < size) )
4541 + block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0);
4542 + fibh->sbh = fibh->ebh = NULL;
4543 + fibh->soffset = fibh->eoffset = sb->s_blocksize;
4547 + while ( (f_pos < size) )
4549 + fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
4553 - fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
4557 - if (fibh->sbh != fibh->ebh)
4558 - udf_release_data(fibh->ebh);
4559 - udf_release_data(fibh->sbh);
4560 - udf_release_data(bh);
4565 - liu = le16_to_cpu(cfi->lengthOfImpUse);
4566 - lfi = cfi->lengthFileIdent;
4568 - if (fibh->sbh == fibh->ebh)
4569 - nameptr = fi->fileIdent + liu;
4570 + if (fibh->sbh != fibh->ebh)
4571 + udf_release_data(fibh->ebh);
4572 + udf_release_data(fibh->sbh);
4573 + udf_release_data(bh);
4578 + liu = le16_to_cpu(cfi->lengthOfImpUse);
4579 + lfi = cfi->lengthFileIdent;
4581 + if (fibh->sbh == fibh->ebh)
4582 + nameptr = fi->fileIdent + liu;
4585 + int poffset; /* Unpaded ending offset */
4587 + poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
4589 + if (poffset >= lfi)
4590 + nameptr = (char *)(fibh->ebh->b_data + poffset - lfi);
4593 - int poffset; /* Unpaded ending offset */
4595 - poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
4597 - if (poffset >= lfi)
4598 - nameptr = (char *)(fibh->ebh->b_data + poffset - lfi);
4602 - memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
4603 - memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
4606 + memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
4607 + memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
4610 - if ( (cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
4613 + if ( (cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
4615 + if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen)
4617 - if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen)
4618 + udf_release_data(bh);
4619 + cfi->descTag.tagSerialNum = cpu_to_le16(1);
4620 + cfi->fileVersionNum = cpu_to_le16(1);
4621 + cfi->fileCharacteristics = 0;
4622 + cfi->lengthFileIdent = namelen;
4623 + cfi->lengthOfImpUse = cpu_to_le16(0);
4624 + if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name))
4628 - udf_release_data(bh);
4629 - cfi->descTag.tagSerialNum = cpu_to_le16(1);
4630 - cfi->fileVersionNum = cpu_to_le16(1);
4631 - cfi->fileCharacteristics = 0;
4632 - cfi->lengthFileIdent = namelen;
4633 - cfi->lengthOfImpUse = cpu_to_le16(0);
4634 - if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name))
4647 - if (!lfi || !dentry)
4649 + if (!lfi || !dentry)
4652 - if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)) &&
4653 - udf_match(flen, fname, &(dentry->d_name)))
4655 - if (fibh->sbh != fibh->ebh)
4656 - udf_release_data(fibh->ebh);
4657 - udf_release_data(fibh->sbh);
4658 - udf_release_data(bh);
4666 - block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0);
4667 - if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
4668 + if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)) &&
4669 + udf_match(flen, fname, &(dentry->d_name)))
4671 - fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
4672 - fibh->soffset = fibh->eoffset = udf_file_entry_alloc_offset(dir);
4676 - fibh->sbh = fibh->ebh = NULL;
4677 - fibh->soffset = fibh->eoffset = sb->s_blocksize;
4678 + if (fibh->sbh != fibh->ebh)
4679 + udf_release_data(fibh->ebh);
4680 + udf_release_data(fibh->sbh);
4681 + udf_release_data(bh);
4690 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB &&
4691 @@ -533,13 +541,17 @@
4692 fibh->sbh = fibh->ebh;
4695 - if (UDF_I_ALLOCTYPE(dir) != ICBTAG_FLAG_AD_IN_ICB)
4696 + if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
4698 + block = UDF_I_LOCATION(dir).logicalBlockNum;
4699 + fi = (struct fileIdentDesc *)(UDF_I_DATA(dir) + fibh->soffset - udf_ext0_offset(dir));
4703 block = eloc.logicalBlockNum + ((elen - 1) >>
4704 dir->i_sb->s_blocksize_bits);
4706 - block = UDF_I_LOCATION(dir).logicalBlockNum;
4708 - fi = (struct fileIdentDesc *)(fibh->sbh->b_data + fibh->soffset);
4709 + fi = (struct fileIdentDesc *)(fibh->sbh->b_data + fibh->soffset);
4714 @@ -784,7 +796,10 @@
4715 f_pos = (udf_ext0_offset(dir) >> 2);
4717 fibh.soffset = fibh.eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
4718 - if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
4720 + if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
4721 + fibh.sbh = fibh.ebh = NULL;
4722 + else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
4723 &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
4725 offset >>= dir->i_sb->s_blocksize_bits;
4726 @@ -798,6 +813,12 @@
4731 + if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
4733 + udf_release_data(bh);
4743 - if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
4746 while ( (f_pos < size) )
4750 if (cfi.lengthFileIdent && (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) == 0)
4752 + if (fibh.sbh != fibh.ebh)
4753 + udf_release_data(fibh.ebh);
4754 + udf_release_data(fibh.sbh);
4755 udf_release_data(bh);
4758 diff -u -r -N ../../linus/2.4/linux/fs/udf/super.c linux/fs/udf/super.c
4759 --- ../../linus/2.4/linux/fs/udf/super.c Tue Aug 6 21:16:21 2002
4760 +++ linux/fs/udf/super.c Thu Aug 8 20:44:32 2002
4762 write_inode: udf_write_inode,
4763 put_inode: udf_put_inode,
4764 delete_inode: udf_delete_inode,
4765 + clear_inode: udf_clear_inode,
4766 put_super: udf_put_super,
4767 write_super: udf_write_super,
4769 @@ -313,10 +314,6 @@
4770 UDF_SB(sb)->s_gid = uopt.gid;
4771 UDF_SB(sb)->s_umask = uopt.umask;
4774 - *flags |= MS_RDONLY;
4777 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
4779 if (*flags & MS_RDONLY)
4780 @@ -1373,10 +1370,6 @@
4782 memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info));
4785 - sb->s_flags |= MS_RDONLY;
4788 if (!udf_parse_options((char *)options, &uopt))
4791 @@ -1488,8 +1481,8 @@
4794 udf_time_to_stamp(&ts, UDF_SB_RECORDTIME(sb), 0);
4795 - udf_info("UDF %s-%s (%s) Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
4796 - UDFFS_VERSION, UDFFS_RW ? "rw" : "ro", UDFFS_DATE,
4797 + udf_info("UDF %s (%s) Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
4798 + UDFFS_VERSION, UDFFS_DATE,
4799 UDF_SB_VOLIDENT(sb), ts.year, ts.month, ts.day, ts.hour, ts.minute,
4800 ts.typeAndTimezone);
4802 diff -u -r -N ../../linus/2.4/linux/fs/udf/symlink.c linux/fs/udf/symlink.c
4803 --- ../../linus/2.4/linux/fs/udf/symlink.c Tue Aug 6 21:16:21 2002
4804 +++ linux/fs/udf/symlink.c Thu Aug 8 20:44:32 2002
4808 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
4810 - bh = udf_tread(inode->i_sb, inode->i_ino);
4815 - symlink = bh->b_data + udf_file_entry_alloc_offset(inode);
4817 + symlink = UDF_I_DATA(inode) + UDF_I_LENALLOC(inode);
4820 bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
4821 diff -u -r -N ../../linus/2.4/linux/fs/udf/truncate.c linux/fs/udf/truncate.c
4822 --- ../../linus/2.4/linux/fs/udf/truncate.c Tue Aug 6 21:16:21 2002
4823 +++ linux/fs/udf/truncate.c Thu Aug 8 20:44:32 2002
4825 if (last_block - first_block > 0)
4827 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
4829 mark_inode_dirty(inode);
4832 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
4833 udf_free_blocks(inode->i_sb, inode, eloc, first_block, last_block - first_block);
4836 lenalloc = extoffset - adsize;
4838 - if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
4840 lenalloc -= udf_file_entry_alloc_offset(inode);
4842 lenalloc -= sizeof(struct allocExtDesc);
4843 @@ -107,15 +109,15 @@
4847 - if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
4848 - memset(bh->b_data, 0x00, udf_file_entry_alloc_offset(inode));
4852 memset(bh->b_data, 0x00, sizeof(struct allocExtDesc));
4853 udf_free_blocks(inode->i_sb, inode, bloc, 0, lelen);
4857 - if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
4860 UDF_I_LENALLOC(inode) = lenalloc;
4861 mark_inode_dirty(inode);
4865 udf_release_data(bh);
4868 + extoffset = sizeof(struct allocExtDesc);
4870 + bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, bloc, 0));
4872 lelen = (elen + inode->i_sb->s_blocksize - 1) >>
4873 inode->i_sb->s_blocksize_bits;
4874 @@ -152,15 +154,15 @@
4878 - if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
4879 - memset(bh->b_data, 0x00, udf_file_entry_alloc_offset(inode));
4883 memset(bh->b_data, 0x00, sizeof(struct allocExtDesc));
4884 udf_free_blocks(inode->i_sb, inode, bloc, 0, lelen);
4888 - if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
4891 UDF_I_LENALLOC(inode) = lenalloc;
4892 mark_inode_dirty(inode);
4893 diff -u -r -N ../../linus/2.4/linux/fs/udf/udf_i.h linux/fs/udf/udf_i.h
4894 --- ../../linus/2.4/linux/fs/udf/udf_i.h Tue Aug 6 21:16:21 2002
4895 +++ linux/fs/udf/udf_i.h Thu Aug 8 20:44:32 2002
4897 #define UDF_I_LENEXTENTS(X) ( UDF_I(X)->i_lenExtents )
4898 #define UDF_I_UNIQUE(X) ( UDF_I(X)->i_unique )
4899 #define UDF_I_ALLOCTYPE(X) ( UDF_I(X)->i_alloc_type )
4900 -#define UDF_I_EXTENDED_FE(X) ( UDF_I(X)->i_extended_fe )
4901 -#define UDF_I_STRAT4096(X) ( UDF_I(X)->i_strat_4096 )
4902 -#define UDF_I_NEW_INODE(X) ( UDF_I(X)->i_new_inode )
4903 +#define UDF_I_EFE(X) ( UDF_I(X)->i_efe )
4904 +#define UDF_I_USE(X) ( UDF_I(X)->i_use )
4905 +#define UDF_I_STRAT4096(X) ( UDF_I(X)->i_strat4096 )
4906 #define UDF_I_NEXT_ALLOC_BLOCK(X) ( UDF_I(X)->i_next_alloc_block )
4907 #define UDF_I_NEXT_ALLOC_GOAL(X) ( UDF_I(X)->i_next_alloc_goal )
4908 #define UDF_I_UMTIME(X) ( UDF_I(X)->i_umtime )
4909 #define UDF_I_UCTIME(X) ( UDF_I(X)->i_uctime )
4910 #define UDF_I_CRTIME(X) ( UDF_I(X)->i_crtime )
4911 #define UDF_I_UCRTIME(X) ( UDF_I(X)->i_ucrtime )
4912 +#define UDF_I_SAD(X) ( UDF_I(X)->i_ext.i_sad )
4913 +#define UDF_I_LAD(X) ( UDF_I(X)->i_ext.i_lad )
4914 +#define UDF_I_DATA(X) ( UDF_I(X)->i_ext.i_data )
4916 #endif /* !defined(_LINUX_UDF_I_H) */
4917 diff -u -r -N ../../linus/2.4/linux/fs/udf/udfdecl.h linux/fs/udf/udfdecl.h
4918 --- ../../linus/2.4/linux/fs/udf/udfdecl.h Tue Aug 6 21:16:21 2002
4919 +++ linux/fs/udf/udfdecl.h Thu Aug 8 20:44:32 2002
4921 #define CURRENT_UTIME (xtime.tv_usec)
4923 #define udf_file_entry_alloc_offset(inode)\
4924 - ((UDF_I_EXTENDED_FE(inode) ?\
4925 - sizeof(struct extendedFileEntry) :\
4926 - sizeof(struct fileEntry)) + UDF_I_LENEATTR(inode))
4927 + (UDF_I_USE(inode) ?\
4928 + sizeof(struct unallocSpaceEntry) :\
4929 + ((UDF_I_EFE(inode) ?\
4930 + sizeof(struct extendedFileEntry) :\
4931 + sizeof(struct fileEntry)) + UDF_I_LENEATTR(inode)))
4933 #define udf_ext0_offset(inode)\
4934 (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB ?\
4936 extern void udf_read_inode(struct inode *);
4937 extern void udf_put_inode(struct inode *);
4938 extern void udf_delete_inode(struct inode *);
4939 +extern void udf_clear_inode(struct inode *);
4940 extern void udf_write_inode(struct inode *, int);
4941 extern long udf_block_map(struct inode *, long);
4942 extern int8_t inode_bmap(struct inode *, int, lb_addr *, uint32_t *, lb_addr *, uint32_t *, uint32_t *, struct buffer_head **);
4945 extern struct fileIdentDesc * udf_get_fileident(void * buffer, int bufsize, int * offset);
4946 extern extent_ad * udf_get_fileextent(void * buffer, int bufsize, int * offset);
4947 -extern long_ad * udf_get_filelongad(void * buffer, int bufsize, int * offset, int);
4948 -extern short_ad * udf_get_fileshortad(void * buffer, int bufsize, int * offset, int);
4949 +extern long_ad * udf_get_filelongad(uint8_t *, int, int *, int);
4950 +extern short_ad * udf_get_fileshortad(uint8_t *, int, int *, int);
4951 extern uint8_t * udf_get_filead(struct fileEntry *, uint8_t *, int, int, int, int *);
4953 #endif /* __UDF_DECL_H */
4954 diff -u -r -N ../../linus/2.4/linux/include/linux/cdrom.h linux/include/linux/cdrom.h
4955 --- ../../linus/2.4/linux/include/linux/cdrom.h Tue Aug 6 21:17:10 2002
4956 +++ linux/include/linux/cdrom.h Tue Aug 6 21:24:33 2002
4958 /* Mode page codes for mode sense/set */
4959 #define GPMODE_R_W_ERROR_PAGE 0x01
4960 #define GPMODE_WRITE_PARMS_PAGE 0x05
4961 +#define GPMODE_WCACHING_PAGE 0x08
4962 #define GPMODE_AUDIO_CTL_PAGE 0x0e
4963 #define GPMODE_POWER_PAGE 0x1a
4964 #define GPMODE_FAULT_FAIL_PAGE 0x1c
4965 @@ -504,6 +505,11 @@
4966 * of MODE_SENSE_POWER_PAGE */
4967 #define GPMODE_CDROM_PAGE 0x0d
4969 +#define GPMODE_PAGE_CURRENT 0
4970 +#define GPMODE_PAGE_CHANGE 1
4971 +#define GPMODE_PAGE_DEFAULT 2
4972 +#define GPMODE_PAGE_SAVE 3
4976 /* DVD struct types */
4977 diff -u -r -N ../../linus/2.4/linux/include/linux/fs.h linux/include/linux/fs.h
4978 --- ../../linus/2.4/linux/include/linux/fs.h Tue Aug 6 21:17:11 2002
4979 +++ linux/include/linux/fs.h Tue Aug 6 21:24:33 2002
4981 int (*remount_fs) (struct super_block *, int *, char *);
4982 void (*clear_inode) (struct inode *);
4983 void (*umount_begin) (struct super_block *);
4984 + int (*relocate_blocks) (struct super_block *, unsigned long, unsigned long *);
4986 /* Following are for knfsd to interact with "interesting" filesystems
4987 * Currently just reiserfs, but possibly FAT and others later
4988 diff -u -r -N ../../linus/2.4/linux/include/linux/major.h linux/include/linux/major.h
4989 --- ../../linus/2.4/linux/include/linux/major.h Tue Aug 6 21:17:15 2002
4990 +++ linux/include/linux/major.h Tue Aug 6 21:24:45 2002
4992 #define SPECIALIX_NORMAL_MAJOR 75
4993 #define SPECIALIX_CALLOUT_MAJOR 76
4995 +#define PACKET_MAJOR 97
4997 #define COMPAQ_CISS_MAJOR 104
4998 #define COMPAQ_CISS_MAJOR1 105
4999 #define COMPAQ_CISS_MAJOR2 106
5000 diff -u -r -N ../../linus/2.4/linux/include/linux/pktcdvd.h linux/include/linux/pktcdvd.h
5001 --- ../../linus/2.4/linux/include/linux/pktcdvd.h Thu Jan 1 01:00:00 1970
5002 +++ linux/include/linux/pktcdvd.h Wed Aug 7 21:55:25 2002
5005 + * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
5007 + * May be copied or modified under the terms of the GNU General Public
5008 + * License. See linux/COPYING for more information.
5010 + * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
5014 +#ifndef __PKTCDVD_H
5015 +#define __PKTCDVD_H
5018 + * 1 for normal debug messages, 2 is very verbose. 0 to turn it off.
5020 +#define PACKET_DEBUG 1
5022 +#define MAX_WRITERS 8
5024 +#define STACKED_BH_POOL_SIZE 64
5027 + * use drive write caching -- we need deferred error handling to be
5028 + * able to successfully recover with this option (drive will return good
5029 + * status as soon as the cdb is validated).
5031 +#if defined(CONFIG_CDROM_PKTCDVD_WCACHE)
5032 +#warning Enabling write caching, use at your own risk
5033 +#define USE_WCACHING 1
5035 +#define USE_WCACHING 0
5039 + * No user-serviceable parts beyond this point ->
5043 +#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
5045 +#define DPRINTK(fmt, args...)
5048 +#if PACKET_DEBUG > 1
5049 +#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
5051 +#define VPRINTK(fmt, args...)
5054 +#define PKT_BUF_LIST 0x89
5059 +#define PACKET_CDR 1
5060 +#define PACKET_CDRW 2
5061 +#define PACKET_DVDR 3
5062 +#define PACKET_DVDRW 4
5067 +#define PACKET_WRITEABLE 1 /* pd is writeable */
5068 +#define PACKET_NWA_VALID 2 /* next writeable address valid */
5069 +#define PACKET_LRA_VALID 3 /* last recorded address valid */
5070 +#define PACKET_READONLY 4 /* read only pd */
5071 +#define PACKET_RECOVERY 5 /* rq recovery in progress */
5072 +#define PACKET_RQ 6 /* current rq is set */
5073 +#define PACKET_BUSY 7 /* current rq is being processed */
5076 + * Disc status -- from READ_DISC_INFO
5078 +#define PACKET_DISC_EMPTY 0
5079 +#define PACKET_DISC_INCOMPLETE 1
5080 +#define PACKET_DISC_COMPLETE 2
5081 +#define PACKET_DISC_OTHER 3
5084 + * write type, and corresponding data block type
5086 +#define PACKET_MODE1 1
5087 +#define PACKET_MODE2 2
5088 +#define PACKET_BLOCK_MODE1 8
5089 +#define PACKET_BLOCK_MODE2 10
5092 + * Last session/border status
5094 +#define PACKET_SESSION_EMPTY 0
5095 +#define PACKET_SESSION_INCOMPLETE 1
5096 +#define PACKET_SESSION_RESERVED 2
5097 +#define PACKET_SESSION_COMPLETE 3
5099 +#define PACKET_MCN "4a656e734178626f65323030300000"
5101 +#undef PACKET_USE_LS
5104 + * special requests
5106 +#define PKT_THROTTLE_SPEED 1
5108 +#define PKT_TRAY_UNLOCK 0
5109 +#define PKT_TRAY_LOCK 1
5112 + * Very crude stats for now
5114 +struct packet_stats
5116 + unsigned long bh_s;
5117 + unsigned long bh_e;
5118 + unsigned long bh_cache_hits;
5119 + unsigned long page_cache_hits;
5120 + unsigned long secs_w;
5121 + unsigned long secs_r;
5127 +#define PACKET_IOCTL_MAGIC ('X')
5128 +#define PACKET_GET_STATS _IOR(PACKET_IOCTL_MAGIC, 0, struct packet_stats)
5129 +#define PACKET_SETUP_DEV _IOW(PACKET_IOCTL_MAGIC, 1, unsigned int)
5130 +#define PACKET_TEARDOWN_DEV _IOW(PACKET_IOCTL_MAGIC, 2, unsigned int)
5133 +#include <linux/blkdev.h>
5134 +#include <linux/completion.h>
5136 +struct packet_settings
5138 + __u8 size; /* packet size in frames */
5139 + __u8 fp; /* fixed packets */
5140 + __u8 link_loss; /* the rest is specified
5141 + * as per Mt Fuji */
5149 + struct buffer_head *bhlist; /* string of bhs */
5151 + merge_request_fn *front_merge_fn;
5152 + merge_request_fn *back_merge_fn;
5153 + merge_requests_fn *merge_requests_fn;
5154 + request_queue_t r_queue;
5157 + struct completion thr_compl;
5160 +struct pktcdvd_device
5162 + struct block_device *bdev;
5163 + kdev_t dev; /* dev attached */
5164 + kdev_t pkt_dev; /* our dev */
5166 + struct cdrom_device_info *cdi; /* cdrom matching dev */
5167 + struct packet_settings settings;
5168 + struct packet_stats stats;
5170 + __u8 speed; /* cur write speed */
5171 + unsigned long offset; /* start offset */
5172 + __u8 mode_offset; /* 0 / 8 */
5174 + unsigned long flags;
5176 + __u8 track_status; /* last one */
5177 + __u32 nwa; /* next writable address */
5178 + __u32 lra; /* last recorded address */
5180 + struct packet_cdrw cdrw;
5181 + wait_queue_head_t wqueue;
5182 + struct request *rq;
5184 + struct buffer_head *stacked_bhlist;
5185 + int stacked_bhcnt;
5187 + struct semaphore cache_sync_mutex;
5188 + int unflushed_writes;
5190 + make_request_fn *make_request_fn;
5194 + * following possibly belongs in cdrom.h
5197 +struct cdvd_capacity
5200 + __u32 block_length;
5203 +void pkt_elevator_merge_req(struct request *rq, struct request *nxt) {}
5204 +void pkt_elevator_cleanup(request_queue_t *q, struct request *rq, int count) {}
5206 +#define ELEVATOR_PKTCDVD \
5208 + 0, /* not used */ \
5209 + 0, /* not used */ \
5211 + pkt_elevator_merge, /* elevator_merge_fn */ \
5212 + pkt_elevator_cleanup, \
5213 + pkt_elevator_merge_req, \
5216 +#endif /* __KERNEL__ */
5218 +#endif /* __PKTCDVD_H */
5219 diff -u -r -N ../../linus/2.4/linux/include/linux/udf_fs.h linux/include/linux/udf_fs.h
5220 --- ../../linus/2.4/linux/include/linux/udf_fs.h Tue Aug 6 21:17:20 2002
5221 +++ linux/include/linux/udf_fs.h Thu Aug 8 20:44:32 2002
5226 -#include <linux/config.h>
5231 #define UDF_PREALLOCATE
5232 #define UDF_DEFAULT_PREALLOC_BLOCKS 8
5234 -#define UDFFS_DATE "2002/03/11"
5235 +#define UDFFS_DATE "2002/03/14"
5236 #define UDFFS_VERSION "0.9.6"
5238 -#if !defined(UDFFS_RW)
5240 -#if defined(CONFIG_UDF_RW)
5242 -#else /* !defined(CONFIG_UDF_RW) */
5244 -#endif /* defined(CONFIG_UDF_RW) */
5246 -#endif /* !defined(UDFFS_RW) */
5252 #define udf_info(f, a...) \
5253 printk (KERN_INFO "UDF-fs INFO " f, ##a);
5257 +#ifndef LINUX_VERSION_CODE
5258 +#include <linux/version.h>
5261 +#endif /* __KERNEL__ */
5263 #endif /* _UDF_FS_H */
5264 diff -u -r -N ../../linus/2.4/linux/include/linux/udf_fs_i.h linux/include/linux/udf_fs_i.h
5265 --- ../../linus/2.4/linux/include/linux/udf_fs_i.h Tue Aug 6 21:17:20 2002
5266 +++ linux/include/linux/udf_fs_i.h Thu Aug 8 20:44:32 2002
5271 - __u32 logicalBlockNum;
5272 - __u16 partitionReferenceNum;
5273 + __u32 logicalBlockNum;
5274 + __u16 partitionReferenceNum;
5275 } __attribute__ ((packed)) lb_addr;
5280 + __u32 extPosition;
5281 +} __attribute__ ((packed)) short_ad;
5286 + lb_addr extLocation;
5288 +} __attribute__ ((packed)) long_ad;
5291 struct udf_inode_info
5301 /* Physical address of inode */
5302 - lb_addr i_location;
5306 - __u64 i_lenExtents;
5307 - __u32 i_next_alloc_block;
5308 - __u32 i_next_alloc_goal;
5309 - unsigned i_alloc_type : 3;
5310 - unsigned i_extended_fe : 1;
5311 - unsigned i_strat_4096 : 1;
5312 - unsigned i_new_inode : 1;
5313 - unsigned reserved : 26;
5314 + lb_addr i_location;
5318 + __u64 i_lenExtents;
5319 + __u32 i_next_alloc_block;
5320 + __u32 i_next_alloc_goal;
5321 + unsigned i_alloc_type : 3;
5322 + unsigned i_efe : 1;
5323 + unsigned i_use : 1;
5324 + unsigned i_strat4096 : 1;
5325 + unsigned reserved : 26;