--- linux-2.2.17.raw/drivers/char/Makefile.~1~ Mon Sep 4 18:39:17 2000
+++ linux-2.2.17.raw/drivers/char/Makefile Wed Oct 4 18:49:06 2000
@@ -20,7 +20,7 @@
 
 L_TARGET := char.a
 M_OBJS :=
-L_OBJS := tty_io.o n_tty.o tty_ioctl.o mem.o random.o
+L_OBJS := tty_io.o n_tty.o tty_ioctl.o mem.o random.o raw.o
 LX_OBJS := pty.o misc.o
 
 ifdef CONFIG_VT
--- linux-2.2.17.raw/drivers/char/mem.c.~1~ Tue Jan 4 18:12:14 2000
+++ linux-2.2.17.raw/drivers/char/mem.c Wed Oct 4 18:49:06 2000
@@ -17,6 +17,7 @@
 #include <linux/joystick.h>
 #include <linux/i2c.h>
 #include <linux/capability.h>
+#include <linux/raw.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -620,6 +621,7 @@
         if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
                 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
         rand_initialize();
+        raw_init();
 #ifdef CONFIG_USB
 #ifdef CONFIG_USB_UHCI
         uhci_init();
--- linux-2.2.17.raw/drivers/char/raw.c.~1~ Wed Oct 4 18:49:06 2000
+++ linux-2.2.17.raw/drivers/char/raw.c Wed Oct 4 18:51:58 2000
@@ -0,0 +1,387 @@
+/*
+ * linux/drivers/char/raw.c
+ *
+ * Front-end raw character devices. These can be bound to any block
+ * device to provide genuine Unix raw character device semantics.
+ *
+ * We reserve minor number 0 for a control interface. ioctl()s on this
+ * device are used to bind the other minor numbers to block devices.
+ */
+
+#include <linux/fs.h>
+#include <linux/iobuf.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/raw.h>
+#include <asm/uaccess.h>
+
+#define dprintk(x...)
+
+static kdev_t raw_device_bindings[256] = {};
+static int raw_device_inuse[256] = {};
+static int raw_device_sector_size[256] = {};
+static int raw_device_sector_bits[256] = {};
+
+extern struct file_operations * get_blkfops(unsigned int major);
+
+static ssize_t rw_raw_dev(int rw, struct file *, char *, size_t, loff_t *);
+
+ssize_t raw_read(struct file *, char *, size_t, loff_t *);
+ssize_t raw_write(struct file *, const char *, size_t, loff_t *);
+int raw_open(struct inode *, struct file *);
+int raw_release(struct inode *, struct file *);
+int raw_ctl_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+
+
+static struct file_operations raw_fops = {
+        NULL,           /* llseek */
+        raw_read,       /* read */
+        raw_write,      /* write */
+        NULL,           /* readdir */
+        NULL,           /* poll */
+        NULL,           /* ioctl */
+        NULL,           /* mmap */
+        raw_open,       /* open */
+        NULL,           /* flush */
+        raw_release,    /* release */
+        NULL            /* fsync */
+};
+
+static struct file_operations raw_ctl_fops = {
+        NULL,           /* llseek */
+        NULL,           /* read */
+        NULL,           /* write */
+        NULL,           /* readdir */
+        NULL,           /* poll */
+        raw_ctl_ioctl,  /* ioctl */
+        NULL,           /* mmap */
+        raw_open,       /* open */
+        NULL,           /* flush */
+        NULL,           /* no special release code */
+        NULL            /* fsync */
+};
+
+
+
+void __init raw_init(void)
+{
+        register_chrdev(RAW_MAJOR, "raw", &raw_fops);
+}
+
+
+/*
+ * The raw IO open and release code needs to fake appropriate
+ * open/release calls to the underlying block devices.
+ */
+
+static int bdev_open(kdev_t dev, int mode)
+{
+        int err = 0;
+        struct file dummy_file = {};
+        struct dentry dummy_dentry = {};
+        struct inode * inode = get_empty_inode();
+
+        if (!inode)
+                return -ENOMEM;
+
+        dummy_file.f_op = get_blkfops(MAJOR(dev));
+        if (!dummy_file.f_op) {
+                err = -ENODEV;
+                goto done;
+        }
+
+        if (dummy_file.f_op->open) {
+                inode->i_rdev = dev;
+                dummy_dentry.d_inode = inode;
+                dummy_file.f_dentry = &dummy_dentry;
+                dummy_file.f_mode = mode;
+                err = dummy_file.f_op->open(inode, &dummy_file);
+        }
+
+ done:
+        iput(inode);
+        return err;
+}
+
+static int bdev_close(kdev_t dev)
+{
+        int err;
+        struct inode * inode = get_empty_inode();
+
+        if (!inode)
+                return -ENOMEM;
+
+        inode->i_rdev = dev;
+        err = blkdev_release(inode);
+        iput(inode);
+        return err;
+}
+
+
+
+/*
+ * Open/close code for raw IO.
+ */
+
+int raw_open(struct inode *inode, struct file *filp)
+{
+        int minor;
+        kdev_t bdev;
+        int err;
+        int sector_size;
+        int sector_bits;
+
+        minor = MINOR(inode->i_rdev);
+
+        /*
+         * Is it the control device?
+         */
+
+        if (minor == 0) {
+                filp->f_op = &raw_ctl_fops;
+                return 0;
+        }
+
+        /*
+         * No, it is a normal raw device. All we need to do on open is
+         * to check that the device is bound, and force the underlying
+         * block device to a sector-size blocksize.
+         */
+
+        bdev = raw_device_bindings[minor];
+        if (bdev == NODEV)
+                return -ENODEV;
+
+        err = bdev_open(bdev, filp->f_mode);
+        if (err)
+                return err;
+
+        /*
+         * Don't change the blocksize if we already have users using
+         * this device.
+         */
+
+        if (raw_device_inuse[minor]++)
+                return 0;
+
+        /*
+         * Don't interfere with mounted devices: we cannot safely set
+         * the blocksize on a device which is already mounted.
+         */
+
+        sector_size = 512;
+        if (lookup_vfsmnt(bdev) != NULL) {
+                if (blksize_size[MAJOR(bdev)])
+                        sector_size = blksize_size[MAJOR(bdev)][MINOR(bdev)];
+        } else {
+                if (hardsect_size[MAJOR(bdev)])
+                        sector_size = hardsect_size[MAJOR(bdev)][MINOR(bdev)];
+        }
+
+        set_blocksize(bdev, sector_size);
+        raw_device_sector_size[minor] = sector_size;
+
+        for (sector_bits = 0; !(sector_size & 1); )
+                sector_size >>= 1, sector_bits++;
+        raw_device_sector_bits[minor] = sector_bits;
+
+        return 0;
+}
+
+int raw_release(struct inode *inode, struct file *filp)
+{
+        int minor;
+        kdev_t bdev;
+
+        minor = MINOR(inode->i_rdev);
+        bdev = raw_device_bindings[minor];
+        bdev_close(bdev);
+        raw_device_inuse[minor]--;
+        return 0;
+}
+
+
+
+/*
+ * Deal with ioctls against the raw-device control interface, to bind
+ * and unbind other raw devices.
+ */
+
+int raw_ctl_ioctl(struct inode *inode,
+                  struct file *filp,
+                  unsigned int command,
+                  unsigned long arg)
+{
+        struct raw_config_request rq;
+        int err = 0;
+        int minor;
+
+        switch (command) {
+        case RAW_SETBIND:
+        case RAW_GETBIND:
+
+                /* First, find out which raw minor we want */
+
+                err = copy_from_user(&rq, (void *) arg, sizeof(rq));
+                if (err)
+                        break;
+
+                minor = rq.raw_minor;
+                if (minor == 0 || minor > MINORMASK) {
+                        err = -EINVAL;
+                        break;
+                }
+
+                if (command == RAW_SETBIND) {
+                        /*
+                         * For now, we don't need to check that the underlying
+                         * block device is present or not: we can do that when
+                         * the raw device is opened. Just check that the
+                         * major/minor numbers make sense.
+                         */
+
+                        if (rq.block_major == NODEV ||
+                            rq.block_major > MAX_BLKDEV ||
+                            rq.block_minor > MINORMASK) {
+                                err = -EINVAL;
+                                break;
+                        }
+
+                        if (raw_device_inuse[minor]) {
+                                err = -EBUSY;
+                                break;
+                        }
+                        raw_device_bindings[minor] =
+                                MKDEV(rq.block_major, rq.block_minor);
+                } else {
+                        rq.block_major = MAJOR(raw_device_bindings[minor]);
+                        rq.block_minor = MINOR(raw_device_bindings[minor]);
+                        err = copy_to_user((void *) arg, &rq, sizeof(rq));
+                }
+                break;
+
+        default:
+                err = -EINVAL;
+        }
+
+        return err;
+}
+
+
+
+ssize_t raw_read(struct file *filp, char * buf,
+                 size_t size, loff_t *offp)
+{
+        return rw_raw_dev(READ, filp, buf, size, offp);
+}
+
+ssize_t raw_write(struct file *filp, const char *buf,
+                  size_t size, loff_t *offp)
+{
+        return rw_raw_dev(WRITE, filp, (char *) buf, size, offp);
+}
+
+#define SECTOR_BITS 9
+#define SECTOR_SIZE (1U << SECTOR_BITS)
+#define SECTOR_MASK (SECTOR_SIZE - 1)
+
+ssize_t rw_raw_dev(int rw, struct file *filp, char *buf,
+                   size_t size, loff_t *offp)
+{
+        struct kiobuf * iobuf;
+        int err;
+        unsigned long blocknr, blocks;
+        unsigned long b[KIO_MAX_SECTORS];
+        size_t transferred;
+        int iosize;
+        int i;
+        int minor;
+        kdev_t dev;
+        unsigned long limit;
+
+        int sector_size, sector_bits, sector_mask;
+        int max_sectors;
+
+        /*
+         * First, a few checks on device size limits
+         */
+
+        minor = MINOR(filp->f_dentry->d_inode->i_rdev);
+        dev = raw_device_bindings[minor];
+        sector_size = raw_device_sector_size[minor];
+        sector_bits = raw_device_sector_bits[minor];
+        sector_mask = sector_size - 1;
+        max_sectors = KIO_MAX_SECTORS >> (sector_bits - 9);
+
+        if (blk_size[MAJOR(dev)])
+                limit = (((loff_t) blk_size[MAJOR(dev)][MINOR(dev)]) << BLOCK_SIZE_BITS) >> sector_bits;
+        else
+                limit = INT_MAX;
+        dprintk ("rw_raw_dev: dev %d:%d (+%d)\n",
+                 MAJOR(dev), MINOR(dev), limit);
+
+        if ((*offp & sector_mask) || (size & sector_mask))
+                return -EINVAL;
+        if ((*offp >> sector_bits) >= limit) {
+                if (size)
+                        return -ENXIO;
+                return 0;
+        }
+
+        /*
+         * We'll just use one kiobuf.
+         */
+
+        err = alloc_kiovec(1, &iobuf);
+        if (err)
+                return err;
+
+        /*
+         * Split the IO into KIO_MAX_SECTORS chunks, mapping and
+         * unmapping the single kiobuf as we go to perform each chunk of
+         * IO.
+         */
+
+        transferred = 0;
+        blocknr = *offp >> sector_bits;
+        while (size > 0) {
+                blocks = size >> sector_bits;
+                if (blocks > max_sectors)
+                        blocks = max_sectors;
+                if (blocks > limit - blocknr)
+                        blocks = limit - blocknr;
+                if (!blocks)
+                        break;
+
+                iosize = blocks << sector_bits;
+
+                err = map_user_kiobuf(rw, iobuf, (unsigned long) buf, iosize);
+                if (err)
+                        break;
+
+                for (i = 0; i < blocks; i++)
+                        b[i] = blocknr++;
+
+                err = brw_kiovec(rw, 1, &iobuf, dev, b, sector_size, 0);
+
+                if (err >= 0) {
+                        transferred += err;
+                        size -= err;
+                        buf += err;
+                }
+
+                unmap_kiobuf(iobuf);
+
+                if (err != iosize)
+                        break;
+        }
+
+        free_kiovec(1, &iobuf);
+
+        if (transferred) {
+                *offp += transferred;
+                return transferred;
+        }
+
+        return err;
+}
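
[Editor's note, not part of the patch: the control-device protocol above is easiest to see from user space. A minimal sketch follows; the device node names and the 8:1 block device are assumptions, with the nodes made by hand against RAW_MAJOR 162 (e.g. "mknod /dev/rawctl c 162 0", "mknod /dev/raw1 c 162 1"). Only RAW_SETBIND and struct raw_config_request come from the <linux/raw.h> added below. Since rw_raw_dev() rejects any offset, length, or user buffer that is not sector-aligned, the example uses a page-aligned buffer and assumes a 512-byte sector size.]

/* raw-bind.c: bind raw minor 1 to block device 8:1, then read one sector. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raw.h>

int main(void)
{
        struct raw_config_request rq;
        char *buf;
        int ctl, fd;

        ctl = open("/dev/rawctl", O_RDWR);      /* minor 0: control device */
        if (ctl < 0) { perror("open /dev/rawctl"); exit(1); }

        memset(&rq, 0, sizeof(rq));
        rq.raw_minor   = 1;
        rq.block_major = 8;     /* assumption: first SCSI disk, */
        rq.block_minor = 1;     /* first partition */
        if (ioctl(ctl, RAW_SETBIND, &rq) < 0) { perror("RAW_SETBIND"); exit(1); }
        close(ctl);

        /* Offset, size and the buffer itself must all be sector-aligned. */
        buf = valloc(512);
        if (!buf) exit(1);
        fd = open("/dev/raw1", O_RDONLY);
        if (fd < 0) { perror("open /dev/raw1"); exit(1); }
        if (read(fd, buf, 512) != 512)
                perror("read");
        close(fd);
        return 0;
}
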
--- linux-2.2.17.raw/fs/Makefile.~1~ Thu Aug 26 01:29:49 1999
+++ linux-2.2.17.raw/fs/Makefile Wed Oct 4 18:49:06 2000
@@ -13,7 +13,7 @@
 O_OBJS = open.o read_write.o devices.o file_table.o buffer.o \
         super.o block_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
         ioctl.o readdir.o select.o fifo.o locks.o filesystems.o \
-        dcache.o inode.o attr.o bad_inode.o file.o $(BINFMTS)
+        dcache.o inode.o attr.o bad_inode.o file.o iobuf.o $(BINFMTS)
 
 MOD_LIST_NAME := FS_MODULES
 ALL_SUB_DIRS = coda minix ext2 fat msdos vfat proc isofs nfs umsdos ntfs \
--- linux-2.2.17.raw/fs/buffer.c.~1~ Mon Sep 4 18:39:22 2000
+++ linux-2.2.17.raw/fs/buffer.c Wed Oct 4 18:52:01 2000
@@ -43,6 +43,7 @@
 #include <linux/file.h>
 #include <linux/init.h>
 #include <linux/quotaops.h>
+#include <linux/iobuf.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -1259,6 +1260,183 @@
 bad_count:
         printk ("Whoops: end_buffer_io_async: b_count != 1 on async io.\n");
         return;
+}
+
+
+/*
+ * For brw_kiovec: submit a set of buffer_head temporary IOs and wait
+ * for them to complete. Clean up the buffer_heads afterwards.
+ */
+
+#define dprintk(x...)
+
+static int do_kio(int rw, int nr, struct buffer_head *bh[], int size)
+{
+        int iosize;
+        int i;
+        int err;
+        struct buffer_head *tmp;
+
+        dprintk ("do_kio start\n");
+
+        ll_rw_block(rw, nr, bh);
+        iosize = err = 0;
+
+        for (i = nr; --i >= 0; ) {
+                tmp = bh[i];
+                wait_on_buffer(tmp);
+                if (!buffer_uptodate(tmp)) {
+                        err = -EIO;
+                        /* We are waiting on bh'es in reverse order so
+                           clearing iosize on error calculates the
+                           amount of IO before the first error. */
+                        iosize = 0;
+                }
+
+                free_async_buffers(tmp);
+                iosize += size;
+        }
+
+        dprintk ("do_kio end %d %d\n", iosize, err);
+
+        if (iosize)
+                return iosize;
+        else
+                return err;
+}
+
+/*
+ * Start I/O on a physical range of kernel memory, defined by a vector
+ * of kiobuf structs (much like a user-space iovec list).
+ *
+ * The kiobuf must already be locked for IO. IO is submitted
+ * asynchronously: you need to check page->locked, page->uptodate, and
+ * maybe wait on page->wait.
+ *
+ * It is up to the caller to make sure that there are enough blocks
+ * passed in to completely map the iobufs to disk.
+ */
+
+int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
+               kdev_t dev, unsigned long b[], int size, int bmap)
+{
+        int err;
+        int length;
+        int transferred;
+        int i;
+        int bufind;
+        int pageind;
+        int bhind;
+        int offset;
+        unsigned long blocknr;
+        struct kiobuf * iobuf = NULL;
+        unsigned long page;
+        struct page * map;
+        struct buffer_head *tmp, *bh[KIO_MAX_SECTORS];
+
+        /*
+         * First, do some alignment and validity checks
+         */
+        for (i = 0; i < nr; i++) {
+                iobuf = iovec[i];
+                if ((iobuf->offset & (size-1)) ||
+                    (iobuf->length & (size-1)))
+                        return -EINVAL;
+                if (!iobuf->locked)
+                        panic("brw_kiovec: iobuf not locked for I/O");
+                if (!iobuf->nr_pages)
+                        panic("brw_kiovec: iobuf not initialised");
+        }
+
+        /* DEBUG */
+#if 0
+        return iobuf->length;
+#endif
+        dprintk ("brw_kiovec: start\n");
+
+        /*
+         * OK to walk down the iovec doing page IO on each page we find.
+         */
+        bufind = bhind = transferred = err = 0;
+        for (i = 0; i < nr; i++) {
+                iobuf = iovec[i];
+                offset = iobuf->offset;
+                length = iobuf->length;
+                dprintk ("iobuf %d %d %d\n", offset, length, size);
+
+                for (pageind = 0; pageind < iobuf->nr_pages; pageind++) {
+                        page = iobuf->pagelist[pageind];
+                        map = iobuf->maplist[pageind];
+
+                        while (length > 0) {
+                                blocknr = b[bufind++];
+                                tmp = get_unused_buffer_head(0);
+                                if (!tmp) {
+                                        err = -ENOMEM;
+                                        goto error;
+                                }
+
+                                tmp->b_dev = B_FREE;
+                                tmp->b_size = size;
+                                tmp->b_data = (char *) (page + offset);
+                                tmp->b_this_page = tmp;
+
+                                init_buffer(tmp, dev, blocknr,
+                                            end_buffer_io_sync, NULL);
+                                if (rw == WRITE) {
+                                        set_bit(BH_Uptodate, &tmp->b_state);
+                                        set_bit(BH_Dirty, &tmp->b_state);
+                                }
+
+                                dprintk ("buffer %d (%d) at %p\n",
+                                         bhind, tmp->b_blocknr, tmp->b_data);
+                                bh[bhind++] = tmp;
+                                length -= size;
+                                offset += size;
+
+                                /*
+                                 * Start the IO if we have got too much or if
+                                 * this is the end of the last iobuf
+                                 */
+                                if (bhind >= KIO_MAX_SECTORS) {
+                                        err = do_kio(rw, bhind, bh, size);
+                                        if (err >= 0)
+                                                transferred += err;
+                                        else
+                                                goto finished;
+                                        bhind = 0;
+                                }
+
+                                if (offset >= PAGE_SIZE) {
+                                        offset = 0;
+                                        break;
+                                }
+                        } /* End of block loop */
+                } /* End of page loop */
+        } /* End of iovec loop */
+
+        /* Is there any IO still left to submit? */
+        if (bhind) {
+                err = do_kio(rw, bhind, bh, size);
+                if (err >= 0)
+                        transferred += err;
+                else
+                        goto finished;
+        }
+
+ finished:
+        dprintk ("brw_kiovec: end (%d, %d)\n", transferred, err);
+        if (transferred)
+                return transferred;
+        return err;
+
+ error:
+        /* We got an error allocating the bh'es. Just free the current
+           buffer_heads and exit. */
+        for (i = 0; i < bhind; i++) {
+                free_async_buffers(bh[i]);
+        }
+        goto finished;
 }
 
 /*
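
[Editor's note, not part of the patch: for in-kernel callers, the contract spelled out in the brw_kiovec() comment reduces to a fixed sequence: allocate a kiovec, pin the user pages, supply one block number per sector, submit, then unmap and free. A condensed sketch of that calling convention, modelled on the shape of rw_raw_dev() in drivers/char/raw.c above. The function name is illustrative; it assumes kernel context with <linux/fs.h> and <linux/iobuf.h>, a 512-byte sector size, nr_sectors no larger than KIO_MAX_SECTORS, and usr_buf pointing into the current process's address space.]

/* Illustrative only: read nr_sectors 512-byte sectors from dev into a
 * user buffer, using the kiobuf API added by this patch. */
static int example_read_sectors(kdev_t dev, unsigned long first_sector,
                                char *usr_buf, int nr_sectors)
{
        unsigned long b[KIO_MAX_SECTORS];
        struct kiobuf *iobuf;
        int i, err;

        err = alloc_kiovec(1, &iobuf);
        if (err)
                return err;

        /* Fault in, pin and lock the user pages. */
        err = map_user_kiobuf(READ, iobuf, (unsigned long) usr_buf,
                              nr_sectors * 512);
        if (err)
                goto out_free;

        /* One block number per sector: the mapping is the caller's job. */
        for (i = 0; i < nr_sectors; i++)
                b[i] = first_sector + i;

        /* Returns bytes transferred, or a negative error code. */
        err = brw_kiovec(READ, 1, &iobuf, dev, b, 512, 0);

        unmap_kiobuf(iobuf);
 out_free:
        free_kiovec(1, &iobuf);
        return err;
}
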
--- linux-2.2.17.raw/fs/iobuf.c.~1~ Wed Oct 4 18:49:06 2000
+++ linux-2.2.17.raw/fs/iobuf.c Wed Oct 4 18:49:06 2000
@@ -0,0 +1,104 @@
+/*
+ * iobuf.c
+ *
+ * Keep track of the general-purpose IO-buffer structures used to track
+ * abstract kernel-space io buffers.
+ *
+ */
+
+#include <linux/iobuf.h>
+#include <linux/malloc.h>
+#include <linux/slab.h>
+
+static kmem_cache_t *kiobuf_cachep;
+
+void __init kiobuf_init(void)
+{
+        kiobuf_cachep = kmem_cache_create("kiobuf",
+                                          sizeof(struct kiobuf),
+                                          0,
+                                          SLAB_HWCACHE_ALIGN, NULL, NULL);
+        if (!kiobuf_cachep)
+                panic("Cannot create kernel iobuf cache\n");
+}
+
+
+int alloc_kiovec(int nr, struct kiobuf **bufp)
+{
+        int i;
+        struct kiobuf *iobuf;
+
+        for (i = 0; i < nr; i++) {
+                iobuf = kmem_cache_alloc(kiobuf_cachep, SLAB_KERNEL);
+                if (!iobuf) {
+                        free_kiovec(i, bufp);
+                        return -ENOMEM;
+                }
+
+                memset(iobuf, 0, sizeof(*iobuf));
+                iobuf->array_len = KIO_STATIC_PAGES;
+                iobuf->pagelist = iobuf->page_array;
+                iobuf->maplist = iobuf->map_array;
+                *bufp++ = iobuf;
+        }
+
+        return 0;
+}
+
+void free_kiovec(int nr, struct kiobuf **bufp)
+{
+        struct kiobuf *iobuf;
+        int i;
+
+        for (i = 0; i < nr; i++) {
+                iobuf = bufp[i];
+                if (iobuf->array_len > KIO_STATIC_PAGES) {
+                        kfree (iobuf->pagelist);
+                        kfree (iobuf->maplist);
+                }
+                kmem_cache_free(kiobuf_cachep, bufp[i]);
+        }
+}
+
+int expand_kiobuf(struct kiobuf *iobuf, int wanted)
+{
+        unsigned long * pagelist;
+        struct page ** maplist;
+
+        if (iobuf->array_len >= wanted)
+                return 0;
+
+        pagelist = (unsigned long *)
+                kmalloc(wanted * sizeof(unsigned long), GFP_KERNEL);
+        if (!pagelist)
+                return -ENOMEM;
+
+        maplist = (struct page **)
+                kmalloc(wanted * sizeof(struct page **), GFP_KERNEL);
+        if (!maplist) {
+                kfree(pagelist);
+                return -ENOMEM;
+        }
+
+        /* Did it grow while we waited? */
+        if (iobuf->array_len >= wanted) {
+                kfree(pagelist);
+                kfree(maplist);
+                return 0;
+        }
+
+        memcpy (pagelist, iobuf->pagelist, iobuf->array_len * sizeof(unsigned long));
+        memcpy (maplist, iobuf->maplist, iobuf->array_len * sizeof(struct page **));
+
+        if (iobuf->array_len > KIO_STATIC_PAGES) {
+                kfree (iobuf->pagelist);
+                kfree (iobuf->maplist);
+        }
+
+        iobuf->pagelist = pagelist;
+        iobuf->maplist = maplist;
+        iobuf->array_len = wanted;
+        return 0;
+}
+
+
--- linux-2.2.17.raw/include/linux/iobuf.h.~1~ Wed Oct 4 18:49:06 2000
+++ linux-2.2.17.raw/include/linux/iobuf.h Wed Oct 4 18:49:06 2000
@@ -0,0 +1,70 @@
+/*
+ * iobuf.h
+ *
+ * Defines the structures used to track abstract kernel-space io buffers.
+ *
+ */
+
+#ifndef __LINUX_IOBUF_H
+#define __LINUX_IOBUF_H
+
+#include <linux/mm.h>
+#include <linux/init.h>
+
+/*
+ * The kiobuf structure describes a physical set of pages reserved and
+ * locked for IO. The reference counts on each page will have been
+ * incremented, and the flags field will indicate whether or not we have
+ * pre-locked all of the pages for IO.
+ *
+ * kiobufs may be passed in arrays to form a kiovec, but we must
+ * preserve the property that no page is present more than once over the
+ * entire iovec.
+ */
+
+#define KIO_MAX_ATOMIC_IO 64 /* in kb */
+#define KIO_MAX_ATOMIC_BYTES (64 * 1024)
+#define KIO_STATIC_PAGES (KIO_MAX_ATOMIC_IO / (PAGE_SIZE >> 10))
+#define KIO_MAX_SECTORS (KIO_MAX_ATOMIC_IO * 2)
+
+struct kiobuf
+{
+        int             nr_pages;       /* Pages actually referenced */
+        int             array_len;      /* Space in the allocated lists */
+        int             offset;         /* Offset to start of valid data */
+        int             length;         /* Number of valid bytes of data */
+
+        /* Keep separate track of the physical addresses and page
+         * structs involved. If we do IO to a memory-mapped device
+         * region, there won't necessarily be page structs defined for
+         * every address. */
+
+        unsigned long * pagelist;
+        struct page **  maplist;
+
+        unsigned int    locked : 1;     /* If set, the pages have been locked */
+
+        /* Always embed enough struct pages for 64k of IO */
+        unsigned long   page_array[KIO_STATIC_PAGES];
+        struct page *   map_array[KIO_STATIC_PAGES];
+};
+
+
+/* mm/memory.c */
+
+int map_user_kiobuf(int rw, struct kiobuf *, unsigned long va, size_t len);
+void unmap_kiobuf(struct kiobuf *iobuf);
+
+/* fs/iobuf.c */
+
+void __init kiobuf_init(void);
+int alloc_kiovec(int nr, struct kiobuf **);
+void free_kiovec(int nr, struct kiobuf **);
+int expand_kiobuf(struct kiobuf *, int);
+
+/* fs/buffer.c */
+
+int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
+               kdev_t dev, unsigned long b[], int size, int bmap);
+
+#endif /* __LINUX_IOBUF_H */
--- linux-2.2.17.raw/include/linux/major.h.~1~ Wed Jun 7 22:26:44 2000
+++ linux-2.2.17.raw/include/linux/major.h Wed Oct 4 18:49:06 2000
@@ -117,6 +117,8 @@
 
 #define AURORA_MAJOR 79
 
+#define RAW_MAJOR 162
+
 #define UNIX98_PTY_MASTER_MAJOR 128
 #define UNIX98_PTY_MAJOR_COUNT 8
 #define UNIX98_PTY_SLAVE_MAJOR (UNIX98_PTY_MASTER_MAJOR+UNIX98_PTY_MAJOR_COUNT)
--- linux-2.2.17.raw/include/linux/raw.h.~1~ Wed Oct 4 18:49:06 2000
+++ linux-2.2.17.raw/include/linux/raw.h Wed Oct 4 18:49:06 2000
@@ -0,0 +1,23 @@
+#ifndef __LINUX_RAW_H
+#define __LINUX_RAW_H
+
+#include <linux/types.h>
+
+#define RAW_SETBIND _IO( 0xac, 0 )
+#define RAW_GETBIND _IO( 0xac, 1 )
+
+struct raw_config_request
+{
+        int     raw_minor;
+        __u64   block_major;
+        __u64   block_minor;
+};
+
+#ifdef __KERNEL__
+
+/* drivers/char/raw.c */
+extern void raw_init(void);
+
+#endif /* __KERNEL__ */
+
+#endif /* __LINUX_RAW_H */
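
[Editor's note, not part of the patch: RAW_GETBIND is the inverse query; it fills in block_major/block_minor for the given raw_minor. A user-space sketch, again assuming a hand-made /dev/rawctl node for minor 0 of RAW_MAJOR 162:]

/* raw-query.c: print the block device bound to raw minor 1, if any. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raw.h>

int main(void)
{
        struct raw_config_request rq;
        int ctl;

        ctl = open("/dev/rawctl", O_RDWR);
        if (ctl < 0) { perror("open /dev/rawctl"); return 1; }
        memset(&rq, 0, sizeof(rq));
        rq.raw_minor = 1;
        if (ioctl(ctl, RAW_GETBIND, &rq) < 0) { perror("RAW_GETBIND"); return 1; }
        printf("raw1 -> %lu:%lu\n",
               (unsigned long) rq.block_major,
               (unsigned long) rq.block_minor);
        close(ctl);
        return 0;
}
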
--- linux-2.2.17.raw/init/main.c.~1~ Mon Sep 4 18:39:28 2000
+++ linux-2.2.17.raw/init/main.c Wed Oct 4 18:50:50 2000
@@ -22,6 +22,7 @@
 #include <linux/smp_lock.h>
 #include <linux/blk.h>
 #include <linux/hdreg.h>
+#include <linux/iobuf.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -1413,6 +1414,7 @@
 #ifdef CONFIG_ARCH_S390
         ccwcache_init();
 #endif
+        kiobuf_init();
         signals_init();
         inode_init();
         file_table_init();
--- linux-2.2.17.raw/kernel/ksyms.c.~1~ Mon Sep 4 18:39:28 2000
+++ linux-2.2.17.raw/kernel/ksyms.c Wed Oct 4 18:51:58 2000
@@ -39,6 +39,7 @@
 #include <linux/poll.h>
 #include <linux/mm.h>
 #include <linux/capability.h>
+#include <linux/iobuf.h>
 
 #if defined(CONFIG_PROC_FS)
 #include <linux/proc_fs.h>
@@ -240,6 +241,14 @@
 EXPORT_SYMBOL(max_sectors);
 EXPORT_SYMBOL(max_segments);
 EXPORT_SYMBOL(max_readahead);
+
+/* kiobuf support */
+EXPORT_SYMBOL(map_user_kiobuf);
+EXPORT_SYMBOL(unmap_kiobuf);
+EXPORT_SYMBOL(alloc_kiovec);
+EXPORT_SYMBOL(free_kiovec);
+EXPORT_SYMBOL(expand_kiobuf);
+EXPORT_SYMBOL(brw_kiovec);
 
 /* tty routines */
 EXPORT_SYMBOL(tty_hangup);
--- linux-2.2.17.raw/mm/memory.c.~1~ Tue Jan 4 18:12:26 2000
+++ linux-2.2.17.raw/mm/memory.c Wed Oct 4 18:49:06 2000
@@ -37,6 +37,8 @@
 #include <linux/mman.h>
 #include <linux/swap.h>
 #include <linux/smp_lock.h>
+#include <linux/pagemap.h>
+#include <linux/iobuf.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -395,6 +397,187 @@
         if (mm->rss < 0)
                 mm->rss = 0;
 }
+}
+
+
+/*
+ * Do a quick page-table lookup for a single page.
+ */
+static unsigned long get_page(unsigned long address)
+{
+        pgd_t *pgd;
+        pmd_t *pmd;
+
+        pgd = pgd_offset(current->mm, address);
+        pmd = pmd_offset(pgd, address);
+        if (pmd) {
+                pte_t * pte = pte_offset(pmd, address);
+                if (pte && pte_present(*pte)) {
+                        return pte_page(*pte);
+                }
+        }
+
+        printk(KERN_ERR "Missing page in get_page\n");
+        return 0;
+}
+
+/*
+ * Given a physical address, is there a useful struct page pointing to it?
+ */
+
+static struct page * get_page_map(unsigned long page)
+{
+        struct page *map;
+
+        if (MAP_NR(page) >= max_mapnr)
+                return 0;
+        if (page == ZERO_PAGE(page))
+                return 0;
+        map = mem_map + MAP_NR(page);
+        if (PageReserved(map))
+                return 0;
+        return map;
+}
+
+/*
+ * Force in an entire range of pages from the current process's user VA,
+ * and pin and lock the pages for IO.
+ */
+
+#define dprintk(x...)
+int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
+{
+        unsigned long ptr, end;
+        int err;
+        struct mm_struct * mm;
+        struct vm_area_struct * vma = 0;
+        unsigned long page;
+        struct page * map;
+        int doublepage = 0;
+        int repeat = 0;
+        int i;
+
+        /* Make sure the iobuf is not already mapped somewhere. */
+        if (iobuf->nr_pages)
+                return -EINVAL;
+
+        mm = current->mm;
+        dprintk ("map_user_kiobuf: begin\n");
+
+        ptr = va & PAGE_MASK;
+        end = (va + len + PAGE_SIZE - 1) & PAGE_MASK;
+        err = expand_kiobuf(iobuf, (end - ptr) >> PAGE_SHIFT);
+        if (err)
+                return err;
+
+ repeat:
+        ptr = va & PAGE_MASK;
+        vma = 0;
+        down(&mm->mmap_sem);
+
+        err = -EFAULT;
+        iobuf->locked = 1;
+        iobuf->offset = va & ~PAGE_MASK;
+        iobuf->length = len;
+
+        i = 0;
+
+        /*
+         * First of all, try to fault in all of the necessary pages
+         */
+        while (ptr < end) {
+                if (!vma || ptr >= vma->vm_end) {
+                        vma = find_vma(current->mm, ptr);
+                        if (!vma)
+                                goto out_unlock;
+                }
+                if (!handle_mm_fault(current, vma, ptr, (rw==READ)))
+                        goto out_unlock;
+                page = get_page(ptr);
+                if (!page) {
+                        printk (KERN_ERR "Missing page in map_user_kiobuf\n");
+                        goto out_unlock;
+                }
+                map = get_page_map(page);
+                if (map) {
+                        if (PageLocked(map))
+                                goto retry;
+                        atomic_inc(&map->count);
+                        set_bit(PG_locked, &map->flags);
+                }
+                dprintk ("Installing page %p %p: %d\n", (void *)page, map, i);
+                iobuf->pagelist[i] = page;
+                iobuf->maplist[i] = map;
+                iobuf->nr_pages = ++i;
+
+                ptr += PAGE_SIZE;
+        }
+
+        up(&mm->mmap_sem);
+        dprintk ("map_user_kiobuf: end OK\n");
+        return 0;
+
+ out_unlock:
+        up(&mm->mmap_sem);
+        unmap_kiobuf(iobuf);
+        dprintk ("map_user_kiobuf: end %d\n", err);
+        return err;
+
+ retry:
+
+        /*
+         * Undo the locking so far, wait on the page we got to, and try again.
+         */
+        unmap_kiobuf(iobuf);
+        up(&mm->mmap_sem);
+
+        /*
+         * Did the release also unlock the page we got stuck on?
+         */
+        if (!PageLocked(map)) {
+                /* If so, we may well have the page mapped twice in the
+                 * IO address range. Bad news. Of course, it _might_
+                 * just be a coincidence, but if it happens more than
+                 * once, chances are we have a double-mapped page. */
+                if (++doublepage >= 3) {
+                        return -EINVAL;
+                }
+        }
+
+        /*
+         * Try again...
+         */
+        wait_on_page(map);
+        if (++repeat < 16)
+                goto repeat;
+        return -EAGAIN;
+}
+
+
+/*
+ * Unmap all of the pages referenced by a kiobuf. We release the pages,
+ * and unlock them if they were locked.
+ */
+
+void unmap_kiobuf (struct kiobuf *iobuf)
+{
+        int i;
+        struct page *map;
+
+        for (i = 0; i < iobuf->nr_pages; i++) {
+                map = iobuf->maplist[i];
+
+                if (map && iobuf->locked) {
+                        __free_page(map);
+                        clear_bit(PG_locked, &map->flags);
+                        wake_up(&map->wait);
+                }
+        }
+
+        iobuf->nr_pages = 0;
+        iobuf->locked = 0;
+}
 
 static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
                                      unsigned long size, pte_t zero_pte)