1 --- linux-2.2.17.raw/drivers/char/Makefile.~1~ Mon Sep 4 18:39:17 2000
2 +++ linux-2.2.17.raw/drivers/char/Makefile Wed Oct 4 18:49:06 2000
7 -L_OBJS := tty_io.o n_tty.o tty_ioctl.o mem.o random.o
8 +L_OBJS := tty_io.o n_tty.o tty_ioctl.o mem.o random.o raw.o
9 LX_OBJS := pty.o misc.o
12 --- linux-2.2.17.raw/drivers/char/mem.c.~1~ Tue Jan 4 18:12:14 2000
13 +++ linux-2.2.17.raw/drivers/char/mem.c Wed Oct 4 18:49:06 2000
15 #include <linux/joystick.h>
16 #include <linux/i2c.h>
17 #include <linux/capability.h>
18 +#include <linux/raw.h>
20 #include <asm/uaccess.h>
23 if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
24 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
28 #ifdef CONFIG_USB_UHCI
30 --- linux-2.2.17.raw/drivers/char/raw.c.~1~ Wed Oct 4 18:49:06 2000
31 +++ linux-2.2.17.raw/drivers/char/raw.c Wed Oct 4 18:51:58 2000
34 + * linux/drivers/char/raw.c
36 + * Front-end raw character devices. These can be bound to any block
37 + * device to provide genuine Unix raw character device semantics.
39 + * We reserve minor number 0 for a control interface. ioctl()s on this
40 + * device are used to bind the other minor numbers to block devices.
43 +#include <linux/fs.h>
44 +#include <linux/iobuf.h>
45 +#include <linux/major.h>
46 +#include <linux/blkdev.h>
47 +#include <linux/raw.h>
48 +#include <asm/uaccess.h>
50 +#define dprintk(x...)
52 +static kdev_t raw_device_bindings[256] = {};
53 +static int raw_device_inuse[256] = {};
54 +static int raw_device_sector_size[256] = {};
55 +static int raw_device_sector_bits[256] = {};
57 +extern struct file_operations * get_blkfops(unsigned int major);
59 +static ssize_t rw_raw_dev(int rw, struct file *, char *, size_t, loff_t *);
61 +ssize_t raw_read(struct file *, char *, size_t, loff_t *);
62 +ssize_t raw_write(struct file *, const char *, size_t, loff_t *);
63 +int raw_open(struct inode *, struct file *);
64 +int raw_release(struct inode *, struct file *);
65 +int raw_ctl_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
68 +static struct file_operations raw_fops = {
70 + raw_read, /* read */
71 + raw_write, /* write */
76 + raw_open, /* open */
78 + raw_release, /* release */
82 +static struct file_operations raw_ctl_fops = {
88 + raw_ctl_ioctl, /* ioctl */
90 + raw_open, /* open */
92 + NULL, /* no special release code */
98 +void __init raw_init(void)
100 + register_chrdev(RAW_MAJOR, "raw", &raw_fops);
105 + * The raw IO open and release code needs to fake appropriate
106 + * open/release calls to the underlying block devices.
109 +static int bdev_open(kdev_t dev, int mode)
112 + struct file dummy_file = {};
113 + struct dentry dummy_dentry = {};
114 + struct inode * inode = get_empty_inode();
119 + dummy_file.f_op = get_blkfops(MAJOR(dev));
120 + if (!dummy_file.f_op) {
125 + if (dummy_file.f_op->open) {
126 + inode->i_rdev = dev;
127 + dummy_dentry.d_inode = inode;
128 + dummy_file.f_dentry = &dummy_dentry;
129 + dummy_file.f_mode = mode;
130 + err = dummy_file.f_op->open(inode, &dummy_file);
138 +static int bdev_close(kdev_t dev)
141 + struct inode * inode = get_empty_inode();
146 + inode->i_rdev = dev;
147 + err = blkdev_release(inode);
155 + * Open/close code for raw IO.
158 +int raw_open(struct inode *inode, struct file *filp)
166 + minor = MINOR(inode->i_rdev);
169 + * Is it the control device?
173 + filp->f_op = &raw_ctl_fops;
178 + * No, it is a normal raw device. All we need to do on open is
179 + * to check that the device is bound, and force the underlying
180 + * block device to a sector-size blocksize.
183 + bdev = raw_device_bindings[minor];
187 + err = bdev_open(bdev, filp->f_mode);
192 + * Don't change the blocksize if we already have users using
196 + if (raw_device_inuse[minor]++)
200 + * Don't interfere with mounted devices: we cannot safely set
201 + * the blocksize on a device which is already mounted.
205 + if (lookup_vfsmnt(bdev) != NULL) {
206 + if (blksize_size[MAJOR(bdev)])
207 + sector_size = blksize_size[MAJOR(bdev)][MINOR(bdev)];
209 + if (hardsect_size[MAJOR(bdev)])
210 + sector_size = hardsect_size[MAJOR(bdev)][MINOR(bdev)];
213 + set_blocksize(bdev, sector_size);
214 + raw_device_sector_size[minor] = sector_size;
216 + for (sector_bits = 0; !(sector_size & 1); )
217 + sector_size>>=1, sector_bits++;
218 + raw_device_sector_bits[minor] = sector_bits;
223 +int raw_release(struct inode *inode, struct file *filp)
228 + minor = MINOR(inode->i_rdev);
229 + bdev = raw_device_bindings[minor];
231 + raw_device_inuse[minor]--;
238 + * Deal with ioctls against the raw-device control interface, to bind
239 + * and unbind other raw devices.
242 +int raw_ctl_ioctl(struct inode *inode,
244 + unsigned int command,
247 + struct raw_config_request rq;
255 + /* First, find out which raw minor we want */
257 + err = copy_from_user(&rq, (void *) arg, sizeof(rq));
261 + minor = rq.raw_minor;
262 + if (minor == 0 || minor > MINORMASK) {
267 + if (command == RAW_SETBIND) {
269 + * For now, we don't need to check that the underlying
270 + * block device is present or not: we can do that when
271 + * the raw device is opened. Just check that the
272 + * major/minor numbers make sense.
275 + if (rq.block_major == NODEV ||
276 + rq.block_major > MAX_BLKDEV ||
277 + rq.block_minor > MINORMASK) {
282 + if (raw_device_inuse[minor]) {
286 + raw_device_bindings[minor] =
287 + MKDEV(rq.block_major, rq.block_minor);
289 + rq.block_major = MAJOR(raw_device_bindings[minor]);
290 + rq.block_minor = MINOR(raw_device_bindings[minor]);
291 + err = copy_to_user((void *) arg, &rq, sizeof(rq));
304 +ssize_t raw_read(struct file *filp, char * buf,
305 + size_t size, loff_t *offp)
307 + return rw_raw_dev(READ, filp, buf, size, offp);
310 +ssize_t raw_write(struct file *filp, const char *buf,
311 + size_t size, loff_t *offp)
313 + return rw_raw_dev(WRITE, filp, (char *) buf, size, offp);
316 +#define SECTOR_BITS 9
317 +#define SECTOR_SIZE (1U << SECTOR_BITS)
318 +#define SECTOR_MASK (SECTOR_SIZE - 1)
320 +ssize_t rw_raw_dev(int rw, struct file *filp, char *buf,
321 + size_t size, loff_t *offp)
323 + struct kiobuf * iobuf;
325 + unsigned long blocknr, blocks;
326 + unsigned long b[KIO_MAX_SECTORS];
327 + size_t transferred;
332 + unsigned long limit;
334 + int sector_size, sector_bits, sector_mask;
338 + * First, a few checks on device size limits
341 + minor = MINOR(filp->f_dentry->d_inode->i_rdev);
342 + dev = raw_device_bindings[minor];
343 + sector_size = raw_device_sector_size[minor];
344 + sector_bits = raw_device_sector_bits[minor];
345 + sector_mask = sector_size- 1;
346 + max_sectors = KIO_MAX_SECTORS >> (sector_bits - 9);
348 + if (blk_size[MAJOR(dev)])
349 + limit = (((loff_t) blk_size[MAJOR(dev)][MINOR(dev)]) << BLOCK_SIZE_BITS) >> sector_bits;
352 + dprintk ("rw_raw_dev: dev %d:%d (+%d)\n",
353 + MAJOR(dev), MINOR(dev), limit);
355 + if ((*offp & sector_mask) || (size & sector_mask))
357 + if ((*offp >> sector_bits) >= limit) {
364 + * We'll just use one kiobuf
367 + err = alloc_kiovec(1, &iobuf);
372 + * Split the IO into KIO_MAX_SECTORS chunks, mapping and
373 + * unmapping the single kiobuf as we go to perform each chunk of
378 + blocknr = *offp >> sector_bits;
380 + blocks = size >> sector_bits;
381 + if (blocks > max_sectors)
382 + blocks = max_sectors;
383 + if (blocks > limit - blocknr)
384 + blocks = limit - blocknr;
388 + iosize = blocks << sector_bits;
390 + err = map_user_kiobuf(rw, iobuf, (unsigned long) buf, iosize);
394 + for (i=0; i < blocks; i++)
397 + err = brw_kiovec(rw, 1, &iobuf, dev, b, sector_size, 0);
400 + transferred += err;
405 + unmap_kiobuf(iobuf);
411 + free_kiovec(1, &iobuf);
414 + *offp += transferred;
415 + return transferred;
420 --- linux-2.2.17.raw/fs/Makefile.~1~ Thu Aug 26 01:29:49 1999
421 +++ linux-2.2.17.raw/fs/Makefile Wed Oct 4 18:49:06 2000
423 O_OBJS = open.o read_write.o devices.o file_table.o buffer.o \
424 super.o block_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
425 ioctl.o readdir.o select.o fifo.o locks.o filesystems.o \
426 - dcache.o inode.o attr.o bad_inode.o file.o $(BINFMTS)
427 + dcache.o inode.o attr.o bad_inode.o file.o iobuf.o $(BINFMTS)
429 MOD_LIST_NAME := FS_MODULES
430 ALL_SUB_DIRS = coda minix ext2 fat msdos vfat proc isofs nfs umsdos ntfs \
431 --- linux-2.2.17.raw/fs/buffer.c.~1~ Mon Sep 4 18:39:22 2000
432 +++ linux-2.2.17.raw/fs/buffer.c Wed Oct 4 18:52:01 2000
434 #include <linux/file.h>
435 #include <linux/init.h>
436 #include <linux/quotaops.h>
437 +#include <linux/iobuf.h>
439 #include <asm/uaccess.h>
441 @@ -1259,6 +1260,183 @@
443 printk ("Whoops: end_buffer_io_async: b_count != 1 on async io.\n");
449 + * For brw_kiovec: submit a set of buffer_head temporary IOs and wait
450 + * for them to complete. Clean up the buffer_heads afterwards.
453 +#define dprintk(x...)
455 +static int do_kio(int rw, int nr, struct buffer_head *bh[], int size)
460 + struct buffer_head *tmp;
462 + dprintk ("do_kio start\n");
464 + ll_rw_block(rw, nr, bh);
467 + for (i = nr; --i >= 0; ) {
469 + wait_on_buffer(tmp);
470 + if (!buffer_uptodate(tmp)) {
472 + /* We are waiting on bh'es in reverse order so
473 + clearing iosize on error calculates the
474 + amount of IO before the first error. */
478 + free_async_buffers(tmp);
482 + dprintk ("do_kio end %d %d\n", iosize, err);
491 + * Start I/O on a physical range of kernel memory, defined by a vector
492 + * of kiobuf structs (much like a user-space iovec list).
494 + * The kiobuf must already be locked for IO. IO is submitted
495 + * asynchronously: you need to check page->locked, page->uptodate, and
496 + * maybe wait on page->wait.
498 + * It is up to the caller to make sure that there are enough blocks
499 + * passed in to completely map the iobufs to disk.
502 +int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
503 + kdev_t dev, unsigned long b[], int size, int bmap)
513 + unsigned long blocknr;
514 + struct kiobuf * iobuf = NULL;
515 + unsigned long page;
517 + struct buffer_head *tmp, *bh[KIO_MAX_SECTORS];
520 + * First, do some alignment and validity checks
522 + for (i = 0; i < nr; i++) {
524 + if ((iobuf->offset & (size-1)) ||
525 + (iobuf->length & (size-1)))
527 + if (!iobuf->locked)
528 + panic("brw_kiovec: iobuf not locked for I/O");
529 + if (!iobuf->nr_pages)
530 + panic("brw_kiovec: iobuf not initialised");
535 + return iobuf->length;
537 + dprintk ("brw_kiovec: start\n");
540 + * OK to walk down the iovec doing page IO on each page we find.
542 + bufind = bhind = transferred = err = 0;
543 + for (i = 0; i < nr; i++) {
545 + offset = iobuf->offset;
546 + length = iobuf->length;
547 + dprintk ("iobuf %d %d %d\n", offset, length, size);
549 + for (pageind = 0; pageind < iobuf->nr_pages; pageind++) {
550 + page = iobuf->pagelist[pageind];
551 + map = iobuf->maplist[pageind];
553 + while (length > 0) {
554 + blocknr = b[bufind++];
555 + tmp = get_unused_buffer_head(0);
561 + tmp->b_dev = B_FREE;
562 + tmp->b_size = size;
563 + tmp->b_data = (char *) (page + offset);
564 + tmp->b_this_page = tmp;
566 + init_buffer(tmp, dev, blocknr,
567 + end_buffer_io_sync, NULL);
569 + set_bit(BH_Uptodate, &tmp->b_state);
570 + set_bit(BH_Dirty, &tmp->b_state);
573 + dprintk ("buffer %d (%d) at %p\n",
574 + bhind, tmp->b_blocknr, tmp->b_data);
580 + * Start the IO if we have got too much or if
581 + * this is the end of the last iobuf
583 + if (bhind >= KIO_MAX_SECTORS) {
584 + err = do_kio(rw, bhind, bh, size);
586 + transferred += err;
592 + if (offset >= PAGE_SIZE) {
596 + } /* End of block loop */
597 + } /* End of page loop */
598 + } /* End of iovec loop */
600 + /* Is there any IO still left to submit? */
602 + err = do_kio(rw, bhind, bh, size);
604 + transferred += err;
610 + dprintk ("brw_kiovec: end (%d, %d)\n", transferred, err);
612 + return transferred;
616 + /* We got an error allocating the bh'es. Just free the current
617 + buffer_heads and exit. */
618 + for (i = 0; i < bhind; i++) {
619 + free_async_buffers(bh[i]);
625 --- linux-2.2.17.raw/fs/iobuf.c.~1~ Wed Oct 4 18:49:06 2000
626 +++ linux-2.2.17.raw/fs/iobuf.c Wed Oct 4 18:49:06 2000
631 + * Keep track of the general-purpose IO-buffer structures used to track
632 + * abstract kernel-space io buffers.
636 +#include <linux/iobuf.h>
637 +#include <linux/malloc.h>
638 +#include <linux/slab.h>
640 +static kmem_cache_t *kiobuf_cachep;
642 +void __init kiobuf_init(void)
644 + kiobuf_cachep = kmem_cache_create("kiobuf",
645 + sizeof(struct kiobuf),
647 + SLAB_HWCACHE_ALIGN, NULL, NULL);
649 + panic("Cannot create kernel iobuf cache\n");
653 +int alloc_kiovec(int nr, struct kiobuf **bufp)
656 + struct kiobuf *iobuf;
658 + for (i = 0; i < nr; i++) {
659 + iobuf = kmem_cache_alloc(kiobuf_cachep, SLAB_KERNEL);
661 + free_kiovec(i, bufp);
665 + memset(iobuf, 0, sizeof(*iobuf));
666 + iobuf->array_len = KIO_STATIC_PAGES;
667 + iobuf->pagelist = iobuf->page_array;
668 + iobuf->maplist = iobuf->map_array;
675 +void free_kiovec(int nr, struct kiobuf **bufp)
677 + struct kiobuf *iobuf;
680 + for (i = 0; i < nr; i++) {
682 + if (iobuf->array_len > KIO_STATIC_PAGES) {
683 + kfree (iobuf->pagelist);
684 + kfree (iobuf->maplist);
686 + kmem_cache_free(kiobuf_cachep, bufp[i]);
690 +int expand_kiobuf(struct kiobuf *iobuf, int wanted)
692 + unsigned long * pagelist;
693 + struct page ** maplist;
695 + if (iobuf->array_len >= wanted)
698 + pagelist = (unsigned long *)
699 + kmalloc(wanted * sizeof(unsigned long), GFP_KERNEL);
703 + maplist = (struct page **)
704 + kmalloc(wanted * sizeof(struct page **), GFP_KERNEL);
710 + /* Did it grow while we waited? */
711 + if (iobuf->array_len >= wanted) {
717 + memcpy (pagelist, iobuf->pagelist, wanted * sizeof(unsigned long));
718 + memcpy (maplist, iobuf->maplist, wanted * sizeof(struct page **));
720 + if (iobuf->array_len > KIO_STATIC_PAGES) {
721 + kfree (iobuf->pagelist);
722 + kfree (iobuf->maplist);
725 + iobuf->pagelist = pagelist;
726 + iobuf->maplist = maplist;
727 + iobuf->array_len = wanted;
732 --- linux-2.2.17.raw/include/linux/iobuf.h.~1~ Wed Oct 4 18:49:06 2000
733 +++ linux-2.2.17.raw/include/linux/iobuf.h Wed Oct 4 18:49:06 2000
738 + * Defines the structures used to track abstract kernel-space io buffers.
742 +#ifndef __LINUX_IOBUF_H
743 +#define __LINUX_IOBUF_H
745 +#include <linux/mm.h>
746 +#include <linux/init.h>
749 + * The kiobuf structure describes a physical set of pages reserved
750 + * locked for IO. The reference counts on each page will have been
751 + * incremented, and the flags field will indicate whether or not we have
752 + * pre-locked all of the pages for IO.
754 + * kiobufs may be passed in arrays to form a kiovec, but we must
755 + * preserve the property that no page is present more than once over the
759 +#define KIO_MAX_ATOMIC_IO 64 /* in kb */
760 +#define KIO_MAX_ATOMIC_BYTES (64 * 1024)
761 +#define KIO_STATIC_PAGES (KIO_MAX_ATOMIC_IO / (PAGE_SIZE >> 10))
762 +#define KIO_MAX_SECTORS (KIO_MAX_ATOMIC_IO * 2)
766 + int nr_pages; /* Pages actually referenced */
767 + int array_len; /* Space in the allocated lists */
768 + int offset; /* Offset to start of valid data */
769 + int length; /* Number of valid bytes of data */
771 + /* Keep separate track of the physical addresses and page
772 + * structs involved. If we do IO to a memory-mapped device
773 + * region, there won't necessarily be page structs defined for
774 + * every address. */
776 + unsigned long * pagelist;
777 + struct page ** maplist;
779 + unsigned int locked : 1; /* If set, the pages have been locked */
781 + /* Always embed enough struct pages for 64k of IO */
782 + unsigned long page_array[KIO_STATIC_PAGES];
783 + struct page * map_array[KIO_STATIC_PAGES];
789 +int map_user_kiobuf(int rw, struct kiobuf *, unsigned long va, size_t len);
790 +void unmap_kiobuf(struct kiobuf *iobuf);
794 +void __init kiobuf_init(void);
795 +int alloc_kiovec(int nr, struct kiobuf **);
796 +void free_kiovec(int nr, struct kiobuf **);
797 +int expand_kiobuf(struct kiobuf *, int);
801 +int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
802 + kdev_t dev, unsigned long b[], int size, int bmap);
804 +#endif /* __LINUX_IOBUF_H */
805 --- linux-2.2.17.raw/include/linux/major.h.~1~ Wed Jun 7 22:26:44 2000
806 +++ linux-2.2.17.raw/include/linux/major.h Wed Oct 4 18:49:06 2000
809 #define AURORA_MAJOR 79
811 +#define RAW_MAJOR 162
813 #define UNIX98_PTY_MASTER_MAJOR 128
814 #define UNIX98_PTY_MAJOR_COUNT 8
815 #define UNIX98_PTY_SLAVE_MAJOR (UNIX98_PTY_MASTER_MAJOR+UNIX98_PTY_MAJOR_COUNT)
816 --- linux-2.2.17.raw/include/linux/raw.h.~1~ Wed Oct 4 18:49:06 2000
817 +++ linux-2.2.17.raw/include/linux/raw.h Wed Oct 4 18:49:06 2000
819 +#ifndef __LINUX_RAW_H
820 +#define __LINUX_RAW_H
822 +#include <linux/types.h>
824 +#define RAW_SETBIND _IO( 0xac, 0 )
825 +#define RAW_GETBIND _IO( 0xac, 1 )
827 +struct raw_config_request
836 +/* drivers/char/raw.c */
837 +extern void raw_init(void);
839 +#endif /* __KERNEL__ */
841 +#endif /* __LINUX_RAW_H */
842 --- linux-2.2.17.raw/init/main.c.~1~ Mon Sep 4 18:39:28 2000
843 +++ linux-2.2.17.raw/init/main.c Wed Oct 4 18:50:50 2000
845 #include <linux/smp_lock.h>
846 #include <linux/blk.h>
847 #include <linux/hdreg.h>
848 +#include <linux/iobuf.h>
851 #include <asm/bugs.h>
852 @@ -1413,6 +1414,7 @@
853 #ifdef CONFIG_ARCH_S390
860 --- linux-2.2.17.raw/kernel/ksyms.c.~1~ Mon Sep 4 18:39:28 2000
861 +++ linux-2.2.17.raw/kernel/ksyms.c Wed Oct 4 18:51:58 2000
863 #include <linux/poll.h>
864 #include <linux/mm.h>
865 #include <linux/capability.h>
866 +#include <linux/iobuf.h>
868 #if defined(CONFIG_PROC_FS)
869 #include <linux/proc_fs.h>
871 EXPORT_SYMBOL(max_sectors);
872 EXPORT_SYMBOL(max_segments);
873 EXPORT_SYMBOL(max_readahead);
875 +/* kiobuf support */
876 +EXPORT_SYMBOL(map_user_kiobuf);
877 +EXPORT_SYMBOL(unmap_kiobuf);
878 +EXPORT_SYMBOL(alloc_kiovec);
879 +EXPORT_SYMBOL(free_kiovec);
880 +EXPORT_SYMBOL(expand_kiobuf);
881 +EXPORT_SYMBOL(brw_kiovec);
884 EXPORT_SYMBOL(tty_hangup);
885 --- linux-2.2.17.raw/mm/memory.c.~1~ Tue Jan 4 18:12:26 2000
886 +++ linux-2.2.17.raw/mm/memory.c Wed Oct 4 18:49:06 2000
888 #include <linux/mman.h>
889 #include <linux/swap.h>
890 #include <linux/smp_lock.h>
891 +#include <linux/pagemap.h>
892 +#include <linux/iobuf.h>
894 #include <asm/uaccess.h>
895 #include <asm/pgtable.h>
896 @@ -395,6 +397,183 @@
904 + * Do a quick page-table lookup for a single page.
906 +static unsigned long get_page(unsigned long address)
911 + pgd = pgd_offset(current->mm, address);
912 + pmd = pmd_offset(pgd, address);
914 + pte_t * pte = pte_offset(pmd, address);
915 + if (pte && pte_present(*pte)) {
916 + return pte_page(*pte);
920 + printk(KERN_ERR "Missing page in lock_down_page\n");
925 + * Given a physical address, is there a useful struct page pointing to it?
928 +static struct page * get_page_map(unsigned long page)
932 + if (MAP_NR(page) >= max_mapnr)
934 + if (page == ZERO_PAGE(page))
936 + map = mem_map + MAP_NR(page);
937 + if (PageReserved(map))
943 + * Force in an entire range of pages from the current process's user VA,
944 + * and pin and lock the pages for IO.
947 +#define dprintk(x...)
948 +int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
950 + unsigned long ptr, end;
952 + struct mm_struct * mm;
953 + struct vm_area_struct * vma = 0;
954 + unsigned long page;
956 + int doublepage = 0;
960 + /* Make sure the iobuf is not already mapped somewhere. */
961 + if (iobuf->nr_pages)
965 + dprintk ("map_user_kiobuf: begin\n");
967 + ptr = va & PAGE_MASK;
968 + end = (va + len + PAGE_SIZE - 1) & PAGE_MASK;
969 + err = expand_kiobuf(iobuf, (end - ptr) >> PAGE_SHIFT);
974 + down(&mm->mmap_sem);
978 + iobuf->offset = va & ~PAGE_MASK;
979 + iobuf->length = len;
984 + * First of all, try to fault in all of the necessary pages
986 + while (ptr < end) {
987 + if (!vma || ptr >= vma->vm_end) {
988 + vma = find_vma(current->mm, ptr);
992 + if (!handle_mm_fault(current, vma, ptr, (rw==READ)))
994 + page = get_page(ptr);
996 + printk (KERN_ERR "Missing page in map_user_kiobuf\n");
999 + map = get_page_map(page);
1001 + if (PageLocked(map))
1003 + atomic_inc(&map->count);
1004 + set_bit(PG_locked, &map->flags);
1006 + dprintk ("Installing page %p %p: %d\n", (void *)page, map, i);
1007 + iobuf->pagelist[i] = page;
1008 + iobuf->maplist[i] = map;
1009 + iobuf->nr_pages = ++i;
1014 + up(&mm->mmap_sem);
1015 + dprintk ("map_user_kiobuf: end OK\n");
1019 + up(&mm->mmap_sem);
1020 + unmap_kiobuf(iobuf);
1021 + dprintk ("map_user_kiobuf: end %d\n", err);
1027 + * Undo the locking so far, wait on the page we got to, and try again.
1029 + unmap_kiobuf(iobuf);
1030 + up(&mm->mmap_sem);
1033 + * Did the release also unlock the page we got stuck on?
1035 + if (!PageLocked(map)) {
1036 + /* If so, we may well have the page mapped twice in the
1037 + * IO address range. Bad news. Of course, it _might_
1038 + * just be a coincidence, but if it happens more than
1039 + * once, chances are we have a double-mapped page. */
1040 + if (++doublepage >= 3) {
1048 + wait_on_page(map);
1049 + if (++repeat < 16)
1056 + * Unmap all of the pages referenced by a kiobuf. We release the pages,
1057 + * and unlock them if they were locked.
1060 +void unmap_kiobuf (struct kiobuf *iobuf)
1065 + for (i = 0; i < iobuf->nr_pages; i++) {
1066 + map = iobuf->maplist[i];
1068 + if (map && iobuf->locked) {
1070 + clear_bit(PG_locked, &map->flags);
1071 + wake_up(&map->wait);
1075 + iobuf->nr_pages = 0;
1076 + iobuf->locked = 0;
1079 static inline void zeromap_pte_range(pte_t * pte, unsigned long address,