diff -urNp linux-1580/drivers/char/raw.c linux-1620/drivers/char/raw.c
--- linux-1580/drivers/char/raw.c
+++ linux-1620/drivers/char/raw.c
@@ -86,12 +86,6 @@ int raw_open(struct inode *inode, struct
 		filp->f_op = &raw_ctl_fops;
 		return 0;
 	}
-
-	if (!filp->f_iobuf) {
-		err = alloc_kiovec(1, &filp->f_iobuf);
-		if (err)
-			return err;
-	}
 
 	down(&raw_devices[minor].mutex);
 	/*
@@ -295,7 +289,6 @@ ssize_t rw_raw_dev(int rw, struct file *
 		   size_t size, loff_t *offp)
 {
 	struct kiobuf * iobuf;
-	int		new_iobuf;
 	int		err = 0;
 	unsigned long	blocknr, blocks;
 	size_t		transferred;
@@ -314,18 +307,10 @@ ssize_t rw_raw_dev(int rw, struct file *
 
 	minor = MINOR(filp->f_dentry->d_inode->i_rdev);
 
-	new_iobuf = 0;
-	iobuf = filp->f_iobuf;
-	if (test_and_set_bit(0, &filp->f_iobuf_lock)) {
-		/*
-		 * A parallel read/write is using the preallocated iobuf
-		 * so just run slow and allocate a new one.
-		 */
-		err = alloc_kiovec(1, &iobuf);
-		if (err)
-			goto out;
-		new_iobuf = 1;
-	}
+	err = alloc_kiovec(1, &iobuf);
+	if (err)
+		return err;
+
 	dev = to_kdev_t(raw_devices[minor].binding->bd_dev);
 	sector_size = raw_devices[minor].sector_size;
@@ -398,10 +383,6 @@ ssize_t rw_raw_dev(int rw, struct file *
 	}
 
  out_free:
-	if (!new_iobuf)
-		clear_bit(0, &filp->f_iobuf_lock);
-	else
-		free_kiovec(1, &iobuf);
- out:
+	free_kiovec(1, &iobuf);
 	return err;
 }
diff -urNp linux-1580/fs/iobuf.c linux-1620/fs/iobuf.c
--- linux-1580/fs/iobuf.c
+++ linux-1620/fs/iobuf.c
@@ -8,8 +8,6 @@
 
 #include <linux/iobuf.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
-
 static kmem_cache_t *kiobuf_cachep;
@@ -27,6 +25,8 @@ void end_kio_request(struct kiobuf *kiob
 
 static int kiobuf_init(struct kiobuf *iobuf)
 {
+	int retval;
+
 	init_waitqueue_head(&iobuf->wait_queue);
 	iobuf->array_len = 0;
 	iobuf->nr_pages = 0;
@@ -35,7 +35,16 @@ static int kiobuf_init(struct kiobuf *io
 	iobuf->blocks = NULL;
 	atomic_set(&iobuf->io_count, 0);
 	iobuf->end_io = NULL;
-	return expand_kiobuf(iobuf, KIO_STATIC_PAGES);
+	iobuf->initialized = 0;
+	retval = expand_kiobuf(iobuf, KIO_STATIC_PAGES);
+	if (retval) return retval;
+	retval = alloc_kiobuf_bhs(iobuf);
+	if (retval) {
+		kfree(iobuf->maplist);
+		return retval;
+	}
+	iobuf->initialized = 1;
+	return 0;
 }
 
 int alloc_kiobuf_bhs(struct kiobuf * kiobuf)
@@ -89,6 +98,21 @@ void free_kiobuf_bhs(struct kiobuf * kio
 	}
 }
 
+void kiobuf_ctor(void * objp, kmem_cache_t * cachep, unsigned long flag)
+{
+	struct kiobuf * iobuf = (struct kiobuf *) objp;
+	kiobuf_init(iobuf);
+}
+
+void kiobuf_dtor(void * objp, kmem_cache_t * cachep, unsigned long flag)
+{
+	struct kiobuf * iobuf = (struct kiobuf *) objp;
+	if (iobuf->initialized) {
+		kfree(iobuf->maplist);
+		free_kiobuf_bhs(iobuf);
+	}
+}
+
 int alloc_kiovec(int nr, struct kiobuf **bufp)
 {
 	int i;
@@ -98,10 +122,11 @@ int alloc_kiovec(int nr, struct kiobuf *
 		iobuf = kmem_cache_alloc(kiobuf_cachep, GFP_KERNEL);
 		if (unlikely(!iobuf))
 			goto nomem;
-		if (unlikely(kiobuf_init(iobuf)))
-			goto nomem2;
-		if (unlikely(alloc_kiobuf_bhs(iobuf)))
-			goto nomem2;
+		if (unlikely(!iobuf->initialized)) {
+			/* try again to complete previously failed ctor */
+			if (unlikely(kiobuf_init(iobuf)))
+				goto nomem2;
+		}
 		bufp[i] = iobuf;
 	}
@@ -121,11 +146,10 @@ void free_kiovec(int nr, struct kiobuf *
 
 	for (i = 0; i < nr; i++) {
 		iobuf = bufp[i];
-		if (iobuf->locked)
-			unlock_kiovec(1, &iobuf);
-		kfree(iobuf->maplist);
-		free_kiobuf_bhs(iobuf);
-		kmem_cache_free(kiobuf_cachep, bufp[i]);
+		init_waitqueue_head(&iobuf->wait_queue);
+		iobuf->io_count.counter = 0;
+		iobuf->end_io = NULL;
+		kmem_cache_free(kiobuf_cachep, iobuf);
 	}
 }
@@ -180,7 +204,7 @@ repeat:
 void __init iobuf_cache_init(void)
{
 	kiobuf_cachep = kmem_cache_create("kiobuf", sizeof(struct kiobuf),
-					   0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+					   0, SLAB_HWCACHE_ALIGN, kiobuf_ctor, kiobuf_dtor);
 	if (!kiobuf_cachep)
 		panic("Cannot create kiobuf SLAB cache");
 }
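For context, a minimal sketch of how a caller sees the kiovec API after
this change (example_rw() and its body are hypothetical, not from the
patch).  alloc_kiovec() now normally just takes a fully constructed
object off the kiobuf slab, since kiobuf_ctor() has already built the
maplist and the buffer heads:

	#include <linux/iobuf.h>

	static int example_rw(void)
	{
		struct kiobuf *iobuf;
		int err;

		/* Fast path: the object leaves kiobuf_cachep with its
		 * maplist and bhs already allocated by kiobuf_ctor();
		 * kiobuf_init() is re-run only if that ctor had failed. */
		err = alloc_kiovec(1, &iobuf);
		if (err)
			return err;

		/* ... map_user_kiobuf()/brw_kiovec()-style I/O here ... */

		/* free_kiovec() no longer tears the object down: it resets
		 * the wait queue, io_count and end_io, then returns the
		 * still-initialized object to the slab. */
		free_kiovec(1, &iobuf);
		return 0;
	}
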
diff -urNp linux-1580/include/linux/iobuf.h linux-1620/include/linux/iobuf.h
--- linux-1580/include/linux/iobuf.h
+++ linux-1620/include/linux/iobuf.h
@@ -39,7 +39,8 @@ struct kiobuf
 	int		offset;		/* Offset to start of valid data */
 	int		length;		/* Number of valid bytes of data */
 
-	unsigned int	locked : 1;	/* If set, pages has been locked */
+	unsigned int	locked : 1,	/* If set, pages has been locked */
+			initialized:1;	/* If set, done initialize */
 
 	struct page **	maplist;
 	struct buffer_head ** bh;
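
One wrinkle worth spelling out: a 2.4 slab constructor returns void, so
the allocations done inside kiobuf_init() can fail with no way to report
it at construction time.  The patch records the outcome in the new
initialized bit, lets alloc_kiovec() retry the init for any object whose
ctor came up short, and has kiobuf_dtor() undo only what actually
succeeded.  The same pattern in isolation, sketched for a hypothetical
cache of struct thing (the names and SOME_BUF_SIZE are illustrative,
not from the patch):

	#include <linux/slab.h>

	#define SOME_BUF_SIZE 4096		/* illustrative only */

	struct thing {
		void *buf;
		unsigned int initialized:1;
	};

	static void thing_ctor(void *objp, kmem_cache_t *cachep,
			       unsigned long flags)
	{
		struct thing *t = (struct thing *) objp;

		/* void return: record whether the allocation worked */
		t->initialized = 0;
		t->buf = kmalloc(SOME_BUF_SIZE, GFP_KERNEL);
		if (t->buf)
			t->initialized = 1;
	}

	static void thing_dtor(void *objp, kmem_cache_t *cachep,
			       unsigned long flags)
	{
		struct thing *t = (struct thing *) objp;

		/* only undo what a successful ctor actually did */
		if (t->initialized)
			kfree(t->buf);
	}

Because the dtor runs only when the slab reclaims a page, objects keep
their expensive state across alloc/free cycles; the price is that
free_kiovec() must reset the per-use fields itself, as the last
fs/iobuf.c hunk above does.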