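kiobuf allocation rework for 2.4.18 (this header is reconstructed from the
diff below; the original patch carried no description): drop the kiobuf
preallocated at raw_open() time together with the f_iobuf_lock fast path,
and have rw_raw_dev() take a fresh kiovec from the slab cache on every
request.  kiobuf_init() now runs from a slab constructor (kiobuf_ctor), so
kmem_cache_alloc() normally hands back a fully built object; a new
`initialized' bit in struct kiobuf records whether the constructor
succeeded, and alloc_kiovec() retries the init only for objects whose
constructor ran out of memory.  free_kiovec() merely re-primes the wait
queue, io_count and end_io before returning the object to the cache; the
maplist and buffer heads are freed by the destructor (kiobuf_dtor) when
the slab is reaped.

A minimal caller-side sketch of the resulting API (illustrative only, not
part of the patch; do_raw_io() is a hypothetical caller standing in for
rw_raw_dev()):

	#include <linux/iobuf.h>

	/* Every request allocates and frees its own kiovec; both calls
	 * are cheap because the slab ctor/dtor own the expensive setup
	 * and teardown. */
	static int do_raw_io(void)
	{
		struct kiobuf *iobuf;
		int err;

		err = alloc_kiovec(1, &iobuf);	/* arrives pre-initialized */
		if (err)
			return err;
		/* ... map pages and drive I/O through iobuf ... */
		free_kiovec(1, &iobuf);		/* re-primed, back to the cache */
		return 0;
	}
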
diff -urNp linux-1580/drivers/char/raw.c linux-1620/drivers/char/raw.c
--- linux-1580/drivers/char/raw.c
+++ linux-1620/drivers/char/raw.c
@@ -86,12 +86,6 @@ int raw_open(struct inode *inode, struct
 		filp->f_op = &raw_ctl_fops;
 		return 0;
 	}
-
-	if (!filp->f_iobuf) {
-		err = alloc_kiovec(1, &filp->f_iobuf);
-		if (err)
-			return err;
-	}
 
 	down(&raw_devices[minor].mutex);
 	/*
@@ -295,7 +289,6 @@ ssize_t rw_raw_dev(int rw, struct file *
 		   size_t size, loff_t *offp)
 {
 	struct kiobuf * iobuf;
-	int		new_iobuf;
 	int		err = 0;
 	unsigned long	blocknr, blocks;
 	size_t		transferred;
@@ -314,18 +307,10 @@ ssize_t rw_raw_dev(int rw, struct file *
 
 	minor = MINOR(filp->f_dentry->d_inode->i_rdev);
 
-	new_iobuf = 0;
-	iobuf = filp->f_iobuf;
-	if (test_and_set_bit(0, &filp->f_iobuf_lock)) {
-		/*
-		 * A parallel read/write is using the preallocated iobuf
-		 * so just run slow and allocate a new one.
-		 */
-		err = alloc_kiovec(1, &iobuf);
-		if (err)
-			goto out;
-		new_iobuf = 1;
-	}
+	err = alloc_kiovec(1, &iobuf);
+	if (err)
+		return err;
+
 
 	dev = to_kdev_t(raw_devices[minor].binding->bd_dev);
 	sector_size = raw_devices[minor].sector_size;
@@ -398,10 +383,6 @@ ssize_t rw_raw_dev(int rw, struct file *
 	}
 
  out_free:
-	if (!new_iobuf)
-		clear_bit(0, &filp->f_iobuf_lock);
-	else
-		free_kiovec(1, &iobuf);
- out:
+	free_kiovec(1, &iobuf);
 	return err;
 }
diff -urNp linux-1580/fs/iobuf.c linux-1620/fs/iobuf.c
--- linux-1580/fs/iobuf.c
+++ linux-1620/fs/iobuf.c
@@ -8,8 +8,6 @@
 
 #include <linux/iobuf.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
-
 
 static kmem_cache_t *kiobuf_cachep;
 
@@ -27,6 +25,8 @@ void end_kio_request(struct kiobuf *kiob
 
 static int kiobuf_init(struct kiobuf *iobuf)
 {
+	int retval;
+
 	init_waitqueue_head(&iobuf->wait_queue);
 	iobuf->array_len = 0;
 	iobuf->nr_pages = 0;
@@ -35,7 +35,16 @@ static int kiobuf_init(struct kiobuf *io
 	iobuf->blocks = NULL;
 	atomic_set(&iobuf->io_count, 0);
 	iobuf->end_io = NULL;
-	return expand_kiobuf(iobuf, KIO_STATIC_PAGES);
+	iobuf->initialized = 0;
+	retval = expand_kiobuf(iobuf, KIO_STATIC_PAGES);
+	if (retval) return retval;
+	retval = alloc_kiobuf_bhs(iobuf);
+	if (retval) {
+		kfree(iobuf->maplist);
+		return retval;
+	}
+	iobuf->initialized = 1;
+	return 0;
 }
 
 int alloc_kiobuf_bhs(struct kiobuf * kiobuf)
@@ -89,6 +98,21 @@ void free_kiobuf_bhs(struct kiobuf * kio
 	}
 }
 
+void kiobuf_ctor(void * objp, kmem_cache_t * cachep, unsigned long flag)
+{
+	struct kiobuf * iobuf = (struct kiobuf *) objp;
+	kiobuf_init(iobuf);
+}
+
+void kiobuf_dtor(void * objp, kmem_cache_t * cachep, unsigned long flag)
+{
+	struct kiobuf * iobuf = (struct kiobuf *) objp;
+	if (iobuf->initialized) {
+		kfree(iobuf->maplist);
+		free_kiobuf_bhs(iobuf);
+	}
+}
+
 int alloc_kiovec(int nr, struct kiobuf **bufp)
 {
 	int i;
@@ -98,10 +122,11 @@ int alloc_kiovec(int nr, struct kiobuf *
 		iobuf = kmem_cache_alloc(kiobuf_cachep, GFP_KERNEL);
 		if (unlikely(!iobuf))
 			goto nomem;
-		if (unlikely(kiobuf_init(iobuf)))
-			goto nomem2;
-		if (unlikely(alloc_kiobuf_bhs(iobuf)))
-			goto nomem2;
+		if (unlikely(!iobuf->initialized)) {
+			/* try again to complete previously failed ctor */
+			if (unlikely(kiobuf_init(iobuf)))
+				goto nomem2;
+		}
 		bufp[i] = iobuf;
 	}
 
@@ -121,11 +146,10 @@ void free_kiovec(int nr, struct kiobuf *
 
 	for (i = 0; i < nr; i++) {
 		iobuf = bufp[i];
-		if (iobuf->locked)
-			unlock_kiovec(1, &iobuf);
-		kfree(iobuf->maplist);
-		free_kiobuf_bhs(iobuf);
-		kmem_cache_free(kiobuf_cachep, bufp[i]);
+		init_waitqueue_head(&iobuf->wait_queue);
+		iobuf->io_count.counter = 0;
+		iobuf->end_io = NULL;
+		kmem_cache_free(kiobuf_cachep, iobuf);
 	}
 }
 
@@ -180,7 +204,7 @@ repeat:
 void __init iobuf_cache_init(void)
 {
 	kiobuf_cachep = kmem_cache_create("kiobuf", sizeof(struct kiobuf),
-					  0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+					  0, SLAB_HWCACHE_ALIGN, kiobuf_ctor, kiobuf_dtor);
 	if (!kiobuf_cachep)
 		panic("Cannot create kiobuf SLAB cache");
 }
diff -urNp linux-1580/include/linux/iobuf.h linux-1620/include/linux/iobuf.h
--- linux-1580/include/linux/iobuf.h
+++ linux-1620/include/linux/iobuf.h
@@ -39,7 +39,8 @@ struct kiobuf
 	int		offset;		/* Offset to start of valid data */
 	int		length;		/* Number of valid bytes of data */
 
-	unsigned int	locked : 1;	/* If set, pages has been locked */
+	unsigned int	locked : 1,	/* If set, pages has been locked */
+			initialized:1;	/* If set, done initialize */
 
 	struct page **	maplist;
 	struct buffer_head ** bh;