dm-crypt: don't allocate pages for a partial request.

This patch changes crypt_alloc_buffer so that it always allocates pages for
a full request.

This change enables further simplification and the removal of one refcount
in the next patches.

Note: the next patch is needed to fix a theoretical deadlock.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>

---
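
For reference, crypt_alloc_buffer() after this change takes roughly the following
shape. This is a sketch assembled from the hunk below; the local declarations and
the bio_alloc_bioset() call sit above the hunk and are assumed here from the
surrounding 3.10 code, so treat them as context rather than as part of the diff:

static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		/* waiting allocation from the mempool cannot fail */
		page = mempool_alloc(cc->page_pool, gfp_mask);

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			/* no partial bio any more: release everything and fail */
			DMERR("bio_add_page failed for page %d: the underlying device has stricter limits than dm-crypt target", i);
			mempool_free(page, cc->page_pool);
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			return NULL;
		}

		size -= len;
	}

	return clone;
}

Because gfp_mask keeps __GFP_WAIT, mempool_alloc() may sleep but does not fail,
so the function either returns a bio covering the whole request or NULL when
bio_add_page() is refused by the underlying device.
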
 drivers/md/dm-crypt.c | 134 +++++++++-----------------------------------------
 1 file changed, 26 insertions(+), 108 deletions(-)

Index: linux-3.10.4-fast/drivers/md/dm-crypt.c
===================================================================
--- linux-3.10.4-fast.orig/drivers/md/dm-crypt.c	2013-07-31 17:03:18.000000000 +0200
+++ linux-3.10.4-fast/drivers/md/dm-crypt.c	2013-07-31 17:03:21.000000000 +0200
@@ -59,7 +59,6 @@ struct dm_crypt_io {
 	atomic_t io_pending;
 	int error;
 	sector_t sector;
-	struct dm_crypt_io *base_io;
 };
 
 struct dm_crypt_request {
@@ -162,7 +161,6 @@ struct crypt_config {
 };
 
 #define MIN_IOS 16
-#define MIN_POOL_PAGES 32
 
 static struct kmem_cache *_crypt_io_pool;
 
@@ -777,14 +775,13 @@ static int crypt_convert(struct crypt_co
 	return 0;
 }
 
+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
+
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages, indicated by
- * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
-				      unsigned *out_of_pages)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
@@ -798,37 +795,23 @@ static struct bio *crypt_alloc_buffer(st
 		return NULL;
 
 	clone_init(io, clone);
-	*out_of_pages = 0;
 
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page) {
-			*out_of_pages = 1;
-			break;
-		}
-
-		/*
-		 * If additional pages cannot be allocated without waiting,
-		 * return a partially-allocated bio. The caller will then try
-		 * to allocate more bios while submitting this partial bio.
-		 */
-		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
 		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
 
 		if (!bio_add_page(clone, page, len, 0)) {
+			DMERR("bio_add_page failed for page %d: the underlying device has stricter limits than dm-crypt target", i);
 			mempool_free(page, cc->page_pool);
-			break;
+			crypt_free_buffer_pages(cc, clone);
+			bio_put(clone);
+			return NULL;
 		}
 
 		size -= len;
 	}
 
-	if (!clone->bi_size) {
-		bio_put(clone);
-		return NULL;
-	}
-
 	return clone;
 }
 
@@ -854,7 +837,6 @@ static struct dm_crypt_io *crypt_io_allo
 	io->base_bio = bio;
 	io->sector = sector;
 	io->error = 0;
-	io->base_io = NULL;
 	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
 
@@ -869,13 +851,11 @@ static void crypt_inc_pending(struct dm_
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
- * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
-	struct dm_crypt_io *base_io = io->base_io;
 	int error = io->error;
 
 	if (!atomic_dec_and_test(&io->io_pending))
@@ -885,13 +865,7 @@ static void crypt_dec_pending(struct dm_
 		mempool_free(io->ctx.req, cc->req_pool);
 	mempool_free(io, cc->io_pool);
 
-	if (likely(!base_io))
-		bio_endio(base_bio, error);
-	else {
-		if (error && !base_io->error)
-			base_io->error = error;
-		crypt_dec_pending(base_io);
-	}
+	bio_endio(base_bio, error);
 }
 
 /*
@@ -1027,10 +1001,7 @@ static void kcryptd_crypt_write_convert(
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
-	struct dm_crypt_io *new_io;
 	int crypt_finished;
-	unsigned out_of_pages = 0;
-	unsigned remaining = io->base_bio->bi_size;
 	sector_t sector = io->sector;
 	int r;
 
@@ -1040,81 +1011,28 @@ static void kcryptd_crypt_write_convert(
 	crypt_inc_pending(io);
 	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
 
-	/*
-	 * The allocated buffers can be smaller than the whole bio,
-	 * so repeat the whole process until all the data can be handled.
-	 */
-	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
-		if (unlikely(!clone)) {
-			io->error = -ENOMEM;
-			break;
-		}
-
-		io->ctx.bio_out = clone;
-		io->ctx.idx_out = 0;
-
-		remaining -= clone->bi_size;
-		sector += bio_sectors(clone);
-
-		crypt_inc_pending(io);
-
-		r = crypt_convert(cc, &io->ctx);
-		if (r < 0)
-			io->error = -EIO;
-
-		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
-
-		/* Encryption was already finished, submit io now */
-		if (crypt_finished) {
-			kcryptd_crypt_write_io_submit(io, 0);
-
-			/*
-			 * If there was an error, do not try next fragments.
-			 * For async, error is processed in async handler.
-			 */
-			if (unlikely(r < 0))
-				break;
+	clone = crypt_alloc_buffer(io, io->base_bio->bi_size);
+	if (unlikely(!clone)) {
+		io->error = -EIO;
+		goto dec;
+	}
 
-		io->sector = sector;
-	}
+	io->ctx.bio_out = clone;
+	io->ctx.idx_out = 0;
 
-	/*
-	 * Out of memory -> run queues
-	 * But don't wait if split was due to the io size restriction
-	 */
-	if (unlikely(out_of_pages))
-		congestion_wait(BLK_RW_ASYNC, HZ/100);
+	sector += bio_sectors(clone);
 
-	/*
-	 * With async crypto it is unsafe to share the crypto context
-	 * between fragments, so switch to a new dm_crypt_io structure.
-	 */
-	if (unlikely(!crypt_finished && remaining)) {
-		new_io = crypt_io_alloc(io->cc, io->base_bio,
-					sector);
-		crypt_inc_pending(new_io);
-		crypt_convert_init(cc, &new_io->ctx, NULL,
-				   io->base_bio, sector);
-		new_io->ctx.idx_in = io->ctx.idx_in;
-		new_io->ctx.offset_in = io->ctx.offset_in;
-
-		/*
-		 * Fragments after the first use the base_io
-		 * pending count.
-		 */
-		if (!io->base_io)
-			new_io->base_io = io;
-		else {
-			new_io->base_io = io->base_io;
-			crypt_inc_pending(io->base_io);
-			crypt_dec_pending(io);
-		}
+	crypt_inc_pending(io);
+	r = crypt_convert(cc, &io->ctx);
+	if (r)
+		io->error = -EIO;
+	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
 
-		io = new_io;
-	}
-	}
+	/* Encryption was already finished, submit io now */
+	if (crypt_finished)
+		kcryptd_crypt_write_io_submit(io, 0);
 
+dec:
 	crypt_dec_pending(io);
 }
 
@@ -1553,7 +1471,7 @@ static int crypt_ctr(struct dm_target *t
 		goto bad;
 	}
 
-	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
 		goto bad;