dm-crypt: don't allocate pages for a partial request.

This patch changes crypt_alloc_buffer so that it always allocates pages for
a full request.

This change enables further simplification and the removal of one refcount
in the next patches.

Note: the next patch is needed to fix a theoretical deadlock.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>

---
 drivers/md/dm-crypt.c |  139 ++++++++++----------------------------------------
 1 file changed, 30 insertions(+), 109 deletions(-)
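[Reader's note, not part of the upstream change: the hunks below are easier to follow as a whole function. This is a rough reconstruction of crypt_alloc_buffer() after the patch applies, pieced together from the hunks; the nr_iovecs computation comes from unchanged code not visible in the hunks, so treat it as an assumption. The point is that the function now always allocates pages for the full request and fills the bio_vecs directly, instead of stopping early and returning a partial bio.]

static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; /* assumed, from unchanged code */
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;
	struct bio_vec *bvec;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		/*
		 * A GFP_NOIO mempool allocation waits instead of failing,
		 * so the old partial-allocation fallback is gone.
		 */
		page = mempool_alloc(cc->page_pool, gfp_mask);

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		/* fill the bio_vec directly rather than via bio_add_page() */
		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
		bvec->bv_page = page;
		bvec->bv_len = len;
		bvec->bv_offset = 0;

		clone->bi_iter.bi_size += len;

		size -= len;
	}

	return clone;
}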
Index: linux-3.14/drivers/md/dm-crypt.c
===================================================================
--- linux-3.14.orig/drivers/md/dm-crypt.c	2014-04-04 20:48:50.000000000 +0200
+++ linux-3.14/drivers/md/dm-crypt.c	2014-04-04 20:57:36.000000000 +0200
@@ -58,7 +58,6 @@ struct dm_crypt_io {
 	atomic_t io_pending;
 	int error;
 	sector_t sector;
-	struct dm_crypt_io *base_io;
 } CRYPTO_MINALIGN_ATTR;
 
 struct dm_crypt_request {
@@ -172,7 +171,6 @@ struct crypt_config {
 };
 
 #define MIN_IOS        16
-#define MIN_POOL_PAGES 32
 
 static struct kmem_cache *_crypt_io_pool;
 
@@ -951,14 +949,13 @@ static int crypt_convert(struct crypt_co
 	return 0;
 }
 
+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
+
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages, indicated by
- * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
-				      unsigned *out_of_pages)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
@@ -966,41 +963,27 @@ static struct bio *crypt_alloc_buffer(st
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
 	unsigned i, len;
 	struct page *page;
+	struct bio_vec *bvec;
 
 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
 		return NULL;
 
 	clone_init(io, clone);
-	*out_of_pages = 0;
 
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page) {
-			*out_of_pages = 1;
-			break;
-		}
-
-		/*
-		 * If additional pages cannot be allocated without waiting,
-		 * return a partially-allocated bio. The caller will then try
-		 * to allocate more bios while submitting this partial bio.
-		 */
-		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
 		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
 
-		if (!bio_add_page(clone, page, len, 0)) {
-			mempool_free(page, cc->page_pool);
-			break;
-		}
+		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
+		bvec->bv_page = page;
+		bvec->bv_len = len;
+		bvec->bv_offset = 0;
 
-		size -= len;
-	}
+		clone->bi_iter.bi_size += len;
 
-	if (!clone->bi_iter.bi_size) {
-		bio_put(clone);
-		return NULL;
+		size -= len;
 	}
 
 	return clone;
@@ -1025,7 +1008,6 @@ static void crypt_io_init(struct dm_cryp
 	io->base_bio = bio;
 	io->sector = sector;
 	io->error = 0;
-	io->base_io = NULL;
 	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
 }
@@ -1038,13 +1020,11 @@ static void crypt_inc_pending(struct dm_
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
- * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
-	struct dm_crypt_io *base_io = io->base_io;
 	int error = io->error;
 
 	if (!atomic_dec_and_test(&io->io_pending))
@@ -1055,13 +1035,7 @@ static void crypt_dec_pending(struct dm_
 	if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
 		mempool_free(io, cc->io_pool);
 
-	if (likely(!base_io))
-		bio_endio(base_bio, error);
-	else {
-		if (error && !base_io->error)
-			base_io->error = error;
-		crypt_dec_pending(base_io);
-	}
+	bio_endio(base_bio, error);
 }
 
 /*
@@ -1197,10 +1171,7 @@ static void kcryptd_crypt_write_convert(
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
-	struct dm_crypt_io *new_io;
 	int crypt_finished;
-	unsigned out_of_pages = 0;
-	unsigned remaining = io->base_bio->bi_iter.bi_size;
 	sector_t sector = io->sector;
 	int r;
 
@@ -1210,80 +1181,30 @@ static void kcryptd_crypt_write_convert(
 	crypt_inc_pending(io);
 	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
 
-	/*
-	 * The allocated buffers can be smaller than the whole bio,
-	 * so repeat the whole process until all the data can be handled.
-	 */
-	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
-		if (unlikely(!clone)) {
-			io->error = -ENOMEM;
-			break;
-		}
-
-		io->ctx.bio_out = clone;
-		io->ctx.iter_out = clone->bi_iter;
-
-		remaining -= clone->bi_iter.bi_size;
-		sector += bio_sectors(clone);
-
-		crypt_inc_pending(io);
-
-		r = crypt_convert(cc, &io->ctx);
-		if (r < 0)
-			io->error = -EIO;
-
-		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
-
-		/* Encryption was already finished, submit io now */
-		if (crypt_finished) {
-			kcryptd_crypt_write_io_submit(io, 0);
-
-			/*
-			 * If there was an error, do not try next fragments.
-			 * For async, error is processed in async handler.
-			 */
-			if (unlikely(r < 0))
-				break;
+	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+	if (unlikely(!clone)) {
+		io->error = -EIO;
+		goto dec;
+	}
 
-			io->sector = sector;
-		}
+	io->ctx.bio_out = clone;
+	io->ctx.iter_out = clone->bi_iter;
 
-		/*
-		 * Out of memory -> run queues
-		 * But don't wait if split was due to the io size restriction
-		 */
-		if (unlikely(out_of_pages))
-			congestion_wait(BLK_RW_ASYNC, HZ/100);
+	sector += bio_sectors(clone);
 
-		/*
-		 * With async crypto it is unsafe to share the crypto context
-		 * between fragments, so switch to a new dm_crypt_io structure.
-		 */
-		if (unlikely(!crypt_finished && remaining)) {
-			new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
-			crypt_io_init(new_io, io->cc, io->base_bio, sector);
-			crypt_inc_pending(new_io);
-			crypt_convert_init(cc, &new_io->ctx, NULL,
-					   io->base_bio, sector);
-			new_io->ctx.iter_in = io->ctx.iter_in;
-
-			/*
-			 * Fragments after the first use the base_io
-			 * pending count.
-			 */
-			if (!io->base_io)
-				new_io->base_io = io;
-			else {
-				new_io->base_io = io->base_io;
-				crypt_inc_pending(io->base_io);
-				crypt_dec_pending(io);
-			}
+	crypt_inc_pending(io);
+	r = crypt_convert(cc, &io->ctx);
+	if (r)
+		io->error = -EIO;
+	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
 
-			io = new_io;
-		}
+	/* Encryption was already finished, submit io now */
+	if (crypt_finished) {
+		kcryptd_crypt_write_io_submit(io, 0);
+		io->sector = sector;
 	}
 
+dec:
 	crypt_dec_pending(io);
 }
 
@@ -1738,7 +1659,7 @@ static int crypt_ctr(struct dm_target *t
 		sizeof(struct dm_crypt_io) + cc->dmreq_start +
 		sizeof(struct dm_crypt_request) + cc->iv_size;
 
-	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
 		goto bad;