1 dm-crypt: don't allocate pages for a partial request.
3 This patch changes crypt_alloc_buffer so that it always allocates pages for a full request.
6 This change enables further simplification and the removal of one refcount variable (base_io).
9 Note: the next patch is needed to fix a theoretical deadlock (allocating all pages for a request at once can exhaust the page mempool when multiple requests are in flight).
11 Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
14 drivers/md/dm-crypt.c | 134 +++++++++-----------------------------------------
15 1 file changed, 26 insertions(+), 108 deletions(-)
17 Index: linux-3.10.4-fast/drivers/md/dm-crypt.c
18 ===================================================================
19 --- linux-3.10.4-fast.orig/drivers/md/dm-crypt.c 2013-07-31 17:03:18.000000000 +0200
20 +++ linux-3.10.4-fast/drivers/md/dm-crypt.c 2013-07-31 17:03:21.000000000 +0200
21 @@ -59,7 +59,6 @@ struct dm_crypt_io {
25 - struct dm_crypt_io *base_io;
28 struct dm_crypt_request {
29 @@ -162,7 +161,6 @@ struct crypt_config {
33 -#define MIN_POOL_PAGES 32
35 static struct kmem_cache *_crypt_io_pool;
37 @@ -777,14 +775,13 @@ static int crypt_convert(struct crypt_co
41 +static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
44 * Generate a new unfragmented bio with the given size
45 * This should never violate the device limitations
46 - * May return a smaller bio when running out of pages, indicated by
47 - * *out_of_pages set to 1.
49 -static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
50 - unsigned *out_of_pages)
51 +static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
53 struct crypt_config *cc = io->cc;
55 @@ -798,37 +795,23 @@ static struct bio *crypt_alloc_buffer(st
58 clone_init(io, clone);
61 for (i = 0; i < nr_iovecs; i++) {
62 page = mempool_alloc(cc->page_pool, gfp_mask);
69 - * If additional pages cannot be allocated without waiting,
70 - * return a partially-allocated bio. The caller will then try
71 - * to allocate more bios while submitting this partial bio.
73 - gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
75 len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
77 if (!bio_add_page(clone, page, len, 0)) {
78 + DMERR("bio_add_page failed for page %d: the underlying device has stricter limits than dm-crypt target", i);
79 mempool_free(page, cc->page_pool);
81 + crypt_free_buffer_pages(cc, clone);
89 - if (!clone->bi_size) {
97 @@ -854,7 +837,6 @@ static struct dm_crypt_io *crypt_io_allo
101 - io->base_io = NULL;
103 atomic_set(&io->io_pending, 0);
105 @@ -869,13 +851,11 @@ static void crypt_inc_pending(struct dm_
107 * One of the bios was finished. Check for completion of
108 * the whole request and correctly clean up the buffer.
109 - * If base_io is set, wait for the last fragment to complete.
111 static void crypt_dec_pending(struct dm_crypt_io *io)
113 struct crypt_config *cc = io->cc;
114 struct bio *base_bio = io->base_bio;
115 - struct dm_crypt_io *base_io = io->base_io;
116 int error = io->error;
118 if (!atomic_dec_and_test(&io->io_pending))
119 @@ -885,13 +865,7 @@ static void crypt_dec_pending(struct dm_
120 mempool_free(io->ctx.req, cc->req_pool);
121 mempool_free(io, cc->io_pool);
123 - if (likely(!base_io))
124 - bio_endio(base_bio, error);
126 - if (error && !base_io->error)
127 - base_io->error = error;
128 - crypt_dec_pending(base_io);
130 + bio_endio(base_bio, error);
134 @@ -1027,10 +1001,7 @@ static void kcryptd_crypt_write_convert(
136 struct crypt_config *cc = io->cc;
138 - struct dm_crypt_io *new_io;
140 - unsigned out_of_pages = 0;
141 - unsigned remaining = io->base_bio->bi_size;
142 sector_t sector = io->sector;
145 @@ -1040,81 +1011,28 @@ static void kcryptd_crypt_write_convert(
146 crypt_inc_pending(io);
147 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
150 - * The allocated buffers can be smaller than the whole bio,
151 - * so repeat the whole process until all the data can be handled.
153 - while (remaining) {
154 - clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
155 - if (unlikely(!clone)) {
156 - io->error = -ENOMEM;
160 - io->ctx.bio_out = clone;
161 - io->ctx.idx_out = 0;
163 - remaining -= clone->bi_size;
164 - sector += bio_sectors(clone);
166 - crypt_inc_pending(io);
168 - r = crypt_convert(cc, &io->ctx);
172 - crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
174 - /* Encryption was already finished, submit io now */
175 - if (crypt_finished) {
176 - kcryptd_crypt_write_io_submit(io, 0);
179 - * If there was an error, do not try next fragments.
180 - * For async, error is processed in async handler.
182 - if (unlikely(r < 0))
184 + clone = crypt_alloc_buffer(io, io->base_bio->bi_size);
185 + if (unlikely(!clone)) {
190 - io->sector = sector;
192 + io->ctx.bio_out = clone;
193 + io->ctx.idx_out = 0;
196 - * Out of memory -> run queues
197 - * But don't wait if split was due to the io size restriction
199 - if (unlikely(out_of_pages))
200 - congestion_wait(BLK_RW_ASYNC, HZ/100);
201 + sector += bio_sectors(clone);
204 - * With async crypto it is unsafe to share the crypto context
205 - * between fragments, so switch to a new dm_crypt_io structure.
207 - if (unlikely(!crypt_finished && remaining)) {
208 - new_io = crypt_io_alloc(io->cc, io->base_bio,
210 - crypt_inc_pending(new_io);
211 - crypt_convert_init(cc, &new_io->ctx, NULL,
212 - io->base_bio, sector);
213 - new_io->ctx.idx_in = io->ctx.idx_in;
214 - new_io->ctx.offset_in = io->ctx.offset_in;
217 - * Fragments after the first use the base_io
221 - new_io->base_io = io;
223 - new_io->base_io = io->base_io;
224 - crypt_inc_pending(io->base_io);
225 - crypt_dec_pending(io);
227 + crypt_inc_pending(io);
228 + r = crypt_convert(cc, &io->ctx);
231 + crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
236 + /* Encryption was already finished, submit io now */
237 + if (crypt_finished)
238 + kcryptd_crypt_write_io_submit(io, 0);
241 crypt_dec_pending(io);
244 @@ -1553,7 +1471,7 @@ static int crypt_ctr(struct dm_target *t
248 - cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
249 + cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
250 if (!cc->page_pool) {
251 ti->error = "Cannot allocate page mempool";