dm-crypt: don't allocate pages for a partial request.

This patch changes crypt_alloc_buffer so that it always allocates pages for
a full request.

This change enables further simplification and the removal of one refcount.

Note: the next patch is needed to fix a theoretical deadlock.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>

---
 drivers/md/dm-crypt.c | 139 ++++++++++----------------------------------------
 1 file changed, 30 insertions(+), 109 deletions(-)
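
For reviewers, the resulting allocation strategy boils down to roughly the
sketch below: one clone bio is sized for the whole request and its bio_vecs
are filled directly from the page mempool, with GFP_NOIO allocations that
sleep instead of returning a partial buffer. This is only an illustration
reconstructed from the hunks that follow, not the verbatim result of the
patch; in particular the bv_len assignment and the end-of-loop size
bookkeeping are assumed here.

/*
 * Illustrative sketch only: allocate one clone bio that covers the whole
 * request and fill its bio_vecs straight from the page mempool.  With
 * GFP_NOIO, mempool_alloc() sleeps until a page is available instead of
 * failing, so no partial buffer is ever returned.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
        struct crypt_config *cc = io->cc;
        struct bio *clone;
        unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
        unsigned i, len;
        struct page *page;
        struct bio_vec *bvec;

        clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
        if (!clone)
                return NULL;

        clone_init(io, clone);

        for (i = 0; i < nr_iovecs; i++) {
                page = mempool_alloc(cc->page_pool, gfp_mask);

                len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

                bvec = &clone->bi_io_vec[clone->bi_vcnt++];
                bvec->bv_page = page;
                bvec->bv_len = len;             /* assumed; not shown in the truncated hunk */
                bvec->bv_offset = 0;

                clone->bi_iter.bi_size += len;
                size -= len;                    /* assumed end-of-loop bookkeeping */
        }

        return clone;
}

Because mempool_alloc() with GFP_NOIO blocks until a page is returned to the
pool, the out_of_pages fallback and the congestion_wait() retry logic in the
caller become unnecessary, which is what allows the write path below to lose
its per-fragment loop.
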
Index: linux-3.14/drivers/md/dm-crypt.c
===================================================================
--- linux-3.14.orig/drivers/md/dm-crypt.c	2014-04-04 20:48:50.000000000 +0200
+++ linux-3.14/drivers/md/dm-crypt.c	2014-04-04 20:57:36.000000000 +0200
@@ -58,7 +58,6 @@ struct dm_crypt_io {
-	struct dm_crypt_io *base_io;
 } CRYPTO_MINALIGN_ATTR;
 struct dm_crypt_request {
@@ -172,7 +171,6 @@ struct crypt_config {
-#define MIN_POOL_PAGES 32
 static struct kmem_cache *_crypt_io_pool;
@@ -951,14 +949,13 @@ static int crypt_convert(struct crypt_co
+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages, indicated by
- * *out_of_pages set to 1.
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
-				       unsigned *out_of_pages)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	struct crypt_config *cc = io->cc;
@@ -966,41 +963,27 @@ static struct bio *crypt_alloc_buffer(st
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
+	struct bio_vec *bvec;
 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	clone_init(io, clone);
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		 * If additional pages cannot be allocated without waiting,
-		 * return a partially-allocated bio. The caller will then try
-		 * to allocate more bios while submitting this partial bio.
-		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
-		if (!bio_add_page(clone, page, len, 0)) {
-			mempool_free(page, cc->page_pool);
+		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
+		bvec->bv_page = page;
+		bvec->bv_offset = 0;
+		clone->bi_iter.bi_size += len;
-	if (!clone->bi_iter.bi_size) {
@@ -1025,7 +1008,6 @@ static void crypt_io_init(struct dm_cryp
-	io->base_io = NULL;
 	atomic_set(&io->io_pending, 0);
@@ -1038,13 +1020,11 @@ static void crypt_inc_pending(struct dm_
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
- * If base_io is set, wait for the last fragment to complete.
 static void crypt_dec_pending(struct dm_crypt_io *io)
 	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
-	struct dm_crypt_io *base_io = io->base_io;
 	int error = io->error;
 	if (!atomic_dec_and_test(&io->io_pending))
@@ -1055,13 +1035,7 @@ static void crypt_dec_pending(struct dm_
 	if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
 		mempool_free(io, cc->io_pool);
-	if (likely(!base_io))
-		bio_endio(base_bio, error);
-		if (error && !base_io->error)
-			base_io->error = error;
-		crypt_dec_pending(base_io);
+	bio_endio(base_bio, error);
@@ -1197,10 +1171,7 @@ static void kcryptd_crypt_write_convert(
 	struct crypt_config *cc = io->cc;
-	struct dm_crypt_io *new_io;
-	unsigned out_of_pages = 0;
-	unsigned remaining = io->base_bio->bi_iter.bi_size;
 	sector_t sector = io->sector;
@@ -1210,80 +1181,30 @@ static void kcryptd_crypt_write_convert(
 	crypt_inc_pending(io);
 	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
-	 * The allocated buffers can be smaller than the whole bio,
-	 * so repeat the whole process until all the data can be handled.
-	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
-		if (unlikely(!clone)) {
-			io->error = -ENOMEM;
-		io->ctx.bio_out = clone;
-		io->ctx.iter_out = clone->bi_iter;
-		remaining -= clone->bi_iter.bi_size;
-		sector += bio_sectors(clone);
-		crypt_inc_pending(io);
-		r = crypt_convert(cc, &io->ctx);
-		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
-		/* Encryption was already finished, submit io now */
-		if (crypt_finished) {
-			kcryptd_crypt_write_io_submit(io, 0);
-			 * If there was an error, do not try next fragments.
-			 * For async, error is processed in async handler.
-			if (unlikely(r < 0))
+	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+	if (unlikely(!clone)) {
-			io->sector = sector;
+	io->ctx.bio_out = clone;
+	io->ctx.iter_out = clone->bi_iter;
-		 * Out of memory -> run queues
-		 * But don't wait if split was due to the io size restriction
-		if (unlikely(out_of_pages))
-			congestion_wait(BLK_RW_ASYNC, HZ/100);
+	sector += bio_sectors(clone);
-		 * With async crypto it is unsafe to share the crypto context
-		 * between fragments, so switch to a new dm_crypt_io structure.
-		if (unlikely(!crypt_finished && remaining)) {
-			new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
-			crypt_io_init(new_io, io->cc, io->base_bio, sector);
-			crypt_inc_pending(new_io);
-			crypt_convert_init(cc, &new_io->ctx, NULL,
-					   io->base_bio, sector);
-			new_io->ctx.iter_in = io->ctx.iter_in;
-			 * Fragments after the first use the base_io
-				new_io->base_io = io;
-				new_io->base_io = io->base_io;
-				crypt_inc_pending(io->base_io);
-				crypt_dec_pending(io);
+	crypt_inc_pending(io);
+	r = crypt_convert(cc, &io->ctx);
+	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
+	/* Encryption was already finished, submit io now */
+	if (crypt_finished) {
+		kcryptd_crypt_write_io_submit(io, 0);
+		io->sector = sector;
 	crypt_dec_pending(io);
@@ -1738,7 +1659,7 @@ static int crypt_ctr(struct dm_target *t
 		sizeof(struct dm_crypt_io) + cc->dmreq_start +
 		sizeof(struct dm_crypt_request) + cc->iv_size;
-	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
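
For context, after this patch the write path reconstructed from the hunks
above looks roughly like the sketch below. It is an illustration only: the
error value on allocation failure, the crypt_convert() error check and the
dec: label are assumptions not visible in the truncated hunks, and the async
completion path is unchanged and not shown.

/*
 * Illustrative sketch of kcryptd_crypt_write_convert() after the patch:
 * one clone bio is allocated for the whole request, so the per-fragment
 * loop, the out_of_pages handling and the base_io chaining are gone.
 */
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->cc;
        struct bio *clone;
        int crypt_finished;
        sector_t sector = io->sector;
        int r;

        /* Prevent io from disappearing before processing is over. */
        crypt_inc_pending(io);
        crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

        /* Allocate pages for the entire request up front. */
        clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
        if (unlikely(!clone)) {
                io->error = -EIO;               /* assumed error value */
                goto dec;
        }

        io->ctx.bio_out = clone;
        io->ctx.iter_out = clone->bi_iter;

        sector += bio_sectors(clone);

        crypt_inc_pending(io);
        r = crypt_convert(cc, &io->ctx);
        if (r)                                  /* assumed error handling */
                io->error = -EIO;
        crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

        /* Encryption was already finished, submit io now */
        if (crypt_finished) {
                kcryptd_crypt_write_io_submit(io, 0);
                io->sector = sector;
        }

dec:
        crypt_dec_pending(io);
}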