]>
Commit | Line | Data |
---|---|---|
0a2e4279 ŁK |
1 | dm-crypt: use per-bio data |
2 | ||
3 | This patch changes dm-crypt so that it uses auxiliary data allocated with | |
4 | the bio. | |
5 | ||
6 | Dm-crypt requires two allocations per request - struct dm_crypt_io and | |
7 | struct ablkcipher_request (with other data appended to it). It used | |
8 | mempool for the allocation. | |
9 | ||
10 | Some requests may require more dm_crypt_ios and ablkcipher_requests, | |
11 | however most requests need just one of each of these two structures to | |
12 | complete. | |
13 | ||
14 | This patch changes it so that the first dm_crypt_io and ablkcipher_request | |
15 | are allocated with the bio (using target per_bio_data_size option). If the | |
16 | request needs additional values, they are allocated from the mempool. | |
17 | ||
18 | Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> | |
19 | ||
20 | --- | |
21 | drivers/md/dm-crypt.c | 40 ++++++++++++++++++++++++++-------------- | |
22 | 1 file changed, 26 insertions(+), 14 deletions(-) | |
23 | ||
24 | Index: linux-3.14-rc4/drivers/md/dm-crypt.c | |
25 | =================================================================== | |
26 | --- linux-3.14-rc4.orig/drivers/md/dm-crypt.c 2014-02-27 17:48:31.000000000 +0100 | |
27 | +++ linux-3.14-rc4/drivers/md/dm-crypt.c 2014-02-27 17:48:31.000000000 +0100 | |
28 | @@ -59,7 +59,7 @@ struct dm_crypt_io { | |
29 | int error; | |
30 | sector_t sector; | |
31 | struct dm_crypt_io *base_io; | |
32 | -}; | |
33 | +} CRYPTO_MINALIGN_ATTR; | |
34 | ||
35 | struct dm_crypt_request { | |
36 | struct convert_context *ctx; | |
37 | @@ -162,6 +162,8 @@ struct crypt_config { | |
38 | */ | |
39 | unsigned int dmreq_start; | |
40 | ||
41 | + unsigned int per_bio_data_size; | |
42 | + | |
43 | unsigned long flags; | |
44 | unsigned int key_size; | |
45 | unsigned int key_parts; /* independent parts in key buffer */ | |
46 | @@ -895,6 +897,14 @@ static void crypt_alloc_req(struct crypt | |
47 | kcryptd_async_done, dmreq_of_req(cc, ctx->req)); | |
48 | } | |
49 | ||
50 | +static void crypt_free_req(struct crypt_config *cc, | |
51 | + struct ablkcipher_request *req, struct bio *base_bio) | |
52 | +{ | |
53 | + struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); | |
54 | + if ((struct ablkcipher_request *)(io + 1) != req) | |
55 | + mempool_free(req, cc->req_pool); | |
56 | +} | |
57 | + | |
58 | /* | |
59 | * Encrypt / decrypt data from one bio to another one (can be the same one) | |
60 | */ | |
61 | @@ -1008,12 +1018,9 @@ static void crypt_free_buffer_pages(stru | |
62 | } | |
63 | } | |
64 | ||
65 | -static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc, | |
66 | - struct bio *bio, sector_t sector) | |
67 | +static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, | |
68 | + struct bio *bio, sector_t sector) | |
69 | { | |
70 | - struct dm_crypt_io *io; | |
71 | - | |
72 | - io = mempool_alloc(cc->io_pool, GFP_NOIO); | |
73 | io->cc = cc; | |
74 | io->base_bio = bio; | |
75 | io->sector = sector; | |
76 | @@ -1021,8 +1028,6 @@ static struct dm_crypt_io *crypt_io_allo | |
77 | io->base_io = NULL; | |
78 | io->ctx.req = NULL; | |
79 | atomic_set(&io->io_pending, 0); | |
80 | - | |
81 | - return io; | |
82 | } | |
83 | ||
84 | static void crypt_inc_pending(struct dm_crypt_io *io) | |
85 | @@ -1046,8 +1051,9 @@ static void crypt_dec_pending(struct dm_ | |
86 | return; | |
87 | ||
88 | if (io->ctx.req) | |
89 | - mempool_free(io->ctx.req, cc->req_pool); | |
90 | - mempool_free(io, cc->io_pool); | |
91 | + crypt_free_req(cc, io->ctx.req, base_bio); | |
92 | + if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size)) | |
93 | + mempool_free(io, cc->io_pool); | |
94 | ||
95 | if (likely(!base_io)) | |
96 | bio_endio(base_bio, error); | |
97 | @@ -1255,8 +1261,8 @@ static void kcryptd_crypt_write_convert( | |
98 | * between fragments, so switch to a new dm_crypt_io structure. | |
99 | */ | |
100 | if (unlikely(!crypt_finished && remaining)) { | |
101 | - new_io = crypt_io_alloc(io->cc, io->base_bio, | |
102 | - sector); | |
103 | + new_io = mempool_alloc(cc->io_pool, GFP_NOIO); | |
104 | + crypt_io_init(new_io, io->cc, io->base_bio, sector); | |
105 | crypt_inc_pending(new_io); | |
106 | crypt_convert_init(cc, &new_io->ctx, NULL, | |
107 | io->base_bio, sector); | |
108 | @@ -1325,7 +1331,7 @@ static void kcryptd_async_done(struct cr | |
109 | if (error < 0) | |
110 | io->error = -EIO; | |
111 | ||
112 | - mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); | |
113 | + crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); | |
114 | ||
115 | if (!atomic_dec_and_test(&ctx->cc_pending)) | |
116 | return; | |
117 | @@ -1728,6 +1734,10 @@ static int crypt_ctr(struct dm_target *t | |
118 | goto bad; | |
119 | } | |
120 | ||
121 | + cc->per_bio_data_size = ti->per_bio_data_size = | |
122 | + sizeof(struct dm_crypt_io) + cc->dmreq_start + | |
123 | + sizeof(struct dm_crypt_request) + cc->iv_size; | |
124 | + | |
125 | cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); | |
126 | if (!cc->page_pool) { | |
127 | ti->error = "Cannot allocate page mempool"; | |
128 | @@ -1824,7 +1834,9 @@ static int crypt_map(struct dm_target *t | |
129 | return DM_MAPIO_REMAPPED; | |
130 | } | |
131 | ||
132 | - io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); | |
133 | + io = dm_per_bio_data(bio, cc->per_bio_data_size); | |
134 | + crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); | |
135 | + io->ctx.req = (struct ablkcipher_request *)(io + 1); | |
136 | ||
137 | if (bio_data_dir(io->base_bio) == READ) { | |
138 | if (kcryptd_io_read(io, GFP_NOWAIT)) |