1 dm-crypt: remove per-cpu structure
3 Remove the per-cpu structure and make the state per-convert_context instead.
4 This allows moving requests between different CPUs.
6 Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
9 drivers/md/dm-crypt.c | 61 +++++++++-----------------------------------------
10 1 file changed, 12 insertions(+), 49 deletions(-)
12 Index: linux-3.10.4-fast/drivers/md/dm-crypt.c
13 ===================================================================
14 --- linux-3.10.4-fast.orig/drivers/md/dm-crypt.c 2013-07-31 16:59:48.000000000 +0200
15 +++ linux-3.10.4-fast/drivers/md/dm-crypt.c 2013-07-31 17:03:10.000000000 +0200
17 #include <linux/crypto.h>
18 #include <linux/workqueue.h>
19 #include <linux/backing-dev.h>
20 -#include <linux/percpu.h>
21 #include <linux/atomic.h>
22 #include <linux/scatterlist.h>
24 @@ -44,6 +43,7 @@ struct convert_context {
28 + struct ablkcipher_request *req;
32 @@ -105,15 +105,7 @@ struct iv_lmk_private {
33 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
36 - * Duplicated per-CPU state for cipher.
39 - struct ablkcipher_request *req;
43 - * The fields in here must be read only after initialization,
44 - * changing state should be in crypt_cpu.
45 + * The fields in here must be read only after initialization.
49 @@ -143,12 +135,6 @@ struct crypt_config {
54 - * Duplicated per cpu state. Access through
55 - * per_cpu_ptr() only.
57 - struct crypt_cpu __percpu *cpu;
59 /* ESSIV: struct crypto_cipher *essiv_tfm */
61 struct crypto_ablkcipher **tfms;
62 @@ -184,11 +170,6 @@ static void clone_init(struct dm_crypt_i
63 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
64 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
66 -static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
68 - return this_cpu_ptr(cc->cpu);
72 * Use this to access cipher attributes that are the same for each CPU.
74 @@ -738,16 +719,15 @@ static void kcryptd_async_done(struct cr
75 static void crypt_alloc_req(struct crypt_config *cc,
76 struct convert_context *ctx)
78 - struct crypt_cpu *this_cc = this_crypt_config(cc);
79 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
82 - this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
84 + ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
86 - ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
87 - ablkcipher_request_set_callback(this_cc->req,
88 + ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
89 + ablkcipher_request_set_callback(ctx->req,
90 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
91 - kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
92 + kcryptd_async_done, dmreq_of_req(cc, ctx->req));
96 @@ -756,7 +736,6 @@ static void crypt_alloc_req(struct crypt
97 static int crypt_convert(struct crypt_config *cc,
98 struct convert_context *ctx)
100 - struct crypt_cpu *this_cc = this_crypt_config(cc);
103 atomic_set(&ctx->cc_pending, 1);
104 @@ -768,7 +747,7 @@ static int crypt_convert(struct crypt_co
106 atomic_inc(&ctx->cc_pending);
108 - r = crypt_convert_block(cc, ctx, this_cc->req);
109 + r = crypt_convert_block(cc, ctx, ctx->req);
113 @@ -777,7 +756,7 @@ static int crypt_convert(struct crypt_co
114 INIT_COMPLETION(ctx->restart);
117 - this_cc->req = NULL;
122 @@ -876,6 +855,7 @@ static struct dm_crypt_io *crypt_io_allo
126 + io->ctx.req = NULL;
127 atomic_set(&io->io_pending, 0);
130 @@ -901,6 +881,8 @@ static void crypt_dec_pending(struct dm_
131 if (!atomic_dec_and_test(&io->io_pending))
135 + mempool_free(io->ctx.req, cc->req_pool);
136 mempool_free(io, cc->io_pool);
138 if (likely(!base_io))
139 @@ -1326,8 +1308,6 @@ static int crypt_wipe_key(struct crypt_c
140 static void crypt_dtr(struct dm_target *ti)
142 struct crypt_config *cc = ti->private;
143 - struct crypt_cpu *cpu_cc;
148 @@ -1339,13 +1319,6 @@ static void crypt_dtr(struct dm_target *
150 destroy_workqueue(cc->crypt_queue);
153 - for_each_possible_cpu(cpu) {
154 - cpu_cc = per_cpu_ptr(cc->cpu, cpu);
156 - mempool_free(cpu_cc->req, cc->req_pool);
162 @@ -1364,9 +1337,6 @@ static void crypt_dtr(struct dm_target *
164 dm_put_device(ti, cc->dev);
167 - free_percpu(cc->cpu);
170 kzfree(cc->cipher_string);
172 @@ -1421,13 +1391,6 @@ static int crypt_ctr_cipher(struct dm_ta
174 DMWARN("Ignoring unexpected additional cipher options");
176 - cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
177 - __alignof__(struct crypt_cpu));
179 - ti->error = "Cannot allocate per cpu state";
184 * For compatibility with the original dm-crypt mapping format, if
185 * only the cipher name is supplied, use cbc-plain.