1 dm-crypt: remove per-cpu structure
3 Dm-crypt used a per-cpu structure to hold a pointer to ablkcipher_request.
4 The code assumed that the work item keeps executing on a single CPU, so it
5 used no synchronization when accessing this structure.
7 When a CPU is disabled by writing zero to
8 /sys/devices/system/cpu/cpu*/online, the work item can be moved to
9 another CPU. This causes crashes in dm-crypt because the code then uses
10 the wrong ablkcipher_request.
12 This patch fixes this bug by removing the percpu definition. The structure
13 ablkcipher_request is accessed via a pointer from convert_context.
14 Consequently, if the work item is rescheduled to a different CPU, the
15 thread still uses the same ablkcipher_request.
17 Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
18 Cc: stable@vger.kernel.org
21 drivers/md/dm-crypt.c | 61 +++++++++-----------------------------------------
22 1 file changed, 12 insertions(+), 49 deletions(-)
24 Index: linux-3.14-rc1/drivers/md/dm-crypt.c
25 ===================================================================
26 --- linux-3.14-rc1.orig/drivers/md/dm-crypt.c 2014-02-03 19:18:23.000000000 +0100
27 +++ linux-3.14-rc1/drivers/md/dm-crypt.c 2014-02-03 19:21:35.000000000 +0100
29 #include <linux/crypto.h>
30 #include <linux/workqueue.h>
31 #include <linux/backing-dev.h>
32 -#include <linux/percpu.h>
33 #include <linux/atomic.h>
34 #include <linux/scatterlist.h>
36 @@ -43,6 +42,7 @@ struct convert_context {
37 struct bvec_iter iter_out;
40 + struct ablkcipher_request *req;
44 @@ -111,15 +111,7 @@ struct iv_tcw_private {
45 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
48 - * Duplicated per-CPU state for cipher.
51 - struct ablkcipher_request *req;
55 - * The fields in here must be read only after initialization,
56 - * changing state should be in crypt_cpu.
57 + * The fields in here must be read only after initialization.
61 @@ -150,12 +142,6 @@ struct crypt_config {
66 - * Duplicated per cpu state. Access through
67 - * per_cpu_ptr() only.
69 - struct crypt_cpu __percpu *cpu;
71 /* ESSIV: struct crypto_cipher *essiv_tfm */
73 struct crypto_ablkcipher **tfms;
74 @@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_i
75 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
76 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
78 -static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
80 - return this_cpu_ptr(cc->cpu);
84 * Use this to access cipher attributes that are the same for each CPU.
86 @@ -903,16 +884,15 @@ static void kcryptd_async_done(struct cr
87 static void crypt_alloc_req(struct crypt_config *cc,
88 struct convert_context *ctx)
90 - struct crypt_cpu *this_cc = this_crypt_config(cc);
91 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
94 - this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
96 + ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
98 - ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
99 - ablkcipher_request_set_callback(this_cc->req,
100 + ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
101 + ablkcipher_request_set_callback(ctx->req,
102 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
103 - kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
104 + kcryptd_async_done, dmreq_of_req(cc, ctx->req));
108 @@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt
109 static int crypt_convert(struct crypt_config *cc,
110 struct convert_context *ctx)
112 - struct crypt_cpu *this_cc = this_crypt_config(cc);
115 atomic_set(&ctx->cc_pending, 1);
116 @@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_co
118 atomic_inc(&ctx->cc_pending);
120 - r = crypt_convert_block(cc, ctx, this_cc->req);
121 + r = crypt_convert_block(cc, ctx, ctx->req);
125 @@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_co
126 reinit_completion(&ctx->restart);
129 - this_cc->req = NULL;
134 @@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_allo
138 + io->ctx.req = NULL;
139 atomic_set(&io->io_pending, 0);
142 @@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_
143 if (!atomic_dec_and_test(&io->io_pending))
147 + mempool_free(io->ctx.req, cc->req_pool);
148 mempool_free(io, cc->io_pool);
150 if (likely(!base_io))
151 @@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_c
152 static void crypt_dtr(struct dm_target *ti)
154 struct crypt_config *cc = ti->private;
155 - struct crypt_cpu *cpu_cc;
160 @@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *
162 destroy_workqueue(cc->crypt_queue);
165 - for_each_possible_cpu(cpu) {
166 - cpu_cc = per_cpu_ptr(cc->cpu, cpu);
168 - mempool_free(cpu_cc->req, cc->req_pool);
174 @@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *
176 dm_put_device(ti, cc->dev);
179 - free_percpu(cc->cpu);
182 kzfree(cc->cipher_string);
184 @@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_ta
186 DMWARN("Ignoring unexpected additional cipher options");
188 - cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
189 - __alignof__(struct crypt_cpu));
191 - ti->error = "Cannot allocate per cpu state";
196 * For compatibility with the original dm-crypt mapping format, if
197 * only the cipher name is supplied, use cbc-plain.