dm-crypt: remove per-cpu structure

dm-crypt used per-cpu structures to hold pointers to ablkcipher_request.
The code assumed that the work item keeps executing on a single CPU, so it
used no synchronization when accessing this structure.

When we disable a CPU by writing zero to
/sys/devices/system/cpu/cpu*/online, the work item can be moved to
another CPU. This causes crashes in dm-crypt because the code starts using
the wrong ablkcipher_request.

This patch fixes the bug by removing the percpu definition. The structure
ablkcipher_request is now accessed via a pointer from convert_context.
Consequently, if the work item is rescheduled to a different CPU, the
thread still uses the same ablkcipher_request.
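
In outline, the fix moves the request pointer from the per-CPU side
structure into the per-I/O convert_context (a sketch of the two layouts,
matching the hunks below; other convert_context fields elided):

	/* Before: one request pointer per CPU, reached via this_cpu_ptr() */
	struct crypt_cpu {
		struct ablkcipher_request *req;
	};

	/* After: the request pointer travels with the I/O being converted */
	struct convert_context {
		...
		struct ablkcipher_request *req;
	};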

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org

---
 drivers/md/dm-crypt.c | 61 +++++++++-----------------------------------------
 1 file changed, 12 insertions(+), 49 deletions(-)

Index: linux-3.14-rc1/drivers/md/dm-crypt.c
===================================================================
--- linux-3.14-rc1.orig/drivers/md/dm-crypt.c	2014-02-03 19:18:23.000000000 +0100
+++ linux-3.14-rc1/drivers/md/dm-crypt.c	2014-02-03 19:21:35.000000000 +0100
@@ -19,7 +19,6 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
-#include <linux/percpu.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
@@ -43,6 +42,7 @@ struct convert_context {
 	struct bvec_iter iter_out;
 	sector_t cc_sector;
 	atomic_t cc_pending;
+	struct ablkcipher_request *req;
 };
 
 /*
@@ -111,15 +111,7 @@ struct iv_tcw_private {
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
 
 /*
- * Duplicated per-CPU state for cipher.
- */
-struct crypt_cpu {
-	struct ablkcipher_request *req;
-};
-
-/*
- * The fields in here must be read only after initialization,
- * changing state should be in crypt_cpu.
+ * The fields in here must be read only after initialization.
  */
 struct crypt_config {
 	struct dm_dev *dev;
@@ -150,12 +142,6 @@ struct crypt_config {
 	sector_t iv_offset;
 	unsigned int iv_size;
 
-	/*
-	 * Duplicated per cpu state. Access through
-	 * per_cpu_ptr() only.
-	 */
-	struct crypt_cpu __percpu *cpu;
-
 	/* ESSIV: struct crypto_cipher *essiv_tfm */
 	void *iv_private;
 	struct crypto_ablkcipher **tfms;
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_i
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 
-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
-{
-	return this_cpu_ptr(cc->cpu);
-}
-
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct cr
 static void crypt_alloc_req(struct crypt_config *cc,
 			    struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
 
-	if (!this_cc->req)
-		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+	if (!ctx->req)
+		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
-	ablkcipher_request_set_callback(this_cc->req,
+	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+	ablkcipher_request_set_callback(ctx->req,
 	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
 /*
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt
 static int crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	int r;
 
 	atomic_set(&ctx->cc_pending, 1);
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_co
 
 		atomic_inc(&ctx->cc_pending);
 
-		r = crypt_convert_block(cc, ctx, this_cc->req);
+		r = crypt_convert_block(cc, ctx, ctx->req);
 
 		switch (r) {
 		/* async */
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_co
 			reinit_completion(&ctx->restart);
 			/* fall through*/
 		case -EINPROGRESS:
-			this_cc->req = NULL;
+			ctx->req = NULL;
 			ctx->cc_sector++;
 			continue;
 
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_allo
 	io->sector = sector;
 	io->error = 0;
 	io->base_io = NULL;
+	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
 
 	return io;
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_
 	if (!atomic_dec_and_test(&io->io_pending))
 		return;
 
+	if (io->ctx.req)
+		mempool_free(io->ctx.req, cc->req_pool);
 	mempool_free(io, cc->io_pool);
 
 	if (likely(!base_io))
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_c
 static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = ti->private;
-	struct crypt_cpu *cpu_cc;
-	int cpu;
 
 	ti->private = NULL;
 
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *
 	if (cc->crypt_queue)
 		destroy_workqueue(cc->crypt_queue);
 
-	if (cc->cpu)
-		for_each_possible_cpu(cpu) {
-			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-			if (cpu_cc->req)
-				mempool_free(cpu_cc->req, cc->req_pool);
-		}
-
 	crypt_free_tfms(cc);
 
 	if (cc->bs)
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *
 	if (cc->dev)
 		dm_put_device(ti, cc->dev);
 
-	if (cc->cpu)
-		free_percpu(cc->cpu);
-
 	kzfree(cc->cipher);
 	kzfree(cc->cipher_string);
 
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_ta
 	if (tmp)
 		DMWARN("Ignoring unexpected additional cipher options");
 
-	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
-				 __alignof__(struct crypt_cpu));
-	if (!cc->cpu) {
-		ti->error = "Cannot allocate per cpu state";
-		goto bad_mem;
-	}
-
 	/*
 	 * For compatibility with the original dm-crypt mapping format, if
 	 * only the cipher name is supplied, use cbc-plain.