]> git.pld-linux.org Git - packages/kernel.git/blame - dm-crypt-remove-percpu.patch
- 3.1.101
[packages/kernel.git] / dm-crypt-remove-percpu.patch
CommitLineData
101a7448
ŁK
1dm-crypt: remove per-cpu structure
2
3Remove per-cpu structure and make it per-convert_context instead.
4This allows moving requests between different CPUs.
5
6Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
7
8---
9 drivers/md/dm-crypt.c | 61 +++++++++-----------------------------------------
10 1 file changed, 12 insertions(+), 49 deletions(-)
11
39348b5e 12Index: linux-3.10.4-fast/drivers/md/dm-crypt.c
101a7448 13===================================================================
39348b5e
ŁK
14--- linux-3.10.4-fast.orig/drivers/md/dm-crypt.c 2013-07-31 16:59:48.000000000 +0200
15+++ linux-3.10.4-fast/drivers/md/dm-crypt.c 2013-07-31 17:03:10.000000000 +0200
101a7448
ŁK
16@@ -18,7 +18,6 @@
17 #include <linux/crypto.h>
18 #include <linux/workqueue.h>
19 #include <linux/backing-dev.h>
20-#include <linux/percpu.h>
21 #include <linux/atomic.h>
22 #include <linux/scatterlist.h>
23 #include <asm/page.h>
24@@ -44,6 +43,7 @@ struct convert_context {
25 unsigned int idx_out;
26 sector_t cc_sector;
27 atomic_t cc_pending;
28+ struct ablkcipher_request *req;
29 };
30
31 /*
32@@ -105,15 +105,7 @@ struct iv_lmk_private {
33 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
34
35 /*
36- * Duplicated per-CPU state for cipher.
37- */
38-struct crypt_cpu {
39- struct ablkcipher_request *req;
40-};
41-
42-/*
43- * The fields in here must be read only after initialization,
44- * changing state should be in crypt_cpu.
45+ * The fields in here must be read only after initialization.
46 */
47 struct crypt_config {
48 struct dm_dev *dev;
49@@ -143,12 +135,6 @@ struct crypt_config {
50 sector_t iv_offset;
51 unsigned int iv_size;
52
53- /*
54- * Duplicated per cpu state. Access through
55- * per_cpu_ptr() only.
56- */
57- struct crypt_cpu __percpu *cpu;
58-
59 /* ESSIV: struct crypto_cipher *essiv_tfm */
60 void *iv_private;
61 struct crypto_ablkcipher **tfms;
62@@ -184,11 +170,6 @@ static void clone_init(struct dm_crypt_i
63 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
64 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
65
66-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
67-{
68- return this_cpu_ptr(cc->cpu);
69-}
70-
71 /*
72 * Use this to access cipher attributes that are the same for each CPU.
73 */
74@@ -738,16 +719,15 @@ static void kcryptd_async_done(struct cr
75 static void crypt_alloc_req(struct crypt_config *cc,
76 struct convert_context *ctx)
77 {
78- struct crypt_cpu *this_cc = this_crypt_config(cc);
79 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
80
81- if (!this_cc->req)
82- this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
83+ if (!ctx->req)
84+ ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
85
86- ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
87- ablkcipher_request_set_callback(this_cc->req,
88+ ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
89+ ablkcipher_request_set_callback(ctx->req,
90 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
91- kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
92+ kcryptd_async_done, dmreq_of_req(cc, ctx->req));
93 }
94
95 /*
96@@ -756,7 +736,6 @@ static void crypt_alloc_req(struct crypt
97 static int crypt_convert(struct crypt_config *cc,
98 struct convert_context *ctx)
99 {
100- struct crypt_cpu *this_cc = this_crypt_config(cc);
101 int r;
102
103 atomic_set(&ctx->cc_pending, 1);
104@@ -768,7 +747,7 @@ static int crypt_convert(struct crypt_co
105
106 atomic_inc(&ctx->cc_pending);
107
108- r = crypt_convert_block(cc, ctx, this_cc->req);
109+ r = crypt_convert_block(cc, ctx, ctx->req);
110
111 switch (r) {
112 /* async */
113@@ -777,7 +756,7 @@ static int crypt_convert(struct crypt_co
114 INIT_COMPLETION(ctx->restart);
115 /* fall through*/
116 case -EINPROGRESS:
117- this_cc->req = NULL;
118+ ctx->req = NULL;
119 ctx->cc_sector++;
120 continue;
121
39348b5e 122@@ -876,6 +855,7 @@ static struct dm_crypt_io *crypt_io_allo
101a7448
ŁK
123 io->sector = sector;
124 io->error = 0;
125 io->base_io = NULL;
126+ io->ctx.req = NULL;
127 atomic_set(&io->io_pending, 0);
128
129 return io;
39348b5e 130@@ -901,6 +881,8 @@ static void crypt_dec_pending(struct dm_
101a7448
ŁK
131 if (!atomic_dec_and_test(&io->io_pending))
132 return;
133
134+ if (io->ctx.req)
135+ mempool_free(io->ctx.req, cc->req_pool);
136 mempool_free(io, cc->io_pool);
137
138 if (likely(!base_io))
39348b5e 139@@ -1326,8 +1308,6 @@ static int crypt_wipe_key(struct crypt_c
101a7448
ŁK
140 static void crypt_dtr(struct dm_target *ti)
141 {
142 struct crypt_config *cc = ti->private;
143- struct crypt_cpu *cpu_cc;
144- int cpu;
145
146 ti->private = NULL;
147
39348b5e 148@@ -1339,13 +1319,6 @@ static void crypt_dtr(struct dm_target *
101a7448
ŁK
149 if (cc->crypt_queue)
150 destroy_workqueue(cc->crypt_queue);
151
152- if (cc->cpu)
153- for_each_possible_cpu(cpu) {
154- cpu_cc = per_cpu_ptr(cc->cpu, cpu);
155- if (cpu_cc->req)
156- mempool_free(cpu_cc->req, cc->req_pool);
157- }
158-
159 crypt_free_tfms(cc);
160
161 if (cc->bs)
39348b5e 162@@ -1364,9 +1337,6 @@ static void crypt_dtr(struct dm_target *
101a7448
ŁK
163 if (cc->dev)
164 dm_put_device(ti, cc->dev);
165
166- if (cc->cpu)
167- free_percpu(cc->cpu);
168-
169 kzfree(cc->cipher);
170 kzfree(cc->cipher_string);
171
39348b5e 172@@ -1421,13 +1391,6 @@ static int crypt_ctr_cipher(struct dm_ta
101a7448
ŁK
173 if (tmp)
174 DMWARN("Ignoring unexpected additional cipher options");
175
176- cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
177- __alignof__(struct crypt_cpu));
178- if (!cc->cpu) {
179- ti->error = "Cannot allocate per cpu state";
180- goto bad_mem;
181- }
182-
183 /*
184 * For compatibility with the original dm-crypt mapping format, if
185 * only the cipher name is supplied, use cbc-plain.
This page took 0.05915 seconds and 4 git commands to generate.