git.pld-linux.org Git - packages/kernel.git/commitdiff
- dm-crypt per-cpu patch applied upstream
author Łukasz Krotowski <lkrotowski@pld-linux.org>
Wed, 23 Jul 2014 19:44:16 +0000 (21:44 +0200)
committer Łukasz Krotowski <lkrotowski@pld-linux.org>
Wed, 23 Jul 2014 20:20:48 +0000 (22:20 +0200)
dm-crypt-remove-percpu.patch [deleted file]
kernel.spec

diff --git a/dm-crypt-remove-percpu.patch b/dm-crypt-remove-percpu.patch
deleted file mode 100644
index 1589b52..0000000
--- a/dm-crypt-remove-percpu.patch
+++ /dev/null
@@ -1,197 +0,0 @@
-dm-crypt: remove per-cpu structure
-
-Dm-crypt used per-cpu structures to hold pointers to ablkcipher_request.
-The code assumed that the work item keeps executing on a single CPU, so it
-used no synchronization when accessing this structure.
-
-When we disable a CPU by writing zero to
-/sys/devices/system/cpu/cpu*/online, the work item could be moved to
-another CPU. This causes crashes in dm-crypt because the code starts using
-a wrong ablkcipher_request.
-
-This patch fixes this bug by removing the percpu definition. The structure
-ablkcipher_request is accessed via a pointer from convert_context.
-Consequently, if the work item is rescheduled to a different CPU, the
-thread still uses the same ablkcipher_request.
-
-Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
-Cc: stable@vger.kernel.org
-
----
- drivers/md/dm-crypt.c |   61 +++++++++-----------------------------------------
- 1 file changed, 12 insertions(+), 49 deletions(-)
-
-Index: linux-3.14-rc1/drivers/md/dm-crypt.c
-===================================================================
---- linux-3.14-rc1.orig/drivers/md/dm-crypt.c  2014-02-03 19:18:23.000000000 +0100
-+++ linux-3.14-rc1/drivers/md/dm-crypt.c       2014-02-03 19:21:35.000000000 +0100
-@@ -19,7 +19,6 @@
- #include <linux/crypto.h>
- #include <linux/workqueue.h>
- #include <linux/backing-dev.h>
--#include <linux/percpu.h>
- #include <linux/atomic.h>
- #include <linux/scatterlist.h>
- #include <asm/page.h>
-@@ -43,6 +42,7 @@ struct convert_context {
-       struct bvec_iter iter_out;
-       sector_t cc_sector;
-       atomic_t cc_pending;
-+      struct ablkcipher_request *req;
- };
- /*
-@@ -111,15 +111,7 @@ struct iv_tcw_private {
- enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
- /*
-- * Duplicated per-CPU state for cipher.
-- */
--struct crypt_cpu {
--      struct ablkcipher_request *req;
--};
--
--/*
-- * The fields in here must be read only after initialization,
-- * changing state should be in crypt_cpu.
-+ * The fields in here must be read only after initialization.
-  */
- struct crypt_config {
-       struct dm_dev *dev;
-@@ -150,12 +142,6 @@ struct crypt_config {
-       sector_t iv_offset;
-       unsigned int iv_size;
--      /*
--       * Duplicated per cpu state. Access through
--       * per_cpu_ptr() only.
--       */
--      struct crypt_cpu __percpu *cpu;
--
-       /* ESSIV: struct crypto_cipher *essiv_tfm */
-       void *iv_private;
-       struct crypto_ablkcipher **tfms;
-@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_i
- static void kcryptd_queue_crypt(struct dm_crypt_io *io);
- static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
--static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
--{
--      return this_cpu_ptr(cc->cpu);
--}
--
- /*
-  * Use this to access cipher attributes that are the same for each CPU.
-  */
-@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct cr
- static void crypt_alloc_req(struct crypt_config *cc,
-                           struct convert_context *ctx)
- {
--      struct crypt_cpu *this_cc = this_crypt_config(cc);
-       unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
--      if (!this_cc->req)
--              this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
-+      if (!ctx->req)
-+              ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
--      ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
--      ablkcipher_request_set_callback(this_cc->req,
-+      ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
-+      ablkcipher_request_set_callback(ctx->req,
-           CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
--          kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
-+          kcryptd_async_done, dmreq_of_req(cc, ctx->req));
- }
- /*
-@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt
- static int crypt_convert(struct crypt_config *cc,
-                        struct convert_context *ctx)
- {
--      struct crypt_cpu *this_cc = this_crypt_config(cc);
-       int r;
-       atomic_set(&ctx->cc_pending, 1);
-@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_co
-               atomic_inc(&ctx->cc_pending);
--              r = crypt_convert_block(cc, ctx, this_cc->req);
-+              r = crypt_convert_block(cc, ctx, ctx->req);
-               switch (r) {
-               /* async */
-@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_co
-                       reinit_completion(&ctx->restart);
-                       /* fall through*/
-               case -EINPROGRESS:
--                      this_cc->req = NULL;
-+                      ctx->req = NULL;
-                       ctx->cc_sector++;
-                       continue;
-@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_allo
-       io->sector = sector;
-       io->error = 0;
-       io->base_io = NULL;
-+      io->ctx.req = NULL;
-       atomic_set(&io->io_pending, 0);
-       return io;
-@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_
-       if (!atomic_dec_and_test(&io->io_pending))
-               return;
-+      if (io->ctx.req)
-+              mempool_free(io->ctx.req, cc->req_pool);
-       mempool_free(io, cc->io_pool);
-       if (likely(!base_io))
-@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_c
- static void crypt_dtr(struct dm_target *ti)
- {
-       struct crypt_config *cc = ti->private;
--      struct crypt_cpu *cpu_cc;
--      int cpu;
-       ti->private = NULL;
-@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *
-       if (cc->crypt_queue)
-               destroy_workqueue(cc->crypt_queue);
--      if (cc->cpu)
--              for_each_possible_cpu(cpu) {
--                      cpu_cc = per_cpu_ptr(cc->cpu, cpu);
--                      if (cpu_cc->req)
--                              mempool_free(cpu_cc->req, cc->req_pool);
--              }
--
-       crypt_free_tfms(cc);
-       if (cc->bs)
-@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *
-       if (cc->dev)
-               dm_put_device(ti, cc->dev);
--      if (cc->cpu)
--              free_percpu(cc->cpu);
--
-       kzfree(cc->cipher);
-       kzfree(cc->cipher_string);
-@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_ta
-       if (tmp)
-               DMWARN("Ignoring unexpected additional cipher options");
--      cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
--                               __alignof__(struct crypt_cpu));
--      if (!cc->cpu) {
--              ti->error = "Cannot allocate per cpu state";
--              goto bad_mem;
--      }
--
-       /*
-        * For compatibility with the original dm-crypt mapping format, if
-        * only the cipher name is supplied, use cbc-plain.
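For readability, the core of the removed patch can be condensed into a before/after sketch of crypt_alloc_req() (simplified from the hunks above: the callback setup and surrounding error handling are omitted, the _percpu/_perctx suffixes are illustrative only, and the snippet is not meant to build outside a 3.14-era kernel tree):

    /* Before: the request pointer lived in duplicated per-CPU state;
     * this_crypt_config() was a wrapper around this_cpu_ptr(cc->cpu). */
    static void crypt_alloc_req_percpu(struct crypt_config *cc,
                                       struct convert_context *ctx)
    {
            struct crypt_cpu *this_cc = this_crypt_config(cc);
            unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

            if (!this_cc->req)
                    this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
            /* If the work item migrates to another CPU (e.g. after a CPU is
             * taken offline), this_cc now points at a different CPU's slot
             * and the wrong ablkcipher_request is used. */
            ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
    }

    /* After: the pointer travels with the per-request convert_context,
     * which is embedded in dm_crypt_io, so CPU migration is harmless. */
    static void crypt_alloc_req_perctx(struct crypt_config *cc,
                                       struct convert_context *ctx)
    {
            unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

            if (!ctx->req)
                    ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
            ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
    }

With the per-cpu variant now upstream, the only spec-level change needed is dropping the patch and renumbering the remaining Patch500+ entries, which is what the kernel.spec diff below does.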
diff --git a/kernel.spec b/kernel.spec
index 83243d890d06fc6eed05494b3ea66830574e74f8..9b5de393744ce43133232306906e72967bc00532 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -225,15 +225,14 @@ Patch250: kernel-fix_256colors_menuconfig.patch
 Patch400:      kernel-virtio-gl-accel.patch
 
 # http://people.redhat.com/mpatocka/patches/kernel/dm-crypt-paralelizace/current/series.html
-Patch500:      dm-crypt-remove-percpu.patch
-Patch501:      bio-kmalloc-align.patch
-Patch502:      dm-crypt-per_bio_data.patch
-Patch503:      dm-crypt-unbound-workqueue.patch
-Patch504:      dm-crypt-dont-allocate-partial-pages.patch
-Patch505:      dm-crypt-fix-allocation-deadlock.patch
-Patch506:      dm-crypt-remove-io-pool.patch
-Patch507:      dm-crypt-offload-writes-to-thread.patch
-Patch508:      dm-crypt-sort-requests.patch
+Patch500:      bio-kmalloc-align.patch
+Patch501:      dm-crypt-per_bio_data.patch
+Patch502:      dm-crypt-unbound-workqueue.patch
+Patch503:      dm-crypt-dont-allocate-partial-pages.patch
+Patch504:      dm-crypt-fix-allocation-deadlock.patch
+Patch505:      dm-crypt-remove-io-pool.patch
+Patch506:      dm-crypt-offload-writes-to-thread.patch
+Patch507:      dm-crypt-sort-requests.patch
 
 Patch2000:     kernel-small_fixes.patch
 Patch2001:     kernel-pwc-uncompress.patch
@@ -743,7 +742,6 @@ cd linux-%{basever}
 %patch505 -p1
 %patch506 -p1
 %patch507 -p1
-%patch508 -p1
 %endif
 
 %endif # vanilla