1 --- linux-4.18/scripts/clang-version.sh~ 2018-08-12 22:41:04.000000000 +0200
2 +++ linux-4.18/scripts/clang-version.sh 2018-08-12 23:52:07.650403870 +0200
7 -if !( $compiler --version | grep -q clang) ; then
8 +if ! ( $compiler --version | grep -q clang) ; then
12 From 432061b3da64e488be3403124a72a9250bbe96d4 Mon Sep 17 00:00:00 2001
13 From: Mikulas Patocka <mpatocka@redhat.com>
14 Date: Wed, 5 Sep 2018 09:17:45 -0400
15 Subject: dm: disable CRYPTO_TFM_REQ_MAY_SLEEP to fix a GFP_KERNEL recursion
18 There's an XFS on dm-crypt deadlock, recursing back to itself due to the
19 crypto subsystem's use of GFP_KERNEL, reported here:
20 https://bugzilla.kernel.org/show_bug.cgi?id=200835
22 * dm-crypt calls crypt_convert in xts mode
23 * init_crypt from xts.c calls kmalloc(GFP_KERNEL)
24 * kmalloc(GFP_KERNEL) recurses into the XFS filesystem, the filesystem
25 tries to submit some bios and wait for them, causing a deadlock
27 Fix this by updating both the DM crypt and integrity targets to no
28 longer use the CRYPTO_TFM_REQ_MAY_SLEEP flag, which will change the
29 crypto allocations from GFP_KERNEL to GFP_ATOMIC, therefore they can't
30 recurse into a filesystem. A GFP_ATOMIC allocation can fail, but
31 init_crypt() in xts.c handles the allocation failure gracefully - it
32 will fall back to a preallocated buffer if the allocation fails.
34 The crypto API maintainer says that the crypto API only needs to
35 allocate memory when dealing with unaligned buffers and therefore
36 turning CRYPTO_TFM_REQ_MAY_SLEEP off is safe (see this discussion:
37 https://www.redhat.com/archives/dm-devel/2018-August/msg00195.html )
39 Cc: stable@vger.kernel.org
40 Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
41 Signed-off-by: Mike Snitzer <snitzer@redhat.com>
43 drivers/md/dm-crypt.c | 10 +++++-----
44 drivers/md/dm-integrity.c | 4 ++--
45 2 files changed, 7 insertions(+), 7 deletions(-)
47 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
48 index f266c81f396f..0481223b1deb 100644
49 @@ -334,7 +334,7 @@ static int crypt_iv_essiv_init(struct cr
51 sg_init_one(&sg, cc->key, cc->key_size);
52 ahash_request_set_tfm(req, essiv->hash_tfm);
53 - ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
54 + ahash_request_set_callback(req, 0, NULL, NULL);
55 ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
57 err = crypto_ahash_digest(req);
58 @@ -606,7 +606,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
61 desc->tfm = lmk->hash_tfm;
62 - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
65 r = crypto_shash_init(desc);
67 @@ -768,7 +768,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
69 /* calculate crc32 for every 32bit part and xor it */
70 desc->tfm = tcw->crc32_tfm;
71 - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
73 for (i = 0; i < 4; i++) {
74 r = crypto_shash_init(desc);
76 @@ -1251,7 +1251,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
77 * requests if driver request queue is full.
79 skcipher_request_set_callback(ctx->r.req,
80 - CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
81 + CRYPTO_TFM_REQ_MAY_BACKLOG,
82 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
85 @@ -1268,7 +1268,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
86 * requests if driver request queue is full.
88 aead_request_set_callback(ctx->r.req_aead,
89 - CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
90 + CRYPTO_TFM_REQ_MAY_BACKLOG,
91 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
94 diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
95 index 378878599466..89ccb64342de 100644
96 --- a/drivers/md/dm-integrity.c
97 +++ b/drivers/md/dm-integrity.c
98 @@ -532,7 +532,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
101 desc->tfm = ic->journal_mac;
102 - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
105 r = crypto_shash_init(desc);
107 @@ -676,7 +676,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
108 static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
111 - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
112 + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
113 complete_journal_encrypt, comp);
115 r = crypto_skcipher_encrypt(req);