1 dm-crypt: avoid deadlock in mempools
3 This patch fixes a theoretical deadlock introduced in the previous patch.
5 The function crypt_alloc_buffer may be called concurrently. If we allocate
6 from the mempool concurrently, there is a possibility of deadlock.
7 For example, if we have a mempool of 256 pages, two processes, each wanting 256
8 pages, allocate from the mempool concurrently, it may deadlock in a situation
9 where both processes have allocated 128 pages and the mempool is exhausted.
11 In order to avoid this scenario, we allocate the pages under a mutex.
13 In order to not degrade performance with excessive locking, we try
14 non-blocking allocations without a mutex first and if it fails, we fall back
15 to a blocking allocation with a mutex.
17 Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
20 drivers/md/dm-crypt.c | 41 ++++++++++++++++++++++++++++++++++++-----
21 1 file changed, 36 insertions(+), 5 deletions(-)
23 Index: linux-3.14/drivers/md/dm-crypt.c
24 ===================================================================
25 --- linux-3.14.orig/drivers/md/dm-crypt.c 2014-04-04 20:59:46.000000000 +0200
26 +++ linux-3.14/drivers/md/dm-crypt.c 2014-04-04 21:04:40.000000000 +0200
27 @@ -124,6 +124,7 @@ struct crypt_config {
31 + struct mutex bio_alloc_lock;
33 struct workqueue_struct *io_queue;
34 struct workqueue_struct *crypt_queue;
35 @@ -954,27 +955,51 @@ static void crypt_free_buffer_pages(stru
37 * Generate a new unfragmented bio with the given size
38 * This should never violate the device limitations
40 + * This function may be called concurrently. If we allocate from the mempool
41 + * concurrently, there is a possibility of deadlock. For example, if we have
42 + * mempool of 256 pages, two processes, each wanting 256 pages, allocate from
43 + * the mempool concurrently, it may deadlock in a situation where both processes
44 + * have allocated 128 pages and the mempool is exhausted.
46 + * In order to avoid this scenario, we allocate the pages under a mutex.
48 + * In order to not degrade performance with excessive locking, we try
49 + * non-blocking allocations without a mutex first and if it fails, we fall back
50 + * to a blocking allocation with a mutex.
52 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
54 struct crypt_config *cc = io->cc;
56 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
57 - gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
59 + gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
60 + unsigned i, len, remaining_size;
65 + if (unlikely(gfp_mask & __GFP_WAIT))
66 + mutex_lock(&cc->bio_alloc_lock);
68 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
73 clone_init(io, clone);
75 + remaining_size = size;
77 for (i = 0; i < nr_iovecs; i++) {
78 page = mempool_alloc(cc->page_pool, gfp_mask);
80 + crypt_free_buffer_pages(cc, clone);
82 + gfp_mask |= __GFP_WAIT;
86 - len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
87 + len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
89 bvec = &clone->bi_io_vec[clone->bi_vcnt++];
91 @@ -983,9 +1008,13 @@ static struct bio *crypt_alloc_buffer(st
93 clone->bi_iter.bi_size += len;
96 + remaining_size -= len;
100 + if (unlikely(gfp_mask & __GFP_WAIT))
101 + mutex_unlock(&cc->bio_alloc_lock);
106 @@ -1671,6 +1700,8 @@ static int crypt_ctr(struct dm_target *t
110 + mutex_init(&cc->bio_alloc_lock);
113 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
114 ti->error = "Invalid iv_offset sector";