1 dm-crypt: avoid deadlock in mempools
3 This patch fixes a theoretical deadlock introduced in the previous patch.
5 The function crypt_alloc_buffer may be called concurrently. If we allocate
6 from the mempool concurrently, there is a possibility of deadlock.
7 For example, if we have a mempool of 256 pages, and two processes, each wanting 256
8 pages, allocate from the mempool concurrently, it may deadlock in a situation
9 where both processes have allocated 128 pages and the mempool is exhausted.
11 In order to avoid this scenario, we allocate the pages under a mutex.
13 In order to not degrade performance with excessive locking, we try
14 non-blocking allocations without a mutex first, and if that fails, we fall back
15 to a blocking allocation with a mutex.
17 Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
20 drivers/md/dm-crypt.c | 36 +++++++++++++++++++++++++++++++++---
21 1 file changed, 33 insertions(+), 3 deletions(-)
23 Index: linux-3.10.4-fast/drivers/md/dm-crypt.c
24 ===================================================================
25 --- linux-3.10.4-fast.orig/drivers/md/dm-crypt.c 2013-07-31 17:03:21.000000000 +0200
26 +++ linux-3.10.4-fast/drivers/md/dm-crypt.c 2013-07-31 17:03:24.000000000 +0200
27 @@ -118,6 +118,7 @@ struct crypt_config {
31 + struct mutex bio_alloc_lock;
33 struct workqueue_struct *io_queue;
34 struct workqueue_struct *crypt_queue;
35 @@ -780,24 +781,46 @@ static void crypt_free_buffer_pages(stru
37 * Generate a new unfragmented bio with the given size
38 * This should never violate the device limitations
40 + * This function may be called concurrently. If we allocate from the mempool
41 + * concurrently, there is a possibility of deadlock. For example, if we have
42 + * mempool of 256 pages, and two processes, each wanting 256 pages, allocate from
43 + * the mempool concurrently, it may deadlock in a situation where both processes
44 + * have allocated 128 pages and the mempool is exhausted.
46 + * In order to avoid this scenario, we allocate the pages under a mutex.
48 + * In order to not degrade performance with excessive locking, we try
49 + * non-blocking allocations without a mutex first, and if that fails, we fall back
50 + * to a blocking allocation with a mutex.
52 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
54 struct crypt_config *cc = io->cc;
56 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
57 - gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
58 + gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
63 + if (unlikely(gfp_mask & __GFP_WAIT))
64 + mutex_lock(&cc->bio_alloc_lock);
66 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
71 clone_init(io, clone);
73 for (i = 0; i < nr_iovecs; i++) {
74 page = mempool_alloc(cc->page_pool, gfp_mask);
76 + crypt_free_buffer_pages(cc, clone);
78 + gfp_mask |= __GFP_WAIT;
82 len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
84 @@ -806,12 +829,17 @@ static struct bio *crypt_alloc_buffer(st
85 mempool_free(page, cc->page_pool);
86 crypt_free_buffer_pages(cc, clone);
97 + if (unlikely(gfp_mask & __GFP_WAIT))
98 + mutex_unlock(&cc->bio_alloc_lock);
103 @@ -1483,6 +1511,8 @@ static int crypt_ctr(struct dm_target *t
107 + mutex_init(&cc->bio_alloc_lock);
110 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
111 ti->error = "Invalid iv_offset sector";