]>
Commit | Line | Data |
---|---|---|
101a7448 ŁK |
1 | dm-crypt: offload writes to thread |
2 | ||
3 | Submitting write bios directly in the encryption thread caused serious | |
4 | performance degradation. On a multiprocessor machine, encryption requests | |
5 | finish in a different order than they were submitted in. Consequently, write | |
6 | requests would be submitted in a different order, which could cause severe | |
7 | performance degradation. | |
8 | ||
9 | This patch moves submitting write requests to a separate thread so that | |
10 | the requests can be sorted before submitting. | |
11 | ||
12 | Sorting is implemented in the next patch. | |
13 | ||
14 | Note: it is required that a previous patch "dm-crypt: don't allocate pages | |
15 | for a partial request." is applied before applying this patch. Without | |
16 | that, this patch could introduce a crash. | |
17 | ||
18 | Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> | |
19 | ||
20 | --- | |
21 | drivers/md/dm-crypt.c | 120 ++++++++++++++++++++++++++++++++++++++++---------- | |
22 | 1 file changed, 97 insertions(+), 23 deletions(-) | |
23 | ||
39348b5e | 24 | Index: linux-3.10.4-fast/drivers/md/dm-crypt.c |
101a7448 | 25 | =================================================================== |
39348b5e ŁK |
26 | --- linux-3.10.4-fast.orig/drivers/md/dm-crypt.c 2013-07-31 17:03:24.000000000 +0200 |
27 | +++ linux-3.10.4-fast/drivers/md/dm-crypt.c 2013-07-31 17:03:27.000000000 +0200 | |
101a7448 ŁK |
28 | @@ -17,6 +17,7 @@ |
29 | #include <linux/slab.h> | |
30 | #include <linux/crypto.h> | |
31 | #include <linux/workqueue.h> | |
32 | +#include <linux/kthread.h> | |
33 | #include <linux/backing-dev.h> | |
34 | #include <linux/atomic.h> | |
35 | #include <linux/scatterlist.h> | |
36 | @@ -59,6 +60,8 @@ struct dm_crypt_io { | |
37 | atomic_t io_pending; | |
38 | int error; | |
39 | sector_t sector; | |
40 | + | |
41 | + struct list_head list; | |
42 | }; | |
43 | ||
44 | struct dm_crypt_request { | |
45 | @@ -123,6 +126,10 @@ struct crypt_config { | |
46 | struct workqueue_struct *io_queue; | |
47 | struct workqueue_struct *crypt_queue; | |
48 | ||
49 | + struct task_struct *write_thread; | |
50 | + wait_queue_head_t write_thread_wait; | |
51 | + struct list_head write_thread_list; | |
52 | + | |
53 | char *cipher; | |
54 | char *cipher_string; | |
55 | ||
39348b5e | 56 | @@ -975,37 +982,89 @@ static int kcryptd_io_read(struct dm_cry |
101a7448 ŁK |
57 | return 0; |
58 | } | |
59 | ||
60 | +static void kcryptd_io_read_work(struct work_struct *work) | |
61 | +{ | |
62 | + struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); | |
63 | + | |
64 | + crypt_inc_pending(io); | |
65 | + if (kcryptd_io_read(io, GFP_NOIO)) | |
66 | + io->error = -ENOMEM; | |
67 | + crypt_dec_pending(io); | |
68 | +} | |
69 | + | |
70 | +static void kcryptd_queue_read(struct dm_crypt_io *io) | |
71 | +{ | |
72 | + struct crypt_config *cc = io->cc; | |
73 | + | |
74 | + INIT_WORK(&io->work, kcryptd_io_read_work); | |
75 | + queue_work(cc->io_queue, &io->work); | |
76 | +} | |
77 | + | |
78 | static void kcryptd_io_write(struct dm_crypt_io *io) | |
79 | { | |
80 | struct bio *clone = io->ctx.bio_out; | |
81 | + | |
82 | generic_make_request(clone); | |
83 | } | |
84 | ||
85 | -static void kcryptd_io(struct work_struct *work) | |
86 | +static int dmcrypt_write(void *data) | |
87 | { | |
88 | - struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); | |
89 | + struct crypt_config *cc = data; | |
90 | + while (1) { | |
91 | + struct list_head local_list; | |
92 | + struct blk_plug plug; | |
93 | ||
94 | - if (bio_data_dir(io->base_bio) == READ) { | |
95 | - crypt_inc_pending(io); | |
96 | - if (kcryptd_io_read(io, GFP_NOIO)) | |
97 | - io->error = -ENOMEM; | |
98 | - crypt_dec_pending(io); | |
99 | - } else | |
100 | - kcryptd_io_write(io); | |
101 | -} | |
102 | + DECLARE_WAITQUEUE(wait, current); | |
103 | ||
104 | -static void kcryptd_queue_io(struct dm_crypt_io *io) | |
105 | -{ | |
106 | - struct crypt_config *cc = io->cc; | |
107 | + spin_lock_irq(&cc->write_thread_wait.lock); | |
108 | +continue_locked: | |
109 | ||
110 | - INIT_WORK(&io->work, kcryptd_io); | |
111 | - queue_work(cc->io_queue, &io->work); | |
112 | + if (!list_empty(&cc->write_thread_list)) | |
113 | + goto pop_from_list; | |
114 | + | |
115 | + __set_current_state(TASK_INTERRUPTIBLE); | |
116 | + __add_wait_queue(&cc->write_thread_wait, &wait); | |
117 | + | |
118 | + spin_unlock_irq(&cc->write_thread_wait.lock); | |
119 | + | |
120 | + if (unlikely(kthread_should_stop())) { | |
121 | + set_task_state(current, TASK_RUNNING); | |
122 | + remove_wait_queue(&cc->write_thread_wait, &wait); | |
123 | + break; | |
124 | + } | |
125 | + | |
126 | + schedule(); | |
127 | + | |
128 | + set_task_state(current, TASK_RUNNING); | |
129 | + spin_lock_irq(&cc->write_thread_wait.lock); | |
130 | + __remove_wait_queue(&cc->write_thread_wait, &wait); | |
131 | + goto continue_locked; | |
132 | + | |
133 | +pop_from_list: | |
134 | + local_list = cc->write_thread_list; | |
135 | + local_list.next->prev = &local_list; | |
136 | + local_list.prev->next = &local_list; | |
137 | + INIT_LIST_HEAD(&cc->write_thread_list); | |
138 | + | |
139 | + spin_unlock_irq(&cc->write_thread_wait.lock); | |
140 | + | |
141 | + blk_start_plug(&plug); | |
142 | + do { | |
143 | + struct dm_crypt_io *io = container_of(local_list.next, | |
144 | + struct dm_crypt_io, list); | |
145 | + list_del(&io->list); | |
146 | + kcryptd_io_write(io); | |
147 | + } while (!list_empty(&local_list)); | |
148 | + blk_finish_plug(&plug); | |
149 | + } | |
150 | + return 0; | |
151 | } | |
152 | ||
153 | -static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) | |
154 | +static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io) | |
155 | { | |
156 | struct bio *clone = io->ctx.bio_out; | |
157 | struct crypt_config *cc = io->cc; | |
158 | + unsigned long flags; | |
159 | ||
160 | if (unlikely(io->error < 0)) { | |
161 | crypt_free_buffer_pages(cc, clone); | |
39348b5e | 162 | @@ -1019,10 +1078,10 @@ static void kcryptd_crypt_write_io_submi |
101a7448 ŁK |
163 | |
164 | clone->bi_sector = cc->start + io->sector; | |
165 | ||
166 | - if (async) | |
167 | - kcryptd_queue_io(io); | |
168 | - else | |
169 | - generic_make_request(clone); | |
170 | + spin_lock_irqsave(&cc->write_thread_wait.lock, flags); | |
171 | + list_add_tail(&io->list, &cc->write_thread_list); | |
172 | + wake_up_locked(&cc->write_thread_wait); | |
173 | + spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags); | |
174 | } | |
175 | ||
176 | static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) | |
39348b5e | 177 | @@ -1058,7 +1117,7 @@ static void kcryptd_crypt_write_convert( |
101a7448 ŁK |
178 | |
179 | /* Encryption was already finished, submit io now */ | |
180 | if (crypt_finished) | |
181 | - kcryptd_crypt_write_io_submit(io, 0); | |
182 | + kcryptd_crypt_write_io_submit(io); | |
183 | ||
184 | dec: | |
185 | crypt_dec_pending(io); | |
39348b5e | 186 | @@ -1116,7 +1175,7 @@ static void kcryptd_async_done(struct cr |
101a7448 ŁK |
187 | if (bio_data_dir(io->base_bio) == READ) |
188 | kcryptd_crypt_read_done(io); | |
189 | else | |
190 | - kcryptd_crypt_write_io_submit(io, 1); | |
191 | + kcryptd_crypt_write_io_submit(io); | |
192 | } | |
193 | ||
194 | static void kcryptd_crypt(struct work_struct *work) | |
39348b5e | 195 | @@ -1260,6 +1319,9 @@ static void crypt_dtr(struct dm_target * |
101a7448 ŁK |
196 | if (!cc) |
197 | return; | |
198 | ||
199 | + if (cc->write_thread) | |
200 | + kthread_stop(cc->write_thread); | |
201 | + | |
202 | if (cc->io_queue) | |
203 | destroy_workqueue(cc->io_queue); | |
204 | if (cc->crypt_queue) | |
39348b5e | 205 | @@ -1576,6 +1638,18 @@ static int crypt_ctr(struct dm_target *t |
101a7448 ŁK |
206 | goto bad; |
207 | } | |
208 | ||
209 | + init_waitqueue_head(&cc->write_thread_wait); | |
210 | + INIT_LIST_HEAD(&cc->write_thread_list); | |
211 | + | |
212 | + cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write"); | |
213 | + if (IS_ERR(cc->write_thread)) { | |
214 | + ret = PTR_ERR(cc->write_thread); | |
215 | + cc->write_thread = NULL; | |
216 | + ti->error = "Couldn't spawn write thread"; | |
217 | + goto bad; | |
218 | + } | |
219 | + wake_up_process(cc->write_thread); | |
220 | + | |
221 | ti->num_flush_bios = 1; | |
222 | ti->discard_zeroes_data_unsupported = true; | |
223 | ||
39348b5e | 224 | @@ -1607,7 +1681,7 @@ static int crypt_map(struct dm_target *t |
101a7448 ŁK |
225 | |
226 | if (bio_data_dir(io->base_bio) == READ) { | |
227 | if (kcryptd_io_read(io, GFP_NOWAIT)) | |
228 | - kcryptd_queue_io(io); | |
229 | + kcryptd_queue_read(io); | |
230 | } else | |
231 | kcryptd_queue_crypt(io); | |
232 |