diff -Nur linux-2.6.0.orig/drivers/md/dm-crypt.c linux-2.6.0/drivers/md/dm-crypt.c
--- linux-2.6.0.orig/drivers/md/dm-crypt.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.0/drivers/md/dm-crypt.c	2004-01-02 21:34:12.424513048 +0100
@@ -0,0 +1,807 @@
+/*
+ * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <asm/scatterlist.h>
+
+#include "dm.h"
+#include "dm-daemon.h"
+
+/*
+ * per bio private data
+ */
+struct crypt_io {
+	struct dm_target *target;
+	struct bio *bio;
+	struct bio *first_clone;
+	atomic_t pending;
+	int error;
+};
+
+/*
+ * context holding the current state of a multi-part conversion
+ */
+struct convert_context {
+	struct bio *bio_in;
+	struct bio *bio_out;
+	unsigned int offset_in;
+	unsigned int offset_out;
+	int idx_in;
+	int idx_out;
+	sector_t sector;
+	int write;
+};
+
+/*
+ * Crypt: maps a linear range of a block device
+ * and encrypts / decrypts at the same time.
+ */
+struct crypt_config {
+	struct dm_dev *dev;
+	sector_t start;
+
+	/*
+	 * pool for per bio private data and
+	 * for encryption buffer pages
+	 */
+	mempool_t *io_pool;
+	mempool_t *page_pool;
+
+	/*
+	 * crypto related data
+	 */
+	struct crypto_tfm *tfm;
+	sector_t iv_offset;
+	int (*iv_generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
+	int iv_size;
+	int key_size;
+	u8 key[0];
+};
+
+#define MIN_IOS 256
+#define MIN_POOL_PAGES 32
+#define MIN_BIO_PAGES 8
+
+static kmem_cache_t *_crypt_io_pool;
+
+/*
+ * Mempool alloc and free functions for the page
+ */
+static void *mempool_alloc_page(int gfp_mask, void *data)
+{
+	return alloc_page(gfp_mask);
+}
+
+static void mempool_free_page(void *page, void *data)
+{
+	__free_page(page);
+}
+
+
+/*
+ * Different IV generation algorithms
+ */
+static int crypt_iv_plain(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);
+	if (cc->iv_size > sizeof(u32) / sizeof(u8))
+		memset(iv + (sizeof(u32) / sizeof(u8)), 0,
+		       cc->iv_size - (sizeof(u32) / sizeof(u8)));
+
+	return 0;
+}
+
+static inline int
+crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
+                          struct scatterlist *in, unsigned int length,
+                          int write, sector_t sector)
+{
+	u8 iv[cc->iv_size];
+	int r;
+
+	if (cc->iv_generator) {
+		r = cc->iv_generator(cc, iv, sector);
+		if (r < 0)
+			return r;
+
+		if (write)
+			r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv);
+		else
+			r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv);
+	} else {
+		if (write)
+			r = crypto_cipher_encrypt(cc->tfm, out, in, length);
+		else
+			r = crypto_cipher_decrypt(cc->tfm, out, in, length);
+	}
+
+	return r;
+}
+
+static void
+crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
+                   struct bio *bio_out, struct bio *bio_in,
+                   sector_t sector, int write)
+{
+	ctx->bio_in = bio_in;
+	ctx->bio_out = bio_out;
+	ctx->offset_in = 0;
+	ctx->offset_out = 0;
+	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
+	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+	ctx->sector = sector + cc->iv_offset;
+	ctx->write = write;
+}
+
+/*
+ * Encrypt / decrypt data from one bio to another one (can be the same one)
+ */
+static int crypt_convert(struct crypt_config *cc,
+                         struct convert_context *ctx)
+{
+	int r = 0;
+
+	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
+	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
+		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
+		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+		struct scatterlist sg_in = {
+			.page = bv_in->bv_page,
+			.offset = bv_in->bv_offset + ctx->offset_in,
+			.length = 1 << SECTOR_SHIFT
+		};
+		struct scatterlist sg_out = {
+			.page = bv_out->bv_page,
+			.offset = bv_out->bv_offset + ctx->offset_out,
+			.length = 1 << SECTOR_SHIFT
+		};
+
+		ctx->offset_in += sg_in.length;
+		if (ctx->offset_in >= bv_in->bv_len) {
+			ctx->offset_in = 0;
+			ctx->idx_in++;
+		}
+
+		ctx->offset_out += sg_out.length;
+		if (ctx->offset_out >= bv_out->bv_len) {
+			ctx->offset_out = 0;
+			ctx->idx_out++;
+		}
+
+		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
+		                              ctx->write, ctx->sector);
+		if (r < 0)
+			break;
+
+		ctx->sector++;
+	}
+
+	return r;
+}
+
+/*
+ * Generate a new unfragmented bio with the given size
+ * This should never violate the device limitations
+ * May return a smaller bio when running out of pages
+ */
+static struct bio *
+crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
+                   struct bio *base_bio, int *bio_vec_idx)
+{
+	struct bio *bio;
+	int nr_iovecs = dm_div_up(size, PAGE_SIZE);
+	int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
+	int flags = current->flags;
+	int i;
+
+	/*
+	 * Tell VM to act less aggressively and fail earlier.
+	 * This is not necessary but increases throughput.
+	 * FIXME: Is this really intelligent?
+	 */
+	current->flags &= ~PF_MEMALLOC;
+
+	if (base_bio)
+		bio = bio_clone(base_bio, GFP_NOIO);
+	else
+		bio = bio_alloc(GFP_NOIO, nr_iovecs);
+	if (!bio)
+		return NULL;
+
+	/* if the last bio was not complete, continue where that one ended */
+	bio->bi_idx = *bio_vec_idx;
+	bio->bi_vcnt = *bio_vec_idx;
+	bio->bi_size = 0;
+
+	/* bio->bi_idx pages have already been allocated */
+	size -= bio->bi_idx * PAGE_SIZE;
+
+	for(i = bio->bi_idx; i < nr_iovecs; i++) {
+		struct bio_vec *bv = bio_iovec_idx(bio, i);
+
+		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
+		if (!bv->bv_page)
+			break;
+
+		/*
+		 * if additional pages cannot be allocated without waiting,
+		 * return a partially allocated bio, the caller will then try
+		 * to allocate additional bios while submitting this partial bio
+		 */
+		if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1))
+			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
+
+		bv->bv_offset = 0;
+		if (size > PAGE_SIZE)
+			bv->bv_len = PAGE_SIZE;
+		else
+			bv->bv_len = size;
+
+		bio->bi_size += bv->bv_len;
+		bio->bi_vcnt++;
+		size -= bv->bv_len;
+	}
+
+	if (flags & PF_MEMALLOC)
+		current->flags |= PF_MEMALLOC;
+
+	if (!bio->bi_size) {
+		bio_put(bio);
+		return NULL;
+	}
+
+	/*
+	 * Remember the last bio_vec allocated to be able
+	 * to correctly continue after the splitting.
+	 */
+	*bio_vec_idx = bio->bi_vcnt;
+
+	return bio;
+}
+
+static void crypt_free_buffer_pages(struct crypt_config *cc,
+                                    struct bio *bio, unsigned int bytes)
+{
+	unsigned int start, end;
+	struct bio_vec *bv;
+	int i;
+
+	/*
+	 * This is ugly, but Jens Axboe thinks that using bi_idx in the
+	 * endio function is too dangerous at the moment, so I calculate the
+	 * correct position using bi_vcnt and bi_size.
+	 * The bv_offset and bv_len fields might already be modified but we
+	 * know that we always allocated whole pages.
+	 * A fix to the bi_idx issue in the kernel is in the works, so
+	 * we will hopefully be able to revert to the cleaner solution soon.
+	 */
+	i = bio->bi_vcnt - 1;
+	bv = bio_iovec_idx(bio, i);
+	end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - bio->bi_size;
+	start = end - bytes;
+
+	start >>= PAGE_SHIFT;
+	if (!bio->bi_size)
+		end = bio->bi_vcnt;
+	else
+		end >>= PAGE_SHIFT;
+
+	for(i = start; i < end; i++) {
+		bv = bio_iovec_idx(bio, i);
+		BUG_ON(!bv->bv_page);
+		mempool_free(bv->bv_page, cc->page_pool);
+		bv->bv_page = NULL;
+	}
+}
+
+/*
+ * One of the bios was finished. Check for completion of
+ * the whole request and correctly clean up the buffer.
+ */
+static void dec_pending(struct crypt_io *io, int error)
+{
+	struct crypt_config *cc = (struct crypt_config *) io->target->private;
+
+	if (error < 0)
+		io->error = error;
+
+	if (!atomic_dec_and_test(&io->pending))
+		return;
+
+	if (io->first_clone)
+		bio_put(io->first_clone);
+
+	if (io->bio)
+		bio_endio(io->bio, io->bio->bi_size, io->error);
+
+	mempool_free(io, cc->io_pool);
+}
+
+/*
+ * kcryptd:
+ *
+ * Needed because it would be very unwise to do decryption in an
+ * interrupt context, so bios returning from read requests get
+ * queued here.
+ */
+static spinlock_t _kcryptd_lock = SPIN_LOCK_UNLOCKED;
+static struct bio *_kcryptd_bio_head;
+static struct bio *_kcryptd_bio_tail;
+
+static struct dm_daemon _kcryptd;
+
+/*
+ * Fetch a list of the complete bios.
+ */
+static struct bio *kcryptd_get_bios(void)
+{
+	struct bio *bio;
+
+	spin_lock_irq(&_kcryptd_lock);
+	bio = _kcryptd_bio_head;
+	if (bio)
+		_kcryptd_bio_head = _kcryptd_bio_tail = NULL;
+	spin_unlock_irq(&_kcryptd_lock);
+
+	return bio;
+}
+
+/*
+ * Append bio to work queue
+ */
+static void kcryptd_queue_bio(struct bio *bio)
+{
+	unsigned long flags;
+
+	bio->bi_next = NULL;
+	spin_lock_irqsave(&_kcryptd_lock, flags);
+	if (_kcryptd_bio_tail)
+		_kcryptd_bio_tail->bi_next = bio;
+	else
+		_kcryptd_bio_head = bio;
+	_kcryptd_bio_tail = bio;
+	spin_unlock_irqrestore(&_kcryptd_lock, flags);
+}
+
+static jiffy_t kcryptd_do_work(void)
+{
+	int r;
+	struct bio *bio;
+	struct bio *next_bio;
+	struct crypt_io *io;
+	struct crypt_config *cc;
+	struct convert_context ctx;
+
+	bio = kcryptd_get_bios();
+
+	while (bio) {
+		io = (struct crypt_io *) bio->bi_private;
+		cc = (struct crypt_config *) io->target->private;
+
+		crypt_convert_init(cc, &ctx, io->bio, io->bio,
+		                   io->bio->bi_sector - io->target->begin, 0);
+		r = crypt_convert(cc, &ctx);
+
+		next_bio = bio->bi_next;
+		bio->bi_next = NULL;
+
+		bio_put(bio);
+		dec_pending(io, r);
+
+		bio = next_bio;
+	}
+
+	return 0;
+}
+
+/*
+ * Decode key from its hex representation
+ */
+static int crypt_decode_key(u8 *key, char *hex, int size)
+{
+	char buffer[3];
+	char *endp;
+	int i;
+
+	buffer[2] = '\0';
+
+	for(i = 0; i < size; i++) {
+		buffer[0] = *hex++;
+		buffer[1] = *hex++;
+
+		key[i] = (u8)simple_strtoul(buffer, &endp, 16);
+
+		if (endp != &buffer[2])
+			return -EINVAL;
+	}
+
+	if (*hex != '\0')
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Encode key into its hex representation
+ */
+static void crypt_encode_key(char *hex, u8 *key, int size)
+{
+	static char hex_digits[] = "0123456789abcdef";
+	int i;
+
+	for(i = 0; i < size; i++) {
+		*hex++ = hex_digits[*key >> 4];
+		*hex++ = hex_digits[*key & 0x0f];
+		key++;
+	}
+
+	*hex++ = '\0';
+}
+
+/*
+ * Construct an encryption mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start>
+ */
+static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct crypt_config *cc;
+	struct crypto_tfm *tfm;
+	char *tmp;
+	char *cipher;
+	char *mode;
+	int crypto_flags;
+	int key_size;
+
+	if (argc != 5) {
+		ti->error = "dm-crypt: Not enough arguments";
+		return -EINVAL;
+	}
+
+	tmp = argv[0];
+	cipher = strsep(&tmp, "-");
+	mode = strsep(&tmp, "-");
+
+	if (tmp)
+		DMWARN("dm-crypt: Unexpected additional cipher options");
+
+	key_size = strlen(argv[1]) >> 1;
+
+	cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
+	if (cc == NULL) {
+		ti->error =
+			"dm-crypt: Cannot allocate transparent encryption context";
+		return -ENOMEM;
+	}
+
+	if (!mode || strcmp(mode, "plain") == 0)
+		cc->iv_generator = crypt_iv_plain;
+	else if (strcmp(mode, "ecb") == 0)
+		cc->iv_generator = NULL;
+	else {
+		ti->error = "dm-crypt: Invalid chaining mode";
+		goto bad1;
+	}
+
+	if (cc->iv_generator)
+		crypto_flags = CRYPTO_TFM_MODE_CBC;
+	else
+		crypto_flags = CRYPTO_TFM_MODE_ECB;
+
+	tfm = crypto_alloc_tfm(cipher, crypto_flags);
+	if (!tfm) {
+		ti->error = "dm-crypt: Error allocating crypto tfm";
+		goto bad1;
+	}
+
+	if (tfm->crt_u.cipher.cit_decrypt_iv && tfm->crt_u.cipher.cit_encrypt_iv)
+		/* at least a 32 bit sector number should fit in our buffer */
+		cc->iv_size = max(crypto_tfm_alg_ivsize(tfm), sizeof(u32) / sizeof(u8));
+	else {
+		cc->iv_size = 0;
+		if (cc->iv_generator) {
+			DMWARN("dm-crypt: Selected cipher does not support IVs");
+			cc->iv_generator = NULL;
+		}
+	}
+
+	cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
+	                             mempool_free_slab, _crypt_io_pool);
+	if (!cc->io_pool) {
+		ti->error = "dm-crypt: Cannot allocate crypt io mempool";
+		goto bad2;
+	}
+
+	cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page,
+	                               mempool_free_page, NULL);
+	if (!cc->page_pool) {
+		ti->error = "dm-crypt: Cannot allocate page mempool";
+		goto bad3;
+	}
+
+	cc->tfm = tfm;
+	cc->key_size = key_size;
+	if ((key_size == 0 && strcmp(argv[1], "-") != 0)
+	    || crypt_decode_key(cc->key, argv[1], key_size) < 0) {
+		ti->error = "dm-crypt: Error decoding key";
+		goto bad4;
+	}
+
+	if (tfm->crt_u.cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
+		ti->error = "dm-crypt: Error setting key";
+		goto bad4;
+	}
+
+	if (sscanf(argv[2], SECTOR_FORMAT, &cc->iv_offset) != 1) {
+		ti->error = "dm-crypt: Invalid iv_offset sector";
+		goto bad4;
+	}
+
+	if (sscanf(argv[4], SECTOR_FORMAT, &cc->start) != 1) {
+		ti->error = "dm-crypt: Invalid device sector";
+		goto bad4;
+	}
+
+	if (dm_get_device(ti, argv[3], cc->start, ti->len,
+	                  dm_table_get_mode(ti->table), &cc->dev)) {
+		ti->error = "dm-crypt: Device lookup failed";
+		goto bad4;
+	}
+
+	ti->private = cc;
+	return 0;
+
+bad4:
+	mempool_destroy(cc->page_pool);
+bad3:
+	mempool_destroy(cc->io_pool);
+bad2:
+	crypto_free_tfm(tfm);
+bad1:
+	kfree(cc);
+	return -EINVAL;
+}
+
+static void crypt_dtr(struct dm_target *ti)
+{
+	struct crypt_config *cc = (struct crypt_config *) ti->private;
+
+	mempool_destroy(cc->page_pool);
+	mempool_destroy(cc->io_pool);
+
+	crypto_free_tfm(cc->tfm);
+	dm_put_device(ti, cc->dev);
+	kfree(cc);
+}
+
+static int crypt_endio(struct bio *bio, unsigned int done, int error)
+{
+	struct crypt_io *io = (struct crypt_io *) bio->bi_private;
+	struct crypt_config *cc = (struct crypt_config *) io->target->private;
+
+	if (bio_rw(bio) == WRITE) {
+		/*
+		 * free the processed pages, even if
+		 * it's only a partially completed write
+		 */
+		crypt_free_buffer_pages(cc, bio, done);
+	}
+
+	if (bio->bi_size)
+		return 1;
+
+	/*
+	 * successful reads are decrypted by the worker thread
+	 */
+	if ((bio_rw(bio) == READ || bio_rw(bio) == READA)
+	    && bio_flagged(bio, BIO_UPTODATE)) {
+		kcryptd_queue_bio(bio);
+		dm_daemon_wake(&_kcryptd);
+		return 0;
+	}
+
+	bio_put(bio);
+	dec_pending(io, error);
+
+	return error;
+}
+
+static int crypt_map(struct dm_target *ti, struct bio *bio)
+{
+	struct crypt_config *cc = (struct crypt_config *) ti->private;
+	struct crypt_io *io = mempool_alloc(cc->io_pool, GFP_NOIO);
+	struct bio *clone = NULL;
+	struct convert_context ctx;
+	unsigned int remaining = bio->bi_size;
+	sector_t sector = bio->bi_sector - ti->begin;
+	int bio_vec_idx = 0;
+	int r = 0;
+
+	io->target = ti;
+	io->bio = bio;
+	io->first_clone = NULL;
+	io->error = 0;
+	atomic_set(&io->pending, 1); /* hold a reference */
+
+	if (bio_rw(bio) == WRITE)
+		crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);
+
+	/*
+	 * The allocated buffers can be smaller than the whole bio,
+	 * so repeat the whole process until all the data can be handled.
+	 */
+	while (remaining) {
+		if (bio_rw(bio) == WRITE) {
+			clone = crypt_alloc_buffer(cc, bio->bi_size,
+			                           io->first_clone,
+			                           &bio_vec_idx);
+			if (clone) {
+				ctx.bio_out = clone;
+				r = crypt_convert(cc, &ctx);
+				if (r < 0) {
+					crypt_free_buffer_pages(cc, clone,
+					                        clone->bi_size);
+					bio_put(clone);
+					goto cleanup;
+				}
+			}
+		} else
+			clone = bio_clone(bio, GFP_NOIO);
+
+		if (!clone) {
+			r = -ENOMEM;
+			goto cleanup;
+		}
+
+		if (!io->first_clone) {
+			/*
+			 * hold a reference to the first clone, because it
+			 * holds the bio_vec array and that can't be freed
+			 * before all other clones are released
+			 */
+			bio_get(clone);
+			io->first_clone = clone;
+		}
+		atomic_inc(&io->pending);
+
+		clone->bi_private = io;
+		clone->bi_end_io = crypt_endio;
+		clone->bi_bdev = cc->dev->bdev;
+		clone->bi_sector = cc->start + sector;
+		clone->bi_rw = bio->bi_rw;
+
+		remaining -= clone->bi_size;
+		sector += bio_sectors(clone);
+
+		generic_make_request(clone);
+	}
+
+	/* drop reference, clones could have returned before we reach this */
+	dec_pending(io, 0);
+	return 0;
+
+cleanup:
+	if (io->first_clone) {
+		dec_pending(io, r);
+		return 0;
+	}
+
+	/* if no bio has been dispatched yet, we can directly return the error */
+	mempool_free(io, cc->io_pool);
+	return r;
+}
+
+static int crypt_status(struct dm_target *ti, status_type_t type,
+                        char *result, unsigned int maxlen)
+{
+	struct crypt_config *cc = (struct crypt_config *) ti->private;
+	char buffer[32];
+	const char *cipher;
+	const char *mode = NULL;
+	int offset;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		result[0] = '\0';
+		break;
+
+	case STATUSTYPE_TABLE:
+		cipher = crypto_tfm_alg_name(cc->tfm);
+
+		switch(cc->tfm->crt_u.cipher.cit_mode) {
+		case CRYPTO_TFM_MODE_CBC:
+			mode = "cbc";
+			break;
+		case CRYPTO_TFM_MODE_ECB:
+			mode = "ecb";
+			break;
+		default:
+			BUG();
+		}
+
+		snprintf(result, maxlen, "%s-%s ", cipher, mode);
+		offset = strlen(result);
+
+		if (cc->key_size > 0) {
+			if ((maxlen - offset) < ((cc->key_size << 1) + 1))
+				return -ENOMEM;
+
+			crypt_encode_key(result + offset, cc->key, cc->key_size);
+			offset += cc->key_size << 1;
+		} else {
+			if (offset >= maxlen)
+				return -ENOMEM;
+			result[offset++] = '-';
+		}
+
+		format_dev_t(buffer, cc->dev->bdev->bd_dev);
+		snprintf(result + offset, maxlen - offset, " " SECTOR_FORMAT
+		         " %s " SECTOR_FORMAT, cc->iv_offset,
+		         buffer, cc->start);
+		break;
+	}
+	return 0;
+}
+
+static struct target_type crypt_target = {
+	.name   = "crypt",
+	.module = THIS_MODULE,
+	.ctr    = crypt_ctr,
+	.dtr    = crypt_dtr,
+	.map    = crypt_map,
+	.status = crypt_status,
+};
+
+static int __init dm_crypt_init(void)
+{
+	int r;
+
+	_crypt_io_pool = kmem_cache_create("dm-crypt_io",
+	                                   sizeof(struct crypt_io),
+	                                   0, 0, NULL, NULL);
+	if (!_crypt_io_pool)
+		return -ENOMEM;
+
+	r = dm_daemon_start(&_kcryptd, "kcryptd", kcryptd_do_work);
+	if (r) {
+		DMERR("couldn't create kcryptd: %d", r);
+		kmem_cache_destroy(_crypt_io_pool);
+		return r;
+	}
+
+	r = dm_register_target(&crypt_target);
+	if (r < 0) {
+		DMERR("crypt: register failed %d", r);
+		dm_daemon_stop(&_kcryptd);
+		kmem_cache_destroy(_crypt_io_pool);
+	}
+
+	return r;
+}
+
+static void __exit dm_crypt_exit(void)
+{
+	int r = dm_unregister_target(&crypt_target);
+
+	if (r < 0)
+		DMERR("crypt: unregister failed %d", r);
+
+	dm_daemon_stop(&_kcryptd);
+	kmem_cache_destroy(_crypt_io_pool);
+}
+
+module_init(dm_crypt_init);
+module_exit(dm_crypt_exit);
+
+MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
+MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
+MODULE_LICENSE("GPL");
diff -Nur linux-2.6.0.orig/drivers/md/dm-daemon.c linux-2.6.0/drivers/md/dm-daemon.c
--- linux-2.6.0.orig/drivers/md/dm-daemon.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.0/drivers/md/dm-daemon.c	2004-01-02 21:32:07.227545880 +0100
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ *
+ * This file is released under the LGPL.
+ */
+
+#include "dm.h"
+#include "dm-daemon.h"
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/completion.h>
+
+static int daemon(void *arg)
+{
+	struct dm_daemon *dd = (struct dm_daemon *) arg;
+	DECLARE_WAITQUEUE(wq, current);
+
+	daemonize("%s", dd->name);
+
+	atomic_set(&dd->please_die, 0);
+
+	add_wait_queue(&dd->job_queue, &wq);
+
+	complete(&dd->start);
+
+	/*
+	 * dd->fn() could do anything, very likely it will
+	 * suspend. So we can't set the state to
+	 * TASK_INTERRUPTIBLE before calling it. In order to
+	 * prevent a race with a waking thread we do this little
+	 * dance with the dd->woken variable.
+	 */
+	while (1) {
+		if (atomic_read(&dd->please_die))
+			goto out;
+
+		if (current->flags & PF_FREEZE)
+			refrigerator(PF_IOTHREAD);
+
+		do {
+			set_current_state(TASK_RUNNING);
+			atomic_set(&dd->woken, 0);
+			dd->fn();
+			set_current_state(TASK_INTERRUPTIBLE);
+
+		} while (atomic_read(&dd->woken));
+
+		schedule();
+	}
+
+ out:
+	remove_wait_queue(&dd->job_queue, &wq);
+	complete_and_exit(&dd->run, 0);
+}
+
+int dm_daemon_start(struct dm_daemon *dd, const char *name, jiffy_t (*fn)(void))
+{
+	pid_t pid = 0;
+
+	/*
+	 * Initialise the dm_daemon.
+	 */
+	dd->fn = fn;
+	strncpy(dd->name, name, sizeof(dd->name) - 1);
+	init_completion(&dd->start);
+	init_completion(&dd->run);
+	init_waitqueue_head(&dd->job_queue);
+
+	/*
+	 * Start the new thread.
+	 */
+	pid = kernel_thread(daemon, dd, CLONE_KERNEL);
+	if (pid <= 0) {
+		DMERR("Failed to start %s thread", name);
+		return -EAGAIN;
+	}
+
+	/*
+	 * Wait for the daemon to signal that it has started.
+	 */
+	wait_for_completion(&dd->start);
+
+	return 0;
+}
+
+void dm_daemon_stop(struct dm_daemon *dd)
+{
+	atomic_set(&dd->please_die, 1);
+	dm_daemon_wake(dd);
+	wait_for_completion(&dd->run);
+}
+
+void dm_daemon_wake(struct dm_daemon *dd)
+{
+	atomic_set(&dd->woken, 1);
+	wake_up_interruptible(&dd->job_queue);
+}
+
+EXPORT_SYMBOL(dm_daemon_start);
+EXPORT_SYMBOL(dm_daemon_stop);
+EXPORT_SYMBOL(dm_daemon_wake);
diff -Nur linux-2.6.0.orig/drivers/md/dm-daemon.h linux-2.6.0/drivers/md/dm-daemon.h
--- linux-2.6.0.orig/drivers/md/dm-daemon.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.0/drivers/md/dm-daemon.h	2004-01-02 21:32:07.233544968 +0100
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef DM_DAEMON_H
+#define DM_DAEMON_H
+
+#include <asm/atomic.h>
+#include <linux/completion.h>
+
+/*
+ * The daemon's work function returns a *hint* as to when it
+ * should next be woken up.
+ */
+struct dm_daemon {
+	jiffy_t (*fn)(void);
+	char name[16];
+	atomic_t please_die;
+	struct completion start;
+	struct completion run;
+
+	atomic_t woken;
+	wait_queue_head_t job_queue;
+};
+
+int dm_daemon_start(struct dm_daemon *dd, const char *name, jiffy_t (*fn)(void));
+void dm_daemon_stop(struct dm_daemon *dd);
+void dm_daemon_wake(struct dm_daemon *dd);
+int dm_daemon_running(struct dm_daemon *dd);
+
+#endif
diff -Nur linux-2.6.0.orig/drivers/md/dm.h linux-2.6.0/drivers/md/dm.h
--- linux-2.6.0.orig/drivers/md/dm.h	2003-11-24 02:31:53.000000000 +0100
+++ linux-2.6.0/drivers/md/dm.h	2004-01-02 21:32:07.234544816 +0100
@@ -20,6 +20,12 @@
 #define DMINFO(f, x...) printk(KERN_INFO DM_NAME ": " f "\n" , ## x)
 
 /*
+ * FIXME: There must be a better place for this.
+ */
+typedef typeof(jiffies) jiffy_t;
+
+
+/*
  * FIXME: I think this should be with the definition of sector_t
  * in types.h.
  */
972 | diff -Nur linux-2.6.0.orig/drivers/md/drivers/md/dm-crypt.c linux-2.6.0/drivers/md/drivers/md/dm-crypt.c | |
973 | --- linux-2.6.0.orig/drivers/md/drivers/md/dm-crypt.c 1970-01-01 01:00:00.000000000 +0100 | |
974 | +++ linux-2.6.0/drivers/md/drivers/md/dm-crypt.c 2004-01-02 21:32:07.242543600 +0100 | |
975 | @@ -0,0 +1,788 @@ | |
976 | +/* | |
977 | + * Copyright (C) 2003 Christophe Saout <christophe@saout.de> | |
978 | + * | |
979 | + * This file is released under the GPL. | |
980 | + */ | |
981 | + | |
982 | +#include "dm.h" | |
983 | +#include "dm-daemon.h" | |
984 | + | |
985 | +#include <linux/module.h> | |
986 | +#include <linux/init.h> | |
987 | +#include <linux/bio.h> | |
988 | +#include <linux/mempool.h> | |
989 | +#include <linux/slab.h> | |
990 | +#include <linux/crypto.h> | |
991 | +#include <linux/spinlock.h> | |
992 | +#include <asm/scatterlist.h> | |
993 | + | |
994 | +/* | |
995 | + * per bio private data | |
996 | + */ | |
997 | +struct crypt_io { | |
998 | + struct dm_target *target; | |
999 | + struct bio *bio; | |
1000 | + struct bio *first_clone; | |
1001 | + atomic_t pending; | |
1002 | + int error; | |
1003 | +}; | |
1004 | + | |
1005 | +/* | |
1006 | + * context holding the current state of a multi-part conversion | |
1007 | + */ | |
1008 | +struct convert_context { | |
1009 | + struct bio *bio_in; | |
1010 | + struct bio *bio_out; | |
1011 | + unsigned int offset_in; | |
1012 | + unsigned int offset_out; | |
1013 | + int idx_in; | |
1014 | + int idx_out; | |
1015 | + sector_t sector; | |
1016 | + int write; | |
1017 | +}; | |
1018 | + | |
1019 | +/* | |
1020 | + * Crypt: maps a linear range of a block device | |
1021 | + * and encrypts / decrypts at the same time. | |
1022 | + */ | |
1023 | +struct crypt_c { | |
1024 | + struct dm_dev *dev; | |
1025 | + sector_t start; | |
1026 | + | |
1027 | + /* | |
1028 | + * pool for per bio private data and | |
1029 | + * for encryption buffer pages | |
1030 | + */ | |
1031 | + mempool_t *io_pool; | |
1032 | + mempool_t *page_pool; | |
1033 | + | |
1034 | + /* | |
1035 | + * crypto related data | |
1036 | + */ | |
1037 | + struct crypto_tfm *tfm; | |
1038 | + sector_t iv_offset; | |
1039 | + int iv_size; | |
1040 | + int key_size; | |
1041 | + u8 key[0]; | |
1042 | +}; | |
1043 | + | |
1044 | +#define MIN_IOS 256 | |
1045 | +#define MIN_POOL_PAGES 16 | |
1046 | +#define MIN_BIO_PAGES 8 | |
1047 | + | |
1048 | +static kmem_cache_t *_io_cache; | |
1049 | + | |
1050 | +/* | |
1051 | + * Mempool alloc and free functions for the page and io pool | |
1052 | + */ | |
1053 | +static void *mempool_alloc_page(int gfp_mask, void *data) | |
1054 | +{ | |
1055 | + return alloc_page(gfp_mask); | |
1056 | +} | |
1057 | + | |
1058 | +static void mempool_free_page(void *page, void *data) | |
1059 | +{ | |
1060 | + __free_page(page); | |
1061 | +} | |
1062 | + | |
1063 | +static inline struct page *crypt_alloc_page(struct crypt_c *cc, int gfp_mask) | |
1064 | +{ | |
1065 | + return mempool_alloc(cc->page_pool, gfp_mask); | |
1066 | +} | |
1067 | + | |
1068 | +static inline void crypt_free_page(struct crypt_c *cc, struct page *page) | |
1069 | +{ | |
1070 | + mempool_free(page, cc->page_pool); | |
1071 | +} | |
1072 | + | |
1073 | +static inline struct crypt_io *crypt_alloc_io(struct crypt_c *cc) | |
1074 | +{ | |
1075 | + return mempool_alloc(cc->io_pool, GFP_NOIO); | |
1076 | +} | |
1077 | + | |
1078 | +static inline void crypt_free_io(struct crypt_c *cc, struct crypt_io *io) | |
1079 | +{ | |
1080 | + return mempool_free(io, cc->io_pool); | |
1081 | +} | |
1082 | + | |
1083 | +/* | |
1084 | + * Encrypt / decrypt a single sector, source and destination buffers | |
1085 | + * are stored in scatterlists. In CBC mode initialise the "previous | |
1086 | + * block" with the sector number (it's not a real chaining because | |
1087 | + * it would not allow to seek on the device...) | |
1088 | + */ | |
1089 | +static inline int | |
1090 | +crypt_convert_scatterlist(struct crypt_c *cc, struct scatterlist *out, | |
1091 | + struct scatterlist *in, unsigned int length, | |
1092 | + int write, sector_t sector) | |
1093 | +{ | |
1094 | + u8 iv[cc->iv_size]; | |
1095 | + int r; | |
1096 | + | |
1097 | + if (cc->iv_size) { | |
1098 | + *(u32 *)iv = cpu_to_le32(sector & 0xffffffff); | |
1099 | + if (cc->iv_size > sizeof(u32) / sizeof(u8)) | |
1100 | + memset(iv + (sizeof(u32) / sizeof(u8)), 0, | |
1101 | + cc->iv_size - (sizeof(u32) / sizeof(u8))); | |
1102 | + | |
1103 | + if (write) | |
1104 | + r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv); | |
1105 | + else | |
1106 | + r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv); | |
1107 | + } else { | |
1108 | + if (write) | |
1109 | + r = crypto_cipher_encrypt(cc->tfm, out, in, length); | |
1110 | + else | |
1111 | + r = crypto_cipher_decrypt(cc->tfm, out, in, length); | |
1112 | + } | |
1113 | + | |
1114 | + return r; | |
1115 | +} | |
1116 | + | |
1117 | +static void | |
1118 | +crypt_convert_init(struct crypt_c *cc, struct convert_context *ctx, | |
1119 | + struct bio *bio_out, struct bio *bio_in, | |
1120 | + sector_t sector, int write) | |
1121 | +{ | |
1122 | + ctx->bio_in = bio_in; | |
1123 | + ctx->bio_out = bio_out; | |
1124 | + ctx->offset_in = 0; | |
1125 | + ctx->offset_out = 0; | |
1126 | + ctx->idx_in = bio_in ? bio_in->bi_idx : 0; | |
1127 | + ctx->idx_out = bio_out ? bio_out->bi_idx : 0; | |
1128 | + ctx->sector = sector + cc->iv_offset; | |
1129 | + ctx->write = write; | |
1130 | +} | |
1131 | + | |
1132 | +/* | |
1133 | + * Encrypt / decrypt data from one bio to another one (may be the same) | |
1134 | + */ | |
1135 | +static int crypt_convert(struct crypt_c *cc, | |
1136 | + struct convert_context *ctx) | |
1137 | +{ | |
1138 | + int r = 0; | |
1139 | + | |
1140 | + while(ctx->idx_in < ctx->bio_in->bi_vcnt && | |
1141 | + ctx->idx_out < ctx->bio_out->bi_vcnt) { | |
1142 | + struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); | |
1143 | + struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); | |
1144 | + struct scatterlist sg_in = { | |
1145 | + .page = bv_in->bv_page, | |
1146 | + .offset = bv_in->bv_offset + ctx->offset_in, | |
1147 | + .length = 1 << SECTOR_SHIFT | |
1148 | + }; | |
1149 | + struct scatterlist sg_out = { | |
1150 | + .page = bv_out->bv_page, | |
1151 | + .offset = bv_out->bv_offset + ctx->offset_out, | |
1152 | + .length = 1 << SECTOR_SHIFT | |
1153 | + }; | |
1154 | + | |
1155 | + ctx->offset_in += sg_in.length; | |
1156 | + if (ctx->offset_in >= bv_in->bv_len) { | |
1157 | + ctx->offset_in = 0; | |
1158 | + ctx->idx_in++; | |
1159 | + } | |
1160 | + | |
1161 | + ctx->offset_out += sg_out.length; | |
1162 | + if (ctx->offset_out >= bv_out->bv_len) { | |
1163 | + ctx->offset_out = 0; | |
1164 | + ctx->idx_out++; | |
1165 | + } | |
1166 | + | |
1167 | + r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length, | |
1168 | + ctx->write, ctx->sector); | |
1169 | + if (r < 0) | |
1170 | + break; | |
1171 | + | |
1172 | + ctx->sector++; | |
1173 | + } | |
1174 | + | |
1175 | + return r; | |
1176 | +} | |
1177 | + | |
1178 | +/* | |
1179 | + * Generate a new unfragmented bio with the given size | |
1180 | + * This should never violate the device limitations | |
1181 | + * May return a smaller bio when running out of pages | |
1182 | + */ | |
1183 | +static struct bio * | |
1184 | +crypt_alloc_buffer(struct crypt_c *cc, unsigned int size, | |
1185 | + struct bio *base_bio, int *bio_vec_idx) | |
1186 | +{ | |
1187 | + struct bio *bio; | |
1188 | + int nr_iovecs = dm_div_up(size, PAGE_SIZE); | |
1189 | + int gfp_mask = GFP_NOIO | __GFP_HIGHMEM; | |
1190 | + int i; | |
1191 | + | |
1192 | + if (base_bio) | |
1193 | + bio = bio_clone(base_bio, GFP_NOIO); | |
1194 | + else | |
1195 | + bio = bio_alloc(GFP_NOIO, nr_iovecs); | |
1196 | + if (!bio) | |
1197 | + return NULL; | |
1198 | + | |
1199 | + /* if the last bio was not complete, continue where that one ends */ | |
1200 | + bio->bi_idx = *bio_vec_idx; | |
1201 | + bio->bi_vcnt = *bio_vec_idx; | |
1202 | + bio->bi_size = 0; | |
1203 | + | |
1204 | + /* bio->bi_idx pages have already been allocated */ | |
1205 | + size -= bio->bi_idx * PAGE_SIZE; | |
1206 | + | |
1207 | + for(i = bio->bi_idx; i < nr_iovecs; i++) { | |
1208 | + struct bio_vec *bv = bio_iovec_idx(bio, i); | |
1209 | + | |
1210 | + bv->bv_page = crypt_alloc_page(cc, gfp_mask); | |
1211 | + if (!bv->bv_page) | |
1212 | + break; | |
1213 | + | |
1214 | + /* | |
1215 | + * if additional pages cannot be allocated without waiting | |
1216 | + * return a partially allocated bio, the caller will then try | |
1217 | + * to allocate additional bios while submitting this partial bio | |
1218 | + */ | |
1219 | + if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1)) | |
1220 | + gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; | |
1221 | + | |
1222 | + bv->bv_offset = 0; | |
1223 | + if (size > PAGE_SIZE) | |
1224 | + bv->bv_len = PAGE_SIZE; | |
1225 | + else | |
1226 | + bv->bv_len = size; | |
1227 | + | |
1228 | + bio->bi_size += bv->bv_len; | |
1229 | + bio->bi_vcnt++; | |
1230 | + size -= bv->bv_len; | |
1231 | + } | |
1232 | + | |
1233 | + if (!bio->bi_size) { | |
1234 | + bio_put(bio); | |
1235 | + return NULL; | |
1236 | + } | |
1237 | + | |
1238 | + /* | |
1239 | + * remember the last bio_vec allocated to be able to correctly | |
1240 | + * continue after splitting caused by memory pressure | |
1241 | + */ | |
1242 | + *bio_vec_idx = bio->bi_vcnt; | |
1243 | + | |
1244 | + return bio; | |
1245 | +} | |
1246 | + | |
1247 | +static void crypt_free_buffer_pages(struct crypt_c *cc, struct bio *bio, | |
1248 | + unsigned int bytes) | |
1249 | +{ | |
1250 | + int i = bio->bi_idx; | |
1251 | + | |
1252 | + while(bytes) { | |
1253 | + struct bio_vec *bv = bio_iovec_idx(bio, i++); | |
1254 | + crypt_free_page(cc, bv->bv_page); | |
1255 | + bytes -= bv->bv_len; | |
1256 | + } | |
1257 | +} | |
1258 | + | |
1259 | +/* | |
1260 | + * One of the bios was finished. Check for completion of | |
1261 | + * the whole request and correctly cleanup the buffer. | |
1262 | + */ | |
1263 | +static void dec_pending(struct crypt_io *io, int error) | |
1264 | +{ | |
1265 | + struct crypt_c *cc = (struct crypt_c *) io->target->private; | |
1266 | + | |
1267 | + if (!atomic_dec_and_test(&io->pending)) | |
1268 | + return; | |
1269 | + | |
1270 | + if (io->first_clone) | |
1271 | + bio_put(io->first_clone); | |
1272 | + | |
1273 | + if (error < 0) | |
1274 | + io->error = error; | |
1275 | + | |
1276 | + if (io->bio) | |
1277 | + bio_endio(io->bio, io->bio->bi_size, io->error); | |
1278 | + | |
1279 | + crypt_free_io(cc, io); | |
1280 | +} | |
1281 | + | |
1282 | +/* | |
1283 | + * kcryptd: | |
1284 | + * | |
1285 | + * Needed because we can't decrypt when called in an | |
1286 | + * interrupt context, so returning bios from read requests get | |
1287 | + * queued here. | |
1288 | + */ | |
1289 | +static spinlock_t _kcryptd_lock = SPIN_LOCK_UNLOCKED; | |
1290 | +static struct bio *_bio_head; | |
1291 | +static struct bio *_bio_tail; | |
1292 | + | |
1293 | +static struct dm_daemon _kcryptd; | |
1294 | + | |
1295 | +/* | |
1296 | + * Fetch a list of the complete bios. | |
1297 | + */ | |
1298 | +static struct bio *kcryptd_get_bios(void) | |
1299 | +{ | |
1300 | + struct bio *bio; | |
1301 | + | |
1302 | + spin_lock_irq(&_kcryptd_lock); | |
1303 | + bio = _bio_head; | |
1304 | + if (bio) | |
1305 | + _bio_head = _bio_tail = NULL; | |
1306 | + spin_unlock_irq(&_kcryptd_lock); | |
1307 | + | |
1308 | + return bio; | |
1309 | +} | |
1310 | + | |
1311 | +/* | |
1312 | + * Append bio to work queue | |
1313 | + */ | |
1314 | +static void kcryptd_queue_bio(struct bio *bio) | |
1315 | +{ | |
1316 | + unsigned long flags; | |
1317 | + | |
1318 | + spin_lock_irqsave(&_kcryptd_lock, flags); | |
1319 | + if (_bio_tail) | |
1320 | + _bio_tail->bi_next = bio; | |
1321 | + else | |
1322 | + _bio_head = bio; | |
1323 | + _bio_tail = bio; | |
1324 | + spin_unlock_irqrestore(&_kcryptd_lock, flags); | |
1325 | +} | |
1326 | + | |
1327 | +static jiffy_t kcryptd(void) | |
1328 | +{ | |
1329 | + int r; | |
1330 | + struct bio *bio; | |
1331 | + struct bio *next_bio; | |
1332 | + struct crypt_io *io; | |
1333 | + struct crypt_c *cc; | |
1334 | + struct convert_context ctx; | |
1335 | + | |
1336 | + bio = kcryptd_get_bios(); | |
1337 | + | |
1338 | + while (bio) { | |
1339 | + io = (struct crypt_io *) bio->bi_private; | |
1340 | + cc = (struct crypt_c *) io->target->private; | |
1341 | + | |
1342 | + crypt_convert_init(cc, &ctx, io->bio, io->bio, | |
1343 | + io->bio->bi_sector - io->target->begin, 0); | |
1344 | + r = crypt_convert(cc, &ctx); | |
1345 | + | |
1346 | + next_bio = bio->bi_next; | |
1347 | + | |
1348 | + bio->bi_next = NULL; | |
1349 | + bio_put(bio); | |
1350 | + dec_pending(io, r); | |
1351 | + | |
1352 | + bio = next_bio; | |
1353 | + } | |
1354 | + | |
1355 | + return 0; | |
1356 | +} | |
1357 | + | |
1358 | +/* | |
1359 | + * Decode key from its hex representation | |
1360 | + */ | |
1361 | +static int crypt_decode_key(u8 *key, char *hex, int size) | |
1362 | +{ | |
1363 | + int i; | |
1364 | + for(i = 0; i < size; i++) { | |
1365 | + int digits; | |
1366 | + if (*hex >= 'a' && *hex <= 'f') | |
1367 | + digits = *hex - ('a' - 10); | |
1368 | + else if (*hex >= 'A' && *hex <= 'F') | |
1369 | + digits = *hex - ('A' - 10); | |
1370 | + else if (*hex >= '0' && *hex <= '9') | |
1371 | + digits = *hex - '0'; | |
1372 | + else | |
1373 | + return -EINVAL; | |
1374 | + | |
1375 | + digits <<= 4; | |
1376 | + hex++; | |
1377 | + | |
1378 | + if (*hex >= 'a' && *hex <= 'f') | |
1379 | + digits += *hex - ('a' - 10); | |
1380 | + else if (*hex >= 'A' && *hex <= 'F') | |
1381 | + digits += *hex - ('A' - 10); | |
1382 | + else if (*hex >= '0' && *hex <= '9') | |
1383 | + digits += *hex - '0'; | |
1384 | + else | |
1385 | + return -EINVAL; | |
1386 | + | |
1387 | + hex++; | |
1388 | + key[i] = (u8)digits; | |
1389 | + } | |
1390 | + | |
1391 | + if (*hex != '\0') | |
1392 | + return -EINVAL; | |
1393 | + | |
1394 | + return 0; | |
1395 | +} | |
1396 | + | |
1397 | +/* | |
1398 | + * Encode key into its hex representation | |
1399 | + */ | |
1400 | +static void crypt_encode_key(char *hex, u8 *key, int size) | |
1401 | +{ | |
1402 | + static char hex_digits[] = "0123456789abcdef"; | |
1403 | + int i; | |
1404 | + | |
1405 | + for(i = 0; i < size; i++) { | |
1406 | + *hex++ = hex_digits[*key >> 4]; | |
1407 | + *hex++ = hex_digits[*key & 0x0f]; | |
1408 | + key++; | |
1409 | + } | |
1410 | + | |
1411 | + *hex++ = '\0'; | |
1412 | +} | |
1413 | + | |
1414 | +/* | |
1415 | + * Construct an encryption mapping: | |
1416 | + * <cipher> <key> <iv_offset> <dev_path> <start> | |
1417 | + */ | |
1418 | +static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |
1419 | +{ | |
1420 | + struct crypt_c *cc; | |
1421 | + struct crypto_tfm *tfm; | |
1422 | + char *tmp; | |
1423 | + char *cipher; | |
1424 | + char *mode; | |
1425 | + int crypto_flags; | |
1426 | + int iv_size; | |
1427 | + int key_size; | |
1428 | + | |
1429 | + if (argc != 5) { | |
1430 | + ti->error = "dm-crypt: Not enough arguments"; | |
1431 | + return -EINVAL; | |
1432 | + } | |
1433 | + | |
1434 | + tmp = argv[0]; | |
1435 | + cipher = strsep(&tmp, "-"); | |
1436 | + mode = strsep(&tmp, "-"); | |
1437 | + | |
1438 | + if (tmp) | |
1439 | + DMWARN("dm-crypt: Unexpected additional cipher options"); | |
1440 | + | |
1441 | + if (!mode || strcmp(mode, "cbc") == 0) | |
1442 | + crypto_flags = CRYPTO_TFM_MODE_CBC; | |
1443 | + else if (strcmp(mode, "ecb") == 0) | |
1444 | + crypto_flags = CRYPTO_TFM_MODE_ECB; | |
1445 | + else { | |
1446 | + ti->error = "dm-crypt: Invalid chaining mode"; | |
1447 | + return -EINVAL; | |
1448 | + } | |
1449 | + | |
1450 | + tfm = crypto_alloc_tfm(cipher, crypto_flags); | |
1451 | + if (!tfm) { | |
1452 | + ti->error = "dm-crypt: Error allocating crypto tfm"; | |
1453 | + return -EINVAL; | |
1454 | + } | |
1455 | + | |
1456 | + key_size = strlen(argv[1]) >> 1; | |
1457 | + if (tfm->crt_u.cipher.cit_decrypt_iv && tfm->crt_u.cipher.cit_encrypt_iv) | |
1458 | + iv_size = max(crypto_tfm_alg_ivsize(tfm), sizeof(u32) / sizeof(u8)); | |
1459 | + else | |
1460 | + iv_size = 0; | |
1461 | + | |
1462 | + cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); | |
1463 | + if (cc == NULL) { | |
1464 | + ti->error = | |
1465 | + "dm-crypt: Cannot allocate transparent encryption context"; | |
1466 | + crypto_free_tfm(tfm); | |
1467 | + return -ENOMEM; | |
1468 | + } | |
1469 | + | |
1470 | + cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab, | |
1471 | + mempool_free_slab, _io_cache); | |
1472 | + if (!cc->io_pool) { | |
1473 | + ti->error = "dm-crypt: Cannot allocate crypt io mempool"; | |
1474 | + goto bad1; | |
1475 | + } | |
1476 | + | |
1477 | + cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page, | |
1478 | + mempool_free_page, NULL); | |
1479 | + if (!cc->page_pool) { | |
1480 | + ti->error = "dm-crypt: Cannot allocate page mempool"; | |
1481 | + goto bad2; | |
1482 | + } | |
1483 | + | |
1484 | + cc->tfm = tfm; | |
1485 | + cc->iv_size = iv_size; | |
1486 | + cc->key_size = key_size; | |
1487 | + if ((key_size == 0 && strcmp(argv[1], "-") != 0) | |
1488 | + || crypt_decode_key(cc->key, argv[1], key_size) < 0) { | |
1489 | + ti->error = "dm-crypt: Error decoding key"; | |
1490 | + goto bad3; | |
1491 | + } | |
1492 | + | |
1493 | + if (tfm->crt_u.cipher.cit_setkey(tfm, cc->key, key_size) < 0) { | |
1494 | + ti->error = "dm-crypt: Error setting key"; | |
1495 | + goto bad3; | |
1496 | + } | |
1497 | + | |
1498 | + if (sscanf(argv[2], SECTOR_FORMAT, &cc->iv_offset) != 1) { | |
1499 | + ti->error = "dm-crypt: Invalid iv_offset sector"; | |
1500 | + goto bad3; | |
1501 | + } | |
1502 | + | |
1503 | + if (sscanf(argv[4], SECTOR_FORMAT, &cc->start) != 1) { | |
1504 | + ti->error = "dm-crypt: Invalid device sector"; | |
1505 | + goto bad3; | |
1506 | + } | |
1507 | + | |
1508 | + if (dm_get_device(ti, argv[3], cc->start, ti->len, | |
1509 | + dm_table_get_mode(ti->table), &cc->dev)) { | |
1510 | + ti->error = "dm-crypt: Device lookup failed"; | |
1511 | + goto bad3; | |
1512 | + } | |
1513 | + | |
1514 | + ti->private = cc; | |
1515 | + return 0; | |
1516 | + | |
1517 | +bad3: | |
1518 | + mempool_destroy(cc->page_pool); | |
1519 | +bad2: | |
1520 | + mempool_destroy(cc->io_pool); | |
1521 | +bad1: | |
1522 | + crypto_free_tfm(tfm); | |
1523 | + kfree(cc); | |
1524 | + return -EINVAL; | |
1525 | +} | |
1526 | + | |
1527 | +static void crypt_dtr(struct dm_target *ti) | |
1528 | +{ | |
1529 | + struct crypt_c *cc = (struct crypt_c *) ti->private; | |
1530 | + | |
1531 | + mempool_destroy(cc->page_pool); | |
1532 | + mempool_destroy(cc->io_pool); | |
1533 | + | |
1534 | + crypto_free_tfm(cc->tfm); | |
1535 | + dm_put_device(ti, cc->dev); | |
1536 | + kfree(cc); | |
1537 | +} | |
1538 | + | |
1539 | +static int crypt_endio(struct bio *bio, unsigned int done, int error) | |
1540 | +{ | |
1541 | + struct crypt_io *io = (struct crypt_io *) bio->bi_private; | |
1542 | + struct crypt_c *cc = (struct crypt_c *) io->target->private; | |
1543 | + | |
1544 | + if (bio_rw(bio) == WRITE) { | |
1545 | + /* | |
1546 | + * free the processed pages, even if | |
1547 | + * it's only a partially completed write | |
1548 | + */ | |
1549 | + crypt_free_buffer_pages(cc, bio, done); | |
1550 | + } | |
1551 | + | |
1552 | + if (bio->bi_size) | |
1553 | + return 1; | |
1554 | + | |
1555 | + /* | |
1556 | + * successful reads get decrypted by the worker thread | |
1557 | + * because we never want to decrypt in an irq context | |
1558 | + */ | |
1559 | + if ((bio_rw(bio) == READ || bio_rw(bio) == READA) | |
1560 | + && bio_flagged(bio, BIO_UPTODATE)) { | |
1561 | + kcryptd_queue_bio(bio); | |
1562 | + dm_daemon_wake(&_kcryptd); | |
1563 | + return 0; | |
1564 | + } | |
1565 | + | |
1566 | + bio_put(bio); | |
1567 | + dec_pending(io, error); | |
1568 | + | |
1569 | + return error; | |
1570 | +} | |
1571 | + | |
1572 | +static int crypt_map(struct dm_target *ti, struct bio *bio) | |
1573 | +{ | |
1574 | + struct crypt_c *cc = (struct crypt_c *) ti->private; | |
1575 | + struct crypt_io *io = crypt_alloc_io(cc); | |
1576 | + struct bio *clone = NULL; | |
1577 | + struct convert_context ctx; | |
1578 | + unsigned int remaining = bio->bi_size; | |
1579 | + sector_t sector = bio->bi_sector - ti->begin; | |
1580 | + int bio_vec_idx = 0; | |
1581 | + int r = 0; | |
1582 | + | |
1583 | + io->target = ti; | |
1584 | + io->bio = bio; | |
1585 | + io->first_clone = NULL; | |
1586 | + io->error = 0; | |
1587 | + atomic_set(&io->pending, 1); /* hold a reference */ | |
1588 | + | |
1589 | + if (bio_rw(bio) == WRITE) | |
1590 | + crypt_convert_init(cc, &ctx, NULL, bio, sector, 1); | |
1591 | + | |
1592 | + /* | |
1593 | + * The allocated buffers can be smaller then the whole bio, | |
1594 | + * so repeat the whole process until all the data can be handled. | |
1595 | + */ | |
1596 | + while (remaining) { | |
1597 | + if (bio_rw(bio) == WRITE) { | |
1598 | + clone = crypt_alloc_buffer(cc, bio->bi_size, | |
1599 | + io->first_clone, | |
1600 | + &bio_vec_idx); | |
1601 | + if (clone) { | |
1602 | + ctx.bio_out = clone; | |
1603 | + r = crypt_convert(cc, &ctx); | |
1604 | + if (r < 0) { | |
1605 | + crypt_free_buffer_pages(cc, clone, | |
1606 | + clone->bi_size); | |
1607 | + bio_put(clone); | |
1608 | + goto cleanup; | |
1609 | + } | |
1610 | + } | |
1611 | + } else | |
1612 | + clone = bio_clone(bio, GFP_NOIO); | |
1613 | + | |
1614 | + if (!clone) { | |
1615 | + r = -ENOMEM; | |
1616 | + goto cleanup; | |
1617 | + } | |
1618 | + | |
1619 | + if (!io->first_clone) { | |
1620 | + /* | |
1621 | + * hold a reference to the first clone, because it holds | |
1622 | + * the bio_vec array and that needs to be released only | |
1623 | + * after all other clones are released | |
1624 | + */ | |
1625 | + bio_get(clone); | |
1626 | + io->first_clone = clone; | |
1627 | + } | |
1628 | + atomic_inc(&io->pending); | |
1629 | + | |
1630 | + clone->bi_private = io; | |
1631 | + clone->bi_end_io = crypt_endio; | |
1632 | + clone->bi_bdev = cc->dev->bdev; | |
1633 | + clone->bi_sector = cc->start + sector; | |
1634 | + clone->bi_rw = bio->bi_rw; | |
1635 | + | |
1636 | + remaining -= clone->bi_size; | |
1637 | + sector += bio_sectors(clone); | |
1638 | + | |
1639 | + generic_make_request(clone); | |
1640 | + } | |
1641 | + | |
1642 | + /* drop reference, clones could have returned before we reach this */ | |
1643 | + dec_pending(io, 0); | |
1644 | + return 0; | |
1645 | + | |
1646 | +cleanup: | |
1647 | + if (io->first_clone) { | |
1648 | + dec_pending(io, r); | |
1649 | + return 0; | |
1650 | + } | |
1651 | + | |
1652 | + /* if no bio has been dispatched yet, we can directly return the error */ | |
1653 | + crypt_free_io(cc, io); | |
1654 | + return r; | |
1655 | +} | |
1656 | + | |
1657 | +static int crypt_status(struct dm_target *ti, status_type_t type, | |
1658 | + char *result, unsigned int maxlen) | |
1659 | +{ | |
1660 | + struct crypt_c *cc = (struct crypt_c *) ti->private; | |
1661 | + char buffer[32]; | |
1662 | + const char *cipher; | |
1663 | + const char *mode = NULL; | |
1664 | + int offset; | |
1665 | + | |
1666 | + switch (type) { | |
1667 | + case STATUSTYPE_INFO: | |
1668 | + result[0] = '\0'; | |
1669 | + break; | |
1670 | + | |
1671 | + case STATUSTYPE_TABLE: | |
1672 | + cipher = crypto_tfm_alg_name(cc->tfm); | |
1673 | + | |
1674 | + switch(cc->tfm->crt_u.cipher.cit_mode) { | |
1675 | + case CRYPTO_TFM_MODE_CBC: | |
1676 | + mode = "cbc"; | |
1677 | + break; | |
1678 | + case CRYPTO_TFM_MODE_ECB: | |
1679 | + mode = "ecb"; | |
1680 | + break; | |
1681 | + default: | |
1682 | + BUG(); | |
1683 | + } | |
1684 | + | |
1685 | + snprintf(result, maxlen, "%s-%s ", cipher, mode); | |
1686 | + offset = strlen(result); | |
1687 | + | |
1688 | + if (cc->key_size > 0) { | |
1689 | + if ((maxlen - offset) < ((cc->key_size << 1) + 1)) | |
1690 | + return -ENOMEM; | |
1691 | + | |
1692 | + crypt_encode_key(result + offset, cc->key, cc->key_size); | |
1693 | + offset += cc->key_size << 1; | |
1694 | + } else { | |
1695 | + if (offset >= maxlen) | |
1696 | + return -ENOMEM; | |
1697 | + result[offset++] = '-'; | |
1698 | + } | |
1699 | + | |
1700 | + format_dev_t(buffer, cc->dev->bdev->bd_dev); | |
1701 | + snprintf(result + offset, maxlen - offset, " " SECTOR_FORMAT | |
1702 | + " %s " SECTOR_FORMAT, cc->iv_offset, | |
1703 | + buffer, cc->start); | |
1704 | + break; | |
1705 | + } | |
1706 | + return 0; | |
1707 | +} | |
+
+static struct target_type crypt_target = {
+	.name   = "crypt",
+	.module = THIS_MODULE,
+	.ctr    = crypt_ctr,
+	.dtr    = crypt_dtr,
+	.map    = crypt_map,
+	.status = crypt_status,
+};
+
+static int __init dm_crypt_init(void)
+{
+	int r;
+
+	_crypt_io_pool = kmem_cache_create("dm-crypt_io", sizeof(struct crypt_io),
+					   0, 0, NULL, NULL);
+	if (!_crypt_io_pool)
+		return -ENOMEM;
+
+	r = dm_daemon_start(&_kcryptd, "kcryptd", kcryptd);
+	if (r) {
+		DMERR("couldn't create kcryptd: %d", r);
+		kmem_cache_destroy(_crypt_io_pool);
+		return r;
+	}
+
+	r = dm_register_target(&crypt_target);
+	if (r < 0) {
+		DMERR("crypt: register failed %d", r);
+		dm_daemon_stop(&_kcryptd);
+		kmem_cache_destroy(_crypt_io_pool);
+	}
+
+	return r;
+}
+
+static void __exit dm_crypt_exit(void)
+{
+	int r = dm_unregister_target(&crypt_target);
+
+	if (r < 0)
+		DMERR("crypt: unregister failed %d", r);
+
+	dm_daemon_stop(&_kcryptd);
+	kmem_cache_destroy(_crypt_io_pool);
+}
+
+/*
+ * module hooks
+ */
+module_init(dm_crypt_init);
+module_exit(dm_crypt_exit);
+
+MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
+MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
+MODULE_LICENSE("GPL");
diff -Nur linux-2.6.0.orig/drivers/md/dm-daemon.c linux-2.6.0/drivers/md/dm-daemon.c
--- linux-2.6.0.orig/drivers/md/dm-daemon.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.0/drivers/md/dm-daemon.c	2004-01-02 21:32:07.244543296 +0100
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ *
+ * This file is released under the LGPL.
+ */
+
+#include "dm.h"
+#include "dm-daemon.h"
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/completion.h>
+
+static int daemon(void *arg)
+{
+	struct dm_daemon *dd = (struct dm_daemon *) arg;
+	DECLARE_WAITQUEUE(wq, current);
+
+	daemonize("%s", dd->name);
+
+	atomic_set(&dd->please_die, 0);
+
+	add_wait_queue(&dd->job_queue, &wq);
+
+	complete(&dd->start);
+
+	/*
+	 * dd->fn() could do anything; very likely it will sleep.
+	 * So we can't set the state to TASK_INTERRUPTIBLE before
+	 * calling it.  To prevent a race with a waking thread we
+	 * do this little dance with the dd->woken variable: the
+	 * waker sets it before waking us (see dm_daemon_wake),
+	 * so a wakeup arriving during dd->fn() reruns the loop.
+	 */
+	while (1) {
+		if (atomic_read(&dd->please_die))
+			goto out;
+
+		if (current->flags & PF_FREEZE)
+			refrigerator(PF_IOTHREAD);
+
+		do {
+			set_current_state(TASK_RUNNING);
+			atomic_set(&dd->woken, 0);
+			dd->fn();
+			set_current_state(TASK_INTERRUPTIBLE);
+
+		} while (atomic_read(&dd->woken));
+
+		schedule();
+	}
+
+ out:
+	remove_wait_queue(&dd->job_queue, &wq);
+	complete_and_exit(&dd->run, 0);
+}
+
+int dm_daemon_start(struct dm_daemon *dd, const char *name, jiffy_t (*fn)(void))
+{
+	pid_t pid = 0;
+
+	/*
+	 * Initialise the dm_daemon.
+	 */
+	dd->fn = fn;
+	strncpy(dd->name, name, sizeof(dd->name) - 1);
+	init_completion(&dd->start);
+	init_completion(&dd->run);
+	init_waitqueue_head(&dd->job_queue);
+
+	/*
+	 * Start the new thread.
+	 */
+	pid = kernel_thread(daemon, dd, CLONE_KERNEL);
+	if (pid <= 0) {
+		DMERR("Failed to start %s thread", name);
+		return -EAGAIN;
+	}
+
+	/*
+	 * Wait for the daemon to signal that it has started.
+	 */
+	wait_for_completion(&dd->start);
+
+	return 0;
+}
+
+void dm_daemon_stop(struct dm_daemon *dd)
+{
+	atomic_set(&dd->please_die, 1);
+	dm_daemon_wake(dd);
+	wait_for_completion(&dd->run);
+}
+
+void dm_daemon_wake(struct dm_daemon *dd)
+{
+	atomic_set(&dd->woken, 1);
+	wake_up_interruptible(&dd->job_queue);
+}
+
+EXPORT_SYMBOL(dm_daemon_start);
+EXPORT_SYMBOL(dm_daemon_stop);
+EXPORT_SYMBOL(dm_daemon_wake);
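Taken together, the three exported calls form a small worker-thread API;
dm-crypt above uses it for kcryptd. A hedged sketch of a client module,
using only calls from this file (the names _exampled, example_work,
example_init and example_exit are hypothetical, not part of the patch):

	static struct dm_daemon _exampled;

	/* Runs in the daemon thread; drain queued work here.  The return
	 * value is only an advisory wakeup hint (see dm-daemon.h), so a
	 * trivial client can return 0. */
	static jiffy_t example_work(void)
	{
		return 0;
	}

	static int __init example_init(void)
	{
		/* spawns the thread and blocks until it is ready */
		return dm_daemon_start(&_exampled, "exampled", example_work);
	}

	static void __exit example_exit(void)
	{
		/* asks the thread to die and waits for it to exit */
		dm_daemon_stop(&_exampled);
	}

Producers would then call dm_daemon_wake(&_exampled) after queueing work,
which is exactly how dm-crypt nudges kcryptd.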
diff -Nur linux-2.6.0.orig/drivers/md/dm-daemon.h linux-2.6.0/drivers/md/dm-daemon.h
--- linux-2.6.0.orig/drivers/md/dm-daemon.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.0/drivers/md/dm-daemon.h	2004-01-02 21:32:07.244543296 +0100
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef DM_DAEMON_H
+#define DM_DAEMON_H
+
+#include <asm/atomic.h>
+#include <linux/completion.h>
+
+/*
+ * The daemon's work function returns a *hint* as to when it
+ * should next be woken up.
+ */
+struct dm_daemon {
+	jiffy_t (*fn)(void);
+	char name[16];
+	atomic_t please_die;
+	struct completion start;
+	struct completion run;
+
+	atomic_t woken;
+	wait_queue_head_t job_queue;
+};
+
+int dm_daemon_start(struct dm_daemon *dd, const char *name, jiffy_t (*fn)(void));
+void dm_daemon_stop(struct dm_daemon *dd);
+void dm_daemon_wake(struct dm_daemon *dd);
+int dm_daemon_running(struct dm_daemon *dd);
+
+#endif
diff -Nur linux-2.6.0.orig/drivers/md/Kconfig linux-2.6.0/drivers/md/Kconfig
--- linux-2.6.0.orig/drivers/md/Kconfig	2003-11-24 02:31:11.000000000 +0100
+++ linux-2.6.0/drivers/md/Kconfig	2004-01-02 21:32:07.251542232 +0100
@@ -142,5 +142,17 @@
 	  Recent tools use a new version of the ioctl interface, only
 	  select this option if you intend using such tools.
 
+config DM_CRYPT
+	tristate "Crypt target support"
+	depends on BLK_DEV_DM && EXPERIMENTAL
+	select CRYPTO
+	---help---
+	  This device-mapper target allows you to create a device that
+	  transparently encrypts the data on it.  You'll need to enable
+	  the required ciphers in the cryptoapi configuration in order
+	  to use it.
+
+	  If unsure, say N.
+
 endmenu
 
diff -Nur linux-2.6.0.orig/drivers/md/Makefile linux-2.6.0/drivers/md/Makefile
--- linux-2.6.0.orig/drivers/md/Makefile	2003-11-24 02:32:03.000000000 +0100
+++ linux-2.6.0/drivers/md/Makefile	2004-01-02 21:32:07.251542232 +0100
@@ -3,7 +3,7 @@
 #
 
 dm-mod-objs	:= dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
-		   dm-ioctl.o
+		   dm-ioctl.o dm-daemon.o
 
 # Note: link order is important.  All raid personalities
 # and xor.o must come before md.o, as they each initialise
@@ -17,3 +17,4 @@
 obj-$(CONFIG_MD_MULTIPATH)	+= multipath.o
 obj-$(CONFIG_BLK_DEV_MD)	+= md.o
 obj-$(CONFIG_BLK_DEV_DM)	+= dm-mod.o
+obj-$(CONFIG_DM_CRYPT)		+= dm-crypt.o