1 diff -Nur linux-2.6.0.orig/drivers/md/dm-crypt.c linux-2.6.0/drivers/md/dm-crypt.c
2 --- linux-2.6.0.orig/drivers/md/dm-crypt.c 1970-01-01 01:00:00.000000000 +0100
3 +++ linux-2.6.0/drivers/md/dm-crypt.c 2004-01-02 21:34:12.424513048 +0100
6 + * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
8 + * This file is released under the GPL.
11 +#include <linux/module.h>
12 +#include <linux/init.h>
13 +#include <linux/kernel.h>
14 +#include <linux/bio.h>
15 +#include <linux/mempool.h>
16 +#include <linux/slab.h>
17 +#include <linux/crypto.h>
18 +#include <linux/spinlock.h>
19 +#include <asm/scatterlist.h>
22 +#include "dm-daemon.h"
25 + * per bio private data
28 + struct dm_target *target;
30 + struct bio *first_clone;
36 + * context holding the current state of a multi-part conversion
38 +struct convert_context {
40 + struct bio *bio_out;
41 + unsigned int offset_in;
42 + unsigned int offset_out;
50 + * Crypt: maps a linear range of a block device
51 + * and encrypts / decrypts at the same time.
53 +struct crypt_config {
58 + * pool for per bio private data and
59 + * for encryption buffer pages
62 + mempool_t *page_pool;
65 + * crypto related data
67 + struct crypto_tfm *tfm;
69 + int (*iv_generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
76 +#define MIN_POOL_PAGES 32
77 +#define MIN_BIO_PAGES 8
79 +static kmem_cache_t *_crypt_io_pool;
82 + * Mempool alloc and free functions for the page
84 +static void *mempool_alloc_page(int gfp_mask, void *data)
86 + return alloc_page(gfp_mask);
89 +static void mempool_free_page(void *page, void *data)
96 + * Different IV generation algorithms
98 +static int crypt_iv_plain(struct crypt_config *cc, u8 *iv, sector_t sector)
100 + *(u32 *)iv = cpu_to_le32(sector & 0xffffffff);
101 + if (cc->iv_size > sizeof(u32) / sizeof(u8))
102 + memset(iv + (sizeof(u32) / sizeof(u8)), 0,
103 + cc->iv_size - (sizeof(u32) / sizeof(u8)));
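The generator above builds the "plain" IV: the low 32 bits of the sector number, laid out little-endian and zero-padded to the cipher's IV size. A minimal userspace sketch of the same construction, with hypothetical values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void plain_iv(uint8_t *iv, size_t iv_size, uint64_t sector)
{
	uint32_t s = (uint32_t)(sector & 0xffffffff);

	memset(iv, 0, iv_size);				/* zero padding */
	for (size_t i = 0; i < 4 && i < iv_size; i++)
		iv[i] = (uint8_t)(s >> (8 * i));	/* little-endian */
}

int main(void)
{
	uint8_t iv[16];

	plain_iv(iv, sizeof(iv), 0x0102030405060708ULL);
	for (size_t i = 0; i < sizeof(iv); i++)
		printf("%02x", iv[i]);
	printf("\n");	/* prints 08070605 followed by 24 zeros */
	return 0;
}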
109 +crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
110 + struct scatterlist *in, unsigned int length,
111 + int write, sector_t sector)
113 + u8 iv[cc->iv_size];
116 + if (cc->iv_generator) {
117 + r = cc->iv_generator(cc, iv, sector);
122 + r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv);
124 + r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv);
127 + r = crypto_cipher_encrypt(cc->tfm, out, in, length);
129 + r = crypto_cipher_decrypt(cc->tfm, out, in, length);
136 +crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
137 + struct bio *bio_out, struct bio *bio_in,
138 + sector_t sector, int write)
140 + ctx->bio_in = bio_in;
141 + ctx->bio_out = bio_out;
142 + ctx->offset_in = 0;
143 + ctx->offset_out = 0;
144 + ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
145 + ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
146 + ctx->sector = sector + cc->iv_offset;
147 + ctx->write = write;
151 + * Encrypt / decrypt data from one bio to another (which may be the same bio)
153 +static int crypt_convert(struct crypt_config *cc,
154 + struct convert_context *ctx)
158 + while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
159 + ctx->idx_out < ctx->bio_out->bi_vcnt) {
160 + struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
161 + struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
162 + struct scatterlist sg_in = {
163 + .page = bv_in->bv_page,
164 + .offset = bv_in->bv_offset + ctx->offset_in,
165 + .length = 1 << SECTOR_SHIFT
167 + struct scatterlist sg_out = {
168 + .page = bv_out->bv_page,
169 + .offset = bv_out->bv_offset + ctx->offset_out,
170 + .length = 1 << SECTOR_SHIFT
173 + ctx->offset_in += sg_in.length;
174 + if (ctx->offset_in >= bv_in->bv_len) {
175 + ctx->offset_in = 0;
179 + ctx->offset_out += sg_out.length;
180 + if (ctx->offset_out >= bv_out->bv_len) {
181 + ctx->offset_out = 0;
185 + r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
186 + ctx->write, ctx->sector);
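The loop above walks the input and output bio_vecs with independent (index, offset) cursors, always in 512-byte sector steps, so segments of different sizes stay aligned per sector. A userspace sketch of just that cursor walk (segment sizes hypothetical):

#include <stdio.h>

#define SECTOR_SIZE 512

int main(void)
{
	/* two buffers holding the same 3072 bytes, split differently */
	unsigned int in[]  = { 1024, 2048 };
	unsigned int out[] = { 512, 512, 2048 };
	unsigned int idx_in = 0, off_in = 0, idx_out = 0, off_out = 0;
	unsigned long long sector = 0;

	while (idx_in < 2 && idx_out < 3) {
		printf("sector %llu: in[%u]+%u -> out[%u]+%u\n",
		       sector, idx_in, off_in, idx_out, off_out);

		off_in += SECTOR_SIZE;
		if (off_in >= in[idx_in]) {
			off_in = 0;
			idx_in++;
		}
		off_out += SECTOR_SIZE;
		if (off_out >= out[idx_out]) {
			off_out = 0;
			idx_out++;
		}
		sector++;
	}
	return 0;
}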
197 + * Generate a new unfragmented bio with the given size.
198 + * This should never violate the device limitations.
199 + * May return a smaller bio when running out of pages.
202 +crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
203 + struct bio *base_bio, int *bio_vec_idx)
206 + int nr_iovecs = dm_div_up(size, PAGE_SIZE);
207 + int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
208 + int flags = current->flags;
212 + * Tell VM to act less aggressively and fail earlier.
213 + * This is not necessary but increases throughput.
214 + * FIXME: Is this really intelligent?
216 + current->flags &= ~PF_MEMALLOC;
219 + bio = bio_clone(base_bio, GFP_NOIO);
221 + bio = bio_alloc(GFP_NOIO, nr_iovecs);
225 + /* if the last bio was not complete, continue where that one ended */
226 + bio->bi_idx = *bio_vec_idx;
227 + bio->bi_vcnt = *bio_vec_idx;
230 + /* bio->bi_idx pages have already been allocated */
231 + size -= bio->bi_idx * PAGE_SIZE;
233 + for(i = bio->bi_idx; i < nr_iovecs; i++) {
234 + struct bio_vec *bv = bio_iovec_idx(bio, i);
236 + bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
241 + * if additional pages cannot be allocated without waiting,
242 + * return a partially allocated bio; the caller will then try
243 + * to allocate additional bios while submitting this partial bio
245 + if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1))
246 + gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
249 + if (size > PAGE_SIZE)
250 + bv->bv_len = PAGE_SIZE;
254 + bio->bi_size += bv->bv_len;
256 + size -= bv->bv_len;
259 + if (flags & PF_MEMALLOC)
260 + current->flags |= PF_MEMALLOC;
262 + if (!bio->bi_size) {
268 + * Remember the last bio_vec allocated so that we can
269 + * continue correctly after a split caused by memory pressure.
271 + *bio_vec_idx = bio->bi_vcnt;
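The allocator above sleeps if necessary for the first MIN_BIO_PAGES pages, then switches to non-blocking attempts and returns a partial buffer rather than stalling under memory pressure. A userspace sketch of that degrade-and-return-partial strategy; the mempool is modeled as a fixed page budget and all numbers are hypothetical:

#include <stdio.h>
#include <stdlib.h>

#define MIN_BIO_PAGES 8

static int avail = 10;	/* pretend only 10 pages are free right now */

static void *get_page(int may_wait)
{
	if (avail > 0) {
		avail--;
		return malloc(4096);
	}
	return may_wait ? malloc(4096) : NULL;	/* waiting always succeeds here */
}

int main(void)
{
	void *pages[16];
	int nr_iovecs = 16, may_wait = 1, i;

	for (i = 0; i < nr_iovecs; i++) {
		pages[i] = get_page(may_wait);
		if (!pages[i])
			break;			/* hand back a partial buffer */
		if (i == MIN_BIO_PAGES - 1)
			may_wait = 0;		/* stop blocking after 8 pages */
	}
	printf("allocated %d of %d pages\n", i, nr_iovecs);
	while (i-- > 0)
		free(pages[i]);
	return 0;
}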
276 +static void crypt_free_buffer_pages(struct crypt_config *cc,
277 + struct bio *bio, unsigned int bytes)
279 + unsigned int start, end;
280 + struct bio_vec *bv;
284 + * This is ugly, but Jens Axboe thinks that using bi_idx in the
285 + * endio function is too dangerous at the moment, so I calculate the
286 + * correct position using bi_vcnt and bi_size.
287 + * The bv_offset and bv_len fields might already be modified but we
288 + * know that we always allocated whole pages.
289 + * A fix to the bi_idx issue in the kernel is in the works, so
290 + * we will hopefully be able to revert to the cleaner solution soon.
292 + i = bio->bi_vcnt - 1;
293 + bv = bio_iovec_idx(bio, i);
294 + end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - bio->bi_size;
295 + start = end - bytes;
297 + start >>= PAGE_SHIFT;
299 + end = bio->bi_vcnt;
301 + end >>= PAGE_SHIFT;
303 + for(i = start; i < end; i++) {
304 + bv = bio_iovec_idx(bio, i);
305 + BUG_ON(!bv->bv_page);
306 + mempool_free(bv->bv_page, cc->page_pool);
307 + bv->bv_page = NULL;
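Since bi_idx is off limits in the endio path, the code above recovers the just-completed byte range from bi_vcnt and the remaining bi_size, then rounds to whole pages so a partially completed page is kept. A worked userspace example of that arithmetic, assuming every bio_vec covers one full page (all values hypothetical):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* 4 whole pages allocated; 4096 bytes still outstanding after
	 * this completion, which covered 8192 bytes */
	unsigned int vcnt = 4, bv_offset = 0, bv_len = 1 << PAGE_SHIFT;
	unsigned int bi_size = 4096, bytes = 8192;
	unsigned int i = vcnt - 1;

	unsigned int end = (i << PAGE_SHIFT) + (bv_offset + bv_len) - bi_size;
	unsigned int start = end - bytes;

	start >>= PAGE_SHIFT;	/* first page of the completed range */
	end >>= PAGE_SHIFT;	/* round down: keep a partially done page */

	printf("free pages %u..%u\n", start, end - 1);	/* pages 1..2 */
	return 0;
}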
312 + * One of the bios was finished. Check for completion of
313 + * the whole request and correctly clean up the buffer.
315 +static void dec_pending(struct crypt_io *io, int error)
317 + struct crypt_config *cc = (struct crypt_config *) io->target->private;
322 + if (!atomic_dec_and_test(&io->pending))
325 + if (io->first_clone)
326 + bio_put(io->first_clone);
329 + bio_endio(io->bio, io->bio->bi_size, io->error);
331 + mempool_free(io, cc->io_pool);
337 + * Needed because it would be very unwise to do decryption in an
338 + * interrupt context, so bios returning from read requests get
339 + * queued here.
341 +static spinlock_t _kcryptd_lock = SPIN_LOCK_UNLOCKED;
342 +static struct bio *_kcryptd_bio_head;
343 +static struct bio *_kcryptd_bio_tail;
345 +static struct dm_daemon _kcryptd;
348 + * Fetch the list of completed bios.
350 +static struct bio *kcryptd_get_bios(void)
354 + spin_lock_irq(&_kcryptd_lock);
355 + bio = _kcryptd_bio_head;
357 + _kcryptd_bio_head = _kcryptd_bio_tail = NULL;
358 + spin_unlock_irq(&_kcryptd_lock);
364 + * Append bio to work queue
366 +static void kcryptd_queue_bio(struct bio *bio)
368 + unsigned long flags;
370 + bio->bi_next = NULL;
371 + spin_lock_irqsave(&_kcryptd_lock, flags);
372 + if (_kcryptd_bio_tail)
373 + _kcryptd_bio_tail->bi_next = bio;
375 + _kcryptd_bio_head = bio;
376 + _kcryptd_bio_tail = bio;
377 + spin_unlock_irqrestore(&_kcryptd_lock, flags);
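The queue is an intrusive singly linked list threaded through bi_next, with head and tail pointers guarded by a spinlock; the consumer detaches the whole list in one critical section. A userspace sketch of the same pattern, a pthread mutex standing in for the spinlock (build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next;	/* plays the role of bi_next */
	int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head, *tail;

static void queue_node(struct node *n)
{
	n->next = NULL;
	pthread_mutex_lock(&lock);
	if (tail)
		tail->next = n;
	else
		head = n;
	tail = n;
	pthread_mutex_unlock(&lock);
}

static struct node *get_all(void)
{
	pthread_mutex_lock(&lock);
	struct node *n = head;
	head = tail = NULL;
	pthread_mutex_unlock(&lock);
	return n;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };

	queue_node(&a);
	queue_node(&b);
	for (struct node *n = get_all(); n; n = n->next)
		printf("node %d\n", n->id);
	return 0;
}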
380 +static jiffy_t kcryptd_do_work(void)
384 + struct bio *next_bio;
385 + struct crypt_io *io;
386 + struct crypt_config *cc;
387 + struct convert_context ctx;
389 + bio = kcryptd_get_bios();
392 + io = (struct crypt_io *) bio->bi_private;
393 + cc = (struct crypt_config *) io->target->private;
395 + crypt_convert_init(cc, &ctx, io->bio, io->bio,
396 + io->bio->bi_sector - io->target->begin, 0);
397 + r = crypt_convert(cc, &ctx);
399 + next_bio = bio->bi_next;
400 + bio->bi_next = NULL;
403 + dec_pending(io, r);
412 + * Decode key from its hex representation
414 +static int crypt_decode_key(u8 *key, char *hex, int size)
422 + for(i = 0; i < size; i++) {
423 + buffer[0] = *hex++;
424 + buffer[1] = *hex++;
426 + key[i] = (u8)simple_strtoul(buffer, &endp, 16);
428 + if (endp != &buffer[2])
439 + * Encode key into its hex representation
441 +static void crypt_encode_key(char *hex, u8 *key, int size)
443 + static char hex_digits[] = "0123456789abcdef";
446 + for(i = 0; i < size; i++) {
447 + *hex++ = hex_digits[*key >> 4];
448 + *hex++ = hex_digits[*key & 0x0f];
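The codec above maps each key byte to two lowercase hex digits and back, rejecting input that is not a full pair of hex digits. A userspace round-trip sketch, with strtoul standing in for the kernel's simple_strtoul:

#include <stdio.h>
#include <stdlib.h>

static int decode_key(unsigned char *key, const char *hex, int size)
{
	char buf[3] = { 0 };
	char *endp;

	for (int i = 0; i < size; i++) {
		buf[0] = *hex++;
		buf[1] = *hex++;
		key[i] = (unsigned char)strtoul(buf, &endp, 16);
		if (endp != &buf[2])
			return -1;	/* not two valid hex digits */
	}
	return 0;
}

static void encode_key(char *hex, const unsigned char *key, int size)
{
	static const char digits[] = "0123456789abcdef";

	for (int i = 0; i < size; i++) {
		*hex++ = digits[key[i] >> 4];
		*hex++ = digits[key[i] & 0x0f];
	}
	*hex = '\0';
}

int main(void)
{
	unsigned char key[4];
	char out[9];

	if (decode_key(key, "deadbeef", sizeof(key)) == 0) {
		encode_key(out, key, sizeof(key));
		printf("%s\n", out);	/* deadbeef */
	}
	return 0;
}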
456 + * Construct an encryption mapping:
457 + * <cipher> <key> <iv_offset> <dev_path> <start>
459 +static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
461 + struct crypt_config *cc;
462 + struct crypto_tfm *tfm;
470 + ti->error = "dm-crypt: Not enough arguments";
475 + cipher = strsep(&tmp, "-");
476 + mode = strsep(&tmp, "-");
479 + DMWARN("dm-crypt: Unexpected additional cipher options");
481 + key_size = strlen(argv[1]) >> 1;
483 + cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
486 + "dm-crypt: Cannot allocate transparent encryption context";
490 + if (!mode || strcmp(mode, "plain") == 0)
491 + cc->iv_generator = crypt_iv_plain;
492 + else if (strcmp(mode, "ecb") == 0)
493 + cc->iv_generator = NULL;
495 + ti->error = "dm-crypt: Invalid chaining mode";
499 + if (cc->iv_generator)
500 + crypto_flags = CRYPTO_TFM_MODE_CBC;
502 + crypto_flags = CRYPTO_TFM_MODE_ECB;
504 + tfm = crypto_alloc_tfm(cipher, crypto_flags);
506 + ti->error = "dm-crypt: Error allocating crypto tfm";
510 + if (tfm->crt_u.cipher.cit_decrypt_iv && tfm->crt_u.cipher.cit_encrypt_iv)
511 + /* at least a 32 bit sector number should fit in our buffer */
512 + cc->iv_size = max(crypto_tfm_alg_ivsize(tfm), sizeof(u32) / sizeof(u8));
515 + if (cc->iv_generator) {
516 + DMWARN("dm-crypt: Selected cipher does not support IVs");
517 + cc->iv_generator = NULL;
521 + cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
522 + mempool_free_slab, _crypt_io_pool);
523 + if (!cc->io_pool) {
524 + ti->error = "dm-crypt: Cannot allocate crypt io mempool";
528 + cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page,
529 + mempool_free_page, NULL);
530 + if (!cc->page_pool) {
531 + ti->error = "dm-crypt: Cannot allocate page mempool";
536 + cc->key_size = key_size;
537 + if ((key_size == 0 && strcmp(argv[1], "-") != 0)
538 + || crypt_decode_key(cc->key, argv[1], key_size) < 0) {
539 + ti->error = "dm-crypt: Error decoding key";
543 + if (tfm->crt_u.cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
544 + ti->error = "dm-crypt: Error setting key";
548 + if (sscanf(argv[2], SECTOR_FORMAT, &cc->iv_offset) != 1) {
549 + ti->error = "dm-crypt: Invalid iv_offset sector";
553 + if (sscanf(argv[4], SECTOR_FORMAT, &cc->start) != 1) {
554 + ti->error = "dm-crypt: Invalid device sector";
558 + if (dm_get_device(ti, argv[3], cc->start, ti->len,
559 + dm_table_get_mode(ti->table), &cc->dev)) {
560 + ti->error = "dm-crypt: Device lookup failed";
568 + mempool_destroy(cc->page_pool);
570 + mempool_destroy(cc->io_pool);
572 + crypto_free_tfm(tfm);
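For reference, the target-specific arguments parsed above sit after the generic start and length (in sectors) of a device-mapper table line. A hypothetical line for a 100 MiB AES volume, with a made-up 128-bit key and device path, would read:

0 204800 crypt aes-plain 0123456789abcdef0123456789abcdef 0 /dev/sdb1 0

Here aes-plain selects the cipher and IV mode, the 32 hex digits decode to a 16-byte key, iv_offset is 0, and the mapping starts at sector 0 of /dev/sdb1.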
578 +static void crypt_dtr(struct dm_target *ti)
580 + struct crypt_config *cc = (struct crypt_config *) ti->private;
582 + mempool_destroy(cc->page_pool);
583 + mempool_destroy(cc->io_pool);
585 + crypto_free_tfm(cc->tfm);
586 + dm_put_device(ti, cc->dev);
590 +static int crypt_endio(struct bio *bio, unsigned int done, int error)
592 + struct crypt_io *io = (struct crypt_io *) bio->bi_private;
593 + struct crypt_config *cc = (struct crypt_config *) io->target->private;
595 + if (bio_rw(bio) == WRITE) {
597 + * free the processed pages, even if
598 + * it's only a partially completed write
600 + crypt_free_buffer_pages(cc, bio, done);
607 + * successful reads are decrypted by the worker thread
609 + if ((bio_rw(bio) == READ || bio_rw(bio) == READA)
610 + && bio_flagged(bio, BIO_UPTODATE)) {
611 + kcryptd_queue_bio(bio);
612 + dm_daemon_wake(&_kcryptd);
617 + dec_pending(io, error);
622 +static int crypt_map(struct dm_target *ti, struct bio *bio)
624 + struct crypt_config *cc = (struct crypt_config *) ti->private;
625 + struct crypt_io *io = mempool_alloc(cc->io_pool, GFP_NOIO);
626 + struct bio *clone = NULL;
627 + struct convert_context ctx;
628 + unsigned int remaining = bio->bi_size;
629 + sector_t sector = bio->bi_sector - ti->begin;
630 + int bio_vec_idx = 0;
635 + io->first_clone = NULL;
637 + atomic_set(&io->pending, 1); /* hold a reference */
639 + if (bio_rw(bio) == WRITE)
640 + crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);
643 + * The allocated buffers can be smaller than the whole bio,
644 + * so repeat the whole process until all the data can be handled.
646 + while (remaining) {
647 + if (bio_rw(bio) == WRITE) {
648 + clone = crypt_alloc_buffer(cc, bio->bi_size,
652 + ctx.bio_out = clone;
653 + r = crypt_convert(cc, &ctx);
655 + crypt_free_buffer_pages(cc, clone,
662 + clone = bio_clone(bio, GFP_NOIO);
669 + if (!io->first_clone) {
671 + * hold a reference to the first clone, because it
672 + * holds the bio_vec array and that can't be freed
673 + * before all other clones are released
676 + io->first_clone = clone;
678 + atomic_inc(&io->pending);
680 + clone->bi_private = io;
681 + clone->bi_end_io = crypt_endio;
682 + clone->bi_bdev = cc->dev->bdev;
683 + clone->bi_sector = cc->start + sector;
684 + clone->bi_rw = bio->bi_rw;
686 + remaining -= clone->bi_size;
687 + sector += bio_sectors(clone);
689 + generic_make_request(clone);
692 + /* drop reference, clones could have returned before we reach this */
693 + dec_pending(io, 0);
697 + if (io->first_clone) {
698 + dec_pending(io, r);
702 + /* if no bio has been dispatched yet, we can directly return the error */
703 + mempool_free(io, cc->io_pool);
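crypt_map relies on a small reference-counting protocol: io->pending starts at 1 so the request cannot complete while clones are still being dispatched, every clone takes another reference, and whoever drops the count to zero finishes the io. A userspace sketch with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending;

static void dec_pending(const char *who)
{
	if (atomic_fetch_sub(&pending, 1) == 1)
		printf("%s: dropped the last reference, complete the io\n", who);
}

int main(void)
{
	atomic_store(&pending, 1);		/* the submitter's reference */

	for (int i = 0; i < 3; i++) {
		atomic_fetch_add(&pending, 1);	/* one reference per clone */
		dec_pending("clone");		/* a clone may finish early */
	}
	dec_pending("submitter");		/* drop the initial reference */
	return 0;
}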
707 +static int crypt_status(struct dm_target *ti, status_type_t type,
708 + char *result, unsigned int maxlen)
710 + struct crypt_config *cc = (struct crypt_config *) ti->private;
712 + const char *cipher;
713 + const char *mode = NULL;
717 + case STATUSTYPE_INFO:
721 + case STATUSTYPE_TABLE:
722 + cipher = crypto_tfm_alg_name(cc->tfm);
724 + switch(cc->tfm->crt_u.cipher.cit_mode) {
725 + case CRYPTO_TFM_MODE_CBC:
728 + case CRYPTO_TFM_MODE_ECB:
735 + snprintf(result, maxlen, "%s-%s ", cipher, mode);
736 + offset = strlen(result);
738 + if (cc->key_size > 0) {
739 + if ((maxlen - offset) < ((cc->key_size << 1) + 1))
742 + crypt_encode_key(result + offset, cc->key, cc->key_size);
743 + offset += cc->key_size << 1;
745 + if (offset >= maxlen)
747 + result[offset++] = '-';
750 + format_dev_t(buffer, cc->dev->bdev->bd_dev);
751 + snprintf(result + offset, maxlen - offset, " " SECTOR_FORMAT
752 + " %s " SECTOR_FORMAT, cc->iv_offset,
753 + buffer, cc->start);
759 +static struct target_type crypt_target = {
761 + .module = THIS_MODULE,
765 + .status = crypt_status,
768 +static int __init dm_crypt_init(void)
772 + _crypt_io_pool = kmem_cache_create("dm-crypt_io",
773 + sizeof(struct crypt_io),
775 + if (!_crypt_io_pool)
778 + r = dm_daemon_start(&_kcryptd, "kcryptd", kcryptd_do_work);
780 + DMERR("couldn't create kcryptd: %d", r);
781 + kmem_cache_destroy(_crypt_io_pool);
785 + r = dm_register_target(&crypt_target);
787 + DMERR("crypt: register failed %d", r);
788 + dm_daemon_stop(&_kcryptd);
789 + kmem_cache_destroy(_crypt_io_pool);
795 +static void __exit dm_crypt_exit(void)
797 + int r = dm_unregister_target(&crypt_target);
800 + DMERR("crypt: unregister failed %d", r);
802 + dm_daemon_stop(&_kcryptd);
803 + kmem_cache_destroy(_crypt_io_pool);
806 +module_init(dm_crypt_init);
807 +module_exit(dm_crypt_exit);
809 +MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
810 +MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
811 +MODULE_LICENSE("GPL");
812 diff -Nur linux-2.6.0.orig/drivers/md/dm-daemon.c linux-2.6.0/drivers/md/dm-daemon.c
813 --- linux-2.6.0.orig/drivers/md/dm-daemon.c 1970-01-01 01:00:00.000000000 +0100
814 +++ linux-2.6.0/drivers/md/dm-daemon.c 2004-01-02 21:32:07.227545880 +0100
817 + * Copyright (C) 2003 Sistina Software
819 + * This file is released under the LGPL.
823 +#include "dm-daemon.h"
825 +#include <linux/module.h>
826 +#include <linux/sched.h>
827 +#include <linux/suspend.h>
828 +#include <linux/completion.h>
830 +static int daemon(void *arg)
832 + struct dm_daemon *dd = (struct dm_daemon *) arg;
833 + DECLARE_WAITQUEUE(wq, current);
835 + daemonize("%s", dd->name);
837 + atomic_set(&dd->please_die, 0);
839 + add_wait_queue(&dd->job_queue, &wq);
841 + complete(&dd->start);
844 + * dd->fn() could do anything, very likely it will
845 + * suspend. So we can't set the state to
846 + * TASK_INTERRUPTIBLE before calling it. In order to
847 + * prevent a race with a waking thread we do this little
848 + * dance with the dd->woken variable.
851 + if (atomic_read(&dd->please_die))
854 + if (current->flags & PF_FREEZE)
855 + refrigerator(PF_IOTHREAD);
858 + set_current_state(TASK_RUNNING);
859 + atomic_set(&dd->woken, 0);
861 + set_current_state(TASK_INTERRUPTIBLE);
863 + } while (atomic_read(&dd->woken));
869 + remove_wait_queue(&dd->job_queue, &wq);
870 + complete_and_exit(&dd->run, 0);
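The woken/please_die dance above exists to avoid a lost wakeup: the waker sets woken before signalling, and the worker re-checks woken after each pass of its work function before committing to sleep. A userspace sketch of the same protocol, with a pthread condition variable standing in for the kernel wait queue and hypothetical names (build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static atomic_int woken, please_die;

static void do_work(void)
{
	usleep(1000);	/* could take a long time, like dd->fn() */
}

static void *worker(void *arg)
{
	(void)arg;
	while (!atomic_load(&please_die)) {
		do {
			atomic_store(&woken, 0);
			do_work();
			/* a wake() that hit during do_work() set woken */
		} while (atomic_load(&woken));

		pthread_mutex_lock(&lock);
		while (!atomic_load(&woken) && !atomic_load(&please_die))
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

static void wake(void)
{
	atomic_store(&woken, 1);	/* set the flag before signalling */
	pthread_mutex_lock(&lock);
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	wake();
	usleep(10000);
	atomic_store(&please_die, 1);
	wake();
	pthread_join(t, NULL);
	puts("worker stopped");
	return 0;
}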
873 +int dm_daemon_start(struct dm_daemon *dd, const char *name, jiffy_t (*fn)(void))
878 + * Initialise the dm_daemon.
881 + strncpy(dd->name, name, sizeof(dd->name) - 1);
882 + init_completion(&dd->start);
883 + init_completion(&dd->run);
884 + init_waitqueue_head(&dd->job_queue);
887 + * Start the new thread.
889 + pid = kernel_thread(daemon, dd, CLONE_KERNEL);
891 + DMERR("Failed to start %s thread", name);
896 + * wait for the daemon to complete its startup.
898 + wait_for_completion(&dd->start);
903 +void dm_daemon_stop(struct dm_daemon *dd)
905 + atomic_set(&dd->please_die, 1);
906 + dm_daemon_wake(dd);
907 + wait_for_completion(&dd->run);
910 +void dm_daemon_wake(struct dm_daemon *dd)
912 + atomic_set(&dd->woken, 1);
913 + wake_up_interruptible(&dd->job_queue);
916 +EXPORT_SYMBOL(dm_daemon_start);
917 +EXPORT_SYMBOL(dm_daemon_stop);
918 +EXPORT_SYMBOL(dm_daemon_wake);
919 diff -Nur linux-2.6.0.orig/drivers/md/dm-daemon.h linux-2.6.0/drivers/md/dm-daemon.h
920 --- linux-2.6.0.orig/drivers/md/dm-daemon.h 1970-01-01 01:00:00.000000000 +0100
921 +++ linux-2.6.0/drivers/md/dm-daemon.h 2004-01-02 21:32:07.233544968 +0100
924 + * Copyright (C) 2003 Sistina Software
926 + * This file is released under the LGPL.
932 +#include <asm/atomic.h>
933 +#include <linux/completion.h>
936 + * The daemon's work function returns a *hint* as to when it
937 + * should next be woken up.
940 + jiffy_t (*fn)(void);
942 + atomic_t please_die;
943 + struct completion start;
944 + struct completion run;
947 + wait_queue_head_t job_queue;
950 +int dm_daemon_start(struct dm_daemon *dd, const char *name, jiffy_t (*fn)(void));
951 +void dm_daemon_stop(struct dm_daemon *dd);
952 +void dm_daemon_wake(struct dm_daemon *dd);
953 +int dm_daemon_running(struct dm_daemon *dd);
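A usage sketch for this interface, kernel context only and with hypothetical names; the jiffy_t return value is the wake-up hint described above (0 serves as a neutral placeholder here):

static struct dm_daemon _worker;

static jiffy_t my_work(void)
{
	/* drain whatever work has been queued ... */
	return 0;
}

static int __init my_init(void)
{
	int r = dm_daemon_start(&_worker, "kmydaemon", my_work);

	if (r)
		return r;
	dm_daemon_wake(&_worker);	/* kick the thread once work exists */
	return 0;
}

static void __exit my_exit(void)
{
	dm_daemon_stop(&_worker);	/* wakes the thread, waits for exit */
}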
956 diff -Nur linux-2.6.0.orig/drivers/md/dm.h linux-2.6.0/drivers/md/dm.h
957 --- linux-2.6.0.orig/drivers/md/dm.h 2003-11-24 02:31:53.000000000 +0100
958 +++ linux-2.6.0/drivers/md/dm.h 2004-01-02 21:32:07.234544816 +0100
960 #define DMINFO(f, x...) printk(KERN_INFO DM_NAME ": " f "\n" , ## x)
963 + * FIXME: There must be a better place for this.
965 +typedef typeof(jiffies) jiffy_t;
969 * FIXME: I think this should be with the definition of sector_t
1908 diff -Nur linux-2.6.0.orig/drivers/md/Kconfig linux-2.6.0/drivers/md/Kconfig
1909 --- linux-2.6.0.orig/drivers/md/Kconfig 2003-11-24 02:31:11.000000000 +0100
1910 +++ linux-2.6.0/drivers/md/Kconfig 2004-01-02 21:32:07.251542232 +0100
1911 @@ -142,5 +142,17 @@
1912 Recent tools use a new version of the ioctl interface, only
1913 select this option if you intend using such tools.
1915 +config DM_CRYPT
1916 +	tristate "Crypt target support"
1917 + depends on BLK_DEV_DM && EXPERIMENTAL
1920 + This device-mapper target allows you to create a device that
1921 + transparently encrypts the data on it. You'll need to activate
1922 + the required ciphers in the cryptoapi configuration in order to
1923 + be able to use it.
1929 diff -Nur linux-2.6.0.orig/drivers/md/Makefile linux-2.6.0/drivers/md/Makefile
1930 --- linux-2.6.0.orig/drivers/md/Makefile 2003-11-24 02:32:03.000000000 +0100
1931 +++ linux-2.6.0/drivers/md/Makefile 2004-01-02 21:32:07.251542232 +0100
1935 dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
1937 + dm-ioctl.o dm-daemon.o
1939 # Note: link order is important. All raid personalities
1940 # and xor.o must come before md.o, as they each initialise
1942 obj-$(CONFIG_MD_MULTIPATH) += multipath.o
1943 obj-$(CONFIG_BLK_DEV_MD) += md.o
1944 obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
1945 +obj-$(CONFIG_DM_CRYPT) += dm-crypt.o