1 diff -ruN linux-2.4/crypto/Config.in linux-2.4-cl/crypto/Config.in
2 --- linux-2.4/crypto/Config.in 2003-08-16 19:11:38.783804504 +0200
3 +++ linux-2.4-cl/crypto/Config.in 2003-08-16 17:45:20.596330070 +0200
5 mainmenu_option next_comment
6 comment 'Cryptographic options'
8 -if [ "$CONFIG_INET_AH" = "y" -o \
9 +if [ "$CONFIG_BLK_DEV_CRYPTOLOOP" = "y" -o \
10 + "$CONFIG_BLK_DEV_CRYPTOLOOP" = "m" -o \
11 + "$CONFIG_INET_AH" = "y" -o \
12 "$CONFIG_INET_AH" = "m" -o \
13 "$CONFIG_INET_ESP" = "y" -o \
14 "$CONFIG_INET_ESP" = "m" -o \
15 diff -ruN linux-2.4/drivers/block/Config.in linux-2.4-cl/drivers/block/Config.in
16 --- linux-2.4/drivers/block/Config.in 2003-08-16 19:08:10.836128909 +0200
17 +++ linux-2.4-cl/drivers/block/Config.in 2003-08-16 19:28:45.732770341 +0200
19 dep_tristate 'Micro Memory MM5415 Battery Backed RAM support (EXPERIMENTAL)' CONFIG_BLK_DEV_UMEM $CONFIG_PCI $CONFIG_EXPERIMENTAL
21 tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
22 +dep_tristate ' Cryptoloop support' CONFIG_BLK_DEV_CRYPTOLOOP $CONFIG_BLK_DEV_LOOP
23 dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
25 tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
26 diff -ruN linux-2.4/drivers/block/cryptoloop.c linux-2.4-cl/drivers/block/cryptoloop.c
27 --- linux-2.4/drivers/block/cryptoloop.c 1970-01-01 01:00:00.000000000 +0100
28 +++ linux-2.4-cl/drivers/block/cryptoloop.c 2003-08-16 18:30:05.268601777 +0200
31 + Linux loop encryption enabling module
33 + Copyright (C) 2002 Herbert Valerio Riedel <hvr@gnu.org>
34 + Copyright (C) 2003 Fruhwirth Clemens <clemens@endorphin.org>
36 + This module is free software; you can redistribute it and/or modify
37 + it under the terms of the GNU General Public License as published by
38 + the Free Software Foundation; either version 2 of the License, or
39 + (at your option) any later version.
41 + This module is distributed in the hope that it will be useful,
42 + but WITHOUT ANY WARRANTY; without even the implied warranty of
43 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
44 + GNU General Public License for more details.
46 + You should have received a copy of the GNU General Public License
47 + along with this module; if not, write to the Free Software
48 + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
51 +#include <linux/module.h>
53 +#include <linux/init.h>
54 +#include <linux/string.h>
55 +#include <linux/crypto.h>
56 +#include <linux/blkdev.h>
57 +#include <linux/loop.h>
58 +#include <asm/semaphore.h>
59 +#include <asm/uaccess.h>
60 +#include <asm/scatterlist.h>
62 +MODULE_LICENSE("GPL");
63 +MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI");
64 +MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>");
67 +cryptoloop_init(struct loop_device *lo, /* const */ struct loop_info *info)
70 + char cms[LO_NAME_SIZE]; /* cipher-mode string */
73 + char *cmsp = cms; /* c-m string pointer */
74 + struct crypto_tfm *tfm = NULL;
76 + /* encryption breaks for non sector aligned offsets */
78 + if (info->lo_offset % LOOP_IV_SECTOR_SIZE)
81 + strncpy(cms, info->lo_name, LO_NAME_SIZE);
82 + cms[LO_NAME_SIZE - 1] = 0;
83 + cipher = strsep(&cmsp, "-");
84 + mode = strsep(&cmsp, "-");
86 + if (mode == NULL || strcmp(mode, "cbc") == 0)
87 + tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC);
88 + else if (strcmp(mode, "ecb") == 0)
89 + tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB);
93 + err = tfm->crt_u.cipher.cit_setkey(tfm, info->lo_encrypt_key,
94 + info->lo_encrypt_key_size);
103 + crypto_free_tfm(tfm);
109 +typedef int (*encdec_t)(struct crypto_tfm *tfm,
110 + struct scatterlist *sg_out,
111 + struct scatterlist *sg_in,
112 + unsigned int nsg, u8 *iv);
115 +cryptoloop_transfer(struct loop_device *lo, int cmd, char *raw_buf,
116 + char *loop_buf, int size, sector_t IV)
118 + struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
119 + struct scatterlist sg_out = { 0, };
120 + struct scatterlist sg_in = { 0, };
122 + encdec_t encdecfunc;
129 + encdecfunc = tfm->crt_u.cipher.cit_decrypt_iv;
133 + encdecfunc = tfm->crt_u.cipher.cit_encrypt_iv;
137 + const int sz = min(size, LOOP_IV_SECTOR_SIZE);
138 + u32 iv[4] = { 0, };
139 + iv[0] = cpu_to_le32(IV & 0xffffffff);
141 + sg_in.page = virt_to_page(in);
142 + sg_in.offset = (unsigned long)in & ~PAGE_MASK;
145 + sg_out.page = virt_to_page(out);
146 + sg_out.offset = (unsigned long)out & ~PAGE_MASK;
147 + sg_out.length = sz;
149 + encdecfunc(tfm, &sg_out, &sg_in, sz, (u8 *)iv);
162 +cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
168 +cryptoloop_release(struct loop_device *lo)
170 + struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
172 + crypto_free_tfm(tfm);
173 + lo->key_data = NULL;
176 + printk(KERN_ERR "cryptoloop_release(): tfm == NULL?\n");
180 +static struct loop_func_table cryptoloop_funcs = {
181 + .number = LO_CRYPT_CRYPTOAPI,
182 + .init = cryptoloop_init,
183 + .ioctl = cryptoloop_ioctl,
184 + .transfer = cryptoloop_transfer,
185 + .release = cryptoloop_release,
186 + /* .owner = THIS_MODULE */
190 +init_cryptoloop(void)
192 + int rc = loop_register_transfer(&cryptoloop_funcs);
195 + printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
200 +cleanup_cryptoloop(void)
202 + if (loop_unregister_transfer(LO_CRYPT_CRYPTOAPI))
204 + "cryptoloop: loop_unregister_transfer failed\n");
207 +module_init(init_cryptoloop);
208 +module_exit(cleanup_cryptoloop);
209 diff -ruN linux-2.4/drivers/block/loop.c linux-2.4-cl/drivers/block/loop.c
210 --- linux-2.4/drivers/block/loop.c 2003-08-16 19:11:44.513638284 +0200
211 +++ linux-2.4-cl/drivers/block/loop.c 2003-08-16 19:38:49.487870443 +0200
213 * problem above. Encryption modules that used to rely on the old scheme
214 * should just call ->i_mapping->bmap() to calculate the physical block
217 + * IV is now passed as (512 byte) sector number.
218 + * Jari Ruusu <jari.ruusu@pp.inet.fi>, May 18 2001
220 + * External encryption module locking bug fixed.
221 + * Ingo Rohloff <rohloff@in.tum.de>, June 21 2001
223 + * Make device backed loop work with swap (pre-allocated buffers + queue rewrite).
224 + * Jari Ruusu <jari.ruusu@pp.inet.fi>, September 2 2001
226 + * File backed code now uses file->f_op->read/write. Based on Andrew Morton's idea.
227 + * Jari Ruusu <jari.ruusu@pp.inet.fi>, May 23 2002
230 #include <linux/config.h>
232 static struct loop_device *loop_dev;
233 static int *loop_sizes;
234 static int *loop_blksizes;
235 +static int *loop_hardsizes;
236 static devfs_handle_t devfs_handle; /* For the directory */
241 static int transfer_none(struct loop_device *lo, int cmd, char *raw_buf,
242 - char *loop_buf, int size, int real_block)
243 + char *loop_buf, int size, sector_t real_block)
245 - if (raw_buf != loop_buf) {
247 - memcpy(loop_buf, raw_buf, size);
249 - memcpy(raw_buf, loop_buf, size);
251 + /* this code is only called from file backed loop */
252 + /* and that code expects this function to be no-op */
254 + if (current->need_resched)
259 static int transfer_xor(struct loop_device *lo, int cmd, char *raw_buf,
260 - char *loop_buf, int size, int real_block)
261 + char *loop_buf, int size, sector_t real_block)
263 char *in, *out, *key;
265 @@ -118,12 +129,13 @@
266 keysize = lo->lo_encrypt_key_size;
267 for (i = 0; i < size; i++)
268 *out++ = *in++ ^ key[(i & 511) % keysize];
269 + if (current->need_resched)
274 static int none_status(struct loop_device *lo, struct loop_info *info)
276 - lo->lo_flags |= LO_FLAGS_BH_REMAP;
280 @@ -149,321 +161,367 @@
281 /* xfer_funcs[0] is special - its release function is never called */
282 struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
288 -#define MAX_DISK_SIZE 1024*1024*1024
290 -static int compute_loop_size(struct loop_device *lo, struct dentry * lo_dentry, kdev_t lodev)
292 - if (S_ISREG(lo_dentry->d_inode->i_mode))
293 - return (lo_dentry->d_inode->i_size - lo->lo_offset) >> BLOCK_SIZE_BITS;
294 - if (blk_size[MAJOR(lodev)])
295 - return blk_size[MAJOR(lodev)][MINOR(lodev)] -
296 - (lo->lo_offset >> BLOCK_SIZE_BITS);
297 - return MAX_DISK_SIZE;
299 + * First number of 'lo_prealloc' is the default number of RAM pages
300 + * to pre-allocate for each device backed loop. Every (configured)
301 + * device backed loop pre-allocates this amount of RAM pages unless
302 + * later 'lo_prealloc' numbers provide an override. 'lo_prealloc'
303 + * overrides are defined in pairs: loop_index,number_of_pages
305 +static int lo_prealloc[9] = { 125, 999, 0, 999, 0, 999, 0, 999, 0 };
306 +#define LO_PREALLOC_MIN 4 /* minimum user defined pre-allocated RAM pages */
307 +#define LO_PREALLOC_MAX 512 /* maximum user defined pre-allocated RAM pages */
310 +MODULE_PARM(lo_prealloc, "1-9i");
311 +MODULE_PARM_DESC(lo_prealloc, "Number of pre-allocated pages [,index,pages]...");
313 +static int __init lo_prealloc_setup(char *str)
317 + for (x = 0; x < (sizeof(lo_prealloc) / sizeof(int)); x++) {
318 + z = get_option(&str, &y);
320 + lo_prealloc[x] = y;
326 +__setup("lo_prealloc=", lo_prealloc_setup);
329 -static void figure_loop_size(struct loop_device *lo)
331 - loop_sizes[lo->lo_number] = compute_loop_size(lo,
332 - lo->lo_backing_file->f_dentry,
336 + * This is loop helper thread nice value in range
337 + * from 0 (low priority) to -20 (high priority).
339 +#if defined(DEF_NICE) && defined(DEF_COUNTER)
340 +static int lo_nice = -20; /* old scheduler default */
342 +static int lo_nice = -1; /* O(1) scheduler default */
345 -static int lo_send(struct loop_device *lo, struct buffer_head *bh, int bsize,
348 +MODULE_PARM(lo_nice, "1i");
349 +MODULE_PARM_DESC(lo_nice, "Loop thread scheduler nice (0 ... -20)");
351 +static int __init lo_nice_setup(char *str)
353 - struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
354 - struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
355 - struct address_space_operations *aops = mapping->a_ops;
357 - char *kaddr, *data;
358 - unsigned long index;
359 - unsigned size, offset;
362 - down(&mapping->host->i_sem);
363 - index = pos >> PAGE_CACHE_SHIFT;
364 - offset = pos & (PAGE_CACHE_SIZE - 1);
368 - int IV = index * (PAGE_CACHE_SIZE/bsize) + offset/bsize;
369 - int transfer_result;
372 - size = PAGE_CACHE_SIZE - offset;
376 - page = grab_cache_page(mapping, index);
379 - kaddr = kmap(page);
380 - if (aops->prepare_write(file, page, offset, offset+size))
382 - flush_dcache_page(page);
383 - transfer_result = lo_do_transfer(lo, WRITE, kaddr + offset, data, size, IV);
384 - if (transfer_result) {
386 - * The transfer failed, but we still write the data to
387 - * keep prepare/commit calls balanced.
389 - printk(KERN_ERR "loop: transfer error block %ld\n", index);
390 - memset(kaddr + offset, 0, size);
392 - if (aops->commit_write(file, page, offset, offset+size))
394 - if (transfer_result)
403 - page_cache_release(page);
405 - up(&mapping->host->i_sem);
411 - page_cache_release(page);
413 - up(&mapping->host->i_sem);
415 + if (get_option(&str, &y) == 1)
419 +__setup("lo_nice=", lo_nice_setup);
422 -struct lo_read_data {
423 - struct loop_device *lo;
428 + struct buffer_head **q0;
429 + struct buffer_head **q1;
430 + struct buffer_head **q2;
434 +} que_look_up_table;
436 -static int lo_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
437 +static void loop_prealloc_cleanup(struct loop_device *lo)
440 - unsigned long count = desc->count;
441 - struct lo_read_data *p = (struct lo_read_data*)desc->buf;
442 - struct loop_device *lo = p->lo;
443 - int IV = page->index * (PAGE_CACHE_SIZE/p->bsize) + offset/p->bsize;
448 - kaddr = kmap(page);
449 - if (lo_do_transfer(lo, READ, kaddr + offset, p->data, size, IV)) {
451 - printk(KERN_ERR "loop: transfer error block %ld\n",page->index);
452 - desc->error = -EINVAL;
456 - desc->count = count - size;
457 - desc->written += size;
462 -static int lo_receive(struct loop_device *lo, struct buffer_head *bh, int bsize,
465 - struct lo_read_data cookie;
466 - read_descriptor_t desc;
470 - cookie.data = bh->b_data;
471 - cookie.bsize = bsize;
473 - desc.count = bh->b_size;
474 - desc.buf = (char*)&cookie;
476 - spin_lock_irq(&lo->lo_lock);
477 - file = lo->lo_backing_file;
478 - spin_unlock_irq(&lo->lo_lock);
479 - do_generic_file_read(file, &pos, &desc, lo_read_actor);
481 + struct buffer_head *bh;
483 + while ((bh = lo->lo_bh_free)) {
484 + __free_page(bh->b_page);
485 + lo->lo_bh_free = bh->b_reqnext;
486 + bh->b_reqnext = NULL;
487 + kmem_cache_free(bh_cachep, bh);
491 -static inline int loop_get_bs(struct loop_device *lo)
492 +static int loop_prealloc_init(struct loop_device *lo, int y)
495 + struct buffer_head *bh;
498 - if (blksize_size[MAJOR(lo->lo_device)])
499 - bs = blksize_size[MAJOR(lo->lo_device)][MINOR(lo->lo_device)];
503 + y = lo_prealloc[0];
504 + for (x = 1; x < (sizeof(lo_prealloc) / sizeof(int)); x += 2) {
505 + if (lo_prealloc[x + 1] && (lo->lo_number == lo_prealloc[x])) {
506 + y = lo_prealloc[x + 1];
511 + lo->lo_bh_flsh = (y * 3) / 4;
514 + for (x = 0; x < y; x++) {
515 + bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL);
517 + loop_prealloc_cleanup(lo);
520 + bh->b_page = alloc_page(GFP_KERNEL);
522 + bh->b_reqnext = NULL;
523 + kmem_cache_free(bh_cachep, bh);
524 + loop_prealloc_cleanup(lo);
527 + bh->b_reqnext = lo->lo_bh_free;
528 + lo->lo_bh_free = bh;
533 -static inline unsigned long loop_get_iv(struct loop_device *lo,
534 - unsigned long sector)
535 +static void loop_add_queue_last(struct loop_device *lo, struct buffer_head *bh, struct buffer_head **q)
537 - int bs = loop_get_bs(lo);
538 - unsigned long offset, IV;
539 + unsigned long flags;
541 - IV = sector / (bs >> 9) + lo->lo_offset / bs;
542 - offset = ((sector % (bs >> 9)) << 9) + lo->lo_offset % bs;
545 + spin_lock_irqsave(&lo->lo_lock, flags);
547 + bh->b_reqnext = (*q)->b_reqnext;
548 + (*q)->b_reqnext = bh;
550 + bh->b_reqnext = bh;
553 + spin_unlock_irqrestore(&lo->lo_lock, flags);
556 + if (waitqueue_active(&lo->lo_bh_wait))
557 + wake_up_interruptible(&lo->lo_bh_wait);
560 -static int do_bh_filebacked(struct loop_device *lo, struct buffer_head *bh, int rw)
561 +static void loop_add_queue_first(struct loop_device *lo, struct buffer_head *bh, struct buffer_head **q)
566 - pos = ((loff_t) bh->b_rsector << 9) + lo->lo_offset;
569 - ret = lo_send(lo, bh, loop_get_bs(lo), pos);
571 - ret = lo_receive(lo, bh, loop_get_bs(lo), pos);
574 + spin_lock_irq(&lo->lo_lock);
576 + bh->b_reqnext = (*q)->b_reqnext;
577 + (*q)->b_reqnext = bh;
579 + bh->b_reqnext = bh;
582 + spin_unlock_irq(&lo->lo_lock);
585 -static void loop_end_io_transfer(struct buffer_head *bh, int uptodate);
586 -static void loop_put_buffer(struct buffer_head *bh)
587 +static struct buffer_head *loop_get_bh(struct loop_device *lo, int *list_nr,
588 + que_look_up_table *qt)
591 - * check b_end_io, may just be a remapped bh and not an allocated one
593 - if (bh && bh->b_end_io == loop_end_io_transfer) {
594 - __free_page(bh->b_page);
595 - kmem_cache_free(bh_cachep, bh);
596 + struct buffer_head *bh = NULL, *last;
598 + spin_lock_irq(&lo->lo_lock);
599 + if ((last = *qt->q0)) {
600 + bh = last->b_reqnext;
604 + last->b_reqnext = bh->b_reqnext;
605 + bh->b_reqnext = NULL;
607 + } else if ((last = *qt->q1)) {
608 + bh = last->b_reqnext;
612 + last->b_reqnext = bh->b_reqnext;
613 + bh->b_reqnext = NULL;
615 + } else if ((last = *qt->q2)) {
616 + bh = last->b_reqnext;
620 + last->b_reqnext = bh->b_reqnext;
621 + bh->b_reqnext = NULL;
624 + spin_unlock_irq(&lo->lo_lock);
629 - * Add buffer_head to back of pending list
631 -static void loop_add_bh(struct loop_device *lo, struct buffer_head *bh)
632 +static void loop_put_buffer(struct loop_device *lo, struct buffer_head *b)
637 spin_lock_irqsave(&lo->lo_lock, flags);
638 - if (lo->lo_bhtail) {
639 - lo->lo_bhtail->b_reqnext = bh;
640 - lo->lo_bhtail = bh;
642 - lo->lo_bh = lo->lo_bhtail = bh;
643 + b->b_reqnext = lo->lo_bh_free;
644 + lo->lo_bh_free = b;
645 + wk = lo->lo_bh_need;
646 spin_unlock_irqrestore(&lo->lo_lock, flags);
648 - up(&lo->lo_bh_mutex);
649 + if (wk && waitqueue_active(&lo->lo_bh_wait))
650 + wake_up_interruptible(&lo->lo_bh_wait);
654 - * Grab first pending buffer
656 -static struct buffer_head *loop_get_bh(struct loop_device *lo)
657 +static void loop_end_io_transfer_wr(struct buffer_head *bh, int uptodate)
659 - struct buffer_head *bh;
661 - spin_lock_irq(&lo->lo_lock);
662 - if ((bh = lo->lo_bh)) {
663 - if (bh == lo->lo_bhtail)
664 - lo->lo_bhtail = NULL;
665 - lo->lo_bh = bh->b_reqnext;
666 - bh->b_reqnext = NULL;
668 - spin_unlock_irq(&lo->lo_lock);
669 + struct loop_device *lo = &loop_dev[MINOR(bh->b_dev)];
670 + struct buffer_head *rbh = bh->b_private;
673 + rbh->b_reqnext = NULL;
674 + rbh->b_end_io(rbh, uptodate);
675 + loop_put_buffer(lo, bh);
676 + if (atomic_dec_and_test(&lo->lo_pending))
677 + wake_up_interruptible(&lo->lo_bh_wait);
681 - * when buffer i/o has completed. if BH_Dirty is set, this was a WRITE
682 - * and lo->transfer stuff has already been done. if not, it was a READ
683 - * so queue it for the loop thread and let it do the transfer out of
684 - * b_end_io context (we don't want to do decrypt of a page with irqs
687 -static void loop_end_io_transfer(struct buffer_head *bh, int uptodate)
688 +static void loop_end_io_transfer_rd(struct buffer_head *bh, int uptodate)
690 struct loop_device *lo = &loop_dev[MINOR(bh->b_dev)];
692 - if (!uptodate || test_bit(BH_Dirty, &bh->b_state)) {
693 - struct buffer_head *rbh = bh->b_private;
695 - rbh->b_end_io(rbh, uptodate);
696 - if (atomic_dec_and_test(&lo->lo_pending))
697 - up(&lo->lo_bh_mutex);
698 - loop_put_buffer(bh);
700 - loop_add_bh(lo, bh);
702 + loop_end_io_transfer_wr(bh, uptodate);
704 + loop_add_queue_last(lo, bh, &lo->lo_bhQue0);
707 static struct buffer_head *loop_get_buffer(struct loop_device *lo,
708 - struct buffer_head *rbh)
709 + struct buffer_head *rbh, int from_thread, int rw)
711 struct buffer_head *bh;
713 + unsigned long flags;
716 - * for xfer_funcs that can operate on the same bh, do that
718 - if (lo->lo_flags & LO_FLAGS_BH_REMAP) {
721 + spin_lock_irqsave(&lo->lo_lock, flags);
722 + bh = lo->lo_bh_free;
724 + lo->lo_bh_free = bh->b_reqnext;
726 + lo->lo_bh_need = 0;
729 + lo->lo_bh_need = 1;
731 + spin_unlock_irqrestore(&lo->lo_lock, flags);
733 + return (struct buffer_head *)0;
736 - bh = kmem_cache_alloc(bh_cachep, SLAB_NOIO);
740 - run_task_queue(&tq_disk);
741 - set_current_state(TASK_INTERRUPTIBLE);
742 - schedule_timeout(HZ);
744 - memset(bh, 0, sizeof(*bh));
746 + memset(bh, 0, sizeof(struct buffer_head));
749 + bh->b_private = rbh;
750 bh->b_size = rbh->b_size;
751 bh->b_dev = rbh->b_rdev;
752 + bh->b_rdev = lo->lo_device;
753 bh->b_state = (1 << BH_Req) | (1 << BH_Mapped) | (1 << BH_Lock);
754 + bh->b_data = page_address(bh->b_page);
755 + bh->b_end_io = (rw == WRITE) ? loop_end_io_transfer_wr : loop_end_io_transfer_rd;
756 + bh->b_rsector = rbh->b_rsector + (lo->lo_offset >> 9);
757 + init_waitqueue_head(&bh->b_wait);
760 - * easy way out, although it does waste some memory for < PAGE_SIZE
761 - * blocks... if highmem bounce buffering can get away with it,
765 - bh->b_page = alloc_page(GFP_NOIO);
771 - run_task_queue(&tq_disk);
772 - set_current_state(TASK_INTERRUPTIBLE);
773 - schedule_timeout(HZ);
775 +#define MAX_DISK_SIZE 1024*1024*1024
777 - bh->b_data = page_address(bh->b_page);
778 - bh->b_end_io = loop_end_io_transfer;
779 - bh->b_private = rbh;
780 - init_waitqueue_head(&bh->b_wait);
781 +static int compute_loop_size(struct loop_device *lo, struct dentry * lo_dentry, kdev_t lodev)
783 + if (S_ISREG(lo_dentry->d_inode->i_mode))
784 + return (lo_dentry->d_inode->i_size - lo->lo_offset) >> BLOCK_SIZE_BITS;
785 + if (blk_size[MAJOR(lodev)])
786 + return blk_size[MAJOR(lodev)][MINOR(lodev)] -
787 + (lo->lo_offset >> BLOCK_SIZE_BITS);
788 + return MAX_DISK_SIZE;
792 - bh->b_rsector = rbh->b_rsector + (lo->lo_offset >> 9);
793 - spin_lock_irq(&lo->lo_lock);
794 - bh->b_rdev = lo->lo_device;
795 - spin_unlock_irq(&lo->lo_lock);
796 +static void figure_loop_size(struct loop_device *lo)
798 + loop_sizes[lo->lo_number] = compute_loop_size(lo,
799 + lo->lo_backing_file->f_dentry,
804 +static int loop_file_io(struct file *file, char *buf, int size, loff_t *ppos, int w)
815 + x = file->f_op->write(file, buf + y, z, ppos);
818 + x = file->f_op->read(file, buf + y, z, ppos);
824 + if ((x == -EAGAIN) || (x == -ENOMEM) || (x == -ERESTART) || (x == -EINTR)) {
825 + run_task_queue(&tq_disk);
826 + set_current_state(TASK_INTERRUPTIBLE);
827 + schedule_timeout(HZ / 2);
833 + } while (y < size);
837 +static int do_bh_filebacked(struct loop_device *lo, struct buffer_head *bh, int rw)
840 + struct file *file = lo->lo_backing_file;
842 + unsigned int size, len;
845 + pos = ((loff_t) bh->b_rsector << 9) + lo->lo_offset;
846 + buf = page_address(lo->lo_bh_free->b_page);
849 + IV = bh->b_rsector + (lo->lo_offset >> 9);
851 + if (lo->lo_encrypt_type == LO_CRYPT_NONE) {
852 + /* this code relies that NONE transfer is a no-op */
859 + if (lo_do_transfer(lo, WRITE, buf, data, size, IV)) {
860 + printk(KERN_ERR "loop%d: write transfer error, sector %lu\n", lo->lo_number, IV);
863 + if (loop_file_io(file, buf, size, &pos, 1)) {
864 + printk(KERN_ERR "loop%d: write i/o error, sector %lu\n", lo->lo_number, IV);
868 + if (loop_file_io(file, buf, size, &pos, 0)) {
869 + printk(KERN_ERR "loop%d: read i/o error, sector %lu\n", lo->lo_number, IV);
872 + if (lo_do_transfer(lo, READ, buf, data, size, IV)) {
873 + printk(KERN_ERR "loop%d: read transfer error, sector %lu\n", lo->lo_number, IV);
884 static int loop_make_request(request_queue_t *q, int rw, struct buffer_head *rbh)
886 - struct buffer_head *bh = NULL;
887 + struct buffer_head *bh;
888 struct loop_device *lo;
891 + set_current_state(TASK_RUNNING);
892 if (!buffer_locked(rbh))
896 } else if (rw == READA) {
898 } else if (rw != READ) {
899 - printk(KERN_ERR "loop: unknown command (%d)\n", rw);
900 + printk(KERN_ERR "loop%d: unknown command (%d)\n", lo->lo_number, rw);
904 @@ -493,35 +551,43 @@
905 * file backed, queue for loop_thread to handle
907 if (lo->lo_flags & LO_FLAGS_DO_BMAP) {
909 - * rbh locked at this point, noone else should clear
913 - set_bit(BH_Dirty, &rbh->b_state);
914 - loop_add_bh(lo, rbh);
915 + loop_add_queue_last(lo, rbh, (rw == WRITE) ? &lo->lo_bhQue1 : &lo->lo_bhQue0);
920 + * device backed, just remap rdev & rsector for NONE transfer
922 + if (lo->lo_encrypt_type == LO_CRYPT_NONE) {
923 + rbh->b_rsector += lo->lo_offset >> 9;
924 + rbh->b_rdev = lo->lo_device;
925 + generic_make_request(rw, rbh);
926 + if (atomic_dec_and_test(&lo->lo_pending))
927 + wake_up_interruptible(&lo->lo_bh_wait);
932 - * piggy old buffer on original, and submit for I/O
933 + * device backed, start reads and writes now if buffer available
935 - bh = loop_get_buffer(lo, rbh);
936 - IV = loop_get_iv(lo, rbh->b_rsector);
937 + bh = loop_get_buffer(lo, rbh, 0, rw);
939 + /* just queue request and let thread handle alloc later */
940 + loop_add_queue_last(lo, rbh, (rw == WRITE) ? &lo->lo_bhQue1 : &lo->lo_bhQue2);
944 - set_bit(BH_Dirty, &bh->b_state);
945 - if (lo_do_transfer(lo, WRITE, bh->b_data, rbh->b_data,
947 + if (lo_do_transfer(lo, WRITE, bh->b_data, rbh->b_data, bh->b_size, bh->b_rsector)) {
948 + loop_put_buffer(lo, bh);
953 generic_make_request(rw, bh);
957 if (atomic_dec_and_test(&lo->lo_pending))
958 - up(&lo->lo_bh_mutex);
959 - loop_put_buffer(bh);
960 + wake_up_interruptible(&lo->lo_bh_wait);
962 buffer_IO_error(rbh);
968 -static inline void loop_handle_bh(struct loop_device *lo,struct buffer_head *bh)
973 - * For block backed loop, we know this is a READ
975 - if (lo->lo_flags & LO_FLAGS_DO_BMAP) {
976 - int rw = !!test_and_clear_bit(BH_Dirty, &bh->b_state);
978 - ret = do_bh_filebacked(lo, bh, rw);
979 - bh->b_end_io(bh, !ret);
981 - struct buffer_head *rbh = bh->b_private;
982 - unsigned long IV = loop_get_iv(lo, rbh->b_rsector);
984 - ret = lo_do_transfer(lo, READ, bh->b_data, rbh->b_data,
987 - rbh->b_end_io(rbh, !ret);
988 - loop_put_buffer(bh);
993 * worker thread that handles reads/writes to file backed loop devices,
994 * to avoid blocking in our make_request_fn. it also does loop decrypting
996 static int loop_thread(void *data)
998 struct loop_device *lo = data;
999 - struct buffer_head *bh;
1000 + struct buffer_head *bh, *xbh;
1001 + int x, rw, qi = 0, flushcnt = 0;
1002 + wait_queue_t waitq;
1003 + que_look_up_table qt[4] = {
1004 + { &lo->lo_bhQue0, &lo->lo_bhQue1, &lo->lo_bhQue2, 0, 1, 2 },
1005 + { &lo->lo_bhQue2, &lo->lo_bhQue0, &lo->lo_bhQue1, 2, 0, 1 },
1006 + { &lo->lo_bhQue0, &lo->lo_bhQue2, &lo->lo_bhQue1, 0, 2, 1 },
1007 + { &lo->lo_bhQue1, &lo->lo_bhQue0, &lo->lo_bhQue2, 1, 0, 2 }
1009 + static const struct rlimit loop_rlim_defaults[RLIM_NLIMITS] = INIT_RLIMITS;
1011 + init_waitqueue_entry(&waitq, current);
1012 + memcpy(&current->rlim[0], &loop_rlim_defaults[0], sizeof(current->rlim));
1014 exit_files(current);
1016 @@ -576,6 +629,19 @@
1017 flush_signals(current);
1018 spin_unlock_irq(&current->sigmask_lock);
1022 + if (lo_nice < -20)
1024 +#if defined(DEF_NICE) && defined(DEF_COUNTER)
1025 + /* old scheduler syntax */
1026 + current->policy = SCHED_OTHER;
1027 + current->nice = lo_nice;
1029 + /* O(1) scheduler syntax */
1030 + set_user_nice(current, lo_nice);
1033 spin_lock_irq(&lo->lo_lock);
1034 lo->lo_state = Lo_bound;
1035 atomic_inc(&lo->lo_pending);
1036 @@ -589,23 +655,104 @@
1040 - down_interruptible(&lo->lo_bh_mutex);
1041 + add_wait_queue(&lo->lo_bh_wait, &waitq);
1043 + set_current_state(TASK_INTERRUPTIBLE);
1044 + if (!atomic_read(&lo->lo_pending))
1048 + spin_lock_irq(&lo->lo_lock);
1049 + if (lo->lo_bhQue0) {
1051 + } else if (lo->lo_bhQue1 || lo->lo_bhQue2) {
1052 + /* file backed works too because lo->lo_bh_need == 0 */
1053 + if (lo->lo_bh_free || !lo->lo_bh_need)
1056 + spin_unlock_irq(&lo->lo_lock);
1062 + set_current_state(TASK_RUNNING);
1063 + remove_wait_queue(&lo->lo_bh_wait, &waitq);
1066 - * could be upped because of tear-down, not because of
1067 + * could be woken because of tear-down, not because of
1070 if (!atomic_read(&lo->lo_pending))
1073 - bh = loop_get_bh(lo);
1075 - printk("loop: missing bh\n");
1077 + * read queues using alternating order to prevent starvation
1079 + bh = loop_get_bh(lo, &x, &qt[++qi & 3]);
1084 + * x list tag usage(buffer-allocated)
1085 + * --- ------------- -----------------------
1086 + * 0 lo->lo_bhQue0 dev-read(y) / file-read
1087 + * 1 lo->lo_bhQue1 dev-write(n) / file-write
1088 + * 2 lo->lo_bhQue2 dev-read(n)
1090 + rw = (x == 1) ? WRITE : READ;
1091 + if ((x >= 1) && !(lo->lo_flags & LO_FLAGS_DO_BMAP)) {
1092 + /* loop_make_request didn't allocate a buffer, do that now */
1093 + xbh = loop_get_buffer(lo, bh, 1, rw);
1095 + run_task_queue(&tq_disk);
1097 + loop_add_queue_first(lo, bh, (rw == WRITE) ? &lo->lo_bhQue1 : &lo->lo_bhQue2);
1098 + /* lo->lo_bh_need should be 1 now, go back to sleep */
1101 + if (rw == WRITE) {
1102 + if (lo_do_transfer(lo, WRITE, xbh->b_data, bh->b_data, xbh->b_size, xbh->b_rsector)) {
1103 + loop_put_buffer(lo, xbh);
1104 + buffer_IO_error(bh);
1105 + atomic_dec(&lo->lo_pending);
1109 + generic_make_request(rw, xbh);
1111 + /* start I/O if there are no more requests lacking buffers */
1113 + spin_lock_irq(&lo->lo_lock);
1114 + if (!lo->lo_bhQue1 && !lo->lo_bhQue2)
1116 + spin_unlock_irq(&lo->lo_lock);
1117 + if (x || (++flushcnt >= lo->lo_bh_flsh)) {
1118 + run_task_queue(&tq_disk);
1122 + /* request not completely processed yet */
1125 + if (lo->lo_flags & LO_FLAGS_DO_BMAP) {
1126 + /* request is for file backed device */
1127 + x = do_bh_filebacked(lo, bh, rw);
1128 + bh->b_reqnext = NULL;
1129 + bh->b_end_io(bh, !x);
1131 + /* device backed read has completed, do decrypt now */
1132 + xbh = bh->b_private;
1133 + /* must not use bh->b_rsector as IV, as it may be modified by LVM at this point */
1134 + /* instead, recompute IV from original request */
1135 + x = lo_do_transfer(lo, READ, bh->b_data, xbh->b_data, bh->b_size, xbh->b_rsector + (lo->lo_offset >> 9));
1136 + xbh->b_reqnext = NULL;
1137 + xbh->b_end_io(xbh, !x);
1138 + loop_put_buffer(lo, bh);
1140 - loop_handle_bh(lo, bh);
1143 - * upped both for pending work and tear-down, lo_pending
1144 + * woken both for pending work and tear-down, lo_pending
1145 * will hit zero then
1147 if (atomic_dec_and_test(&lo->lo_pending))
1148 @@ -616,15 +763,34 @@
1152 +static void loop_set_softblksz(struct loop_device *lo, kdev_t dev)
1156 + if (blksize_size[MAJOR(lo->lo_device)])
1157 + bs = blksize_size[MAJOR(lo->lo_device)][MINOR(lo->lo_device)];
1160 + if (lo->lo_flags & LO_FLAGS_DO_BMAP) {
1161 + x = loop_sizes[lo->lo_number];
1162 + if ((bs == 8192) && (x & 7))
1164 + if ((bs == 4096) && (x & 3))
1166 + if ((bs == 2048) && (x & 1))
1169 + set_blocksize(dev, bs);
1172 static int loop_set_fd(struct loop_device *lo, struct file *lo_file, kdev_t dev,
1176 struct inode *inode;
1179 + int lo_flags = 0, hardsz = 512;
1185 @@ -643,33 +809,44 @@
1186 if (!(file->f_mode & FMODE_WRITE))
1187 lo_flags |= LO_FLAGS_READ_ONLY;
1189 + lo->lo_bh_free = lo->lo_bhQue2 = lo->lo_bhQue1 = lo->lo_bhQue0 = NULL;
1190 + lo->lo_bh_need = lo->lo_bh_flsh = 0;
1191 + init_waitqueue_head(&lo->lo_bh_wait);
1192 if (S_ISBLK(inode->i_mode)) {
1193 lo_device = inode->i_rdev;
1194 if (lo_device == dev) {
1198 + if (loop_prealloc_init(lo, 0)) {
1202 + hardsz = get_hardsect_size(lo_device);
1203 } else if (S_ISREG(inode->i_mode)) {
1204 - struct address_space_operations *aops = inode->i_mapping->a_ops;
1206 * If we can't read - sorry. If we only can't write - well,
1207 * it's going to be read-only.
1209 - if (!aops->readpage)
1210 + if (!file->f_op || !file->f_op->read)
1213 - if (!aops->prepare_write || !aops->commit_write)
1214 + if (!file->f_op->write)
1215 lo_flags |= LO_FLAGS_READ_ONLY;
1217 lo_device = inode->i_dev;
1218 lo_flags |= LO_FLAGS_DO_BMAP;
1219 + if (loop_prealloc_init(lo, 1)) {
1229 - if (IS_RDONLY (inode) || is_read_only(lo_device)
1230 + if ((S_ISREG(inode->i_mode) && IS_RDONLY(inode)) || is_read_only(lo_device)
1231 || !(lo_file->f_mode & FMODE_WRITE))
1232 lo_flags |= LO_FLAGS_READ_ONLY;
1234 @@ -681,18 +858,17 @@
1235 lo->transfer = NULL;
1237 figure_loop_size(lo);
1238 - lo->old_gfp_mask = inode->i_mapping->gfp_mask;
1239 - inode->i_mapping->gfp_mask &= ~(__GFP_IO|__GFP_FS);
1242 - if (blksize_size[MAJOR(lo_device)])
1243 - bs = blksize_size[MAJOR(lo_device)][MINOR(lo_device)];
1246 + if (lo_flags & LO_FLAGS_DO_BMAP) {
1247 + lo->old_gfp_mask = inode->i_mapping->gfp_mask;
1248 + inode->i_mapping->gfp_mask = GFP_NOIO | __GFP_HIGH;
1250 + lo->old_gfp_mask = -1;
1253 - set_blocksize(dev, bs);
1254 + loop_hardsizes[MINOR(dev)] = hardsz;
1255 + loop_set_softblksz(lo, dev);
1257 - lo->lo_bh = lo->lo_bhtail = NULL;
1258 kernel_thread(loop_thread, lo, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
1261 @@ -751,11 +927,12 @@
1262 spin_lock_irq(&lo->lo_lock);
1263 lo->lo_state = Lo_rundown;
1264 if (atomic_dec_and_test(&lo->lo_pending))
1265 - up(&lo->lo_bh_mutex);
1266 + wake_up_interruptible(&lo->lo_bh_wait);
1267 spin_unlock_irq(&lo->lo_lock);
1271 + loop_prealloc_cleanup(lo);
1272 lo->lo_backing_file = NULL;
1274 loop_release_xfer(lo);
1275 @@ -770,14 +947,15 @@
1276 memset(lo->lo_name, 0, LO_NAME_SIZE);
1277 loop_sizes[lo->lo_number] = 0;
1278 invalidate_bdev(bdev, 0);
1279 - filp->f_dentry->d_inode->i_mapping->gfp_mask = gfp;
1281 + filp->f_dentry->d_inode->i_mapping->gfp_mask = gfp;
1282 lo->lo_state = Lo_unbound;
1288 -static int loop_set_status(struct loop_device *lo, struct loop_info *arg)
1289 +static int loop_set_status(struct loop_device *lo, kdev_t dev, struct loop_info *arg)
1291 struct loop_info info;
1294 lo->lo_key_owner = current->uid;
1296 figure_loop_size(lo);
1297 + loop_set_softblksz(lo, dev);
1301 @@ -872,7 +1051,7 @@
1302 err = loop_clr_fd(lo, inode->i_bdev);
1304 case LOOP_SET_STATUS:
1305 - err = loop_set_status(lo, (struct loop_info *) arg);
1306 + err = loop_set_status(lo, inode->i_rdev, (struct loop_info *) arg);
1308 case LOOP_GET_STATUS:
1309 err = loop_get_status(lo, (struct loop_info *) arg);
1310 @@ -905,7 +1084,7 @@
1311 static int lo_open(struct inode *inode, struct file *file)
1313 struct loop_device *lo;
1319 @@ -920,10 +1099,6 @@
1320 lo = &loop_dev[dev];
1322 down(&lo->lo_ctl_mutex);
1324 - type = lo->lo_encrypt_type;
1325 - if (type && xfer_funcs[type] && xfer_funcs[type]->lock)
1326 - xfer_funcs[type]->lock(lo);
1328 up(&lo->lo_ctl_mutex);
1330 @@ -932,7 +1107,7 @@
1331 static int lo_release(struct inode *inode, struct file *file)
1333 struct loop_device *lo;
1339 @@ -947,11 +1122,7 @@
1341 lo = &loop_dev[dev];
1342 down(&lo->lo_ctl_mutex);
1343 - type = lo->lo_encrypt_type;
1345 - if (xfer_funcs[type] && xfer_funcs[type]->unlock)
1346 - xfer_funcs[type]->unlock(lo);
1348 up(&lo->lo_ctl_mutex);
1351 @@ -1016,10 +1187,9 @@
1356 loop_dev = kmalloc(max_loop * sizeof(struct loop_device), GFP_KERNEL);
1361 loop_sizes = kmalloc(max_loop * sizeof(int), GFP_KERNEL);
1363 @@ -1029,6 +1199,10 @@
1367 + loop_hardsizes = kmalloc(max_loop * sizeof(int), GFP_KERNEL);
1368 + if (!loop_hardsizes)
1369 + goto out_hardsizes;
1371 blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), loop_make_request);
1373 for (i = 0; i < max_loop; i++) {
1374 @@ -1036,18 +1210,28 @@
1375 memset(lo, 0, sizeof(struct loop_device));
1376 init_MUTEX(&lo->lo_ctl_mutex);
1377 init_MUTEX_LOCKED(&lo->lo_sem);
1378 - init_MUTEX_LOCKED(&lo->lo_bh_mutex);
1380 spin_lock_init(&lo->lo_lock);
1383 memset(loop_sizes, 0, max_loop * sizeof(int));
1384 memset(loop_blksizes, 0, max_loop * sizeof(int));
1385 + memset(loop_hardsizes, 0, max_loop * sizeof(int));
1386 blk_size[MAJOR_NR] = loop_sizes;
1387 blksize_size[MAJOR_NR] = loop_blksizes;
1388 + hardsect_size[MAJOR_NR] = loop_hardsizes;
1389 for (i = 0; i < max_loop; i++)
1390 register_disk(NULL, MKDEV(MAJOR_NR, i), 1, &lo_fops, 0);
1392 + for (i = 0; i < (sizeof(lo_prealloc) / sizeof(int)); i += 2) {
1393 + if (!lo_prealloc[i])
1395 + if (lo_prealloc[i] < LO_PREALLOC_MIN)
1396 + lo_prealloc[i] = LO_PREALLOC_MIN;
1397 + if (lo_prealloc[i] > LO_PREALLOC_MAX)
1398 + lo_prealloc[i] = LO_PREALLOC_MAX;
1401 devfs_handle = devfs_mk_dir(NULL, "loop", NULL);
1402 devfs_register_series(devfs_handle, "%u", max_loop, DEVFS_FL_DEFAULT,
1404 @@ -1057,10 +1241,13 @@
1405 printk(KERN_INFO "loop: loaded (max %d devices)\n", max_loop);
1409 + kfree(loop_blksizes);
1415 if (devfs_unregister_blkdev(MAJOR_NR, "loop"))
1416 printk(KERN_WARNING "loop: cannot unregister blkdev\n");
1417 printk(KERN_ERR "loop: ran out of memory\n");
1418 @@ -1072,9 +1259,14 @@
1419 devfs_unregister(devfs_handle);
1420 if (devfs_unregister_blkdev(MAJOR_NR, "loop"))
1421 printk(KERN_WARNING "loop: cannot unregister blkdev\n");
1423 + blk_size[MAJOR_NR] = 0;
1424 + blksize_size[MAJOR_NR] = 0;
1425 + hardsect_size[MAJOR_NR] = 0;
1428 kfree(loop_blksizes);
1429 + kfree(loop_hardsizes);
1432 module_init(loop_init);
1433 diff -ruN linux-2.4/drivers/block/Makefile linux-2.4-cl/drivers/block/Makefile
1434 --- linux-2.4/drivers/block/Makefile 2003-08-16 19:09:58.793156044 +0200
1435 +++ linux-2.4-cl/drivers/block/Makefile 2003-08-16 17:37:12.622670377 +0200
1437 obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
1438 obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
1439 obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
1440 +obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
1442 subdir-$(CONFIG_PARIDE) += paride
1444 diff -ruN linux-2.4/include/linux/loop.h linux-2.4-cl/include/linux/loop.h
1445 --- linux-2.4/include/linux/loop.h 2001-09-17 22:16:30.000000000 +0200
1446 +++ linux-2.4-cl/include/linux/loop.h 2003-08-16 19:31:26.733997046 +0200
1448 #define LO_KEY_SIZE 32
1451 +typedef u32 sector_t; /* for 2.6 this is defined in <asm/types.h> and
1452 + most likely an u64; but since cryptoloop uses
1453 + only the lower 32 bits of the block number
1454 + passed, let's just use an u32 for now */
1456 +/* definitions for IV metric */
1457 +#define LOOP_IV_SECTOR_BITS 9
1458 +#define LOOP_IV_SECTOR_SIZE (1 << LOOP_IV_SECTOR_BITS)
1460 /* Possible states of device */
1466 +struct loop_device;
1468 +typedef int (* transfer_proc_t)(struct loop_device *, int cmd,
1469 + char *raw_buf, char *loop_buf, int size,
1470 + sector_t real_block);
1472 struct loop_device {
1476 int lo_encrypt_type;
1477 int lo_encrypt_key_size;
1479 - int (*transfer)(struct loop_device *, int cmd,
1480 - char *raw_buf, char *loop_buf, int size,
1482 + transfer_proc_t transfer;
1483 char lo_name[LO_NAME_SIZE];
1484 char lo_encrypt_key[LO_KEY_SIZE];
1490 - struct buffer_head *lo_bh;
1491 - struct buffer_head *lo_bhtail;
1492 + struct buffer_head *lo_bhQue0;
1493 + struct buffer_head *lo_bhQue1;
1495 struct semaphore lo_sem;
1496 struct semaphore lo_ctl_mutex;
1497 - struct semaphore lo_bh_mutex;
1498 atomic_t lo_pending;
1499 + struct buffer_head *lo_bhQue2;
1500 + struct buffer_head *lo_bh_free;
1503 + wait_queue_head_t lo_bh_wait;
1506 -typedef int (* transfer_proc_t)(struct loop_device *, int cmd,
1507 - char *raw_buf, char *loop_buf, int size,
1510 static inline int lo_do_transfer(struct loop_device *lo, int cmd, char *rbuf,
1511 - char *lbuf, int size, int rblock)
1512 + char *lbuf, int size, sector_t real_block)
1517 - return lo->transfer(lo, cmd, rbuf, lbuf, size, rblock);
1518 + return lo->transfer(lo, cmd, rbuf, lbuf, size, real_block);
1520 #endif /* __KERNEL__ */
1524 #define LO_FLAGS_DO_BMAP 1
1525 #define LO_FLAGS_READ_ONLY 2
1526 -#define LO_FLAGS_BH_REMAP 4
1529 * Note that this structure gets the wrong offsets when directly used
1531 #define LO_CRYPT_IDEA 6
1532 #define LO_CRYPT_DUMMY 9
1533 #define LO_CRYPT_SKIPJACK 10
1534 +#define LO_CRYPT_CRYPTOAPI 18
1535 #define MAX_LO_CRYPT 20
1539 struct loop_func_table {
1540 int number; /* filter type */
1541 int (*transfer)(struct loop_device *lo, int cmd, char *raw_buf,
1542 - char *loop_buf, int size, int real_block);
1543 + char *loop_buf, int size, sector_t real_block);
1544 int (*init)(struct loop_device *, struct loop_info *);
1545 /* release is called from loop_unregister_transfer or clr_fd */
1546 int (*release)(struct loop_device *);