1 --- drivers/md/lvm.c.org Mon Nov 19 17:56:04 2001
2 +++ drivers/md/lvm.c Tue Oct 2 21:14:41 2001
7 - * Copyright (C) 1997 - 2000 Heinz Mauelshagen, Sistina Software
8 + * Copyright (C) 1997 - 2001 Heinz Mauelshagen, Sistina Software
10 * February-November 1997
11 * April-May,July-August,November 1998
12 * January-March,May,July,September,October 1999
13 * January,February,July,September-November 2000
15 + * January-April 2001
18 * LVM driver is free software; you can redistribute it and/or modify
20 * support for free (eg. longer) logical volume names
21 * 12/05/1998 - added spin_locks (thanks to Pascal van Dam
22 * <pascal@ramoth.xs4all.nl>)
23 - * 25/05/1998 - fixed handling of locked PEs in lvm_map() and lvm_chr_ioctl()
24 + * 25/05/1998 - fixed handling of locked PEs in lvm_map() and
26 * 26/05/1998 - reactivated verify_area by access_ok
27 * 07/06/1998 - used vmalloc/vfree instead of kmalloc/kfree to go
28 * beyond 128/256 KB max allocation limit per call
30 * 14/02/2000 - support for 2.3.43
31 * - integrated Andrea Arcagneli's snapshot code
32 * 25/06/2000 - james (chip) , IKKHAYD! roffl
33 - * 26/06/2000 - enhanced lv_extend_reduce for snapshot logical volume support
34 + * 26/06/2000 - enhanced lv_extend_reduce for snapshot logical volume
36 * 06/09/2000 - added devfs support
37 * 07/09/2000 - changed IOP version to 9
38 * - started to add new char ioctl LV_STATUS_BYDEV_T to support
40 * 08/01/2001 - Removed conditional compiles related to PROC_FS,
41 * procfs is always supported now. (JT)
42 * 12/01/2001 - avoided flushing logical volume in case of shrinking
43 - * because of unnecessary overhead in case of heavy updates
44 + * because of unnecessary overhead in case of heavy updates
45 * 25/01/2001 - Allow RO open of an inactive LV so it can be reactivated.
46 - * 31/01/2001 - If you try and BMAP a snapshot you now get an -EPERM
47 - * 01/02/2001 - factored __remap_snapshot out of lvm_map
48 + * 31/01/2001 - removed blk_init_queue/blk_cleanup_queue queueing will be
49 + * handled by the proper devices.
50 + * - If you try and BMAP a snapshot you now get an -EPERM
51 + * 01/02/2001 - lvm_map() now calls buffer_IO_error on error for 2.4
52 + * - factored __remap_snapshot out of lvm_map
53 * 12/02/2001 - move devfs code to create VG before LVs
54 - * 14/02/2001 - tidied device defines for blk.h
55 + * 13/02/2001 - allow VG_CREATE on /dev/lvm
56 + * 14/02/2001 - removed modversions.h
57 + * - tidied device defines for blk.h
58 * - tidied debug statements
59 + * - bug: vg[] member not set back to NULL if activation fails
60 * - more lvm_map tidying
61 - * 14/02/2001 - bug: vg[] member not set back to NULL if activation fails
62 + * 15/02/2001 - register /dev/lvm with devfs correctly (major/minor
64 + * 19/02/2001 - preallocated buffer_heads for rawio when using
66 * 28/02/2001 - introduced the P_DEV macro and changed some internel
67 * functions to be static [AD]
68 * 28/02/2001 - factored lvm_get_snapshot_use_rate out of blk_ioctl [AD]
70 * where the check for an existing LV takes place right at
72 * 01/03/2001 - Add VG_CREATE_OLD for IOP 10 compatibility
73 - * 02/03/2001 - Don't destroy usermode pointers in lv_t structures duing LV_
74 - * STATUS_BYxxx and remove redundant lv_t variables from same.
75 + * 02/03/2001 - Don't destroy usermode pointers in lv_t structures during
77 + * and remove redundant lv_t variables from same.
78 + * - avoid compilation of lvm_dummy_device_request in case of
79 + * Linux >= 2.3.0 to avoid a warning
80 + * - added lvm_name argument to printk in buffer allocation
81 + * in order to avoid a warning
82 + * 04/03/2001 - moved linux/version.h above first use of KERNEL_VERSION
84 * 05/03/2001 - restore copying pe_t array in lvm_do_lv_status_byname. For
86 * - restore copying pe_t array in lvm_do_lv_status_byindex (HM)
87 * - added copying pe_t array in lvm_do_lv_status_bydev (HM)
88 * - enhanced lvm_do_lv_status_by{name,index,dev} to be capable
89 * to copy the lv_block_exception_t array to userspace (HM)
90 - * 08/03/2001 - factored lvm_do_pv_flush out of lvm_chr_ioctl [HM]
91 + * 08/03/2001 - initialize new lv_ptr->lv_COW_table_iobuf for snapshots;
92 + * removed obsolete lv_ptr->lv_COW_table_page initialization
93 + * - factored lvm_do_pv_flush out of lvm_chr_ioctl (HM)
94 * 09/03/2001 - Added _lock_open_count to ensure we only drop the lock
95 * when the locking process closes.
96 - * 05/04/2001 - lvm_map bugs: don't use b_blocknr/b_dev in lvm_map, it
97 - * destroys stacking devices. call b_end_io on failed maps.
99 - * - Defer writes to an extent that is being moved [JT + AD]
100 - * 28/05/2001 - implemented missing BLKSSZGET ioctl [AD]
101 + * 05/04/2001 - Defer writes to an extent that is being moved [JT]
102 + * 05/04/2001 - use b_rdev and b_rsector rather than b_dev and b_blocknr in
103 + * lvm_map() in order to make stacking devices more happy (HM)
104 + * 11/04/2001 - cleaned up the pvmove queue code. I no longer retain the
105 + * rw flag, instead WRITEA's are just dropped [JT]
106 + * 30/04/2001 - added KERNEL_VERSION > 2.4.3 get_hardsect_size() rather
107 + * than get_hardblocksize() call
108 + * 03/05/2001 - Use copy_to/from_user to preserve pointers in
109 + * lvm_do_status_by*
110 + * 11/05/2001 - avoid accesses to inactive snapshot data in
111 + * __update_hardsectsize() and lvm_do_lv_extend_reduce() (JW)
112 + * 28/05/2001 - implemented missing BLKSSZGET ioctl
113 + * 05/06/2001 - Move _pe_lock out of fast path for lvm_map when no PEs
114 + * locked. Make buffer queue flush not need locking.
115 + * Fix lvm_user_bmap() to set b_rsector for new lvm_map(). [AED]
116 + * 30/06/2001 - Speed up __update_hardsectsize() by checking if PVs have
117 + * the same hardsectsize (very likely) before scanning all LEs
118 + * in the LV each time. [AED]
122 +#include <linux/version.h>
124 #define MAJOR_NR LVM_BLK_MAJOR
125 #define DEVICE_OFF(device)
126 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 0)
127 +#define DEVICE_REQUEST lvm_dummy_device_request
129 #define LOCAL_END_REQUEST
131 /* lvm_do_lv_create calls fsync_dev_lockfs()/unlockfs() */
132 /* #define LVM_VFS_ENHANCEMENT */
134 #include <linux/config.h>
136 #include <linux/module.h>
138 #include <linux/kernel.h>
139 #include <linux/vmalloc.h>
141 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 39)
142 #include <linux/slab.h>
144 #include <linux/init.h>
146 #include <linux/hdreg.h>
148 #include <linux/blkdev.h>
149 #include <linux/genhd.h>
150 #include <linux/locks.h>
152 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
153 +#include <linux/iobuf.h>
156 #include <linux/devfs_fs_kernel.h>
157 #include <linux/smp_lock.h>
158 #include <asm/ioctl.h>
162 #include <linux/blk.h>
163 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
164 #include <linux/blkpg.h>
167 #include <linux/errno.h>
168 #include <linux/lvm.h>
171 * External function prototypes
173 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 42)
174 static int lvm_make_request_fn(request_queue_t*, int, struct buffer_head*);
176 +static int lvm_make_request_fn(struct buffer_head *bh, int rw);
179 static int lvm_blk_ioctl(struct inode *, struct file *, uint, ulong);
180 static int lvm_blk_open(struct inode *, struct file *);
183 * Internal function prototypes
185 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
186 +void lvm_cleanup(void);
188 static void lvm_cleanup(void);
190 static void lvm_init_vars(void);
194 static int lvm_do_vg_remove(int);
195 static void lvm_geninit(struct gendisk *);
196 static void __update_hardsectsize(lv_t *lv);
197 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 4, 4)
198 +#define lvm_sectsize(dev) get_hardblocksize(dev)
200 +#define lvm_sectsize(dev) get_hardsect_size(dev)
203 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
204 +static struct buffer_head **__allocate_bheads(void);
205 +static void __free_bheads(struct buffer_head **bheads);
208 static void _queue_io(struct buffer_head *bh, int rw);
209 static struct buffer_head *_dequeue_io(void);
211 /* volume group descriptor area pointers */
212 vg_t *vg[ABS_MAX_VG];
214 +static pv_t *pvp = NULL;
215 +static lv_t *lvp = NULL;
216 +static pe_t *pep = NULL;
219 /* map from block minor number to VG and LV numbers */
223 static int _lock_open_count = 0;
224 static uint vg_count = 0;
225 static long lvm_chr_open_count = 0;
226 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
227 static DECLARE_WAIT_QUEUE_HEAD(lvm_wait);
229 +struct wait_queue *lvm_snapshot_wait = NULL;
230 +struct wait_queue *lvm_wait = NULL;
233 static spinlock_t lvm_lock = SPIN_LOCK_UNLOCKED;
234 static spinlock_t lvm_snapshot_lock = SPIN_LOCK_UNLOCKED;
235 @@ -356,14 +428,25 @@
236 ioctl: lvm_chr_ioctl,
239 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 38)
240 +static struct file_operations lvm_blk_fops =
242 + open: lvm_blk_open,
244 + write: block_write,
245 + release: lvm_blk_close,
246 + ioctl: lvm_blk_ioctl,
247 + fsync: block_fsync,
250 /* block device operations structure needed for 2.3.38? and above */
251 struct block_device_operations lvm_blk_dops =
253 - owner: THIS_MODULE,
254 - open: lvm_blk_open,
255 + open: lvm_blk_open,
256 release: lvm_blk_close,
257 ioctl: lvm_blk_ioctl,
262 /* gendisk structures */
263 @@ -374,13 +457,22 @@
265 static struct gendisk lvm_gendisk =
268 - major_name: LVM_NAME,
271 - part: lvm_hd_struct,
274 + MAJOR_NR, /* major # */
275 + LVM_NAME, /* name of major */
276 + 0, /* number of times minor is shifted
277 + to get real minor */
278 + 1, /* maximum partitions per device */
279 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 40)
280 + MAX_LV, /* maximum number of real devices */
281 + lvm_geninit, /* initialization called before we
284 + lvm_hd_struct, /* partition table */
285 + lvm_size, /* device size in blocks, copied
287 + MAX_LV, /* number of real devices */
288 + NULL, /* internal */
289 + NULL, /* pointer to next gendisk struct (internal) */
293 @@ -388,14 +480,19 @@
297 + struct gendisk *gendisk_ptr = NULL;
299 if (devfs_register_chrdev(LVM_CHAR_MAJOR,
300 lvm_name, &lvm_chr_fops) < 0) {
301 printk(KERN_ERR "%s -- devfs_register_chrdev failed\n",
306 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 37)
307 if (devfs_register_blkdev(MAJOR_NR, lvm_name, &lvm_blk_dops) < 0)
309 + if (register_blkdev(MAJOR_NR, lvm_name, &lvm_blk_fops) < 0)
312 printk("%s -- devfs_register_blkdev failed\n", lvm_name);
313 if (devfs_unregister_chrdev(LVM_CHAR_MAJOR, lvm_name) < 0)
314 @@ -409,15 +506,35 @@
316 lvm_geninit(&lvm_gendisk);
318 - add_gendisk(&lvm_gendisk);
319 + /* insert our gendisk at the corresponding major */
320 + if (gendisk_head != NULL) {
321 + gendisk_ptr = gendisk_head;
322 + while (gendisk_ptr->next != NULL &&
323 + gendisk_ptr->major > lvm_gendisk.major) {
324 + gendisk_ptr = gendisk_ptr->next;
326 + lvm_gendisk.next = gendisk_ptr->next;
327 + gendisk_ptr->next = &lvm_gendisk;
329 + gendisk_head = &lvm_gendisk;
330 + lvm_gendisk.next = NULL;
334 /* reference from drivers/block/genhd.c */
335 lvm_hd_name_ptr = lvm_hd_name;
338 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 42)
339 blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), lvm_make_request_fn);
341 + blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
342 + blk_dev[MAJOR_NR].current_request = NULL;
345 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 0)
346 + blk_dev[MAJOR_NR].make_req_fn = lvm_make_request_fn;
349 /* initialise the pe lock */
350 pe_lock_req.lock = UNLOCK_PE;
351 @@ -436,12 +553,18 @@
360 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 30)
361 +void lvm_cleanup(void)
363 static void lvm_cleanup(void)
366 + struct gendisk *gendisk_ptr = NULL, *gendisk_ptr_prev = NULL;
368 if (devfs_unregister_chrdev(LVM_CHAR_MAJOR, lvm_name) < 0)
369 printk(KERN_ERR "%s -- devfs_unregister_chrdev failed\n",
372 printk(KERN_ERR "%s -- devfs_unregister_blkdev failed\n",
375 - del_gendisk(&lvm_gendisk);
376 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 30)
377 + blk_dev[MAJOR_NR].request_fn = NULL;
378 + blk_dev[MAJOR_NR].current_request = NULL;
381 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 0)
382 + blk_dev[MAJOR_NR].make_req_fn = NULL;
385 + gendisk_ptr = gendisk_ptr_prev = gendisk_head;
386 + while (gendisk_ptr != NULL) {
387 + if (gendisk_ptr == &lvm_gendisk)
389 + gendisk_ptr_prev = gendisk_ptr;
390 + gendisk_ptr = gendisk_ptr->next;
392 + /* delete our gendisk from chain */
393 + if (gendisk_ptr == &lvm_gendisk)
394 + gendisk_ptr_prev->next = gendisk_ptr->next;
396 blk_size[MAJOR_NR] = NULL;
397 blksize_size[MAJOR_NR] = NULL;
400 static int lvm_chr_open(struct inode *inode, struct file *file)
402 - unsigned int minor = MINOR(inode->i_rdev);
403 + int minor = MINOR(inode->i_rdev);
405 P_DEV("chr_open MINOR: %d VG#: %d mode: %s%s lock: %d\n",
406 minor, VG_CHR(minor), MODE_TO_STR(file->f_mode), lock);
407 @@ -525,10 +666,10 @@
408 /* Group special file open */
409 if (VG_CHR(minor) > MAX_VG) return -ENXIO;
411 - spin_lock(&lvm_lock);
412 - if(lock == current->pid)
413 - _lock_open_count++;
414 - spin_unlock(&lvm_lock);
415 + spin_lock(&lvm_lock);
416 + if(lock == current->pid)
417 + _lock_open_count++;
418 + spin_unlock(&lvm_lock);
420 lvm_chr_open_count++;
424 return lvm_do_vg_create(arg, minor);
427 - /* create a VGDA, assume VG number is filled in */
429 + /* create a VGDA, assume VG number is filled in */
430 return lvm_do_vg_create(arg, -1);
436 /* physical volume buffer flush/invalidate */
437 - return lvm_do_pv_flush(arg);
438 + return lvm_do_pv_flush(arg);
442 @@ -765,16 +906,16 @@
444 if (lvm_chr_open_count > 0) lvm_chr_open_count--;
446 - spin_lock(&lvm_lock);
447 - if(lock == current->pid) {
448 - if(!_lock_open_count) {
449 + spin_lock(&lvm_lock);
450 + if(lock == current->pid) {
451 + if(!_lock_open_count) {
452 P_DEV("chr_close: unlocking LVM for pid %d\n", lock);
454 - wake_up_interruptible(&lvm_wait);
456 - _lock_open_count--;
458 + wake_up_interruptible(&lvm_wait);
460 + _lock_open_count--;
462 - spin_unlock(&lvm_lock);
463 + spin_unlock(&lvm_lock);
468 LV_BLK(minor) >= 0 &&
469 LV_BLK(minor) < vg_ptr->lv_max) {
471 - /* Check parallel LV spindown (LV remove) */
472 + /* Check parallel LV spindown (LV remove) */
473 if (lv_ptr->lv_status & LV_SPINDOWN) return -EPERM;
475 /* Check inactive LV and open for read/write */
477 (file->f_mode & FMODE_WRITE))
480 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 38)
481 + file->f_op = &lvm_blk_fops;
484 /* be sure to increment VG counter */
485 if (lv_ptr->lv_open == 0) vg_ptr->lv_open++;
486 @@ -860,17 +1004,12 @@
489 /* get block device sector size as needed e.g. by fdisk */
490 - return put_user(get_hardsect_size(inode->i_rdev), (int *) arg);
491 + return put_user(lvm_sectsize(inode->i_rdev), (int *) arg);
494 /* return device size */
495 P_IOCTL("BLKGETSIZE: %u\n", lv_ptr->lv_size);
496 - if (put_user(lv_ptr->lv_size, (unsigned long *)arg))
501 - if (put_user((u64)lv_ptr->lv_size << 9, (u64 *)arg))
502 + if (put_user(lv_ptr->lv_size, (long *)arg))
507 if(lv_ptr->lv_access & LV_SNAPSHOT)
510 + /* turn logical block into (dev_t, block). non privileged. */
511 return lvm_user_bmap(inode, (struct lv_bmap *) arg);
514 case LV_SET_ALLOCATION:
515 /* set allocation flags of a logical volume */
517 P_DEV("blk_close MINOR: %d VG#: %d LV#: %d\n",
518 minor, VG_BLK(minor), LV_BLK(minor));
520 + sync_dev(inode->i_rdev);
521 if (lv_ptr->lv_open == 1) vg_ptr->lv_open--;
524 @@ -1048,15 +1190,14 @@
525 bh.b_blocknr = block;
526 bh.b_dev = bh.b_rdev = inode->i_rdev;
527 bh.b_size = lvm_get_blksize(bh.b_dev);
528 - bh.b_rsector = block * (bh.b_size >> 9);
529 + bh.b_rsector = block * (bh.b_size >> 9);
530 if ((err=lvm_map(&bh, READ)) < 0) {
531 printk("lvm map failed: %d\n", err);
535 - return put_user(kdev_t_to_nr(bh.b_rdev), &user_result->lv_dev) ||
536 - put_user(bh.b_rsector/(bh.b_size>>9), &user_result->lv_block) ?
538 + return (put_user(kdev_t_to_nr(bh.b_rdev), &user_result->lv_dev) ||
539 + put_user(bh.b_rsector/(bh.b_size>>9), &user_result->lv_block));
543 @@ -1065,7 +1206,7 @@
544 * (see init_module/lvm_init)
546 static void __remap_snapshot(kdev_t rdev, ulong rsector,
547 - ulong pe_start, lv_t *lv, vg_t *vg) {
548 + ulong pe_start, lv_t *lv, vg_t *vg) {
550 /* copy a chunk from the origin to a snapshot device */
551 down_write(&lv->lv_lock);
552 @@ -1122,6 +1263,7 @@
557 static int lvm_map(struct buffer_head *bh, int rw)
559 int minor = MINOR(bh->b_rdev);
560 @@ -1245,7 +1387,7 @@
561 _remap_snapshot(rdev_map, rsector_map,
562 pe_start, snap, vg_this);
568 bh->b_rdev = rdev_map;
569 @@ -1254,7 +1396,9 @@
573 +#if LINUX_VERSION_CODE >= KERNEL_VERSION ( 2, 4, 0)
576 up_read(&lv->lv_lock);
579 @@ -1284,14 +1428,34 @@
583 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 0)
585 + * this one never should be called...
587 +static void lvm_dummy_device_request(void)
589 + printk(KERN_EMERG "%s -- oops, got lvm request for %s [sector: %lu]\n",
590 + lvm_name, kdevname(CURRENT->rq_dev), CURRENT->sector);
597 * make request function
599 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 48)
600 static int lvm_make_request_fn(request_queue_t *q,
602 struct buffer_head *bh) {
603 return (lvm_map(bh, rw) <= 0) ? 0 : 1;
606 +static int lvm_make_request_fn(struct buffer_head *bh, int rw) {
607 + int r = lvm_map(bh, rw);
608 + return (r <= 0) ? r : 1;
613 /********************************************************************
614 @@ -1457,14 +1621,14 @@
618 - /* VG_CREATE now uses minor number in VG structure */
619 - if (minor == -1) minor = vg_ptr->vg_number;
620 + /* VG_CREATE now uses minor number in VG structure */
621 + if (minor == -1) minor = vg_ptr->vg_number;
624 - if (vg[VG_CHR(minor)] != NULL) {
625 + if (vg[VG_CHR(minor)] != NULL) {
626 P_IOCTL("lvm_do_vg_create ERROR: VG %d in use\n", minor);
632 /* we are not that active so far... */
633 @@ -1495,7 +1659,6 @@
634 /* get the physical volume structures */
635 vg_ptr->pv_act = vg_ptr->pv_cur = 0;
636 for (p = 0; p < vg_ptr->pv_max; p++) {
638 /* user space address */
639 if ((pvp = vg_ptr->pv[p]) != NULL) {
640 ret = lvm_do_pv_create(pvp, vg_ptr, p);
641 @@ -1519,7 +1682,6 @@
642 /* get the logical volume structures */
644 for (l = 0; l < vg_ptr->lv_max; l++) {
646 /* user space address */
647 if ((lvp = vg_ptr->lv[l]) != NULL) {
648 if (copy_from_user(&lv, lvp, sizeof(lv_t)) != 0) {
649 @@ -1546,7 +1708,7 @@
650 /* Second path to correct snapshot logical volumes which are not
651 in place during first path above */
652 for (l = 0; l < ls; l++) {
653 - lv_t *lvp = snap_lv_ptr[l];
654 + lvp = snap_lv_ptr[l];
655 if (copy_from_user(&lv, lvp, sizeof(lv_t)) != 0) {
656 lvm_do_vg_remove(minor);
658 @@ -1637,7 +1799,8 @@
662 - if (vg_ptr == NULL) return -ENXIO;
663 + /* If the VG doesn't exist in the kernel then just exit */
664 + if (!vg_ptr) return 0;
666 if (copy_from_user(vg_name, arg, sizeof(vg_name)) != 0)
668 @@ -1797,30 +1960,56 @@
672 -static void __update_hardsectsize(lv_t *lv) {
674 - int max_hardsectsize = 0, hardsectsize;
676 - for (le = 0; le < lv->lv_allocated_le; le++) {
677 - hardsectsize = get_hardsect_size(lv->lv_current_pe[le].dev);
678 - if (hardsectsize == 0)
679 - hardsectsize = 512;
680 - if (hardsectsize > max_hardsectsize)
681 - max_hardsectsize = hardsectsize;
684 - /* only perform this operation on active snapshots */
685 - if ((lv->lv_access & LV_SNAPSHOT) &&
686 - (lv->lv_status & LV_ACTIVE)) {
687 - for (e = 0; e < lv->lv_remap_end; e++) {
688 - hardsectsize = get_hardsect_size( lv->lv_block_exception[e].rdev_new);
689 - if (hardsectsize == 0)
690 - hardsectsize = 512;
691 - if (hardsectsize > max_hardsectsize)
692 +static void __update_hardsectsize(lv_t *lv)
694 + int max_hardsectsize = 0, hardsectsize = 0;
697 + /* Check PVs first to see if they all have same sector size */
698 + for (p = 0; p < lv->vg->pv_cur; p++) {
699 + pv_t *pv = lv->vg->pv[p];
700 + if (pv && (hardsectsize = lvm_sectsize(pv->pv_dev))) {
701 + if (max_hardsectsize == 0)
702 + max_hardsectsize = hardsectsize;
703 + else if (hardsectsize != max_hardsectsize) {
704 + P_DEV("%s PV[%d] (%s) sector size %d, not %d\n",
705 + lv->lv_name, p, kdevname(pv->pv_dev),
706 + hardsectsize, max_hardsectsize);
712 + /* PVs have different block size, need to check each LE sector size */
713 + if (hardsectsize != max_hardsectsize) {
715 + for (le = 0; le < lv->lv_allocated_le; le++) {
716 + hardsectsize = lvm_sectsize(lv->lv_current_pe[le].dev);
717 + if (hardsectsize > max_hardsectsize) {
718 + P_DEV("%s LE[%d] (%s) blocksize %d not %d\n",
720 + kdevname(lv->lv_current_pe[le].dev),
721 + hardsectsize, max_hardsectsize);
722 max_hardsectsize = hardsectsize;
726 + /* only perform this operation on active snapshots */
727 + if ((lv->lv_access & LV_SNAPSHOT) &&
728 + (lv->lv_status & LV_ACTIVE)) {
730 + for (e = 0; e < lv->lv_remap_end; e++) {
731 + hardsectsize = lvm_sectsize(lv->lv_block_exception[e].rdev_new);
732 + if (hardsectsize > max_hardsectsize)
733 + max_hardsectsize = hardsectsize;
738 + if (max_hardsectsize == 0)
739 + max_hardsectsize = SECTOR_SIZE;
740 + P_DEV("hardblocksize for LV %s is %d\n",
741 + kdevname(lv->lv_dev), max_hardsectsize);
742 lvm_hardsectsizes[MINOR(lv->lv_dev)] = max_hardsectsize;
745 @@ -1834,7 +2023,6 @@
746 lv_block_exception_t *lvbe = lv->lv_block_exception;
747 vg_t *vg_ptr = vg[VG_CHR(minor)];
751 if (!(pep = lv->lv_current_pe))
753 @@ -1876,7 +2064,7 @@
754 lv_ptr->lv_snapshot_next = NULL;
755 lv_ptr->lv_block_exception = NULL;
756 lv_ptr->lv_iobuf = NULL;
757 - lv_ptr->lv_COW_table_iobuf = NULL;
758 + lv_ptr->lv_COW_table_iobuf = NULL;
759 lv_ptr->lv_snapshot_hash_table = NULL;
760 lv_ptr->lv_snapshot_hash_table_size = 0;
761 lv_ptr->lv_snapshot_hash_mask = 0;
762 @@ -1884,6 +2072,9 @@
764 lv_ptr->lv_snapshot_use_rate = 0;
766 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
767 + lv_ptr->bheads = 0;
769 vg_ptr->lv[l] = lv_ptr;
771 /* get the PE structures from user space if this
772 @@ -1956,12 +2147,11 @@
773 LVM_SNAPSHOT_DROPPED_SECTOR)
776 - "%s -- lvm_do_lv_create: snapshot has been dropped and will not be activated\n",
777 + "%s -- lvm_do_lv_create: snapshot has been dropped and will not be activated\n",
783 /* point to the original logical volume */
784 lv_ptr = lv_ptr->lv_snapshot_org;
786 @@ -1995,12 +2185,16 @@
787 lv_ptr->lv_block_exception[e].rsector_org, lv_ptr);
788 /* need to fill the COW exception table data
789 into the page for disk i/o */
790 - if(lvm_snapshot_fill_COW_page(vg_ptr, lv_ptr)) {
792 - vg_ptr->lv[l] = NULL;
795 + if(lvm_snapshot_fill_COW_page(vg_ptr, lv_ptr)) {
797 + vg_ptr->lv[l] = NULL;
800 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
801 init_waitqueue_head(&lv_ptr->lv_snapshot_wait);
803 + lv_ptr->lv_snapshot_wait = NULL;
807 vg_ptr->lv[l] = NULL;
808 @@ -2022,6 +2216,7 @@
809 LVM_CORRECT_READ_AHEAD(lv_ptr->lv_read_ahead);
811 lv_ptr->lv_status = lv_status_save;
812 + lv_ptr->vg = vg_ptr;
814 __update_hardsectsize(lv_ptr);
816 @@ -2040,6 +2235,17 @@
817 org->lv_access |= LV_SNAPSHOT_ORG;
818 lv_ptr->lv_access &= ~LV_SNAPSHOT_ORG; /* this can only hide an userspace bug */
820 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
821 + /* allocate a set of buffer_heads for snapshot io */
822 + if(!org->bheads && !(org->bheads = __allocate_bheads())) {
823 + printk(KERN_CRIT "%s -- LV_CREATE: "
824 + "couldn't allocate buffer heads\n", lvm_name);
825 + /* FIXME: tidy this function and free the lv */
826 + up_write(&org->lv_lock);
831 /* Link in the list of snapshot volumes */
832 for (last = org; last->lv_snapshot_next; last = last->lv_snapshot_next);
833 lv_ptr->lv_snapshot_prev = last;
834 @@ -2064,11 +2270,7 @@
835 unlockfs(lv_ptr->lv_snapshot_org->lv_dev);
838 - lv_ptr->vg = vg_ptr;
840 - lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de =
841 - lvm_fs_create_lv(vg_ptr, lv_ptr);
843 + lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de = lvm_fs_create_lv(vg_ptr, lv_ptr);
845 } /* lvm_do_lv_create() */
847 @@ -2126,6 +2328,13 @@
848 /* no more snapshots? */
849 if (!org->lv_snapshot_next) {
850 org->lv_access &= ~LV_SNAPSHOT_ORG;
851 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
852 + /* get rid of the buffer heads */
854 + __free_bheads(org->bheads);
859 up_write(&org->lv_lock);
861 @@ -2184,214 +2393,213 @@
862 * logical volume extend / reduce
864 static int __extend_reduce_snapshot(vg_t *vg_ptr, lv_t *old_lv, lv_t *new_lv) {
866 - lv_block_exception_t *lvbe;
868 + lv_block_exception_t *lvbe;
870 - if (!new_lv->lv_block_exception)
872 + if (!new_lv->lv_block_exception)
875 - size = new_lv->lv_remap_end * sizeof(lv_block_exception_t);
876 - if ((lvbe = vmalloc(size)) == NULL) {
878 - "%s -- lvm_do_lv_extend_reduce: vmalloc "
879 - "error LV_BLOCK_EXCEPTION of %lu Byte at line %d\n",
880 - lvm_name, size, __LINE__);
884 - if ((new_lv->lv_remap_end > old_lv->lv_remap_end) &&
885 - (copy_from_user(lvbe, new_lv->lv_block_exception, size))) {
889 - new_lv->lv_block_exception = lvbe;
891 - if (lvm_snapshot_alloc_hash_table(new_lv)) {
892 - vfree(new_lv->lv_block_exception);
895 + size = new_lv->lv_remap_end * sizeof(lv_block_exception_t);
896 + if ((lvbe = vmalloc(size)) == NULL) {
898 + "%s -- lvm_do_lv_extend_reduce: vmalloc "
899 + "error LV_BLOCK_EXCEPTION of %lu Byte at line %d\n",
900 + lvm_name, size, __LINE__);
904 + if ((new_lv->lv_remap_end > old_lv->lv_remap_end) &&
905 + (copy_from_user(lvbe, new_lv->lv_block_exception, size))) {
909 + new_lv->lv_block_exception = lvbe;
911 + if (lvm_snapshot_alloc_hash_table(new_lv)) {
912 + vfree(new_lv->lv_block_exception);
920 static int __extend_reduce(vg_t *vg_ptr, lv_t *old_lv, lv_t *new_lv) {
921 - ulong size, l, p, end;
923 + ulong size, l, p, end;
926 + /* allocate space for new pe structures */
927 + size = new_lv->lv_current_le * sizeof(pe_t);
928 + if ((pe = vmalloc(size)) == NULL) {
930 + "%s -- lvm_do_lv_extend_reduce: "
931 + "vmalloc error LV_CURRENT_PE of %lu Byte at line %d\n",
932 + lvm_name, size, __LINE__);
936 + /* get the PE structures from user space */
937 + if (copy_from_user(pe, new_lv->lv_current_pe, size)) {
938 + if(old_lv->lv_access & LV_SNAPSHOT)
939 + vfree(new_lv->lv_snapshot_hash_table);
944 + new_lv->lv_current_pe = pe;
946 + /* reduce allocation counters on PV(s) */
947 + for (l = 0; l < old_lv->lv_allocated_le; l++) {
948 + vg_ptr->pe_allocated--;
949 + for (p = 0; p < vg_ptr->pv_cur; p++) {
950 + if (vg_ptr->pv[p]->pv_dev ==
951 + old_lv->lv_current_pe[l].dev) {
952 + vg_ptr->pv[p]->pe_allocated--;
958 - /* allocate space for new pe structures */
959 - size = new_lv->lv_current_le * sizeof(pe_t);
960 - if ((pe = vmalloc(size)) == NULL) {
962 - "%s -- lvm_do_lv_extend_reduce: "
963 - "vmalloc error LV_CURRENT_PE of %lu Byte at line %d\n",
964 - lvm_name, size, __LINE__);
968 - /* get the PE structures from user space */
969 - if (copy_from_user(pe, new_lv->lv_current_pe, size)) {
970 - if(old_lv->lv_access & LV_SNAPSHOT)
971 - vfree(new_lv->lv_snapshot_hash_table);
976 - new_lv->lv_current_pe = pe;
978 - /* reduce allocation counters on PV(s) */
979 - for (l = 0; l < old_lv->lv_allocated_le; l++) {
980 - vg_ptr->pe_allocated--;
981 - for (p = 0; p < vg_ptr->pv_cur; p++) {
982 - if (vg_ptr->pv[p]->pv_dev ==
983 - old_lv->lv_current_pe[l].dev) {
984 - vg_ptr->pv[p]->pe_allocated--;
990 - /* extend the PE count in PVs */
991 - for (l = 0; l < new_lv->lv_allocated_le; l++) {
992 - vg_ptr->pe_allocated++;
993 - for (p = 0; p < vg_ptr->pv_cur; p++) {
994 - if (vg_ptr->pv[p]->pv_dev ==
995 + /* extend the PE count in PVs */
996 + for (l = 0; l < new_lv->lv_allocated_le; l++) {
997 + vg_ptr->pe_allocated++;
998 + for (p = 0; p < vg_ptr->pv_cur; p++) {
999 + if (vg_ptr->pv[p]->pv_dev ==
1000 new_lv->lv_current_pe[l].dev) {
1001 - vg_ptr->pv[p]->pe_allocated++;
1007 - /* save availiable i/o statistic data */
1008 - if (old_lv->lv_stripes < 2) { /* linear logical volume */
1009 - end = min(old_lv->lv_current_le, new_lv->lv_current_le);
1010 - for (l = 0; l < end; l++) {
1011 - new_lv->lv_current_pe[l].reads +=
1012 - old_lv->lv_current_pe[l].reads;
1014 - new_lv->lv_current_pe[l].writes +=
1015 - old_lv->lv_current_pe[l].writes;
1018 - } else { /* striped logical volume */
1019 - uint i, j, source, dest, end, old_stripe_size, new_stripe_size;
1021 - old_stripe_size = old_lv->lv_allocated_le / old_lv->lv_stripes;
1022 - new_stripe_size = new_lv->lv_allocated_le / new_lv->lv_stripes;
1023 - end = min(old_stripe_size, new_stripe_size);
1025 - for (i = source = dest = 0;
1026 - i < new_lv->lv_stripes; i++) {
1027 - for (j = 0; j < end; j++) {
1028 - new_lv->lv_current_pe[dest + j].reads +=
1029 - old_lv->lv_current_pe[source + j].reads;
1030 - new_lv->lv_current_pe[dest + j].writes +=
1031 - old_lv->lv_current_pe[source + j].writes;
1033 - source += old_stripe_size;
1034 - dest += new_stripe_size;
1037 + vg_ptr->pv[p]->pe_allocated++;
1044 + /* save available i/o statistic data */
1045 + if (old_lv->lv_stripes < 2) { /* linear logical volume */
1046 + end = min(old_lv->lv_current_le, new_lv->lv_current_le);
1047 + for (l = 0; l < end; l++) {
1048 + new_lv->lv_current_pe[l].reads +=
1049 + old_lv->lv_current_pe[l].reads;
1051 + new_lv->lv_current_pe[l].writes +=
1052 + old_lv->lv_current_pe[l].writes;
1055 + } else { /* striped logical volume */
1056 + uint i, j, source, dest, end, old_stripe_size, new_stripe_size;
1058 + old_stripe_size = old_lv->lv_allocated_le / old_lv->lv_stripes;
1059 + new_stripe_size = new_lv->lv_allocated_le / new_lv->lv_stripes;
1060 + end = min(old_stripe_size, new_stripe_size);
1062 + for (i = source = dest = 0; i < new_lv->lv_stripes; i++) {
1063 + for (j = 0; j < end; j++) {
1064 + new_lv->lv_current_pe[dest + j].reads +=
1065 + old_lv->lv_current_pe[source + j].reads;
1066 + new_lv->lv_current_pe[dest + j].writes +=
1067 + old_lv->lv_current_pe[source + j].writes;
1069 + source += old_stripe_size;
1070 + dest += new_stripe_size;
1077 static int lvm_do_lv_extend_reduce(int minor, char *lv_name, lv_t *new_lv)
1081 - vg_t *vg_ptr = vg[VG_CHR(minor)];
1085 - if ((pe = new_lv->lv_current_pe) == NULL)
1088 - for (l = 0; l < vg_ptr->lv_max; l++)
1089 - if (vg_ptr->lv[l] && !strcmp(vg_ptr->lv[l]->lv_name, lv_name))
1093 + vg_t *vg_ptr = vg[VG_CHR(minor)];
1097 - if (l == vg_ptr->lv_max)
1099 + if ((pe = new_lv->lv_current_pe) == NULL)
1102 + for (l = 0; l < vg_ptr->lv_max; l++)
1103 + if (vg_ptr->lv[l] && !strcmp(vg_ptr->lv[l]->lv_name, lv_name))
1106 + if (l == vg_ptr->lv_max)
1109 - old_lv = vg_ptr->lv[l];
1110 + old_lv = vg_ptr->lv[l];
1112 if (old_lv->lv_access & LV_SNAPSHOT) {
1113 /* only perform this operation on active snapshots */
1114 if (old_lv->lv_status & LV_ACTIVE)
1115 - r = __extend_reduce_snapshot(vg_ptr, old_lv, new_lv);
1117 + r = __extend_reduce_snapshot(vg_ptr, old_lv, new_lv);
1122 - r = __extend_reduce(vg_ptr, old_lv, new_lv);
1123 + r = __extend_reduce(vg_ptr, old_lv, new_lv);
1130 - /* copy relevent fields */
1131 + /* copy relevant fields */
1132 down_write(&old_lv->lv_lock);
1134 - if(new_lv->lv_access & LV_SNAPSHOT) {
1135 - size = (new_lv->lv_remap_end > old_lv->lv_remap_end) ?
1136 - old_lv->lv_remap_ptr : new_lv->lv_remap_end;
1137 - size *= sizeof(lv_block_exception_t);
1138 - memcpy(new_lv->lv_block_exception,
1139 - old_lv->lv_block_exception, size);
1141 - old_lv->lv_remap_end = new_lv->lv_remap_end;
1142 - old_lv->lv_block_exception = new_lv->lv_block_exception;
1143 - old_lv->lv_snapshot_hash_table =
1144 - new_lv->lv_snapshot_hash_table;
1145 - old_lv->lv_snapshot_hash_table_size =
1146 - new_lv->lv_snapshot_hash_table_size;
1147 - old_lv->lv_snapshot_hash_mask =
1148 - new_lv->lv_snapshot_hash_mask;
1150 - for (e = 0; e < new_lv->lv_remap_ptr; e++)
1151 - lvm_hash_link(new_lv->lv_block_exception + e,
1152 - new_lv->lv_block_exception[e].rdev_org,
1153 - new_lv->lv_block_exception[e].rsector_org,
1158 - vfree(old_lv->lv_current_pe);
1159 - vfree(old_lv->lv_snapshot_hash_table);
1161 - old_lv->lv_size = new_lv->lv_size;
1162 - old_lv->lv_allocated_le = new_lv->lv_allocated_le;
1163 - old_lv->lv_current_le = new_lv->lv_current_le;
1164 - old_lv->lv_current_pe = new_lv->lv_current_pe;
1165 - lvm_gendisk.part[MINOR(old_lv->lv_dev)].nr_sects =
1167 - lvm_size[MINOR(old_lv->lv_dev)] = old_lv->lv_size >> 1;
1169 - if (old_lv->lv_access & LV_SNAPSHOT_ORG) {
1171 - for(snap = old_lv->lv_snapshot_next; snap;
1172 - snap = snap->lv_snapshot_next) {
1173 + if(new_lv->lv_access & LV_SNAPSHOT) {
1174 + size = (new_lv->lv_remap_end > old_lv->lv_remap_end) ?
1175 + old_lv->lv_remap_ptr : new_lv->lv_remap_end;
1176 + size *= sizeof(lv_block_exception_t);
1177 + memcpy(new_lv->lv_block_exception,
1178 + old_lv->lv_block_exception, size);
1180 + old_lv->lv_remap_end = new_lv->lv_remap_end;
1181 + old_lv->lv_block_exception = new_lv->lv_block_exception;
1182 + old_lv->lv_snapshot_hash_table =
1183 + new_lv->lv_snapshot_hash_table;
1184 + old_lv->lv_snapshot_hash_table_size =
1185 + new_lv->lv_snapshot_hash_table_size;
1186 + old_lv->lv_snapshot_hash_mask =
1187 + new_lv->lv_snapshot_hash_mask;
1189 + for (e = 0; e < new_lv->lv_remap_ptr; e++)
1190 + lvm_hash_link(new_lv->lv_block_exception + e,
1191 + new_lv->lv_block_exception[e].rdev_org,
1192 + new_lv->lv_block_exception[e].rsector_org,
1197 + vfree(old_lv->lv_current_pe);
1198 + vfree(old_lv->lv_snapshot_hash_table);
1200 + old_lv->lv_size = new_lv->lv_size;
1201 + old_lv->lv_allocated_le = new_lv->lv_allocated_le;
1202 + old_lv->lv_current_le = new_lv->lv_current_le;
1203 + old_lv->lv_current_pe = new_lv->lv_current_pe;
1204 + lvm_gendisk.part[MINOR(old_lv->lv_dev)].nr_sects =
1206 + lvm_size[MINOR(old_lv->lv_dev)] = old_lv->lv_size >> 1;
1208 + if (old_lv->lv_access & LV_SNAPSHOT_ORG) {
1210 + for(snap = old_lv->lv_snapshot_next; snap;
1211 + snap = snap->lv_snapshot_next) {
1212 down_write(&snap->lv_lock);
1213 - snap->lv_current_pe = old_lv->lv_current_pe;
1214 - snap->lv_allocated_le =
1215 - old_lv->lv_allocated_le;
1216 - snap->lv_current_le = old_lv->lv_current_le;
1217 - snap->lv_size = old_lv->lv_size;
1219 - lvm_gendisk.part[MINOR(snap->lv_dev)].nr_sects
1220 - = old_lv->lv_size;
1221 - lvm_size[MINOR(snap->lv_dev)] =
1222 - old_lv->lv_size >> 1;
1223 - __update_hardsectsize(snap);
1224 + snap->lv_current_pe = old_lv->lv_current_pe;
1225 + snap->lv_allocated_le =
1226 + old_lv->lv_allocated_le;
1227 + snap->lv_current_le = old_lv->lv_current_le;
1228 + snap->lv_size = old_lv->lv_size;
1230 + lvm_gendisk.part[MINOR(snap->lv_dev)].nr_sects
1231 + = old_lv->lv_size;
1232 + lvm_size[MINOR(snap->lv_dev)] =
1233 + old_lv->lv_size >> 1;
1234 + __update_hardsectsize(snap);
1235 up_write(&snap->lv_lock);
1243 - __update_hardsectsize(old_lv);
1244 + __update_hardsectsize(old_lv);
1245 up_write(&old_lv->lv_lock);
1249 } /* lvm_do_lv_extend_reduce() */
1252 @@ -2426,7 +2634,6 @@
1257 if (saved_ptr1 != NULL) {
1258 if (copy_to_user(saved_ptr1,
1259 lv_ptr->lv_current_pe,
1260 @@ -2434,6 +2641,18 @@
1264 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0)
1265 + if (saved_ptr2 != NULL) {
1266 + if (copy_to_user(saved_ptr2,
1267 + lv_ptr->lv_block_exception,
1268 + lv_ptr->lv_remap_ptr *
1269 + sizeof(lv_block_exception_t)
1273 + if (copy_to_user(&lv_status_byname_req.lv->lv_block_exception, &saved_ptr2, sizeof(void*)) != 0)
1276 /* Restore usermode pointers */
1277 if (copy_to_user(&lv_status_byname_req.lv->lv_current_pe, &saved_ptr1, sizeof(void*)) != 0)
1279 @@ -2461,9 +2680,6 @@
1281 if (lv_status_byindex_req.lv == NULL)
1283 - if (lv_status_byindex_req.lv_index <0 ||
1284 - lv_status_byindex_req.lv_index >= MAX_LV)
1286 if ( ( lv_ptr = vg_ptr->lv[lv_status_byindex_req.lv_index]) == NULL)
1289 @@ -2482,6 +2698,18 @@
1293 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0)
1294 + if (saved_ptr2 != NULL) {
1295 + if (copy_to_user(saved_ptr2,
1296 + lv_ptr->lv_block_exception,
1297 + lv_ptr->lv_remap_ptr *
1298 + sizeof(lv_block_exception_t)
1302 + if (copy_to_user(&lv_status_byindex_req.lv->lv_block_exception, &saved_ptr2, sizeof(void *)) != 0)
1306 /* Restore usermode pointers */
1307 if (copy_to_user(&lv_status_byindex_req.lv->lv_current_pe, &saved_ptr1, sizeof(void *)) != 0)
1308 @@ -2529,6 +2757,18 @@
1312 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0)
1313 + if (saved_ptr2 != NULL) {
1314 + if (copy_to_user(saved_ptr2,
1315 + lv_ptr->lv_block_exception,
1316 + lv_ptr->lv_remap_ptr *
1317 + sizeof(lv_block_exception_t)
1321 + if (copy_to_user(&lv_status_bydev_req.lv->lv_block_exception, &saved_ptr2, sizeof(void *)) != 0)
1324 /* Restore usermode pointers */
1325 if (copy_to_user(&lv_status_bydev_req.lv->lv_current_pe, &saved_ptr1, sizeof(void *)) != 0)
1327 @@ -2552,9 +2792,7 @@
1328 if (lv_ptr->lv_dev == lv->lv_dev)
1330 lvm_fs_remove_lv(vg_ptr, lv_ptr);
1331 - strncpy(lv_ptr->lv_name,
1334 + strncpy(lv_ptr->lv_name, lv_req->lv_name, NAME_LEN);
1335 lvm_fs_create_lv(vg_ptr, lv_ptr);
1338 @@ -2629,23 +2867,24 @@
1340 } /* lvm_do_pv_status() */
1344 * character device support function flush and invalidate all buffers of a PV
1346 static int lvm_do_pv_flush(void *arg)
1348 - pv_flush_req_t pv_flush_req;
1349 + pv_flush_req_t pv_flush_req;
1351 - if (copy_from_user(&pv_flush_req, arg,
1352 - sizeof(pv_flush_req)) != 0)
1354 + if (copy_from_user(&pv_flush_req, arg, sizeof(pv_flush_req)) != 0)
1357 - fsync_dev(pv_flush_req.pv_dev);
1358 - invalidate_buffers(pv_flush_req.pv_dev);
1359 + fsync_dev(pv_flush_req.pv_dev);
1360 + invalidate_buffers(pv_flush_req.pv_dev);
1368 * support function initialize gendisk variables
1370 @@ -2670,10 +2909,43 @@
1372 } /* lvm_gen_init() */
1374 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
1375 +static struct buffer_head **__allocate_bheads(void) {
1377 + struct buffer_head *bh = 0, **bheads = 0;
1379 + if(!(bh = vmalloc(sizeof(*bh) * KIO_MAX_SECTORS)))
1382 + if(!(bheads = vmalloc(sizeof(*bheads) * KIO_MAX_SECTORS))) {
1387 + for(i = 0; i < KIO_MAX_SECTORS; i++)
1388 + bheads[i] = bh + i;
1392 +static void __free_bheads(struct buffer_head **bheads) {
1399 /* Must have down_write(_pe_lock) when we enqueue buffers */
1400 static void _queue_io(struct buffer_head *bh, int rw) {
1401 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
1402 + if (rw == WRITEA) {
1404 + * Discard write aheads (only 2.2 bdflush uses WRITEA, and
1405 + * it will retry any buffers with a WRITE again later).
1407 + bh->b_end_io(bh, buffer_uptodate(bh));
1411 if (bh->b_reqnext) BUG();
1412 bh->b_reqnext = _pe_requests;
1414 @@ -2708,10 +2980,12 @@
1420 * we must open the pv's before we use them
1422 static int _open_pv(pv_t *pv) {
1423 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 0)
1425 struct block_device *bd;
1427 @@ -2719,22 +2993,28 @@
1430 err = blkdev_get(bd, FMODE_READ|FMODE_WRITE, 0, BDEV_FILE);
1442 static void _close_pv(pv_t *pv) {
1444 - struct block_device *bdev = pv->bd;
1447 - blkdev_put(bdev, BDEV_FILE);
1449 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 0)
1450 + if(!pv || !pv->bd)
1453 + blkdev_put(pv->bd, BDEV_FILE);
1460 static unsigned long _sectors_to_k(unsigned long sect)
1462 if(SECTOR_SIZE > 1024) {
1463 @@ -2744,6 +3024,27 @@
1464 return sect / (1024 / SECTOR_SIZE);
1467 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
1468 module_init(lvm_init);
1469 module_exit(lvm_cleanup);
1470 -MODULE_LICENSE("GPL");
1473 + * 2.2.18 has support for module_init so why aren't I using it ?
1474 + * 1) I don't want to have to regression test against older kernels.
1475 + * 2) It doesn't work; if I use module_init(lvm_init) and build lvm
1476 + * into the kernel, lvm_init doesn't get called. If I then leave in
1477 + * the lvm_init call in ll_rw_block.c, lvm_init gets called twice !
1478 + * Probably got a link flag wrong somewhere.
1482 +int __init init_module(void) {
1483 + return lvm_init();
1486 +void cleanup_module(void) {
1492 --- drivers/md/lvm-internal.h.org Sun Nov 11 18:09:32 2001
1493 +++ drivers/md/lvm-internal.h Thu Sep 27 08:34:43 2001
1497 - * kernel/lvm-internal.h
1498 + * kernel/lvm_internal.h
1500 * Copyright (C) 2001 Sistina Software
1506 - * 05/01/2001:Joe Thornber - Factored this file out of lvm.c
1507 + * 05/01/2001 - Factored this file out of lvm.c (Joe Thornber)
1508 + * 11/01/2001 - Renamed lvm_internal and added declarations
1509 + * for lvm_fs.c stuff
1515 #include <linux/lvm.h>
1517 -#define _LVM_INTERNAL_H_VERSION "LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")"
1518 +#define _LVM_INTERNAL_H_VERSION "LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")"
1520 /* global variables, defined in lvm.c */
1521 extern char *lvm_version;
1524 extern struct file_operations lvm_chr_fops;
1526 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 38)
1527 +extern struct file_operations lvm_blk_fops;
1529 extern struct block_device_operations lvm_blk_dops;
1532 +/* 2.4.8 had no global min/max macros, and 2.4.9's were flawed */
1533 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10)
1536 +#define min(x,y) ({ \
1537 + const typeof(x) _x = (x); \
1538 + const typeof(y) _y = (y); \
1539 + (void) (&_x == &_y); \
1540 + _x < _y ? _x : _y; })
1543 +#define max(x,y) ({ \
1544 + const typeof(x) _x = (x); \
1545 + const typeof(y) _y = (y); \
1546 + (void) (&_x == &_y); \
1547 + _x > _y ? _x : _y; })
1553 --- drivers/md/lvm-snap.c.org Mon Nov 12 17:34:20 2001
1554 +++ drivers/md/lvm-snap.c Thu Sep 27 08:34:43 2001
1558 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
1559 - * Heinz Mauelshagen, Sistina Software (persistent snapshots)
1560 + * 2000 - 2001 Heinz Mauelshagen, Sistina Software
1562 * LVM snapshot driver is free software; you can redistribute it and/or modify
1563 * it under the terms of the GNU General Public License as published by
1564 * the Free Software Foundation; either version 2, or (at your option)
1565 * any later version.
1568 * LVM snapshot driver is distributed in the hope that it will be useful,
1569 * but WITHOUT ANY WARRANTY; without even the implied warranty of
1570 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1571 * GNU General Public License for more details.
1574 * You should have received a copy of the GNU General Public License
1575 * along with GNU CC; see the file COPYING. If not, write to
1576 * the Free Software Foundation, 59 Temple Place - Suite 330,
1577 - * Boston, MA 02111-1307, USA.
1578 + * Boston, MA 02111-1307, USA.
1583 * 23/11/2000 - used cpu_to_le64 rather than my own macro
1584 * 25/01/2001 - Put LockPage back in
1585 * 01/02/2001 - A dropped snapshot is now set as inactive
1586 + * 14/02/2001 - tidied debug statements
1587 + * 19/02/2001 - changed rawio calls to pass in preallocated buffer_heads
1588 + * 26/02/2001 - introduced __brw_kiovec to remove a lot of conditional
1590 + * 07/03/2001 - fixed COW exception table not persistent on 2.2 (HM)
1591 * 12/03/2001 - lvm_pv_get_number changes:
1593 * o renamed it to _pv_get_number
1597 #include <linux/kernel.h>
1598 -#include <linux/module.h>
1599 #include <linux/vmalloc.h>
1600 #include <linux/blkdev.h>
1601 #include <linux/smp_lock.h>
1603 #include <linux/iobuf.h>
1604 #include <linux/lvm.h>
1606 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3 ,0)
1607 +#include <linux/pagemap.h>
1610 #include "lvm-internal.h"
1612 -static char *lvm_snap_version __attribute__ ((unused)) =
1613 - "LVM "LVM_RELEASE_NAME" snapshot code ("LVM_RELEASE_DATE")\n";
1614 +static char *lvm_snap_version __attribute__ ((unused)) = "LVM "LVM_RELEASE_NAME" snapshot code ("LVM_RELEASE_DATE")\n";
1616 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3 ,0)
1618 +#define LockPage(map) set_bit(PG_locked, &(map)->flags)
1622 extern const char *const lvm_name;
1623 extern int lvm_blocksizes[];
1625 void lvm_snapshot_release(lv_t *);
1627 static int _write_COW_table_block(vg_t *vg, lv_t *lv, int idx,
1628 - const char **reason);
1629 + const char **reason);
1630 static void _disable_snapshot(vg_t *vg, lv_t *lv);
1633 +static inline int __brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
1634 + kdev_t dev, unsigned long b[], int size,
1636 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 0)
1637 + return brw_kiovec(rw, nr, iovec, dev, b, size);
1639 + return brw_kiovec_bh(rw, nr, iovec, dev, b, size, 0,
1640 + lv->lv_snapshot_org->bheads, KIO_MAX_SECTORS);
1645 static int _pv_get_number(vg_t * vg, kdev_t rdev, uint *pvn) {
1647 for(p = 0; p < vg->pv_max; p++) {
1650 if(vg->pv[p]->pv_dev == rdev)
1655 - if(p >= vg->pv_max) {
1656 + if(p >= vg->pv_max) {
1657 /* bad news, the snapshot COW table is probably corrupt */
1659 "%s -- _pv_get_number failed for rdev = %u\n",
1670 #define hashfn(dev,block,mask,chunk_size) \
1671 ((HASHDEV(dev)^((block)/(chunk_size))) & (mask))
1674 or error on this snapshot --> release it */
1675 invalidate_buffers(lv_snap->lv_dev);
1677 - /* wipe the snapshot since it's inconsistent now */
1678 - _disable_snapshot(vg, lv_snap);
1679 + /* wipe the snapshot since it's inconsistent now */
1680 + _disable_snapshot(vg, lv_snap);
1682 for (i = last_dev = 0; i < lv_snap->lv_remap_ptr; i++) {
1683 if ( lv_snap->lv_block_exception[i].rdev_new != last_dev) {
1687 static inline int lvm_snapshot_prepare_blocks(unsigned long *blocks,
1688 - unsigned long start,
1691 + unsigned long start,
1695 int i, sectors_per_block, nr_blocks;
1697 @@ -245,49 +269,48 @@
1699 int lvm_snapshot_fill_COW_page(vg_t * vg, lv_t * lv_snap)
1702 - int id = 0, is = lv_snap->lv_remap_ptr;
1703 - ulong blksize_snap;
1704 - lv_COW_table_disk_t * lv_COW_table = (lv_COW_table_disk_t *)
1705 - page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);
1707 + int id = 0, is = lv_snap->lv_remap_ptr;
1708 + ulong blksize_snap;
1709 + lv_COW_table_disk_t * lv_COW_table = (lv_COW_table_disk_t *)
1710 + page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);
1719 - lvm_get_blksize(lv_snap->lv_block_exception[is].rdev_new);
1720 + lvm_get_blksize(lv_snap->lv_block_exception[is].rdev_new);
1721 is -= is % (blksize_snap / sizeof(lv_COW_table_disk_t));
1723 memset(lv_COW_table, 0, blksize_snap);
1724 for ( ; is < lv_snap->lv_remap_ptr; is++, id++) {
1725 /* store new COW_table entry */
1726 - lv_block_exception_t *be = lv_snap->lv_block_exception + is;
1727 - if(_pv_get_number(vg, be->rdev_org, &pvn))
1730 - lv_COW_table[id].pv_org_number = cpu_to_le64(pvn);
1731 - lv_COW_table[id].pv_org_rsector = cpu_to_le64(be->rsector_org);
1732 - if(_pv_get_number(vg, be->rdev_new, &pvn))
1735 - lv_COW_table[id].pv_snap_number = cpu_to_le64(pvn);
1736 - lv_COW_table[id].pv_snap_rsector =
1737 - cpu_to_le64(be->rsector_new);
1738 + lv_block_exception_t *be = lv_snap->lv_block_exception + is;
1739 + if(_pv_get_number(vg, be->rdev_org, &pvn))
1742 + lv_COW_table[id].pv_org_number = cpu_to_le64(pvn);
1743 + lv_COW_table[id].pv_org_rsector = cpu_to_le64(be->rsector_org);
1744 + if(_pv_get_number(vg, be->rdev_new, &pvn))
1747 + lv_COW_table[id].pv_snap_number = cpu_to_le64(pvn);
1748 + lv_COW_table[id].pv_snap_rsector =
1749 + cpu_to_le64(be->rsector_new);
1756 - printk(KERN_ERR "%s -- lvm_snapshot_fill_COW_page failed", lvm_name);
1758 + printk(KERN_ERR "%s -- lvm_snapshot_fill_COW_page failed", lvm_name);
1764 * writes a COW exception table sector to disk (HM)
1767 int lvm_write_COW_table_block(vg_t * vg, lv_t *lv_snap)
1771 unsigned long org_start, snap_start, snap_phys_dev, virt_start, pe_off;
1772 int idx = lv_snap->lv_remap_ptr, chunk_size = lv_snap->lv_chunk_size;
1773 struct kiobuf * iobuf;
1774 + unsigned long blocks[KIO_MAX_SECTORS];
1775 int blksize_snap, blksize_org, min_blksize, max_blksize;
1776 int max_sectors, nr_sectors;
1778 @@ -363,20 +387,20 @@
1780 iobuf->length = nr_sectors << 9;
1782 - if(!lvm_snapshot_prepare_blocks(iobuf->blocks, org_start,
1783 + if(!lvm_snapshot_prepare_blocks(blocks, org_start,
1784 nr_sectors, blksize_org))
1787 - if (brw_kiovec(READ, 1, &iobuf, org_phys_dev,
1788 - iobuf->blocks, blksize_org) != (nr_sectors<<9))
1789 + if (__brw_kiovec(READ, 1, &iobuf, org_phys_dev, blocks,
1790 + blksize_org, lv_snap) != (nr_sectors<<9))
1793 - if(!lvm_snapshot_prepare_blocks(iobuf->blocks, snap_start,
1794 + if(!lvm_snapshot_prepare_blocks(blocks, snap_start,
1795 nr_sectors, blksize_snap))
1798 - if (brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev,
1799 - iobuf->blocks, blksize_snap) != (nr_sectors<<9))
1800 + if (__brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev, blocks,
1801 + blksize_snap, lv_snap) != (nr_sectors<<9))
1802 goto fail_raw_write;
1805 @@ -401,24 +425,24 @@
1811 lvm_drop_snapshot(vg, lv_snap, reason);
1814 - fail_out_of_space:
1816 reason = "out of space";
1820 reason = "read error";
1824 reason = "write error";
1828 reason = "blocksize error";
1833 reason = "couldn't prepare kiovec blocks "
1834 "(start probably isn't block aligned)";
1836 @@ -440,9 +464,17 @@
1840 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,27)
1841 page = alloc_page(GFP_KERNEL);
1844 + if (!page) goto out;
1847 + unsigned long addr = __get_free_page(GFP_USER);
1848 + if (!addr) goto out;
1849 + iobuf->pagelist[i] = addr;
1850 + page = mem_map + MAP_NR(addr);
1854 iobuf->maplist[i] = page;
1867 if (ret) goto out_free_kiovec;
1869 ret = lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_COW_table_iobuf,
1870 - PAGE_SIZE/SECTOR_SIZE);
1871 + PAGE_SIZE/SECTOR_SIZE);
1872 if (ret) goto out_free_both_kiovecs;
1874 ret = lvm_snapshot_alloc_hash_table(lv_snap);
1877 void lvm_snapshot_release(lv_t * lv)
1879 - int nbhs = KIO_MAX_SECTORS;
1881 if (lv->lv_block_exception)
1883 vfree(lv->lv_block_exception);
1884 @@ -557,17 +588,21 @@
1888 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
1889 kiobuf_wait_for_io(lv->lv_iobuf);
1891 unmap_kiobuf(lv->lv_iobuf);
1892 free_kiovec(1, &lv->lv_iobuf);
1893 lv->lv_iobuf = NULL;
1895 if (lv->lv_COW_table_iobuf)
1897 - kiobuf_wait_for_io(lv->lv_COW_table_iobuf);
1898 - unmap_kiobuf(lv->lv_COW_table_iobuf);
1899 - free_kiovec(1, &lv->lv_COW_table_iobuf);
1900 - lv->lv_COW_table_iobuf = NULL;
1901 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
1902 + kiobuf_wait_for_io(lv->lv_COW_table_iobuf);
1904 + unmap_kiobuf(lv->lv_COW_table_iobuf);
1905 + free_kiovec(1, &lv->lv_COW_table_iobuf);
1906 + lv->lv_COW_table_iobuf = NULL;
1910 @@ -579,11 +614,11 @@
1913 ulong snap_pe_start, COW_table_sector_offset,
1914 - COW_entries_per_pe, COW_chunks_per_pe, COW_entries_per_block;
1915 + COW_entries_per_pe, COW_chunks_per_pe, COW_entries_per_block;
1917 kdev_t snap_phys_dev;
1918 lv_block_exception_t *be;
1919 - struct kiobuf * COW_table_iobuf = lv_snap->lv_COW_table_iobuf;
1920 + struct kiobuf *COW_table_iobuf = lv_snap->lv_COW_table_iobuf;
1921 lv_COW_table_disk_t * lv_COW_table =
1922 ( lv_COW_table_disk_t *) page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);
1924 @@ -601,39 +636,40 @@
1926 if ( idx_COW_table == 0) memset(lv_COW_table, 0, blksize_snap);
1928 - /* sector offset into the on disk COW table */
1929 + /* sector offset into the on disk COW table */
1930 COW_table_sector_offset = (idx % COW_entries_per_pe) / (SECTOR_SIZE / sizeof(lv_COW_table_disk_t));
1932 /* COW table block to write next */
1933 blocks[0] = (snap_pe_start + COW_table_sector_offset) >> (blksize_snap >> 10);
1935 /* store new COW_table entry */
1936 - be = lv_snap->lv_block_exception + idx;
1937 - if(_pv_get_number(vg, be->rdev_org, &pvn))
1938 - goto fail_pv_get_number;
1940 - lv_COW_table[idx_COW_table].pv_org_number = cpu_to_le64(pvn);
1941 - lv_COW_table[idx_COW_table].pv_org_rsector =
1942 - cpu_to_le64(be->rsector_org);
1943 - if(_pv_get_number(vg, snap_phys_dev, &pvn))
1944 - goto fail_pv_get_number;
1946 - lv_COW_table[idx_COW_table].pv_snap_number = cpu_to_le64(pvn);
1947 - lv_COW_table[idx_COW_table].pv_snap_rsector =
1948 - cpu_to_le64(be->rsector_new);
1949 + be = lv_snap->lv_block_exception + idx;
1950 + if(_pv_get_number(vg, be->rdev_org, &pvn))
1951 + goto fail_pv_get_number;
1953 + lv_COW_table[idx_COW_table].pv_org_number = cpu_to_le64(pvn);
1954 + lv_COW_table[idx_COW_table].pv_org_rsector =
1955 + cpu_to_le64(be->rsector_org);
1956 + if(_pv_get_number(vg, snap_phys_dev, &pvn))
1957 + goto fail_pv_get_number;
1959 + lv_COW_table[idx_COW_table].pv_snap_number = cpu_to_le64(pvn);
1960 + lv_COW_table[idx_COW_table].pv_snap_rsector =
1961 + cpu_to_le64(be->rsector_new);
1963 COW_table_iobuf->length = blksize_snap;
1964 + /* COW_table_iobuf->nr_pages = 1; */
1966 - if (brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
1967 - blocks, blksize_snap) != blksize_snap)
1968 + if (__brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
1969 + blocks, blksize_snap, lv_snap) != blksize_snap)
1970 goto fail_raw_write;
1972 - /* initialization of next COW exception table block with zeroes */
1973 + /* initialization of next COW exception table block with zeroes */
1974 end_of_table = idx % COW_entries_per_pe == COW_entries_per_pe - 1;
1975 if (idx_COW_table % COW_entries_per_block == COW_entries_per_block - 1 || end_of_table)
1977 /* don't go beyond the end */
1978 - if (idx + 1 >= lv_snap->lv_remap_end) goto out;
1979 + if (idx + 1 >= lv_snap->lv_remap_end) goto out;
1981 memset(lv_COW_table, 0, blksize_snap);
1983 @@ -646,20 +682,20 @@
1984 blocks[0] = snap_pe_start >> (blksize_snap >> 10);
1987 - if (brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
1988 - blocks, blksize_snap) !=
1989 + if (__brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
1990 + blocks, blksize_snap, lv_snap) !=
1992 goto fail_raw_write;
2001 *reason = "write error";
2004 - fail_pv_get_number:
2005 +fail_pv_get_number:
2006 *reason = "_pv_get_number failed";
2014 -MODULE_LICENSE("GPL");
2015 --- drivers/md/lvm-fs.c.org Sun Nov 11 18:09:32 2001
2016 +++ drivers/md/lvm-fs.c Tue Sep 4 10:40:17 2001
2019 * Copyright (C) 2001 Sistina Software
2021 - * January,February 2001
2022 + * January-April 2001
2024 * LVM driver is free software; you can redistribute it and/or modify
2025 * it under the terms of the GNU General Public License as published by
2028 #include <linux/config.h>
2029 #include <linux/version.h>
2030 -#include <linux/module.h>
2032 #include <linux/kernel.h>
2033 #include <linux/vmalloc.h>
2036 static void _show_uuid(const char *src, char *b, char *e);
2038 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2040 static devfs_handle_t lvm_devfs_handle;
2042 static devfs_handle_t vg_devfs_handle[MAX_VG];
2043 static devfs_handle_t ch_devfs_handle[MAX_VG];
2044 static devfs_handle_t lv_devfs_handle[MAX_LV];
2047 static struct proc_dir_entry *lvm_proc_dir = NULL;
2048 static struct proc_dir_entry *lvm_proc_vg_subdir = NULL;
2051 /* User-space has already registered this */
2053 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2054 lvm_devfs_handle = devfs_register(
2055 0 , "lvm", 0, LVM_CHAR_MAJOR, 0,
2056 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
2057 &lvm_chr_fops, NULL);
2061 lvm_proc_dir = create_proc_entry(LVM_DIR, S_IFDIR, &proc_root);
2063 lvm_proc_vg_subdir = create_proc_entry(LVM_VG_SUBDIR, S_IFDIR,
2068 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2069 devfs_unregister (lvm_devfs_handle);
2073 remove_proc_entry(LVM_GLOBAL, lvm_proc_dir);
2074 remove_proc_entry(LVM_VG_SUBDIR, lvm_proc_dir);
2075 remove_proc_entry(LVM_DIR, &proc_root);
2077 void lvm_fs_create_vg(vg_t *vg_ptr) {
2078 struct proc_dir_entry *pde;
2080 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2081 vg_devfs_handle[vg_ptr->vg_number] =
2082 devfs_mk_dir(0, vg_ptr->vg_name, NULL);
2085 DEVFS_FL_DEFAULT, LVM_CHAR_MAJOR, vg_ptr->vg_number,
2086 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
2087 &lvm_chr_fops, NULL);
2090 vg_ptr->vg_dir_pde = create_proc_entry(vg_ptr->vg_name, S_IFDIR,
2091 lvm_proc_vg_subdir);
2092 @@ -137,8 +142,10 @@
2093 void lvm_fs_remove_vg(vg_t *vg_ptr) {
2096 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2097 devfs_unregister(ch_devfs_handle[vg_ptr->vg_number]);
2098 devfs_unregister(vg_devfs_handle[vg_ptr->vg_number]);
2102 for(i = 0; i < vg_ptr->lv_max; i++)
2103 @@ -173,11 +180,13 @@
2104 struct proc_dir_entry *pde;
2105 const char *name = _basename(lv->lv_name);
2107 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2108 lv_devfs_handle[MINOR(lv->lv_dev)] = devfs_register(
2109 vg_devfs_handle[vg_ptr->vg_number], name,
2110 DEVFS_FL_DEFAULT, LVM_BLK_MAJOR, MINOR(lv->lv_dev),
2111 S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,
2112 &lvm_blk_dops, NULL);
2115 if(vg_ptr->lv_subdir_pde &&
2116 (pde = create_proc_entry(name, S_IFREG, vg_ptr->lv_subdir_pde))) {
2120 void lvm_fs_remove_lv(vg_t *vg_ptr, lv_t *lv) {
2121 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2122 devfs_unregister(lv_devfs_handle[MINOR(lv->lv_dev)]);
2125 if(vg_ptr->lv_subdir_pde) {
2126 const char *name = _basename(lv->lv_name);
2127 @@ -276,12 +287,12 @@
2128 sz += sprintf(page + sz, "number: %u\n", lv->lv_number);
2129 sz += sprintf(page + sz, "open: %u\n", lv->lv_open);
2130 sz += sprintf(page + sz, "allocation: %u\n", lv->lv_allocation);
2131 - if(lv->lv_stripes > 1) {
2132 - sz += sprintf(page + sz, "stripes: %u\n",
2134 - sz += sprintf(page + sz, "stripesize: %u\n",
2135 - lv->lv_stripesize);
2137 + if(lv->lv_stripes > 1) {
2138 + sz += sprintf(page + sz, "stripes: %u\n",
2140 + sz += sprintf(page + sz, "stripesize: %u\n",
2141 + lv->lv_stripesize);
2143 sz += sprintf(page + sz, "device: %02u:%02u\n",
2144 MAJOR(lv->lv_dev), MINOR(lv->lv_dev));
2148 #ifdef DEBUG_LVM_PROC_GET_INFO
2150 - "%s - lvm_proc_get_global_info CALLED pos: %lu count: %d\n",
2151 - lvm_name, pos, count);
2152 + "%s - lvm_proc_get_global_info CALLED pos: %lu count: %d whence: %d\n",
2153 + lvm_name, pos, count, whence);
2156 if(pos != 0 && buf != NULL)
2161 -MODULE_LICENSE("GPL");
2162 --- include/linux/lvm.h.org Sun Nov 11 18:09:32 2001
2163 +++ include/linux/lvm.h Wed Oct 3 14:46:47 2001
2168 - * Copyright (C) 1997 - 2000 Heinz Mauelshagen, Sistina Software
2169 + * Copyright (C) 1997 - 2001 Heinz Mauelshagen, Sistina Software
2171 * February-November 1997
2173 * January-March,July,September,October,Dezember 1999
2174 * January,February,July,November 2000
2176 + * January-March,June,July 2001
2178 * lvm is free software; you can redistribute it and/or modify
2179 * it under the terms of the GNU General Public License as published by
2180 * the Free Software Foundation; either version 2, or (at your option)
2181 * any later version.
2184 * lvm is distributed in the hope that it will be useful,
2185 * but WITHOUT ANY WARRANTY; without even the implied warranty of
2186 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2187 * GNU General Public License for more details.
2190 * You should have received a copy of the GNU General Public License
2191 * along with GNU CC; see the file COPYING. If not, write to
2192 * the Free Software Foundation, 59 Temple Place - Suite 330,
2193 - * Boston, MA 02111-1307, USA.
2194 + * Boston, MA 02111-1307, USA.
2199 * 08/12/1999 - changed LVM_LV_SIZE_MAX macro to reflect current 1TB limit
2200 * 01/01/2000 - extended lv_v2 core structure by wait_queue member
2201 * 12/02/2000 - integrated Andrea Arcagnelli's snapshot work
2202 - * 14/02/2001 - changed LVM_SNAPSHOT_MIN_CHUNK to 1 page
2203 - * 18/02/2000 - seperated user and kernel space parts by
2204 + *    18/02/2000 - separated user and kernel space parts by
2205 * #ifdef them with __KERNEL__
2206 * 08/03/2000 - implemented cluster/shared bits for vg_access
2207 * 26/06/2000 - implemented snapshot persistency and resizing support
2209 * 12/11/2000 - removed unneeded timestamp definitions
2210 * 24/12/2000 - removed LVM_TO_{CORE,DISK}*, use cpu_{from, to}_le*
2211 * instead - Christoph Hellwig
2212 - * 01/03/2001 - Rename VG_CREATE to VG_CREATE_OLD and add new VG_CREATE
2213 + * 22/01/2001 - Change ulong to uint32_t
2214 + * 14/02/2001 - changed LVM_SNAPSHOT_MIN_CHUNK to 1 page
2215 + * 20/02/2001 - incremented IOP version to 11 because of incompatible
2216 + * change in VG activation (in order to support devfs better)
2217 + * 01/03/2001 - Revert to IOP10 and add VG_CREATE_OLD call for compatibility
2218 * 08/03/2001 - new lv_t (in core) version number 5: changed page member
2219 * to (struct kiobuf *) to use for COW exception table io
2220 - * 23/03/2001 - Change a (presumably) mistyped pv_t* to an lv_t*
2221 - * 26/03/2001 - changed lv_v4 to lv_v5 in structure definition [HM]
2222 + * 26/03/2001 - changed lv_v4 to lv_v5 in structure definition (HM)
2223 + * 21/06/2001 - changed BLOCK_SIZE back to 1024 for non S/390
2224 + * 22/06/2001 - added Andreas Dilger's PE on 4k boundary alignment enhancements
2225 + * 19/07/2001 - added rwsem compatibility macros for 2.2 kernels
2230 #ifndef _LVM_H_INCLUDE
2231 #define _LVM_H_INCLUDE
2233 -#define LVM_RELEASE_NAME "1.0.1-rc4(ish)"
2234 +#define LVM_RELEASE_NAME "1.0.1-rc4"
2235 #define LVM_RELEASE_DATE "03/10/2001"
2237 -#define _LVM_KERNEL_H_VERSION "LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")"
2238 +#define _LVM_KERNEL_H_VERSION "LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")"
2240 #include <linux/version.h>
2242 @@ -98,23 +103,43 @@
2244 #define DEBUG_GENDISK
2245 #define DEBUG_VG_CREATE
2246 - #define DEBUG_LVM_BLK_OPEN
2247 + #define DEBUG_DEVICE
2250 -#endif /* #ifdef __KERNEL__ */
2252 #include <linux/kdev_t.h>
2253 #include <linux/list.h>
2256 +#include <linux/kdev_t.h>
2257 +#include <linux/list.h>
2259 +#endif /* #ifndef __KERNEL__ */
2261 #include <asm/types.h>
2262 #include <linux/major.h>
2265 +#if LINUX_VERSION_CODE >= KERNEL_VERSION ( 2, 3 ,0)
2266 #include <linux/spinlock.h>
2268 +#include <asm/spinlock.h>
2271 #include <asm/semaphore.h>
2272 #endif /* #ifdef __KERNEL__ */
2274 +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3 ,0)
2275 +/* Compatibility macros for 2.2 */
2276 +#define rw_semaphore semaphore
2277 +#define init_rwsem init_MUTEX
2278 +#define down_read down
2279 +#define down_write down
2281 +#define up_write up
2282 +#define DECLARE_RWSEM DECLARE_MUTEX
2285 #include <asm/page.h>
2287 #if !defined ( LVM_BLK_MAJOR) || !defined ( LVM_CHAR_MAJOR)
2292 -#ifdef CONFIG_ARCH_S390
2293 +#ifdef CONFIG_ARCH_S390
2294 #define BLOCK_SIZE 4096
2296 #define BLOCK_SIZE 1024
2297 @@ -189,6 +214,38 @@
2301 + * VGDA: default disk spaces and offsets
2303 + * there's space after the structures for later extensions.
2305 + * offset what size
2306 + * --------------- ---------------------------------- ------------
2307 + * 0 physical volume structure ~500 byte
2309 + * 1K volume group structure ~200 byte
2311 + * 6K namelist of physical volumes 128 byte each
2313 + * 6k + n * ~300byte n logical volume structures ~300 byte each
2315 + * + m * 4byte m physical extent alloc. structs 4 byte each
2317 + * End of disk - first physical extent typically 4 megabyte
2324 +/* DONT TOUCH THESE !!! */
2333 * LVM_PE_T_MAX corresponds to:
2335 * 8KB PE size can map a ~512 MB logical volume at the cost of 1MB memory,
2336 @@ -298,7 +355,12 @@
2339 /* lock the logical volume manager */
2340 +#if LVM_DRIVER_IOP_VERSION > 11
2341 +#define LVM_LOCK_LVM _IO ( 0xfe, 0x9A)
2343 +/* This is actually the same as _IO ( 0xff, 0x00), oops. Remove for IOP 12+ */
2344 #define LVM_LOCK_LVM _IO ( 0xfe, 0x100)
2352 /* delta to version 1 starts here */
2353 - struct lv_v5 *lv_snapshot_org;
2354 - struct lv_v5 *lv_snapshot_prev;
2355 - struct lv_v5 *lv_snapshot_next;
2356 + struct lv_v5 *lv_snapshot_org;
2357 + struct lv_v5 *lv_snapshot_prev;
2358 + struct lv_v5 *lv_snapshot_next;
2359 lv_block_exception_t *lv_block_exception;
2362 @@ -510,11 +572,18 @@
2363 struct list_head *lv_snapshot_hash_table;
2364 uint32_t lv_snapshot_hash_table_size;
2365 uint32_t lv_snapshot_hash_mask;
2366 +#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
2367 wait_queue_head_t lv_snapshot_wait;
2369 + struct wait_queue *lv_snapshot_wait;
2371 int lv_snapshot_use_rate;
2374 uint lv_allocated_snapshot_le;
2375 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
2376 + struct buffer_head **bheads;
2382 } lv_snapshot_use_rate_req_t;
2386 /* useful inlines */
2387 static inline ulong round_up(ulong n, ulong size) {
2390 return round_up(n, size) / size;
2393 +/* FIXME: nasty capital letters */
2394 static int inline LVM_GET_COW_TABLE_CHUNKS_PER_PE(vg_t *vg, lv_t *lv) {
2395 return vg->pe_size / lv->lv_chunk_size;
2402 #endif /* #ifndef _LVM_H_INCLUDE */