linux-2.4.16-lvm-1.0.1rc4.patch
119e3928 1--- drivers/md/lvm.c.org Mon Nov 19 17:56:04 2001
2+++ drivers/md/lvm.c Tue Oct 2 21:14:41 2001
3@@ -1,13 +1,13 @@
4 /*
5 * kernel/lvm.c
6 *
7- * Copyright (C) 1997 - 2000 Heinz Mauelshagen, Sistina Software
8+ * Copyright (C) 1997 - 2001 Heinz Mauelshagen, Sistina Software
9 *
10 * February-November 1997
11 * April-May,July-August,November 1998
12 * January-March,May,July,September,October 1999
13 * January,February,July,September-November 2000
14- * January 2001
15+ * January-April 2001
16 *
17 *
18 * LVM driver is free software; you can redistribute it and/or modify
19@@ -43,7 +43,8 @@
20 * support for free (eg. longer) logical volume names
21 * 12/05/1998 - added spin_locks (thanks to Pascal van Dam
22 * <pascal@ramoth.xs4all.nl>)
23- * 25/05/1998 - fixed handling of locked PEs in lvm_map() and lvm_chr_ioctl()
24+ * 25/05/1998 - fixed handling of locked PEs in lvm_map() and
25+ * lvm_chr_ioctl()
26 * 26/05/1998 - reactivated verify_area by access_ok
27 * 07/06/1998 - used vmalloc/vfree instead of kmalloc/kfree to go
28 * beyond 128/256 KB max allocation limit per call
29@@ -125,7 +126,8 @@
30 * 14/02/2000 - support for 2.3.43
31 * - integrated Andrea Arcagneli's snapshot code
32 * 25/06/2000 - james (chip) , IKKHAYD! roffl
33- * 26/06/2000 - enhanced lv_extend_reduce for snapshot logical volume support
34+ * 26/06/2000 - enhanced lv_extend_reduce for snapshot logical volume
35+ * support
36 * 06/09/2000 - added devfs support
37 * 07/09/2000 - changed IOP version to 9
38 * - started to add new char ioctl LV_STATUS_BYDEV_T to support
39@@ -147,15 +149,24 @@
40 * 08/01/2001 - Removed conditional compiles related to PROC_FS,
41 * procfs is always supported now. (JT)
42 * 12/01/2001 - avoided flushing logical volume in case of shrinking
43- * because of unnecessary overhead in case of heavy updates
44+ * because of unecessary overhead in case of heavy updates
45 * 25/01/2001 - Allow RO open of an inactive LV so it can be reactivated.
46- * 31/01/2001 - If you try and BMAP a snapshot you now get an -EPERM
47- * 01/02/2001 - factored __remap_snapshot out of lvm_map
48+ * 31/01/2001 - removed blk_init_queue/blk_cleanup_queue queueing will be
49+ * handled by the proper devices.
50+ * - If you try and BMAP a snapshot you now get an -EPERM
51+ * 01/01/2001 - lvm_map() now calls buffer_IO_error on error for 2.4
52+ * - factored __remap_snapshot out of lvm_map
53 * 12/02/2001 - move devfs code to create VG before LVs
54- * 14/02/2001 - tidied device defines for blk.h
55+ * 13/02/2001 - allow VG_CREATE on /dev/lvm
56+ * 14/02/2001 - removed modversions.h
57+ * - tidied device defines for blk.h
58 * - tidied debug statements
59+ * - bug: vg[] member not set back to NULL if activation fails
60 * - more lvm_map tidying
61- * 14/02/2001 - bug: vg[] member not set back to NULL if activation fails
62+ * 15/02/2001 - register /dev/lvm with devfs correctly (major/minor
63+ * were swapped)
64+ * 19/02/2001 - preallocated buffer_heads for rawio when using
65+ * snapshots [JT]
66 * 28/02/2001 - introduced the P_DEV macro and changed some internel
67 * functions to be static [AD]
68 * 28/02/2001 - factored lvm_get_snapshot_use_rate out of blk_ioctl [AD]
69@@ -163,40 +174,67 @@
70 * where the check for an existing LV takes place right at
71 * the beginning
72 * 01/03/2001 - Add VG_CREATE_OLD for IOP 10 compatibility
73- * 02/03/2001 - Don't destroy usermode pointers in lv_t structures duing LV_
74- * STATUS_BYxxx and remove redundant lv_t variables from same.
75+ * 02/03/2001 - Don't destroy usermode pointers in lv_t structures duing
76+ * LV_STATUS_BYxxx
77+ * and remove redundant lv_t variables from same.
78+ * - avoid compilation of lvm_dummy_device_request in case of
79+ * Linux >= 2.3.0 to avoid a warning
80+ * - added lvm_name argument to printk in buffer allocation
81+ * in order to avoid a warning
82+ * 04/03/2001 - moved linux/version.h above first use of KERNEL_VERSION
83+ * macros
84 * 05/03/2001 - restore copying pe_t array in lvm_do_lv_status_byname. For
85 * lvdisplay -v (PC)
86 * - restore copying pe_t array in lvm_do_lv_status_byindex (HM)
87 * - added copying pe_t array in lvm_do_lv_status_bydev (HM)
88 * - enhanced lvm_do_lv_status_by{name,index,dev} to be capable
89 * to copy the lv_block_exception_t array to userspace (HM)
90- * 08/03/2001 - factored lvm_do_pv_flush out of lvm_chr_ioctl [HM]
91+ * 08/03/2001 - initialize new lv_ptr->lv_COW_table_iobuf for snapshots;
92+ * removed obsolete lv_ptr->lv_COW_table_page initialization
93+ * - factored lvm_do_pv_flush out of lvm_chr_ioctl (HM)
94 * 09/03/2001 - Added _lock_open_count to ensure we only drop the lock
95 * when the locking process closes.
96- * 05/04/2001 - lvm_map bugs: don't use b_blocknr/b_dev in lvm_map, it
97- * destroys stacking devices. call b_end_io on failed maps.
98- * (Jens Axboe)
99- * - Defer writes to an extent that is being moved [JT + AD]
100- * 28/05/2001 - implemented missing BLKSSZGET ioctl [AD]
101+ * 05/04/2001 - Defer writes to an extent that is being moved [JT]
102+ * 05/04/2001 - use b_rdev and b_rsector rather than b_dev and b_blocknr in
103+ * lvm_map() in order to make stacking devices more happy (HM)
104+ * 11/04/2001 - cleaned up the pvmove queue code. I no longer retain the
105+ * rw flag, instead WRITEA's are just dropped [JT]
106+ * 30/04/2001 - added KERNEL_VERSION > 2.4.3 get_hardsect_size() rather
107+ * than get_hardblocksize() call
108+ * 03/05/2001 - Use copy_to/from_user to preserve pointers in
109+ * lvm_do_status_by*
110+ * 11/05/2001 - avoid accesses to inactive snapshot data in
111+ * __update_hardsectsize() and lvm_do_lv_extend_reduce() (JW)
112+ * 28/05/2001 - implemented missing BLKSSZGET ioctl
113+ * 05/06/2001 - Move _pe_lock out of fast path for lvm_map when no PEs
114+ * locked. Make buffer queue flush not need locking.
115+ * Fix lvm_user_bmap() to set b_rsector for new lvm_map(). [AED]
116+ * 30/06/2001 - Speed up __update_hardsectsize() by checking if PVs have
117+ * the same hardsectsize (very likely) before scanning all LEs
118+ * in the LV each time. [AED]
119 *
120 */
121
122+#include <linux/version.h>
123
124 #define MAJOR_NR LVM_BLK_MAJOR
125 #define DEVICE_OFF(device)
126+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 0)
127+#define DEVICE_REQUEST lvm_dummy_device_request
128+#endif
129 #define LOCAL_END_REQUEST
130
131 /* lvm_do_lv_create calls fsync_dev_lockfs()/unlockfs() */
132 /* #define LVM_VFS_ENHANCEMENT */
133
134 #include <linux/config.h>
135-
136 #include <linux/module.h>
137-
138 #include <linux/kernel.h>
139 #include <linux/vmalloc.h>
140+
141+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 39)
142 #include <linux/slab.h>
143+#endif
144 #include <linux/init.h>
145
146 #include <linux/hdreg.h>
147@@ -206,6 +244,11 @@
148 #include <linux/blkdev.h>
149 #include <linux/genhd.h>
150 #include <linux/locks.h>
151+
152+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
153+#include <linux/iobuf.h>
154+#endif
155+
156 #include <linux/devfs_fs_kernel.h>
157 #include <linux/smp_lock.h>
158 #include <asm/ioctl.h>
159@@ -217,7 +260,9 @@
160 #endif
161
162 #include <linux/blk.h>
163+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
164 #include <linux/blkpg.h>
165+#endif
166
167 #include <linux/errno.h>
168 #include <linux/lvm.h>
169@@ -236,7 +281,11 @@
170 /*
171 * External function prototypes
172 */
173+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 42)
174 static int lvm_make_request_fn(request_queue_t*, int, struct buffer_head*);
175+#else
176+static int lvm_make_request_fn(struct buffer_head *bh, int rw);
177+#endif
178
179 static int lvm_blk_ioctl(struct inode *, struct file *, uint, ulong);
180 static int lvm_blk_open(struct inode *, struct file *);
181@@ -256,7 +305,11 @@
182 /*
183 * Internal function prototypes
184 */
185+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
186+void lvm_cleanup(void);
187+#else
188 static void lvm_cleanup(void);
189+#endif
190 static void lvm_init_vars(void);
191
192 #ifdef LVM_HD_NAME
193@@ -289,7 +342,16 @@
194 static int lvm_do_vg_remove(int);
195 static void lvm_geninit(struct gendisk *);
196 static void __update_hardsectsize(lv_t *lv);
197+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 4, 4)
198+#define lvm_sectsize(dev) get_hardblocksize(dev)
199+#else
200+#define lvm_sectsize(dev) get_hardsect_size(dev)
201+#endif
202
203+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
204+static struct buffer_head **__allocate_bheads(void);
205+static void __free_bheads(struct buffer_head **bheads);
206+#endif
207
208 static void _queue_io(struct buffer_head *bh, int rw);
209 static struct buffer_head *_dequeue_io(void);
210@@ -316,6 +378,11 @@
211 /* volume group descriptor area pointers */
212 vg_t *vg[ABS_MAX_VG];
213
214+static pv_t *pvp = NULL;
215+static lv_t *lvp = NULL;
216+static pe_t *pep = NULL;
217+
218+
219 /* map from block minor number to VG and LV numbers */
220 typedef struct {
221 int vg_number;
222@@ -341,7 +408,12 @@
223 static int _lock_open_count = 0;
224 static uint vg_count = 0;
225 static long lvm_chr_open_count = 0;
226+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
227 static DECLARE_WAIT_QUEUE_HEAD(lvm_wait);
228+#else
229+struct wait_queue *lvm_snapshot_wait = NULL;
230+struct wait_queue *lvm_wait = NULL;
231+#endif
232
233 static spinlock_t lvm_lock = SPIN_LOCK_UNLOCKED;
234 static spinlock_t lvm_snapshot_lock = SPIN_LOCK_UNLOCKED;
235@@ -356,14 +428,25 @@
236 ioctl: lvm_chr_ioctl,
237 };
238
239+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 38)
240+static struct file_operations lvm_blk_fops =
241+{
242+ open: lvm_blk_open,
243+ read: block_read,
244+ write: block_write,
245+ release: lvm_blk_close,
246+ ioctl: lvm_blk_ioctl,
247+ fsync: block_fsync,
248+};
249+#else
250 /* block device operations structure needed for 2.3.38? and above */
251 struct block_device_operations lvm_blk_dops =
252 {
253- owner: THIS_MODULE,
254- open: lvm_blk_open,
255+ open: lvm_blk_open,
256 release: lvm_blk_close,
257 ioctl: lvm_blk_ioctl,
258 };
259+#endif
260
261
262 /* gendisk structures */
263@@ -374,13 +457,22 @@
264
265 static struct gendisk lvm_gendisk =
266 {
267- major: MAJOR_NR,
268- major_name: LVM_NAME,
269- minor_shift: 0,
270- max_p: 1,
271- part: lvm_hd_struct,
272- sizes: lvm_size,
273- nr_real: MAX_LV,
274+ MAJOR_NR, /* major # */
275+ LVM_NAME, /* name of major */
276+ 0, /* number of times minor is shifted
277+ to get real minor */
278+ 1, /* maximum partitions per device */
279+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 40)
280+ MAX_LV, /* maximum number of real devices */
281+ lvm_geninit, /* initialization called before we
282+ do other things */
283+#endif
284+ lvm_hd_struct, /* partition table */
285+ lvm_size, /* device size in blocks, copied
286+ to block_size[] */
287+ MAX_LV, /* number or real devices */
288+ NULL, /* internal */
289+ NULL, /* pointer to next gendisk struct (internal) */
290 };
291
292 /*
293@@ -388,14 +480,19 @@
294 */
295 int lvm_init(void)
296 {
297+ struct gendisk *gendisk_ptr = NULL;
298+
299 if (devfs_register_chrdev(LVM_CHAR_MAJOR,
300 lvm_name, &lvm_chr_fops) < 0) {
301 printk(KERN_ERR "%s -- devfs_register_chrdev failed\n",
302 lvm_name);
303 return -EIO;
304 }
305-
306+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 37)
307 if (devfs_register_blkdev(MAJOR_NR, lvm_name, &lvm_blk_dops) < 0)
308+#else
309+ if (register_blkdev(MAJOR_NR, lvm_name, &lvm_blk_fops) < 0)
310+#endif
311 {
312 printk("%s -- devfs_register_blkdev failed\n", lvm_name);
313 if (devfs_unregister_chrdev(LVM_CHAR_MAJOR, lvm_name) < 0)
314@@ -409,15 +506,35 @@
315 lvm_init_vars();
316 lvm_geninit(&lvm_gendisk);
317
318- add_gendisk(&lvm_gendisk);
319+ /* insert our gendisk at the corresponding major */
320+ if (gendisk_head != NULL) {
321+ gendisk_ptr = gendisk_head;
322+ while (gendisk_ptr->next != NULL &&
323+ gendisk_ptr->major > lvm_gendisk.major) {
324+ gendisk_ptr = gendisk_ptr->next;
325+ }
326+ lvm_gendisk.next = gendisk_ptr->next;
327+ gendisk_ptr->next = &lvm_gendisk;
328+ } else {
329+ gendisk_head = &lvm_gendisk;
330+ lvm_gendisk.next = NULL;
331+ }
332
333 #ifdef LVM_HD_NAME
334 /* reference from drivers/block/genhd.c */
335 lvm_hd_name_ptr = lvm_hd_name;
336 #endif
337
338+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 42)
339 blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), lvm_make_request_fn);
340+#else
341+ blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
342+ blk_dev[MAJOR_NR].current_request = NULL;
343+#endif
344
345+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 0)
346+ blk_dev[MAJOR_NR].make_req_fn = lvm_make_request_fn;
347+#endif
348
349 /* initialise the pe lock */
350 pe_lock_req.lock = UNLOCK_PE;
351@@ -436,12 +553,18 @@
352 return 0;
353 } /* lvm_init() */
354
355-
356 /*
357 * cleanup...
358 */
359+
360+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 30)
361+void lvm_cleanup(void)
362+#else
363 static void lvm_cleanup(void)
364+#endif
365 {
366+ struct gendisk *gendisk_ptr = NULL, *gendisk_ptr_prev = NULL;
367+
368 if (devfs_unregister_chrdev(LVM_CHAR_MAJOR, lvm_name) < 0)
369 printk(KERN_ERR "%s -- devfs_unregister_chrdev failed\n",
370 lvm_name);
371@@ -449,7 +572,25 @@
372 printk(KERN_ERR "%s -- devfs_unregister_blkdev failed\n",
373 lvm_name);
374
375- del_gendisk(&lvm_gendisk);
376+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 30)
377+ blk_dev[MAJOR_NR].request_fn = NULL;
378+ blk_dev[MAJOR_NR].current_request = NULL;
379+#endif
380+
381+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 0)
382+ blk_dev[MAJOR_NR].make_req_fn = NULL;
383+#endif
384+
385+ gendisk_ptr = gendisk_ptr_prev = gendisk_head;
386+ while (gendisk_ptr != NULL) {
387+ if (gendisk_ptr == &lvm_gendisk)
388+ break;
389+ gendisk_ptr_prev = gendisk_ptr;
390+ gendisk_ptr = gendisk_ptr->next;
391+ }
392+ /* delete our gendisk from chain */
393+ if (gendisk_ptr == &lvm_gendisk)
394+ gendisk_ptr_prev->next = gendisk_ptr->next;
395
396 blk_size[MAJOR_NR] = NULL;
397 blksize_size[MAJOR_NR] = NULL;
398@@ -514,7 +655,7 @@
399 */
400 static int lvm_chr_open(struct inode *inode, struct file *file)
401 {
402- unsigned int minor = MINOR(inode->i_rdev);
403+ int minor = MINOR(inode->i_rdev);
404
405 P_DEV("chr_open MINOR: %d VG#: %d mode: %s%s lock: %d\n",
406 minor, VG_CHR(minor), MODE_TO_STR(file->f_mode), lock);
407@@ -525,10 +666,10 @@
408 /* Group special file open */
409 if (VG_CHR(minor) > MAX_VG) return -ENXIO;
410
411- spin_lock(&lvm_lock);
412- if(lock == current->pid)
413- _lock_open_count++;
414- spin_unlock(&lvm_lock);
415+ spin_lock(&lvm_lock);
416+ if(lock == current->pid)
417+ _lock_open_count++;
418+ spin_unlock(&lvm_lock);
419
420 lvm_chr_open_count++;
421
422@@ -610,8 +751,8 @@
423 /* create a VGDA */
424 return lvm_do_vg_create(arg, minor);
425
426- case VG_CREATE:
427- /* create a VGDA, assume VG number is filled in */
428+ case VG_CREATE:
429+ /* create a VGDA, assume VG number is filled in */
430 return lvm_do_vg_create(arg, -1);
431
432 case VG_EXTEND:
433@@ -734,7 +875,7 @@
434
435 case PV_FLUSH:
436 /* physical volume buffer flush/invalidate */
437- return lvm_do_pv_flush(arg);
438+ return lvm_do_pv_flush(arg);
439
440
441 default:
442@@ -765,16 +906,16 @@
443
444 if (lvm_chr_open_count > 0) lvm_chr_open_count--;
445
446- spin_lock(&lvm_lock);
447- if(lock == current->pid) {
448- if(!_lock_open_count) {
449+ spin_lock(&lvm_lock);
450+ if(lock == current->pid) {
451+ if(!_lock_open_count) {
452 P_DEV("chr_close: unlocking LVM for pid %d\n", lock);
453- lock = 0;
454- wake_up_interruptible(&lvm_wait);
455- } else
456- _lock_open_count--;
457+ lock = 0;
458+ wake_up_interruptible(&lvm_wait);
459+ } else
460+ _lock_open_count--;
461 }
462- spin_unlock(&lvm_lock);
463+ spin_unlock(&lvm_lock);
464
465 MOD_DEC_USE_COUNT;
466
467@@ -812,7 +953,7 @@
468 LV_BLK(minor) >= 0 &&
469 LV_BLK(minor) < vg_ptr->lv_max) {
470
471- /* Check parallel LV spindown (LV remove) */
472+ /* Check parallel LV spindown (LV remove) */
473 if (lv_ptr->lv_status & LV_SPINDOWN) return -EPERM;
474
475 /* Check inactive LV and open for read/write */
476@@ -826,6 +967,9 @@
477 (file->f_mode & FMODE_WRITE))
478 return -EACCES;
479
480+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 38)
481+ file->f_op = &lvm_blk_fops;
482+#endif
483
484 /* be sure to increment VG counter */
485 if (lv_ptr->lv_open == 0) vg_ptr->lv_open++;
486@@ -860,17 +1004,12 @@
487 switch (command) {
488 case BLKSSZGET:
489 /* get block device sector size as needed e.g. by fdisk */
490- return put_user(get_hardsect_size(inode->i_rdev), (int *) arg);
491+ return put_user(lvm_sectsize(inode->i_rdev), (int *) arg);
492
493 case BLKGETSIZE:
494 /* return device size */
495 P_IOCTL("BLKGETSIZE: %u\n", lv_ptr->lv_size);
496- if (put_user(lv_ptr->lv_size, (unsigned long *)arg))
497- return -EFAULT;
498- break;
499-
500- case BLKGETSIZE64:
501- if (put_user((u64)lv_ptr->lv_size << 9, (u64 *)arg))
502+ if (put_user(lv_ptr->lv_size, (long *)arg))
503 return -EFAULT;
504 break;
505
506@@ -960,7 +1099,9 @@
507 if(lv_ptr->lv_access & LV_SNAPSHOT)
508 return -EPERM;
509
510+ /* turn logical block into (dev_t, block). non privileged. */
511 return lvm_user_bmap(inode, (struct lv_bmap *) arg);
512+ break;
513
514 case LV_SET_ALLOCATION:
515 /* set allocation flags of a logical volume */
516@@ -994,6 +1135,7 @@
517 P_DEV("blk_close MINOR: %d VG#: %d LV#: %d\n",
518 minor, VG_BLK(minor), LV_BLK(minor));
519
520+ sync_dev(inode->i_rdev);
521 if (lv_ptr->lv_open == 1) vg_ptr->lv_open--;
522 lv_ptr->lv_open--;
523
524@@ -1048,15 +1190,14 @@
525 bh.b_blocknr = block;
526 bh.b_dev = bh.b_rdev = inode->i_rdev;
527 bh.b_size = lvm_get_blksize(bh.b_dev);
528- bh.b_rsector = block * (bh.b_size >> 9);
529+ bh.b_rsector = block * (bh.b_size >> 9);
530 if ((err=lvm_map(&bh, READ)) < 0) {
531 printk("lvm map failed: %d\n", err);
532 return -EINVAL;
533 }
534
535- return put_user(kdev_t_to_nr(bh.b_rdev), &user_result->lv_dev) ||
536- put_user(bh.b_rsector/(bh.b_size>>9), &user_result->lv_block) ?
537- -EFAULT : 0;
538+ return (put_user(kdev_t_to_nr(bh.b_rdev), &user_result->lv_dev) ||
539+ put_user(bh.b_rsector/(bh.b_size>>9), &user_result->lv_block));
540 }
541
542
543@@ -1065,7 +1206,7 @@
544 * (see init_module/lvm_init)
545 */
546 static void __remap_snapshot(kdev_t rdev, ulong rsector,
547- ulong pe_start, lv_t *lv, vg_t *vg) {
548+ ulong pe_start, lv_t *lv, vg_t *vg) {
549
550 /* copy a chunk from the origin to a snapshot device */
551 down_write(&lv->lv_lock);
552@@ -1122,6 +1263,7 @@
553 return 0;
554 }
555
556+
557 static int lvm_map(struct buffer_head *bh, int rw)
558 {
559 int minor = MINOR(bh->b_rdev);
560@@ -1245,7 +1387,7 @@
561 _remap_snapshot(rdev_map, rsector_map,
562 pe_start, snap, vg_this);
563 }
564- }
565+ }
566
567 out:
568 bh->b_rdev = rdev_map;
569@@ -1254,7 +1396,9 @@
570 return 1;
571
572 bad:
573+#if LINUX_VERSION_CODE >= KERNEL_VERSION ( 2, 4, 0)
574 buffer_IO_error(bh);
575+#endif
576 up_read(&lv->lv_lock);
577 return -1;
578 } /* lvm_map() */
579@@ -1284,14 +1428,34 @@
580 #endif
581
582
583+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 0)
584+/*
585+ * this one never should be called...
586+ */
587+static void lvm_dummy_device_request(void)
588+{
589+ printk(KERN_EMERG "%s -- oops, got lvm request for %s [sector: %lu]\n",
590+ lvm_name, kdevname(CURRENT->rq_dev), CURRENT->sector);
591+ return;
592+}
593+#endif
594+
595+
596 /*
597 * make request function
598 */
599+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 48)
600 static int lvm_make_request_fn(request_queue_t *q,
601 int rw,
602 struct buffer_head *bh) {
603 return (lvm_map(bh, rw) <= 0) ? 0 : 1;
604 }
605+#else
606+static int lvm_make_request_fn(struct buffer_head *bh, int rw) {
607+ int r = lvm_map(bh, rw);
608+ return (r <= 0) ? r : 1;
609+}
610+#endif
611
612
613 /********************************************************************
614@@ -1457,14 +1621,14 @@
615 return -EFAULT;
616 }
617
618- /* VG_CREATE now uses minor number in VG structure */
619- if (minor == -1) minor = vg_ptr->vg_number;
620+ /* VG_CREATE now uses minor number in VG structure */
621+ if (minor == -1) minor = vg_ptr->vg_number;
622
623 /* Validate it */
624- if (vg[VG_CHR(minor)] != NULL) {
625+ if (vg[VG_CHR(minor)] != NULL) {
626 P_IOCTL("lvm_do_vg_create ERROR: VG %d in use\n", minor);
627 kfree(vg_ptr);
628- return -EPERM;
629+ return -EPERM;
630 }
631
632 /* we are not that active so far... */
633@@ -1495,7 +1659,6 @@
634 /* get the physical volume structures */
635 vg_ptr->pv_act = vg_ptr->pv_cur = 0;
636 for (p = 0; p < vg_ptr->pv_max; p++) {
637- pv_t *pvp;
638 /* user space address */
639 if ((pvp = vg_ptr->pv[p]) != NULL) {
640 ret = lvm_do_pv_create(pvp, vg_ptr, p);
641@@ -1519,7 +1682,6 @@
642 /* get the logical volume structures */
643 vg_ptr->lv_cur = 0;
644 for (l = 0; l < vg_ptr->lv_max; l++) {
645- lv_t *lvp;
646 /* user space address */
647 if ((lvp = vg_ptr->lv[l]) != NULL) {
648 if (copy_from_user(&lv, lvp, sizeof(lv_t)) != 0) {
649@@ -1546,7 +1708,7 @@
650 /* Second path to correct snapshot logical volumes which are not
651 in place during first path above */
652 for (l = 0; l < ls; l++) {
653- lv_t *lvp = snap_lv_ptr[l];
654+ lvp = snap_lv_ptr[l];
655 if (copy_from_user(&lv, lvp, sizeof(lv_t)) != 0) {
656 lvm_do_vg_remove(minor);
657 return -EFAULT;
658@@ -1637,7 +1799,8 @@
659 lv_t *lv_ptr = NULL;
660 pv_t *pv_ptr = NULL;
661
662- if (vg_ptr == NULL) return -ENXIO;
663+ /* If the VG doesn't exist in the kernel then just exit */
664+ if (!vg_ptr) return 0;
665
666 if (copy_from_user(vg_name, arg, sizeof(vg_name)) != 0)
667 return -EFAULT;
668@@ -1797,30 +1960,56 @@
669 }
670
671
672-static void __update_hardsectsize(lv_t *lv) {
673- int le, e;
674- int max_hardsectsize = 0, hardsectsize;
675-
676- for (le = 0; le < lv->lv_allocated_le; le++) {
677- hardsectsize = get_hardsect_size(lv->lv_current_pe[le].dev);
678- if (hardsectsize == 0)
679- hardsectsize = 512;
680- if (hardsectsize > max_hardsectsize)
681- max_hardsectsize = hardsectsize;
682- }
683-
684- /* only perform this operation on active snapshots */
685- if ((lv->lv_access & LV_SNAPSHOT) &&
686- (lv->lv_status & LV_ACTIVE)) {
687- for (e = 0; e < lv->lv_remap_end; e++) {
688- hardsectsize = get_hardsect_size( lv->lv_block_exception[e].rdev_new);
689- if (hardsectsize == 0)
690- hardsectsize = 512;
691- if (hardsectsize > max_hardsectsize)
692+static void __update_hardsectsize(lv_t *lv)
693+{
694+ int max_hardsectsize = 0, hardsectsize = 0;
695+ int p;
696+
697+ /* Check PVs first to see if they all have same sector size */
698+ for (p = 0; p < lv->vg->pv_cur; p++) {
699+ pv_t *pv = lv->vg->pv[p];
700+ if (pv && (hardsectsize = lvm_sectsize(pv->pv_dev))) {
701+ if (max_hardsectsize == 0)
702+ max_hardsectsize = hardsectsize;
703+ else if (hardsectsize != max_hardsectsize) {
704+ P_DEV("%s PV[%d] (%s) sector size %d, not %d\n",
705+ lv->lv_name, p, kdevname(pv->pv_dev),
706+ hardsectsize, max_hardsectsize);
707+ break;
708+ }
709+ }
710+ }
711+
712+ /* PVs have different block size, need to check each LE sector size */
713+ if (hardsectsize != max_hardsectsize) {
714+ int le;
715+ for (le = 0; le < lv->lv_allocated_le; le++) {
716+ hardsectsize = lvm_sectsize(lv->lv_current_pe[le].dev);
717+ if (hardsectsize > max_hardsectsize) {
718+ P_DEV("%s LE[%d] (%s) blocksize %d not %d\n",
719+ lv->lv_name, le,
720+ kdevname(lv->lv_current_pe[le].dev),
721+ hardsectsize, max_hardsectsize);
722 max_hardsectsize = hardsectsize;
723+ }
724+ }
725+
726+ /* only perform this operation on active snapshots */
727+ if ((lv->lv_access & LV_SNAPSHOT) &&
728+ (lv->lv_status & LV_ACTIVE)) {
729+ int e;
730+ for (e = 0; e < lv->lv_remap_end; e++) {
731+ hardsectsize = lvm_sectsize(lv->lv_block_exception[e].rdev_new);
732+ if (hardsectsize > max_hardsectsize)
733+ max_hardsectsize = hardsectsize;
734+ }
735 }
736 }
737
738+ if (max_hardsectsize == 0)
739+ max_hardsectsize = SECTOR_SIZE;
740+ P_DEV("hardblocksize for LV %s is %d\n",
741+ kdevname(lv->lv_dev), max_hardsectsize);
742 lvm_hardsectsizes[MINOR(lv->lv_dev)] = max_hardsectsize;
743 }
744
745@@ -1834,7 +2023,6 @@
746 lv_block_exception_t *lvbe = lv->lv_block_exception;
747 vg_t *vg_ptr = vg[VG_CHR(minor)];
748 lv_t *lv_ptr = NULL;
749- pe_t *pep;
750
751 if (!(pep = lv->lv_current_pe))
752 return -EINVAL;
753@@ -1876,7 +2064,7 @@
754 lv_ptr->lv_snapshot_next = NULL;
755 lv_ptr->lv_block_exception = NULL;
756 lv_ptr->lv_iobuf = NULL;
757- lv_ptr->lv_COW_table_iobuf = NULL;
758+ lv_ptr->lv_COW_table_iobuf = NULL;
759 lv_ptr->lv_snapshot_hash_table = NULL;
760 lv_ptr->lv_snapshot_hash_table_size = 0;
761 lv_ptr->lv_snapshot_hash_mask = 0;
762@@ -1884,6 +2072,9 @@
763
764 lv_ptr->lv_snapshot_use_rate = 0;
765
766+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
767+ lv_ptr->bheads = 0;
768+#endif
769 vg_ptr->lv[l] = lv_ptr;
770
771 /* get the PE structures from user space if this
772@@ -1956,12 +2147,11 @@
773 LVM_SNAPSHOT_DROPPED_SECTOR)
774 {
775 printk(KERN_WARNING
776- "%s -- lvm_do_lv_create: snapshot has been dropped and will not be activated\n",
777+ "%s -- lvm_do_lv_create: snapshot has been dropped and will not be activated\n",
778 lvm_name);
779 activate = 0;
780 }
781
782-
783 /* point to the original logical volume */
784 lv_ptr = lv_ptr->lv_snapshot_org;
785
786@@ -1995,12 +2185,16 @@
787 lv_ptr->lv_block_exception[e].rsector_org, lv_ptr);
788 /* need to fill the COW exception table data
789 into the page for disk i/o */
790- if(lvm_snapshot_fill_COW_page(vg_ptr, lv_ptr)) {
791- kfree(lv_ptr);
792- vg_ptr->lv[l] = NULL;
793- return -EINVAL;
794- }
795+ if(lvm_snapshot_fill_COW_page(vg_ptr, lv_ptr)) {
796+ kfree(lv_ptr);
797+ vg_ptr->lv[l] = NULL;
798+ return -EINVAL;
799+ }
800+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
801 init_waitqueue_head(&lv_ptr->lv_snapshot_wait);
802+#else
803+ lv_ptr->lv_snapshot_wait = NULL;
804+#endif
805 } else {
806 kfree(lv_ptr);
807 vg_ptr->lv[l] = NULL;
808@@ -2022,6 +2216,7 @@
809 LVM_CORRECT_READ_AHEAD(lv_ptr->lv_read_ahead);
810 vg_ptr->lv_cur++;
811 lv_ptr->lv_status = lv_status_save;
812+ lv_ptr->vg = vg_ptr;
813
814 __update_hardsectsize(lv_ptr);
815
816@@ -2040,6 +2235,17 @@
817 org->lv_access |= LV_SNAPSHOT_ORG;
818 lv_ptr->lv_access &= ~LV_SNAPSHOT_ORG; /* this can only hide an userspace bug */
819
820+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
821+ /* allocate a set of buffer_heads for snapshot io */
822+ if(!org->bheads && !(org->bheads = __allocate_bheads())) {
823+ printk(KERN_CRIT "%s -- LV_CREATE: "
824+ "couldn't allocate buffer heads\n", lvm_name);
825+ /* FIXME: tidy this function and free the lv */
826+ up_write(&org->lv_lock);
827+ return -ENOMEM;
828+ }
829+#endif
830+
831 /* Link in the list of snapshot volumes */
832 for (last = org; last->lv_snapshot_next; last = last->lv_snapshot_next);
833 lv_ptr->lv_snapshot_prev = last;
834@@ -2064,11 +2270,7 @@
835 unlockfs(lv_ptr->lv_snapshot_org->lv_dev);
836 #endif
837
838- lv_ptr->vg = vg_ptr;
839-
840- lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de =
841- lvm_fs_create_lv(vg_ptr, lv_ptr);
842-
843+ lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de = lvm_fs_create_lv(vg_ptr, lv_ptr);
844 return 0;
845 } /* lvm_do_lv_create() */
846
847@@ -2126,6 +2328,13 @@
848 /* no more snapshots? */
849 if (!org->lv_snapshot_next) {
850 org->lv_access &= ~LV_SNAPSHOT_ORG;
851+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
852+ /* get rid of the buffer heads */
853+ if(org->bheads) {
854+ __free_bheads(org->bheads);
855+ org->bheads = 0;
856+ }
857+#endif
858 }
859 up_write(&org->lv_lock);
860
861@@ -2184,214 +2393,213 @@
862 * logical volume extend / reduce
863 */
864 static int __extend_reduce_snapshot(vg_t *vg_ptr, lv_t *old_lv, lv_t *new_lv) {
865- ulong size;
866- lv_block_exception_t *lvbe;
867+ ulong size;
868+ lv_block_exception_t *lvbe;
869
870- if (!new_lv->lv_block_exception)
871- return -ENXIO;
872+ if (!new_lv->lv_block_exception)
873+ return -ENXIO;
874
875- size = new_lv->lv_remap_end * sizeof(lv_block_exception_t);
876- if ((lvbe = vmalloc(size)) == NULL) {
877- printk(KERN_CRIT
878- "%s -- lvm_do_lv_extend_reduce: vmalloc "
879- "error LV_BLOCK_EXCEPTION of %lu Byte at line %d\n",
880- lvm_name, size, __LINE__);
881- return -ENOMEM;
882- }
883-
884- if ((new_lv->lv_remap_end > old_lv->lv_remap_end) &&
885- (copy_from_user(lvbe, new_lv->lv_block_exception, size))) {
886- vfree(lvbe);
887- return -EFAULT;
888- }
889- new_lv->lv_block_exception = lvbe;
890-
891- if (lvm_snapshot_alloc_hash_table(new_lv)) {
892- vfree(new_lv->lv_block_exception);
893- return -ENOMEM;
894- }
895+ size = new_lv->lv_remap_end * sizeof(lv_block_exception_t);
896+ if ((lvbe = vmalloc(size)) == NULL) {
897+ printk(KERN_CRIT
898+ "%s -- lvm_do_lv_extend_reduce: vmalloc "
899+ "error LV_BLOCK_EXCEPTION of %lu Byte at line %d\n",
900+ lvm_name, size, __LINE__);
901+ return -ENOMEM;
902+ }
903+
904+ if ((new_lv->lv_remap_end > old_lv->lv_remap_end) &&
905+ (copy_from_user(lvbe, new_lv->lv_block_exception, size))) {
906+ vfree(lvbe);
907+ return -EFAULT;
908+ }
909+ new_lv->lv_block_exception = lvbe;
910+
911+ if (lvm_snapshot_alloc_hash_table(new_lv)) {
912+ vfree(new_lv->lv_block_exception);
913+ return -ENOMEM;
914+ }
915
916- return 0;
917+ return 0;
918 }
919
920 static int __extend_reduce(vg_t *vg_ptr, lv_t *old_lv, lv_t *new_lv) {
921- ulong size, l, p, end;
922- pe_t *pe;
923+ ulong size, l, p, end;
924+ pe_t *pe;
925+
926+ /* allocate space for new pe structures */
927+ size = new_lv->lv_current_le * sizeof(pe_t);
928+ if ((pe = vmalloc(size)) == NULL) {
929+ printk(KERN_CRIT
930+ "%s -- lvm_do_lv_extend_reduce: "
931+ "vmalloc error LV_CURRENT_PE of %lu Byte at line %d\n",
932+ lvm_name, size, __LINE__);
933+ return -ENOMEM;
934+ }
935+
936+ /* get the PE structures from user space */
937+ if (copy_from_user(pe, new_lv->lv_current_pe, size)) {
938+ if(old_lv->lv_access & LV_SNAPSHOT)
939+ vfree(new_lv->lv_snapshot_hash_table);
940+ vfree(pe);
941+ return -EFAULT;
942+ }
943+
944+ new_lv->lv_current_pe = pe;
945+
946+ /* reduce allocation counters on PV(s) */
947+ for (l = 0; l < old_lv->lv_allocated_le; l++) {
948+ vg_ptr->pe_allocated--;
949+ for (p = 0; p < vg_ptr->pv_cur; p++) {
950+ if (vg_ptr->pv[p]->pv_dev ==
951+ old_lv->lv_current_pe[l].dev) {
952+ vg_ptr->pv[p]->pe_allocated--;
953+ break;
954+ }
955+ }
956+ }
957
958- /* allocate space for new pe structures */
959- size = new_lv->lv_current_le * sizeof(pe_t);
960- if ((pe = vmalloc(size)) == NULL) {
961- printk(KERN_CRIT
962- "%s -- lvm_do_lv_extend_reduce: "
963- "vmalloc error LV_CURRENT_PE of %lu Byte at line %d\n",
964- lvm_name, size, __LINE__);
965- return -ENOMEM;
966- }
967-
968- /* get the PE structures from user space */
969- if (copy_from_user(pe, new_lv->lv_current_pe, size)) {
970- if(old_lv->lv_access & LV_SNAPSHOT)
971- vfree(new_lv->lv_snapshot_hash_table);
972- vfree(pe);
973- return -EFAULT;
974- }
975-
976- new_lv->lv_current_pe = pe;
977-
978- /* reduce allocation counters on PV(s) */
979- for (l = 0; l < old_lv->lv_allocated_le; l++) {
980- vg_ptr->pe_allocated--;
981- for (p = 0; p < vg_ptr->pv_cur; p++) {
982- if (vg_ptr->pv[p]->pv_dev ==
983- old_lv->lv_current_pe[l].dev) {
984- vg_ptr->pv[p]->pe_allocated--;
985- break;
986- }
987- }
988- }
989-
990- /* extend the PE count in PVs */
991- for (l = 0; l < new_lv->lv_allocated_le; l++) {
992- vg_ptr->pe_allocated++;
993- for (p = 0; p < vg_ptr->pv_cur; p++) {
994- if (vg_ptr->pv[p]->pv_dev ==
995+ /* extend the PE count in PVs */
996+ for (l = 0; l < new_lv->lv_allocated_le; l++) {
997+ vg_ptr->pe_allocated++;
998+ for (p = 0; p < vg_ptr->pv_cur; p++) {
999+ if (vg_ptr->pv[p]->pv_dev ==
1000 new_lv->lv_current_pe[l].dev) {
1001- vg_ptr->pv[p]->pe_allocated++;
1002- break;
1003- }
1004- }
1005- }
1006-
1007- /* save availiable i/o statistic data */
1008- if (old_lv->lv_stripes < 2) { /* linear logical volume */
1009- end = min(old_lv->lv_current_le, new_lv->lv_current_le);
1010- for (l = 0; l < end; l++) {
1011- new_lv->lv_current_pe[l].reads +=
1012- old_lv->lv_current_pe[l].reads;
1013-
1014- new_lv->lv_current_pe[l].writes +=
1015- old_lv->lv_current_pe[l].writes;
1016- }
1017-
1018- } else { /* striped logical volume */
1019- uint i, j, source, dest, end, old_stripe_size, new_stripe_size;
1020-
1021- old_stripe_size = old_lv->lv_allocated_le / old_lv->lv_stripes;
1022- new_stripe_size = new_lv->lv_allocated_le / new_lv->lv_stripes;
1023- end = min(old_stripe_size, new_stripe_size);
1024-
1025- for (i = source = dest = 0;
1026- i < new_lv->lv_stripes; i++) {
1027- for (j = 0; j < end; j++) {
1028- new_lv->lv_current_pe[dest + j].reads +=
1029- old_lv->lv_current_pe[source + j].reads;
1030- new_lv->lv_current_pe[dest + j].writes +=
1031- old_lv->lv_current_pe[source + j].writes;
1032- }
1033- source += old_stripe_size;
1034- dest += new_stripe_size;
1035- }
1036- }
1037+ vg_ptr->pv[p]->pe_allocated++;
1038+ break;
1039+ }
1040+ }
1041+ }
1042
1043- return 0;
1044+ /* save availiable i/o statistic data */
1045+ if (old_lv->lv_stripes < 2) { /* linear logical volume */
1046+ end = min(old_lv->lv_current_le, new_lv->lv_current_le);
1047+ for (l = 0; l < end; l++) {
1048+ new_lv->lv_current_pe[l].reads +=
1049+ old_lv->lv_current_pe[l].reads;
1050+
1051+ new_lv->lv_current_pe[l].writes +=
1052+ old_lv->lv_current_pe[l].writes;
1053+ }
1054+
1055+ } else { /* striped logical volume */
1056+ uint i, j, source, dest, end, old_stripe_size, new_stripe_size;
1057+
1058+ old_stripe_size = old_lv->lv_allocated_le / old_lv->lv_stripes;
1059+ new_stripe_size = new_lv->lv_allocated_le / new_lv->lv_stripes;
1060+ end = min(old_stripe_size, new_stripe_size);
1061+
1062+ for (i = source = dest = 0; i < new_lv->lv_stripes; i++) {
1063+ for (j = 0; j < end; j++) {
1064+ new_lv->lv_current_pe[dest + j].reads +=
1065+ old_lv->lv_current_pe[source + j].reads;
1066+ new_lv->lv_current_pe[dest + j].writes +=
1067+ old_lv->lv_current_pe[source + j].writes;
1068+ }
1069+ source += old_stripe_size;
1070+ dest += new_stripe_size;
1071+ }
1072+ }
1073+
1074+ return 0;
1075 }
1076
1077 static int lvm_do_lv_extend_reduce(int minor, char *lv_name, lv_t *new_lv)
1078 {
1079- int r;
1080- ulong l, e, size;
1081- vg_t *vg_ptr = vg[VG_CHR(minor)];
1082- lv_t *old_lv;
1083- pe_t *pe;
1084-
1085- if ((pe = new_lv->lv_current_pe) == NULL)
1086- return -EINVAL;
1087-
1088- for (l = 0; l < vg_ptr->lv_max; l++)
1089- if (vg_ptr->lv[l] && !strcmp(vg_ptr->lv[l]->lv_name, lv_name))
1090- break;
1091+ int r;
1092+ ulong l, e, size;
1093+ vg_t *vg_ptr = vg[VG_CHR(minor)];
1094+ lv_t *old_lv;
1095+ pe_t *pe;
1096
1097- if (l == vg_ptr->lv_max)
1098- return -ENXIO;
1099+ if ((pe = new_lv->lv_current_pe) == NULL)
1100+ return -EINVAL;
1101+
1102+ for (l = 0; l < vg_ptr->lv_max; l++)
1103+ if (vg_ptr->lv[l] && !strcmp(vg_ptr->lv[l]->lv_name, lv_name))
1104+ break;
1105+
1106+ if (l == vg_ptr->lv_max)
1107+ return -ENXIO;
1108
1109- old_lv = vg_ptr->lv[l];
1110+ old_lv = vg_ptr->lv[l];
1111
1112 if (old_lv->lv_access & LV_SNAPSHOT) {
1113 /* only perform this operation on active snapshots */
1114 if (old_lv->lv_status & LV_ACTIVE)
1115- r = __extend_reduce_snapshot(vg_ptr, old_lv, new_lv);
1116- else
1117+ r = __extend_reduce_snapshot(vg_ptr, old_lv, new_lv);
1118+ else
1119 r = -EPERM;
1120
1121 } else
1122- r = __extend_reduce(vg_ptr, old_lv, new_lv);
1123+ r = __extend_reduce(vg_ptr, old_lv, new_lv);
1124
1125- if(r)
1126- return r;
1127+ if(r)
1128+ return r;
1129
1130- /* copy relevent fields */
1131+ /* copy relevent fields */
1132 down_write(&old_lv->lv_lock);
1133
1134- if(new_lv->lv_access & LV_SNAPSHOT) {
1135- size = (new_lv->lv_remap_end > old_lv->lv_remap_end) ?
1136- old_lv->lv_remap_ptr : new_lv->lv_remap_end;
1137- size *= sizeof(lv_block_exception_t);
1138- memcpy(new_lv->lv_block_exception,
1139- old_lv->lv_block_exception, size);
1140-
1141- old_lv->lv_remap_end = new_lv->lv_remap_end;
1142- old_lv->lv_block_exception = new_lv->lv_block_exception;
1143- old_lv->lv_snapshot_hash_table =
1144- new_lv->lv_snapshot_hash_table;
1145- old_lv->lv_snapshot_hash_table_size =
1146- new_lv->lv_snapshot_hash_table_size;
1147- old_lv->lv_snapshot_hash_mask =
1148- new_lv->lv_snapshot_hash_mask;
1149-
1150- for (e = 0; e < new_lv->lv_remap_ptr; e++)
1151- lvm_hash_link(new_lv->lv_block_exception + e,
1152- new_lv->lv_block_exception[e].rdev_org,
1153- new_lv->lv_block_exception[e].rsector_org,
1154- new_lv);
1155-
1156- } else {
1157-
1158- vfree(old_lv->lv_current_pe);
1159- vfree(old_lv->lv_snapshot_hash_table);
1160-
1161- old_lv->lv_size = new_lv->lv_size;
1162- old_lv->lv_allocated_le = new_lv->lv_allocated_le;
1163- old_lv->lv_current_le = new_lv->lv_current_le;
1164- old_lv->lv_current_pe = new_lv->lv_current_pe;
1165- lvm_gendisk.part[MINOR(old_lv->lv_dev)].nr_sects =
1166- old_lv->lv_size;
1167- lvm_size[MINOR(old_lv->lv_dev)] = old_lv->lv_size >> 1;
1168-
1169- if (old_lv->lv_access & LV_SNAPSHOT_ORG) {
1170- lv_t *snap;
1171- for(snap = old_lv->lv_snapshot_next; snap;
1172- snap = snap->lv_snapshot_next) {
1173+ if(new_lv->lv_access & LV_SNAPSHOT) {
1174+ size = (new_lv->lv_remap_end > old_lv->lv_remap_end) ?
1175+ old_lv->lv_remap_ptr : new_lv->lv_remap_end;
1176+ size *= sizeof(lv_block_exception_t);
1177+ memcpy(new_lv->lv_block_exception,
1178+ old_lv->lv_block_exception, size);
1179+
1180+ old_lv->lv_remap_end = new_lv->lv_remap_end;
1181+ old_lv->lv_block_exception = new_lv->lv_block_exception;
1182+ old_lv->lv_snapshot_hash_table =
1183+ new_lv->lv_snapshot_hash_table;
1184+ old_lv->lv_snapshot_hash_table_size =
1185+ new_lv->lv_snapshot_hash_table_size;
1186+ old_lv->lv_snapshot_hash_mask =
1187+ new_lv->lv_snapshot_hash_mask;
1188+
1189+ for (e = 0; e < new_lv->lv_remap_ptr; e++)
1190+ lvm_hash_link(new_lv->lv_block_exception + e,
1191+ new_lv->lv_block_exception[e].rdev_org,
1192+ new_lv->lv_block_exception[e].rsector_org,
1193+ new_lv);
1194+
1195+ } else {
1196+
1197+ vfree(old_lv->lv_current_pe);
1198+ vfree(old_lv->lv_snapshot_hash_table);
1199+
1200+ old_lv->lv_size = new_lv->lv_size;
1201+ old_lv->lv_allocated_le = new_lv->lv_allocated_le;
1202+ old_lv->lv_current_le = new_lv->lv_current_le;
1203+ old_lv->lv_current_pe = new_lv->lv_current_pe;
1204+ lvm_gendisk.part[MINOR(old_lv->lv_dev)].nr_sects =
1205+ old_lv->lv_size;
1206+ lvm_size[MINOR(old_lv->lv_dev)] = old_lv->lv_size >> 1;
1207+
1208+ if (old_lv->lv_access & LV_SNAPSHOT_ORG) {
1209+ lv_t *snap;
1210+ for(snap = old_lv->lv_snapshot_next; snap;
1211+ snap = snap->lv_snapshot_next) {
1212 down_write(&snap->lv_lock);
1213- snap->lv_current_pe = old_lv->lv_current_pe;
1214- snap->lv_allocated_le =
1215- old_lv->lv_allocated_le;
1216- snap->lv_current_le = old_lv->lv_current_le;
1217- snap->lv_size = old_lv->lv_size;
1218-
1219- lvm_gendisk.part[MINOR(snap->lv_dev)].nr_sects
1220- = old_lv->lv_size;
1221- lvm_size[MINOR(snap->lv_dev)] =
1222- old_lv->lv_size >> 1;
1223- __update_hardsectsize(snap);
1224+ snap->lv_current_pe = old_lv->lv_current_pe;
1225+ snap->lv_allocated_le =
1226+ old_lv->lv_allocated_le;
1227+ snap->lv_current_le = old_lv->lv_current_le;
1228+ snap->lv_size = old_lv->lv_size;
1229+
1230+ lvm_gendisk.part[MINOR(snap->lv_dev)].nr_sects
1231+ = old_lv->lv_size;
1232+ lvm_size[MINOR(snap->lv_dev)] =
1233+ old_lv->lv_size >> 1;
1234+ __update_hardsectsize(snap);
1235 up_write(&snap->lv_lock);
1236- }
1237- }
1238- }
1239+ }
1240+ }
1241+ }
1242
1243- __update_hardsectsize(old_lv);
1244+ __update_hardsectsize(old_lv);
1245 up_write(&old_lv->lv_lock);
1246
1247- return 0;
1248+ return 0;
1249 } /* lvm_do_lv_extend_reduce() */
1250
1251
1252@@ -2426,7 +2634,6 @@
1253 lv_ptr,
1254 sizeof(lv_t)) != 0)
1255 return -EFAULT;
1256-
1257 if (saved_ptr1 != NULL) {
1258 if (copy_to_user(saved_ptr1,
1259 lv_ptr->lv_current_pe,
1260@@ -2434,6 +2641,18 @@
1261 sizeof(pe_t)) != 0)
1262 return -EFAULT;
1263 }
1264+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0)
1265+ if (saved_ptr2 != NULL) {
1266+ if (copy_to_user(saved_ptr2,
1267+ lv_ptr->lv_block_exception,
1268+ lv_ptr->lv_remap_ptr *
1269+ sizeof(lv_block_exception_t)
1270+ ) != 0)
1271+ return -EFAULT;
1272+ }
1273+ if (copy_to_user(&lv_status_byname_req.lv->lv_block_exception, &saved_ptr2, sizeof(void*)) != 0)
1274+ return -EFAULT;
1275+#endif
1276 /* Restore usermode pointers */
1277 if (copy_to_user(&lv_status_byname_req.lv->lv_current_pe, &saved_ptr1, sizeof(void*)) != 0)
1278 return -EFAULT;
1279@@ -2461,9 +2680,6 @@
1280
1281 if (lv_status_byindex_req.lv == NULL)
1282 return -EINVAL;
1283- if (lv_status_byindex_req.lv_index <0 ||
1284- lv_status_byindex_req.lv_index >= MAX_LV)
1285- return -EINVAL;
1286 if ( ( lv_ptr = vg_ptr->lv[lv_status_byindex_req.lv_index]) == NULL)
1287 return -ENXIO;
1288
1289@@ -2482,6 +2698,18 @@
1290 sizeof(pe_t)) != 0)
1291 return -EFAULT;
1292 }
1293+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0)
1294+ if (saved_ptr2 != NULL) {
1295+ if (copy_to_user(saved_ptr2,
1296+ lv_ptr->lv_block_exception,
1297+ lv_ptr->lv_remap_ptr *
1298+ sizeof(lv_block_exception_t)
1299+ ) != 0)
1300+ return -EFAULT;
1301+ }
1302+ if (copy_to_user(&lv_status_byindex_req.lv->lv_block_exception, &saved_ptr2, sizeof(void *)) != 0)
1303+ return -EFAULT;
1304+#endif
1305
1306 /* Restore usermode pointers */
1307 if (copy_to_user(&lv_status_byindex_req.lv->lv_current_pe, &saved_ptr1, sizeof(void *)) != 0)
1308@@ -2529,6 +2757,18 @@
1309 sizeof(pe_t)) != 0)
1310 return -EFAULT;
1311 }
1312+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0)
1313+ if (saved_ptr2 != NULL) {
1314+ if (copy_to_user(saved_ptr2,
1315+ lv_ptr->lv_block_exception,
1316+ lv_ptr->lv_remap_ptr *
1317+ sizeof(lv_block_exception_t)
1318+ ) != 0)
1319+ return -EFAULT;
1320+ }
1321+ if (copy_to_user(&lv_status_bydev_req.lv->lv_block_exception, &saved_ptr2, sizeof(void *)) != 0)
1322+ return -EFAULT;
1323+#endif
1324 /* Restore usermode pointers */
1325 if (copy_to_user(&lv_status_bydev_req.lv->lv_current_pe, &saved_ptr1, sizeof(void *)) != 0)
1326 return -EFAULT;
1327@@ -2552,9 +2792,7 @@
1328 if (lv_ptr->lv_dev == lv->lv_dev)
1329 {
1330 lvm_fs_remove_lv(vg_ptr, lv_ptr);
1331- strncpy(lv_ptr->lv_name,
1332- lv_req->lv_name,
1333- NAME_LEN);
1334+ strncpy(lv_ptr->lv_name, lv_req->lv_name, NAME_LEN);
1335 lvm_fs_create_lv(vg_ptr, lv_ptr);
1336 break;
1337 }
1338@@ -2629,23 +2867,24 @@
1339 return -ENXIO;
1340 } /* lvm_do_pv_status() */
1341
1342+
1343 /*
1344 * character device support function flush and invalidate all buffers of a PV
1345 */
1346 static int lvm_do_pv_flush(void *arg)
1347 {
1348- pv_flush_req_t pv_flush_req;
1349+ pv_flush_req_t pv_flush_req;
1350
1351- if (copy_from_user(&pv_flush_req, arg,
1352- sizeof(pv_flush_req)) != 0)
1353- return -EFAULT;
1354+ if (copy_from_user(&pv_flush_req, arg, sizeof(pv_flush_req)) != 0)
1355+ return -EFAULT;
1356
1357- fsync_dev(pv_flush_req.pv_dev);
1358- invalidate_buffers(pv_flush_req.pv_dev);
1359+ fsync_dev(pv_flush_req.pv_dev);
1360+ invalidate_buffers(pv_flush_req.pv_dev);
1361
1362- return 0;
1363+ return 0;
1364 }
1365
1366+
1367 /*
1368 * support function initialize gendisk variables
1369 */
1370@@ -2670,10 +2909,43 @@
1371 return;
1372 } /* lvm_gen_init() */
1373
1374+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
1375+static struct buffer_head **__allocate_bheads(void) {
1376+ int i;
1377+ struct buffer_head *bh = 0, **bheads = 0;
1378+
1379+ if(!(bh = vmalloc(sizeof(*bh) * KIO_MAX_SECTORS)))
1380+ return 0;
1381+
1382+ if(!(bheads = vmalloc(sizeof(*bheads) * KIO_MAX_SECTORS))) {
1383+ vfree(bh);
1384+ return 0;
1385+ }
1386+
1387+ for(i = 0; i < KIO_MAX_SECTORS; i++)
1388+ bheads[i] = bh + i;
1389+ return bheads;
1390+}
1391+
1392+static void __free_bheads(struct buffer_head **bheads) {
1393+ vfree(bheads[0]);
1394+ vfree(bheads);
1395+}
1396+#endif
1397
1398
1399 /* Must have down_write(_pe_lock) when we enqueue buffers */
1400 static void _queue_io(struct buffer_head *bh, int rw) {
1401+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
1402+ if (rw == WRITEA) {
1403+ /*
1404+ * Discard write aheads (only 2.2 bdflush uses WRITEA, and
1405+ * it will retry any buffers with a WRITE again later).
1406+ */
1407+ bh->b_end_io(bh, buffer_uptodate(bh));
1408+ return;
1409+ }
1410+#endif
1411 if (bh->b_reqnext) BUG();
1412 bh->b_reqnext = _pe_requests;
1413 _pe_requests = bh;
1414@@ -2708,10 +2980,12 @@
1415 }
1416 }
1417
1418+
1419 /*
1420 * we must open the pv's before we use them
1421 */
1422 static int _open_pv(pv_t *pv) {
1423+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 0)
1424 int err;
1425 struct block_device *bd;
1426
1427@@ -2719,22 +2993,28 @@
1428 return -ENOMEM;
1429
1430 err = blkdev_get(bd, FMODE_READ|FMODE_WRITE, 0, BDEV_FILE);
1431- if (err)
1432+ if (err) {
1433+ bdput(bd);
1434 return err;
1435+ }
1436
1437 pv->bd = bd;
1438+#endif
1439 return 0;
1440 }
1441
1442 static void _close_pv(pv_t *pv) {
1443- if (pv) {
1444- struct block_device *bdev = pv->bd;
1445- pv->bd = NULL;
1446- if (bdev)
1447- blkdev_put(bdev, BDEV_FILE);
1448- }
1449+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 0)
1450+ if(!pv || !pv->bd)
1451+ return;
1452+
1453+ blkdev_put(pv->bd, BDEV_FILE);
1454+ bdput(pv->bd);
1455+ pv->bd = 0;
1456+#endif
1457 }
1458
1459+
1460 static unsigned long _sectors_to_k(unsigned long sect)
1461 {
1462 if(SECTOR_SIZE > 1024) {
1463@@ -2744,6 +3024,27 @@
1464 return sect / (1024 / SECTOR_SIZE);
1465 }
1466
1467+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
1468 module_init(lvm_init);
1469 module_exit(lvm_cleanup);
1470-MODULE_LICENSE("GPL");
1471+#else
1472+/*
1473+ * 2.2.18 has support for module_init so why aren't I using it ?
1474+ * 1) I don't want to have to regression test against older kernels.
1475+ * 2) It doesn't work; if I use module_init(lvm_init) and build lvm
1476+ * into the kernel, lvm_init doesn't get called. If I then leave in
1477+ * the lvm_init call in ll_rw_block.c, lvm_init gets called twice !
1478+ * Probably got a link flag wrong somewhere.
1479+ */
1480+
1481+#ifdef MODULE
1482+int __init init_module(void) {
1483+ return lvm_init();
1484+}
1485+
1486+void cleanup_module(void) {
1487+ lvm_cleanup();
1488+}
1489+#endif
1490+
1491+#endif
1492--- drivers/md/lvm-internal.h.org Sun Nov 11 18:09:32 2001
1493+++ drivers/md/lvm-internal.h Thu Sep 27 08:34:43 2001
1494@@ -1,5 +1,6 @@
1495+
1496 /*
1497- * kernel/lvm-internal.h
1498+ * kernel/lvm_internal.h
1499 *
1500 * Copyright (C) 2001 Sistina Software
1501 *
1502@@ -24,7 +25,9 @@
1503 /*
1504 * Changelog
1505 *
1506- * 05/01/2001:Joe Thornber - Factored this file out of lvm.c
1507+ * 05/01/2001 - Factored this file out of lvm.c (Joe Thornber)
1508+ * 11/01/2001 - Renamed lvm_internal and added declarations
1509+ * for lvm_fs.c stuff
1510 *
1511 */
1512
1513@@ -33,7 +36,7 @@
1514
1515 #include <linux/lvm.h>
1516
1517-#define _LVM_INTERNAL_H_VERSION "LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")"
1518+#define _LVM_INTERNAL_H_VERSION "LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")"
1519
1520 /* global variables, defined in lvm.c */
1521 extern char *lvm_version;
1522@@ -45,8 +48,30 @@
1523 extern vg_t *vg[];
1524 extern struct file_operations lvm_chr_fops;
1525
1526+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3, 38)
1527+extern struct file_operations lvm_blk_fops;
1528+#else
1529 extern struct block_device_operations lvm_blk_dops;
1530+#endif
1531+
1532+/* 2.4.8 had no global min/max macros, and 2.4.9's were flawed */
1533+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10)
1534+
1535+#undef min
1536+#define min(x,y) ({ \
1537+ const typeof(x) _x = (x); \
1538+ const typeof(y) _y = (y); \
1539+ (void) (&_x == &_y); \
1540+ _x < _y ? _x : _y; })
1541+
1542+#undef max
1543+#define max(x,y) ({ \
1544+ const typeof(x) _x = (x); \
1545+ const typeof(y) _y = (y); \
1546+ (void) (&_x == &_y); \
1547+ _x > _y ? _x : _y; })
1548
1549+#endif
1550
1551 /* debug macros */
1552 #ifdef DEBUG_IOCTL
1553--- drivers/md/lvm-snap.c.org Mon Nov 12 17:34:20 2001
1554+++ drivers/md/lvm-snap.c Thu Sep 27 08:34:43 2001
1555@@ -2,22 +2,22 @@
1556 * kernel/lvm-snap.c
1557 *
1558 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
1559- * Heinz Mauelshagen, Sistina Software (persistent snapshots)
1560+ * 2000 - 2001 Heinz Mauelshagen, Sistina Software
1561 *
1562 * LVM snapshot driver is free software; you can redistribute it and/or modify
1563 * it under the terms of the GNU General Public License as published by
1564 * the Free Software Foundation; either version 2, or (at your option)
1565 * any later version.
1566- *
1567+ *
1568 * LVM snapshot driver is distributed in the hope that it will be useful,
1569 * but WITHOUT ANY WARRANTY; without even the implied warranty of
1570 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1571 * GNU General Public License for more details.
1572- *
1573+ *
1574 * You should have received a copy of the GNU General Public License
1575 * along with GNU CC; see the file COPYING. If not, write to
1576 * the Free Software Foundation, 59 Temple Place - Suite 330,
1577- * Boston, MA 02111-1307, USA.
1578+ * Boston, MA 02111-1307, USA.
1579 *
1580 */
1581
1582@@ -28,6 +28,11 @@
1583 * 23/11/2000 - used cpu_to_le64 rather than my own macro
1584 * 25/01/2001 - Put LockPage back in
1585 * 01/02/2001 - A dropped snapshot is now set as inactive
1586+ * 14/02/2001 - tidied debug statements
1587+ * 19/02/2001 - changed rawio calls to pass in preallocated buffer_heads
1588+ * 26/02/2001 - introduced __brw_kiovec to remove a lot of conditional
1589+ * compiles.
1590+ * 07/03/2001 - fixed COW exception table not persistent on 2.2 (HM)
1591 * 12/03/2001 - lvm_pv_get_number changes:
1592 * o made it static
1593 * o renamed it to _pv_get_number
1594@@ -38,7 +43,6 @@
1595 */
1596
1597 #include <linux/kernel.h>
1598-#include <linux/module.h>
1599 #include <linux/vmalloc.h>
1600 #include <linux/blkdev.h>
1601 #include <linux/smp_lock.h>
1602@@ -46,22 +50,42 @@
1603 #include <linux/iobuf.h>
1604 #include <linux/lvm.h>
1605
1606+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3 ,0)
1607+#include <linux/pagemap.h>
1608+#endif
1609
1610 #include "lvm-internal.h"
1611
1612-static char *lvm_snap_version __attribute__ ((unused)) =
1613- "LVM "LVM_RELEASE_NAME" snapshot code ("LVM_RELEASE_DATE")\n";
1614+static char *lvm_snap_version __attribute__ ((unused)) = "LVM "LVM_RELEASE_NAME" snapshot code ("LVM_RELEASE_DATE")\n";
1615
1616+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3 ,0)
1617+#ifndef LockPage
1618+#define LockPage(map) set_bit(PG_locked, &(map)->flags)
1619+#endif
1620+#endif
1621
1622 extern const char *const lvm_name;
1623 extern int lvm_blocksizes[];
1624
1625 void lvm_snapshot_release(lv_t *);
1626+
1627 static int _write_COW_table_block(vg_t *vg, lv_t *lv, int idx,
1628- const char **reason);
1629+ const char **reason);
1630 static void _disable_snapshot(vg_t *vg, lv_t *lv);
1631
1632
1633+static inline int __brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
1634+ kdev_t dev, unsigned long b[], int size,
1635+ lv_t *lv) {
1636+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 0)
1637+ return brw_kiovec(rw, nr, iovec, dev, b, size);
1638+#else
1639+ return brw_kiovec_bh(rw, nr, iovec, dev, b, size, 0,
1640+ lv->lv_snapshot_org->bheads, KIO_MAX_SECTORS);
1641+#endif
1642+}
1643+
1644+
1645 static int _pv_get_number(vg_t * vg, kdev_t rdev, uint *pvn) {
1646 uint p;
1647 for(p = 0; p < vg->pv_max; p++) {
1648@@ -70,14 +94,13 @@
1649
1650 if(vg->pv[p]->pv_dev == rdev)
1651 break;
1652-
1653 }
1654
1655- if(p >= vg->pv_max) {
1656+ if(p >= vg->pv_max) {
1657 /* bad news, the snapshot COW table is probably corrupt */
1658 printk(KERN_ERR
1659 "%s -- _pv_get_number failed for rdev = %u\n",
1660- lvm_name, rdev);
1661+ lvm_name, rdev);
1662 return -1;
1663 }
1664
1665@@ -85,6 +108,7 @@
1666 return 0;
1667 }
1668
1669+
1670 #define hashfn(dev,block,mask,chunk_size) \
1671 ((HASHDEV(dev)^((block)/(chunk_size))) & (mask))
1672
1673@@ -166,8 +190,8 @@
1674 or error on this snapshot --> release it */
1675 invalidate_buffers(lv_snap->lv_dev);
1676
1677- /* wipe the snapshot since it's inconsistent now */
1678- _disable_snapshot(vg, lv_snap);
1679+ /* wipe the snapshot since it's inconsistent now */
1680+ _disable_snapshot(vg, lv_snap);
1681
1682 for (i = last_dev = 0; i < lv_snap->lv_remap_ptr; i++) {
1683 if ( lv_snap->lv_block_exception[i].rdev_new != last_dev) {
1684@@ -186,9 +210,9 @@
1685 }
1686
1687 static inline int lvm_snapshot_prepare_blocks(unsigned long *blocks,
1688- unsigned long start,
1689- int nr_sectors,
1690- int blocksize)
1691+ unsigned long start,
1692+ int nr_sectors,
1693+ int blocksize)
1694 {
1695 int i, sectors_per_block, nr_blocks;
1696
1697@@ -245,49 +269,48 @@
1698
1699 int lvm_snapshot_fill_COW_page(vg_t * vg, lv_t * lv_snap)
1700 {
1701- uint pvn;
1702- int id = 0, is = lv_snap->lv_remap_ptr;
1703- ulong blksize_snap;
1704- lv_COW_table_disk_t * lv_COW_table = (lv_COW_table_disk_t *)
1705- page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);
1706+ uint pvn;
1707+ int id = 0, is = lv_snap->lv_remap_ptr;
1708+ ulong blksize_snap;
1709+ lv_COW_table_disk_t * lv_COW_table = (lv_COW_table_disk_t *)
1710+ page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);
1711
1712- if (is == 0)
1713- return 0;
1714+ if (is == 0)
1715+ return 0;
1716
1717 is--;
1718 blksize_snap =
1719- lvm_get_blksize(lv_snap->lv_block_exception[is].rdev_new);
1720+ lvm_get_blksize(lv_snap->lv_block_exception[is].rdev_new);
1721 is -= is % (blksize_snap / sizeof(lv_COW_table_disk_t));
1722
1723 memset(lv_COW_table, 0, blksize_snap);
1724 for ( ; is < lv_snap->lv_remap_ptr; is++, id++) {
1725 /* store new COW_table entry */
1726- lv_block_exception_t *be = lv_snap->lv_block_exception + is;
1727- if(_pv_get_number(vg, be->rdev_org, &pvn))
1728- goto bad;
1729-
1730- lv_COW_table[id].pv_org_number = cpu_to_le64(pvn);
1731- lv_COW_table[id].pv_org_rsector = cpu_to_le64(be->rsector_org);
1732- if(_pv_get_number(vg, be->rdev_new, &pvn))
1733- goto bad;
1734-
1735- lv_COW_table[id].pv_snap_number = cpu_to_le64(pvn);
1736- lv_COW_table[id].pv_snap_rsector =
1737- cpu_to_le64(be->rsector_new);
1738+ lv_block_exception_t *be = lv_snap->lv_block_exception + is;
1739+ if(_pv_get_number(vg, be->rdev_org, &pvn))
1740+ goto bad;
1741+
1742+ lv_COW_table[id].pv_org_number = cpu_to_le64(pvn);
1743+ lv_COW_table[id].pv_org_rsector = cpu_to_le64(be->rsector_org);
1744+ if(_pv_get_number(vg, be->rdev_new, &pvn))
1745+ goto bad;
1746+
1747+ lv_COW_table[id].pv_snap_number = cpu_to_le64(pvn);
1748+ lv_COW_table[id].pv_snap_rsector =
1749+ cpu_to_le64(be->rsector_new);
1750 }
1751
1752- return 0;
1753+ return 0;
1754
1755 bad:
1756- printk(KERN_ERR "%s -- lvm_snapshot_fill_COW_page failed", lvm_name);
1757- return -1;
1758+ printk(KERN_ERR "%s -- lvm_snapshot_fill_COW_page failed", lvm_name);
1759+ return -1;
1760 }
1761
1762
1763 /*
1764 * writes a COW exception table sector to disk (HM)
1765 */
1766-
1767 int lvm_write_COW_table_block(vg_t * vg, lv_t *lv_snap)
1768 {
1769 int r;
1770@@ -316,6 +339,7 @@
1771 unsigned long org_start, snap_start, snap_phys_dev, virt_start, pe_off;
1772 int idx = lv_snap->lv_remap_ptr, chunk_size = lv_snap->lv_chunk_size;
1773 struct kiobuf * iobuf;
1774+ unsigned long blocks[KIO_MAX_SECTORS];
1775 int blksize_snap, blksize_org, min_blksize, max_blksize;
1776 int max_sectors, nr_sectors;
1777
1778@@ -363,20 +387,20 @@
1779
1780 iobuf->length = nr_sectors << 9;
1781
1782- if(!lvm_snapshot_prepare_blocks(iobuf->blocks, org_start,
1783+ if(!lvm_snapshot_prepare_blocks(blocks, org_start,
1784 nr_sectors, blksize_org))
1785 goto fail_prepare;
1786
1787- if (brw_kiovec(READ, 1, &iobuf, org_phys_dev,
1788- iobuf->blocks, blksize_org) != (nr_sectors<<9))
1789+ if (__brw_kiovec(READ, 1, &iobuf, org_phys_dev, blocks,
1790+ blksize_org, lv_snap) != (nr_sectors<<9))
1791 goto fail_raw_read;
1792
1793- if(!lvm_snapshot_prepare_blocks(iobuf->blocks, snap_start,
1794+ if(!lvm_snapshot_prepare_blocks(blocks, snap_start,
1795 nr_sectors, blksize_snap))
1796 goto fail_prepare;
1797
1798- if (brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev,
1799- iobuf->blocks, blksize_snap) != (nr_sectors<<9))
1800+ if (__brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev, blocks,
1801+ blksize_snap, lv_snap) != (nr_sectors<<9))
1802 goto fail_raw_write;
1803 }
1804
1805@@ -401,24 +425,24 @@
1806 return 0;
1807
1808 /* slow path */
1809- out:
1810+out:
1811 lvm_drop_snapshot(vg, lv_snap, reason);
1812 return 1;
1813
1814- fail_out_of_space:
1815+fail_out_of_space:
1816 reason = "out of space";
1817 goto out;
1818- fail_raw_read:
1819+fail_raw_read:
1820 reason = "read error";
1821 goto out;
1822- fail_raw_write:
1823+fail_raw_write:
1824 reason = "write error";
1825 goto out;
1826- fail_blksize:
1827+fail_blksize:
1828 reason = "blocksize error";
1829 goto out;
1830
1831- fail_prepare:
1832+fail_prepare:
1833 reason = "couldn't prepare kiovec blocks "
1834 "(start probably isn't block aligned)";
1835 goto out;
1836@@ -440,9 +464,17 @@
1837 {
1838 struct page * page;
1839
1840+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,27)
1841 page = alloc_page(GFP_KERNEL);
1842- if (!page)
1843- goto out;
1844+ if (!page) goto out;
1845+#else
1846+ {
1847+ unsigned long addr = __get_free_page(GFP_USER);
1848+ if (!addr) goto out;
1849+ iobuf->pagelist[i] = addr;
1850+ page = mem_map + MAP_NR(addr);
1851+ }
1852+#endif
1853
1854 iobuf->maplist[i] = page;
1855 LockPage(page);
1856@@ -451,7 +483,8 @@
1857 iobuf->offset = 0;
1858
1859 err = 0;
1860- out:
1861+
1862+out:
1863 return err;
1864 }
1865
1866@@ -515,7 +548,7 @@
1867 if (ret) goto out_free_kiovec;
1868
1869 ret = lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_COW_table_iobuf,
1870- PAGE_SIZE/SECTOR_SIZE);
1871+ PAGE_SIZE/SECTOR_SIZE);
1872 if (ret) goto out_free_both_kiovecs;
1873
1874 ret = lvm_snapshot_alloc_hash_table(lv_snap);
1875@@ -542,8 +575,6 @@
1876
1877 void lvm_snapshot_release(lv_t * lv)
1878 {
1879- int nbhs = KIO_MAX_SECTORS;
1880-
1881 if (lv->lv_block_exception)
1882 {
1883 vfree(lv->lv_block_exception);
1884@@ -557,17 +588,21 @@
1885 }
1886 if (lv->lv_iobuf)
1887 {
1888+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
1889 kiobuf_wait_for_io(lv->lv_iobuf);
1890+#endif
1891 unmap_kiobuf(lv->lv_iobuf);
1892 free_kiovec(1, &lv->lv_iobuf);
1893 lv->lv_iobuf = NULL;
1894 }
1895 if (lv->lv_COW_table_iobuf)
1896 {
1897- kiobuf_wait_for_io(lv->lv_COW_table_iobuf);
1898- unmap_kiobuf(lv->lv_COW_table_iobuf);
1899- free_kiovec(1, &lv->lv_COW_table_iobuf);
1900- lv->lv_COW_table_iobuf = NULL;
1901+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
1902+ kiobuf_wait_for_io(lv->lv_COW_table_iobuf);
1903+#endif
1904+ unmap_kiobuf(lv->lv_COW_table_iobuf);
1905+ free_kiovec(1, &lv->lv_COW_table_iobuf);
1906+ lv->lv_COW_table_iobuf = NULL;
1907 }
1908 }
1909
1910@@ -579,11 +614,11 @@
1911 int idx_COW_table;
1912 uint pvn;
1913 ulong snap_pe_start, COW_table_sector_offset,
1914- COW_entries_per_pe, COW_chunks_per_pe, COW_entries_per_block;
1915+ COW_entries_per_pe, COW_chunks_per_pe, COW_entries_per_block;
1916 ulong blocks[1];
1917 kdev_t snap_phys_dev;
1918 lv_block_exception_t *be;
1919- struct kiobuf * COW_table_iobuf = lv_snap->lv_COW_table_iobuf;
1920+ struct kiobuf *COW_table_iobuf = lv_snap->lv_COW_table_iobuf;
1921 lv_COW_table_disk_t * lv_COW_table =
1922 ( lv_COW_table_disk_t *) page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);
1923
1924@@ -601,39 +636,40 @@
1925
1926 if ( idx_COW_table == 0) memset(lv_COW_table, 0, blksize_snap);
1927
1928- /* sector offset into the on disk COW table */
1929+ /* sector offset into the on disk COW table */
1930 COW_table_sector_offset = (idx % COW_entries_per_pe) / (SECTOR_SIZE / sizeof(lv_COW_table_disk_t));
1931
1932 /* COW table block to write next */
1933 blocks[0] = (snap_pe_start + COW_table_sector_offset) >> (blksize_snap >> 10);
1934
1935 /* store new COW_table entry */
1936- be = lv_snap->lv_block_exception + idx;
1937- if(_pv_get_number(vg, be->rdev_org, &pvn))
1938- goto fail_pv_get_number;
1939-
1940- lv_COW_table[idx_COW_table].pv_org_number = cpu_to_le64(pvn);
1941- lv_COW_table[idx_COW_table].pv_org_rsector =
1942- cpu_to_le64(be->rsector_org);
1943- if(_pv_get_number(vg, snap_phys_dev, &pvn))
1944- goto fail_pv_get_number;
1945-
1946- lv_COW_table[idx_COW_table].pv_snap_number = cpu_to_le64(pvn);
1947- lv_COW_table[idx_COW_table].pv_snap_rsector =
1948- cpu_to_le64(be->rsector_new);
1949+ be = lv_snap->lv_block_exception + idx;
1950+ if(_pv_get_number(vg, be->rdev_org, &pvn))
1951+ goto fail_pv_get_number;
1952+
1953+ lv_COW_table[idx_COW_table].pv_org_number = cpu_to_le64(pvn);
1954+ lv_COW_table[idx_COW_table].pv_org_rsector =
1955+ cpu_to_le64(be->rsector_org);
1956+ if(_pv_get_number(vg, snap_phys_dev, &pvn))
1957+ goto fail_pv_get_number;
1958+
1959+ lv_COW_table[idx_COW_table].pv_snap_number = cpu_to_le64(pvn);
1960+ lv_COW_table[idx_COW_table].pv_snap_rsector =
1961+ cpu_to_le64(be->rsector_new);
1962
1963 COW_table_iobuf->length = blksize_snap;
1964+ /* COW_table_iobuf->nr_pages = 1; */
1965
1966- if (brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
1967- blocks, blksize_snap) != blksize_snap)
1968+ if (__brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
1969+ blocks, blksize_snap, lv_snap) != blksize_snap)
1970 goto fail_raw_write;
1971
1972- /* initialization of next COW exception table block with zeroes */
1973+ /* initialization of next COW exception table block with zeroes */
1974 end_of_table = idx % COW_entries_per_pe == COW_entries_per_pe - 1;
1975 if (idx_COW_table % COW_entries_per_block == COW_entries_per_block - 1 || end_of_table)
1976 {
1977 /* don't go beyond the end */
1978- if (idx + 1 >= lv_snap->lv_remap_end) goto out;
1979+ if (idx + 1 >= lv_snap->lv_remap_end) goto out;
1980
1981 memset(lv_COW_table, 0, blksize_snap);
1982
1983@@ -646,20 +682,20 @@
1984 blocks[0] = snap_pe_start >> (blksize_snap >> 10);
1985 } else blocks[0]++;
1986
1987- if (brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
1988- blocks, blksize_snap) !=
1989+ if (__brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
1990+ blocks, blksize_snap, lv_snap) !=
1991 blksize_snap)
1992 goto fail_raw_write;
1993 }
1994
1995- out:
1996+out:
1997 return 0;
1998
1999- fail_raw_write:
2000+fail_raw_write:
2001 *reason = "write error";
2002 return 1;
2003
2004- fail_pv_get_number:
2005+fail_pv_get_number:
2006 *reason = "_pv_get_number failed";
2007 return 1;
2008 }
2009@@ -683,5 +719,3 @@
2010 lvm_name, err);
2011 }
2012 }
2013-
2014-MODULE_LICENSE("GPL");
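
The snapshot hunks above fill each on-disk COW exception table entry with four values: the physical volume number and start sector of the origin chunk, and the physical volume number and start sector of its copy on the snapshot, each converted with cpu_to_le64() before being written out. A minimal user-space sketch of that record, assuming (as those conversions suggest) four 64-bit little-endian fields; htole64() stands in for the kernel's cpu_to_le64(), and struct cow_disk_entry is a hypothetical stand-in for lv_COW_table_disk_t:

#define _DEFAULT_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <endian.h>

struct cow_disk_entry {			/* stand-in for lv_COW_table_disk_t */
	uint64_t pv_org_number;		/* PV holding the origin chunk */
	uint64_t pv_org_rsector;	/* origin chunk start sector */
	uint64_t pv_snap_number;	/* PV holding the snapshot copy */
	uint64_t pv_snap_rsector;	/* snapshot chunk start sector */
};

int main(void)
{
	struct cow_disk_entry e = {
		.pv_org_number   = htole64(1),
		.pv_org_rsector  = htole64(2048),
		.pv_snap_number  = htole64(2),
		.pv_snap_rsector = htole64(4096),
	};

	/* With four 64-bit fields, a 1 KB COW-table block holds
	 * 1024 / 32 = 32 entries. */
	printf("entries per 1K block: %zu\n", 1024 / sizeof(e));
	return 0;
}
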
2015--- drivers/md/lvm-fs.c.org Sun Nov 11 18:09:32 2001
2016+++ drivers/md/lvm-fs.c Tue Sep 4 10:40:17 2001
2017@@ -3,7 +3,7 @@
2018 *
2019 * Copyright (C) 2001 Sistina Software
2020 *
2021- * January,February 2001
2022+ * January-April 2001
2023 *
2024 * LVM driver is free software; you can redistribute it and/or modify
2025 * it under the terms of the GNU General Public License as published by
2026@@ -35,7 +35,6 @@
2027
2028 #include <linux/config.h>
2029 #include <linux/version.h>
2030-#include <linux/module.h>
2031
2032 #include <linux/kernel.h>
2033 #include <linux/vmalloc.h>
2034@@ -64,12 +63,14 @@
2035
2036 static void _show_uuid(const char *src, char *b, char *e);
2037
2038+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2039 #if 0
2040 static devfs_handle_t lvm_devfs_handle;
2041 #endif
2042 static devfs_handle_t vg_devfs_handle[MAX_VG];
2043 static devfs_handle_t ch_devfs_handle[MAX_VG];
2044 static devfs_handle_t lv_devfs_handle[MAX_LV];
2045+#endif
2046
2047 static struct proc_dir_entry *lvm_proc_dir = NULL;
2048 static struct proc_dir_entry *lvm_proc_vg_subdir = NULL;
2049@@ -82,12 +83,13 @@
2050
2051 /* User-space has already registered this */
2052 #if 0
2053+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2054 lvm_devfs_handle = devfs_register(
2055 0 , "lvm", 0, LVM_CHAR_MAJOR, 0,
2056 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
2057 &lvm_chr_fops, NULL);
2058 #endif
2059-
2060+#endif
2061 lvm_proc_dir = create_proc_entry(LVM_DIR, S_IFDIR, &proc_root);
2062 if (lvm_proc_dir) {
2063 lvm_proc_vg_subdir = create_proc_entry(LVM_VG_SUBDIR, S_IFDIR,
2064@@ -99,9 +101,10 @@
2065
2066 void lvm_fin_fs() {
2067 #if 0
2068+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2069 devfs_unregister (lvm_devfs_handle);
2070 #endif
2071-
2072+#endif
2073 remove_proc_entry(LVM_GLOBAL, lvm_proc_dir);
2074 remove_proc_entry(LVM_VG_SUBDIR, lvm_proc_dir);
2075 remove_proc_entry(LVM_DIR, &proc_root);
2076@@ -110,6 +113,7 @@
2077 void lvm_fs_create_vg(vg_t *vg_ptr) {
2078 struct proc_dir_entry *pde;
2079
2080+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2081 vg_devfs_handle[vg_ptr->vg_number] =
2082 devfs_mk_dir(0, vg_ptr->vg_name, NULL);
2083
2084@@ -118,6 +122,7 @@
2085 DEVFS_FL_DEFAULT, LVM_CHAR_MAJOR, vg_ptr->vg_number,
2086 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
2087 &lvm_chr_fops, NULL);
2088+#endif
2089
2090 vg_ptr->vg_dir_pde = create_proc_entry(vg_ptr->vg_name, S_IFDIR,
2091 lvm_proc_vg_subdir);
2092@@ -137,8 +142,10 @@
2093 void lvm_fs_remove_vg(vg_t *vg_ptr) {
2094 int i;
2095
2096+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2097 devfs_unregister(ch_devfs_handle[vg_ptr->vg_number]);
2098 devfs_unregister(vg_devfs_handle[vg_ptr->vg_number]);
2099+#endif
2100
2101 /* remove lv's */
2102 for(i = 0; i < vg_ptr->lv_max; i++)
2103@@ -173,11 +180,13 @@
2104 struct proc_dir_entry *pde;
2105 const char *name = _basename(lv->lv_name);
2106
2107+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2108 lv_devfs_handle[MINOR(lv->lv_dev)] = devfs_register(
2109 vg_devfs_handle[vg_ptr->vg_number], name,
2110 DEVFS_FL_DEFAULT, LVM_BLK_MAJOR, MINOR(lv->lv_dev),
2111 S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,
2112 &lvm_blk_dops, NULL);
2113+#endif
2114
2115 if(vg_ptr->lv_subdir_pde &&
2116 (pde = create_proc_entry(name, S_IFREG, vg_ptr->lv_subdir_pde))) {
2117@@ -188,7 +197,9 @@
2118 }
2119
2120 void lvm_fs_remove_lv(vg_t *vg_ptr, lv_t *lv) {
2121+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 46)
2122 devfs_unregister(lv_devfs_handle[MINOR(lv->lv_dev)]);
2123+#endif
2124
2125 if(vg_ptr->lv_subdir_pde) {
2126 const char *name = _basename(lv->lv_name);
2127@@ -276,12 +287,12 @@
2128 sz += sprintf(page + sz, "number: %u\n", lv->lv_number);
2129 sz += sprintf(page + sz, "open: %u\n", lv->lv_open);
2130 sz += sprintf(page + sz, "allocation: %u\n", lv->lv_allocation);
2131- if(lv->lv_stripes > 1) {
2132- sz += sprintf(page + sz, "stripes: %u\n",
2133- lv->lv_stripes);
2134- sz += sprintf(page + sz, "stripesize: %u\n",
2135- lv->lv_stripesize);
2136- }
2137+ if(lv->lv_stripes > 1) {
2138+ sz += sprintf(page + sz, "stripes: %u\n",
2139+ lv->lv_stripes);
2140+ sz += sprintf(page + sz, "stripesize: %u\n",
2141+ lv->lv_stripesize);
2142+ }
2143 sz += sprintf(page + sz, "device: %02u:%02u\n",
2144 MAJOR(lv->lv_dev), MINOR(lv->lv_dev));
2145
2146@@ -330,8 +341,8 @@
2147
2148 #ifdef DEBUG_LVM_PROC_GET_INFO
2149 printk(KERN_DEBUG
2150- "%s - lvm_proc_get_global_info CALLED pos: %lu count: %d\n",
2151- lvm_name, pos, count);
2152+ "%s - lvm_proc_get_global_info CALLED pos: %lu count: %d whence: %d\n",
2153+ lvm_name, pos, count, whence);
2154 #endif
2155
2156 if(pos != 0 && buf != NULL)
2157@@ -620,4 +631,3 @@
2158 }
2159 *b = '\0';
2160 }
2161-MODULE_LICENSE("GPL");
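
The lvm-fs.c hunks above wrap every devfs call in "#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,46)" guards so the same file still compiles on devfs-less 2.2 kernels. A small stand-alone sketch of how that guard evaluates, with KERNEL_VERSION() reproduced in its usual form so the example runs without kernel headers (the 2.2.20 version below is just a hypothetical comparison point):

#include <stdio.h>

#define KERNEL_VERSION(a, b, c)	(((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	unsigned int cutoff  = KERNEL_VERSION(2, 3, 46);	/* guard used in the hunks above */
	unsigned int v2_4_16 = KERNEL_VERSION(2, 4, 16);	/* kernel this patch targets */
	unsigned int v2_2_20 = KERNEL_VERSION(2, 2, 20);	/* older, devfs-less kernel */

	printf("2.4.16 compiles the devfs calls: %s\n", v2_4_16 > cutoff ? "yes" : "no");
	printf("2.2.20 compiles the devfs calls: %s\n", v2_2_20 > cutoff ? "yes" : "no");
	return 0;
}
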
2162--- include/linux/lvm.h.org Sun Nov 11 18:09:32 2001
2163+++ include/linux/lvm.h Wed Oct 3 14:46:47 2001
2164@@ -3,28 +3,28 @@
2165 * kernel/lvm.h
2166 * tools/lib/lvm.h
2167 *
2168- * Copyright (C) 1997 - 2000 Heinz Mauelshagen, Sistina Software
2169+ * Copyright (C) 1997 - 2001 Heinz Mauelshagen, Sistina Software
2170 *
2171 * February-November 1997
2172 * May-July 1998
2173 * January-March,July,September,October,Dezember 1999
2174 * January,February,July,November 2000
2175- * January 2001
2176+ * January-March,June,July 2001
2177 *
2178 * lvm is free software; you can redistribute it and/or modify
2179 * it under the terms of the GNU General Public License as published by
2180 * the Free Software Foundation; either version 2, or (at your option)
2181 * any later version.
2182- *
2183+ *
2184 * lvm is distributed in the hope that it will be useful,
2185 * but WITHOUT ANY WARRANTY; without even the implied warranty of
2186 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2187 * GNU General Public License for more details.
2188- *
2189+ *
2190 * You should have received a copy of the GNU General Public License
2191 * along with GNU CC; see the file COPYING. If not, write to
2192 * the Free Software Foundation, 59 Temple Place - Suite 330,
2193- * Boston, MA 02111-1307, USA.
2194+ * Boston, MA 02111-1307, USA.
2195 *
2196 */
2197
2198@@ -52,8 +52,7 @@
2199 * 08/12/1999 - changed LVM_LV_SIZE_MAX macro to reflect current 1TB limit
2200 * 01/01/2000 - extended lv_v2 core structure by wait_queue member
2201 * 12/02/2000 - integrated Andrea Arcagnelli's snapshot work
2202- * 14/02/2001 - changed LVM_SNAPSHOT_MIN_CHUNK to 1 page
2203- * 18/02/2000 - seperated user and kernel space parts by
2204+ * 18/02/2000 - seperated user and kernel space parts by
2205 * #ifdef them with __KERNEL__
2206 * 08/03/2000 - implemented cluster/shared bits for vg_access
2207 * 26/06/2000 - implemented snapshot persistency and resizing support
2208@@ -61,11 +60,17 @@
2209 * 12/11/2000 - removed unneeded timestamp definitions
2210 * 24/12/2000 - removed LVM_TO_{CORE,DISK}*, use cpu_{from, to}_le*
2211 * instead - Christoph Hellwig
2212- * 01/03/2001 - Rename VG_CREATE to VG_CREATE_OLD and add new VG_CREATE
2213+ * 22/01/2001 - Change ulong to uint32_t
2214+ * 14/02/2001 - changed LVM_SNAPSHOT_MIN_CHUNK to 1 page
2215+ * 20/02/2001 - incremented IOP version to 11 because of incompatible
2216+ * change in VG activation (in order to support devfs better)
2217+ * 01/03/2001 - Revert to IOP10 and add VG_CREATE_OLD call for compatibility
2218 * 08/03/2001 - new lv_t (in core) version number 5: changed page member
2219 * to (struct kiobuf *) to use for COW exception table io
2220- * 23/03/2001 - Change a (presumably) mistyped pv_t* to an lv_t*
2221- * 26/03/2001 - changed lv_v4 to lv_v5 in structure definition [HM]
2222+ * 26/03/2001 - changed lv_v4 to lv_v5 in structure definition (HM)
2223+ * 21/06/2001 - changed BLOCK_SIZE back to 1024 for non S/390
2224+ * 22/06/2001 - added Andreas Dilger's PE on 4k boundary alignment enhancements
2225+ * 19/07/2001 - added rwsem compatibility macros for 2.2 kernels
2226 *
2227 */
2228
2229@@ -73,10 +78,10 @@
2230 #ifndef _LVM_H_INCLUDE
2231 #define _LVM_H_INCLUDE
2232
2233-#define LVM_RELEASE_NAME "1.0.1-rc4(ish)"
2234+#define LVM_RELEASE_NAME "1.0.1-rc4"
2235 #define LVM_RELEASE_DATE "03/10/2001"
2236
2237-#define _LVM_KERNEL_H_VERSION "LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")"
2238+#define _LVM_KERNEL_H_VERSION "LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")"
2239
2240 #include <linux/version.h>
2241
2242@@ -98,23 +103,43 @@
2243 #define DEBUG_READ
2244 #define DEBUG_GENDISK
2245 #define DEBUG_VG_CREATE
2246- #define DEBUG_LVM_BLK_OPEN
2247+ #define DEBUG_DEVICE
2248 #define DEBUG_KFREE
2249 */
2250-#endif /* #ifdef __KERNEL__ */
2251
2252 #include <linux/kdev_t.h>
2253 #include <linux/list.h>
2254+#else
2255+#define __KERNEL__
2256+#include <linux/kdev_t.h>
2257+#include <linux/list.h>
2258+#undef __KERNEL__
2259+#endif /* #ifndef __KERNEL__ */
2260
2261 #include <asm/types.h>
2262 #include <linux/major.h>
2263
2264 #ifdef __KERNEL__
2265+#if LINUX_VERSION_CODE >= KERNEL_VERSION ( 2, 3 ,0)
2266 #include <linux/spinlock.h>
2267+#else
2268+#include <asm/spinlock.h>
2269+#endif
2270
2271 #include <asm/semaphore.h>
2272 #endif /* #ifdef __KERNEL__ */
2273
2274+#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 3 ,0)
2275+/* Compatibility macros for 2.2 */
2276+#define rw_semaphore semaphore
2277+#define init_rwsem init_MUTEX
2278+#define down_read down
2279+#define down_write down
2280+#define up_read up
2281+#define up_write up
2282+#define DECLARE_RWSEM DECLARE_MUTEX
2283+#endif
2284+
2285 #include <asm/page.h>
2286
2287 #if !defined ( LVM_BLK_MAJOR) || !defined ( LVM_CHAR_MAJOR)
2288@@ -125,7 +150,7 @@
2289 #undef BLOCK_SIZE
2290 #endif
2291
2292-#ifdef CONFIG_ARCH_S390
2293+#ifdef CONFIG_ARCH_S390
2294 #define BLOCK_SIZE 4096
2295 #else
2296 #define BLOCK_SIZE 1024
2297@@ -189,6 +214,38 @@
2298
2299
2300 /*
2301+ * VGDA: default disk spaces and offsets
2302+ *
2303+ * there's space after the structures for later extensions.
2304+ *
2305+ * offset what size
2306+ * --------------- ---------------------------------- ------------
2307+ * 0 physical volume structure ~500 byte
2308+ *
2309+ * 1K volume group structure ~200 byte
2310+ *
2311+ * 6K namelist of physical volumes 128 byte each
2312+ *
2313+ * 6k + n * ~300byte n logical volume structures ~300 byte each
2314+ *
2315+ * + m * 4byte m physical extent alloc. structs 4 byte each
2316+ *
2317+ * End of disk - first physical extent typically 4 megabyte
2318+ * PE total *
2319+ * PE size
2320+ *
2321+ *
2322+ */
2323+
2324+/* DONT TOUCH THESE !!! */
2325+
2326+
2327+
2328+
2329+
2330+
2331+
2332+/*
2333 * LVM_PE_T_MAX corresponds to:
2334 *
2335 * 8KB PE size can map a ~512 MB logical volume at the cost of 1MB memory,
2336@@ -298,7 +355,12 @@
2337 #endif
2338
2339 /* lock the logical volume manager */
2340+#if LVM_DRIVER_IOP_VERSION > 11
2341+#define LVM_LOCK_LVM _IO ( 0xfe, 0x9A)
2342+#else
2343+/* This is actually the same as _IO ( 0xff, 0x00), oops. Remove for IOP 12+ */
2344 #define LVM_LOCK_LVM _IO ( 0xfe, 0x100)
2345+#endif
2346 /* END ioctls */
2347
2348
2349@@ -495,9 +557,9 @@
2350 uint lv_read_ahead;
2351
2352 /* delta to version 1 starts here */
2353- struct lv_v5 *lv_snapshot_org;
2354- struct lv_v5 *lv_snapshot_prev;
2355- struct lv_v5 *lv_snapshot_next;
2356+ struct lv_v5 *lv_snapshot_org;
2357+ struct lv_v5 *lv_snapshot_prev;
2358+ struct lv_v5 *lv_snapshot_next;
2359 lv_block_exception_t *lv_block_exception;
2360 uint lv_remap_ptr;
2361 uint lv_remap_end;
2362@@ -510,11 +572,18 @@
2363 struct list_head *lv_snapshot_hash_table;
2364 uint32_t lv_snapshot_hash_table_size;
2365 uint32_t lv_snapshot_hash_mask;
2366+#if LINUX_VERSION_CODE > KERNEL_VERSION ( 2, 3, 0)
2367 wait_queue_head_t lv_snapshot_wait;
2368+#else
2369+ struct wait_queue *lv_snapshot_wait;
2370+#endif
2371 int lv_snapshot_use_rate;
2372 struct vg_v3 *vg;
2373
2374 uint lv_allocated_snapshot_le;
2375+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
2376+ struct buffer_head **bheads;
2377+#endif
2378 #else
2379 char dummy[200];
2380 #endif
2381@@ -661,6 +730,7 @@
2382 } lv_snapshot_use_rate_req_t;
2383
2384
2385+
2386 /* useful inlines */
2387 static inline ulong round_up(ulong n, ulong size) {
2388 size--;
2389@@ -671,6 +741,7 @@
2390 return round_up(n, size) / size;
2391 }
2392
2393+/* FIXME: nasty capital letters */
2394 static int inline LVM_GET_COW_TABLE_CHUNKS_PER_PE(vg_t *vg, lv_t *lv) {
2395 return vg->pe_size / lv->lv_chunk_size;
2396 }
2397@@ -693,4 +764,6 @@
2398 return entries;
2399 }
2400
2401+
2402 #endif /* #ifndef _LVM_H_INCLUDE */
2403+
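
The helper inlines kept at the end of lvm.h above drive the COW-table layout arithmetic used by the snapshot code: LVM_GET_COW_TABLE_CHUNKS_PER_PE() is simply vg->pe_size / lv->lv_chunk_size, and one COW-table block holds blksize / sizeof(lv_COW_table_disk_t) entries. A worked example with hypothetical sizes (4 MB extents, 64 KB chunks, 1 KB blocks, and a 32-byte on-disk entry, none of which are defaults taken from this patch):

#include <stdio.h>

int main(void)
{
	unsigned long pe_size      = 4UL * 1024 * 1024 / 512;	/* 4 MB extent, in 512-byte sectors */
	unsigned long chunk_size   = 64UL * 1024 / 512;		/* 64 KB chunk, in sectors */
	unsigned long blksize_snap = 1024;			/* BLOCK_SIZE on non-S/390, per the hunk above */
	unsigned long entry_size   = 4 * 8;			/* assumes four 64-bit fields per entry */

	/* LVM_GET_COW_TABLE_CHUNKS_PER_PE(): vg->pe_size / lv->lv_chunk_size */
	printf("chunks per PE:     %lu\n", pe_size / chunk_size);	/* 64 */
	/* entries that fit in one COW-table block */
	printf("entries per block: %lu\n", blksize_snap / entry_size);	/* 32 */
	return 0;
}
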