1--- linux/include/linux/lvm.h.orig	Sun Nov 11 18:09:32 2001
2+++ linux/include/linux/lvm.h Thu Jan 10 12:24:08 2002
3@@ -3,28 +3,28 @@
4 * kernel/lvm.h
5 * tools/lib/lvm.h
6 *
7- * Copyright (C) 1997 - 2000 Heinz Mauelshagen, Sistina Software
8+ * Copyright (C) 1997 - 2001 Heinz Mauelshagen, Sistina Software
9 *
10 * February-November 1997
11 * May-July 1998
12 * January-March,July,September,October,Dezember 1999
13 * January,February,July,November 2000
14- * January 2001
15+ * January-March,June,July 2001
16 *
17 * lvm is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21- *
22+ *
23 * lvm is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27- *
28+ *
29 * You should have received a copy of the GNU General Public License
30 * along with GNU CC; see the file COPYING. If not, write to
31 * the Free Software Foundation, 59 Temple Place - Suite 330,
32- * Boston, MA 02111-1307, USA.
33+ * Boston, MA 02111-1307, USA.
34 *
35 */
36
37@@ -52,8 +52,7 @@
38 * 08/12/1999 - changed LVM_LV_SIZE_MAX macro to reflect current 1TB limit
39 * 01/01/2000 - extended lv_v2 core structure by wait_queue member
40 * 12/02/2000 - integrated Andrea Arcagnelli's snapshot work
41- * 14/02/2001 - changed LVM_SNAPSHOT_MIN_CHUNK to 1 page
42- * 18/02/2000 - seperated user and kernel space parts by
43+ * 18/02/2000 - seperated user and kernel space parts by
44 * #ifdef them with __KERNEL__
45 * 08/03/2000 - implemented cluster/shared bits for vg_access
46 * 26/06/2000 - implemented snapshot persistency and resizing support
47@@ -61,11 +60,18 @@
48 * 12/11/2000 - removed unneeded timestamp definitions
49 * 24/12/2000 - removed LVM_TO_{CORE,DISK}*, use cpu_{from, to}_le*
50 * instead - Christoph Hellwig
51- * 01/03/2001 - Rename VG_CREATE to VG_CREATE_OLD and add new VG_CREATE
52+ * 22/01/2001 - Change ulong to uint32_t
53+ * 14/02/2001 - changed LVM_SNAPSHOT_MIN_CHUNK to 1 page
54+ * 20/02/2001 - incremented IOP version to 11 because of incompatible
55+ * change in VG activation (in order to support devfs better)
56+ * 01/03/2001 - Revert to IOP10 and add VG_CREATE_OLD call for compatibility
57 * 08/03/2001 - new lv_t (in core) version number 5: changed page member
58 * to (struct kiobuf *) to use for COW exception table io
59- * 23/03/2001 - Change a (presumably) mistyped pv_t* to an lv_t*
60- * 26/03/2001 - changed lv_v4 to lv_v5 in structure definition [HM]
61+ * 26/03/2001 - changed lv_v4 to lv_v5 in structure definition (HM)
62+ * 21/06/2001 - changed BLOCK_SIZE back to 1024 for non S/390
63+ * 22/06/2001 - added Andreas Dilger's PE on 4k boundary alignment enhancements
64+ * 19/07/2001 - added rwsem compatibility macros for 2.2 kernels
65+ * 13/11/2001 - reduced userspace inclusion of kernel headers to a minimum
66 *
67 */
68
69@@ -73,10 +79,10 @@
70 #ifndef _LVM_H_INCLUDE
71 #define _LVM_H_INCLUDE
72
73-#define LVM_RELEASE_NAME "1.0.1-rc4(ish)"
74-#define LVM_RELEASE_DATE "03/10/2001"
75+#define LVM_RELEASE_NAME "1.0.1"
76+#define LVM_RELEASE_DATE "26/11/2001"
77
78-#define _LVM_KERNEL_H_VERSION "LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")"
79+#define _LVM_KERNEL_H_VERSION "LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")"
80
81 #include <linux/version.h>
82
83@@ -98,16 +104,26 @@
84 #define DEBUG_READ
85 #define DEBUG_GENDISK
86 #define DEBUG_VG_CREATE
87- #define DEBUG_LVM_BLK_OPEN
88+ #define DEBUG_DEVICE
89 #define DEBUG_KFREE
90 */
91-#endif /* #ifdef __KERNEL__ */
92
93 #include <linux/kdev_t.h>
94 #include <linux/list.h>
95-
96 #include <asm/types.h>
97 #include <linux/major.h>
98+#else
99+/* This prevents the need to include <linux/list.h> which
100+ causes problems on some platforms. It's not nice but then
101+ neither is the alternative. */
102+struct list_head {
103+ struct list_head *next, *prev;
104+};
105+#define __KERNEL__
106+#include <linux/kdev_t.h>
107+#undef __KERNEL__
108+#endif /* #ifndef __KERNEL__ */
109+
110
111 #ifdef __KERNEL__
112 #include <linux/spinlock.h>
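
The hunk above lets user space include <linux/lvm.h> without dragging in
<linux/list.h>: it declares a two-pointer stand-in for struct list_head and
briefly defines __KERNEL__ around the <linux/kdev_t.h> include. The stand-in
is safe because only the layout matters; a minimal user-space check of that
assumption (names here are illustrative, not from the patch):

#include <assert.h>
#include <stddef.h>

/* same shape as the stand-in struct list_head declared above */
struct fake_list_head {
	struct fake_list_head *next, *prev;
};

int main(void)
{
	/* the kernel's struct list_head is also exactly two pointers, so any
	 * structure embedding the stand-in keeps its size and field offsets */
	assert(sizeof(struct fake_list_head) == 2 * sizeof(void *));
	return 0;
}
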
113@@ -115,6 +131,7 @@
114 #include <asm/semaphore.h>
115 #endif /* #ifdef __KERNEL__ */
116
117+
118 #include <asm/page.h>
119
120 #if !defined ( LVM_BLK_MAJOR) || !defined ( LVM_CHAR_MAJOR)
121@@ -125,7 +142,7 @@
122 #undef BLOCK_SIZE
123 #endif
124
125-#ifdef CONFIG_ARCH_S390
126+#ifdef CONFIG_ARCH_S390
127 #define BLOCK_SIZE 4096
128 #else
129 #define BLOCK_SIZE 1024
130@@ -189,6 +206,38 @@
131
132
133 /*
134+ * VGDA: default disk spaces and offsets
135+ *
136+ * there's space after the structures for later extensions.
137+ *
138+ * offset what size
139+ * --------------- ---------------------------------- ------------
140+ * 0 physical volume structure ~500 byte
141+ *
142+ * 1K volume group structure ~200 byte
143+ *
144+ * 6K namelist of physical volumes 128 byte each
145+ *
146+ * 6k + n * ~300byte n logical volume structures ~300 byte each
147+ *
148+ * + m * 4byte m physical extent alloc. structs 4 byte each
149+ *
150+ * End of disk - first physical extent typically 4 megabyte
151+ * PE total *
152+ * PE size
153+ *
154+ *
155+ */
156+
157+/* DONT TOUCH THESE !!! */
158+
159+
160+
161+
162+
163+
164+
165+/*
166 * LVM_PE_T_MAX corresponds to:
167 *
168 * 8KB PE size can map a ~512 MB logical volume at the cost of 1MB memory,
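
The comment starting here explains the trade-off behind LVM_PE_T_MAX: with a
16-bit extent counter, an 8KB PE size caps a logical volume at roughly 512 MB
while the in-core extent map costs about 1 MB. A quick check of those figures
(the 65534 extent limit and the ~16-byte map entry are assumptions read off
the quoted numbers, not values taken from this hunk):

#include <stdio.h>

int main(void)
{
	unsigned long max_pe  = 65534;  /* assumed 16-bit PE counter        */
	unsigned long pe_kb   = 8;      /* 8KB physical extents             */
	unsigned long entry_b = 16;     /* assumed bytes per in-core entry  */

	/* ~512 MB addressable, ~1 MB of kernel memory for the map */
	printf("max LV size: %lu MB\n", max_pe * pe_kb / 1024);   /* 511  */
	printf("map memory : %lu KB\n", max_pe * entry_b / 1024); /* 1023 */
	return 0;
}
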
169@@ -217,8 +266,9 @@
170 #define LVM_MAX_STRIPES 128 /* max # of stripes */
171 #define LVM_MAX_SIZE ( 1024LU * 1024 / SECTOR_SIZE * 1024 * 1024) /* 1TB[sectors] */
172 #define LVM_MAX_MIRRORS 2 /* future use */
173-#define LVM_MIN_READ_AHEAD 2 /* minimum read ahead sectors */
174-#define LVM_MAX_READ_AHEAD 120 /* maximum read ahead sectors */
175+#define LVM_MIN_READ_AHEAD 0 /* minimum read ahead sectors */
176+#define LVM_DEFAULT_READ_AHEAD 1024 /* sectors for 512k scsi segments */
177+#define LVM_MAX_READ_AHEAD 10000 /* maximum read ahead sectors */
178 #define LVM_MAX_LV_IO_TIMEOUT 60 /* seconds I/O timeout (future use) */
179 #define LVM_PARTITION 0xfe /* LVM partition id */
180 #define LVM_NEW_PARTITION 0x8e /* new LVM partition id (10/09/1999) */
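
The new read-ahead limits above are counted in 512-byte sectors, so the
default of 1024 sectors matches the "512k scsi segments" remark. A quick
conversion check (illustrative only):

#include <stdio.h>

#define SECTOR_SIZE            512    /* bytes per sector            */
#define LVM_DEFAULT_READ_AHEAD 1024   /* sectors, as defined above   */
#define LVM_MAX_READ_AHEAD     10000  /* sectors, as defined above   */

int main(void)
{
	/* 1024 sectors * 512 bytes = 512 KiB; 10000 sectors is ~4.9 MiB */
	printf("default: %d KiB\n", LVM_DEFAULT_READ_AHEAD * SECTOR_SIZE / 1024);
	printf("maximum: %d KiB\n", LVM_MAX_READ_AHEAD * SECTOR_SIZE / 1024);
	return 0;
}
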
181@@ -298,7 +348,12 @@
182 #endif
183
184 /* lock the logical volume manager */
185+#if LVM_DRIVER_IOP_VERSION > 11
186+#define LVM_LOCK_LVM _IO ( 0xfe, 0x9A)
187+#else
188+/* This is actually the same as _IO ( 0xff, 0x00), oops. Remove for IOP 12+ */
189 #define LVM_LOCK_LVM _IO ( 0xfe, 0x100)
190+#endif
191 /* END ioctls */
192
193
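
The #else branch keeps the old LVM_LOCK_LVM number for compatibility, and the
new comment points out that _IO(0xfe, 0x100) collides with _IO(0xff, 0x00):
the ioctl nr field is only 8 bits wide, so bit 8 of 0x100 spills into the type
field. A quick user-space check of that arithmetic using the standard ioctl
encoding macros (illustrative only):

#include <stdio.h>
#include <sys/ioctl.h>          /* provides the _IO() macro on Linux */

int main(void)
{
	/* nr 0x100 does not fit in the 8-bit nr field; the extra bit ORs
	 * into the type field, so both requests encode to the same value */
	unsigned long a = _IO(0xfe, 0x100);
	unsigned long b = _IO(0xff, 0x00);

	printf("_IO(0xfe, 0x100) = 0x%lx\n", a);
	printf("_IO(0xff, 0x00)  = 0x%lx\n", b);
	printf("equal: %s\n", a == b ? "yes" : "no");   /* yes */
	return 0;
}
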
194@@ -495,9 +550,9 @@
195 uint lv_read_ahead;
196
197 /* delta to version 1 starts here */
198- struct lv_v5 *lv_snapshot_org;
199- struct lv_v5 *lv_snapshot_prev;
200- struct lv_v5 *lv_snapshot_next;
201+ struct lv_v5 *lv_snapshot_org;
202+ struct lv_v5 *lv_snapshot_prev;
203+ struct lv_v5 *lv_snapshot_next;
204 lv_block_exception_t *lv_block_exception;
205 uint lv_remap_ptr;
206 uint lv_remap_end;
207@@ -661,6 +716,7 @@
208 } lv_snapshot_use_rate_req_t;
209
210
211+
212 /* useful inlines */
213 static inline ulong round_up(ulong n, ulong size) {
214 size--;
215@@ -671,6 +727,7 @@
216 return round_up(n, size) / size;
217 }
218
219+/* FIXME: nasty capital letters */
220 static int inline LVM_GET_COW_TABLE_CHUNKS_PER_PE(vg_t *vg, lv_t *lv) {
221 return vg->pe_size / lv->lv_chunk_size;
222 }
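
The "useful inlines" above round a value up to a power-of-two boundary and
divide rounding up, and LVM_GET_COW_TABLE_CHUNKS_PER_PE is a plain division of
the VG's PE size by the LV's chunk size. A small user-space sketch of that
arithmetic (the (n + size) & ~size body and the div_up name are assumed, since
the hunks cut both helpers short):

#include <stdio.h>

/* only valid when size is a power of two, as in the header */
static unsigned long round_up(unsigned long n, unsigned long size)
{
	size--;
	return (n + size) & ~size;
}

static unsigned long div_up(unsigned long n, unsigned long size)
{
	return round_up(n, size) / size;
}

int main(void)
{
	printf("round_up(5000, 4096) = %lu\n", round_up(5000, 4096)); /* 8192 */
	printf("div_up(5000, 4096)   = %lu\n", div_up(5000, 4096));   /* 2    */

	/* chunks per PE: e.g. 8192-sector (4 MB) PEs with 128-sector chunks */
	printf("chunks per PE        = %lu\n", 8192UL / 128UL);       /* 64   */
	return 0;
}
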
223@@ -693,4 +750,6 @@
224 return entries;
225 }
226
227+
228 #endif /* #ifndef _LVM_H_INCLUDE */
229+
230--- linux/drivers/md/lvm.c.orig Mon Nov 19 17:56:04 2001
231+++ linux/drivers/md/lvm.c Thu Jan 10 12:24:08 2002
232@@ -1,13 +1,13 @@
233 /*
234 * kernel/lvm.c
235 *
236- * Copyright (C) 1997 - 2000 Heinz Mauelshagen, Sistina Software
237+ * Copyright (C) 1997 - 2001 Heinz Mauelshagen, Sistina Software
238 *
239 * February-November 1997
240 * April-May,July-August,November 1998
241 * January-March,May,July,September,October 1999
242 * January,February,July,September-November 2000
243- * January 2001
244+ * January-April 2001
245 *
246 *
247 * LVM driver is free software; you can redistribute it and/or modify
248@@ -43,7 +43,8 @@
249 * support for free (eg. longer) logical volume names
250 * 12/05/1998 - added spin_locks (thanks to Pascal van Dam
251 * <pascal@ramoth.xs4all.nl>)
252- * 25/05/1998 - fixed handling of locked PEs in lvm_map() and lvm_chr_ioctl()
253+ * 25/05/1998 - fixed handling of locked PEs in lvm_map() and
254+ * lvm_chr_ioctl()
255 * 26/05/1998 - reactivated verify_area by access_ok
256 * 07/06/1998 - used vmalloc/vfree instead of kmalloc/kfree to go
257 * beyond 128/256 KB max allocation limit per call
258@@ -125,7 +126,8 @@
259 * 14/02/2000 - support for 2.3.43
260 * - integrated Andrea Arcagneli's snapshot code
261 * 25/06/2000 - james (chip) , IKKHAYD! roffl
262- * 26/06/2000 - enhanced lv_extend_reduce for snapshot logical volume support
263+ * 26/06/2000 - enhanced lv_extend_reduce for snapshot logical volume
264+ * support
265 * 06/09/2000 - added devfs support
266 * 07/09/2000 - changed IOP version to 9
267 * - started to add new char ioctl LV_STATUS_BYDEV_T to support
268@@ -147,15 +149,24 @@
269 * 08/01/2001 - Removed conditional compiles related to PROC_FS,
270 * procfs is always supported now. (JT)
271 * 12/01/2001 - avoided flushing logical volume in case of shrinking
272- * because of unnecessary overhead in case of heavy updates
273+ * because of unecessary overhead in case of heavy updates
274 * 25/01/2001 - Allow RO open of an inactive LV so it can be reactivated.
275- * 31/01/2001 - If you try and BMAP a snapshot you now get an -EPERM
276- * 01/02/2001 - factored __remap_snapshot out of lvm_map
277+ * 31/01/2001 - removed blk_init_queue/blk_cleanup_queue queueing will be
278+ * handled by the proper devices.
279+ * - If you try and BMAP a snapshot you now get an -EPERM
280+ * 01/01/2001 - lvm_map() now calls buffer_IO_error on error for 2.4
281+ * - factored __remap_snapshot out of lvm_map
282 * 12/02/2001 - move devfs code to create VG before LVs
283- * 14/02/2001 - tidied device defines for blk.h
284+ * 13/02/2001 - allow VG_CREATE on /dev/lvm
285+ * 14/02/2001 - removed modversions.h
286+ * - tidied device defines for blk.h
287 * - tidied debug statements
288+ * - bug: vg[] member not set back to NULL if activation fails
289 * - more lvm_map tidying
290- * 14/02/2001 - bug: vg[] member not set back to NULL if activation fails
291+ * 15/02/2001 - register /dev/lvm with devfs correctly (major/minor
292+ * were swapped)
293+ * 19/02/2001 - preallocated buffer_heads for rawio when using
294+ * snapshots [JT]
295 * 28/02/2001 - introduced the P_DEV macro and changed some internel
296 * functions to be static [AD]
297 * 28/02/2001 - factored lvm_get_snapshot_use_rate out of blk_ioctl [AD]
298@@ -163,25 +174,50 @@
299 * where the check for an existing LV takes place right at
300 * the beginning
301 * 01/03/2001 - Add VG_CREATE_OLD for IOP 10 compatibility
302- * 02/03/2001 - Don't destroy usermode pointers in lv_t structures duing LV_
303- * STATUS_BYxxx and remove redundant lv_t variables from same.
304+ * 02/03/2001 - Don't destroy usermode pointers in lv_t structures duing
305+ * LV_STATUS_BYxxx
306+ * and remove redundant lv_t variables from same.
307+ * - avoid compilation of lvm_dummy_device_request in case of
308+ * Linux >= 2.3.0 to avoid a warning
309+ * - added lvm_name argument to printk in buffer allocation
310+ * in order to avoid a warning
311+ * 04/03/2001 - moved linux/version.h above first use of KERNEL_VERSION
312+ * macros
313 * 05/03/2001 - restore copying pe_t array in lvm_do_lv_status_byname. For
314 * lvdisplay -v (PC)
315 * - restore copying pe_t array in lvm_do_lv_status_byindex (HM)
316 * - added copying pe_t array in lvm_do_lv_status_bydev (HM)
317 * - enhanced lvm_do_lv_status_by{name,index,dev} to be capable
318 * to copy the lv_block_exception_t array to userspace (HM)
319- * 08/03/2001 - factored lvm_do_pv_flush out of lvm_chr_ioctl [HM]
320+ * 08/03/2001 - initialize new lv_ptr->lv_COW_table_iobuf for snapshots;
321+ * removed obsolete lv_ptr->lv_COW_table_page initialization
322+ * - factored lvm_do_pv_flush out of lvm_chr_ioctl (HM)
323 * 09/03/2001 - Added _lock_open_count to ensure we only drop the lock
324 * when the locking process closes.
325- * 05/04/2001 - lvm_map bugs: don't use b_blocknr/b_dev in lvm_map, it
326- * destroys stacking devices. call b_end_io on failed maps.
327- * (Jens Axboe)
328- * - Defer writes to an extent that is being moved [JT + AD]
329- * 28/05/2001 - implemented missing BLKSSZGET ioctl [AD]
330+ * 05/04/2001 - Defer writes to an extent that is being moved [JT]
331+ * 05/04/2001 - use b_rdev and b_rsector rather than b_dev and b_blocknr in
332+ * lvm_map() in order to make stacking devices more happy (HM)
333+ * 11/04/2001 - cleaned up the pvmove queue code. I no longer retain the
334+ * rw flag, instead WRITEA's are just dropped [JT]
335+ * 30/04/2001 - added KERNEL_VERSION > 2.4.3 get_hardsect_size() rather
336+ * than get_hardblocksize() call
337+ * 03/05/2001 - Use copy_to/from_user to preserve pointers in
338+ * lvm_do_status_by*
339+ * 11/05/2001 - avoid accesses to inactive snapshot data in
340+ * __update_hardsectsize() and lvm_do_lv_extend_reduce() (JW)
341+ * 28/05/2001 - implemented missing BLKSSZGET ioctl
342+ * 05/06/2001 - Move _pe_lock out of fast path for lvm_map when no PEs
343+ * locked. Make buffer queue flush not need locking.
344+ * Fix lvm_user_bmap() to set b_rsector for new lvm_map(). [AED]
345+ * 30/06/2001 - Speed up __update_hardsectsize() by checking if PVs have
346+ * the same hardsectsize (very likely) before scanning all LEs
347+ * in the LV each time. [AED]
348+ * 12/10/2001 - Use add/del_gendisk() routines in 2.4.10+
349+ * 01/11/2001 - Backport read_ahead change from Linus kernel [AED]
350 *
351 */
352
353+#include <linux/version.h>
354
355 #define MAJOR_NR LVM_BLK_MAJOR
356 #define DEVICE_OFF(device)
357@@ -191,11 +227,10 @@
358 /* #define LVM_VFS_ENHANCEMENT */
359
360 #include <linux/config.h>
361-
362 #include <linux/module.h>
363-
364 #include <linux/kernel.h>
365 #include <linux/vmalloc.h>
366+
367 #include <linux/slab.h>
368 #include <linux/init.h>
369
370@@ -206,6 +241,8 @@
371 #include <linux/blkdev.h>
372 #include <linux/genhd.h>
373 #include <linux/locks.h>
374+
375+
376 #include <linux/devfs_fs_kernel.h>
377 #include <linux/smp_lock.h>
378 #include <asm/ioctl.h>
379@@ -224,9 +261,13 @@
380
381 #include "lvm-internal.h"
382
383-#define LVM_CORRECT_READ_AHEAD( a) \
384- if ( a < LVM_MIN_READ_AHEAD || \
385- a > LVM_MAX_READ_AHEAD) a = LVM_MAX_READ_AHEAD;
386+#define LVM_CORRECT_READ_AHEAD(a) \
387+do { \
388+ if ((a) < LVM_MIN_READ_AHEAD || \
389+ (a) > LVM_MAX_READ_AHEAD) \
390+ (a) = LVM_DEFAULT_READ_AHEAD; \
391+ read_ahead[MAJOR_NR] = (a); \
392+} while(0)
393
394 #ifndef WRITEA
395 # define WRITEA WRITE
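
The reworked LVM_CORRECT_READ_AHEAD macro above now snaps out-of-range values
to LVM_DEFAULT_READ_AHEAD (the old version forced them to the maximum) and
also mirrors the result into the global read_ahead[] table for the LVM major.
A user-space sketch of the same behaviour, with a plain array standing in for
the kernel's read_ahead[] and the major number picked only for illustration:

#include <stdio.h>

#define LVM_MIN_READ_AHEAD     0
#define LVM_DEFAULT_READ_AHEAD 1024
#define LVM_MAX_READ_AHEAD     10000
#define MAJOR_NR               58        /* illustrative LVM block major  */

static int read_ahead[256];              /* stand-in for the kernel table */

/* same shape as the patched do { ... } while(0) macro */
#define LVM_CORRECT_READ_AHEAD(a)                 \
do {                                              \
	if ((a) < LVM_MIN_READ_AHEAD ||           \
	    (a) > LVM_MAX_READ_AHEAD)             \
		(a) = LVM_DEFAULT_READ_AHEAD;     \
	read_ahead[MAJOR_NR] = (a);               \
} while (0)

int main(void)
{
	int ra = 50000;                   /* out of range */
	LVM_CORRECT_READ_AHEAD(ra);
	/* both print 1024: bad values fall back to the default */
	printf("ra = %d, read_ahead[%d] = %d\n", ra, MAJOR_NR, read_ahead[MAJOR_NR]);
	return 0;
}
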
396@@ -351,6 +392,7 @@
397
398
399 struct file_operations lvm_chr_fops = {
400+ owner: THIS_MODULE,
401 open: lvm_chr_open,
402 release: lvm_chr_close,
403 ioctl: lvm_chr_ioctl,
404@@ -360,7 +402,7 @@
405 struct block_device_operations lvm_blk_dops =
406 {
407 owner: THIS_MODULE,
408- open: lvm_blk_open,
409+ open: lvm_blk_open,
410 release: lvm_blk_close,
411 ioctl: lvm_blk_ioctl,
412 };
413@@ -383,6 +425,7 @@
414 nr_real: MAX_LV,
415 };
416
417+
418 /*
419 * Driver initialization...
420 */
421@@ -394,7 +437,6 @@
422 lvm_name);
423 return -EIO;
424 }
425-
426 if (devfs_register_blkdev(MAJOR_NR, lvm_name, &lvm_blk_dops) < 0)
427 {
428 printk("%s -- devfs_register_blkdev failed\n", lvm_name);
429@@ -409,6 +451,7 @@
430 lvm_init_vars();
431 lvm_geninit(&lvm_gendisk);
432
433+ /* insert our gendisk at the corresponding major */
434 add_gendisk(&lvm_gendisk);
435
436 #ifdef LVM_HD_NAME
437@@ -436,10 +479,10 @@
438 return 0;
439 } /* lvm_init() */
440
441-
442 /*
443 * cleanup...
444 */
445+
446 static void lvm_cleanup(void)
447 {
448 if (devfs_unregister_chrdev(LVM_CHAR_MAJOR, lvm_name) < 0)
449@@ -449,6 +492,9 @@
450 printk(KERN_ERR "%s -- devfs_unregister_blkdev failed\n",
451 lvm_name);
452
453+
454+
455+ /* delete our gendisk from chain */
456 del_gendisk(&lvm_gendisk);
457
458 blk_size[MAJOR_NR] = NULL;
459@@ -514,7 +560,7 @@
460 */
461 static int lvm_chr_open(struct inode *inode, struct file *file)
462 {
463- unsigned int minor = MINOR(inode->i_rdev);
464+ int minor = MINOR(inode->i_rdev);
465
466 P_DEV("chr_open MINOR: %d VG#: %d mode: %s%s lock: %d\n",
467 minor, VG_CHR(minor), MODE_TO_STR(file->f_mode), lock);
468@@ -525,10 +571,10 @@
469 /* Group special file open */
470 if (VG_CHR(minor) > MAX_VG) return -ENXIO;
471
472- spin_lock(&lvm_lock);
473- if(lock == current->pid)
474- _lock_open_count++;
475- spin_unlock(&lvm_lock);
476+ spin_lock(&lvm_lock);
477+ if(lock == current->pid)
478+ _lock_open_count++;
479+ spin_unlock(&lvm_lock);
480
481 lvm_chr_open_count++;
482
483@@ -546,7 +592,7 @@
484 *
485 */
486 static int lvm_chr_ioctl(struct inode *inode, struct file *file,
487- uint command, ulong a)
488+ uint command, ulong a)
489 {
490 int minor = MINOR(inode->i_rdev);
491 uint extendable, l, v;
492@@ -610,8 +656,8 @@
493 /* create a VGDA */
494 return lvm_do_vg_create(arg, minor);
495
496- case VG_CREATE:
497- /* create a VGDA, assume VG number is filled in */
498+ case VG_CREATE:
499+ /* create a VGDA, assume VG number is filled in */
500 return lvm_do_vg_create(arg, -1);
501
502 case VG_EXTEND:
503@@ -734,7 +780,7 @@
504
505 case PV_FLUSH:
506 /* physical volume buffer flush/invalidate */
507- return lvm_do_pv_flush(arg);
508+ return lvm_do_pv_flush(arg);
509
510
511 default:
512@@ -765,16 +811,16 @@
513
514 if (lvm_chr_open_count > 0) lvm_chr_open_count--;
515
516- spin_lock(&lvm_lock);
517- if(lock == current->pid) {
518- if(!_lock_open_count) {
519+ spin_lock(&lvm_lock);
520+ if(lock == current->pid) {
521+ if(!_lock_open_count) {
522 P_DEV("chr_close: unlocking LVM for pid %d\n", lock);
523- lock = 0;
524- wake_up_interruptible(&lvm_wait);
525- } else
526- _lock_open_count--;
527+ lock = 0;
528+ wake_up_interruptible(&lvm_wait);
529+ } else
530+ _lock_open_count--;
531 }
532- spin_unlock(&lvm_lock);
533+ spin_unlock(&lvm_lock);
534
535 MOD_DEC_USE_COUNT;
536
537@@ -860,7 +906,7 @@
538 switch (command) {
539 case BLKSSZGET:
540 /* get block device sector size as needed e.g. by fdisk */
541- return put_user(get_hardsect_size(inode->i_rdev), (int *) arg);
542+ return put_user(lvm_sectsize(inode->i_rdev), (int *) arg);
543
544 case BLKGETSIZE:
545 /* return device size */
546@@ -869,11 +915,12 @@
547 return -EFAULT;
548 break;
549
550+#ifdef BLKGETSIZE64
551 case BLKGETSIZE64:
552 if (put_user((u64)lv_ptr->lv_size << 9, (u64 *)arg))
553 return -EFAULT;
554 break;
555-
556+#endif
557
558 case BLKFLSBUF:
559 /* flush buffer cache */
560@@ -897,6 +944,7 @@
561 (long) arg > LVM_MAX_READ_AHEAD)
562 return -EINVAL;
563 lv_ptr->lv_read_ahead = (long) arg;
564+ read_ahead[MAJOR_NR] = lv_ptr->lv_read_ahead;
565 break;
566
567
568@@ -955,12 +1003,13 @@
569 break;
570
571 case LV_BMAP:
572- /* turn logical block into (dev_t, block). non privileged. */
573- /* don't bmap a snapshot, since the mapping can change */
574- if(lv_ptr->lv_access & LV_SNAPSHOT)
575+ /* turn logical block into (dev_t, block). non privileged. */
576+ /* don't bmap a snapshot, since the mapping can change */
577+ if (lv_ptr->lv_access & LV_SNAPSHOT)
578 return -EPERM;
579
580 return lvm_user_bmap(inode, (struct lv_bmap *) arg);
581+ break;
582
583 case LV_SET_ALLOCATION:
584 /* set allocation flags of a logical volume */
585@@ -1048,7 +1097,7 @@
586 bh.b_blocknr = block;
587 bh.b_dev = bh.b_rdev = inode->i_rdev;
588 bh.b_size = lvm_get_blksize(bh.b_dev);
589- bh.b_rsector = block * (bh.b_size >> 9);
590+ bh.b_rsector = block * (bh.b_size >> 9);
591 if ((err=lvm_map(&bh, READ)) < 0) {
592 printk("lvm map failed: %d\n", err);
593 return -EINVAL;
594@@ -1056,7 +1105,7 @@
595
596 return put_user(kdev_t_to_nr(bh.b_rdev), &user_result->lv_dev) ||
597 put_user(bh.b_rsector/(bh.b_size>>9), &user_result->lv_block) ?
598- -EFAULT : 0;
599+ -EFAULT : 0;
600 }
601
602
603@@ -1065,7 +1114,7 @@
604 * (see init_module/lvm_init)
605 */
606 static void __remap_snapshot(kdev_t rdev, ulong rsector,
607- ulong pe_start, lv_t *lv, vg_t *vg) {
608+ ulong pe_start, lv_t *lv, vg_t *vg) {
609
610 /* copy a chunk from the origin to a snapshot device */
611 down_write(&lv->lv_lock);
612@@ -1122,6 +1171,7 @@
613 return 0;
614 }
615
616+
617 static int lvm_map(struct buffer_head *bh, int rw)
618 {
619 int minor = MINOR(bh->b_rdev);
620@@ -1223,10 +1273,8 @@
621 goto out;
622
623 if (lv->lv_access & LV_SNAPSHOT) { /* remap snapshot */
624- if (lv->lv_block_exception)
625- lvm_snapshot_remap_block(&rdev_map, &rsector_map,
626- pe_start, lv);
627- else
628+ if (lvm_snapshot_remap_block(&rdev_map, &rsector_map,
629+ pe_start, lv) < 0)
630 goto bad;
631
632 } else if (rw == WRITE || rw == WRITEA) { /* snapshot origin */
633@@ -1245,7 +1293,7 @@
634 _remap_snapshot(rdev_map, rsector_map,
635 pe_start, snap, vg_this);
636 }
637- }
638+ }
639
640 out:
641 bh->b_rdev = rdev_map;
642@@ -1284,12 +1332,15 @@
643 #endif
644
645
646+
647+
648 /*
649 * make request function
650 */
651 static int lvm_make_request_fn(request_queue_t *q,
652 int rw,
653- struct buffer_head *bh) {
654+ struct buffer_head *bh)
655+{
656 return (lvm_map(bh, rw) <= 0) ? 0 : 1;
657 }
658
659@@ -1457,14 +1508,14 @@
660 return -EFAULT;
661 }
662
663- /* VG_CREATE now uses minor number in VG structure */
664- if (minor == -1) minor = vg_ptr->vg_number;
665+ /* VG_CREATE now uses minor number in VG structure */
666+ if (minor == -1) minor = vg_ptr->vg_number;
667
668 /* Validate it */
669- if (vg[VG_CHR(minor)] != NULL) {
670+ if (vg[VG_CHR(minor)] != NULL) {
671 P_IOCTL("lvm_do_vg_create ERROR: VG %d in use\n", minor);
672 kfree(vg_ptr);
673- return -EPERM;
674+ return -EPERM;
675 }
676
677 /* we are not that active so far... */
678@@ -1637,7 +1688,8 @@
679 lv_t *lv_ptr = NULL;
680 pv_t *pv_ptr = NULL;
681
682- if (vg_ptr == NULL) return -ENXIO;
683+ /* If the VG doesn't exist in the kernel then just exit */
684+ if (!vg_ptr) return 0;
685
686 if (copy_from_user(vg_name, arg, sizeof(vg_name)) != 0)
687 return -EFAULT;
688@@ -1797,30 +1849,56 @@
689 }
690
691
692-static void __update_hardsectsize(lv_t *lv) {
693- int le, e;
694- int max_hardsectsize = 0, hardsectsize;
695-
696- for (le = 0; le < lv->lv_allocated_le; le++) {
697- hardsectsize = get_hardsect_size(lv->lv_current_pe[le].dev);
698- if (hardsectsize == 0)
699- hardsectsize = 512;
700- if (hardsectsize > max_hardsectsize)
701- max_hardsectsize = hardsectsize;
702- }
703-
704- /* only perform this operation on active snapshots */
705- if ((lv->lv_access & LV_SNAPSHOT) &&
706- (lv->lv_status & LV_ACTIVE)) {
707- for (e = 0; e < lv->lv_remap_end; e++) {
708- hardsectsize = get_hardsect_size( lv->lv_block_exception[e].rdev_new);
709- if (hardsectsize == 0)
710- hardsectsize = 512;
711- if (hardsectsize > max_hardsectsize)
712+static void __update_hardsectsize(lv_t *lv)
713+{
714+ int max_hardsectsize = 0, hardsectsize = 0;
715+ int p;
716+
717+ /* Check PVs first to see if they all have same sector size */
718+ for (p = 0; p < lv->vg->pv_cur; p++) {
719+ pv_t *pv = lv->vg->pv[p];
720+ if (pv && (hardsectsize = lvm_sectsize(pv->pv_dev))) {
721+ if (max_hardsectsize == 0)
722 max_hardsectsize = hardsectsize;
723+ else if (hardsectsize != max_hardsectsize) {
724+ P_DEV("%s PV[%d] (%s) sector size %d, not %d\n",
725+ lv->lv_name, p, kdevname(pv->pv_dev),
726+ hardsectsize, max_hardsectsize);
727+ break;
728+ }
729 }
730 }
731
732+ /* PVs have different block size, need to check each LE sector size */
733+ if (hardsectsize != max_hardsectsize) {
734+ int le;
735+ for (le = 0; le < lv->lv_allocated_le; le++) {
736+ hardsectsize = lvm_sectsize(lv->lv_current_pe[le].dev);
737+ if (hardsectsize > max_hardsectsize) {
738+ P_DEV("%s LE[%d] (%s) blocksize %d not %d\n",
739+ lv->lv_name, le,
740+ kdevname(lv->lv_current_pe[le].dev),
741+ hardsectsize, max_hardsectsize);
742+ max_hardsectsize = hardsectsize;
743+ }
744+ }
745+
746+ /* only perform this operation on active snapshots */
747+ if ((lv->lv_access & LV_SNAPSHOT) &&
748+ (lv->lv_status & LV_ACTIVE)) {
749+ int e;
750+ for (e = 0; e < lv->lv_remap_end; e++) {
751+ hardsectsize = lvm_sectsize(lv->lv_block_exception[e].rdev_new);
752+ if (hardsectsize > max_hardsectsize)
753+ max_hardsectsize = hardsectsize;
754+ }
755+ }
756+ }
757+
758+ if (max_hardsectsize == 0)
759+ max_hardsectsize = SECTOR_SIZE;
760+ P_DEV("hardblocksize for LV %s is %d\n",
761+ kdevname(lv->lv_dev), max_hardsectsize);
762 lvm_hardsectsizes[MINOR(lv->lv_dev)] = max_hardsectsize;
763 }
764
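
The rewritten __update_hardsectsize() above asks every PV in the VG for its
sector size first; only when they disagree does it fall back to scanning each
allocated LE (plus, for active snapshots, each exception block), and it
defaults to SECTOR_SIZE when no device answers. A simplified user-space sketch
of that common-case-first selection, with plain arrays standing in for the
lvm_sectsize() lookups:

#include <stdio.h>

#define SECTOR_SIZE 512

static int pick_hardsectsize(const int *pv_size, int npv,
			     const int *le_size, int nle)
{
	int max = 0, cur = 0, i;

	/* fast path: all PVs report the same size */
	for (i = 0; i < npv; i++) {
		if (!pv_size[i])
			continue;
		cur = pv_size[i];
		if (max == 0)
			max = cur;
		else if (cur != max)
			break;                  /* sizes differ */
	}

	/* slow path: take the maximum over every logical extent */
	if (cur != max)
		for (i = 0; i < nle; i++)
			if (le_size[i] > max)
				max = le_size[i];

	if (max == 0)
		max = SECTOR_SIZE;              /* nothing answered */
	return max;
}

int main(void)
{
	int pvs[] = { 512, 512, 512 };
	int les[] = { 512, 512, 512, 512 };
	printf("hardsectsize = %d\n",
	       pick_hardsectsize(pvs, 3, les, 4));   /* 512, via the fast path */
	return 0;
}
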
765@@ -1876,7 +1954,7 @@
766 lv_ptr->lv_snapshot_next = NULL;
767 lv_ptr->lv_block_exception = NULL;
768 lv_ptr->lv_iobuf = NULL;
769- lv_ptr->lv_COW_table_iobuf = NULL;
770+ lv_ptr->lv_COW_table_iobuf = NULL;
771 lv_ptr->lv_snapshot_hash_table = NULL;
772 lv_ptr->lv_snapshot_hash_table_size = 0;
773 lv_ptr->lv_snapshot_hash_mask = 0;
774@@ -1926,7 +2004,7 @@
775 if (lv_ptr->lv_snapshot_org != NULL) {
776 size = lv_ptr->lv_remap_end * sizeof(lv_block_exception_t);
777
778- if(!size) {
779+ if (!size) {
780 printk(KERN_WARNING
781 "%s -- zero length exception table requested\n",
782 lvm_name);
783@@ -1956,12 +2034,11 @@
784 LVM_SNAPSHOT_DROPPED_SECTOR)
785 {
786 printk(KERN_WARNING
787- "%s -- lvm_do_lv_create: snapshot has been dropped and will not be activated\n",
788+ "%s -- lvm_do_lv_create: snapshot has been dropped and will not be activated\n",
789 lvm_name);
790 activate = 0;
791 }
792
793-
794 /* point to the original logical volume */
795 lv_ptr = lv_ptr->lv_snapshot_org;
796
797@@ -1995,11 +2072,11 @@
798 lv_ptr->lv_block_exception[e].rsector_org, lv_ptr);
799 /* need to fill the COW exception table data
800 into the page for disk i/o */
801- if(lvm_snapshot_fill_COW_page(vg_ptr, lv_ptr)) {
802- kfree(lv_ptr);
803- vg_ptr->lv[l] = NULL;
804- return -EINVAL;
805- }
806+ if(lvm_snapshot_fill_COW_page(vg_ptr, lv_ptr)) {
807+ kfree(lv_ptr);
808+ vg_ptr->lv[l] = NULL;
809+ return -EINVAL;
810+ }
811 init_waitqueue_head(&lv_ptr->lv_snapshot_wait);
812 } else {
813 kfree(lv_ptr);
814@@ -2022,6 +2099,7 @@
815 LVM_CORRECT_READ_AHEAD(lv_ptr->lv_read_ahead);
816 vg_ptr->lv_cur++;
817 lv_ptr->lv_status = lv_status_save;
818+ lv_ptr->vg = vg_ptr;
819
820 __update_hardsectsize(lv_ptr);
821
822@@ -2040,6 +2118,7 @@
823 org->lv_access |= LV_SNAPSHOT_ORG;
824 lv_ptr->lv_access &= ~LV_SNAPSHOT_ORG; /* this can only hide an userspace bug */
825
826+
827 /* Link in the list of snapshot volumes */
828 for (last = org; last->lv_snapshot_next; last = last->lv_snapshot_next);
829 lv_ptr->lv_snapshot_prev = last;
830@@ -2064,11 +2143,8 @@
831 unlockfs(lv_ptr->lv_snapshot_org->lv_dev);
832 #endif
833
834- lv_ptr->vg = vg_ptr;
835-
836 lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de =
837- lvm_fs_create_lv(vg_ptr, lv_ptr);
838-
839+ lvm_fs_create_lv(vg_ptr, lv_ptr);
840 return 0;
841 } /* lvm_do_lv_create() */
842
843@@ -2184,214 +2260,213 @@
844 * logical volume extend / reduce
845 */
846 static int __extend_reduce_snapshot(vg_t *vg_ptr, lv_t *old_lv, lv_t *new_lv) {
847- ulong size;
848- lv_block_exception_t *lvbe;
849+ ulong size;
850+ lv_block_exception_t *lvbe;
851
852- if (!new_lv->lv_block_exception)
853- return -ENXIO;
854+ if (!new_lv->lv_block_exception)
855+ return -ENXIO;
856+
857+ size = new_lv->lv_remap_end * sizeof(lv_block_exception_t);
858+ if ((lvbe = vmalloc(size)) == NULL) {
859+ printk(KERN_CRIT
860+ "%s -- lvm_do_lv_extend_reduce: vmalloc "
861+ "error LV_BLOCK_EXCEPTION of %lu Byte at line %d\n",
862+ lvm_name, size, __LINE__);
863+ return -ENOMEM;
864+ }
865+
866+ if ((new_lv->lv_remap_end > old_lv->lv_remap_end) &&
867+ (copy_from_user(lvbe, new_lv->lv_block_exception, size))) {
868+ vfree(lvbe);
869+ return -EFAULT;
870+ }
871+ new_lv->lv_block_exception = lvbe;
872
873- size = new_lv->lv_remap_end * sizeof(lv_block_exception_t);
874- if ((lvbe = vmalloc(size)) == NULL) {
875- printk(KERN_CRIT
876- "%s -- lvm_do_lv_extend_reduce: vmalloc "
877- "error LV_BLOCK_EXCEPTION of %lu Byte at line %d\n",
878- lvm_name, size, __LINE__);
879- return -ENOMEM;
880- }
881-
882- if ((new_lv->lv_remap_end > old_lv->lv_remap_end) &&
883- (copy_from_user(lvbe, new_lv->lv_block_exception, size))) {
884- vfree(lvbe);
885- return -EFAULT;
886- }
887- new_lv->lv_block_exception = lvbe;
888-
889- if (lvm_snapshot_alloc_hash_table(new_lv)) {
890- vfree(new_lv->lv_block_exception);
891- return -ENOMEM;
892- }
893+ if (lvm_snapshot_alloc_hash_table(new_lv)) {
894+ vfree(new_lv->lv_block_exception);
895+ return -ENOMEM;
896+ }
897
898- return 0;
899+ return 0;
900 }
901
902 static int __extend_reduce(vg_t *vg_ptr, lv_t *old_lv, lv_t *new_lv) {
903- ulong size, l, p, end;
904- pe_t *pe;
905+ ulong size, l, p, end;
906+ pe_t *pe;
907+
908+ /* allocate space for new pe structures */
909+ size = new_lv->lv_current_le * sizeof(pe_t);
910+ if ((pe = vmalloc(size)) == NULL) {
911+ printk(KERN_CRIT
912+ "%s -- lvm_do_lv_extend_reduce: "
913+ "vmalloc error LV_CURRENT_PE of %lu Byte at line %d\n",
914+ lvm_name, size, __LINE__);
915+ return -ENOMEM;
916+ }
917+
918+ /* get the PE structures from user space */
919+ if (copy_from_user(pe, new_lv->lv_current_pe, size)) {
920+ if(old_lv->lv_access & LV_SNAPSHOT)
921+ vfree(new_lv->lv_snapshot_hash_table);
922+ vfree(pe);
923+ return -EFAULT;
924+ }
925+
926+ new_lv->lv_current_pe = pe;
927+
928+ /* reduce allocation counters on PV(s) */
929+ for (l = 0; l < old_lv->lv_allocated_le; l++) {
930+ vg_ptr->pe_allocated--;
931+ for (p = 0; p < vg_ptr->pv_cur; p++) {
932+ if (vg_ptr->pv[p]->pv_dev ==
933+ old_lv->lv_current_pe[l].dev) {
934+ vg_ptr->pv[p]->pe_allocated--;
935+ break;
936+ }
937+ }
938+ }
939
940- /* allocate space for new pe structures */
941- size = new_lv->lv_current_le * sizeof(pe_t);
942- if ((pe = vmalloc(size)) == NULL) {
943- printk(KERN_CRIT
944- "%s -- lvm_do_lv_extend_reduce: "
945- "vmalloc error LV_CURRENT_PE of %lu Byte at line %d\n",
946- lvm_name, size, __LINE__);
947- return -ENOMEM;
948- }
949-
950- /* get the PE structures from user space */
951- if (copy_from_user(pe, new_lv->lv_current_pe, size)) {
952- if(old_lv->lv_access & LV_SNAPSHOT)
953- vfree(new_lv->lv_snapshot_hash_table);
954- vfree(pe);
955- return -EFAULT;
956- }
957-
958- new_lv->lv_current_pe = pe;
959-
960- /* reduce allocation counters on PV(s) */
961- for (l = 0; l < old_lv->lv_allocated_le; l++) {
962- vg_ptr->pe_allocated--;
963- for (p = 0; p < vg_ptr->pv_cur; p++) {
964- if (vg_ptr->pv[p]->pv_dev ==
965- old_lv->lv_current_pe[l].dev) {
966- vg_ptr->pv[p]->pe_allocated--;
967- break;
968- }
969- }
970- }
971-
972- /* extend the PE count in PVs */
973- for (l = 0; l < new_lv->lv_allocated_le; l++) {
974- vg_ptr->pe_allocated++;
975- for (p = 0; p < vg_ptr->pv_cur; p++) {
976- if (vg_ptr->pv[p]->pv_dev ==
977+ /* extend the PE count in PVs */
978+ for (l = 0; l < new_lv->lv_allocated_le; l++) {
979+ vg_ptr->pe_allocated++;
980+ for (p = 0; p < vg_ptr->pv_cur; p++) {
981+ if (vg_ptr->pv[p]->pv_dev ==
982 new_lv->lv_current_pe[l].dev) {
983- vg_ptr->pv[p]->pe_allocated++;
984- break;
985- }
986- }
987- }
988-
989- /* save availiable i/o statistic data */
990- if (old_lv->lv_stripes < 2) { /* linear logical volume */
991- end = min(old_lv->lv_current_le, new_lv->lv_current_le);
992- for (l = 0; l < end; l++) {
993- new_lv->lv_current_pe[l].reads +=
994- old_lv->lv_current_pe[l].reads;
995-
996- new_lv->lv_current_pe[l].writes +=
997- old_lv->lv_current_pe[l].writes;
998- }
999-
1000- } else { /* striped logical volume */
1001- uint i, j, source, dest, end, old_stripe_size, new_stripe_size;
1002-
1003- old_stripe_size = old_lv->lv_allocated_le / old_lv->lv_stripes;
1004- new_stripe_size = new_lv->lv_allocated_le / new_lv->lv_stripes;
1005- end = min(old_stripe_size, new_stripe_size);
1006-
1007- for (i = source = dest = 0;
1008- i < new_lv->lv_stripes; i++) {
1009- for (j = 0; j < end; j++) {
1010- new_lv->lv_current_pe[dest + j].reads +=
1011- old_lv->lv_current_pe[source + j].reads;
1012- new_lv->lv_current_pe[dest + j].writes +=
1013- old_lv->lv_current_pe[source + j].writes;
1014- }
1015- source += old_stripe_size;
1016- dest += new_stripe_size;
1017- }
1018- }
1019+ vg_ptr->pv[p]->pe_allocated++;
1020+ break;
1021+ }
1022+ }
1023+ }
1024
1025- return 0;
1026+ /* save availiable i/o statistic data */
1027+ if (old_lv->lv_stripes < 2) { /* linear logical volume */
1028+ end = min(old_lv->lv_current_le, new_lv->lv_current_le);
1029+ for (l = 0; l < end; l++) {
1030+ new_lv->lv_current_pe[l].reads +=
1031+ old_lv->lv_current_pe[l].reads;
1032+
1033+ new_lv->lv_current_pe[l].writes +=
1034+ old_lv->lv_current_pe[l].writes;
1035+ }
1036+
1037+ } else { /* striped logical volume */
1038+ uint i, j, source, dest, end, old_stripe_size, new_stripe_size;
1039+
1040+ old_stripe_size = old_lv->lv_allocated_le / old_lv->lv_stripes;
1041+ new_stripe_size = new_lv->lv_allocated_le / new_lv->lv_stripes;
1042+ end = min(old_stripe_size, new_stripe_size);
1043+
1044+ for (i = source = dest = 0; i < new_lv->lv_stripes; i++) {
1045+ for (j = 0; j < end; j++) {
1046+ new_lv->lv_current_pe[dest + j].reads +=
1047+ old_lv->lv_current_pe[source + j].reads;
1048+ new_lv->lv_current_pe[dest + j].writes +=
1049+ old_lv->lv_current_pe[source + j].writes;
1050+ }
1051+ source += old_stripe_size;
1052+ dest += new_stripe_size;
1053+ }
1054+ }
1055+
1056+ return 0;
1057 }
1058
1059 static int lvm_do_lv_extend_reduce(int minor, char *lv_name, lv_t *new_lv)
1060 {
1061- int r;
1062- ulong l, e, size;
1063- vg_t *vg_ptr = vg[VG_CHR(minor)];
1064- lv_t *old_lv;
1065- pe_t *pe;
1066-
1067- if ((pe = new_lv->lv_current_pe) == NULL)
1068- return -EINVAL;
1069-
1070- for (l = 0; l < vg_ptr->lv_max; l++)
1071- if (vg_ptr->lv[l] && !strcmp(vg_ptr->lv[l]->lv_name, lv_name))
1072- break;
1073+ int r;
1074+ ulong l, e, size;
1075+ vg_t *vg_ptr = vg[VG_CHR(minor)];
1076+ lv_t *old_lv;
1077+ pe_t *pe;
1078
1079- if (l == vg_ptr->lv_max)
1080- return -ENXIO;
1081+ if ((pe = new_lv->lv_current_pe) == NULL)
1082+ return -EINVAL;
1083
1084- old_lv = vg_ptr->lv[l];
1085+ for (l = 0; l < vg_ptr->lv_max; l++)
1086+ if (vg_ptr->lv[l] && !strcmp(vg_ptr->lv[l]->lv_name, lv_name))
1087+ break;
1088+
1089+ if (l == vg_ptr->lv_max)
1090+ return -ENXIO;
1091+
1092+ old_lv = vg_ptr->lv[l];
1093
1094 if (old_lv->lv_access & LV_SNAPSHOT) {
1095 /* only perform this operation on active snapshots */
1096 if (old_lv->lv_status & LV_ACTIVE)
1097- r = __extend_reduce_snapshot(vg_ptr, old_lv, new_lv);
1098- else
1099+ r = __extend_reduce_snapshot(vg_ptr, old_lv, new_lv);
1100+ else
1101 r = -EPERM;
1102
1103 } else
1104- r = __extend_reduce(vg_ptr, old_lv, new_lv);
1105+ r = __extend_reduce(vg_ptr, old_lv, new_lv);
1106
1107- if(r)
1108- return r;
1109+ if(r)
1110+ return r;
1111
1112- /* copy relevent fields */
1113+ /* copy relevent fields */
1114 down_write(&old_lv->lv_lock);
1115
1116- if(new_lv->lv_access & LV_SNAPSHOT) {
1117- size = (new_lv->lv_remap_end > old_lv->lv_remap_end) ?
1118- old_lv->lv_remap_ptr : new_lv->lv_remap_end;
1119- size *= sizeof(lv_block_exception_t);
1120- memcpy(new_lv->lv_block_exception,
1121- old_lv->lv_block_exception, size);
1122-
1123- old_lv->lv_remap_end = new_lv->lv_remap_end;
1124- old_lv->lv_block_exception = new_lv->lv_block_exception;
1125- old_lv->lv_snapshot_hash_table =
1126- new_lv->lv_snapshot_hash_table;
1127- old_lv->lv_snapshot_hash_table_size =
1128- new_lv->lv_snapshot_hash_table_size;
1129- old_lv->lv_snapshot_hash_mask =
1130- new_lv->lv_snapshot_hash_mask;
1131-
1132- for (e = 0; e < new_lv->lv_remap_ptr; e++)
1133- lvm_hash_link(new_lv->lv_block_exception + e,
1134- new_lv->lv_block_exception[e].rdev_org,
1135- new_lv->lv_block_exception[e].rsector_org,
1136- new_lv);
1137-
1138- } else {
1139-
1140- vfree(old_lv->lv_current_pe);
1141- vfree(old_lv->lv_snapshot_hash_table);
1142-
1143- old_lv->lv_size = new_lv->lv_size;
1144- old_lv->lv_allocated_le = new_lv->lv_allocated_le;
1145- old_lv->lv_current_le = new_lv->lv_current_le;
1146- old_lv->lv_current_pe = new_lv->lv_current_pe;
1147- lvm_gendisk.part[MINOR(old_lv->lv_dev)].nr_sects =
1148- old_lv->lv_size;
1149- lvm_size[MINOR(old_lv->lv_dev)] = old_lv->lv_size >> 1;
1150-
1151- if (old_lv->lv_access & LV_SNAPSHOT_ORG) {
1152- lv_t *snap;
1153- for(snap = old_lv->lv_snapshot_next; snap;
1154- snap = snap->lv_snapshot_next) {
1155+ if(new_lv->lv_access & LV_SNAPSHOT) {
1156+ size = (new_lv->lv_remap_end > old_lv->lv_remap_end) ?
1157+ old_lv->lv_remap_ptr : new_lv->lv_remap_end;
1158+ size *= sizeof(lv_block_exception_t);
1159+ memcpy(new_lv->lv_block_exception,
1160+ old_lv->lv_block_exception, size);
1161+
1162+ old_lv->lv_remap_end = new_lv->lv_remap_end;
1163+ old_lv->lv_block_exception = new_lv->lv_block_exception;
1164+ old_lv->lv_snapshot_hash_table =
1165+ new_lv->lv_snapshot_hash_table;
1166+ old_lv->lv_snapshot_hash_table_size =
1167+ new_lv->lv_snapshot_hash_table_size;
1168+ old_lv->lv_snapshot_hash_mask =
1169+ new_lv->lv_snapshot_hash_mask;
1170+
1171+ for (e = 0; e < new_lv->lv_remap_ptr; e++)
1172+ lvm_hash_link(new_lv->lv_block_exception + e,
1173+ new_lv->lv_block_exception[e].rdev_org,
1174+ new_lv->lv_block_exception[e].rsector_org,
1175+ new_lv);
1176+
1177+ } else {
1178+
1179+ vfree(old_lv->lv_current_pe);
1180+ vfree(old_lv->lv_snapshot_hash_table);
1181+
1182+ old_lv->lv_size = new_lv->lv_size;
1183+ old_lv->lv_allocated_le = new_lv->lv_allocated_le;
1184+ old_lv->lv_current_le = new_lv->lv_current_le;
1185+ old_lv->lv_current_pe = new_lv->lv_current_pe;
1186+ lvm_gendisk.part[MINOR(old_lv->lv_dev)].nr_sects =
1187+ old_lv->lv_size;
1188+ lvm_size[MINOR(old_lv->lv_dev)] = old_lv->lv_size >> 1;
1189+
1190+ if (old_lv->lv_access & LV_SNAPSHOT_ORG) {
1191+ lv_t *snap;
1192+ for(snap = old_lv->lv_snapshot_next; snap;
1193+ snap = snap->lv_snapshot_next) {
1194 down_write(&snap->lv_lock);
1195- snap->lv_current_pe = old_lv->lv_current_pe;
1196- snap->lv_allocated_le =
1197- old_lv->lv_allocated_le;
1198- snap->lv_current_le = old_lv->lv_current_le;
1199- snap->lv_size = old_lv->lv_size;
1200-
1201- lvm_gendisk.part[MINOR(snap->lv_dev)].nr_sects
1202- = old_lv->lv_size;
1203- lvm_size[MINOR(snap->lv_dev)] =
1204- old_lv->lv_size >> 1;
1205- __update_hardsectsize(snap);
1206+ snap->lv_current_pe = old_lv->lv_current_pe;
1207+ snap->lv_allocated_le =
1208+ old_lv->lv_allocated_le;
1209+ snap->lv_current_le = old_lv->lv_current_le;
1210+ snap->lv_size = old_lv->lv_size;
1211+
1212+ lvm_gendisk.part[MINOR(snap->lv_dev)].nr_sects
1213+ = old_lv->lv_size;
1214+ lvm_size[MINOR(snap->lv_dev)] =
1215+ old_lv->lv_size >> 1;
1216+ __update_hardsectsize(snap);
1217 up_write(&snap->lv_lock);
1218- }
1219- }
1220- }
1221+ }
1222+ }
1223+ }
1224
1225- __update_hardsectsize(old_lv);
1226+ __update_hardsectsize(old_lv);
1227 up_write(&old_lv->lv_lock);
1228
1229- return 0;
1230+ return 0;
1231 } /* lvm_do_lv_extend_reduce() */
1232
1233
1234@@ -2426,7 +2501,6 @@
1235 lv_ptr,
1236 sizeof(lv_t)) != 0)
1237 return -EFAULT;
1238-
1239 if (saved_ptr1 != NULL) {
1240 if (copy_to_user(saved_ptr1,
1241 lv_ptr->lv_current_pe,
1242@@ -2461,9 +2535,6 @@
1243
1244 if (lv_status_byindex_req.lv == NULL)
1245 return -EINVAL;
1246- if (lv_status_byindex_req.lv_index <0 ||
1247- lv_status_byindex_req.lv_index >= MAX_LV)
1248- return -EINVAL;
1249 if ( ( lv_ptr = vg_ptr->lv[lv_status_byindex_req.lv_index]) == NULL)
1250 return -ENXIO;
1251
1252@@ -2552,9 +2623,7 @@
1253 if (lv_ptr->lv_dev == lv->lv_dev)
1254 {
1255 lvm_fs_remove_lv(vg_ptr, lv_ptr);
1256- strncpy(lv_ptr->lv_name,
1257- lv_req->lv_name,
1258- NAME_LEN);
1259+ strncpy(lv_ptr->lv_name, lv_req->lv_name, NAME_LEN);
1260 lvm_fs_create_lv(vg_ptr, lv_ptr);
1261 break;
1262 }
1263@@ -2629,23 +2698,24 @@
1264 return -ENXIO;
1265 } /* lvm_do_pv_status() */
1266
1267+
1268 /*
1269 * character device support function flush and invalidate all buffers of a PV
1270 */
1271 static int lvm_do_pv_flush(void *arg)
1272 {
1273- pv_flush_req_t pv_flush_req;
1274+ pv_flush_req_t pv_flush_req;
1275
1276- if (copy_from_user(&pv_flush_req, arg,
1277- sizeof(pv_flush_req)) != 0)
1278- return -EFAULT;
1279+ if (copy_from_user(&pv_flush_req, arg, sizeof(pv_flush_req)) != 0)
1280+ return -EFAULT;
1281
1282- fsync_dev(pv_flush_req.pv_dev);
1283- invalidate_buffers(pv_flush_req.pv_dev);
1284+ fsync_dev(pv_flush_req.pv_dev);
1285+ invalidate_buffers(pv_flush_req.pv_dev);
1286
1287- return 0;
1288+ return 0;
1289 }
1290
1291+
1292 /*
1293 * support function initialize gendisk variables
1294 */
1295@@ -2708,6 +2778,7 @@
1296 }
1297 }
1298
1299+
1300 /*
1301 * we must open the pv's before we use them
1302 */
1303@@ -2719,22 +2790,25 @@
1304 return -ENOMEM;
1305
1306 err = blkdev_get(bd, FMODE_READ|FMODE_WRITE, 0, BDEV_FILE);
1307- if (err)
1308+ if (err) {
1309+ bdput(bd);
1310 return err;
1311+ }
1312
1313 pv->bd = bd;
1314 return 0;
1315 }
1316
1317 static void _close_pv(pv_t *pv) {
1318- if (pv) {
1319- struct block_device *bdev = pv->bd;
1320- pv->bd = NULL;
1321- if (bdev)
1322- blkdev_put(bdev, BDEV_FILE);
1323- }
1324+ if(!pv || !pv->bd)
1325+ return;
1326+
1327+ blkdev_put(pv->bd, BDEV_FILE);
1328+ bdput(pv->bd);
1329+ pv->bd = 0;
1330 }
1331
1332+
1333 static unsigned long _sectors_to_k(unsigned long sect)
1334 {
1335 if(SECTOR_SIZE > 1024) {
1336@@ -2744,6 +2818,11 @@
1337 return sect / (1024 / SECTOR_SIZE);
1338 }
1339
1340+MODULE_AUTHOR("Heinz Mauelshagen, Sistina Software");
1341+MODULE_DESCRIPTION("Logical Volume Manager");
1342+#ifdef MODULE_LICENSE
1343+MODULE_LICENSE("GPL");
1344+#endif
1345+
1346 module_init(lvm_init);
1347 module_exit(lvm_cleanup);
1348-MODULE_LICENSE("GPL");
1349--- linux/drivers/md/lvm-internal.h.orig Sun Nov 11 18:09:32 2001
1350+++ linux/drivers/md/lvm-internal.h Thu Jan 10 12:24:08 2002
1351@@ -1,5 +1,6 @@
1352+
1353 /*
1354- * kernel/lvm-internal.h
1355+ * kernel/lvm_internal.h
1356 *
1357 * Copyright (C) 2001 Sistina Software
1358 *
1359@@ -24,7 +25,9 @@
1360 /*
1361 * Changelog
1362 *
1363- * 05/01/2001:Joe Thornber - Factored this file out of lvm.c
1364+ * 05/01/2001 - Factored this file out of lvm.c (Joe Thornber)
1365+ * 11/01/2001 - Renamed lvm_internal and added declarations
1366+ * for lvm_fs.c stuff
1367 *
1368 */
1369
1370@@ -33,7 +36,7 @@
1371
1372 #include <linux/lvm.h>
1373
1374-#define _LVM_INTERNAL_H_VERSION "LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")"
1375+#define _LVM_INTERNAL_H_VERSION "LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")"
1376
1377 /* global variables, defined in lvm.c */
1378 extern char *lvm_version;
1379@@ -42,11 +45,15 @@
1380 extern const char *const lvm_name;
1381
1382
1383+extern uint vg_count;
1384 extern vg_t *vg[];
1385 extern struct file_operations lvm_chr_fops;
1386
1387 extern struct block_device_operations lvm_blk_dops;
1388
1389+#define lvm_sectsize(dev) get_hardsect_size(dev)
1390+
1391+/* 2.4.8 had no global min/max macros, and 2.4.9's were flawed */
1392
1393 /* debug macros */
1394 #ifdef DEBUG_IOCTL
1395--- linux/drivers/md/lvm-snap.c.orig Fri Dec 21 17:41:54 2001
1396+++ linux/drivers/md/lvm-snap.c Thu Jan 10 12:24:08 2002
1397@@ -2,22 +2,22 @@
1398 * kernel/lvm-snap.c
1399 *
1400 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
1401- * Heinz Mauelshagen, Sistina Software (persistent snapshots)
1402+ * 2000 - 2001 Heinz Mauelshagen, Sistina Software
1403 *
1404 * LVM snapshot driver is free software; you can redistribute it and/or modify
1405 * it under the terms of the GNU General Public License as published by
1406 * the Free Software Foundation; either version 2, or (at your option)
1407 * any later version.
1408- *
1409+ *
1410 * LVM snapshot driver is distributed in the hope that it will be useful,
1411 * but WITHOUT ANY WARRANTY; without even the implied warranty of
1412 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1413 * GNU General Public License for more details.
1414- *
1415+ *
1416 * You should have received a copy of the GNU General Public License
1417 * along with GNU CC; see the file COPYING. If not, write to
1418 * the Free Software Foundation, 59 Temple Place - Suite 330,
1419- * Boston, MA 02111-1307, USA.
1420+ * Boston, MA 02111-1307, USA.
1421 *
1422 */
1423
1424@@ -28,52 +28,66 @@
1425 * 23/11/2000 - used cpu_to_le64 rather than my own macro
1426 * 25/01/2001 - Put LockPage back in
1427 * 01/02/2001 - A dropped snapshot is now set as inactive
1428+ * 14/02/2001 - tidied debug statements
1429+ * 19/02/2001 - changed rawio calls to pass in preallocated buffer_heads
1430+ * 26/02/2001 - introduced __brw_kiovec to remove a lot of conditional
1431+ * compiles.
1432+ * 07/03/2001 - fixed COW exception table not persistent on 2.2 (HM)
1433 * 12/03/2001 - lvm_pv_get_number changes:
1434 * o made it static
1435 * o renamed it to _pv_get_number
1436 * o pv number is returned in new uint * arg
1437 * o -1 returned on error
1438 * lvm_snapshot_fill_COW_table has a return value too.
1439+ * 15/10/2001 - fix snapshot alignment problem [CM]
1440+ * - fix snapshot full oops (always check lv_block_exception) [CM]
1441 *
1442 */
1443
1444 #include <linux/kernel.h>
1445-#include <linux/module.h>
1446 #include <linux/vmalloc.h>
1447 #include <linux/blkdev.h>
1448 #include <linux/smp_lock.h>
1449 #include <linux/types.h>
1450 #include <linux/iobuf.h>
1451 #include <linux/lvm.h>
1452+#include <linux/devfs_fs_kernel.h>
1453
1454
1455 #include "lvm-internal.h"
1456
1457-static char *lvm_snap_version __attribute__ ((unused)) =
1458- "LVM "LVM_RELEASE_NAME" snapshot code ("LVM_RELEASE_DATE")\n";
1459+static char *lvm_snap_version __attribute__ ((unused)) = "LVM "LVM_RELEASE_NAME" snapshot code ("LVM_RELEASE_DATE")\n";
1460
1461
1462 extern const char *const lvm_name;
1463 extern int lvm_blocksizes[];
1464
1465 void lvm_snapshot_release(lv_t *);
1466+
1467 static int _write_COW_table_block(vg_t *vg, lv_t *lv, int idx,
1468- const char **reason);
1469+ const char **reason);
1470 static void _disable_snapshot(vg_t *vg, lv_t *lv);
1471
1472
1473-static int _pv_get_number(vg_t * vg, kdev_t rdev, uint *pvn) {
1474+static inline int __brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
1475+ kdev_t dev, unsigned long b[], int size,
1476+ lv_t *lv) {
1477+ return brw_kiovec(rw, nr, iovec, dev, b, size);
1478+}
1479+
1480+
1481+static int _pv_get_number(vg_t * vg, kdev_t rdev, uint *pvn)
1482+{
1483 uint p;
1484- for(p = 0; p < vg->pv_max; p++) {
1485- if(vg->pv[p] == NULL)
1486+ for (p = 0; p < vg->pv_max; p++) {
1487+ if (vg->pv[p] == NULL)
1488 continue;
1489
1490- if(vg->pv[p]->pv_dev == rdev)
1491+ if (vg->pv[p]->pv_dev == rdev)
1492 break;
1493-
1494 }
1495
1496- if(p >= vg->pv_max) {
1497+ if (p >= vg->pv_max) {
1498 /* bad news, the snapshot COW table is probably corrupt */
1499 printk(KERN_ERR
1500 "%s -- _pv_get_number failed for rdev = %u\n",
1501@@ -85,6 +99,7 @@
1502 return 0;
1503 }
1504
1505+
1506 #define hashfn(dev,block,mask,chunk_size) \
1507 ((HASHDEV(dev)^((block)/(chunk_size))) & (mask))
1508
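
The hashfn() macro above buckets an (original device, original sector) pair by
chunk number: the sector is divided by the snapshot chunk size, XORed with a
per-device value from HASHDEV() (defined elsewhere in the driver) and masked
down to the hash table size. A small user-space illustration with a stand-in
for HASHDEV():

#include <stdio.h>

#define HASHDEV(dev) (dev)  /* stand-in; the driver derives this from kdev_t */

/* same expression as the hashfn() macro above */
#define hashfn(dev, block, mask, chunk_size) \
	((HASHDEV(dev) ^ ((block) / (chunk_size))) & (mask))

int main(void)
{
	unsigned long dev = 0x3a01;       /* illustrative device number    */
	unsigned long chunk_size = 128;   /* sectors per snapshot chunk    */
	unsigned long mask = 255;         /* 256-bucket hash table         */
	unsigned long sector;

	/* sectors that fall in the same chunk land in the same bucket */
	for (sector = 1000; sector <= 1002; sector++)
		printf("sector %lu -> bucket %lu\n", sector,
		       hashfn(dev, sector, mask, chunk_size));
	return 0;
}
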
1509@@ -129,10 +144,20 @@
1510 unsigned long mask = lv->lv_snapshot_hash_mask;
1511 int chunk_size = lv->lv_chunk_size;
1512
1513+ if (!hash_table)
1514+ BUG();
1515 hash_table = &hash_table[hashfn(org_dev, org_start, mask, chunk_size)];
1516 list_add(&exception->hash, hash_table);
1517 }
1518
1519+/*
1520+ * Determine if we already have a snapshot chunk for this block.
1521+ * Return: 1 if it the chunk already exists
1522+ * 0 if we need to COW this block and allocate a new chunk
1523+ * -1 if the snapshot was disabled because it ran out of space
1524+ *
1525+ * We need to be holding at least a read lock on lv->lv_lock.
1526+ */
1527 int lvm_snapshot_remap_block(kdev_t * org_dev, unsigned long * org_sector,
1528 unsigned long pe_start, lv_t * lv)
1529 {
1530@@ -142,6 +167,9 @@
1531 int chunk_size = lv->lv_chunk_size;
1532 lv_block_exception_t * exception;
1533
1534+ if (!lv->lv_block_exception)
1535+ return -1;
1536+
1537 pe_off = pe_start % chunk_size;
1538 pe_adjustment = (*org_sector-pe_off) % chunk_size;
1539 __org_start = *org_sector - pe_adjustment;
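
The assignments above round the original sector down to the start of its
snapshot chunk, allowing for physical extents that do not begin on a chunk
boundary (pe_off is the PE start modulo the chunk size). A worked example of
the same arithmetic with made-up numbers, all in sectors:

#include <stdio.h>

int main(void)
{
	unsigned long chunk_size = 128;   /* lv_chunk_size                  */
	unsigned long pe_start   = 100;   /* PE start, not chunk aligned    */
	unsigned long org_sector = 1000;  /* sector being remapped          */

	unsigned long pe_off        = pe_start % chunk_size;              /* 100 */
	unsigned long pe_adjustment = (org_sector - pe_off) % chunk_size; /* 4   */
	unsigned long org_start     = org_sector - pe_adjustment;         /* 996 */

	/* the chunk grid starts at sector 100, so chunks begin at
	 * 100, 228, 356, ... and sector 1000 belongs to the chunk at 996 */
	printf("pe_off=%lu adjustment=%lu chunk start=%lu\n",
	       pe_off, pe_adjustment, org_start);
	return 0;
}
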
1540@@ -166,8 +194,8 @@
1541 or error on this snapshot --> release it */
1542 invalidate_buffers(lv_snap->lv_dev);
1543
1544- /* wipe the snapshot since it's inconsistent now */
1545- _disable_snapshot(vg, lv_snap);
1546+ /* wipe the snapshot since it's inconsistent now */
1547+ _disable_snapshot(vg, lv_snap);
1548
1549 for (i = last_dev = 0; i < lv_snap->lv_remap_ptr; i++) {
1550 if ( lv_snap->lv_block_exception[i].rdev_new != last_dev) {
1551@@ -186,15 +214,15 @@
1552 }
1553
1554 static inline int lvm_snapshot_prepare_blocks(unsigned long *blocks,
1555- unsigned long start,
1556- int nr_sectors,
1557- int blocksize)
1558+ unsigned long start,
1559+ int nr_sectors,
1560+ int blocksize)
1561 {
1562 int i, sectors_per_block, nr_blocks;
1563
1564 sectors_per_block = blocksize / SECTOR_SIZE;
1565
1566- if(start & (sectors_per_block - 1))
1567+ if (start & (sectors_per_block - 1))
1568 return 0;
1569
1570 nr_blocks = nr_sectors / sectors_per_block;
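
lvm_snapshot_prepare_blocks() turns a run of sectors into block numbers for
the kiovec I/O: it computes sectors per block, refuses a start that is not
block aligned, and emits nr_sectors / sectors_per_block consecutive block
numbers. A user-space sketch of that conversion (the loop that fills blocks[]
is assumed, since the hunk cuts the function short):

#include <stdio.h>

#define SECTOR_SIZE 512

static int prepare_blocks(unsigned long *blocks, unsigned long start,
			  int nr_sectors, int blocksize)
{
	int i, nr_blocks;
	int sectors_per_block = blocksize / SECTOR_SIZE;

	if (start & (sectors_per_block - 1))    /* must be block aligned */
		return 0;

	nr_blocks = nr_sectors / sectors_per_block;
	start /= sectors_per_block;

	for (i = 0; i < nr_blocks; i++)         /* assumed tail of the helper */
		blocks[i] = start++;
	return 1;
}

int main(void)
{
	unsigned long blocks[2];
	int ok = prepare_blocks(blocks, 2048, 16, 4096);

	/* 4096-byte blocks hold 8 sectors: sectors 2048..2063 -> blocks 256, 257 */
	printf("ok=%d blocks=%lu,%lu\n", ok, blocks[0], blocks[1]);
	return 0;
}
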
1571@@ -245,49 +273,51 @@
1572
1573 int lvm_snapshot_fill_COW_page(vg_t * vg, lv_t * lv_snap)
1574 {
1575- uint pvn;
1576- int id = 0, is = lv_snap->lv_remap_ptr;
1577- ulong blksize_snap;
1578- lv_COW_table_disk_t * lv_COW_table = (lv_COW_table_disk_t *)
1579- page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);
1580+ int id = 0, is = lv_snap->lv_remap_ptr;
1581+ ulong blksize_snap;
1582+ lv_COW_table_disk_t * lv_COW_table = (lv_COW_table_disk_t *)
1583+ page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);
1584
1585- if (is == 0)
1586- return 0;
1587+ if (is == 0)
1588+ return 0;
1589
1590 is--;
1591- blksize_snap =
1592- lvm_get_blksize(lv_snap->lv_block_exception[is].rdev_new);
1593- is -= is % (blksize_snap / sizeof(lv_COW_table_disk_t));
1594+ blksize_snap =
1595+ lvm_get_blksize(lv_snap->lv_block_exception[is].rdev_new);
1596+ is -= is % (blksize_snap / sizeof(lv_COW_table_disk_t));
1597
1598 memset(lv_COW_table, 0, blksize_snap);
1599 for ( ; is < lv_snap->lv_remap_ptr; is++, id++) {
1600 /* store new COW_table entry */
1601- lv_block_exception_t *be = lv_snap->lv_block_exception + is;
1602- if(_pv_get_number(vg, be->rdev_org, &pvn))
1603- goto bad;
1604+ lv_block_exception_t *be = lv_snap->lv_block_exception + is;
1605+ uint pvn;
1606
1607- lv_COW_table[id].pv_org_number = cpu_to_le64(pvn);
1608- lv_COW_table[id].pv_org_rsector = cpu_to_le64(be->rsector_org);
1609- if(_pv_get_number(vg, be->rdev_new, &pvn))
1610- goto bad;
1611+ if (_pv_get_number(vg, be->rdev_org, &pvn))
1612+ goto bad;
1613
1614- lv_COW_table[id].pv_snap_number = cpu_to_le64(pvn);
1615- lv_COW_table[id].pv_snap_rsector =
1616- cpu_to_le64(be->rsector_new);
1617+ lv_COW_table[id].pv_org_number = cpu_to_le64(pvn);
1618+ lv_COW_table[id].pv_org_rsector = cpu_to_le64(be->rsector_org);
1619+
1620+ if (_pv_get_number(vg, be->rdev_new, &pvn))
1621+ goto bad;
1622+
1623+ lv_COW_table[id].pv_snap_number = cpu_to_le64(pvn);
1624+ lv_COW_table[id].pv_snap_rsector = cpu_to_le64(be->rsector_new);
1625 }
1626
1627- return 0;
1628+ return 0;
1629
1630 bad:
1631- printk(KERN_ERR "%s -- lvm_snapshot_fill_COW_page failed", lvm_name);
1632- return -1;
1633+ printk(KERN_ERR "%s -- lvm_snapshot_fill_COW_page failed", lvm_name);
1634+ return -1;
1635 }
1636
1637
1638 /*
1639 * writes a COW exception table sector to disk (HM)
1640+ *
1641+ * We need to hold a write lock on lv_snap->lv_lock.
1642 */
1643-
1644 int lvm_write_COW_table_block(vg_t * vg, lv_t *lv_snap)
1645 {
1646 int r;
1647@@ -305,6 +335,10 @@
1648 * if there is no exception storage space free any longer --> release snapshot.
1649 *
1650 * this routine gets called for each _first_ write to a physical chunk.
1651+ *
1652+ * We need to hold a write lock on lv_snap->lv_lock. It is assumed that
1653+ * lv->lv_block_exception is non-NULL (checked by lvm_snapshot_remap_block())
1654+ * when this function is called.
1655 */
1656 int lvm_snapshot_COW(kdev_t org_phys_dev,
1657 unsigned long org_phys_sector,
1658@@ -314,8 +348,10 @@
1659 {
1660 const char * reason;
1661 unsigned long org_start, snap_start, snap_phys_dev, virt_start, pe_off;
1662+ unsigned long phys_start;
1663 int idx = lv_snap->lv_remap_ptr, chunk_size = lv_snap->lv_chunk_size;
1664 struct kiobuf * iobuf;
1665+ unsigned long blocks[KIO_MAX_SECTORS];
1666 int blksize_snap, blksize_org, min_blksize, max_blksize;
1667 int max_sectors, nr_sectors;
1668
1669@@ -347,8 +383,8 @@
1670
1671 iobuf = lv_snap->lv_iobuf;
1672
1673- blksize_org = lvm_get_blksize(org_phys_dev);
1674- blksize_snap = lvm_get_blksize(snap_phys_dev);
1675+ blksize_org = lvm_sectsize(org_phys_dev);
1676+ blksize_snap = lvm_sectsize(snap_phys_dev);
1677 max_blksize = max(blksize_org, blksize_snap);
1678 min_blksize = min(blksize_org, blksize_snap);
1679 max_sectors = KIO_MAX_SECTORS * (min_blksize>>9);
1680@@ -356,6 +392,9 @@
1681 if (chunk_size % (max_blksize>>9))
1682 goto fail_blksize;
1683
1684+ /* Don't change org_start, we need it to fill in the exception table */
1685+ phys_start = org_start;
1686+
1687 while (chunk_size)
1688 {
1689 nr_sectors = min(chunk_size, max_sectors);
1690@@ -363,21 +402,24 @@
1691
1692 iobuf->length = nr_sectors << 9;
1693
1694- if(!lvm_snapshot_prepare_blocks(iobuf->blocks, org_start,
1695- nr_sectors, blksize_org))
1696+ if (!lvm_snapshot_prepare_blocks(blocks, phys_start,
1697+ nr_sectors, blksize_org))
1698 goto fail_prepare;
1699
1700- if (brw_kiovec(READ, 1, &iobuf, org_phys_dev,
1701- iobuf->blocks, blksize_org) != (nr_sectors<<9))
1702+ if (__brw_kiovec(READ, 1, &iobuf, org_phys_dev, blocks,
1703+ blksize_org, lv_snap) != (nr_sectors<<9))
1704 goto fail_raw_read;
1705
1706- if(!lvm_snapshot_prepare_blocks(iobuf->blocks, snap_start,
1707- nr_sectors, blksize_snap))
1708+ if (!lvm_snapshot_prepare_blocks(blocks, snap_start,
1709+ nr_sectors, blksize_snap))
1710 goto fail_prepare;
1711
1712- if (brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev,
1713- iobuf->blocks, blksize_snap) != (nr_sectors<<9))
1714+ if (__brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev, blocks,
1715+ blksize_snap, lv_snap) != (nr_sectors<<9))
1716 goto fail_raw_write;
1717+
1718+ phys_start += nr_sectors;
1719+ snap_start += nr_sectors;
1720 }
1721
1722 #ifdef DEBUG_SNAPSHOT
1723@@ -401,24 +443,24 @@
1724 return 0;
1725
1726 /* slow path */
1727- out:
1728+out:
1729 lvm_drop_snapshot(vg, lv_snap, reason);
1730 return 1;
1731
1732- fail_out_of_space:
1733+fail_out_of_space:
1734 reason = "out of space";
1735 goto out;
1736- fail_raw_read:
1737+fail_raw_read:
1738 reason = "read error";
1739 goto out;
1740- fail_raw_write:
1741+fail_raw_write:
1742 reason = "write error";
1743 goto out;
1744- fail_blksize:
1745+fail_blksize:
1746 reason = "blocksize error";
1747 goto out;
1748
1749- fail_prepare:
1750+fail_prepare:
1751 reason = "couldn't prepare kiovec blocks "
1752 "(start probably isn't block aligned)";
1753 goto out;
1754@@ -441,8 +483,7 @@
1755 struct page * page;
1756
1757 page = alloc_page(GFP_KERNEL);
1758- if (!page)
1759- goto out;
1760+ if (!page) goto out;
1761
1762 iobuf->maplist[i] = page;
1763 LockPage(page);
1764@@ -451,7 +492,8 @@
1765 iobuf->offset = 0;
1766
1767 err = 0;
1768- out:
1769+
1770+out:
1771 return err;
1772 }
1773
1774@@ -515,13 +557,12 @@
1775 if (ret) goto out_free_kiovec;
1776
1777 ret = lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_COW_table_iobuf,
1778- PAGE_SIZE/SECTOR_SIZE);
1779+ PAGE_SIZE/SECTOR_SIZE);
1780 if (ret) goto out_free_both_kiovecs;
1781
1782 ret = lvm_snapshot_alloc_hash_table(lv_snap);
1783 if (ret) goto out_free_both_kiovecs;
1784
1785-
1786 out:
1787 return ret;
1788
1789@@ -534,8 +575,7 @@
1790 unmap_kiobuf(lv_snap->lv_iobuf);
1791 free_kiovec(1, &lv_snap->lv_iobuf);
1792 lv_snap->lv_iobuf = NULL;
1793- if (lv_snap->lv_snapshot_hash_table != NULL)
1794- vfree(lv_snap->lv_snapshot_hash_table);
1795+ vfree(lv_snap->lv_snapshot_hash_table);
1796 lv_snap->lv_snapshot_hash_table = NULL;
1797 goto out;
1798 }
1799@@ -562,10 +602,10 @@
1800 }
1801 if (lv->lv_COW_table_iobuf)
1802 {
1803- kiobuf_wait_for_io(lv->lv_COW_table_iobuf);
1804- unmap_kiobuf(lv->lv_COW_table_iobuf);
1805- free_kiovec(1, &lv->lv_COW_table_iobuf);
1806- lv->lv_COW_table_iobuf = NULL;
1807+ kiobuf_wait_for_io(lv->lv_COW_table_iobuf);
1808+ unmap_kiobuf(lv->lv_COW_table_iobuf);
1809+ free_kiovec(1, &lv->lv_COW_table_iobuf);
1810+ lv->lv_COW_table_iobuf = NULL;
1811 }
1812 }
1813
1814@@ -577,11 +617,11 @@
1815 int idx_COW_table;
1816 uint pvn;
1817 ulong snap_pe_start, COW_table_sector_offset,
1818- COW_entries_per_pe, COW_chunks_per_pe, COW_entries_per_block;
1819+ COW_entries_per_pe, COW_chunks_per_pe, COW_entries_per_block;
1820 ulong blocks[1];
1821 kdev_t snap_phys_dev;
1822 lv_block_exception_t *be;
1823- struct kiobuf * COW_table_iobuf = lv_snap->lv_COW_table_iobuf;
1824+ struct kiobuf *COW_table_iobuf = lv_snap->lv_COW_table_iobuf;
1825 lv_COW_table_disk_t * lv_COW_table =
1826 ( lv_COW_table_disk_t *) page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);
1827
1828@@ -592,46 +632,47 @@
1829 snap_phys_dev = lv_snap->lv_block_exception[idx].rdev_new;
1830 snap_pe_start = lv_snap->lv_block_exception[idx - (idx % COW_entries_per_pe)].rsector_new - lv_snap->lv_chunk_size;
1831
1832- blksize_snap = lvm_get_blksize(snap_phys_dev);
1833+ blksize_snap = lvm_sectsize(snap_phys_dev);
1834
1835 COW_entries_per_block = blksize_snap / sizeof(lv_COW_table_disk_t);
1836 idx_COW_table = idx % COW_entries_per_pe % COW_entries_per_block;
1837
1838 if ( idx_COW_table == 0) memset(lv_COW_table, 0, blksize_snap);
1839
1840- /* sector offset into the on disk COW table */
1841+ /* sector offset into the on disk COW table */
1842 COW_table_sector_offset = (idx % COW_entries_per_pe) / (SECTOR_SIZE / sizeof(lv_COW_table_disk_t));
1843
1844 /* COW table block to write next */
1845 blocks[0] = (snap_pe_start + COW_table_sector_offset) >> (blksize_snap >> 10);
1846
1847 /* store new COW_table entry */
1848- be = lv_snap->lv_block_exception + idx;
1849- if(_pv_get_number(vg, be->rdev_org, &pvn))
1850- goto fail_pv_get_number;
1851-
1852- lv_COW_table[idx_COW_table].pv_org_number = cpu_to_le64(pvn);
1853- lv_COW_table[idx_COW_table].pv_org_rsector =
1854- cpu_to_le64(be->rsector_org);
1855- if(_pv_get_number(vg, snap_phys_dev, &pvn))
1856- goto fail_pv_get_number;
1857-
1858- lv_COW_table[idx_COW_table].pv_snap_number = cpu_to_le64(pvn);
1859- lv_COW_table[idx_COW_table].pv_snap_rsector =
1860- cpu_to_le64(be->rsector_new);
1861+ be = lv_snap->lv_block_exception + idx;
1862+ if(_pv_get_number(vg, be->rdev_org, &pvn))
1863+ goto fail_pv_get_number;
1864+
1865+ lv_COW_table[idx_COW_table].pv_org_number = cpu_to_le64(pvn);
1866+ lv_COW_table[idx_COW_table].pv_org_rsector =
1867+ cpu_to_le64(be->rsector_org);
1868+ if(_pv_get_number(vg, snap_phys_dev, &pvn))
1869+ goto fail_pv_get_number;
1870+
1871+ lv_COW_table[idx_COW_table].pv_snap_number = cpu_to_le64(pvn);
1872+ lv_COW_table[idx_COW_table].pv_snap_rsector =
1873+ cpu_to_le64(be->rsector_new);
1874
1875 COW_table_iobuf->length = blksize_snap;
1876+ /* COW_table_iobuf->nr_pages = 1; */
1877
1878- if (brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
1879- blocks, blksize_snap) != blksize_snap)
1880+ if (__brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
1881+ blocks, blksize_snap, lv_snap) != blksize_snap)
1882 goto fail_raw_write;
1883
1884- /* initialization of next COW exception table block with zeroes */
1885+ /* initialization of next COW exception table block with zeroes */
1886 end_of_table = idx % COW_entries_per_pe == COW_entries_per_pe - 1;
1887 if (idx_COW_table % COW_entries_per_block == COW_entries_per_block - 1 || end_of_table)
1888 {
1889 /* don't go beyond the end */
1890- if (idx + 1 >= lv_snap->lv_remap_end) goto out;
1891+ if (idx + 1 >= lv_snap->lv_remap_end) goto out;
1892
1893 memset(lv_COW_table, 0, blksize_snap);
1894
1895@@ -640,24 +681,24 @@
1896 idx++;
1897 snap_phys_dev = lv_snap->lv_block_exception[idx].rdev_new;
1898 snap_pe_start = lv_snap->lv_block_exception[idx - (idx % COW_entries_per_pe)].rsector_new - lv_snap->lv_chunk_size;
1899- blksize_snap = lvm_get_blksize(snap_phys_dev);
1900+ blksize_snap = lvm_sectsize(snap_phys_dev);
1901 blocks[0] = snap_pe_start >> (blksize_snap >> 10);
1902 } else blocks[0]++;
1903
1904- if (brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
1905- blocks, blksize_snap) !=
1906+ if (__brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
1907+ blocks, blksize_snap, lv_snap) !=
1908 blksize_snap)
1909 goto fail_raw_write;
1910 }
1911
1912- out:
1913+out:
1914 return 0;
1915
1916- fail_raw_write:
1917+fail_raw_write:
1918 *reason = "write error";
1919 return 1;
1920
1921- fail_pv_get_number:
1922+fail_pv_get_number:
1923 *reason = "_pv_get_number failed";
1924 return 1;
1925 }
1926@@ -681,5 +722,3 @@
1927 lvm_name, err);
1928 }
1929 }
1930-
1931-MODULE_LICENSE("GPL");
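
 [Editorial note: the lvm-snap.c hunks above rework the snapshot chunk-copy loop so that the read position is tracked in a separate phys_start cursor (leaving org_start intact for the exception table) and so that all kiobuf I/O goes through __brw_kiovec(), which additionally receives the snapshot LV. The following is only an illustrative reassembly of the patched loop from the hunk context above; lines the hunk does not show (such as the chunk_size decrement) are assumptions, not quotations of the kernel source.]

	/* Illustrative sketch of the patched copy loop (lvm-snap.c).
	 * Copy one origin chunk to the snapshot in max_sectors-sized
	 * pieces; org_start is preserved for the COW exception table. */
	phys_start = org_start;
	while (chunk_size) {
		nr_sectors = min(chunk_size, max_sectors);
		chunk_size -= nr_sectors;		/* assumed, not shown in the hunk */
		iobuf->length = nr_sectors << 9;	/* sectors -> bytes */

		/* map the next run of origin blocks and read them in */
		if (!lvm_snapshot_prepare_blocks(blocks, phys_start,
						 nr_sectors, blksize_org))
			goto fail_prepare;
		if (__brw_kiovec(READ, 1, &iobuf, org_phys_dev, blocks,
				 blksize_org, lv_snap) != (nr_sectors << 9))
			goto fail_raw_read;

		/* map the matching snapshot blocks and write them out */
		if (!lvm_snapshot_prepare_blocks(blocks, snap_start,
						 nr_sectors, blksize_snap))
			goto fail_prepare;
		if (__brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev, blocks,
				 blksize_snap, lv_snap) != (nr_sectors << 9))
			goto fail_raw_write;

		/* advance both cursors; org_start itself stays unchanged */
		phys_start += nr_sectors;
		snap_start += nr_sectors;
	}
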
1932--- linux/drivers/md/lvm-fs.c.orig Fri Dec 21 17:41:54 2001
1933+++ linux/drivers/md/lvm-fs.c Thu Jan 10 12:24:08 2002
1934@@ -3,7 +3,7 @@
1935 *
1936 * Copyright (C) 2001 Sistina Software
1937 *
1938- * January,February 2001
1939+ * January-April 2001
1940 *
1941 * LVM driver is free software; you can redistribute it and/or modify
1942 * it under the terms of the GNU General Public License as published by
1943@@ -30,13 +30,11 @@
1944 * 04/10/2001 - corrected devfs_register() call in lvm_init_fs()
1945 * 11/04/2001 - don't devfs_register("lvm") as user-space always does it
1946 * 10/05/2001 - show more of PV name in /proc/lvm/global
1947- * 16/12/2001 - fix devfs unregister order and prevent duplicate unreg (REG)
1948 *
1949 */
1950
1951 #include <linux/config.h>
1952 #include <linux/version.h>
1953-#include <linux/module.h>
1954
1955 #include <linux/kernel.h>
1956 #include <linux/vmalloc.h>
1957@@ -88,7 +86,6 @@
1958 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
1959 &lvm_chr_fops, NULL);
1960 #endif
1961-
1962 lvm_proc_dir = create_proc_entry(LVM_DIR, S_IFDIR, &proc_root);
1963 if (lvm_proc_dir) {
1964 lvm_proc_vg_subdir = create_proc_entry(LVM_VG_SUBDIR, S_IFDIR,
1965@@ -102,7 +99,6 @@
1966 #if 0
1967 devfs_unregister (lvm_devfs_handle);
1968 #endif
1969-
1970 remove_proc_entry(LVM_GLOBAL, lvm_proc_dir);
1971 remove_proc_entry(LVM_VG_SUBDIR, lvm_proc_dir);
1972 remove_proc_entry(LVM_DIR, &proc_root);
1973@@ -139,7 +135,7 @@
1974 int i;
1975
1976 devfs_unregister(ch_devfs_handle[vg_ptr->vg_number]);
1977- ch_devfs_handle[vg_ptr->vg_number] = NULL;
1978+ devfs_unregister(vg_devfs_handle[vg_ptr->vg_number]);
1979
1980 /* remove lv's */
1981 for(i = 0; i < vg_ptr->lv_max; i++)
1982@@ -149,10 +145,6 @@
1983 for(i = 0; i < vg_ptr->pv_max; i++)
1984 if(vg_ptr->pv[i]) lvm_fs_remove_pv(vg_ptr, vg_ptr->pv[i]);
1985
1986- /* must not remove directory before leaf nodes */
1987- devfs_unregister(vg_devfs_handle[vg_ptr->vg_number]);
1988- vg_devfs_handle[vg_ptr->vg_number] = NULL;
1989-
1990 if(vg_ptr->vg_dir_pde) {
1991 remove_proc_entry(LVM_LV_SUBDIR, vg_ptr->vg_dir_pde);
1992 vg_ptr->lv_subdir_pde = NULL;
1993@@ -194,7 +186,6 @@
1994
1995 void lvm_fs_remove_lv(vg_t *vg_ptr, lv_t *lv) {
1996 devfs_unregister(lv_devfs_handle[MINOR(lv->lv_dev)]);
1997- lv_devfs_handle[MINOR(lv->lv_dev)] = NULL;
1998
1999 if(vg_ptr->lv_subdir_pde) {
2000 const char *name = _basename(lv->lv_name);
2001@@ -282,12 +273,12 @@
2002 sz += sprintf(page + sz, "number: %u\n", lv->lv_number);
2003 sz += sprintf(page + sz, "open: %u\n", lv->lv_open);
2004 sz += sprintf(page + sz, "allocation: %u\n", lv->lv_allocation);
2005- if(lv->lv_stripes > 1) {
2006- sz += sprintf(page + sz, "stripes: %u\n",
2007- lv->lv_stripes);
2008- sz += sprintf(page + sz, "stripesize: %u\n",
2009- lv->lv_stripesize);
2010- }
2011+ if(lv->lv_stripes > 1) {
2012+ sz += sprintf(page + sz, "stripes: %u\n",
2013+ lv->lv_stripes);
2014+ sz += sprintf(page + sz, "stripesize: %u\n",
2015+ lv->lv_stripesize);
2016+ }
2017 sz += sprintf(page + sz, "device: %02u:%02u\n",
2018 MAJOR(lv->lv_dev), MINOR(lv->lv_dev));
2019
2020@@ -626,4 +617,3 @@
2021 }
2022 *b = '\0';
2023 }
2024-MODULE_LICENSE("GPL");
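
 [Editorial note: the lvm-fs.c hunks drop the 2.4.17 tree's 16/12/2001 devfs-unregister-order fix and restore LVM 1.0.1's teardown sequence in lvm_fs_remove_vg(): the VG character node and the VG devfs directory are unregistered first, then the per-LV and per-PV entries, then the /proc subtree. The removed comment ("must not remove directory before leaf nodes") is what the later fix addressed; this patch simply predates it. A rough reassembly of the resulting order, using only names visible in the hunks (the LV loop body is assumed to mirror the PV loop shown above):]

	/* Illustrative teardown order after this patch (LVM 1.0.1) */
	devfs_unregister(ch_devfs_handle[vg_ptr->vg_number]);	/* VG char node  */
	devfs_unregister(vg_devfs_handle[vg_ptr->vg_number]);	/* VG directory  */

	for (i = 0; i < vg_ptr->lv_max; i++)			/* LV leaf nodes (assumed body) */
		if (vg_ptr->lv[i])
			lvm_fs_remove_lv(vg_ptr, vg_ptr->lv[i]);

	for (i = 0; i < vg_ptr->pv_max; i++)			/* PV /proc entries */
		if (vg_ptr->pv[i])
			lvm_fs_remove_pv(vg_ptr, vg_ptr->pv[i]);
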