1 diff -Naur linux-2002-07-24/Documentation/Configure.help evms-2002-07-24/Documentation/Configure.help
2 --- linux-2002-07-24/Documentation/Configure.help Wed Jul 24 09:15:33 2002
3 +++ evms-2002-07-24/Documentation/Configure.help Wed Jul 24 09:09:30 2002
4 @@ -1755,6 +1755,196 @@
5 called on26.o. You must also have a high-level driver for the type
6 of device that you want to support.
10 + EVMS runtime driver. This is a plugin-based framework for volume
11 + management, and combines support for partitioning, software RAID,
12 + LVM, and more into a single interface.
14 + User-space tools are required to perform administration of EVMS logical
15 + volumes. Please visit <http://www.sourceforge.net/projects/evms> for
16 + more details on downloading and installing these tools.
18 + This driver is also available as a pair of modules called evms.o and
19 + evms_passthru.o ( = code which can be inserted and removed from the
20 + running kernel whenever you want). If you want to compile it as a module,
21 + say M here and read <file:Documentation/modules.txt>.
23 +EVMS Local Device Manager Plugin
24 +CONFIG_EVMS_LOCAL_DEV_MGR_PLUGIN
25 + Support for local IDE and SCSI devices. This plugin is required if EVMS
26 + support is selected.
28 + This plugin is also available as a kernel module called ldev_mgr.o.
30 +EVMS DOS Partition Manager Plugin
31 +CONFIG_EVMS_DOS_PARTITION_PLUGIN
32 + Support for recognizing all partitions using the ever-popular DOS
33 + partitioning scheme (MBRs & EBRs). 99% of the time you will need
34 + this plugin to do anything useful with EVMS.
36 + This plugin also contains support for recognizing BSD disklabels,
37 + UNIXWARE partitions, Solaris-X86 partitions, and OS/2 DLAT entries.
39 + This plugin is also available as a kernel module called dos_part.o.
41 +EVMS GPT Partition Manager Plugin
42 +CONFIG_EVMS_GPT_PARTITION_PLUGIN
43 + Support for recognizing all partitions using the new EFI GUID partitioning
44 + scheme that is used by IA-64 machines. You should only need to enable this
45 + plugin if you are running Linux on an IA-64 machine. All other architectures
48 + This plugin is also available as a kernel module called gpt_part.o
50 +EVMS S/390 Partition Manager Plugin
51 +CONFIG_EVMS_S390_PART_PLUGIN
52 + Support for recognizing all partitions created on S/390 machines. This
53 + plugin recognizes CDL, LDL, and CMS partition formats. You should only need
54 + to enable this plugin if you are running Linux on an S/390. All other
55 + architectures can say 'N' here.
57 + This plugin is also available as a kernel module called s390_part.o
59 +EVMS SnapShot Feature Plugin
60 +CONFIG_EVMS_SNAPSHOT_PLUGIN
61 + This feature plugin lets you create a snapshot of any volume
62 + under EVMS control using any other device under EVMS
63 + control as the target for the snapshot volume.
65 + This plugin is also available as a kernel module called snapshot.o.
67 +EVMS DriveLink Feature Plugin
68 +CONFIG_EVMS_DRIVELINK_PLUGIN
69 + This feature plugin lets you combine multiple devices into a
70 + single virtual block device. The size of the virtual block
71 + device is approximately equal to the sum of all its components.
72 + It currently supports combining up to 60 devices (partitions,
73 + disks, or logical volumes).
75 + This plugin is also available as a kernel module called evms_drivelink.o.
77 +EVMS Bad Block Relocation (BBR) Feature
78 +CONFIG_EVMS_BBR_PLUGIN
79 + BBR is designed to remap I/O write failures to another safe
80 + location on disk. Note that most disk drives have BBR built
81 + into them, so software BBR will only be activated when all
82 + hardware BBR replacement sectors have been used.
84 + This plugin is also available as a kernel module called evms_bbr.o.
86 +EVMS Linux LVM Plugin
87 +CONFIG_EVMS_LVM_PLUGIN
88 + The LVM plugin is responsible for providing compatibility with the Linux
89 + LVM. This plugin recognizes disks and partitions that are LVM physical
90 + volumes (PVs), and assembles the appropriate volume groups (VGs). LVM
91 + logical volumes (LVs) are exported as EVMS volumes with full read/write
92 + support. In addition, support for striped and snapshotted volumes is
93 + included. The corresponding EVMS Engine plugin must also be installed in
94 + order to perform any administration of LVM VGs and LVs.
96 + This plugin is also available as a kernel module called lvm_vge.o.
99 +CONFIG_EVMS_MD_PLUGIN
100 + The MD plugin is responsible for providing compatibility with the Linux
101 + Software RAID driver (MD). It allows several devices to be combined into
102 + one logical device. This can be used to simply append one disk or
103 + partition to another, or to combine several redundant disks into a
104 + RAID 1/4/5 device so as to provide protection against hard disk failures.
106 + This plugin is also available as a kernel module called md_core.o.
108 +EVMS MD RAID-Linear Plugin
109 +CONFIG_EVMS_MD_LINEAR_PERS
110 + The RAID-Linear personality combines disks and/or partitions simply by
111 + appending one to the other.
113 + This plugin is also available as a kernel module called md_linear.o.
115 +EVMS MD RAID-0 Plugin
116 +CONFIG_EVMS_MD_RAID0_PERS
117 + The RAID-0 personality combines disks and/or partitions into one
118 + logical device using striping. This method writes data evenly across
119 + all members in the device in order to increase the throughput rate if
120 + each member resides on a distinct disk.
122 + This plugin is also available as a kernel module called md_raid0.o.
124 +EVMS MD RAID-1 Plugin
125 +CONFIG_EVMS_MD_RAID1_PERS
126 + The RAID-1 personality implements mirroring, in which a logical device
127 + consists of several disks that are exact copies of each other. In the
128 + event of a mirror failure, the RAID-1 personality will continue to use
129 + the remaining mirrors in the set, providing an error free device to the
130 + higher levels of the kernel. In a set with N drives, the available space
131 + is the capacity of a single drive, and the set protects against the
132 + failure of N-1 drives.
134 + This plugin is also available as a kernel module called md_raid1.o.
136 +EVMS MD RAID-4/RAID-5 Plugin
137 +CONFIG_EVMS_MD_RAID5_PERS
138 + A RAID-5 set of N drives with a capacity of C MB per drive provides
139 + the capacity of C * (N-1) MB, and protects against a failure of a
140 + single drive. For a given sector (row) number, (N-1) drives contain
141 + data sectors, and one drive contains the parity protection. For a
142 + RAID-4 set, the parity blocks are present on a single drive, while
143 + a RAID-5 set distributes the parity across all drives in one of the
144 + available parity distribution methods.
146 + This plugin is also available as a kernel module called md_raid5.o.
149 +CONFIG_EVMS_AIX_PLUGIN
150 + The AIX LVM plugin is responsible for providing compatibility with the
151 + AIX LVM. This plugin recognizes disks and partitions that are AIX disks,
152 + and assembles the appropriate volume groups. AIX logical volumes are
153 + exported as EVMS volumes with full read/write support. In addition,
154 + support for striped volumes is included, and support for mirroring is
157 + You should only need to select this option if you are running on a PPC
158 + machine and want to access AIX LVM volumes. The user-space plugin for
159 + AIX will be available in the future.
161 + This plugin is also available as a kernel module called AIXlvm_vge.o.
163 +EVMS OS/2 LVM Plugin
164 +CONFIG_EVMS_OS2_PLUGIN
165 + Support for recognizing the type 0x35 partitions that later versions
166 + of OS/2 use in their Logical Volume Manager. Provides binary
167 + compatibility and includes Drive Linking and Bad Block Relocation
168 + emulation. The user-space plugin for OS/2 will be available in the future.
170 + This plugin is also available as a kernel module called os2lvm_vge.o.
172 +EVMS Clustering Plugin
173 +CONFIG_EVMS_ECR_PLUGIN
175 + The EVMS Clustering Plugin is still under design and development.
176 + Best to just say 'n' here.
178 + This plugin is available as a kernel module called evms_ecr.o.
181 +CONFIG_EVMS_INFO_CRITICAL
182 + Set the level for kernel messages from EVMS. Each level on the list
183 + produces messages for that level and all levels above it. Thus, level
184 + "Critical" only logs the most critical messages (and thus the fewest),
185 + whereas level "Everything" produces more information than will probably
186 + ever be useful. Level "Default" is a good starting point. Level "Debug"
187 + is good if you are having problems with EVMS and want more basic info
188 + on what's going on during the volume discovery process.
190 + EVMS also supports a boot-time kernel parameter to set the info level.
191 + To use this method, specify "evms_info_level=5" at boot time, or add the
192 + line "append = "evms_info_level=5"" to your lilo.conf file (replacing 5
193 + with your desired info level). See include/linux/evms/evms.h for the
194 + numerical definitions of the info levels. To use this boot-time parameter,
195 + the EVMS core driver must be statically built into the kernel (not as a
198 Logical Volume Manager (LVM) support
200 This driver lets you combine several hard disks, hard disk
201 diff -Naur linux-2002-07-24/MAINTAINERS evms-2002-07-24/MAINTAINERS
202 --- linux-2002-07-24/MAINTAINERS Wed Jul 24 09:15:34 2002
203 +++ evms-2002-07-24/MAINTAINERS Wed Jul 24 09:08:21 2002
205 W: http://opensource.creative.com/
208 +ENTERPRISE VOLUME MANAGEMENT SYSTEM (EVMS)
209 +P: Mark Peloquin, Steve Pratt, Kevin Corry
210 +M: peloquin@us.ibm.com, slpratt@us.ibm.com, corryk@us.ibm.com
211 +L: evms-devel@lists.sourceforge.net
212 +W: http://www.sourceforge.net/projects/evms/
215 ETHEREXPRESS-16 NETWORK DRIVER
217 M: Philip.Blundell@pobox.com
218 diff -Naur linux-2002-07-24/Makefile evms-2002-07-24/Makefile
219 --- linux-2002-07-24/Makefile Wed Jul 24 09:15:33 2002
220 +++ evms-2002-07-24/Makefile Wed Jul 24 09:08:53 2002
222 DRIVERS-$(CONFIG_BLUEZ) += drivers/bluetooth/bluetooth.o
223 DRIVERS-$(CONFIG_HOTPLUG_PCI) += drivers/hotplug/vmlinux-obj.o
224 DRIVERS-$(CONFIG_ISDN_BOOL) += drivers/isdn/vmlinux-obj.o
225 +DRIVERS-$(CONFIG_EVMS) += drivers/evms/evmsdrvr.o
227 DRIVERS := $(DRIVERS-y)
229 diff -Naur linux-2002-07-24/arch/i386/config.in evms-2002-07-24/arch/i386/config.in
230 --- linux-2002-07-24/arch/i386/config.in Wed Jul 24 09:15:33 2002
231 +++ evms-2002-07-24/arch/i386/config.in Wed Jun 5 16:07:45 2002
234 source drivers/block/Config.in
236 +source drivers/evms/Config.in
238 source drivers/md/Config.in
240 if [ "$CONFIG_NET" = "y" ]; then
241 diff -Naur linux-2002-07-24/arch/ia64/config.in evms-2002-07-24/arch/ia64/config.in
242 --- linux-2002-07-24/arch/ia64/config.in Wed Jul 24 09:15:33 2002
243 +++ evms-2002-07-24/arch/ia64/config.in Wed Jun 5 16:07:56 2002
245 source drivers/block/Config.in
246 source drivers/ieee1394/Config.in
247 source drivers/message/i2o/Config.in
248 +source drivers/evms/Config.in
249 source drivers/md/Config.in
251 mainmenu_option next_comment
252 diff -Naur linux-2002-07-24/arch/parisc/config.in evms-2002-07-24/arch/parisc/config.in
253 --- linux-2002-07-24/arch/parisc/config.in Wed Jul 24 09:15:33 2002
254 +++ evms-2002-07-24/arch/parisc/config.in Mon Mar 11 13:26:56 2002
257 source drivers/block/Config.in
259 +source drivers/evms/Config.in
261 if [ "$CONFIG_NET" = "y" ]; then
264 diff -Naur linux-2002-07-24/arch/ppc/config.in evms-2002-07-24/arch/ppc/config.in
265 --- linux-2002-07-24/arch/ppc/config.in Wed Jul 24 09:15:33 2002
266 +++ evms-2002-07-24/arch/ppc/config.in Wed Jun 5 16:08:20 2002
268 source drivers/mtd/Config.in
269 source drivers/pnp/Config.in
270 source drivers/block/Config.in
271 +source drivers/evms/Config.in
272 source drivers/md/Config.in
274 if [ "$CONFIG_NET" = "y" ]; then
275 diff -Naur linux-2002-07-24/arch/ppc64/config.in evms-2002-07-24/arch/ppc64/config.in
276 --- linux-2002-07-24/arch/ppc64/config.in Wed Jul 24 09:15:33 2002
277 +++ evms-2002-07-24/arch/ppc64/config.in Tue Jul 2 11:14:31 2002
279 source drivers/mtd/Config.in
280 source drivers/pnp/Config.in
281 source drivers/block/Config.in
282 +source drivers/evms/Config.in
283 source drivers/md/Config.in
285 if [ "$CONFIG_NET" = "y" ]; then
286 diff -Naur linux-2002-07-24/arch/ppc64/kernel/ioctl32.c evms-2002-07-24/arch/ppc64/kernel/ioctl32.c
287 --- linux-2002-07-24/arch/ppc64/kernel/ioctl32.c Wed Jul 24 09:15:33 2002
288 +++ evms-2002-07-24/arch/ppc64/kernel/ioctl32.c Tue Jul 2 11:14:31 2002
289 @@ -3435,6 +3435,383 @@
290 return ((0 == ret) ? 0 : -EFAULT);
295 +#include <linux/evms/evms_kernel.h>
296 +#include <linux/evms/evms_bbr.h>
298 +struct evms_sector_io32 {
301 + u64 starting_sector;
303 + __kernel_caddr_t32 buffer_address;
307 +struct evms_rediscover32 {
310 + __kernel_caddr_t32 drive_array;
313 +struct evms_compute_csum32 {
314 + __kernel_caddr_t32 buffer_address;
321 +struct evms_plugin_ioctl32 {
323 + s32 feature_command;
325 + __kernel_caddr_t32 feature_ioctl_data;
328 +struct evms_notify_bbr32 {
329 + char object_name[EVMS_VOLUME_NAME_SIZE+1];
333 + __kernel_caddr_t32 buffer;
337 +#define EVMS_MD_ID 4
338 +#define EVMS_MD_PERS_IOCTL_CMD 1
339 +#define EVMS_MD_ADD 2
340 +#define EVMS_MD_REMOVE 3
341 +#define EVMS_MD_ACTIVATE 4
342 +#define EVMS_MD_DEACTIVATE 5
343 +#define EVMS_MD_GET_ARRAY_INFO 6
344 +#define EVMS_MD_RAID5_INIT_IO 1
346 +struct evms_md_ioctl {
352 +struct evms_md_ioctl32 {
355 + __kernel_caddr_t32 arg;
358 +struct evms_md_array_info {
359 + unsigned long state;
363 +struct evms_md_array_info32 {
365 + __kernel_caddr_t32 sb;
368 +struct raid5_ioctl_init_io {
375 +struct raid5_ioctl_init_io32 {
379 + __kernel_caddr_t32 data;
382 +#define EVMS_MD_PLUGIN_ID ((IBM_OEM_ID << 16) | \
383 + (EVMS_REGION_MANAGER << 12) | EVMS_MD_ID)
384 +#define EVMS_BBR_PLUGIN_ID ((IBM_OEM_ID << 16) | \
385 + (EVMS_FEATURE << 12) | EVMS_BBR_FEATURE_ID)
388 +#define EVMS_SECTOR_IO_32 _IOWR( EVMS_MAJOR, \
389 + EVMS_SECTOR_IO_NUMBER, \
390 + struct evms_sector_io32)
391 +#define EVMS_REDISCOVER_VOLUMES_32 _IOWR( EVMS_MAJOR, \
392 + EVMS_REDISCOVER_VOLUMES_NUMBER, \
393 + struct evms_rediscover32)
394 +#define EVMS_COMPUTE_CSUM_32 _IOWR( EVMS_MAJOR, \
395 + EVMS_COMPUTE_CSUM_NUMBER, \
396 + struct evms_compute_csum32)
397 +#define EVMS_PLUGIN_IOCTL_32 _IOR( EVMS_MAJOR, \
398 + EVMS_PLUGIN_IOCTL_NUMBER, \
399 + struct evms_plugin_ioctl32)
401 +static int evms_sector_io(unsigned int fd, unsigned int cmd, unsigned long arg)
403 + mm_segment_t old_fs = get_fs();
404 + struct evms_sector_io32 parms32;
405 + struct evms_sector_io parms;
410 + if (copy_from_user(&parms32, (struct evms_sector_io32 *)arg,
411 + sizeof(struct evms_sector_io32)))
414 + parms.disk_handle = parms32.disk_handle;
415 + parms.io_flag = parms32.io_flag;
416 + parms.starting_sector = parms32.starting_sector;
417 + parms.sector_count = parms32.sector_count;
418 + parms.buffer_address = (u8 *)A(parms32.buffer_address);
421 + kcmd = EVMS_SECTOR_IO;
425 + rc = sys_ioctl(fd, kcmd, (unsigned long)karg);
428 + parms32.status = parms.status;
429 + parms32.buffer_address = (__kernel_caddr_t32)AA(parms.buffer_address);
431 + if (copy_to_user((struct evms_sector_io32 *)arg, &parms32,
432 + sizeof(struct evms_sector_io32)))
438 +static int evms_rediscover(unsigned int fd, unsigned int cmd, unsigned long arg)
440 + mm_segment_t old_fs = get_fs();
441 + struct evms_rediscover32 parms32;
442 + struct evms_rediscover parms;
447 + if (copy_from_user(&parms32, (struct evms_rediscover32 *)arg,
448 + sizeof(struct evms_rediscover32)))
451 + parms.drive_count = parms32.drive_count;
452 + parms.drive_array = (void *)A(parms32.drive_array);
455 + kcmd = EVMS_REDISCOVER_VOLUMES;
459 + rc = sys_ioctl(fd, kcmd, (unsigned long)karg);
462 + parms32.status = parms.status;
464 + if (copy_to_user((struct evms_rediscover32 *)arg, &parms32,
465 + sizeof(struct evms_rediscover32)))
471 +static int evms_compute_csum(unsigned int fd,
475 + mm_segment_t old_fs = get_fs();
476 + struct evms_compute_csum32 parms32;
477 + struct evms_compute_csum parms;
482 + if (copy_from_user(&parms32, (struct evms_compute_csum32 *)arg,
483 + sizeof(struct evms_compute_csum32)))
486 + parms.insum = parms32.insum;
487 + parms.outsum = parms32.outsum;
488 + parms.buffer_size = parms32.buffer_size;
489 + parms.buffer_address = (void *)A(parms32.buffer_address);
492 + kcmd = EVMS_COMPUTE_CSUM;
496 + rc = sys_ioctl(fd, kcmd, (unsigned long)karg);
499 + parms32.status = parms.status;
500 + parms32.outsum = parms.outsum;
502 + if (copy_to_user((struct evms_compute_csum32 *)arg, &parms32,
503 + sizeof(struct evms_compute_csum32)))
509 +static int evms_bbr_plugin_ioctl(unsigned int fd,
513 + mm_segment_t old_fs = get_fs();
514 + struct evms_notify_bbr32 bbr_parms32;
515 + struct evms_notify_bbr bbr_parms;
516 + struct evms_plugin_ioctl *parms = (struct evms_plugin_ioctl *)arg;
517 + void *old_ptr = NULL;
520 + if (copy_from_user(&bbr_parms32,
521 + (struct evms_notify_bbr32 *)parms->feature_ioctl_data,
522 + sizeof(struct evms_notify_bbr32)))
525 + memcpy(&bbr_parms, &bbr_parms32, sizeof(struct evms_notify_bbr32));
526 + bbr_parms.buffer = (void *)A(bbr_parms32.buffer);
527 + bbr_parms.rw = bbr_parms32.rw;
528 + old_ptr = parms->feature_ioctl_data;
529 + parms->feature_ioctl_data = &bbr_parms;
532 + rc = sys_ioctl(fd, cmd, arg);
535 + parms->feature_ioctl_data = old_ptr;
538 + bbr_parms32.nr_sect = bbr_parms.nr_sect;
539 + rc = copy_to_user((struct evms_notify_bbr32 *)parms->feature_ioctl_data,
540 + &bbr_parms32, sizeof(struct evms_notify_bbr32));
546 +static int evms_md_plugin_ioctl(unsigned int fd,
550 + mm_segment_t old_fs = get_fs();
551 + void *old_ptr = NULL;
552 + void *old_md_ptr = NULL;
553 + struct evms_md_ioctl32 md_parms32;
554 + struct evms_md_ioctl md_parms;
555 + struct evms_md_array_info32 md_array_parms32;
556 + struct evms_md_array_info md_array_parms;
557 + struct raid5_ioctl_init_io32 r5_init_io_parms32;
558 + struct raid5_ioctl_init_io r5_init_io_parms;
559 + struct evms_plugin_ioctl *parms = (struct evms_plugin_ioctl *)arg;
562 + if (copy_from_user(&md_parms32,
563 + (struct evms_md_ioctl*)parms->feature_ioctl_data,
564 + sizeof(struct evms_md_ioctl32)))
567 + md_parms.mddev_idx = md_parms32.mddev_idx;
568 + md_parms.cmd = md_parms32.cmd;
569 + md_parms.arg = (void *)A(md_parms32.arg);
570 + old_ptr = parms->feature_ioctl_data;
571 + parms->feature_ioctl_data = &md_parms;
573 + if (parms->feature_command == EVMS_MD_GET_ARRAY_INFO) {
574 + if (copy_from_user(&md_array_parms32,
575 + (struct evms_md_array_info32*)md_parms.arg,
576 + sizeof(struct evms_md_array_info32)))
578 + md_array_parms.state = md_array_parms32.state;
579 + md_array_parms.sb = (void *)A(md_array_parms32.sb);
580 + old_md_ptr = (void *)md_parms.arg;
581 + md_parms.arg = &md_array_parms;
582 + } else if (parms->feature_command == EVMS_MD_PERS_IOCTL_CMD) {
583 + if (md_parms.cmd == EVMS_MD_RAID5_INIT_IO) {
584 + if (copy_from_user(&r5_init_io_parms32,
585 + (struct raid5_ioctl_init_io32*)md_parms.arg,
586 + sizeof(struct raid5_ioctl_init_io32)))
589 + r5_init_io_parms.rw = r5_init_io_parms32.rw;
590 + r5_init_io_parms.lsn = r5_init_io_parms32.lsn;
591 + r5_init_io_parms.nr_sects = r5_init_io_parms32.nr_sects;
592 + r5_init_io_parms.data = (void *)A(r5_init_io_parms32.data);
593 + old_md_ptr = (void *)md_parms.arg;
594 + md_parms.arg = &r5_init_io_parms;
599 + rc = sys_ioctl(fd, cmd, arg);
602 + parms->feature_ioctl_data = old_ptr;
603 + md_parms.arg = old_md_ptr;
606 + if (parms->feature_command == EVMS_MD_GET_ARRAY_INFO) {
607 + md_array_parms32.state = md_array_parms.state;
608 + rc = copy_to_user((struct evms_md_array_info32 *)md_parms.arg,
610 + sizeof(struct evms_md_array_info32));
613 + md_parms32.mddev_idx = md_parms.mddev_idx;
614 + rc = copy_to_user((struct evms_md_ioctl*)parms->feature_ioctl_data,
616 + sizeof(struct evms_md_ioctl32));
623 +static int evms_plugin_ioctl(unsigned int fd,
627 + mm_segment_t old_fs = get_fs();
628 + struct evms_plugin_ioctl32 parms32;
629 + struct evms_plugin_ioctl parms;
634 + if (copy_from_user(&parms32, (struct evms_plugin_ioctl32 *)arg,
635 + sizeof(struct evms_plugin_ioctl32)))
638 + parms.feature_id = parms32.feature_id;
639 + parms.feature_command = parms32.feature_command;
640 + parms.status = parms32.status;
641 + parms.feature_ioctl_data = (void *)A(parms32.feature_ioctl_data);
643 + kcmd = EVMS_PLUGIN_IOCTL;
646 + switch (parms.feature_id) {
647 + case EVMS_MD_PLUGIN_ID:
648 + rc = evms_md_plugin_ioctl(fd, kcmd, (unsigned long)karg);
650 + case EVMS_BBR_PLUGIN_ID:
651 + rc = evms_bbr_plugin_ioctl(fd, kcmd, (unsigned long)karg);
655 + rc = sys_ioctl(fd, kcmd, (unsigned long)karg);
660 + parms32.status = parms.status;
661 + rc = copy_to_user((struct evms_plugin_ioctl32 *)arg, &parms32,
662 + sizeof(struct evms_plugin_ioctl32));
673 @@ -4092,6 +4469,29 @@
674 COMPATIBLE_IOCTL(ATMTCP_REMOVE),
675 COMPATIBLE_IOCTL(ATMMPC_CTRL),
676 COMPATIBLE_IOCTL(ATMMPC_DATA),
679 +COMPATIBLE_IOCTL(EVMS_GET_INFO_LEVEL),
680 +COMPATIBLE_IOCTL(EVMS_SET_INFO_LEVEL),
681 +HANDLE_IOCTL(EVMS_REDISCOVER_VOLUMES_32, evms_rediscover),
682 +COMPATIBLE_IOCTL(EVMS_DELETE_VOLUME),
683 +HANDLE_IOCTL(EVMS_PLUGIN_IOCTL_32, evms_plugin_ioctl),
684 +COMPATIBLE_IOCTL(EVMS_PROCESS_NOTIFY_EVENT),
685 +COMPATIBLE_IOCTL(EVMS_GET_LOGICAL_DISK),
686 +COMPATIBLE_IOCTL(EVMS_GET_LOGICAL_DISK_INFO),
687 +HANDLE_IOCTL(EVMS_SECTOR_IO_32, evms_sector_io),
688 +COMPATIBLE_IOCTL(EVMS_GET_MINOR),
689 +COMPATIBLE_IOCTL(EVMS_GET_VOLUME_DATA),
690 +COMPATIBLE_IOCTL(EVMS_GET_PLUGIN),
691 +HANDLE_IOCTL(EVMS_COMPUTE_CSUM_32, evms_compute_csum),
692 +COMPATIBLE_IOCTL(EVMS_GET_BMAP),
693 +COMPATIBLE_IOCTL(EVMS_GET_IOCTL_VERSION),
694 +COMPATIBLE_IOCTL(EVMS_GET_VERSION),
695 +COMPATIBLE_IOCTL(EVMS_UPDATE_DEVICE_INFO),
696 +COMPATIBLE_IOCTL(EVMS_CHECK_MOUNT_STATUS),
697 +COMPATIBLE_IOCTL(EVMS_GET_VOL_STRIPE_INFO),
700 #if defined(CONFIG_BLK_DEV_LVM) || defined(CONFIG_BLK_DEV_LVM_MODULE)
702 COMPATIBLE_IOCTL(VG_SET_EXTENDABLE),
703 diff -Naur linux-2002-07-24/arch/s390/config.in evms-2002-07-24/arch/s390/config.in
704 --- linux-2002-07-24/arch/s390/config.in Wed Jul 24 09:15:33 2002
705 +++ evms-2002-07-24/arch/s390/config.in Wed Jun 5 16:08:32 2002
708 source drivers/s390/Config.in
710 +source drivers/evms/Config.in
712 if [ "$CONFIG_NET" = "y" ]; then
715 diff -Naur linux-2002-07-24/arch/s390x/config.in evms-2002-07-24/arch/s390x/config.in
716 --- linux-2002-07-24/arch/s390x/config.in Wed Jul 24 09:15:33 2002
717 +++ evms-2002-07-24/arch/s390x/config.in Wed Jun 5 16:08:39 2002
720 source drivers/s390/Config.in
722 +source drivers/evms/Config.in
724 if [ "$CONFIG_NET" = "y" ]; then
727 diff -Naur linux-2002-07-24/drivers/Makefile evms-2002-07-24/drivers/Makefile
728 --- linux-2002-07-24/drivers/Makefile Wed Jul 24 09:15:33 2002
729 +++ evms-2002-07-24/drivers/Makefile Wed Jun 5 16:09:04 2002
732 mod-subdirs := dio mtd sbus video macintosh usb input telephony sgi ide \
733 message/i2o message/fusion scsi md ieee1394 pnp isdn atm \
734 - fc4 net/hamradio i2c acpi bluetooth
735 + fc4 net/hamradio i2c acpi bluetooth evms
737 subdir-y := parport char block net sound misc media cdrom hotplug
738 subdir-m := $(subdir-y)
740 subdir-$(CONFIG_ACPI) += acpi
742 subdir-$(CONFIG_BLUEZ) += bluetooth
743 +subdir-$(CONFIG_EVMS) += evms
745 include $(TOPDIR)/Rules.make
746 diff -Naur linux-2002-07-24/include/linux/fs.h evms-2002-07-24/include/linux/fs.h
747 --- linux-2002-07-24/include/linux/fs.h Wed Jul 24 09:15:34 2002
748 +++ evms-2002-07-24/include/linux/fs.h Wed Jul 24 09:12:32 2002
749 @@ -1472,6 +1472,7 @@
750 unsigned long generate_cluster_swab32(kdev_t, int b[], int);
751 extern kdev_t ROOT_DEV;
752 extern char root_device_name[];
753 +extern void get_root_device_name( char * root_name );
756 extern void show_buffers(void);
757 diff -Naur linux-2002-07-24/include/linux/major.h evms-2002-07-24/include/linux/major.h
758 --- linux-2002-07-24/include/linux/major.h Wed Jul 24 09:15:34 2002
759 +++ evms-2002-07-24/include/linux/major.h Mon Jul 8 16:23:51 2002
762 #define UMEM_MAJOR 116 /* http://www.umem.com/ Battery Backed RAM */
764 +#define EVMS_MAJOR 117 /* Enterprise Volume Management System */
766 #define RTF_MAJOR 150
767 #define RAW_MAJOR 162
769 diff -Naur linux-2002-07-24/include/linux/mempool.h evms-2002-07-24/include/linux/mempool.h
770 --- linux-2002-07-24/include/linux/mempool.h Wed Dec 31 18:00:00 1969
771 +++ evms-2002-07-24/include/linux/mempool.h Mon Jun 17 10:13:08 2002
774 + * memory buffer pool support
776 +#ifndef _LINUX_MEMPOOL_H
777 +#define _LINUX_MEMPOOL_H
779 +#include <linux/wait.h>
781 +typedef void * (mempool_alloc_t)(int gfp_mask, void *pool_data);
782 +typedef void (mempool_free_t)(void *element, void *pool_data);
784 +typedef struct mempool_s {
786 + int min_nr; /* nr of elements at *elements */
787 + int curr_nr; /* Current nr of elements at *elements */
791 + mempool_alloc_t *alloc;
792 + mempool_free_t *free;
793 + wait_queue_head_t wait;
795 +extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
796 + mempool_free_t *free_fn, void *pool_data);
797 +extern int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask);
798 +extern void mempool_destroy(mempool_t *pool);
799 +extern void * mempool_alloc(mempool_t *pool, int gfp_mask);
800 +extern void mempool_free(void *element, mempool_t *pool);
802 +#endif /* _LINUX_MEMPOOL_H */
803 diff -Naur linux-2002-07-24/include/linux/sysctl.h evms-2002-07-24/include/linux/sysctl.h
804 --- linux-2002-07-24/include/linux/sysctl.h Wed Jul 24 09:15:34 2002
805 +++ evms-2002-07-24/include/linux/sysctl.h Wed Jun 5 16:11:50 2002
815 /* /proc/sys/dev/cdrom */
817 /* /proc/sys/dev/parport */
819 DEV_PARPORT_DEFAULT=-3
822 +/* /proc/sys/dev/evms */
824 + DEV_EVMS_INFO_LEVEL=1,
828 +/* /proc/sys/dev/evms/raid */
830 + DEV_EVMS_MD_SPEED_LIMIT_MIN=1,
831 + DEV_EVMS_MD_SPEED_LIMIT_MAX=2
834 /* /proc/sys/dev/raid */
835 diff -Naur linux-2002-07-24/init/do_mounts.c evms-2002-07-24/init/do_mounts.c
836 --- linux-2002-07-24/init/do_mounts.c Wed Jul 24 09:15:34 2002
837 +++ evms-2002-07-24/init/do_mounts.c Wed Jul 24 09:13:46 2002
841 { "mtdblock", 0x1f00 },
842 + { "evms", 0x7500 },
849 mount_block_root("/dev/root", root_mountflags);
852 +void get_root_device_name( char * root_name )
854 + strncpy(root_name, root_device_name, 63);
857 #ifdef CONFIG_BLK_DEV_INITRD
858 diff -Naur linux-2002-07-24/kernel/ksyms.c evms-2002-07-24/kernel/ksyms.c
859 --- linux-2002-07-24/kernel/ksyms.c Wed Jul 24 09:15:34 2002
860 +++ evms-2002-07-24/kernel/ksyms.c Wed Jul 24 09:14:08 2002
862 EXPORT_SYMBOL(refile_buffer);
863 EXPORT_SYMBOL(max_sectors);
864 EXPORT_SYMBOL(max_readahead);
865 +EXPORT_SYMBOL(is_swap_partition);
866 +EXPORT_SYMBOL(walk_gendisk);
869 EXPORT_SYMBOL(tty_hangup);
870 diff -Naur linux-2002-07-24/mm/Makefile evms-2002-07-24/mm/Makefile
871 --- linux-2002-07-24/mm/Makefile Wed Jul 24 09:15:34 2002
872 +++ evms-2002-07-24/mm/Makefile Wed Jun 19 11:21:04 2002
877 -export-objs := shmem.o filemap.o memory.o page_alloc.o
878 +export-objs := shmem.o filemap.o memory.o page_alloc.o mempool.o
880 obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
881 vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
882 page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \
886 obj-$(CONFIG_HIGHMEM) += highmem.o
888 diff -Naur linux-2002-07-24/mm/mempool.c evms-2002-07-24/mm/mempool.c
889 --- linux-2002-07-24/mm/mempool.c Wed Dec 31 18:00:00 1969
890 +++ evms-2002-07-24/mm/mempool.c Wed Jun 19 09:47:16 2002
893 + * linux/mm/mempool.c
895 + * memory buffer pool support. Such pools are mostly used
896 + * for guaranteed, deadlock-free memory allocations during
899 + * started by Ingo Molnar, Copyright (C) 2001
902 +#include <linux/mm.h>
903 +#include <linux/slab.h>
904 +#include <linux/module.h>
905 +#include <linux/compiler.h>
906 +#include <linux/mempool.h>
907 +#include <linux/fs.h>
909 +#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
911 +static void add_element(mempool_t *pool, void *element)
913 + BUG_ON(pool->curr_nr >= pool->min_nr);
914 + pool->elements[pool->curr_nr++] = element;
917 +static void *remove_element(mempool_t *pool)
919 + BUG_ON(pool->curr_nr <= 0);
920 + return pool->elements[--pool->curr_nr];
923 +static void free_pool(mempool_t *pool)
925 + while (pool->curr_nr) {
926 + void *element = remove_element(pool);
927 + pool->free(element, pool->pool_data);
929 + kfree(pool->elements);
934 + * mempool_create - create a memory pool
935 + * @min_nr: the minimum number of elements guaranteed to be
936 + * allocated for this pool.
937 + * @alloc_fn: user-defined element-allocation function.
938 + * @free_fn: user-defined element-freeing function.
939 + * @pool_data: optional private data available to the user-defined functions.
941 + * this function creates and allocates a guaranteed size, preallocated
942 + * memory pool. The pool can be used from the mempool_alloc and mempool_free
943 + * functions. This function might sleep. Both the alloc_fn() and the free_fn()
944 + * functions might sleep - as long as the mempool_alloc function is not called
945 + * from IRQ contexts.
947 +mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
948 + mempool_free_t *free_fn, void *pool_data)
952 + pool = kmalloc(sizeof(*pool), GFP_KERNEL);
955 + memset(pool, 0, sizeof(*pool));
956 + pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL);
957 + if (!pool->elements) {
961 + spin_lock_init(&pool->lock);
962 + pool->min_nr = min_nr;
963 + pool->pool_data = pool_data;
964 + init_waitqueue_head(&pool->wait);
965 + pool->alloc = alloc_fn;
966 + pool->free = free_fn;
969 + * First pre-allocate the guaranteed number of buffers.
971 + while (pool->curr_nr < pool->min_nr) {
974 + element = pool->alloc(GFP_KERNEL, pool->pool_data);
975 + if (unlikely(!element)) {
979 + add_element(pool, element);
985 + * mempool_resize - resize an existing memory pool
986 + * @pool: pointer to the memory pool which was allocated via
987 + * mempool_create().
988 + * @new_min_nr: the new minimum number of elements guaranteed to be
989 + * allocated for this pool.
990 + * @gfp_mask: the usual allocation bitmask.
992 + * This function shrinks/grows the pool. In the case of growing,
993 + * it cannot be guaranteed that the pool will be grown to the new
994 + * size immediately, but new mempool_free() calls will refill it.
996 + * Note, the caller must guarantee that no mempool_destroy is called
997 + * while this function is running. mempool_alloc() & mempool_free()
998 + * might be called (eg. from IRQ contexts) while this function executes.
1000 +int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask)
1003 + void **new_elements;
1004 + unsigned long flags;
1006 + BUG_ON(new_min_nr <= 0);
1008 + spin_lock_irqsave(&pool->lock, flags);
1009 + if (new_min_nr < pool->min_nr) {
1010 + while (pool->curr_nr > new_min_nr) {
1011 + element = remove_element(pool);
1012 + spin_unlock_irqrestore(&pool->lock, flags);
1013 + pool->free(element, pool->pool_data);
1014 + spin_lock_irqsave(&pool->lock, flags);
1016 + pool->min_nr = new_min_nr;
1019 + spin_unlock_irqrestore(&pool->lock, flags);
1021 + /* Grow the pool */
1022 + new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
1023 + if (!new_elements)
1026 + spin_lock_irqsave(&pool->lock, flags);
1027 + memcpy(new_elements, pool->elements,
1028 + pool->curr_nr * sizeof(*new_elements));
1029 + kfree(pool->elements);
1030 + pool->elements = new_elements;
1031 + pool->min_nr = new_min_nr;
1033 + while (pool->curr_nr < pool->min_nr) {
1034 + spin_unlock_irqrestore(&pool->lock, flags);
1035 + element = pool->alloc(gfp_mask, pool->pool_data);
1038 + spin_lock_irqsave(&pool->lock, flags);
1039 + if (pool->curr_nr < pool->min_nr)
1040 + add_element(pool, element);
1042 + kfree(element); /* Raced */
1045 + spin_unlock_irqrestore(&pool->lock, flags);
1051 + * mempool_destroy - deallocate a memory pool
1052 + * @pool: pointer to the memory pool which was allocated via
1053 + * mempool_create().
1055 + * this function only sleeps if the free_fn() function sleeps. The caller
1056 + * has to guarantee that all elements have been returned to the pool (ie:
1057 + * freed) prior to calling mempool_destroy().
1059 +void mempool_destroy(mempool_t *pool)
1061 + if (pool->curr_nr != pool->min_nr)
1062 + BUG(); /* There were outstanding elements */
1067 + * mempool_alloc - allocate an element from a specific memory pool
1068 + * @pool: pointer to the memory pool which was allocated via
1069 + * mempool_create().
1070 + * @gfp_mask: the usual allocation bitmask.
1072 + * this function only sleeps if the alloc_fn function sleeps or
1073 + * returns NULL. Note that due to preallocation, this function
1074 + * *never* fails when called from process contexts. (it might
1075 + * fail if called from an IRQ context.)
1077 +void * mempool_alloc(mempool_t *pool, int gfp_mask)
1080 + unsigned long flags;
1082 + DECLARE_WAITQUEUE(wait, current);
1083 + int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
1086 + element = pool->alloc(gfp_nowait, pool->pool_data);
1087 + if (likely(element != NULL))
1091 + * If the pool is less than 50% full then try harder
1092 + * to allocate an element:
1094 + if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) {
1095 + element = pool->alloc(gfp_mask, pool->pool_data);
1096 + if (likely(element != NULL))
1101 + * Kick the VM at this point.
1105 + spin_lock_irqsave(&pool->lock, flags);
1106 + if (likely(pool->curr_nr)) {
1107 + element = remove_element(pool);
1108 + spin_unlock_irqrestore(&pool->lock, flags);
1111 + spin_unlock_irqrestore(&pool->lock, flags);
1113 + /* We must not sleep in the GFP_ATOMIC case */
1114 + if (gfp_mask == gfp_nowait)
1117 + run_task_queue(&tq_disk);
1119 + add_wait_queue_exclusive(&pool->wait, &wait);
1120 + set_task_state(current, TASK_UNINTERRUPTIBLE);
1122 + spin_lock_irqsave(&pool->lock, flags);
1123 + curr_nr = pool->curr_nr;
1124 + spin_unlock_irqrestore(&pool->lock, flags);
1129 + current->state = TASK_RUNNING;
1130 + remove_wait_queue(&pool->wait, &wait);
1132 + goto repeat_alloc;
1136 + * mempool_free - return an element to the pool.
1137 + * @element: pool element pointer.
1138 + * @pool: pointer to the memory pool which was allocated via
1139 + * mempool_create().
1141 + * this function only sleeps if the free_fn() function sleeps.
1143 +void mempool_free(void *element, mempool_t *pool)
1145 + unsigned long flags;
1147 + if (pool->curr_nr < pool->min_nr) {
1148 + spin_lock_irqsave(&pool->lock, flags);
1149 + if (pool->curr_nr < pool->min_nr) {
1150 + add_element(pool, element);
1151 + spin_unlock_irqrestore(&pool->lock, flags);
1152 + wake_up(&pool->wait);
1155 + spin_unlock_irqrestore(&pool->lock, flags);
1157 + pool->free(element, pool->pool_data);
1160 +EXPORT_SYMBOL(mempool_create);
1161 +EXPORT_SYMBOL(mempool_resize);
1162 +EXPORT_SYMBOL(mempool_destroy);
1163 +EXPORT_SYMBOL(mempool_alloc);
1164 +EXPORT_SYMBOL(mempool_free);