]> git.pld-linux.org Git - packages/kernel.git/blame - evms-linux-2.4.19-rc3-common-files.patch
- obsolete
[packages/kernel.git] / evms-linux-2.4.19-rc3-common-files.patch
CommitLineData
14f0972a
JR
1diff -Naur linux-2002-07-24/Documentation/Configure.help evms-2002-07-24/Documentation/Configure.help
2--- linux-2002-07-24/Documentation/Configure.help Wed Jul 24 09:15:33 2002
3+++ evms-2002-07-24/Documentation/Configure.help Wed Jul 24 09:09:30 2002
4@@ -1755,6 +1755,196 @@
5 called on26.o. You must also have a high-level driver for the type
6 of device that you want to support.
7
8+EVMS Kernel Runtime
9+CONFIG_EVMS
10+ EVMS runtime driver. This is a plugin-based framework for volume
11+ management, and combines support for partitioning, software RAID,
12+ LVM, and more into a single interface.
13+
14+ User-space tools are required to perform administration of EVMS logical
15+ volumes. Please visit <http://www.sourceforge.net/projects/evms> for
16+ more details on downloading and installing these tools.
17+
18+ This driver is also available as a pair of modules called evms.o and
19+ evms_passthru.o ( = code which can be inserted and removed from the
20+ running kernel whenever you want). If you want to compile it as a module,
21+ say M here and read <file:Documentation/modules.txt>.
22+
23+EVMS Local Device Manager Plugin
24+CONFIG_EVMS_LOCAL_DEV_MGR_PLUGIN
25+ Support for local IDE and SCSI devices. This plugin is required if EVMS
26+ support is selected.
27+
28+ This plugin is also available as a kernel module called ldev_mgr.o.
29+
30+EVMS DOS Partition Manager Plugin
31+CONFIG_EVMS_DOS_PARTITION_PLUGIN
32+ Support for recognizing all partitions using the ever-popular DOS
33+ partitioning scheme (MBRs & EBRs). 99% of the time you will need
34+ this plugin to do anything useful with EVMS.
35+
36+ This plugin also contains support for recognizing BSD disklabels,
37+ UNIXWARE partitions, Solaris-X86 partitions, and OS/2 DLAT entries.
38+
39+ This plugin is also available as a kernel module called dos_part.o.
40+
41+EVMS GPT Partition Manager Plugin
42+CONFIG_EVMS_GPT_PARTITION_PLUGIN
43+ Support for recognizing all partitions using the new EFI GUID partitioning
44+ scheme that is used by IA-64 machines. You should only need to enable this
45+ plugin if you are running Linux on an IA-64 machine. All other architectures
46+ can say 'N' here.
47+
48+ This plugin is also available as a kernel module called gpt_part.o
49+
50+EVMS S/390 Partition Manager Plugin
51+CONFIG_EVMS_S390_PART_PLUGIN
52+ Support for recognizing all partitions created on S/390 machines. This
53+ plugin recognizes CDL, LDL, and CMS partition formats. You should only need
54+ to enable this plugin if you are running Linux on an S/390. All other
55+ architectures can say 'N' here.
56+
57+ This plugin is also available as a kernel module called s390_part.o
58+
59+EVMS SnapShot Feature Plugin
60+CONFIG_EVMS_SNAPSHOT_PLUGIN
61+ This feature plugin lets you create a snapshot of any volume
62+ under EVMS control using any other device under EVMS
63+ control as the target for the snapshot volume.
64+
65+ This plugin is also available as a kernel module called snapshot.o.
66+
67+EVMS DriveLink Feature Plugin
68+CONFIG_EVMS_DRIVELINK_PLUGIN
69+ This feature plugin lets you combine multiple devices into a
70+ single virtual block device. The size of the virtual block
71+ device is approximately equal to the sum of all its components.
72+ It currently supports combining up to 60 devices (partitions,
73+ disks, or logical volumes).
74+
75+ This plugin is also available as a kernel module called evms_drivelink.o.
76+
77+EVMS Bad Block Relocation (BBR) Feature
78+CONFIG_EVMS_BBR_PLUGIN
79+ BBR is designed to remap I/O write failures to another safe
80+ location on disk. Note that most disk drives have BBR built
81+ into them, so software BBR will only be activated when all
82+ hardware BBR replacement sectors have been used.
83+
84+ This plugin is also available as a kernel module called evms_bbr.o.
85+
86+EVMS Linux LVM Plugin
87+CONFIG_EVMS_LVM_PLUGIN
88+ The LVM plugin is responsible for providing compatibility with the Linux
89+ LVM. This plugin recognizes disks and partitions that are LVM physical
90+ volumes (PVs), and assembles the appropriate volume groups (VGs). LVM
91+ logical volumes (LVs) are exported as EVMS volumes with full read/write
92+ support. In addition, support for striped and snapshotted volumes is
93+ included. The corresponding EVMS Engine plugin must also be installed in
94+ order to perform any administration of LVM VGs and LVs.
95+
96+ This plugin is also available as a kernel module called lvm_vge.o.
97+
98+EVMS MD Plugin
99+CONFIG_EVMS_MD_PLUGIN
100+ The MD plugin is responsible for providing compatibility with the Linux
101+ Software RAID driver (MD). It allows several devices to be combined into
102+ one logical device. This can be used to simply append one disk or
103+ partition to another, or to combine several redundant disks into a
104+ RAID 1/4/5 device so as to provide protection against hard disk failures.
105+
106+ This plugin is also available as a kernel module called md_core.o.
107+
108+EVMS MD RAID-Linear Plugin
109+CONFIG_EVMS_MD_LINEAR_PERS
110+ The RAID-Linear personality combines disks and/or partitions simply by
111+ appending one to the other.
112+
113+ This plugin is also available as a kernel module called md_linear.o.
114+
115+EVMS MD RAID-0 Plugin
116+CONFIG_EVMS_MD_RAID0_PERS
117+ The RAID-0 personality combines disks and/or partitions into one
118+ logical device using striping. This method writes data evenly across
119+ all members in the device in order to increase the throughput rate if
120+ each member resides on a distinct disk.
121+
122+ This plugin is also available as a kernel module called md_raid0.o.
123+
124+EVMS MD RAID-1 Plugin
125+CONFIG_EVMS_MD_RAID1_PERS
126+ The RAID-1 personality implements mirroring, in which a logical device
127+ consists of several disks that are exact copies of each other. In the
128+ event of a mirror failure, the RAID-1 personality will continue to use
129+ the remaining mirrors in the set, providing an error free device to the
130+ higher levels of the kernel. In a set with N drives, the available space
131+ is the capacity of a single drive, and the set protects against the
132+ failure of N-1 drives.
133+
134+ This plugin is also available as a kernel module called md_raid1.o.
135+
136+EVMS MD RAID-4/RAID-5 Plugin
137+CONFIG_EVMS_MD_RAID5_PERS
138+ A RAID-5 set of N drives with a capacity of C MB per drive provides
139+ the capacity of C * (N-1) MB, and protects against a failure of a
140+ single drive. For a given sector (row) number, (N-1) drives contain
141+ data sectors, and one drive contains the parity protection. For a
142+ RAID-4 set, the parity blocks are present on a single drive, while
143+ a RAID-5 set distributes the parity across all drives in one of the
144+ available parity distribution methods.
145+
146+ This plugin is also available as a kernel module called md_raid5.o.
147+
148+EVMS AIX LVM Plugin
149+CONFIG_EVMS_AIX_PLUGIN
150+ The AIX LVM plugin is responsible for providing compatibility with the
151+ AIX LVM. This plugin recognizes disks and partitions that are AIX disks,
152+ and assembles the appropriate volume groups. AIX logical volumes are
153+ exported as EVMS volumes with full read/write support. In addition,
154+ support for striped volumes is included, and support for mirroring is
155+ under development.
156+
157+ You should only need to select this option if you are running on a PPC
158+ machine and want to access AIX LVM volumes. The user-space plugin for
159+ AIX will be available in the future.
160+
161+ This plugin is also available as a kernel module called AIXlvm_vge.o.
162+
163+EVMS OS/2 LVM Plugin
164+CONFIG_EVMS_OS2_PLUGIN
165+ Support for recognizing the type 0x35 partitions that later versions
166+ of OS/2 use in their Logical Volume Manager. Provides binary
167+ compatibility and includes Drive Linking and Bad Block Relocation
168+ emulation. The user-space plugin for OS/2 will be available in the future.
169+
170+ This plugin is also available as a kernel module called os2lvm_vge.o.
171+
172+EVMS Clustering Plugin
173+CONFIG_EVMS_ECR_PLUGIN
174+
175+ The EVMS Clustering Plugin is still under design and development.
176+ Best to just say 'n' here.
177+
178+ This plugin is available as a kernel module called evms_ecr.o.
179+
180+EVMS Debug Level
181+CONFIG_EVMS_INFO_CRITICAL
182+ Set the level for kernel messages from EVMS. Each level on the list
183+ produces message for that level and all levels above it. Thus, level
184+ "Critical" only logs the most critical messages (and thus the fewest),
185+ whereas level "Everything" produces more information than will probably
186+ ever be useful. Level "Default" is a good starting point. Level "Debug"
187+ is good if you are having problems with EVMS and want more basic info
188+ on what's going on during the volume discovery process.
189+
190+ EVMS also supports a boot-time kernel parameter to set the info level.
191+ To use this method, specify "evms_info_level=5" at boot time, or add the
192+ line "append = "evms_info_level=5"" to your lilo.conf file (replacing 5
193+ with your desired info level). See include/linux/evms/evms.h for the
194+ numerical definitions of the info levels. To use this boot-time parameter,
195+ the EVMS core driver must be statically built into the kernel (not as a
196+ module).
197+
198 Logical Volume Manager (LVM) support
199 CONFIG_BLK_DEV_LVM
200 This driver lets you combine several hard disks, hard disk
201diff -Naur linux-2002-07-24/MAINTAINERS evms-2002-07-24/MAINTAINERS
202--- linux-2002-07-24/MAINTAINERS Wed Jul 24 09:15:34 2002
203+++ evms-2002-07-24/MAINTAINERS Wed Jul 24 09:08:21 2002
204@@ -533,6 +533,13 @@
205 W: http://opensource.creative.com/
206 S: Maintained
207
208+ENTERPRISE VOLUME MANAGEMENT SYSTEM (EVMS)
209+P: Mark Peloquin, Steve Pratt, Kevin Corry
210+M: peloquin@us.ibm.com, slpratt@us.ibm.com, corryk@us.ibm.com
211+L: evms-devel@lists.sourceforge.net
212+W: http://www.sourceforge.net/projects/evms/
213+S: Supported
214+
215 ETHEREXPRESS-16 NETWORK DRIVER
216 P: Philip Blundell
217 M: Philip.Blundell@pobox.com
218diff -Naur linux-2002-07-24/Makefile evms-2002-07-24/Makefile
219--- linux-2002-07-24/Makefile Wed Jul 24 09:15:33 2002
220+++ evms-2002-07-24/Makefile Wed Jul 24 09:08:53 2002
221@@ -188,6 +188,7 @@
222 DRIVERS-$(CONFIG_BLUEZ) += drivers/bluetooth/bluetooth.o
223 DRIVERS-$(CONFIG_HOTPLUG_PCI) += drivers/hotplug/vmlinux-obj.o
224 DRIVERS-$(CONFIG_ISDN_BOOL) += drivers/isdn/vmlinux-obj.o
225+DRIVERS-$(CONFIG_EVMS) += drivers/evms/evmsdrvr.o
226
227 DRIVERS := $(DRIVERS-y)
228
229diff -Naur linux-2002-07-24/arch/i386/config.in evms-2002-07-24/arch/i386/config.in
230--- linux-2002-07-24/arch/i386/config.in Wed Jul 24 09:15:33 2002
231+++ evms-2002-07-24/arch/i386/config.in Wed Jun 5 16:07:45 2002
232@@ -301,6 +301,8 @@
233
234 source drivers/block/Config.in
235
236+source drivers/evms/Config.in
237+
238 source drivers/md/Config.in
239
240 if [ "$CONFIG_NET" = "y" ]; then
241diff -Naur linux-2002-07-24/arch/ia64/config.in evms-2002-07-24/arch/ia64/config.in
242--- linux-2002-07-24/arch/ia64/config.in Wed Jul 24 09:15:33 2002
243+++ evms-2002-07-24/arch/ia64/config.in Wed Jun 5 16:07:56 2002
244@@ -137,6 +137,7 @@
245 source drivers/block/Config.in
246 source drivers/ieee1394/Config.in
247 source drivers/message/i2o/Config.in
248+source drivers/evms/Config.in
249 source drivers/md/Config.in
250
251 mainmenu_option next_comment
252diff -Naur linux-2002-07-24/arch/parisc/config.in evms-2002-07-24/arch/parisc/config.in
253--- linux-2002-07-24/arch/parisc/config.in Wed Jul 24 09:15:33 2002
254+++ evms-2002-07-24/arch/parisc/config.in Mon Mar 11 13:26:56 2002
255@@ -106,6 +106,8 @@
256
257 source drivers/block/Config.in
258
259+source drivers/evms/Config.in
260+
261 if [ "$CONFIG_NET" = "y" ]; then
262 source net/Config.in
263 fi
264diff -Naur linux-2002-07-24/arch/ppc/config.in evms-2002-07-24/arch/ppc/config.in
265--- linux-2002-07-24/arch/ppc/config.in Wed Jul 24 09:15:33 2002
266+++ evms-2002-07-24/arch/ppc/config.in Wed Jun 5 16:08:20 2002
267@@ -245,6 +245,7 @@
268 source drivers/mtd/Config.in
269 source drivers/pnp/Config.in
270 source drivers/block/Config.in
271+source drivers/evms/Config.in
272 source drivers/md/Config.in
273
274 if [ "$CONFIG_NET" = "y" ]; then
275diff -Naur linux-2002-07-24/arch/ppc64/config.in evms-2002-07-24/arch/ppc64/config.in
276--- linux-2002-07-24/arch/ppc64/config.in Wed Jul 24 09:15:33 2002
277+++ evms-2002-07-24/arch/ppc64/config.in Tue Jul 2 11:14:31 2002
278@@ -100,6 +100,7 @@
279 source drivers/mtd/Config.in
280 source drivers/pnp/Config.in
281 source drivers/block/Config.in
282+source drivers/evms/Config.in
283 source drivers/md/Config.in
284
285 if [ "$CONFIG_NET" = "y" ]; then
286diff -Naur linux-2002-07-24/arch/ppc64/kernel/ioctl32.c evms-2002-07-24/arch/ppc64/kernel/ioctl32.c
287--- linux-2002-07-24/arch/ppc64/kernel/ioctl32.c Wed Jul 24 09:15:33 2002
288+++ evms-2002-07-24/arch/ppc64/kernel/ioctl32.c Tue Jul 2 11:14:31 2002
289@@ -3435,6 +3435,383 @@
290 return ((0 == ret) ? 0 : -EFAULT);
291 }
292
293+#ifdef CONFIG_EVMS
294+
295+#include <linux/evms/evms_kernel.h>
296+#include <linux/evms/evms_bbr.h>
297+
298+struct evms_sector_io32 {
299+ u64 disk_handle;
300+ s32 io_flag;
301+ u64 starting_sector;
302+ u64 sector_count;
303+ __kernel_caddr_t32 buffer_address;
304+ s32 status;
305+};
306+
307+struct evms_rediscover32 {
308+ s32 status;
309+ u32 drive_count;
310+ __kernel_caddr_t32 drive_array;
311+};
312+
313+struct evms_compute_csum32 {
314+ __kernel_caddr_t32 buffer_address;
315+ s32 buffer_size;
316+ u32 insum;
317+ u32 outsum;
318+ s32 status;
319+};
320+
321+struct evms_plugin_ioctl32 {
322+ u32 feature_id;
323+ s32 feature_command;
324+ s32 status;
325+ __kernel_caddr_t32 feature_ioctl_data;
326+};
327+
328+struct evms_notify_bbr32 {
329+ char object_name[EVMS_VOLUME_NAME_SIZE+1];
330+ u64 count;
331+ u64 start_sect;
332+ u64 nr_sect;
333+ __kernel_caddr_t32 buffer;
334+ s32 rw;
335+};
336+
337+#define EVMS_MD_ID 4
338+#define EVMS_MD_PERS_IOCTL_CMD 1
339+#define EVMS_MD_ADD 2
340+#define EVMS_MD_REMOVE 3
341+#define EVMS_MD_ACTIVATE 4
342+#define EVMS_MD_DEACTIVATE 5
343+#define EVMS_MD_GET_ARRAY_INFO 6
344+#define EVMS_MD_RAID5_INIT_IO 1
345+
346+struct evms_md_ioctl {
347+ int mddev_idx;
348+ int cmd;
349+ void *arg;
350+};
351+
352+struct evms_md_ioctl32 {
353+ u32 mddev_idx;
354+ u32 cmd;
355+ __kernel_caddr_t32 arg;
356+};
357+
358+struct evms_md_array_info {
359+ unsigned long state;
360+ mdp_super_t *sb;
361+};
362+
363+struct evms_md_array_info32 {
364+ u32 state;
365+ __kernel_caddr_t32 sb;
366+};
367+
368+struct raid5_ioctl_init_io {
369+ int rw;
370+ u64 lsn;
371+ u64 nr_sects;
372+ void *data;
373+};
374+
375+struct raid5_ioctl_init_io32 {
376+ s32 rw;
377+ u64 lsn;
378+ u64 nr_sects;
379+ __kernel_caddr_t32 data;
380+};
381+
382+#define EVMS_MD_PLUGIN_ID ((IBM_OEM_ID << 16) | \
383+ (EVMS_REGION_MANAGER << 12) | EVMS_MD_ID)
384+#define EVMS_BBR_PLUGIN_ID ((IBM_OEM_ID << 16) | \
385+ (EVMS_FEATURE << 12) | EVMS_BBR_FEATURE_ID)
386+
387+
388+#define EVMS_SECTOR_IO_32 _IOWR( EVMS_MAJOR, \
389+ EVMS_SECTOR_IO_NUMBER, \
390+ struct evms_sector_io32)
391+#define EVMS_REDISCOVER_VOLUMES_32 _IOWR( EVMS_MAJOR, \
392+ EVMS_REDISCOVER_VOLUMES_NUMBER, \
393+ struct evms_rediscover32)
394+#define EVMS_COMPUTE_CSUM_32 _IOWR( EVMS_MAJOR, \
395+ EVMS_COMPUTE_CSUM_NUMBER, \
396+ struct evms_compute_csum32)
397+#define EVMS_PLUGIN_IOCTL_32 _IOR( EVMS_MAJOR, \
398+ EVMS_PLUGIN_IOCTL_NUMBER, \
399+ struct evms_plugin_ioctl32)
400+
401+static int evms_sector_io(unsigned int fd, unsigned int cmd, unsigned long arg)
402+{
403+ mm_segment_t old_fs = get_fs();
404+ struct evms_sector_io32 parms32;
405+ struct evms_sector_io parms;
406+ unsigned int kcmd;
407+ void *karg;
408+ int rc = 0;
409+
410+ if (copy_from_user(&parms32, (struct evms_sector_io32 *)arg,
411+ sizeof(struct evms_sector_io32)))
412+ return -EFAULT;
413+
414+ parms.disk_handle = parms32.disk_handle;
415+ parms.io_flag = parms32.io_flag;
416+ parms.starting_sector = parms32.starting_sector;
417+ parms.sector_count = parms32.sector_count;
418+ parms.buffer_address = (u8 *)A(parms32.buffer_address);
419+ parms.status = 0;
420+
421+ kcmd = EVMS_SECTOR_IO;
422+ karg = &parms;
423+
424+ set_fs(KERNEL_DS);
425+ rc = sys_ioctl(fd, kcmd, (unsigned long)karg);
426+ set_fs(old_fs);
427+
428+ parms32.status = parms.status;
429+ parms32.buffer_address = (__kernel_caddr_t32)AA(parms.buffer_address);
430+
431+ if (copy_to_user((struct evms_sector_io32 *)arg, &parms32,
432+ sizeof(struct evms_sector_io32)))
433+ return -EFAULT;
434+
435+ return rc;
436+}
437+
438+static int evms_rediscover(unsigned int fd, unsigned int cmd, unsigned long arg)
439+{
440+ mm_segment_t old_fs = get_fs();
441+ struct evms_rediscover32 parms32;
442+ struct evms_rediscover parms;
443+ unsigned int kcmd;
444+ void *karg;
445+ int rc = 0;
446+
447+ if (copy_from_user(&parms32, (struct evms_rediscover32 *)arg,
448+ sizeof(struct evms_rediscover32)))
449+ return -EFAULT;
450+
451+ parms.drive_count = parms32.drive_count;
452+ parms.drive_array = (void *)A(parms32.drive_array);
453+ parms.status = 0;
454+
455+ kcmd = EVMS_REDISCOVER_VOLUMES;
456+ karg = &parms;
457+
458+ set_fs(KERNEL_DS);
459+ rc = sys_ioctl(fd, kcmd, (unsigned long)karg);
460+ set_fs(old_fs);
461+
462+ parms32.status = parms.status;
463+
464+ if (copy_to_user((struct evms_rediscover32 *)arg, &parms32,
465+ sizeof(struct evms_rediscover32)))
466+ return -EFAULT;
467+
468+ return rc;
469+}
470+
471+static int evms_compute_csum(unsigned int fd,
472+ unsigned int cmd,
473+ unsigned long arg)
474+{
475+ mm_segment_t old_fs = get_fs();
476+ struct evms_compute_csum32 parms32;
477+ struct evms_compute_csum parms;
478+ unsigned int kcmd;
479+ void *karg;
480+ int rc = 0;
481+
482+ if (copy_from_user(&parms32, (struct evms_compute_csum32 *)arg,
483+ sizeof(struct evms_compute_csum32)))
484+ return -EFAULT;
485+
486+ parms.insum = parms32.insum;
487+ parms.outsum = parms32.outsum;
488+ parms.buffer_size = parms32.buffer_size;
489+ parms.buffer_address = (void *)A(parms32.buffer_address);
490+ parms.status = 0;
491+
492+ kcmd = EVMS_COMPUTE_CSUM;
493+ karg = &parms;
494+
495+ set_fs(KERNEL_DS);
496+ rc = sys_ioctl(fd, kcmd, (unsigned long)karg);
497+ set_fs(old_fs);
498+
499+ parms32.status = parms.status;
500+ parms32.outsum = parms.outsum;
501+
502+ if (copy_to_user((struct evms_compute_csum32 *)arg, &parms32,
503+ sizeof(struct evms_compute_csum32)))
504+ return -EFAULT;
505+
506+ return rc;
507+}
508+
509+static int evms_bbr_plugin_ioctl(unsigned int fd,
510+ unsigned int cmd,
511+ unsigned long arg)
512+{
513+ mm_segment_t old_fs = get_fs();
514+ struct evms_notify_bbr32 bbr_parms32;
515+ struct evms_notify_bbr bbr_parms;
516+ struct evms_plugin_ioctl *parms = (struct evms_plugin_ioctl *)arg;
517+ void *old_ptr = NULL;
518+ int rc;
519+
520+ if (copy_from_user(&bbr_parms32,
521+ (struct evms_notify_bbr32 *)parms->feature_ioctl_data,
522+ sizeof(struct evms_notify_bbr32)))
523+ return -EFAULT;
524+
525+ memcpy(&bbr_parms, &bbr_parms32, sizeof(struct evms_notify_bbr32));
526+ bbr_parms.buffer = (void *)A(bbr_parms32.buffer);
527+ bbr_parms.rw = bbr_parms32.rw;
528+ old_ptr = parms->feature_ioctl_data;
529+ parms->feature_ioctl_data = &bbr_parms;
530+
531+ set_fs(KERNEL_DS);
532+ rc = sys_ioctl(fd, cmd, arg);
533+ set_fs(old_fs);
534+
535+ parms->feature_ioctl_data = old_ptr;
536+
537+ if (!rc) {
538+ bbr_parms32.nr_sect = bbr_parms.nr_sect;
539+ rc = copy_to_user((struct evms_notify_bbr32 *)parms->feature_ioctl_data,
540+ &bbr_parms32, sizeof(struct evms_notify_bbr32));
541+ }
542+
543+ return rc;
544+}
545+
546+static int evms_md_plugin_ioctl(unsigned int fd,
547+ unsigned int cmd,
548+ unsigned long arg)
549+{
550+ mm_segment_t old_fs = get_fs();
551+ void *old_ptr = NULL;
552+ void *old_md_ptr = NULL;
553+ struct evms_md_ioctl32 md_parms32;
554+ struct evms_md_ioctl md_parms;
555+ struct evms_md_array_info32 md_array_parms32;
556+ struct evms_md_array_info md_array_parms;
557+ struct raid5_ioctl_init_io32 r5_init_io_parms32;
558+ struct raid5_ioctl_init_io r5_init_io_parms;
559+ struct evms_plugin_ioctl *parms = (struct evms_plugin_ioctl *)arg;
560+ int rc;
561+
562+ if (copy_from_user(&md_parms32,
563+ (struct evms_md_ioctl*)parms->feature_ioctl_data,
564+ sizeof(struct evms_md_ioctl32)))
565+ return -EFAULT;
566+
567+ md_parms.mddev_idx = md_parms32.mddev_idx;
568+ md_parms.cmd = md_parms32.cmd;
569+ md_parms.arg = (void *)A(md_parms32.arg);
570+ old_ptr = parms->feature_ioctl_data;
571+ parms->feature_ioctl_data = &md_parms;
572+
573+ if (parms->feature_command == EVMS_MD_GET_ARRAY_INFO) {
574+ if (copy_from_user(&md_array_parms32,
575+ (struct evms_md_array_info32*)md_parms.arg,
576+ sizeof(struct evms_md_array_info32)))
577+ return -EFAULT;
578+ md_array_parms.state = md_array_parms32.state;
579+ md_array_parms.sb = (void *)A(md_array_parms32.sb);
580+ old_md_ptr = (void *)md_parms.arg;
581+ md_parms.arg = &md_array_parms;
582+ } else if (parms->feature_command == EVMS_MD_PERS_IOCTL_CMD) {
583+ if (md_parms.cmd == EVMS_MD_RAID5_INIT_IO) {
584+ if (copy_from_user(&r5_init_io_parms32,
585+ (struct raid5_ioctl_init_io32*)md_parms.arg,
586+ sizeof(struct raid5_ioctl_init_io32)))
587+ return -EFAULT;
588+
589+ r5_init_io_parms.rw = r5_init_io_parms32.rw;
590+ r5_init_io_parms.lsn = r5_init_io_parms32.lsn;
591+ r5_init_io_parms.nr_sects = r5_init_io_parms32.nr_sects;
592+ r5_init_io_parms.data = (void *)A(r5_init_io_parms32.data);
593+ old_md_ptr = (void *)md_parms.arg;
594+ md_parms.arg = &r5_init_io_parms;
595+ }
596+ }
597+
598+ set_fs(KERNEL_DS);
599+ rc = sys_ioctl(fd, cmd, arg);
600+ set_fs(old_fs);
601+
602+ parms->feature_ioctl_data = old_ptr;
603+ md_parms.arg = old_md_ptr;
604+
605+ if (!rc) {
606+ if (parms->feature_command == EVMS_MD_GET_ARRAY_INFO) {
607+ md_array_parms32.state = md_array_parms.state;
608+ rc = copy_to_user((struct evms_md_array_info32 *)md_parms.arg,
609+ &md_array_parms32,
610+ sizeof(struct evms_md_array_info32));
611+ }
612+ if (!rc) {
613+ md_parms32.mddev_idx = md_parms.mddev_idx;
614+ rc = copy_to_user((struct evms_md_ioctl*)parms->feature_ioctl_data,
615+ &md_parms32,
616+ sizeof(struct evms_md_ioctl32));
617+ }
618+ }
619+
620+ return rc;
621+}
622+
623+static int evms_plugin_ioctl(unsigned int fd,
624+ unsigned int cmd,
625+ unsigned long arg)
626+{
627+ mm_segment_t old_fs = get_fs();
628+ struct evms_plugin_ioctl32 parms32;
629+ struct evms_plugin_ioctl parms;
630+ unsigned int kcmd;
631+ void *karg;
632+ int rc;
633+
634+ if (copy_from_user(&parms32, (struct evms_plugin_ioctl32 *)arg,
635+ sizeof(struct evms_plugin_ioctl32)))
636+ return -EFAULT;
637+
638+ parms.feature_id = parms32.feature_id;
639+ parms.feature_command = parms32.feature_command;
640+ parms.status = parms32.status;
641+ parms.feature_ioctl_data = (void *)A(parms32.feature_ioctl_data);
642+
643+ kcmd = EVMS_PLUGIN_IOCTL;
644+ karg = &parms;
645+
646+ switch (parms.feature_id) {
647+ case EVMS_MD_PLUGIN_ID:
648+ rc = evms_md_plugin_ioctl(fd, kcmd, (unsigned long)karg);
649+ break;
650+ case EVMS_BBR_PLUGIN_ID:
651+ rc = evms_bbr_plugin_ioctl(fd, kcmd, (unsigned long)karg);
652+ break;
653+ default:
654+ set_fs(KERNEL_DS);
655+ rc = sys_ioctl(fd, kcmd, (unsigned long)karg);
656+ set_fs(old_fs);
657+ }
658+
659+ if (!rc) {
660+ parms32.status = parms.status;
661+ rc = copy_to_user((struct evms_plugin_ioctl32 *)arg, &parms32,
662+ sizeof(struct evms_plugin_ioctl32));
663+ }
664+
665+ return rc;
666+}
667+
668+#endif
669+
670 struct sg_io_hdr_32
671 {
672 int interface_id;
673@@ -4092,6 +4469,29 @@
674 COMPATIBLE_IOCTL(ATMTCP_REMOVE),
675 COMPATIBLE_IOCTL(ATMMPC_CTRL),
676 COMPATIBLE_IOCTL(ATMMPC_DATA),
677+
678+#ifdef CONFIG_EVMS
679+COMPATIBLE_IOCTL(EVMS_GET_INFO_LEVEL),
680+COMPATIBLE_IOCTL(EVMS_SET_INFO_LEVEL),
681+HANDLE_IOCTL(EVMS_REDISCOVER_VOLUMES_32, evms_rediscover),
682+COMPATIBLE_IOCTL(EVMS_DELETE_VOLUME),
683+HANDLE_IOCTL(EVMS_PLUGIN_IOCTL_32, evms_plugin_ioctl),
684+COMPATIBLE_IOCTL(EVMS_PROCESS_NOTIFY_EVENT),
685+COMPATIBLE_IOCTL(EVMS_GET_LOGICAL_DISK),
686+COMPATIBLE_IOCTL(EVMS_GET_LOGICAL_DISK_INFO),
687+HANDLE_IOCTL(EVMS_SECTOR_IO_32, evms_sector_io),
688+COMPATIBLE_IOCTL(EVMS_GET_MINOR),
689+COMPATIBLE_IOCTL(EVMS_GET_VOLUME_DATA),
690+COMPATIBLE_IOCTL(EVMS_GET_PLUGIN),
691+HANDLE_IOCTL(EVMS_COMPUTE_CSUM_32, evms_compute_csum),
692+COMPATIBLE_IOCTL(EVMS_GET_BMAP),
693+COMPATIBLE_IOCTL(EVMS_GET_IOCTL_VERSION),
694+COMPATIBLE_IOCTL(EVMS_GET_VERSION),
695+COMPATIBLE_IOCTL(EVMS_UPDATE_DEVICE_INFO),
696+COMPATIBLE_IOCTL(EVMS_CHECK_MOUNT_STATUS),
697+COMPATIBLE_IOCTL(EVMS_GET_VOL_STRIPE_INFO),
698+#endif
699+
700 #if defined(CONFIG_BLK_DEV_LVM) || defined(CONFIG_BLK_DEV_LVM_MODULE)
701 /* 0xfe - lvm */
702 COMPATIBLE_IOCTL(VG_SET_EXTENDABLE),
703diff -Naur linux-2002-07-24/arch/s390/config.in evms-2002-07-24/arch/s390/config.in
704--- linux-2002-07-24/arch/s390/config.in Wed Jul 24 09:15:33 2002
705+++ evms-2002-07-24/arch/s390/config.in Wed Jun 5 16:08:32 2002
706@@ -59,6 +59,8 @@
707
708 source drivers/s390/Config.in
709
710+source drivers/evms/Config.in
711+
712 if [ "$CONFIG_NET" = "y" ]; then
713 source net/Config.in
714 fi
715diff -Naur linux-2002-07-24/arch/s390x/config.in evms-2002-07-24/arch/s390x/config.in
716--- linux-2002-07-24/arch/s390x/config.in Wed Jul 24 09:15:33 2002
717+++ evms-2002-07-24/arch/s390x/config.in Wed Jun 5 16:08:39 2002
718@@ -61,6 +61,8 @@
719
720 source drivers/s390/Config.in
721
722+source drivers/evms/Config.in
723+
724 if [ "$CONFIG_NET" = "y" ]; then
725 source net/Config.in
726 fi
727diff -Naur linux-2002-07-24/drivers/Makefile evms-2002-07-24/drivers/Makefile
728--- linux-2002-07-24/drivers/Makefile Wed Jul 24 09:15:33 2002
729+++ evms-2002-07-24/drivers/Makefile Wed Jun 5 16:09:04 2002
730@@ -8,7 +8,7 @@
731
732 mod-subdirs := dio mtd sbus video macintosh usb input telephony sgi ide \
733 message/i2o message/fusion scsi md ieee1394 pnp isdn atm \
734- fc4 net/hamradio i2c acpi bluetooth
735+ fc4 net/hamradio i2c acpi bluetooth evms
736
737 subdir-y := parport char block net sound misc media cdrom hotplug
738 subdir-m := $(subdir-y)
739@@ -46,5 +46,6 @@
740 subdir-$(CONFIG_ACPI) += acpi
741
742 subdir-$(CONFIG_BLUEZ) += bluetooth
743+subdir-$(CONFIG_EVMS) += evms
744
745 include $(TOPDIR)/Rules.make
746diff -Naur linux-2002-07-24/include/linux/fs.h evms-2002-07-24/include/linux/fs.h
747--- linux-2002-07-24/include/linux/fs.h Wed Jul 24 09:15:34 2002
748+++ evms-2002-07-24/include/linux/fs.h Wed Jul 24 09:12:32 2002
749@@ -1472,6 +1472,7 @@
750 unsigned long generate_cluster_swab32(kdev_t, int b[], int);
751 extern kdev_t ROOT_DEV;
752 extern char root_device_name[];
753+extern void get_root_device_name( char * root_name );
754
755
756 extern void show_buffers(void);
757diff -Naur linux-2002-07-24/include/linux/major.h evms-2002-07-24/include/linux/major.h
758--- linux-2002-07-24/include/linux/major.h Wed Jul 24 09:15:34 2002
759+++ evms-2002-07-24/include/linux/major.h Mon Jul 8 16:23:51 2002
760@@ -142,6 +142,8 @@
761
762 #define UMEM_MAJOR 116 /* http://www.umem.com/ Battery Backed RAM */
763
764+#define EVMS_MAJOR 117 /* Enterprise Volume Management System */
765+
766 #define RTF_MAJOR 150
767 #define RAW_MAJOR 162
768
769diff -Naur linux-2002-07-24/include/linux/mempool.h evms-2002-07-24/include/linux/mempool.h
770--- linux-2002-07-24/include/linux/mempool.h Wed Dec 31 18:00:00 1969
771+++ evms-2002-07-24/include/linux/mempool.h Mon Jun 17 10:13:08 2002
772@@ -0,0 +1,30 @@
773+/*
774+ * memory buffer pool support
775+ */
776+#ifndef _LINUX_MEMPOOL_H
777+#define _LINUX_MEMPOOL_H
778+
779+#include <linux/wait.h>
780+
781+typedef void * (mempool_alloc_t)(int gfp_mask, void *pool_data);
782+typedef void (mempool_free_t)(void *element, void *pool_data);
783+
784+typedef struct mempool_s {
785+ spinlock_t lock;
786+ int min_nr; /* nr of elements at *elements */
787+ int curr_nr; /* Current nr of elements at *elements */
788+ void **elements;
789+
790+ void *pool_data;
791+ mempool_alloc_t *alloc;
792+ mempool_free_t *free;
793+ wait_queue_head_t wait;
794+} mempool_t;
795+extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
796+ mempool_free_t *free_fn, void *pool_data);
797+extern int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask);
798+extern void mempool_destroy(mempool_t *pool);
799+extern void * mempool_alloc(mempool_t *pool, int gfp_mask);
800+extern void mempool_free(void *element, mempool_t *pool);
801+
802+#endif /* _LINUX_MEMPOOL_H */
803diff -Naur linux-2002-07-24/include/linux/sysctl.h evms-2002-07-24/include/linux/sysctl.h
804--- linux-2002-07-24/include/linux/sysctl.h Wed Jul 24 09:15:34 2002
805+++ evms-2002-07-24/include/linux/sysctl.h Wed Jun 5 16:11:50 2002
806@@ -556,7 +556,8 @@
807 DEV_HWMON=2,
808 DEV_PARPORT=3,
809 DEV_RAID=4,
810- DEV_MAC_HID=5
811+ DEV_MAC_HID=5,
812+ DEV_EVMS=6
813 };
814
815 /* /proc/sys/dev/cdrom */
816@@ -572,6 +573,18 @@
817 /* /proc/sys/dev/parport */
818 enum {
819 DEV_PARPORT_DEFAULT=-3
820+};
821+
822+/* /proc/sys/dev/evms */
823+enum {
824+ DEV_EVMS_INFO_LEVEL=1,
825+ DEV_EVMS_MD=2
826+};
827+
828+/* /proc/sys/dev/evms/raid */
829+enum {
830+ DEV_EVMS_MD_SPEED_LIMIT_MIN=1,
831+ DEV_EVMS_MD_SPEED_LIMIT_MAX=2
832 };
833
834 /* /proc/sys/dev/raid */
835diff -Naur linux-2002-07-24/init/do_mounts.c evms-2002-07-24/init/do_mounts.c
836--- linux-2002-07-24/init/do_mounts.c Wed Jul 24 09:15:34 2002
837+++ evms-2002-07-24/init/do_mounts.c Wed Jul 24 09:13:46 2002
838@@ -224,6 +224,7 @@
839 { "ftlc", 0x2c10 },
840 { "ftld", 0x2c18 },
841 { "mtdblock", 0x1f00 },
842+ { "evms", 0x7500 },
843 { NULL, 0 }
844 };
845
846@@ -738,6 +739,11 @@
847 }
848 #endif
849 mount_block_root("/dev/root", root_mountflags);
850+}
851+
852+void get_root_device_name( char * root_name )
853+{
854+ strncpy(root_name, root_device_name, 63);
855 }
856
857 #ifdef CONFIG_BLK_DEV_INITRD
858diff -Naur linux-2002-07-24/kernel/ksyms.c evms-2002-07-24/kernel/ksyms.c
859--- linux-2002-07-24/kernel/ksyms.c Wed Jul 24 09:15:34 2002
860+++ evms-2002-07-24/kernel/ksyms.c Wed Jul 24 09:14:08 2002
861@@ -320,6 +320,8 @@
862 EXPORT_SYMBOL(refile_buffer);
863 EXPORT_SYMBOL(max_sectors);
864 EXPORT_SYMBOL(max_readahead);
865+EXPORT_SYMBOL(is_swap_partition);
866+EXPORT_SYMBOL(walk_gendisk);
867
868 /* tty routines */
869 EXPORT_SYMBOL(tty_hangup);
870diff -Naur linux-2002-07-24/mm/Makefile evms-2002-07-24/mm/Makefile
871--- linux-2002-07-24/mm/Makefile Wed Jul 24 09:15:34 2002
872+++ evms-2002-07-24/mm/Makefile Wed Jun 19 11:21:04 2002
873@@ -9,12 +9,12 @@
874
875 O_TARGET := mm.o
876
877-export-objs := shmem.o filemap.o memory.o page_alloc.o
878+export-objs := shmem.o filemap.o memory.o page_alloc.o mempool.o
879
880 obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
881 vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
882 page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \
883- shmem.o
884+ shmem.o mempool.o
885
886 obj-$(CONFIG_HIGHMEM) += highmem.o
887
888diff -Naur linux-2002-07-24/mm/mempool.c evms-2002-07-24/mm/mempool.c
889--- linux-2002-07-24/mm/mempool.c Wed Dec 31 18:00:00 1969
890+++ evms-2002-07-24/mm/mempool.c Wed Jun 19 09:47:16 2002
891@@ -0,0 +1,273 @@
892+/*
893+ * linux/mm/mempool.c
894+ *
895+ * memory buffer pool support. Such pools are mostly used
896+ * for guaranteed, deadlock-free memory allocations during
897+ * extreme VM load.
898+ *
899+ * started by Ingo Molnar, Copyright (C) 2001
900+ */
901+
902+#include <linux/mm.h>
903+#include <linux/slab.h>
904+#include <linux/module.h>
905+#include <linux/compiler.h>
906+#include <linux/mempool.h>
907+#include <linux/fs.h>
908+
909+#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
910+
911+static void add_element(mempool_t *pool, void *element)
912+{
913+ BUG_ON(pool->curr_nr >= pool->min_nr);
914+ pool->elements[pool->curr_nr++] = element;
915+}
916+
917+static void *remove_element(mempool_t *pool)
918+{
919+ BUG_ON(pool->curr_nr <= 0);
920+ return pool->elements[--pool->curr_nr];
921+}
922+
923+static void free_pool(mempool_t *pool)
924+{
925+ while (pool->curr_nr) {
926+ void *element = remove_element(pool);
927+ pool->free(element, pool->pool_data);
928+ }
929+ kfree(pool->elements);
930+ kfree(pool);
931+}
932+
933+/**
934+ * mempool_create - create a memory pool
935+ * @min_nr: the minimum number of elements guaranteed to be
936+ * allocated for this pool.
937+ * @alloc_fn: user-defined element-allocation function.
938+ * @free_fn: user-defined element-freeing function.
939+ * @pool_data: optional private data available to the user-defined functions.
940+ *
941+ * this function creates and allocates a guaranteed size, preallocated
942+ * memory pool. The pool can be used from the mempool_alloc and mempool_free
943+ * functions. This function might sleep. Both the alloc_fn() and the free_fn()
944+ * functions might sleep - as long as the mempool_alloc function is not called
945+ * from IRQ contexts.
946+ */
947+mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
948+ mempool_free_t *free_fn, void *pool_data)
949+{
950+ mempool_t *pool;
951+
952+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
953+ if (!pool)
954+ return NULL;
955+ memset(pool, 0, sizeof(*pool));
956+ pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL);
957+ if (!pool->elements) {
958+ kfree(pool);
959+ return NULL;
960+ }
961+ spin_lock_init(&pool->lock);
962+ pool->min_nr = min_nr;
963+ pool->pool_data = pool_data;
964+ init_waitqueue_head(&pool->wait);
965+ pool->alloc = alloc_fn;
966+ pool->free = free_fn;
967+
968+ /*
969+ * First pre-allocate the guaranteed number of buffers.
970+ */
971+ while (pool->curr_nr < pool->min_nr) {
972+ void *element;
973+
974+ element = pool->alloc(GFP_KERNEL, pool->pool_data);
975+ if (unlikely(!element)) {
976+ free_pool(pool);
977+ return NULL;
978+ }
979+ add_element(pool, element);
980+ }
981+ return pool;
982+}
983+
984+/**
985+ * mempool_resize - resize an existing memory pool
986+ * @pool: pointer to the memory pool which was allocated via
987+ * mempool_create().
988+ * @new_min_nr: the new minimum number of elements guaranteed to be
989+ * allocated for this pool.
990+ * @gfp_mask: the usual allocation bitmask.
991+ *
992+ * This function shrinks/grows the pool. In the case of growing,
993+ * it cannot be guaranteed that the pool will be grown to the new
994+ * size immediately, but new mempool_free() calls will refill it.
995+ *
996+ * Note, the caller must guarantee that no mempool_destroy is called
997+ * while this function is running. mempool_alloc() & mempool_free()
998+ * might be called (eg. from IRQ contexts) while this function executes.
999+ */
1000+int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask)
1001+{
1002+ void *element;
1003+ void **new_elements;
1004+ unsigned long flags;
1005+
1006+ BUG_ON(new_min_nr <= 0);
1007+
1008+ spin_lock_irqsave(&pool->lock, flags);
1009+ if (new_min_nr < pool->min_nr) {
1010+ while (pool->curr_nr > new_min_nr) {
1011+ element = remove_element(pool);
1012+ spin_unlock_irqrestore(&pool->lock, flags);
1013+ pool->free(element, pool->pool_data);
1014+ spin_lock_irqsave(&pool->lock, flags);
1015+ }
1016+ pool->min_nr = new_min_nr;
1017+ goto out_unlock;
1018+ }
1019+ spin_unlock_irqrestore(&pool->lock, flags);
1020+
1021+ /* Grow the pool */
1022+ new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
1023+ if (!new_elements)
1024+ return -ENOMEM;
1025+
1026+ spin_lock_irqsave(&pool->lock, flags);
1027+ memcpy(new_elements, pool->elements,
1028+ pool->curr_nr * sizeof(*new_elements));
1029+ kfree(pool->elements);
1030+ pool->elements = new_elements;
1031+ pool->min_nr = new_min_nr;
1032+
1033+ while (pool->curr_nr < pool->min_nr) {
1034+ spin_unlock_irqrestore(&pool->lock, flags);
1035+ element = pool->alloc(gfp_mask, pool->pool_data);
1036+ if (!element)
1037+ goto out;
1038+ spin_lock_irqsave(&pool->lock, flags);
1039+ if (pool->curr_nr < pool->min_nr)
1040+ add_element(pool, element);
1041+ else
1042+ kfree(element); /* Raced */
1043+ }
1044+out_unlock:
1045+ spin_unlock_irqrestore(&pool->lock, flags);
1046+out:
1047+ return 0;
1048+}
1049+
1050+/**
1051+ * mempool_destroy - deallocate a memory pool
1052+ * @pool: pointer to the memory pool which was allocated via
1053+ * mempool_create().
1054+ *
1055+ * this function only sleeps if the free_fn() function sleeps. The caller
1056+ * has to guarantee that all elements have been returned to the pool (ie:
1057+ * freed) prior to calling mempool_destroy().
1058+ */
1059+void mempool_destroy(mempool_t *pool)
1060+{
1061+ if (pool->curr_nr != pool->min_nr)
1062+ BUG(); /* There were outstanding elements */
1063+ free_pool(pool);
1064+}
1065+
1066+/**
1067+ * mempool_alloc - allocate an element from a specific memory pool
1068+ * @pool: pointer to the memory pool which was allocated via
1069+ * mempool_create().
1070+ * @gfp_mask: the usual allocation bitmask.
1071+ *
1072+ * this function only sleeps if the alloc_fn function sleeps or
1073+ * returns NULL. Note that due to preallocation, this function
1074+ * *never* fails when called from process contexts. (it might
1075+ * fail if called from an IRQ context.)
1076+ */
1077+void * mempool_alloc(mempool_t *pool, int gfp_mask)
1078+{
1079+ void *element;
1080+ unsigned long flags;
1081+ int curr_nr;
1082+ DECLARE_WAITQUEUE(wait, current);
1083+ int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
1084+
1085+repeat_alloc:
1086+ element = pool->alloc(gfp_nowait, pool->pool_data);
1087+ if (likely(element != NULL))
1088+ return element;
1089+
1090+ /*
1091+ * If the pool is less than 50% full then try harder
1092+ * to allocate an element:
1093+ */
1094+ if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) {
1095+ element = pool->alloc(gfp_mask, pool->pool_data);
1096+ if (likely(element != NULL))
1097+ return element;
1098+ }
1099+
1100+ /*
1101+ * Kick the VM at this point.
1102+ */
1103+ wakeup_bdflush();
1104+
1105+ spin_lock_irqsave(&pool->lock, flags);
1106+ if (likely(pool->curr_nr)) {
1107+ element = remove_element(pool);
1108+ spin_unlock_irqrestore(&pool->lock, flags);
1109+ return element;
1110+ }
1111+ spin_unlock_irqrestore(&pool->lock, flags);
1112+
1113+ /* We must not sleep in the GFP_ATOMIC case */
1114+ if (gfp_mask == gfp_nowait)
1115+ return NULL;
1116+
1117+ run_task_queue(&tq_disk);
1118+
1119+ add_wait_queue_exclusive(&pool->wait, &wait);
1120+ set_task_state(current, TASK_UNINTERRUPTIBLE);
1121+
1122+ spin_lock_irqsave(&pool->lock, flags);
1123+ curr_nr = pool->curr_nr;
1124+ spin_unlock_irqrestore(&pool->lock, flags);
1125+
1126+ if (!curr_nr)
1127+ schedule();
1128+
1129+ current->state = TASK_RUNNING;
1130+ remove_wait_queue(&pool->wait, &wait);
1131+
1132+ goto repeat_alloc;
1133+}
1134+
1135+/**
1136+ * mempool_free - return an element to the pool.
1137+ * @element: pool element pointer.
1138+ * @pool: pointer to the memory pool which was allocated via
1139+ * mempool_create().
1140+ *
1141+ * this function only sleeps if the free_fn() function sleeps.
1142+ */
1143+void mempool_free(void *element, mempool_t *pool)
1144+{
1145+ unsigned long flags;
1146+
1147+ if (pool->curr_nr < pool->min_nr) {
1148+ spin_lock_irqsave(&pool->lock, flags);
1149+ if (pool->curr_nr < pool->min_nr) {
1150+ add_element(pool, element);
1151+ spin_unlock_irqrestore(&pool->lock, flags);
1152+ wake_up(&pool->wait);
1153+ return;
1154+ }
1155+ spin_unlock_irqrestore(&pool->lock, flags);
1156+ }
1157+ pool->free(element, pool->pool_data);
1158+}
1159+
1160+EXPORT_SYMBOL(mempool_create);
1161+EXPORT_SYMBOL(mempool_resize);
1162+EXPORT_SYMBOL(mempool_destroy);
1163+EXPORT_SYMBOL(mempool_alloc);
1164+EXPORT_SYMBOL(mempool_free);
This page took 0.26654 seconds and 4 git commands to generate.