]> git.pld-linux.org Git - packages/kernel.git/blame - evms-linux-2.4.20-common-files.patch
- obsolete
[packages/kernel.git] / evms-linux-2.4.20-common-files.patch
CommitLineData
30dc7178 1diff -Naur linux-2002-09-16/Documentation/Configure.help evms-2002-09-16/Documentation/Configure.help
2--- linux-2002-09-16/Documentation/Configure.help Mon Sep 16 08:55:18 2002
3+++ evms-2002-09-16/Documentation/Configure.help Mon Sep 16 08:50:20 2002
4@@ -1788,6 +1788,196 @@
5 called on26.o. You must also have a high-level driver for the type
6 of device that you want to support.
7
8+EVMS Kernel Runtime
9+CONFIG_EVMS
10+ EVMS runtime driver. This is a plugin-based framework for volume
11+ management, and combines support for partitioning, software RAID,
12+ LVM, and more into a single interface.
13+
14+ User-space tools are required to perform administration of EVMS logical
15+ volumes. Please visit <http://www.sourceforge.net/projects/evms> for
16+ more details on downloading and installing these tools.
17+
18+ This driver is also available as a pair of modules called evms.o and
19+ evms_passthru.o ( = code which can be inserted and removed from the
20+ running kernel whenever you want). If you want to compile it as a module,
21+ say M here and read <file:Documentation/modules.txt>.
22+
23+EVMS Local Device Manager Plugin
24+CONFIG_EVMS_LOCAL_DEV_MGR
25+ Support for local IDE and SCSI devices. This plugin is required if EVMS
26+ support is selected.
27+
28+ This plugin is also available as a kernel module called ldev_mgr.o.
29+
30+EVMS DOS Partition Manager Plugin
31+CONFIG_EVMS_DOS_SEGMENT_MGR
32+ Support for recognizing all partitions using the ever-popular DOS
33+ partitioning scheme (MBRs & EBRs). 99% of the time you will need
34+ this plugin to do anything useful with EVMS.
35+
36+ This plugin also contains support for recognizing BSD disklabels,
37+ UNIXWARE partitions, Solaris-X86 partitions, and OS/2 DLAT entries.
38+
39+ This plugin is also available as a kernel module called dos_part.o.
40+
41+EVMS GPT Partition Manager Plugin
42+CONFIG_EVMS_GPT_SEGMENT_MGR
43+ Support for recognizing all partitions using the new EFI GUID partitioning
44+ scheme that is used by IA-64 machines. You should only need to enable this
45+ plugin if you are running Linux on an IA-64 machine. All other architectures
46+ can say 'N' here.
47+
48+ This plugin is also available as a kernel module called gpt_part.o
49+
50+EVMS S/390 Partition Manager Plugin
51+CONFIG_EVMS_S390_SEGMENT_MGR
52+ Support for recognizing all partitions created on S/390 machines. This
53+ plugin recognizes CDL, LDL, and CMS partition formats. You should only need
54+ to enable this plugin if you are running Linux on an S/390. All other
55+ architectures can say 'N' here.
56+
57+ This plugin is also available as a kernel module called s390_part.o
58+
59+EVMS SnapShot Feature Plugin
60+CONFIG_EVMS_SNAPSHOT
61+ This feature plugin lets you create a snapshot of any volume
62+ under EVMS control using any other device under EVMS
63+ control as the target for the snapshot volume.
64+
65+ This plugin is also available as a kernel module called snapshot.o.
66+
67+EVMS DriveLink Feature Plugin
68+CONFIG_EVMS_DRIVELINK
69+ This feature plugin lets you combine multiple devices into a
70+ single virtual block device. The size of the virtual block
71+ device is approximately equal to the sum of all its components.
72+ It currently supports combining up to 60 devices (partitions,
73+ disks, or logical volumes).
74+
75+ This plugin is also available as a kernel module called evms_drivelink.o.
76+
77+EVMS Bad Block Relocation (BBR) Feature
78+CONFIG_EVMS_BBR
79+ BBR is designed to remap I/O write failures to another safe
80+ location on disk. Note that most disk drives have BBR built
81+ into them, so software BBR will only be activated when all
82+ hardware BBR replacement sectors have been used.
83+
84+ This plugin is also available as a kernel module called evms_bbr.o.
85+
86+EVMS Linux LVM Plugin
87+CONFIG_EVMS_LVM
88+ The LVM plugin is responsible for providing compatibility with the Linux
89+ LVM. This plugin recognizes disks and partitions that are LVM physical
90+ volumes (PVs), and assembles the appropriate volume groups (VGs). LVM
91+ logical volumes (LVs) are exported as EVMS volumes with full read/write
92+ support. In addition, support for striped and snapshotted volumes is
93+ included. The corresponding EVMS Engine plugin must also be installed in
94+ order to perform any administration of LVM VGs and LVs.
95+
96+ This plugin is also available as a kernel module called lvm_vge.o.
97+
98+EVMS MD Plugin
99+CONFIG_EVMS_MD
100+ The MD plugin is responsible for providing compatibility with the Linux
101+ Software RAID driver (MD). It allows several devices to be combined into
102+ one logical device. This can be used to simply append one disk or
103+ partition to another, or to combine several redundant disks into a
104+ RAID 1/4/5 device so as to provide protection against hard disk failures.
105+
106+ This plugin is also available as a kernel module called md_core.o.
107+
108+EVMS MD RAID-Linear Plugin
109+CONFIG_EVMS_MD_LINEAR
110+ The RAID-Linear personality combines disks and/or partitions simply by
111+ appending one to the other.
112+
113+ This plugin is also available as a kernel module called md_linear.o.
114+
115+EVMS MD RAID-0 Plugin
116+CONFIG_EVMS_MD_RAID0
117+ The RAID-0 personality combines disks and/or partitions into one
118+ logical device using striping. This method writes data evenly across
119+ all members in the device in order to increase the throughput rate if
120+ each member resides on a distinct disk.
121+
122+ This plugin is also available as a kernel module called md_raid0.o.
123+
124+EVMS MD RAID-1 Plugin
125+CONFIG_EVMS_MD_RAID1
126+ The RAID-1 personality implements mirroring, in which a logical device
127+ consists of several disks that are exact copies of each other. In the
128+ event of a mirror failure, the RAID-1 personality will continue to use
129+ the remaining mirrors in the set, providing an error free device to the
130+ higher levels of the kernel. In a set with N drives, the available space
131+ is the capacity of a single drive, and the set protects against the
132+ failure of N-1 drives.
133+
134+ This plugin is also available as a kernel module called md_raid1.o.
135+
136+EVMS MD RAID-4/RAID-5 Plugin
137+CONFIG_EVMS_MD_RAID5
138+ A RAID-5 set of N drives with a capacity of C MB per drive provides
139+ the capacity of C * (N-1) MB, and protects against a failure of a
140+ single drive. For a given sector (row) number, (N-1) drives contain
141+ data sectors, and one drive contains the parity protection. For a
142+ RAID-4 set, the parity blocks are present on a single drive, while
143+ a RAID-5 set distributes the parity across all drives in one of the
144+ available parity distribution methods.
145+
146+ This plugin is also available as a kernel module called md_raid5.o.
147+
148+EVMS AIX LVM Plugin
149+CONFIG_EVMS_AIX
150+ The AIX LVM plugin is responsible for providing compatibility with the
151+ AIX LVM. This plugin recognizes disks and partitions that are AIX disks,
152+ and assembles the appropriate volume groups. AIX logical volumes are
153+ exported as EVMS volumes with full read/write support. In addition,
154+ support for striped volumes is included, and support for mirroring is
155+ under development.
156+
157+ You should only need to select this option if you are running on a PPC
158+ machine and want to access AIX LVM volumes. The user-space plugin for
159+ AIX will be available in the future.
160+
161+ This plugin is also available as a kernel module called AIXlvm_vge.o.
162+
163+EVMS OS/2 LVM Plugin
164+CONFIG_EVMS_OS2
165+ Support for recognizing the type 0x35 partitions that later versions
166+ of OS/2 use in its Logical Volume Manager. Provides binary
167+ compatibility and includes Drive Linking and Bad Block Relocation
168+ emulation. The user-space plugin for OS/2 will be available in the future.
169+
170+ This plugin is also available as a kernel module called os2lvm_vge.o.
171+
172+EVMS Clustering Plugin
173+CONFIG_EVMS_ECR
174+
175+ The EVMS Clustering Plugin is still under design and development.
176+ Best to just say 'n' here.
177+
178+ This plugin is available as a kernel module called evms_ecr.o.
179+
180+EVMS Debug Level
181+CONFIG_EVMS_INFO_CRITICAL
182+ Set the level for kernel messages from EVMS. Each level on the list
183+ produces messages for that level and all levels above it. Thus, level
184+ "Critical" only logs the most critical messages (and thus the fewest),
185+ whereas level "Everything" produces more information than will probably
186+ ever be useful. Level "Default" is a good starting point. Level "Debug"
187+ is good if you are having problems with EVMS and want more basic info
188+ on what's going on during the volume discovery process.
189+
190+ EVMS also supports a boot-time kernel parameter to set the info level.
191+ To use this method, specify "evms_info_level=5" at boot time, or add the
192+ line "append = "evms_info_level=5"" to your lilo.conf file (replacing 5
193+ with your desired info level). See include/linux/evms/evms.h for the
194+ numerical definitions of the info levels. To use this boot-time parameter,
195+ the EVMS core driver must be statically built into the kernel (not as a
196+ module).
197+
198 Logical Volume Manager (LVM) support
199 CONFIG_BLK_DEV_LVM
200 This driver lets you combine several hard disks, hard disk
201diff -Naur linux-2002-09-16/MAINTAINERS evms-2002-09-16/MAINTAINERS
202--- linux-2002-09-16/MAINTAINERS Mon Sep 16 08:55:18 2002
203+++ evms-2002-09-16/MAINTAINERS Mon Sep 16 08:50:20 2002
204@@ -546,6 +546,13 @@
205 W: http://opensource.creative.com/
206 S: Maintained
207
208+ENTERPRISE VOLUME MANAGEMENT SYSTEM (EVMS)
209+P: Mark Peloquin, Steve Pratt, Kevin Corry
210+M: peloquin@us.ibm.com, slpratt@us.ibm.com, corryk@us.ibm.com
211+L: evms-devel@lists.sourceforge.net
212+W: http://www.sourceforge.net/projects/evms/
213+S: Supported
214+
215 ETHEREXPRESS-16 NETWORK DRIVER
216 P: Philip Blundell
217 M: Philip.Blundell@pobox.com
218diff -Naur linux-2002-09-16/Makefile evms-2002-09-16/Makefile
219--- linux-2002-09-16/Makefile Mon Sep 16 08:55:18 2002
220+++ evms-2002-09-16/Makefile Mon Sep 16 08:50:20 2002
221@@ -190,6 +190,7 @@
222 DRIVERS-$(CONFIG_BLUEZ) += drivers/bluetooth/bluetooth.o
223 DRIVERS-$(CONFIG_HOTPLUG_PCI) += drivers/hotplug/vmlinux-obj.o
224 DRIVERS-$(CONFIG_ISDN_BOOL) += drivers/isdn/vmlinux-obj.o
225+DRIVERS-$(CONFIG_EVMS) += drivers/evms/evmsdrvr.o
226
227 DRIVERS := $(DRIVERS-y)
228
229diff -Naur linux-2002-09-16/arch/i386/config.in evms-2002-09-16/arch/i386/config.in
230--- linux-2002-09-16/arch/i386/config.in Mon Sep 16 08:55:18 2002
231+++ evms-2002-09-16/arch/i386/config.in Mon Sep 16 08:50:21 2002
232@@ -326,6 +326,8 @@
233
234 source drivers/block/Config.in
235
236+source drivers/evms/Config.in
237+
238 source drivers/md/Config.in
239
240 if [ "$CONFIG_NET" = "y" ]; then
241diff -Naur linux-2002-09-16/arch/ia64/config.in evms-2002-09-16/arch/ia64/config.in
242--- linux-2002-09-16/arch/ia64/config.in Mon Sep 16 08:55:18 2002
243+++ evms-2002-09-16/arch/ia64/config.in Mon Sep 16 08:52:33 2002
244@@ -139,6 +139,7 @@
245 source drivers/block/Config.in
246 source drivers/ieee1394/Config.in
247 source drivers/message/i2o/Config.in
248+ source drivers/evms/Config.in
249 source drivers/md/Config.in
250 source drivers/message/fusion/Config.in
251
252diff -Naur linux-2002-09-16/arch/parisc/config.in evms-2002-09-16/arch/parisc/config.in
253--- linux-2002-09-16/arch/parisc/config.in Mon Sep 16 08:55:18 2002
254+++ evms-2002-09-16/arch/parisc/config.in Mon Sep 16 08:53:20 2002
255@@ -97,6 +97,8 @@
256
257 source drivers/block/Config.in
258
259+source drivers/evms/Config.in
260+
261 source drivers/md/Config.in
262
263 if [ "$CONFIG_NET" = "y" ]; then
264diff -Naur linux-2002-09-16/arch/ppc/config.in evms-2002-09-16/arch/ppc/config.in
265--- linux-2002-09-16/arch/ppc/config.in Mon Sep 16 08:55:18 2002
266+++ evms-2002-09-16/arch/ppc/config.in Mon Sep 16 08:50:21 2002
267@@ -249,6 +249,7 @@
268 source drivers/mtd/Config.in
269 source drivers/pnp/Config.in
270 source drivers/block/Config.in
271+source drivers/evms/Config.in
272 source drivers/md/Config.in
273
274 if [ "$CONFIG_NET" = "y" ]; then
275diff -Naur linux-2002-09-16/arch/ppc64/config.in evms-2002-09-16/arch/ppc64/config.in
276--- linux-2002-09-16/arch/ppc64/config.in Mon Sep 16 08:55:18 2002
277+++ evms-2002-09-16/arch/ppc64/config.in Mon Sep 16 08:50:22 2002
278@@ -99,6 +99,7 @@
279 endmenu
280
281 source drivers/block/Config.in
282+source drivers/evms/Config.in
283 source drivers/md/Config.in
284
285 if [ "$CONFIG_NET" = "y" ]; then
286diff -Naur linux-2002-09-16/arch/s390/config.in evms-2002-09-16/arch/s390/config.in
287--- linux-2002-09-16/arch/s390/config.in Mon Sep 16 08:55:18 2002
288+++ evms-2002-09-16/arch/s390/config.in Mon Sep 16 08:50:21 2002
289@@ -59,6 +59,8 @@
290
291 source drivers/s390/Config.in
292
293+source drivers/evms/Config.in
294+
295 if [ "$CONFIG_NET" = "y" ]; then
296 source net/Config.in
297 fi
298diff -Naur linux-2002-09-16/arch/s390x/config.in evms-2002-09-16/arch/s390x/config.in
299--- linux-2002-09-16/arch/s390x/config.in Mon Sep 16 08:55:18 2002
300+++ evms-2002-09-16/arch/s390x/config.in Mon Sep 16 08:50:21 2002
301@@ -63,6 +63,8 @@
302
303 source drivers/s390/Config.in
304
305+source drivers/evms/Config.in
306+
307 if [ "$CONFIG_NET" = "y" ]; then
308 source net/Config.in
309 fi
310diff -Naur linux-2002-09-16/drivers/Makefile evms-2002-09-16/drivers/Makefile
311--- linux-2002-09-16/drivers/Makefile Mon Sep 16 08:55:18 2002
312+++ evms-2002-09-16/drivers/Makefile Mon Sep 16 08:50:22 2002
313@@ -8,7 +8,7 @@
314
315 mod-subdirs := dio hil mtd sbus video macintosh usb input telephony sgi ide \
316 message/i2o message/fusion scsi md ieee1394 pnp isdn atm \
317- fc4 net/hamradio i2c acpi bluetooth
318+ fc4 net/hamradio i2c acpi bluetooth evms
319
320 subdir-y := parport char block net sound misc media cdrom hotplug
321 subdir-m := $(subdir-y)
322@@ -48,5 +48,6 @@
323 subdir-$(CONFIG_ACPI) += acpi
324
325 subdir-$(CONFIG_BLUEZ) += bluetooth
326+subdir-$(CONFIG_EVMS) += evms
327
328 include $(TOPDIR)/Rules.make
329diff -Naur linux-2002-09-16/include/linux/fs.h evms-2002-09-16/include/linux/fs.h
330--- linux-2002-09-16/include/linux/fs.h Mon Sep 16 08:55:18 2002
331+++ evms-2002-09-16/include/linux/fs.h Mon Sep 16 08:50:23 2002
332@@ -1478,6 +1478,7 @@
333 unsigned long generate_cluster_swab32(kdev_t, int b[], int);
334 extern kdev_t ROOT_DEV;
335 extern char root_device_name[];
336+extern void get_root_device_name( char * root_name );
337
338
339 extern void show_buffers(void);
340diff -Naur linux-2002-09-16/include/linux/major.h evms-2002-09-16/include/linux/major.h
341--- linux-2002-09-16/include/linux/major.h Mon Sep 16 08:55:18 2002
342+++ evms-2002-09-16/include/linux/major.h Mon Jul 8 16:23:51 2002
343@@ -142,6 +142,8 @@
344
345 #define UMEM_MAJOR 116 /* http://www.umem.com/ Battery Backed RAM */
346
347+#define EVMS_MAJOR 117 /* Enterprise Volume Management System */
348+
349 #define RTF_MAJOR 150
350 #define RAW_MAJOR 162
351
352diff -Naur linux-2002-09-16/include/linux/mempool.h evms-2002-09-16/include/linux/mempool.h
353--- linux-2002-09-16/include/linux/mempool.h Wed Dec 31 18:00:00 1969
354+++ evms-2002-09-16/include/linux/mempool.h Mon Jun 17 10:13:08 2002
355@@ -0,0 +1,30 @@
356+/*
357+ * memory buffer pool support
358+ */
359+#ifndef _LINUX_MEMPOOL_H
360+#define _LINUX_MEMPOOL_H
361+
362+#include <linux/wait.h>
363+
364+typedef void * (mempool_alloc_t)(int gfp_mask, void *pool_data);
365+typedef void (mempool_free_t)(void *element, void *pool_data);
366+
367+typedef struct mempool_s {
368+ spinlock_t lock;
369+ int min_nr; /* nr of elements at *elements */
370+ int curr_nr; /* Current nr of elements at *elements */
371+ void **elements;
372+
373+ void *pool_data;
374+ mempool_alloc_t *alloc;
375+ mempool_free_t *free;
376+ wait_queue_head_t wait;
377+} mempool_t;
378+extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
379+ mempool_free_t *free_fn, void *pool_data);
380+extern int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask);
381+extern void mempool_destroy(mempool_t *pool);
382+extern void * mempool_alloc(mempool_t *pool, int gfp_mask);
383+extern void mempool_free(void *element, mempool_t *pool);
384+
385+#endif /* _LINUX_MEMPOOL_H */
386diff -Naur linux-2002-09-16/include/linux/sysctl.h evms-2002-09-16/include/linux/sysctl.h
387--- linux-2002-09-16/include/linux/sysctl.h Mon Sep 16 08:55:18 2002
388+++ evms-2002-09-16/include/linux/sysctl.h Mon Sep 16 08:50:22 2002
389@@ -557,7 +557,8 @@
390 DEV_HWMON=2,
391 DEV_PARPORT=3,
392 DEV_RAID=4,
393- DEV_MAC_HID=5
394+ DEV_MAC_HID=5,
395+ DEV_EVMS=6
396 };
397
398 /* /proc/sys/dev/cdrom */
399@@ -573,6 +574,18 @@
400 /* /proc/sys/dev/parport */
401 enum {
402 DEV_PARPORT_DEFAULT=-3
403+};
404+
405+/* /proc/sys/dev/evms */
406+enum {
407+ DEV_EVMS_INFO_LEVEL=1,
408+ DEV_EVMS_MD=2
409+};
410+
411+/* /proc/sys/dev/evms/raid */
412+enum {
413+ DEV_EVMS_MD_SPEED_LIMIT_MIN=1,
414+ DEV_EVMS_MD_SPEED_LIMIT_MAX=2
415 };
416
417 /* /proc/sys/dev/raid */
418diff -Naur linux-2002-09-16/init/do_mounts.c evms-2002-09-16/init/do_mounts.c
419--- linux-2002-09-16/init/do_mounts.c Mon Sep 16 08:55:18 2002
420+++ evms-2002-09-16/init/do_mounts.c Mon Sep 16 08:50:23 2002
421@@ -225,6 +225,7 @@
422 { "ftlc", 0x2c10 },
423 { "ftld", 0x2c18 },
424 { "mtdblock", 0x1f00 },
425+ { "evms", 0x7500 },
426 { NULL, 0 }
427 };
428
429@@ -739,6 +740,11 @@
430 }
431 #endif
432 mount_block_root("/dev/root", root_mountflags);
433+}
434+
435+void get_root_device_name( char * root_name )
436+{
437+ strncpy(root_name, root_device_name, 63);
438 }
439
440 #ifdef CONFIG_BLK_DEV_INITRD
441diff -Naur linux-2002-09-16/kernel/ksyms.c evms-2002-09-16/kernel/ksyms.c
442--- linux-2002-09-16/kernel/ksyms.c Mon Sep 16 08:55:18 2002
443+++ evms-2002-09-16/kernel/ksyms.c Mon Sep 16 08:50:23 2002
444@@ -323,6 +323,8 @@
445 EXPORT_SYMBOL(refile_buffer);
446 EXPORT_SYMBOL(max_sectors);
447 EXPORT_SYMBOL(max_readahead);
448+EXPORT_SYMBOL(is_swap_partition);
449+EXPORT_SYMBOL(walk_gendisk);
450
451 /* tty routines */
452 EXPORT_SYMBOL(tty_hangup);
453diff -Naur linux-2002-09-16/mm/Makefile evms-2002-09-16/mm/Makefile
454--- linux-2002-09-16/mm/Makefile Mon Sep 16 08:55:18 2002
455+++ evms-2002-09-16/mm/Makefile Wed Jun 19 11:21:04 2002
456@@ -9,12 +9,12 @@
457
458 O_TARGET := mm.o
459
460-export-objs := shmem.o filemap.o memory.o page_alloc.o
461+export-objs := shmem.o filemap.o memory.o page_alloc.o mempool.o
462
463 obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
464 vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
465 page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \
466- shmem.o
467+ shmem.o mempool.o
468
469 obj-$(CONFIG_HIGHMEM) += highmem.o
470
471diff -Naur linux-2002-09-16/mm/mempool.c evms-2002-09-16/mm/mempool.c
472--- linux-2002-09-16/mm/mempool.c Wed Dec 31 18:00:00 1969
473+++ evms-2002-09-16/mm/mempool.c Wed Jun 19 09:47:16 2002
474@@ -0,0 +1,273 @@
475+/*
476+ * linux/mm/mempool.c
477+ *
478+ * memory buffer pool support. Such pools are mostly used
479+ * for guaranteed, deadlock-free memory allocations during
480+ * extreme VM load.
481+ *
482+ * started by Ingo Molnar, Copyright (C) 2001
483+ */
484+
485+#include <linux/mm.h>
486+#include <linux/slab.h>
487+#include <linux/module.h>
488+#include <linux/compiler.h>
489+#include <linux/mempool.h>
490+#include <linux/fs.h>
491+
492+#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
493+
494+static void add_element(mempool_t *pool, void *element)
495+{
496+ BUG_ON(pool->curr_nr >= pool->min_nr);
497+ pool->elements[pool->curr_nr++] = element;
498+}
499+
500+static void *remove_element(mempool_t *pool)
501+{
502+ BUG_ON(pool->curr_nr <= 0);
503+ return pool->elements[--pool->curr_nr];
504+}
505+
506+static void free_pool(mempool_t *pool)
507+{
508+ while (pool->curr_nr) {
509+ void *element = remove_element(pool);
510+ pool->free(element, pool->pool_data);
511+ }
512+ kfree(pool->elements);
513+ kfree(pool);
514+}
515+
516+/**
517+ * mempool_create - create a memory pool
518+ * @min_nr: the minimum number of elements guaranteed to be
519+ * allocated for this pool.
520+ * @alloc_fn: user-defined element-allocation function.
521+ * @free_fn: user-defined element-freeing function.
522+ * @pool_data: optional private data available to the user-defined functions.
523+ *
524+ * this function creates and allocates a guaranteed size, preallocated
525+ * memory pool. The pool can be used from the mempool_alloc and mempool_free
526+ * functions. This function might sleep. Both the alloc_fn() and the free_fn()
527+ * functions might sleep - as long as the mempool_alloc function is not called
528+ * from IRQ contexts.
529+ */
530+mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
531+ mempool_free_t *free_fn, void *pool_data)
532+{
533+ mempool_t *pool;
534+
535+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
536+ if (!pool)
537+ return NULL;
538+ memset(pool, 0, sizeof(*pool));
539+ pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL);
540+ if (!pool->elements) {
541+ kfree(pool);
542+ return NULL;
543+ }
544+ spin_lock_init(&pool->lock);
545+ pool->min_nr = min_nr;
546+ pool->pool_data = pool_data;
547+ init_waitqueue_head(&pool->wait);
548+ pool->alloc = alloc_fn;
549+ pool->free = free_fn;
550+
551+ /*
552+ * First pre-allocate the guaranteed number of buffers.
553+ */
554+ while (pool->curr_nr < pool->min_nr) {
555+ void *element;
556+
557+ element = pool->alloc(GFP_KERNEL, pool->pool_data);
558+ if (unlikely(!element)) {
559+ free_pool(pool);
560+ return NULL;
561+ }
562+ add_element(pool, element);
563+ }
564+ return pool;
565+}
566+
567+/**
568+ * mempool_resize - resize an existing memory pool
569+ * @pool: pointer to the memory pool which was allocated via
570+ * mempool_create().
571+ * @new_min_nr: the new minimum number of elements guaranteed to be
572+ * allocated for this pool.
573+ * @gfp_mask: the usual allocation bitmask.
574+ *
575+ * This function shrinks/grows the pool. In the case of growing,
576+ * it cannot be guaranteed that the pool will be grown to the new
577+ * size immediately, but new mempool_free() calls will refill it.
578+ *
579+ * Note, the caller must guarantee that no mempool_destroy is called
580+ * while this function is running. mempool_alloc() & mempool_free()
581+ * might be called (eg. from IRQ contexts) while this function executes.
582+ */
583+int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask)
584+{
585+ void *element;
586+ void **new_elements;
587+ unsigned long flags;
588+
589+ BUG_ON(new_min_nr <= 0);
590+
591+ spin_lock_irqsave(&pool->lock, flags);
592+ if (new_min_nr < pool->min_nr) {
593+ while (pool->curr_nr > new_min_nr) {
594+ element = remove_element(pool);
595+ spin_unlock_irqrestore(&pool->lock, flags);
596+ pool->free(element, pool->pool_data);
597+ spin_lock_irqsave(&pool->lock, flags);
598+ }
599+ pool->min_nr = new_min_nr;
600+ goto out_unlock;
601+ }
602+ spin_unlock_irqrestore(&pool->lock, flags);
603+
604+ /* Grow the pool */
605+ new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
606+ if (!new_elements)
607+ return -ENOMEM;
608+
609+ spin_lock_irqsave(&pool->lock, flags);
610+ memcpy(new_elements, pool->elements,
611+ pool->curr_nr * sizeof(*new_elements));
612+ kfree(pool->elements);
613+ pool->elements = new_elements;
614+ pool->min_nr = new_min_nr;
615+
616+ while (pool->curr_nr < pool->min_nr) {
617+ spin_unlock_irqrestore(&pool->lock, flags);
618+ element = pool->alloc(gfp_mask, pool->pool_data);
619+ if (!element)
620+ goto out;
621+ spin_lock_irqsave(&pool->lock, flags);
622+ if (pool->curr_nr < pool->min_nr)
623+ add_element(pool, element);
624+ else
625+ kfree(element); /* Raced */
626+ }
627+out_unlock:
628+ spin_unlock_irqrestore(&pool->lock, flags);
629+out:
630+ return 0;
631+}
632+
633+/**
634+ * mempool_destroy - deallocate a memory pool
635+ * @pool: pointer to the memory pool which was allocated via
636+ * mempool_create().
637+ *
638+ * this function only sleeps if the free_fn() function sleeps. The caller
639+ * has to guarantee that all elements have been returned to the pool (ie:
640+ * freed) prior to calling mempool_destroy().
641+ */
642+void mempool_destroy(mempool_t *pool)
643+{
644+ if (pool->curr_nr != pool->min_nr)
645+ BUG(); /* There were outstanding elements */
646+ free_pool(pool);
647+}
648+
649+/**
650+ * mempool_alloc - allocate an element from a specific memory pool
651+ * @pool: pointer to the memory pool which was allocated via
652+ * mempool_create().
653+ * @gfp_mask: the usual allocation bitmask.
654+ *
655+ * this function only sleeps if the alloc_fn function sleeps or
656+ * returns NULL. Note that due to preallocation, this function
657+ * *never* fails when called from process contexts. (it might
658+ * fail if called from an IRQ context.)
659+ */
660+void * mempool_alloc(mempool_t *pool, int gfp_mask)
661+{
662+ void *element;
663+ unsigned long flags;
664+ int curr_nr;
665+ DECLARE_WAITQUEUE(wait, current);
666+ int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
667+
668+repeat_alloc:
669+ element = pool->alloc(gfp_nowait, pool->pool_data);
670+ if (likely(element != NULL))
671+ return element;
672+
673+ /*
674+ * If the pool is less than 50% full then try harder
675+ * to allocate an element:
676+ */
677+ if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) {
678+ element = pool->alloc(gfp_mask, pool->pool_data);
679+ if (likely(element != NULL))
680+ return element;
681+ }
682+
683+ /*
684+ * Kick the VM at this point.
685+ */
686+ wakeup_bdflush();
687+
688+ spin_lock_irqsave(&pool->lock, flags);
689+ if (likely(pool->curr_nr)) {
690+ element = remove_element(pool);
691+ spin_unlock_irqrestore(&pool->lock, flags);
692+ return element;
693+ }
694+ spin_unlock_irqrestore(&pool->lock, flags);
695+
696+ /* We must not sleep in the GFP_ATOMIC case */
697+ if (gfp_mask == gfp_nowait)
698+ return NULL;
699+
700+ run_task_queue(&tq_disk);
701+
702+ add_wait_queue_exclusive(&pool->wait, &wait);
703+ set_task_state(current, TASK_UNINTERRUPTIBLE);
704+
705+ spin_lock_irqsave(&pool->lock, flags);
706+ curr_nr = pool->curr_nr;
707+ spin_unlock_irqrestore(&pool->lock, flags);
708+
709+ if (!curr_nr)
710+ schedule();
711+
712+ current->state = TASK_RUNNING;
713+ remove_wait_queue(&pool->wait, &wait);
714+
715+ goto repeat_alloc;
716+}
717+
718+/**
719+ * mempool_free - return an element to the pool.
720+ * @element: pool element pointer.
721+ * @pool: pointer to the memory pool which was allocated via
722+ * mempool_create().
723+ *
724+ * this function only sleeps if the free_fn() function sleeps.
725+ */
726+void mempool_free(void *element, mempool_t *pool)
727+{
728+ unsigned long flags;
729+
730+ if (pool->curr_nr < pool->min_nr) {
731+ spin_lock_irqsave(&pool->lock, flags);
732+ if (pool->curr_nr < pool->min_nr) {
733+ add_element(pool, element);
734+ spin_unlock_irqrestore(&pool->lock, flags);
735+ wake_up(&pool->wait);
736+ return;
737+ }
738+ spin_unlock_irqrestore(&pool->lock, flags);
739+ }
740+ pool->free(element, pool->pool_data);
741+}
742+
743+EXPORT_SYMBOL(mempool_create);
744+EXPORT_SYMBOL(mempool_resize);
745+EXPORT_SYMBOL(mempool_destroy);
746+EXPORT_SYMBOL(mempool_alloc);
747+EXPORT_SYMBOL(mempool_free);
This page took 0.212129 seconds and 4 git commands to generate.