--- /dev/null
+diff -urNp --exclude CVS --exclude BitKeeper x-ref/drivers/block/ll_rw_blk.c x/drivers/block/ll_rw_blk.c
+--- x-ref/drivers/block/ll_rw_blk.c 2003-07-17 05:23:59.000000000 +0200
++++ x/drivers/block/ll_rw_blk.c 2003-07-17 05:29:00.000000000 +0200
+@@ -1263,7 +1263,7 @@ void __submit_bh(int rw, struct buffer_h
+ /* fix race condition with wait_on_buffer() */
+ smp_mb(); /* spin_unlock may have inclusive semantics */
+ if (waitqueue_active(&bh->b_wait))
+- wake_up(&bh->b_wait);
++ run_task_queue(&tq_disk);
+
+ put_bh(bh);
+ switch (rw) {
+diff -urNp --exclude CVS --exclude BitKeeper x-ref/mm/filemap.c x/mm/filemap.c
+--- x-ref/mm/filemap.c 2003-07-17 05:23:58.000000000 +0200
++++ x/mm/filemap.c 2003-07-17 05:29:35.000000000 +0200
+@@ -788,7 +788,7 @@ void wakeup_page_waiters(struct page * p
+
+ head = page_waitqueue(page);
+ if (waitqueue_active(head))
+- wake_up(head);
++ sync_page(page);
+ }
+
+ /*
+diff -urNp --exclude CVS --exclude BitKeeper x-ref/mm/swapfile.c x/mm/swapfile.c
+--- x-ref/mm/swapfile.c 2003-07-17 05:23:58.000000000 +0200
++++ x/mm/swapfile.c 2003-07-17 05:24:00.000000000 +0200
+@@ -997,8 +997,10 @@ asmlinkage long sys_swapon(const char *
+ goto bad_swap;
+ }
+
++ get_page(virt_to_page(swap_header));
+ lock_page(virt_to_page(swap_header));
+ rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header);
++ put_page(virt_to_page(swap_header));
+
+ if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
+ swap_header_version = 1;
--- /dev/null
+diff -urN linux-2.4.22/Documentation/Configure.help linux-2.4.22-dm/Documentation/Configure.help
+--- linux-2.4.22/Documentation/Configure.help 2003-09-15 16:54:19.000000000 +0200
++++ linux-2.4.22-dm/Documentation/Configure.help 2003-09-15 17:01:29.000000000 +0200
+@@ -1885,6 +1885,20 @@
+ want), say M here and read <file:Documentation/modules.txt>. The
+ module will be called lvm-mod.o.
+
++Device-mapper support
++CONFIG_BLK_DEV_DM
++ Device-mapper is a low level volume manager. It works by allowing
++ people to specify mappings for ranges of logical sectors. Various
++ mapping types are available, in addition people may write their own
++ modules containing custom mappings if they wish.
++
++ Higher level volume managers such as LVM2 use this driver.
++
++ If you want to compile this as a module, say M here and read
++ <file:Documentation/modules.txt>. The module will be called dm-mod.o.
++
++ If unsure, say N.
++
+ Multiple devices driver support (RAID and LVM)
+ CONFIG_MD
+ Support multiple physical spindles through a single logical device.
+diff -urN linux-2.4.22/MAINTAINERS linux-2.4.22-dm/MAINTAINERS
+--- linux-2.4.22/MAINTAINERS 2003-09-15 16:54:16.000000000 +0200
++++ linux-2.4.22-dm/MAINTAINERS 2003-09-15 17:01:29.000000000 +0200
+@@ -554,6 +554,13 @@
+ W: http://www.debian.org/~dz/i8k/
+ S: Maintained
+
++DEVICE MAPPER
++P: Joe Thornber
++M: dm@uk.sistina.com
++L: linux-LVM@sistina.com
++W: http://www.sistina.com/lvm
++S: Maintained
++
+ DEVICE NUMBER REGISTRY
+ P: H. Peter Anvin
+ M: hpa@zytor.com
+diff -urN linux-2.4.22/arch/mips64/kernel/ioctl32.c linux-2.4.22-dm/arch/mips64/kernel/ioctl32.c
+--- linux-2.4.22/arch/mips64/kernel/ioctl32.c 2003-08-25 13:44:40.000000000 +0200
++++ linux-2.4.22-dm/arch/mips64/kernel/ioctl32.c 2003-09-15 17:01:29.000000000 +0200
+@@ -36,6 +36,7 @@
+ #include <linux/auto_fs4.h>
+ #include <linux/ext2_fs.h>
+ #include <linux/raid/md_u.h>
++#include <linux/dm-ioctl.h>
+ #include <linux/serial.h>
+
+ #include <scsi/scsi.h>
+@@ -1228,6 +1229,22 @@
+ IOCTL32_DEFAULT(SBPROF_ZBWAITFULL),
+ #endif /* CONFIG_SIBYTE_TBPROF */
+
++#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
++ IOCTL32_DEFAULT(DM_VERSION),
++ IOCTL32_DEFAULT(DM_REMOVE_ALL),
++ IOCTL32_DEFAULT(DM_DEV_CREATE),
++ IOCTL32_DEFAULT(DM_DEV_REMOVE),
++ IOCTL32_DEFAULT(DM_TABLE_LOAD),
++ IOCTL32_DEFAULT(DM_DEV_SUSPEND),
++ IOCTL32_DEFAULT(DM_DEV_RENAME),
++ IOCTL32_DEFAULT(DM_TABLE_DEPS),
++ IOCTL32_DEFAULT(DM_DEV_STATUS),
++ IOCTL32_DEFAULT(DM_TABLE_STATUS),
++ IOCTL32_DEFAULT(DM_DEV_WAIT),
++ IOCTL32_DEFAULT(DM_LIST_DEVICES),
++ IOCTL32_DEFAULT(DM_TABLE_CLEAR),
++#endif /* CONFIG_BLK_DEV_DM */
++
+ IOCTL32_DEFAULT(MTIOCTOP), /* mtio.h ioctls */
+ IOCTL32_HANDLER(MTIOCGET32, mt_ioctl_trans),
+ IOCTL32_HANDLER(MTIOCPOS32, mt_ioctl_trans),
+diff -urN linux-2.4.22/arch/parisc/kernel/ioctl32.c linux-2.4.22-dm/arch/parisc/kernel/ioctl32.c
+--- linux-2.4.22/arch/parisc/kernel/ioctl32.c 2003-08-25 13:44:40.000000000 +0200
++++ linux-2.4.22-dm/arch/parisc/kernel/ioctl32.c 2003-09-15 17:01:29.000000000 +0200
+@@ -55,6 +55,7 @@
+ #define max max */
+ #include <linux/lvm.h>
+ #endif /* LVM */
++#include <linux/dm-ioctl.h>
+
+ #include <scsi/scsi.h>
+ /* Ugly hack. */
+@@ -3423,6 +3424,22 @@
+ COMPATIBLE_IOCTL(LV_BMAP)
+ COMPATIBLE_IOCTL(LV_SNAPSHOT_USE_RATE)
+ #endif /* LVM */
++/* Device-Mapper */
++#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
++COMPATIBLE_IOCTL(DM_VERSION)
++COMPATIBLE_IOCTL(DM_REMOVE_ALL)
++COMPATIBLE_IOCTL(DM_DEV_CREATE)
++COMPATIBLE_IOCTL(DM_DEV_REMOVE)
++COMPATIBLE_IOCTL(DM_TABLE_LOAD)
++COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
++COMPATIBLE_IOCTL(DM_DEV_RENAME)
++COMPATIBLE_IOCTL(DM_TABLE_DEPS)
++COMPATIBLE_IOCTL(DM_DEV_STATUS)
++COMPATIBLE_IOCTL(DM_TABLE_STATUS)
++COMPATIBLE_IOCTL(DM_DEV_WAIT)
++COMPATIBLE_IOCTL(DM_LIST_DEVICES)
++COMPATIBLE_IOCTL(DM_TABLE_CLEAR)
++#endif /* CONFIG_BLK_DEV_DM */
+ #if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
+ COMPATIBLE_IOCTL(DRM_IOCTL_GET_MAGIC)
+ COMPATIBLE_IOCTL(DRM_IOCTL_IRQ_BUSID)
+diff -urN linux-2.4.22/arch/ppc64/kernel/ioctl32.c linux-2.4.22-dm/arch/ppc64/kernel/ioctl32.c
+--- linux-2.4.22/arch/ppc64/kernel/ioctl32.c 2003-08-25 13:44:40.000000000 +0200
++++ linux-2.4.22-dm/arch/ppc64/kernel/ioctl32.c 2003-09-15 17:01:29.000000000 +0200
+@@ -66,6 +66,7 @@
+ #if defined(CONFIG_BLK_DEV_LVM) || defined(CONFIG_BLK_DEV_LVM_MODULE)
+ #include <linux/lvm.h>
+ #endif /* LVM */
++#include <linux/dm-ioctl.h>
+
+ #include <scsi/scsi.h>
+ /* Ugly hack. */
+@@ -4435,6 +4436,22 @@
+ COMPATIBLE_IOCTL(NBD_PRINT_DEBUG),
+ COMPATIBLE_IOCTL(NBD_SET_SIZE_BLOCKS),
+ COMPATIBLE_IOCTL(NBD_DISCONNECT),
++/* device-mapper */
++#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
++COMPATIBLE_IOCTL(DM_VERSION),
++COMPATIBLE_IOCTL(DM_REMOVE_ALL),
++COMPATIBLE_IOCTL(DM_DEV_CREATE),
++COMPATIBLE_IOCTL(DM_DEV_REMOVE),
++COMPATIBLE_IOCTL(DM_TABLE_LOAD),
++COMPATIBLE_IOCTL(DM_DEV_SUSPEND),
++COMPATIBLE_IOCTL(DM_DEV_RENAME),
++COMPATIBLE_IOCTL(DM_TABLE_DEPS),
++COMPATIBLE_IOCTL(DM_DEV_STATUS),
++COMPATIBLE_IOCTL(DM_TABLE_STATUS),
++COMPATIBLE_IOCTL(DM_DEV_WAIT),
++COMPATIBLE_IOCTL(DM_LIST_DEVICES),
++COMPATIBLE_IOCTL(DM_TABLE_CLEAR),
++#endif /* CONFIG_BLK_DEV_DM */
+ /* Remove *PRIVATE in 2.5 */
+ COMPATIBLE_IOCTL(SIOCDEVPRIVATE),
+ COMPATIBLE_IOCTL(SIOCDEVPRIVATE+1),
+diff -urN linux-2.4.22/arch/s390x/kernel/ioctl32.c linux-2.4.22-dm/arch/s390x/kernel/ioctl32.c
+--- linux-2.4.22/arch/s390x/kernel/ioctl32.c 2003-08-25 13:44:40.000000000 +0200
++++ linux-2.4.22-dm/arch/s390x/kernel/ioctl32.c 2003-09-15 17:01:29.000000000 +0200
+@@ -30,6 +30,7 @@
+ #include <linux/blk.h>
+ #include <linux/elevator.h>
+ #include <linux/raw.h>
++#include <linux/dm-ioctl.h>
+ #include <asm/types.h>
+ #include <asm/uaccess.h>
+ #include <asm/dasd.h>
+@@ -627,6 +628,20 @@
+
+ IOCTL32_DEFAULT(SIOCGSTAMP),
+
++ IOCTL32_DEFAULT(DM_VERSION),
++ IOCTL32_DEFAULT(DM_REMOVE_ALL),
++ IOCTL32_DEFAULT(DM_DEV_CREATE),
++ IOCTL32_DEFAULT(DM_DEV_REMOVE),
++ IOCTL32_DEFAULT(DM_TABLE_LOAD),
++ IOCTL32_DEFAULT(DM_DEV_SUSPEND),
++ IOCTL32_DEFAULT(DM_DEV_RENAME),
++ IOCTL32_DEFAULT(DM_TABLE_DEPS),
++ IOCTL32_DEFAULT(DM_DEV_STATUS),
++ IOCTL32_DEFAULT(DM_TABLE_STATUS),
++ IOCTL32_DEFAULT(DM_DEV_WAIT),
++ IOCTL32_DEFAULT(DM_LIST_DEVICES),
++ IOCTL32_DEFAULT(DM_TABLE_CLEAR),
++
+ IOCTL32_DEFAULT(LOOP_SET_FD),
+ IOCTL32_DEFAULT(LOOP_CLR_FD),
+
+diff -urN linux-2.4.22/arch/sparc64/kernel/ioctl32.c linux-2.4.22-dm/arch/sparc64/kernel/ioctl32.c
+--- linux-2.4.22/arch/sparc64/kernel/ioctl32.c 2003-08-25 13:44:40.000000000 +0200
++++ linux-2.4.22-dm/arch/sparc64/kernel/ioctl32.c 2003-09-15 17:01:29.000000000 +0200
+@@ -56,6 +56,7 @@
+ #if defined(CONFIG_BLK_DEV_LVM) || defined(CONFIG_BLK_DEV_LVM_MODULE)
+ #include <linux/lvm.h>
+ #endif /* LVM */
++#include <linux/dm-ioctl.h>
+
+ #include <scsi/scsi.h>
+ /* Ugly hack. */
+@@ -5086,6 +5087,22 @@
+ COMPATIBLE_IOCTL(NBD_PRINT_DEBUG)
+ COMPATIBLE_IOCTL(NBD_SET_SIZE_BLOCKS)
+ COMPATIBLE_IOCTL(NBD_DISCONNECT)
++/* device-mapper */
++#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
++COMPATIBLE_IOCTL(DM_VERSION)
++COMPATIBLE_IOCTL(DM_REMOVE_ALL)
++COMPATIBLE_IOCTL(DM_DEV_CREATE)
++COMPATIBLE_IOCTL(DM_DEV_REMOVE)
++COMPATIBLE_IOCTL(DM_TABLE_LOAD)
++COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
++COMPATIBLE_IOCTL(DM_DEV_RENAME)
++COMPATIBLE_IOCTL(DM_TABLE_DEPS)
++COMPATIBLE_IOCTL(DM_DEV_STATUS)
++COMPATIBLE_IOCTL(DM_TABLE_STATUS)
++COMPATIBLE_IOCTL(DM_DEV_WAIT)
++COMPATIBLE_IOCTL(DM_LIST_DEVICES)
++COMPATIBLE_IOCTL(DM_TABLE_CLEAR)
++#endif /* CONFIG_BLK_DEV_DM */
+ /* Linux-1394 */
+ #if defined(CONFIG_IEEE1394) || defined(CONFIG_IEEE1394_MODULE)
+ COMPATIBLE_IOCTL(AMDTP_IOC_CHANNEL)
+diff -urN linux-2.4.22/arch/x86_64/ia32/ia32_ioctl.c linux-2.4.22-dm/arch/x86_64/ia32/ia32_ioctl.c
+--- linux-2.4.22/arch/x86_64/ia32/ia32_ioctl.c 2003-08-25 13:44:40.000000000 +0200
++++ linux-2.4.22-dm/arch/x86_64/ia32/ia32_ioctl.c 2003-09-15 17:01:29.000000000 +0200
+@@ -67,6 +67,7 @@
+ #define max max
+ #include <linux/lvm.h>
+ #endif /* LVM */
++#include <linux/dm-ioctl.h>
+
+ #include <scsi/scsi.h>
+ /* Ugly hack. */
+@@ -4047,6 +4048,22 @@
+ COMPATIBLE_IOCTL(LV_BMAP)
+ COMPATIBLE_IOCTL(LV_SNAPSHOT_USE_RATE)
+ #endif /* LVM */
++/* Device-Mapper */
++#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
++COMPATIBLE_IOCTL(DM_VERSION)
++COMPATIBLE_IOCTL(DM_REMOVE_ALL)
++COMPATIBLE_IOCTL(DM_DEV_CREATE)
++COMPATIBLE_IOCTL(DM_DEV_REMOVE)
++COMPATIBLE_IOCTL(DM_TABLE_LOAD)
++COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
++COMPATIBLE_IOCTL(DM_DEV_RENAME)
++COMPATIBLE_IOCTL(DM_TABLE_DEPS)
++COMPATIBLE_IOCTL(DM_DEV_STATUS)
++COMPATIBLE_IOCTL(DM_TABLE_STATUS)
++COMPATIBLE_IOCTL(DM_DEV_WAIT)
++COMPATIBLE_IOCTL(DM_LIST_DEVICES)
++COMPATIBLE_IOCTL(DM_TABLE_CLEAR)
++#endif /* CONFIG_BLK_DEV_DM */
+ #ifdef CONFIG_AUTOFS_FS
+ COMPATIBLE_IOCTL(AUTOFS_IOC_READY)
+ COMPATIBLE_IOCTL(AUTOFS_IOC_FAIL)
+diff -urN linux-2.4.22/drivers/md/Config.in linux-2.4.22-dm/drivers/md/Config.in
+--- linux-2.4.22/drivers/md/Config.in 2001-09-14 23:22:18.000000000 +0200
++++ linux-2.4.22-dm/drivers/md/Config.in 2003-09-15 17:02:48.000000000 +0200
+@@ -14,5 +14,7 @@
+ dep_tristate ' Multipath I/O support' CONFIG_MD_MULTIPATH $CONFIG_BLK_DEV_MD
+
+ dep_tristate ' Logical volume manager (LVM) support' CONFIG_BLK_DEV_LVM $CONFIG_MD
++dep_tristate ' Device-mapper support' CONFIG_BLK_DEV_DM $CONFIG_MD
++dep_tristate ' Mirror (RAID-1) support' CONFIG_BLK_DEV_DM_MIRROR $CONFIG_BLK_DEV_DM
+
+ endmenu
+diff -urN linux-2.4.22/drivers/md/Makefile linux-2.4.22-dm/drivers/md/Makefile
+--- linux-2.4.22/drivers/md/Makefile 2001-11-11 19:09:32.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/Makefile 2003-09-15 17:01:29.000000000 +0200
+@@ -4,24 +4,41 @@
+
+ O_TARGET := mddev.o
+
+-export-objs := md.o xor.o
+-list-multi := lvm-mod.o
++export-objs := md.o xor.o dm-table.o dm-target.o kcopyd.o dm-daemon.o \
++ dm-log.o dm-io.o dm.o
++
++list-multi := lvm-mod.o dm-mod.o dm-mirror-mod.o
+ lvm-mod-objs := lvm.o lvm-snap.o lvm-fs.o
++dm-mod-objs := dm.o dm-table.o dm-target.o dm-ioctl.o \
++ dm-linear.o dm-stripe.o dm-snapshot.o dm-exception-store.o \
++ kcopyd.o dm-daemon.o dm-io.o
++dm-mirror-mod-objs := dm-raid1.o dm-log.o
+
+ # Note: link order is important. All raid personalities
+ # and xor.o must come before md.o, as they each initialise
+ # themselves, and md.o may use the personalities when it
+ # auto-initialised.
+
+-obj-$(CONFIG_MD_LINEAR) += linear.o
+-obj-$(CONFIG_MD_RAID0) += raid0.o
+-obj-$(CONFIG_MD_RAID1) += raid1.o
+-obj-$(CONFIG_MD_RAID5) += raid5.o xor.o
+-obj-$(CONFIG_MD_MULTIPATH) += multipath.o
+-obj-$(CONFIG_BLK_DEV_MD) += md.o
+-obj-$(CONFIG_BLK_DEV_LVM) += lvm-mod.o
++obj-$(CONFIG_MD_LINEAR) += linear.o
++obj-$(CONFIG_MD_RAID0) += raid0.o
++obj-$(CONFIG_MD_RAID1) += raid1.o
++obj-$(CONFIG_MD_RAID5) += raid5.o xor.o
++obj-$(CONFIG_MD_MULTIPATH) += multipath.o
++obj-$(CONFIG_BLK_DEV_MD) += md.o
++
++obj-$(CONFIG_BLK_DEV_LVM) += lvm-mod.o
++
++obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
++obj-$(CONFIG_BLK_DEV_DM_MIRROR) += dm-mirror.o
+
+ include $(TOPDIR)/Rules.make
+
+ lvm-mod.o: $(lvm-mod-objs)
+ $(LD) -r -o $@ $(lvm-mod-objs)
++
++dm-mod.o: $(dm-mod-objs)
++ $(LD) -r -o $@ $(dm-mod-objs)
++
++dm-mirror.o: $(dm-mirror-mod-objs)
++ $(LD) -r -o $@ $(dm-mirror-mod-objs)
++
+diff -urN linux-2.4.22/drivers/md/dm-daemon.c linux-2.4.22-dm/drivers/md/dm-daemon.c
+--- linux-2.4.22/drivers/md/dm-daemon.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-daemon.c 2003-09-15 17:02:37.000000000 +0200
+@@ -0,0 +1,113 @@
++/*
++ * Copyright (C) 2003 Sistina Software
++ *
++ * This file is released under the LGPL.
++ */
++
++#include "dm.h"
++#include "dm-daemon.h"
++
++#include <linux/module.h>
++#include <linux/sched.h>
++
++static int daemon(void *arg)
++{
++ struct dm_daemon *dd = (struct dm_daemon *) arg;
++ DECLARE_WAITQUEUE(wq, current);
++
++ daemonize();
++ reparent_to_init();
++
++ /* block all signals */
+	spin_lock_irq(&current->sigmask_lock);
+	sigfillset(&current->blocked);
++ flush_signals(current);
+	spin_unlock_irq(&current->sigmask_lock);
++
++ strcpy(current->comm, dd->name);
++ atomic_set(&dd->please_die, 0);
++
++ add_wait_queue(&dd->job_queue, &wq);
++
++ down(&dd->run_lock);
++ up(&dd->start_lock);
++
++ /*
++ * dd->fn() could do anything, very likely it will
++ * suspend. So we can't set the state to
++ * TASK_INTERRUPTIBLE before calling it. In order to
++ * prevent a race with a waking thread we do this little
++ * dance with the dd->woken variable.
++ */
++ while (1) {
++ do {
++ set_current_state(TASK_RUNNING);
++
++ if (atomic_read(&dd->please_die))
++ goto out;
++
++ atomic_set(&dd->woken, 0);
++ dd->fn();
++ yield();
++
++ set_current_state(TASK_INTERRUPTIBLE);
++ } while (atomic_read(&dd->woken));
++
++ schedule();
++ }
++
++ out:
++ remove_wait_queue(&dd->job_queue, &wq);
++ up(&dd->run_lock);
++ return 0;
++}
++
++int dm_daemon_start(struct dm_daemon *dd, const char *name, void (*fn)(void))
++{
++ pid_t pid = 0;
++
++ /*
++ * Initialise the dm_daemon.
++ */
++ dd->fn = fn;
++ strncpy(dd->name, name, sizeof(dd->name) - 1);
++ sema_init(&dd->start_lock, 1);
++ sema_init(&dd->run_lock, 1);
++ init_waitqueue_head(&dd->job_queue);
++
++ /*
++ * Start the new thread.
++ */
++ down(&dd->start_lock);
++ pid = kernel_thread(daemon, dd, 0);
++ if (pid <= 0) {
++ DMERR("Failed to start %s thread", name);
++ return -EAGAIN;
++ }
++
++ /*
++ * wait for the daemon to up this mutex.
++ */
++ down(&dd->start_lock);
++ up(&dd->start_lock);
++
++ return 0;
++}
++
++void dm_daemon_stop(struct dm_daemon *dd)
++{
++ atomic_set(&dd->please_die, 1);
++ dm_daemon_wake(dd);
++ down(&dd->run_lock);
++ up(&dd->run_lock);
++}
++
++void dm_daemon_wake(struct dm_daemon *dd)
++{
++ atomic_set(&dd->woken, 1);
++ wake_up_interruptible(&dd->job_queue);
++}
++
++EXPORT_SYMBOL(dm_daemon_start);
++EXPORT_SYMBOL(dm_daemon_stop);
++EXPORT_SYMBOL(dm_daemon_wake);
+diff -urN linux-2.4.22/drivers/md/dm-daemon.h linux-2.4.22-dm/drivers/md/dm-daemon.h
+--- linux-2.4.22/drivers/md/dm-daemon.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-daemon.h 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,29 @@
++/*
++ * Copyright (C) 2003 Sistina Software
++ *
++ * This file is released under the LGPL.
++ */
++
++#ifndef DM_DAEMON_H
++#define DM_DAEMON_H
++
++#include <asm/atomic.h>
++#include <asm/semaphore.h>
++
++struct dm_daemon {
++ void (*fn)(void);
++ char name[16];
++ atomic_t please_die;
++ struct semaphore start_lock;
++ struct semaphore run_lock;
++
++ atomic_t woken;
++ wait_queue_head_t job_queue;
++};
++
++int dm_daemon_start(struct dm_daemon *dd, const char *name, void (*fn)(void));
++void dm_daemon_stop(struct dm_daemon *dd);
++void dm_daemon_wake(struct dm_daemon *dd);
++int dm_daemon_running(struct dm_daemon *dd);
++
++#endif
+diff -urN linux-2.4.22/drivers/md/dm-exception-store.c linux-2.4.22-dm/drivers/md/dm-exception-store.c
+--- linux-2.4.22/drivers/md/dm-exception-store.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-exception-store.c 2003-09-15 17:02:01.000000000 +0200
+@@ -0,0 +1,673 @@
++/*
++ * dm-snapshot.c
++ *
++ * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#include "dm-snapshot.h"
++#include "dm-io.h"
++#include "kcopyd.h"
++
++#include <linux/mm.h>
++#include <linux/pagemap.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++
++/*-----------------------------------------------------------------
++ * Persistent snapshots, by persistent we mean that the snapshot
++ * will survive a reboot.
++ *---------------------------------------------------------------*/
++
++/*
++ * We need to store a record of which parts of the origin have
++ * been copied to the snapshot device. The snapshot code
++ * requires that we copy exception chunks to chunk aligned areas
++ * of the COW store. It makes sense therefore, to store the
++ * metadata in chunk size blocks.
++ *
++ * There is no backward or forward compatibility implemented,
++ * snapshots with different disk versions than the kernel will
++ * not be usable. It is expected that "lvcreate" will blank out
++ * the start of a fresh COW device before calling the snapshot
++ * constructor.
++ *
++ * The first chunk of the COW device just contains the header.
++ * After this there is a chunk filled with exception metadata,
++ * followed by as many exception chunks as can fit in the
++ * metadata areas.
++ *
++ * All on disk structures are in little-endian format. The end
++ * of the exceptions info is indicated by an exception with a
++ * new_chunk of 0, which is invalid since it would point to the
++ * header chunk.
++ */
++
++/*
++ * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
++ */
++#define SNAP_MAGIC 0x70416e53
++
++/*
++ * The on-disk version of the metadata.
++ */
++#define SNAPSHOT_DISK_VERSION 1
++
++struct disk_header {
++ uint32_t magic;
++
++ /*
++ * Is this snapshot valid. There is no way of recovering
++ * an invalid snapshot.
++ */
++ uint32_t valid;
++
++ /*
++ * Simple, incrementing version. no backward
++ * compatibility.
++ */
++ uint32_t version;
++
++ /* In sectors */
++ uint32_t chunk_size;
++};
++
++struct disk_exception {
++ uint64_t old_chunk;
++ uint64_t new_chunk;
++};
++
++struct commit_callback {
++ void (*callback)(void *, int success);
++ void *context;
++};
++
++/*
++ * The top level structure for a persistent exception store.
++ */
++struct pstore {
++ struct dm_snapshot *snap; /* up pointer to my snapshot */
++ int version;
++ int valid;
++ uint32_t chunk_size;
++ uint32_t exceptions_per_area;
++
++ /*
++ * Now that we have an asynchronous kcopyd there is no
++ * need for large chunk sizes, so it wont hurt to have a
++ * whole chunks worth of metadata in memory at once.
++ */
++ void *area;
++
++ /*
++ * Used to keep track of which metadata area the data in
++ * 'chunk' refers to.
++ */
++ uint32_t current_area;
++
++ /*
++ * The next free chunk for an exception.
++ */
++ uint32_t next_free;
++
++ /*
++ * The index of next free exception in the current
++ * metadata area.
++ */
++ uint32_t current_committed;
++
++ atomic_t pending_count;
++ uint32_t callback_count;
++ struct commit_callback *callbacks;
++};
++
++static inline unsigned int sectors_to_pages(unsigned int sectors)
++{
++ return sectors / (PAGE_SIZE / SECTOR_SIZE);
++}
++
++static int alloc_area(struct pstore *ps)
++{
++ int r = -ENOMEM;
++ size_t i, len, nr_pages;
++ struct page *page, *last = NULL;
++
++ len = ps->chunk_size << SECTOR_SHIFT;
++
++ /*
++ * Allocate the chunk_size block of memory that will hold
++ * a single metadata area.
++ */
++ ps->area = vmalloc(len);
++ if (!ps->area)
++ return r;
++
++ nr_pages = sectors_to_pages(ps->chunk_size);
++
++ /*
++ * We lock the pages for ps->area into memory since
++ * they'll be doing a lot of io. We also chain them
++ * together ready for dm-io.
++ */
++ for (i = 0; i < nr_pages; i++) {
++ page = vmalloc_to_page(ps->area + (i * PAGE_SIZE));
++ LockPage(page);
++ if (last)
++ last->list.next = &page->list;
++ last = page;
++ }
++
++ return 0;
++}
++
++static void free_area(struct pstore *ps)
++{
++ size_t i, nr_pages;
++ struct page *page;
++
++ nr_pages = sectors_to_pages(ps->chunk_size);
++ for (i = 0; i < nr_pages; i++) {
++ page = vmalloc_to_page(ps->area + (i * PAGE_SIZE));
++ page->list.next = NULL;
++ UnlockPage(page);
++ }
++
++ vfree(ps->area);
++}
++
++/*
++ * Read or write a chunk aligned and sized block of data from a device.
++ */
++static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
++{
++ struct io_region where;
++ unsigned int bits;
++
++ where.dev = ps->snap->cow->dev;
++ where.sector = ps->chunk_size * chunk;
++ where.count = ps->chunk_size;
++
++ return dm_io_sync(1, &where, rw, vmalloc_to_page(ps->area), 0, &bits);
++}
++
++/*
++ * Read or write a metadata area. Remembering to skip the first
++ * chunk which holds the header.
++ */
++static int area_io(struct pstore *ps, uint32_t area, int rw)
++{
++ int r;
++ uint32_t chunk;
++
++ /* convert a metadata area index to a chunk index */
++ chunk = 1 + ((ps->exceptions_per_area + 1) * area);
++
++ r = chunk_io(ps, chunk, rw);
++ if (r)
++ return r;
++
++ ps->current_area = area;
++ return 0;
++}
++
++static int zero_area(struct pstore *ps, uint32_t area)
++{
++ memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT);
++ return area_io(ps, area, WRITE);
++}
++
++static int read_header(struct pstore *ps, int *new_snapshot)
++{
++ int r;
++ struct disk_header *dh;
++
++ r = chunk_io(ps, 0, READ);
++ if (r)
++ return r;
++
++ dh = (struct disk_header *) ps->area;
++
++ if (le32_to_cpu(dh->magic) == 0) {
++ *new_snapshot = 1;
++
++ } else if (le32_to_cpu(dh->magic) == SNAP_MAGIC) {
++ *new_snapshot = 0;
++ ps->valid = le32_to_cpu(dh->valid);
++ ps->version = le32_to_cpu(dh->version);
++ ps->chunk_size = le32_to_cpu(dh->chunk_size);
++
++ } else {
++ DMWARN("Invalid/corrupt snapshot");
++ r = -ENXIO;
++ }
++
++ return r;
++}
++
++static int write_header(struct pstore *ps)
++{
++ struct disk_header *dh;
++
++ memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT);
++
++ dh = (struct disk_header *) ps->area;
++ dh->magic = cpu_to_le32(SNAP_MAGIC);
++ dh->valid = cpu_to_le32(ps->valid);
++ dh->version = cpu_to_le32(ps->version);
++ dh->chunk_size = cpu_to_le32(ps->chunk_size);
++
++ return chunk_io(ps, 0, WRITE);
++}
++
++/*
++ * Access functions for the disk exceptions, these do the endian conversions.
++ */
++static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
++{
++ if (index >= ps->exceptions_per_area)
++ return NULL;
++
++ return ((struct disk_exception *) ps->area) + index;
++}
++
++static int read_exception(struct pstore *ps,
++ uint32_t index, struct disk_exception *result)
++{
++ struct disk_exception *e;
++
++ e = get_exception(ps, index);
++ if (!e)
++ return -EINVAL;
++
++ /* copy it */
++ result->old_chunk = le64_to_cpu(e->old_chunk);
++ result->new_chunk = le64_to_cpu(e->new_chunk);
++
++ return 0;
++}
++
++static int write_exception(struct pstore *ps,
++ uint32_t index, struct disk_exception *de)
++{
++ struct disk_exception *e;
++
++ e = get_exception(ps, index);
++ if (!e)
++ return -EINVAL;
++
++ /* copy it */
++ e->old_chunk = cpu_to_le64(de->old_chunk);
++ e->new_chunk = cpu_to_le64(de->new_chunk);
++
++ return 0;
++}
++
++/*
++ * Registers the exceptions that are present in the current area.
++ * 'full' is filled in to indicate if the area has been
++ * filled.
++ */
++static int insert_exceptions(struct pstore *ps, int *full)
++{
++ int r;
++ unsigned int i;
++ struct disk_exception de;
++
++ /* presume the area is full */
++ *full = 1;
++
++ for (i = 0; i < ps->exceptions_per_area; i++) {
++ r = read_exception(ps, i, &de);
++
++ if (r)
++ return r;
++
++ /*
++ * If the new_chunk is pointing at the start of
++ * the COW device, where the first metadata area
++ * is we know that we've hit the end of the
++ * exceptions. Therefore the area is not full.
++ */
++ if (de.new_chunk == 0LL) {
++ ps->current_committed = i;
++ *full = 0;
++ break;
++ }
++
++ /*
++ * Keep track of the start of the free chunks.
++ */
++ if (ps->next_free <= de.new_chunk)
++ ps->next_free = de.new_chunk + 1;
++
++ /*
++ * Otherwise we add the exception to the snapshot.
++ */
++ r = dm_add_exception(ps->snap, de.old_chunk, de.new_chunk);
++ if (r)
++ return r;
++ }
++
++ return 0;
++}
++
++static int read_exceptions(struct pstore *ps)
++{
++ uint32_t area;
++ int r, full = 1;
++
++ /*
++ * Keeping reading chunks and inserting exceptions until
++ * we find a partially full area.
++ */
++ for (area = 0; full; area++) {
++ r = area_io(ps, area, READ);
++ if (r)
++ return r;
++
++ r = insert_exceptions(ps, &full);
++ if (r)
++ return r;
++ }
++
++ return 0;
++}
++
++static inline struct pstore *get_info(struct exception_store *store)
++{
++ return (struct pstore *) store->context;
++}
++
++static void persistent_fraction_full(struct exception_store *store,
++ sector_t *numerator, sector_t *denominator)
++{
++ *numerator = get_info(store)->next_free * store->snap->chunk_size;
++ *denominator = get_dev_size(store->snap->cow->dev);
++}
++
++static void persistent_destroy(struct exception_store *store)
++{
++ struct pstore *ps = get_info(store);
++
++ dm_io_put(sectors_to_pages(ps->chunk_size));
++ vfree(ps->callbacks);
++ free_area(ps);
++ kfree(ps);
++}
++
++static int persistent_read_metadata(struct exception_store *store)
++{
++ int r, new_snapshot;
++ struct pstore *ps = get_info(store);
++
++ /*
++ * Read the snapshot header.
++ */
++ r = read_header(ps, &new_snapshot);
++ if (r)
++ return r;
++
++ /*
++ * Do we need to setup a new snapshot ?
++ */
++ if (new_snapshot) {
++ r = write_header(ps);
++ if (r) {
++ DMWARN("write_header failed");
++ return r;
++ }
++
++ r = zero_area(ps, 0);
++ if (r) {
++ DMWARN("zero_area(0) failed");
++ return r;
++ }
++
++ } else {
++ /*
++ * Sanity checks.
++ */
++ if (!ps->valid) {
++ DMWARN("snapshot is marked invalid");
++ return -EINVAL;
++ }
++
++ if (ps->version != SNAPSHOT_DISK_VERSION) {
++ DMWARN("unable to handle snapshot disk version %d",
++ ps->version);
++ return -EINVAL;
++ }
++
++ /*
++ * Read the metadata.
++ */
++ r = read_exceptions(ps);
++ if (r)
++ return r;
++ }
++
++ return 0;
++}
++
++static int persistent_prepare(struct exception_store *store,
++ struct exception *e)
++{
++ struct pstore *ps = get_info(store);
++ uint32_t stride;
++ sector_t size = get_dev_size(store->snap->cow->dev);
++
++ /* Is there enough room ? */
++ if (size < ((ps->next_free + 1) * store->snap->chunk_size))
++ return -ENOSPC;
++
++ e->new_chunk = ps->next_free;
++
++ /*
++ * Move onto the next free pending, making sure to take
++ * into account the location of the metadata chunks.
++ */
++ stride = (ps->exceptions_per_area + 1);
++ if ((++ps->next_free % stride) == 1)
++ ps->next_free++;
++
++ atomic_inc(&ps->pending_count);
++ return 0;
++}
++
++static void persistent_commit(struct exception_store *store,
++ struct exception *e,
++ void (*callback) (void *, int success),
++ void *callback_context)
++{
++ int r;
++ unsigned int i;
++ struct pstore *ps = get_info(store);
++ struct disk_exception de;
++ struct commit_callback *cb;
++
++ de.old_chunk = e->old_chunk;
++ de.new_chunk = e->new_chunk;
++ write_exception(ps, ps->current_committed++, &de);
++
++ /*
++ * Add the callback to the back of the array. This code
++ * is the only place where the callback array is
++ * manipulated, and we know that it will never be called
++ * multiple times concurrently.
++ */
++ cb = ps->callbacks + ps->callback_count++;
++ cb->callback = callback;
++ cb->context = callback_context;
++
++ /*
++ * If there are no more exceptions in flight, or we have
++ * filled this metadata area we commit the exceptions to
++ * disk.
++ */
++ if (atomic_dec_and_test(&ps->pending_count) ||
++ (ps->current_committed == ps->exceptions_per_area)) {
++ r = area_io(ps, ps->current_area, WRITE);
++ if (r)
++ ps->valid = 0;
++
++ for (i = 0; i < ps->callback_count; i++) {
++ cb = ps->callbacks + i;
++ cb->callback(cb->context, r == 0 ? 1 : 0);
++ }
++
++ ps->callback_count = 0;
++ }
++
++ /*
++ * Have we completely filled the current area ?
++ */
++ if (ps->current_committed == ps->exceptions_per_area) {
++ ps->current_committed = 0;
++ r = zero_area(ps, ps->current_area + 1);
++ if (r)
++ ps->valid = 0;
++ }
++}
++
++static void persistent_drop(struct exception_store *store)
++{
++ struct pstore *ps = get_info(store);
++
++ ps->valid = 0;
++ if (write_header(ps))
++ DMWARN("write header failed");
++}
++
++int dm_create_persistent(struct exception_store *store, uint32_t chunk_size)
++{
++ int r;
++ struct pstore *ps;
++
++ r = dm_io_get(sectors_to_pages(chunk_size));
++ if (r)
++ return r;
++
++ /* allocate the pstore */
++ ps = kmalloc(sizeof(*ps), GFP_KERNEL);
++ if (!ps) {
++ r = -ENOMEM;
++ goto bad;
++ }
++
++ ps->snap = store->snap;
++ ps->valid = 1;
++ ps->version = SNAPSHOT_DISK_VERSION;
++ ps->chunk_size = chunk_size;
++ ps->exceptions_per_area = (chunk_size << SECTOR_SHIFT) /
++ sizeof(struct disk_exception);
++ ps->next_free = 2; /* skipping the header and first area */
++ ps->current_committed = 0;
++
++ r = alloc_area(ps);
++ if (r)
++ goto bad;
++
++ /*
++ * Allocate space for all the callbacks.
++ */
++ ps->callback_count = 0;
++ atomic_set(&ps->pending_count, 0);
++ ps->callbacks = vcalloc(ps->exceptions_per_area,
++ sizeof(*ps->callbacks));
++
++ if (!ps->callbacks) {
++ r = -ENOMEM;
++ goto bad;
++ }
++
++ store->destroy = persistent_destroy;
++ store->read_metadata = persistent_read_metadata;
++ store->prepare_exception = persistent_prepare;
++ store->commit_exception = persistent_commit;
++ store->drop_snapshot = persistent_drop;
++ store->fraction_full = persistent_fraction_full;
++ store->context = ps;
++
++ return 0;
++
++ bad:
++ dm_io_put(sectors_to_pages(chunk_size));
++ if (ps) {
++ if (ps->callbacks)
++ vfree(ps->callbacks);
++
++ kfree(ps);
++ }
++ return r;
++}
++
++/*-----------------------------------------------------------------
++ * Implementation of the store for non-persistent snapshots.
++ *---------------------------------------------------------------*/
++struct transient_c {
++ sector_t next_free;
++};
++
++void transient_destroy(struct exception_store *store)
++{
++ kfree(store->context);
++}
++
++int transient_read_metadata(struct exception_store *store)
++{
++ return 0;
++}
++
++int transient_prepare(struct exception_store *store, struct exception *e)
++{
++ struct transient_c *tc = (struct transient_c *) store->context;
++ sector_t size = get_dev_size(store->snap->cow->dev);
++
++ if (size < (tc->next_free + store->snap->chunk_size))
++ return -1;
++
++ e->new_chunk = sector_to_chunk(store->snap, tc->next_free);
++ tc->next_free += store->snap->chunk_size;
++
++ return 0;
++}
++
++void transient_commit(struct exception_store *store,
++ struct exception *e,
++ void (*callback) (void *, int success),
++ void *callback_context)
++{
++ /* Just succeed */
++ callback(callback_context, 1);
++}
++
++static void transient_fraction_full(struct exception_store *store,
++ sector_t *numerator, sector_t *denominator)
++{
++ *numerator = ((struct transient_c *) store->context)->next_free;
++ *denominator = get_dev_size(store->snap->cow->dev);
++}
++
++int dm_create_transient(struct exception_store *store,
++ struct dm_snapshot *s, int blocksize)
++{
++ struct transient_c *tc;
++
++ memset(store, 0, sizeof(*store));
++ store->destroy = transient_destroy;
++ store->read_metadata = transient_read_metadata;
++ store->prepare_exception = transient_prepare;
++ store->commit_exception = transient_commit;
++ store->fraction_full = transient_fraction_full;
++ store->snap = s;
++
++ tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL);
++ if (!tc)
++ return -ENOMEM;
++
++ tc->next_free = 0;
++ store->context = tc;
++
++ return 0;
++}
+diff -urN linux-2.4.22/drivers/md/dm-io.c linux-2.4.22-dm/drivers/md/dm-io.c
+--- linux-2.4.22/drivers/md/dm-io.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-io.c 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,344 @@
++/*
++ * Copyright (C) 2003 Sistina Software
++ *
++ * This file is released under the GPL.
++ */
++
++#include "dm-io.h"
++
++#include <linux/mempool.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++
++/* FIXME: can we shrink this ? */
++struct io_context {
++ int rw;
++ unsigned int error;
++ atomic_t count;
++ struct task_struct *sleeper;
++ io_notify_fn callback;
++ void *context;
++};
++
++/*
++ * We maintain a pool of buffer heads for dispatching the io.
++ */
++static unsigned int _num_bhs;
++static mempool_t *_buffer_pool;
++
++/*
++ * io contexts are only dynamically allocated for asynchronous
++ * io. Since async io is likely to be the majority of io we'll
++ * have the same number of io contexts as buffer heads ! (FIXME:
++ * must reduce this).
++ */
++mempool_t *_io_pool;
++
++static void *alloc_bh(int gfp_mask, void *pool_data)
++{
++ struct buffer_head *bh;
++
++ bh = kmem_cache_alloc(bh_cachep, gfp_mask);
++ if (bh) {
++ bh->b_reqnext = NULL;
++ init_waitqueue_head(&bh->b_wait);
++ INIT_LIST_HEAD(&bh->b_inode_buffers);
++ }
++
++ return bh;
++}
++
++static void *alloc_io(int gfp_mask, void *pool_data)
++{
++ return kmalloc(sizeof(struct io_context), gfp_mask);
++}
++
++static void free_io(void *element, void *pool_data)
++{
++ kfree(element);
++}
++
++static unsigned int pages_to_buffers(unsigned int pages)
++{
++ return 4 * pages; /* too many ? */
++}
++
++static int resize_pool(unsigned int new_bhs)
++{
++ int r = 0;
++
++ if (_buffer_pool) {
++ if (new_bhs == 0) {
++ /* free off the pools */
++ mempool_destroy(_buffer_pool);
++ mempool_destroy(_io_pool);
++ _buffer_pool = _io_pool = NULL;
++ } else {
++ /* resize the pools */
++ r = mempool_resize(_buffer_pool, new_bhs, GFP_KERNEL);
++ if (!r)
++ r = mempool_resize(_io_pool,
++ new_bhs, GFP_KERNEL);
++ }
++ } else {
++ /* create new pools */
++ _buffer_pool = mempool_create(new_bhs, alloc_bh,
++ mempool_free_slab, bh_cachep);
++ if (!_buffer_pool)
++ r = -ENOMEM;
++
++ _io_pool = mempool_create(new_bhs, alloc_io, free_io, NULL);
++ if (!_io_pool) {
++ mempool_destroy(_buffer_pool);
++ _buffer_pool = NULL;
++ r = -ENOMEM;
++ }
++ }
++
++ if (!r)
++ _num_bhs = new_bhs;
++
++ return r;
++}
++
++int dm_io_get(unsigned int num_pages)
++{
++ return resize_pool(_num_bhs + pages_to_buffers(num_pages));
++}
++
++void dm_io_put(unsigned int num_pages)
++{
++ resize_pool(_num_bhs - pages_to_buffers(num_pages));
++}
++
++/*-----------------------------------------------------------------
++ * We need to keep track of which region a buffer is doing io
++ * for. In order to save a memory allocation we store this in an
++ * unused field of the buffer head, and provide these access
++ * functions.
++ *
++ * FIXME: add compile time check that an unsigned int can fit
++ * into a pointer.
++ *
++ *---------------------------------------------------------------*/
++static inline void bh_set_region(struct buffer_head *bh, unsigned int region)
++{
++ bh->b_journal_head = (void *) region;
++}
++
++static inline int bh_get_region(struct buffer_head *bh)
++{
++ return (unsigned int) bh->b_journal_head;
++}
++
++/*-----------------------------------------------------------------
++ * We need an io object to keep track of the number of bhs that
++ * have been dispatched for a particular io.
++ *---------------------------------------------------------------*/
++static void dec_count(struct io_context *io, unsigned int region, int error)
++{
++ if (error)
++ set_bit(region, &io->error);
++
++ if (atomic_dec_and_test(&io->count)) {
++ if (io->sleeper)
++ wake_up_process(io->sleeper);
++
++ else {
++ int r = io->error;
++ io_notify_fn fn = io->callback;
++ void *context = io->context;
++
++ mempool_free(io, _io_pool);
++ fn(r, context);
++ }
++ }
++}
++
++static void endio(struct buffer_head *bh, int uptodate)
++{
++ struct io_context *io = (struct io_context *) bh->b_private;
++
++ if (!uptodate && io->rw != WRITE) {
++ /*
++ * We need to zero this region, otherwise people
++ * like kcopyd may write the arbitrary contents
++ * of the page.
++ */
++ memset(bh->b_data, 0, bh->b_size);
++ }
++
++ dec_count((struct io_context *) bh->b_private,
++ bh_get_region(bh), !uptodate);
++ mempool_free(bh, _buffer_pool);
++}
++
++/*
++ * Primitives for alignment calculations.
++ */
++int fls(unsigned n)
++{
++ return generic_fls32(n);
++}
++
++static inline int log2_floor(unsigned n)
++{
++ return ffs(n) - 1;
++}
++
++static inline int log2_align(unsigned n)
++{
++ return fls(n) - 1;
++}
++
++/*
++ * Returns the next block for io.
++ */
++static int do_page(kdev_t dev, sector_t *block, sector_t end_block,
++ unsigned int block_size,
++ struct page *p, unsigned int offset,
++ unsigned int region, struct io_context *io)
++{
++ struct buffer_head *bh;
++ sector_t b = *block;
++ sector_t blocks_per_page = PAGE_SIZE / block_size;
++ unsigned int this_size; /* holds the size of the current io */
++ unsigned int len;
++
++ while ((offset < PAGE_SIZE) && (b != end_block)) {
++ bh = mempool_alloc(_buffer_pool, GFP_NOIO);
++ init_buffer(bh, endio, io);
++ bh_set_region(bh, region);
++
++ /*
++ * Block size must be a power of 2 and aligned
++ * correctly.
++ */
++ len = end_block - b;
++ this_size = min((sector_t) 1 << log2_floor(b), blocks_per_page);
++ if (this_size > len)
++ this_size = 1 << log2_align(len);
++
++ /*
++ * Add in the job offset.
++ */
++ bh->b_blocknr = (b / this_size);
++ bh->b_size = block_size * this_size;
++ set_bh_page(bh, p, offset);
++ bh->b_this_page = bh;
++
++ bh->b_dev = dev;
++ atomic_set(&bh->b_count, 1);
++
++ bh->b_state = ((1 << BH_Uptodate) | (1 << BH_Mapped) |
++ (1 << BH_Lock));
++
++ if (io->rw == WRITE)
++ clear_bit(BH_Dirty, &bh->b_state);
++
++ atomic_inc(&io->count);
++ submit_bh(io->rw, bh);
++
++ b += this_size;
++ offset += block_size * this_size;
++ }
++
++ *block = b;
++ return (b == end_block);
++}
++
++static void do_region(unsigned int region, struct io_region *where,
++ struct page *page, unsigned int offset,
++ struct io_context *io)
++{
++ unsigned int block_size = get_hardsect_size(where->dev);
++ unsigned int sblock_size = block_size >> 9;
++ sector_t block = where->sector / sblock_size;
++ sector_t end_block = (where->sector + where->count) / sblock_size;
++
++ while (1) {
++ if (do_page(where->dev, &block, end_block, block_size,
++ page, offset, region, io))
++ break;
++
++ offset = 0; /* only offset the first page */
++
++ page = list_entry(page->list.next, struct page, list);
++ }
++}
++
++static void dispatch_io(unsigned int num_regions, struct io_region *where,
++ struct page *pages, unsigned int offset,
++ struct io_context *io)
++{
++ int i;
++
++ for (i = 0; i < num_regions; i++)
++ if (where[i].count)
++ do_region(i, where + i, pages, offset, io);
++
++ /*
++ * Drop the extra reference that we were holding to avoid
++ * the io being completed too early.
++ */
++ dec_count(io, 0, 0);
++}
++
++/*
++ * Synchronous io
++ */
++int dm_io_sync(unsigned int num_regions, struct io_region *where,
++ int rw, struct page *pages, unsigned int offset,
++ unsigned int *error_bits)
++{
++ struct io_context io;
++
++ BUG_ON(num_regions > 1 && rw != WRITE);
++
++ io.rw = rw;
++ io.error = 0;
++ atomic_set(&io.count, 1); /* see dispatch_io() */
++ io.sleeper = current;
++
++ dispatch_io(num_regions, where, pages, offset, &io);
++ run_task_queue(&tq_disk);
++
++ while (1) {
++ set_current_state(TASK_UNINTERRUPTIBLE);
++
++ if (!atomic_read(&io.count))
++ break;
++
++ schedule();
++ }
++ set_current_state(TASK_RUNNING);
++
++ *error_bits = io.error;
++ return io.error ? -EIO : 0;
++}
++
++/*
++ * Asynchronous io
++ */
++int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
++ struct page *pages, unsigned int offset,
++ io_notify_fn fn, void *context)
++{
++ struct io_context *io = mempool_alloc(_io_pool, GFP_NOIO);
++
++ io->rw = rw;
++ io->error = 0;
++ atomic_set(&io->count, 1); /* see dispatch_io() */
++ io->sleeper = NULL;
++ io->callback = fn;
++ io->context = context;
++
++ dispatch_io(num_regions, where, pages, offset, io);
++ return 0;
++}
++
++EXPORT_SYMBOL(dm_io_get);
++EXPORT_SYMBOL(dm_io_put);
++EXPORT_SYMBOL(dm_io_sync);
++EXPORT_SYMBOL(dm_io_async);
+diff -urN linux-2.4.22/drivers/md/dm-io.h linux-2.4.22-dm/drivers/md/dm-io.h
+--- linux-2.4.22/drivers/md/dm-io.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-io.h 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,86 @@
++/*
++ * Copyright (C) 2003 Sistina Software
++ *
++ * This file is released under the GPL.
++ */
++
++#ifndef _DM_IO_H
++#define _DM_IO_H
++
++#include "dm.h"
++
++#include <linux/list.h>
++
++/* Move these to bitops.h eventually */
++/* Improved generic_fls algorithm (in 2.4 there is no generic_fls so far) */
++/* (c) 2002, D.Phillips and Sistina Software */
++/* Licensed under Version 2 of the GPL */
++
++static unsigned generic_fls8(unsigned n)
++{
++ return n & 0xf0 ?
++ n & 0xc0 ? (n >> 7) + 7 : (n >> 5) + 5:
++ n & 0x0c ? (n >> 3) + 3 : n - ((n + 1) >> 2);
++}
++
++static inline unsigned generic_fls16(unsigned n)
++{
++ return n & 0xff00? generic_fls8(n >> 8) + 8 : generic_fls8(n);
++}
++
++static inline unsigned generic_fls32(unsigned n)
++{
++ return n & 0xffff0000 ? generic_fls16(n >> 16) + 16 : generic_fls16(n);
++}
++
++/* FIXME make this configurable */
++#define DM_MAX_IO_REGIONS 8
++
++struct io_region {
++ kdev_t dev;
++ sector_t sector;
++ sector_t count;
++};
++
++
++/*
++ * 'error' is a bitset, with each bit indicating whether an error
++ * occurred doing io to the corresponding region.
++ */
++typedef void (*io_notify_fn)(unsigned int error, void *context);
++
++
++/*
++ * Before anyone uses the IO interface they should call
++ * dm_io_get(), specifying roughly how many pages they are
++ * expecting to perform io on concurrently.
++ *
++ * This function may block.
++ */
++int dm_io_get(unsigned int num_pages);
++void dm_io_put(unsigned int num_pages);
++
++
++/*
++ * Synchronous IO.
++ *
++ * Please ensure that the rw flag in the next two functions is
++ * either READ or WRITE, i.e. we don't take READA. Any
++ * regions with a zero count field will be ignored.
++ */
++int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
++ struct page *pages, unsigned int offset,
++ unsigned int *error_bits);
++
++
++/*
++ * Asynchronous IO.
++ *
++ * The 'where' array may be safely allocated on the stack since
++ * the function takes a copy.
++ */
++int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
++ struct page *pages, unsigned int offset,
++ io_notify_fn fn, void *context);
++
++#endif
+diff -urN linux-2.4.22/drivers/md/dm-ioctl.c linux-2.4.22-dm/drivers/md/dm-ioctl.c
+--- linux-2.4.22/drivers/md/dm-ioctl.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-ioctl.c 2003-09-15 17:03:21.000000000 +0200
+@@ -0,0 +1,1284 @@
++/*
++ * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#include "dm.h"
++
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/miscdevice.h>
++#include <linux/dm-ioctl.h>
++#include <linux/init.h>
++#include <linux/wait.h>
++#include <linux/blk.h>
++#include <linux/slab.h>
++
++#include <asm/uaccess.h>
++
++#define DM_DRIVER_EMAIL "dm@uk.sistina.com"
++
++/*-----------------------------------------------------------------
++ * The ioctl interface needs to be able to look up devices by
++ * name or uuid.
++ *---------------------------------------------------------------*/
++struct hash_cell {
++ struct list_head name_list;
++ struct list_head uuid_list;
++
++ char *name;
++ char *uuid;
++ struct mapped_device *md;
++ struct dm_table *new_map;
++
++ /* I hate devfs */
++ devfs_handle_t devfs_entry;
++};
++
++#define NUM_BUCKETS 64
++#define MASK_BUCKETS (NUM_BUCKETS - 1)
++static struct list_head _name_buckets[NUM_BUCKETS];
++static struct list_head _uuid_buckets[NUM_BUCKETS];
++
++static devfs_handle_t _dev_dir;
++void dm_hash_remove_all(void);
++
++/*
++ * Guards access to both hash tables.
++ */
++static DECLARE_RWSEM(_hash_lock);
++
++static void init_buckets(struct list_head *buckets)
++{
++ unsigned int i;
++
++ for (i = 0; i < NUM_BUCKETS; i++)
++ INIT_LIST_HEAD(buckets + i);
++}
++
++int dm_hash_init(void)
++{
++ init_buckets(_name_buckets);
++ init_buckets(_uuid_buckets);
++ _dev_dir = devfs_mk_dir(0, DM_DIR, NULL);
++ return 0;
++}
++
++void dm_hash_exit(void)
++{
++ dm_hash_remove_all();
++ devfs_unregister(_dev_dir);
++}
++
++/*-----------------------------------------------------------------
++ * Hash function:
++ * We're not really concerned with the str hash function being
++ * fast since it's only used by the ioctl interface.
++ *---------------------------------------------------------------*/
++static unsigned int hash_str(const char *str)
++{
++ const unsigned int hash_mult = 2654435387U;
++ unsigned int h = 0;
++
++ while (*str)
++ h = (h + (unsigned int) *str++) * hash_mult;
++
++ return h & MASK_BUCKETS;
++}
++
++/*-----------------------------------------------------------------
++ * Code for looking up a device by name
++ *---------------------------------------------------------------*/
++static struct hash_cell *__get_name_cell(const char *str)
++{
++ struct list_head *tmp;
++ struct hash_cell *hc;
++ unsigned int h = hash_str(str);
++
++ list_for_each (tmp, _name_buckets + h) {
++ hc = list_entry(tmp, struct hash_cell, name_list);
++ if (!strcmp(hc->name, str))
++ return hc;
++ }
++
++ return NULL;
++}
++
++static struct hash_cell *__get_uuid_cell(const char *str)
++{
++ struct list_head *tmp;
++ struct hash_cell *hc;
++ unsigned int h = hash_str(str);
++
++ list_for_each (tmp, _uuid_buckets + h) {
++ hc = list_entry(tmp, struct hash_cell, uuid_list);
++ if (!strcmp(hc->uuid, str))
++ return hc;
++ }
++
++ return NULL;
++}
++
++/*-----------------------------------------------------------------
++ * Inserting, removing and renaming a device.
++ *---------------------------------------------------------------*/
++static inline char *kstrdup(const char *str)
++{
++ char *r = kmalloc(strlen(str) + 1, GFP_KERNEL);
++ if (r)
++ strcpy(r, str);
++ return r;
++}
++
++static struct hash_cell *alloc_cell(const char *name, const char *uuid,
++ struct mapped_device *md)
++{
++ struct hash_cell *hc;
++
++ hc = kmalloc(sizeof(*hc), GFP_KERNEL);
++ if (!hc)
++ return NULL;
++
++ hc->name = kstrdup(name);
++ if (!hc->name) {
++ kfree(hc);
++ return NULL;
++ }
++
++ if (!uuid)
++ hc->uuid = NULL;
++
++ else {
++ hc->uuid = kstrdup(uuid);
++ if (!hc->uuid) {
++ kfree(hc->name);
++ kfree(hc);
++ return NULL;
++ }
++ }
++
++ INIT_LIST_HEAD(&hc->name_list);
++ INIT_LIST_HEAD(&hc->uuid_list);
++ hc->md = md;
++ hc->new_map = NULL;
++ return hc;
++}
++
++static void free_cell(struct hash_cell *hc)
++{
++ if (hc) {
++ kfree(hc->name);
++ kfree(hc->uuid);
++ kfree(hc);
++ }
++}
++
++/*
++ * devfs stuff.
++ */
++static int register_with_devfs(struct hash_cell *hc)
++{
++ kdev_t dev = dm_kdev(hc->md);
++
++ hc->devfs_entry =
++ devfs_register(_dev_dir, hc->name, DEVFS_FL_CURRENT_OWNER,
++ major(dev), minor(dev),
++ S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,
++ &dm_blk_dops, NULL);
++
++ return 0;
++}
++
++static int unregister_with_devfs(struct hash_cell *hc)
++{
++ devfs_unregister(hc->devfs_entry);
++ return 0;
++}
++
++/*
++ * The kdev_t and uuid of a device can never change once it is
++ * initially inserted.
++ */
++int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
++{
++ struct hash_cell *cell;
++
++ /*
++ * Allocate the new cells.
++ */
++ cell = alloc_cell(name, uuid, md);
++ if (!cell)
++ return -ENOMEM;
++
++ /*
++ * Insert the cell into both hash tables.
++ */
++ down_write(&_hash_lock);
++ if (__get_name_cell(name))
++ goto bad;
++
++ list_add(&cell->name_list, _name_buckets + hash_str(name));
++
++ if (uuid) {
++ if (__get_uuid_cell(uuid)) {
++ list_del(&cell->name_list);
++ goto bad;
++ }
++ list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
++ }
++ register_with_devfs(cell);
++ dm_get(md);
++ up_write(&_hash_lock);
++
++ return 0;
++
++ bad:
++ up_write(&_hash_lock);
++ free_cell(cell);
++ return -EBUSY;
++}
++
++void __hash_remove(struct hash_cell *hc)
++{
++ /* remove from the dev hash */
++ list_del(&hc->uuid_list);
++ list_del(&hc->name_list);
++ unregister_with_devfs(hc);
++ dm_put(hc->md);
++ if (hc->new_map)
++ dm_table_put(hc->new_map);
++ free_cell(hc);
++}
++
++void dm_hash_remove_all(void)
++{
++ int i;
++ struct hash_cell *hc;
++ struct list_head *tmp, *n;
++
++ down_write(&_hash_lock);
++ for (i = 0; i < NUM_BUCKETS; i++) {
++ list_for_each_safe (tmp, n, _name_buckets + i) {
++ hc = list_entry(tmp, struct hash_cell, name_list);
++ __hash_remove(hc);
++ }
++ }
++ up_write(&_hash_lock);
++}
++
++int dm_hash_rename(const char *old, const char *new)
++{
++ char *new_name, *old_name;
++ struct hash_cell *hc;
++
++ /*
++ * duplicate new.
++ */
++ new_name = kstrdup(new);
++ if (!new_name)
++ return -ENOMEM;
++
++ down_write(&_hash_lock);
++
++ /*
++ * Is new free ?
++ */
++ hc = __get_name_cell(new);
++ if (hc) {
++ DMWARN("asked to rename to an already existing name %s -> %s",
++ old, new);
++ up_write(&_hash_lock);
++ kfree(new_name);
++ return -EBUSY;
++ }
++
++ /*
++ * Is there such a device as 'old' ?
++ */
++ hc = __get_name_cell(old);
++ if (!hc) {
++ DMWARN("asked to rename a non existent device %s -> %s",
++ old, new);
++ up_write(&_hash_lock);
++ kfree(new_name);
++ return -ENXIO;
++ }
++
++ /*
++ * rename and move the name cell.
++ */
++ list_del(&hc->name_list);
++ old_name = hc->name;
++ hc->name = new_name;
++ list_add(&hc->name_list, _name_buckets + hash_str(new_name));
++
++ /* rename the device node in devfs */
++ unregister_with_devfs(hc);
++ register_with_devfs(hc);
++
++ up_write(&_hash_lock);
++ kfree(old_name);
++ return 0;
++}
++
++/*-----------------------------------------------------------------
++ * Implementation of the ioctl commands
++ *---------------------------------------------------------------*/
++/*
++ * All the ioctl commands get dispatched to functions with this
++ * prototype.
++ */
++typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);
++
++static int remove_all(struct dm_ioctl *param, size_t param_size)
++{
++ dm_hash_remove_all();
++ param->data_size = 0;
++ return 0;
++}
++
++/*
++ * Round up the ptr to an 8-byte boundary.
++ */
++#define ALIGN_MASK 7
++static inline void *align_ptr(void *ptr)
++{
++ return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
++}
++
++/*
++ * Retrieves the data payload buffer from an already allocated
++ * struct dm_ioctl.
++ */
++static void *get_result_buffer(struct dm_ioctl *param, size_t param_size,
++ size_t *len)
++{
++ param->data_start = align_ptr(param + 1) - (void *) param;
++
++ if (param->data_start < param_size)
++ *len = param_size - param->data_start;
++ else
++ *len = 0;
++
++ return ((void *) param) + param->data_start;
++}
++
++static int list_devices(struct dm_ioctl *param, size_t param_size)
++{
++ unsigned int i;
++ struct hash_cell *hc;
++ size_t len, needed = 0;
++ struct dm_name_list *nl, *old_nl = NULL;
++
++ down_write(&_hash_lock);
++
++ /*
++ * Loop through all the devices working out how much
++ * space we need.
++ */
++ for (i = 0; i < NUM_BUCKETS; i++) {
++ list_for_each_entry (hc, _name_buckets + i, name_list) {
++ needed += sizeof(struct dm_name_list);
++ needed += strlen(hc->name);
++ needed += ALIGN_MASK;
++ }
++ }
++
++ /*
++ * Grab our output buffer.
++ */
++ nl = get_result_buffer(param, param_size, &len);
++ if (len < needed) {
++ param->flags |= DM_BUFFER_FULL_FLAG;
++ goto out;
++ }
++ param->data_size = param->data_start + needed;
++
++ nl->dev = 0; /* Flags no data */
++
++ /*
++ * Now loop through filling out the names.
++ */
++ for (i = 0; i < NUM_BUCKETS; i++) {
++ list_for_each_entry (hc, _name_buckets + i, name_list) {
++ if (old_nl)
++ old_nl->next = (uint32_t) ((void *) nl -
++ (void *) old_nl);
++
++ nl->dev = dm_kdev(hc->md);
++ nl->next = 0;
++ strcpy(nl->name, hc->name);
++
++ old_nl = nl;
++ nl = align_ptr(((void *) ++nl) + strlen(hc->name) + 1);
++ }
++ }
++
++ out:
++ up_write(&_hash_lock);
++ return 0;
++}
++
++static int check_name(const char *name)
++{
++ if (strchr(name, '/')) {
++ DMWARN("invalid device name");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/*
++ * Fills in a dm_ioctl structure, ready for sending back to
++ * userland.
++ */
++static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
++{
++ kdev_t dev = dm_kdev(md);
++ struct dm_table *table;
++ struct block_device *bdev;
++
++ param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
++ DM_ACTIVE_PRESENT_FLAG);
++
++ if (dm_suspended(md))
++ param->flags |= DM_SUSPEND_FLAG;
++
++ param->dev = kdev_t_to_nr(dev);
++
++ if (is_read_only(dev))
++ param->flags |= DM_READONLY_FLAG;
++
++ param->event_nr = dm_get_event_nr(md);
++
++ table = dm_get_table(md);
++ if (table) {
++ param->flags |= DM_ACTIVE_PRESENT_FLAG;
++ param->target_count = dm_table_get_num_targets(table);
++ dm_table_put(table);
++ } else
++ param->target_count = 0;
++
++ bdev = bdget(param->dev);
++ if (!bdev)
++ return -ENXIO;
++ param->open_count = bdev->bd_openers;
++ bdput(bdev);
++
++ return 0;
++}
++
++static int dev_create(struct dm_ioctl *param, size_t param_size)
++{
++ int r;
++ kdev_t dev = 0;
++ struct mapped_device *md;
++
++ r = check_name(param->name);
++ if (r)
++ return r;
++
++ if (param->flags & DM_PERSISTENT_DEV_FLAG)
++ dev = to_kdev_t(param->dev);
++
++ r = dm_create(dev, &md);
++ if (r)
++ return r;
++
++ r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
++ if (r) {
++ dm_put(md);
++ return r;
++ }
++
++ param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
++
++ r = __dev_status(md, param);
++ dm_put(md);
++
++ return r;
++}
++
++/*
++ * Always use UUID for lookups if it's present, otherwise use name.
++ */
++static inline struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
++{
++ return *param->uuid ?
++ __get_uuid_cell(param->uuid) : __get_name_cell(param->name);
++}
++
++static inline struct mapped_device *find_device(struct dm_ioctl *param)
++{
++ struct hash_cell *hc;
++ struct mapped_device *md = NULL;
++
++ down_read(&_hash_lock);
++ hc = __find_device_hash_cell(param);
++ if (hc) {
++ md = hc->md;
++
++ /*
++ * Sneakily write in both the name and the uuid
++ * while we have the cell.
++ */
++ strncpy(param->name, hc->name, sizeof(param->name));
++ if (hc->uuid)
++ strncpy(param->uuid, hc->uuid, sizeof(param->uuid) - 1);
++ else
++ param->uuid[0] = '\0';
++
++ if (hc->new_map)
++ param->flags |= DM_INACTIVE_PRESENT_FLAG;
++ else
++ param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
++
++ dm_get(md);
++ }
++ up_read(&_hash_lock);
++
++ return md;
++}
++
++static int dev_remove(struct dm_ioctl *param, size_t param_size)
++{
++ struct hash_cell *hc;
++
++ down_write(&_hash_lock);
++ hc = __find_device_hash_cell(param);
++
++ if (!hc) {
++ DMWARN("device doesn't appear to be in the dev hash table.");
++ up_write(&_hash_lock);
++ return -ENXIO;
++ }
++
++ __hash_remove(hc);
++ up_write(&_hash_lock);
++ param->data_size = 0;
++ return 0;
++}
++
++/*
++ * Check a string doesn't overrun the chunk of
++ * memory we copied from userland.
++ */
++static int invalid_str(char *str, void *end)
++{
++ while ((void *) str < end)
++ if (!*str++)
++ return 0;
++
++ return -EINVAL;
++}
++
++static int dev_rename(struct dm_ioctl *param, size_t param_size)
++{
++ int r;
++ char *new_name = (char *) param + param->data_start;
++
++ if (new_name < (char *) (param + 1) ||
++ invalid_str(new_name, (void *) param + param_size)) {
++ DMWARN("Invalid new logical volume name supplied.");
++ return -EINVAL;
++ }
++
++ r = check_name(new_name);
++ if (r)
++ return r;
++
++ param->data_size = 0;
++ return dm_hash_rename(param->name, new_name);
++}
++
++static int do_suspend(struct dm_ioctl *param)
++{
++ int r = 0;
++ struct mapped_device *md;
++
++ md = find_device(param);
++ if (!md)
++ return -ENXIO;
++
++ if (!dm_suspended(md))
++ r = dm_suspend(md);
++
++ if (!r)
++ r = __dev_status(md, param);
++
++ dm_put(md);
++ return r;
++}
++
++static int do_resume(struct dm_ioctl *param)
++{
++ int r = 0;
++ struct hash_cell *hc;
++ struct mapped_device *md;
++ struct dm_table *new_map;
++
++ down_write(&_hash_lock);
++
++ hc = __find_device_hash_cell(param);
++ if (!hc) {
++ DMWARN("device doesn't appear to be in the dev hash table.");
++ up_write(&_hash_lock);
++ return -ENXIO;
++ }
++
++ md = hc->md;
++ dm_get(md);
++
++ new_map = hc->new_map;
++ hc->new_map = NULL;
++ param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
++
++ up_write(&_hash_lock);
++
++ /* Do we need to load a new map ? */
++ if (new_map) {
++ /* Suspend if it isn't already suspended */
++ if (!dm_suspended(md))
++ dm_suspend(md);
++
++ r = dm_swap_table(md, new_map);
++ if (r) {
++ dm_put(md);
++ dm_table_put(new_map);
++ return r;
++ }
++
++ if (dm_table_get_mode(new_map) & FMODE_WRITE)
++ set_device_ro(dm_kdev(md), 0);
++ else
++ set_device_ro(dm_kdev(md), 1);
++
++ dm_table_put(new_map);
++ }
++
++ if (dm_suspended(md))
++ r = dm_resume(md);
++
++ if (!r)
++ r = __dev_status(md, param);
++
++ dm_put(md);
++ return r;
++}
++
++/*
++ * Set or unset the suspension state of a device.
++ * If the device already is in the requested state we just return its status.
++ */
++static int dev_suspend(struct dm_ioctl *param, size_t param_size)
++{
++ if (param->flags & DM_SUSPEND_FLAG)
++ return do_suspend(param);
++
++ return do_resume(param);
++}
++
++/*
++ * Copies device info back to user space, used by
++ * the create and info ioctls.
++ */
++static int dev_status(struct dm_ioctl *param, size_t param_size)
++{
++ int r;
++ struct mapped_device *md;
++
++ md = find_device(param);
++ if (!md)
++ return -ENXIO;
++
++ r = __dev_status(md, param);
++ dm_put(md);
++ return r;
++}
++
++/*
++ * Build up the status struct for each target
++ */
++static void retrieve_status(struct dm_table *table, struct dm_ioctl *param,
++ size_t param_size)
++{
++ unsigned int i, num_targets;
++ struct dm_target_spec *spec;
++ char *outbuf, *outptr;
++ status_type_t type;
++ size_t remaining, len, used = 0;
++
++ outptr = outbuf = get_result_buffer(param, param_size, &len);
++
++ if (param->flags & DM_STATUS_TABLE_FLAG)
++ type = STATUSTYPE_TABLE;
++ else
++ type = STATUSTYPE_INFO;
++
++ /* Get all the target info */
++ num_targets = dm_table_get_num_targets(table);
++ for (i = 0; i < num_targets; i++) {
++ struct dm_target *ti = dm_table_get_target(table, i);
++
++ remaining = len - (outptr - outbuf);
++ if (remaining < sizeof(struct dm_target_spec)) {
++ param->flags |= DM_BUFFER_FULL_FLAG;
++ break;
++ }
++
++ spec = (struct dm_target_spec *) outptr;
++
++ spec->status = 0;
++ spec->sector_start = ti->begin;
++ spec->length = ti->len;
++ strncpy(spec->target_type, ti->type->name,
++ sizeof(spec->target_type));
++
++ outptr += sizeof(struct dm_target_spec);
++ remaining = len - (outptr - outbuf);
++
++ /* Get the status/table string from the target driver */
++ if (ti->type->status) {
++ if (ti->type->status(ti, type, outptr, remaining)) {
++ param->flags |= DM_BUFFER_FULL_FLAG;
++ break;
++ }
++ } else
++ outptr[0] = '\0';
++
++ outptr += strlen(outptr) + 1;
++ used = param->data_start + (outptr - outbuf);
++
++ align_ptr(outptr);
++ spec->next = outptr - outbuf;
++ }
++
++ if (used)
++ param->data_size = used;
++
++ param->target_count = num_targets;
++}
++
++/*
++ * Wait for a device to report an event
++ */
++static int dev_wait(struct dm_ioctl *param, size_t param_size)
++{
++ int r;
++ struct mapped_device *md;
++ struct dm_table *table;
++ DECLARE_WAITQUEUE(wq, current);
++
++ md = find_device(param);
++ if (!md)
++ return -ENXIO;
++
++ /*
++ * Wait for a notification event
++ */
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (!dm_add_wait_queue(md, &wq, param->event_nr)) {
++ schedule();
++ dm_remove_wait_queue(md, &wq);
++ }
++ set_current_state(TASK_RUNNING);
++
++ /*
++ * The userland program is going to want to know what
++ * changed to trigger the event, so we may as well tell
++ * him and save an ioctl.
++ */
++ r = __dev_status(md, param);
++ if (r)
++ goto out;
++
++ table = dm_get_table(md);
++ if (table) {
++ retrieve_status(table, param, param_size);
++ dm_table_put(table);
++ }
++
++ out:
++ dm_put(md);
++ return r;
++}
++
++static inline int get_mode(struct dm_ioctl *param)
++{
++ int mode = FMODE_READ | FMODE_WRITE;
++
++ if (param->flags & DM_READONLY_FLAG)
++ mode = FMODE_READ;
++
++ return mode;
++}
++
++static int next_target(struct dm_target_spec *last, uint32_t next, void *end,
++ struct dm_target_spec **spec, char **target_params)
++{
++ *spec = (struct dm_target_spec *) ((unsigned char *) last + next);
++ *target_params = (char *) (*spec + 1);
++
++ if (*spec < (last + 1))
++ return -EINVAL;
++
++ return invalid_str(*target_params, end);
++}
++
++static int populate_table(struct dm_table *table, struct dm_ioctl *param,
++ size_t param_size)
++{
++ int r;
++ unsigned int i = 0;
++ struct dm_target_spec *spec = (struct dm_target_spec *) param;
++ uint32_t next = param->data_start;
++ void *end = (void *) param + param_size;
++ char *target_params;
++
++ if (!param->target_count) {
++ DMWARN("populate_table: no targets specified");
++ return -EINVAL;
++ }
++
++ for (i = 0; i < param->target_count; i++) {
++
++ r = next_target(spec, next, end, &spec, &target_params);
++ if (r) {
++ DMWARN("unable to find target");
++ return r;
++ }
++
++ r = dm_table_add_target(table, spec->target_type,
++ (sector_t) spec->sector_start,
++ (sector_t) spec->length,
++ target_params);
++ if (r) {
++ DMWARN("error adding target to table");
++ return r;
++ }
++
++ next = spec->next;
++ }
++
++ return dm_table_complete(table);
++}
++
++static int table_load(struct dm_ioctl *param, size_t param_size)
++{
++ int r;
++ struct hash_cell *hc;
++ struct dm_table *t;
++
++ r = dm_table_create(&t, get_mode(param), param->target_count);
++ if (r)
++ return r;
++
++ r = populate_table(t, param, param_size);
++ if (r) {
++ dm_table_put(t);
++ return r;
++ }
++
++ down_write(&_hash_lock);
++ hc = __find_device_hash_cell(param);
++ if (!hc) {
++ DMWARN("device doesn't appear to be in the dev hash table.");
++ up_write(&_hash_lock);
++ return -ENXIO;
++ }
++
++ if (hc->new_map)
++ dm_table_put(hc->new_map);
++ hc->new_map = t;
++ param->flags |= DM_INACTIVE_PRESENT_FLAG;
++
++ r = __dev_status(hc->md, param);
++ up_write(&_hash_lock);
++ return r;
++}
++
++static int table_clear(struct dm_ioctl *param, size_t param_size)
++{
++ int r;
++ struct hash_cell *hc;
++
++ down_write(&_hash_lock);
++
++ hc = __find_device_hash_cell(param);
++ if (!hc) {
++ DMWARN("device doesn't appear to be in the dev hash table.");
++ up_write(&_hash_lock);
++ return -ENXIO;
++ }
++
++ if (hc->new_map) {
++ dm_table_put(hc->new_map);
++ hc->new_map = NULL;
++ }
++
++ param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
++
++ r = __dev_status(hc->md, param);
++ up_write(&_hash_lock);
++ return r;
++}
++
++/*
++ * Retrieves a list of devices used by a particular dm device.
++ */
++static void retrieve_deps(struct dm_table *table, struct dm_ioctl *param,
++ size_t param_size)
++{
++ unsigned int count = 0;
++ struct list_head *tmp;
++ size_t len, needed;
++ struct dm_target_deps *deps;
++
++ deps = get_result_buffer(param, param_size, &len);
++
++ /*
++ * Count the devices.
++ */
++ list_for_each(tmp, dm_table_get_devices(table))
++ count++;
++
++ /*
++ * Check we have enough space.
++ */
++ needed = sizeof(*deps) + (sizeof(*deps->dev) * count);
++ if (len < needed) {
++ param->flags |= DM_BUFFER_FULL_FLAG;
++ return;
++ }
++
++ /*
++ * Fill in the devices.
++ */
++ deps->count = count;
++ count = 0;
++ list_for_each(tmp, dm_table_get_devices(table)) {
++ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
++ deps->dev[count++] = dd->bdev->bd_dev;
++ }
++
++ param->data_size = param->data_start + needed;
++}
++
++static int table_deps(struct dm_ioctl *param, size_t param_size)
++{
++ int r;
++ struct mapped_device *md;
++ struct dm_table *table;
++
++ md = find_device(param);
++ if (!md)
++ return -ENXIO;
++
++ r = __dev_status(md, param);
++ if (r)
++ goto out;
++
++ table = dm_get_table(md);
++ if (table) {
++ retrieve_deps(table, param, param_size);
++ dm_table_put(table);
++ }
++
++ out:
++ dm_put(md);
++ return r;
++}
++
++/*
++ * Return the status of a device as a text string for each
++ * target.
++ */
++static int table_status(struct dm_ioctl *param, size_t param_size)
++{
++ int r;
++ struct mapped_device *md;
++ struct dm_table *table;
++
++ md = find_device(param);
++ if (!md)
++ return -ENXIO;
++
++ r = __dev_status(md, param);
++ if (r)
++ goto out;
++
++ table = dm_get_table(md);
++ if (table) {
++ retrieve_status(table, param, param_size);
++ dm_table_put(table);
++ }
++
++ out:
++ dm_put(md);
++ return r;
++}
++
++/*-----------------------------------------------------------------
++ * Implementation of open/close/ioctl on the special char
++ * device.
++ *---------------------------------------------------------------*/
++static ioctl_fn lookup_ioctl(unsigned int cmd)
++{
++ static struct {
++ int cmd;
++ ioctl_fn fn;
++ } _ioctls[] = {
++ {DM_VERSION_CMD, NULL}, /* version is dealt with elsewhere */
++ {DM_REMOVE_ALL_CMD, remove_all},
++ {DM_LIST_DEVICES_CMD, list_devices},
++
++ {DM_DEV_CREATE_CMD, dev_create},
++ {DM_DEV_REMOVE_CMD, dev_remove},
++ {DM_DEV_RENAME_CMD, dev_rename},
++ {DM_DEV_SUSPEND_CMD, dev_suspend},
++ {DM_DEV_STATUS_CMD, dev_status},
++ {DM_DEV_WAIT_CMD, dev_wait},
++
++ {DM_TABLE_LOAD_CMD, table_load},
++ {DM_TABLE_CLEAR_CMD, table_clear},
++ {DM_TABLE_DEPS_CMD, table_deps},
++ {DM_TABLE_STATUS_CMD, table_status}
++ };
++
++ return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn;
++}
++
++/*
++ * As well as checking the version compatibility this always
++ * copies the kernel interface version out.
++ */
++static int check_version(unsigned int cmd, struct dm_ioctl *user)
++{
++ uint32_t version[3];
++ int r = 0;
++
++ if (copy_from_user(version, user->version, sizeof(version)))
++ return -EFAULT;
++
++ if ((DM_VERSION_MAJOR != version[0]) ||
++ (DM_VERSION_MINOR < version[1])) {
++ DMWARN("ioctl interface mismatch: "
++ "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
++ DM_VERSION_MAJOR, DM_VERSION_MINOR,
++ DM_VERSION_PATCHLEVEL,
++ version[0], version[1], version[2], cmd);
++ r = -EINVAL;
++ }
++
++ /*
++ * Fill in the kernel version.
++ */
++ version[0] = DM_VERSION_MAJOR;
++ version[1] = DM_VERSION_MINOR;
++ version[2] = DM_VERSION_PATCHLEVEL;
++ if (copy_to_user(user->version, version, sizeof(version)))
++ return -EFAULT;
++
++ return r;
++}
++
++static void free_params(struct dm_ioctl *param)
++{
++ vfree(param);
++}
++
++static int copy_params(struct dm_ioctl *user, struct dm_ioctl **param)
++{
++ struct dm_ioctl tmp, *dmi;
++
++ if (copy_from_user(&tmp, user, sizeof(tmp)))
++ return -EFAULT;
++
++ if (tmp.data_size < sizeof(tmp))
++ return -EINVAL;
++
++ dmi = (struct dm_ioctl *) vmalloc(tmp.data_size);
++ if (!dmi)
++ return -ENOMEM;
++
++ if (copy_from_user(dmi, user, tmp.data_size)) {
++ vfree(dmi);
++ return -EFAULT;
++ }
++
++ *param = dmi;
++ return 0;
++}
++
++static int validate_params(uint cmd, struct dm_ioctl *param)
++{
++ /* Always clear this flag */
++ param->flags &= ~DM_BUFFER_FULL_FLAG;
++
++ /* Ignores parameters */
++ if (cmd == DM_REMOVE_ALL_CMD || cmd == DM_LIST_DEVICES_CMD)
++ return 0;
++
++ /* Unless creating, either name or uuid but not both */
++ if (cmd != DM_DEV_CREATE_CMD) {
++ if ((!*param->uuid && !*param->name) ||
++ (*param->uuid && *param->name)) {
++ DMWARN("one of name or uuid must be supplied, cmd(%u)",
++ cmd);
++ return -EINVAL;
++ }
++ }
++
++ /* Ensure strings are terminated */
++ param->name[DM_NAME_LEN - 1] = '\0';
++ param->uuid[DM_UUID_LEN - 1] = '\0';
++
++ return 0;
++}
++
++static int ctl_ioctl(struct inode *inode, struct file *file,
++ uint command, ulong u)
++{
++ int r = 0;
++ unsigned int cmd;
++ struct dm_ioctl *param;
++ struct dm_ioctl *user = (struct dm_ioctl *) u;
++ ioctl_fn fn = NULL;
++ size_t param_size;
++
++ /* only root can play with this */
++ if (!capable(CAP_SYS_ADMIN))
++ return -EACCES;
++
++ if (_IOC_TYPE(command) != DM_IOCTL)
++ return -ENOTTY;
++
++ cmd = _IOC_NR(command);
++
++ /*
++ * Check the interface version passed in. This also
++ * writes out the kernel's interface version.
++ */
++ r = check_version(cmd, user);
++ if (r)
++ return r;
++
++ /*
++ * Nothing more to do for the version command.
++ */
++ if (cmd == DM_VERSION_CMD)
++ return 0;
++
++ fn = lookup_ioctl(cmd);
++ if (!fn) {
++ DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
++ return -ENOTTY;
++ }
++
++ /*
++ * FIXME: I don't like this, we're trying to avoid low
++ * memory issues when a device is suspended.
++ */
++ current->flags |= PF_MEMALLOC;
++
++ /*
++ * Copy the parameters into kernel space.
++ */
++ r = copy_params(user, &param);
++ if (r) {
++ current->flags &= ~PF_MEMALLOC;
++ return r;
++ }
++
++ r = validate_params(cmd, param);
++ if (r)
++ goto out;
++
++ param_size = param->data_size;
++ param->data_size = sizeof(*param);
++ r = fn(param, param_size);
++
++ /*
++ * Copy the results back to userland.
++ */
++ if (!r && copy_to_user(user, param, param->data_size))
++ r = -EFAULT;
++
++ out:
++ free_params(param);
++ current->flags &= ~PF_MEMALLOC;
++ return r;
++}
++
++static struct file_operations _ctl_fops = {
++ .ioctl = ctl_ioctl,
++ .owner = THIS_MODULE,
++};
++
++static devfs_handle_t _ctl_handle;
++
++static struct miscdevice _dm_misc = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = DM_NAME,
++ .fops = &_ctl_fops
++};
++
++/*
++ * Create misc character device and link to DM_DIR/control.
++ */
++int __init dm_interface_init(void)
++{
++ int r;
++ char rname[64];
++
++ r = dm_hash_init();
++ if (r)
++ return r;
++
++ r = misc_register(&_dm_misc);
++ if (r) {
++ DMERR("misc_register failed for control device");
++ dm_hash_exit();
++ return r;
++ }
++
++ r = devfs_generate_path(_dm_misc.devfs_handle, rname + 3,
++ sizeof rname - 3);
++ if (r == -ENOSYS)
++ goto done; /* devfs not present */
++
++ if (r < 0) {
++ DMERR("devfs_generate_path failed for control device");
++ goto failed;
++ }
++
++ strncpy(rname + r, "../", 3);
++ r = devfs_mk_symlink(NULL, DM_DIR "/control",
++ DEVFS_FL_DEFAULT, rname + r, &_ctl_handle, NULL);
++ if (r) {
++ DMERR("devfs_mk_symlink failed for control device");
++ goto failed;
++ }
++ devfs_auto_unregister(_dm_misc.devfs_handle, _ctl_handle);
++
++ done:
++ DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR,
++ DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA,
++ DM_DRIVER_EMAIL);
++ return 0;
++
++ failed:
++ misc_deregister(&_dm_misc);
++ dm_hash_exit();
++ return r;
++}
++
++void dm_interface_exit(void)
++{
++ if (misc_deregister(&_dm_misc) < 0)
++ DMERR("misc_deregister failed for control device");
++
++ dm_hash_exit();
++}
+diff -urN linux-2.4.22/drivers/md/dm-linear.c linux-2.4.22-dm/drivers/md/dm-linear.c
+--- linux-2.4.22/drivers/md/dm-linear.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-linear.c 2003-09-15 17:03:05.000000000 +0200
+@@ -0,0 +1,123 @@
++/*
++ * Copyright (C) 2001 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#include "dm.h"
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/blkdev.h>
++#include <linux/slab.h>
++
++/*
++ * Linear: maps a linear range of a device.
++ */
++struct linear_c {
++ struct dm_dev *dev;
++ sector_t start;
++};
++
++/*
++ * Construct a linear mapping: <dev_path> <offset>
++ */
++static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
++{
++ struct linear_c *lc;
++
++ if (argc != 2) {
++ ti->error = "dm-linear: Invalid argument count";
++ return -EINVAL;
++ }
++
++ lc = kmalloc(sizeof(*lc), GFP_KERNEL);
++ if (lc == NULL) {
++ ti->error = "dm-linear: Cannot allocate linear context";
++ return -ENOMEM;
++ }
++
++ if (sscanf(argv[1], SECTOR_FORMAT, &lc->start) != 1) {
++ ti->error = "dm-linear: Invalid device sector";
++ goto bad;
++ }
++
++ if (dm_get_device(ti, argv[0], lc->start, ti->len,
++ dm_table_get_mode(ti->table), &lc->dev)) {
++ ti->error = "dm-linear: Device lookup failed";
++ goto bad;
++ }
++
++ ti->private = lc;
++ return 0;
++
++ bad:
++ kfree(lc);
++ return -EINVAL;
++}
++
++static void linear_dtr(struct dm_target *ti)
++{
++ struct linear_c *lc = (struct linear_c *) ti->private;
++
++ dm_put_device(ti, lc->dev);
++ kfree(lc);
++}
++
++static int linear_map(struct dm_target *ti, struct buffer_head *bh, int rw,
++ union map_info *map_context)
++{
++ struct linear_c *lc = (struct linear_c *) ti->private;
++
++ bh->b_rdev = lc->dev->dev;
++ bh->b_rsector = lc->start + (bh->b_rsector - ti->begin);
++
++ return 1;
++}
++
++static int linear_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned int maxlen)
++{
++ struct linear_c *lc = (struct linear_c *) ti->private;
++ kdev_t kdev;
++
++ switch (type) {
++ case STATUSTYPE_INFO:
++ result[0] = '\0';
++ break;
++
++ case STATUSTYPE_TABLE:
++ kdev = to_kdev_t(lc->dev->bdev->bd_dev);
++ snprintf(result, maxlen, "%s " SECTOR_FORMAT,
++ dm_kdevname(kdev), lc->start);
++ break;
++ }
++ return 0;
++}
++
++static struct target_type linear_target = {
++ .name = "linear",
++ .module = THIS_MODULE,
++ .ctr = linear_ctr,
++ .dtr = linear_dtr,
++ .map = linear_map,
++ .status = linear_status,
++};
++
++int __init dm_linear_init(void)
++{
++ int r = dm_register_target(&linear_target);
++
++ if (r < 0)
++ DMERR("linear: register failed %d", r);
++
++ return r;
++}
++
++void dm_linear_exit(void)
++{
++ int r = dm_unregister_target(&linear_target);
++
++ if (r < 0)
++ DMERR("linear: unregister failed %d", r);
++}
+diff -urN linux-2.4.22/drivers/md/dm-log.c linux-2.4.22-dm/drivers/md/dm-log.c
+--- linux-2.4.22/drivers/md/dm-log.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-log.c 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,302 @@
++/*
++ * Copyright (C) 2003 Sistina Software
++ *
++ * This file is released under the LGPL.
++ */
++
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++
++#include "dm-log.h"
++#include "dm-io.h"
++
++static LIST_HEAD(_log_types);
++static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
++
++int dm_register_dirty_log_type(struct dirty_log_type *type)
++{
++ spin_lock(&_lock);
++ type->use_count = 0;
++ if (type->module)
++ __MOD_INC_USE_COUNT(type->module);
++
++ list_add(&type->list, &_log_types);
++ spin_unlock(&_lock);
++
++ return 0;
++}
++
++int dm_unregister_dirty_log_type(struct dirty_log_type *type)
++{
++ spin_lock(&_lock);
++
++ if (type->use_count)
++ DMWARN("Attempt to unregister a log type that is still in use");
++ else {
++ list_del(&type->list);
++ if (type->module)
++ __MOD_DEC_USE_COUNT(type->module);
++ }
++
++ spin_unlock(&_lock);
++
++ return 0;
++}
++
++static struct dirty_log_type *get_type(const char *type_name)
++{
++ struct dirty_log_type *type;
++ struct list_head *tmp;
++
++ spin_lock(&_lock);
++ list_for_each (tmp, &_log_types) {
++ type = list_entry(tmp, struct dirty_log_type, list);
++ if (!strcmp(type_name, type->name)) {
++ type->use_count++;
++ spin_unlock(&_lock);
++ return type;
++ }
++ }
++
++ spin_unlock(&_lock);
++ return NULL;
++}
++
++static void put_type(struct dirty_log_type *type)
++{
++ spin_lock(&_lock);
++ type->use_count--;
++ spin_unlock(&_lock);
++}
++
++struct dirty_log *dm_create_dirty_log(const char *type_name, sector_t dev_size,
++ unsigned int argc, char **argv)
++{
++ struct dirty_log_type *type;
++ struct dirty_log *log;
++
++ log = kmalloc(sizeof(*log), GFP_KERNEL);
++ if (!log)
++ return NULL;
++
++ type = get_type(type_name);
++ if (!type) {
++ kfree(log);
++ return NULL;
++ }
++
++ log->type = type;
++ if (type->ctr(log, dev_size, argc, argv)) {
++ kfree(log);
++ put_type(type);
++ return NULL;
++ }
++
++ return log;
++}
++
++void dm_destroy_dirty_log(struct dirty_log *log)
++{
++ log->type->dtr(log);
++ put_type(log->type);
++ kfree(log);
++}
++
++
++/*-----------------------------------------------------------------
++ * In core log, ie. trivial, non-persistent
++ *
++ * For now we'll keep this simple and just have 2 bitsets, one
++ * for clean/dirty, the other for sync/nosync. The sync bitset
++ * will be freed when everything is in sync.
++ *
++ * FIXME: problems with a 64bit sector_t
++ *---------------------------------------------------------------*/
++struct core_log {
++ sector_t region_size;
++ unsigned int region_count;
++ unsigned long *clean_bits;
++ unsigned long *sync_bits;
++ unsigned long *recovering_bits; /* FIXME: this seems excessive */
++
++ int sync_search;
++};
++
++static int core_ctr(struct dirty_log *log, sector_t dev_size,
++ unsigned int argc, char **argv)
++{
++ struct core_log *clog;
++ sector_t region_size;
++ unsigned int region_count;
++ size_t bitset_size;
++
++ if (argc != 1) {
++ DMWARN("wrong number of arguments to core_log");
++ return -EINVAL;
++ }
++
++ if (sscanf(argv[0], SECTOR_FORMAT, &region_size) != 1) {
++ DMWARN("invalid region size string");
++ return -EINVAL;
++ }
++
++ region_count = dm_div_up(dev_size, region_size);
++
++ clog = kmalloc(sizeof(*clog), GFP_KERNEL);
++ if (!clog) {
++ DMWARN("couldn't allocate core log");
++ return -ENOMEM;
++ }
++
++ clog->region_size = region_size;
++ clog->region_count = region_count;
++
++ bitset_size = dm_round_up(region_count >> 3, sizeof(*clog->clean_bits));
++ clog->clean_bits = vmalloc(bitset_size);
++ if (!clog->clean_bits) {
++ DMWARN("couldn't allocate clean bitset");
++ kfree(clog);
++ return -ENOMEM;
++ }
++ memset(clog->clean_bits, -1, bitset_size);
++
++ clog->sync_bits = vmalloc(bitset_size);
++ if (!clog->sync_bits) {
++ DMWARN("couldn't allocate sync bitset");
++ vfree(clog->clean_bits);
++ kfree(clog);
++ return -ENOMEM;
++ }
++ memset(clog->sync_bits, 0, bitset_size);
++
++ clog->recovering_bits = vmalloc(bitset_size);
++ if (!clog->recovering_bits) {
++ DMWARN("couldn't allocate sync bitset");
++ vfree(clog->sync_bits);
++ vfree(clog->clean_bits);
++ kfree(clog);
++ return -ENOMEM;
++ }
++ memset(clog->recovering_bits, 0, bitset_size);
++ clog->sync_search = 0;
++ log->context = clog;
++ return 0;
++}
++
++static void core_dtr(struct dirty_log *log)
++{
++ struct core_log *clog = (struct core_log *) log->context;
++ vfree(clog->clean_bits);
++ vfree(clog->sync_bits);
++ vfree(clog->recovering_bits);
++ kfree(clog);
++}
++
++static sector_t core_get_region_size(struct dirty_log *log)
++{
++ struct core_log *clog = (struct core_log *) log->context;
++ return clog->region_size;
++}
++
++static int core_is_clean(struct dirty_log *log, region_t region)
++{
++ struct core_log *clog = (struct core_log *) log->context;
++ return test_bit(region, clog->clean_bits);
++}
++
++static int core_in_sync(struct dirty_log *log, region_t region, int block)
++{
++ struct core_log *clog = (struct core_log *) log->context;
++
++ return test_bit(region, clog->sync_bits) ? 1 : 0;
++}
++
++static int core_flush(struct dirty_log *log)
++{
++ /* no op */
++ return 0;
++}
++
++static void core_mark_region(struct dirty_log *log, region_t region)
++{
++ struct core_log *clog = (struct core_log *) log->context;
++ clear_bit(region, clog->clean_bits);
++}
++
++static void core_clear_region(struct dirty_log *log, region_t region)
++{
++ struct core_log *clog = (struct core_log *) log->context;
++ set_bit(region, clog->clean_bits);
++}
++
++static int core_get_resync_work(struct dirty_log *log, region_t *region)
++{
++ struct core_log *clog = (struct core_log *) log->context;
++
++ if (clog->sync_search >= clog->region_count)
++ return 0;
++
++ do {
++ *region = find_next_zero_bit(clog->sync_bits,
++ clog->region_count,
++ clog->sync_search);
++ clog->sync_search = *region + 1;
++
++ if (*region == clog->region_count)
++ return 0;
++
++ } while (test_bit(*region, clog->recovering_bits));
++
++ set_bit(*region, clog->recovering_bits);
++ return 1;
++}
++
++static void core_complete_resync_work(struct dirty_log *log, region_t region,
++ int success)
++{
++ struct core_log *clog = (struct core_log *) log->context;
++
++ clear_bit(region, clog->recovering_bits);
++ if (success)
++ set_bit(region, clog->sync_bits);
++}
++
++static struct dirty_log_type _core_type = {
++ .name = "core",
++
++ .ctr = core_ctr,
++ .dtr = core_dtr,
++ .get_region_size = core_get_region_size,
++ .is_clean = core_is_clean,
++ .in_sync = core_in_sync,
++ .flush = core_flush,
++ .mark_region = core_mark_region,
++ .clear_region = core_clear_region,
++ .get_resync_work = core_get_resync_work,
++ .complete_resync_work = core_complete_resync_work
++};
++
++__init int dm_dirty_log_init(void)
++{
++ int r;
++
++ r = dm_register_dirty_log_type(&_core_type);
++ if (r)
++ DMWARN("couldn't register core log");
++
++ return r;
++}
++
++void dm_dirty_log_exit(void)
++{
++ dm_unregister_dirty_log_type(&_core_type);
++}
++
++EXPORT_SYMBOL(dm_register_dirty_log_type);
++EXPORT_SYMBOL(dm_unregister_dirty_log_type);
++EXPORT_SYMBOL(dm_dirty_log_init);
++EXPORT_SYMBOL(dm_dirty_log_exit);
++EXPORT_SYMBOL(dm_create_dirty_log);
++EXPORT_SYMBOL(dm_destroy_dirty_log);
+diff -urN linux-2.4.22/drivers/md/dm-log.h linux-2.4.22-dm/drivers/md/dm-log.h
+--- linux-2.4.22/drivers/md/dm-log.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-log.h 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,112 @@
++/*
++ * Copyright (C) 2003 Sistina Software
++ *
++ * This file is released under the LGPL.
++ */
++
++#ifndef DM_DIRTY_LOG
++#define DM_DIRTY_LOG
++
++#include "dm.h"
++
++typedef sector_t region_t;
++
++struct dirty_log_type;
++
++struct dirty_log {
++ struct dirty_log_type *type;
++ void *context;
++};
++
++struct dirty_log_type {
++ struct list_head list;
++ const char *name;
++ struct module *module;
++ unsigned int use_count;
++
++ int (*ctr)(struct dirty_log *log, sector_t dev_size,
++ unsigned int argc, char **argv);
++ void (*dtr)(struct dirty_log *log);
++
++ /*
++ * Retrieves the smallest size of region that the log can
++ * deal with.
++ */
++ sector_t (*get_region_size)(struct dirty_log *log);
++
++ /*
++ * A predicate to say whether a region is clean or not.
++ * May block.
++ */
++ int (*is_clean)(struct dirty_log *log, region_t region);
++
++ /*
++ * Returns: 0, 1, -EWOULDBLOCK, < 0
++ *
++ * A predicate function to check the area given by
++ * [sector, sector + len) is in sync.
++ *
++ * If -EWOULDBLOCK is returned the state of the region is
++ * unknown, typically this will result in a read being
++ * passed to a daemon to deal with, since a daemon is
++ * allowed to block.
++ */
++ int (*in_sync)(struct dirty_log *log, region_t region, int can_block);
++
++ /*
++ * Flush the current log state (eg, to disk). This
++ * function may block.
++ */
++ int (*flush)(struct dirty_log *log);
++
++ /*
++ * Mark an area as clean or dirty. These functions may
++ * block, though for performance reasons blocking should
++ * be extremely rare (eg, allocating another chunk of
++ * memory for some reason).
++ */
++ void (*mark_region)(struct dirty_log *log, region_t region);
++ void (*clear_region)(struct dirty_log *log, region_t region);
++
++ /*
++ * Returns: <0 (error), 0 (no region), 1 (region)
++ *
++ * The mirrord will need perform recovery on regions of
++ * the mirror that are in the NOSYNC state. This
++ * function asks the log to tell the caller about the
++ * next region that this machine should recover.
++ *
++ * Do not confuse this function with 'in_sync()', one
++ * tells you if an area is synchronised, the other
++ * assigns recovery work.
++ */
++ int (*get_resync_work)(struct dirty_log *log, region_t *region);
++
++ /*
++ * This notifies the log that the resync of an area has
++ * been completed. The log should then mark this region
++ * as CLEAN.
++ */
++ void (*complete_resync_work)(struct dirty_log *log,
++ region_t region, int success);
++};
++
++int dm_register_dirty_log_type(struct dirty_log_type *type);
++int dm_unregister_dirty_log_type(struct dirty_log_type *type);
++
++
++/*
++ * Make sure you use these two functions, rather than calling
++ * type->constructor/destructor() directly.
++ */
++struct dirty_log *dm_create_dirty_log(const char *type_name, sector_t dev_size,
++ unsigned int argc, char **argv);
++void dm_destroy_dirty_log(struct dirty_log *log);
++
++/*
++ * init/exit functions.
++ */
++int dm_dirty_log_init(void);
++void dm_dirty_log_exit(void);
++
++#endif
+diff -urN linux-2.4.22/drivers/md/dm-raid1.c linux-2.4.22-dm/drivers/md/dm-raid1.c
+--- linux-2.4.22/drivers/md/dm-raid1.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-raid1.c 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,1297 @@
++/*
++ * Copyright (C) 2003 Sistina Software Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#include "dm.h"
++#include "dm-daemon.h"
++#include "dm-io.h"
++#include "dm-log.h"
++#include "kcopyd.h"
++
++#include <linux/ctype.h>
++#include <linux/init.h>
++#include <linux/mempool.h>
++#include <linux/module.h>
++#include <linux/pagemap.h>
++#include <linux/slab.h>
++#include <linux/time.h>
++#include <linux/vmalloc.h>
++
++static struct dm_daemon _kmirrord;
++
++/*-----------------------------------------------------------------
++ * buffer lists:
++ *
++ * We play with singly linked lists of buffers, but we want to be
++ * careful to add new buffers to the back of the list, to avoid
++ * buffers being starved of attention.
++ *---------------------------------------------------------------*/
++struct buffer_list {
++ struct buffer_head *head;
++ struct buffer_head *tail;
++};
++
++static inline void buffer_list_init(struct buffer_list *bl)
++{
++ bl->head = bl->tail = NULL;
++}
++
++static inline void buffer_list_add(struct buffer_list *bl,
++ struct buffer_head *bh)
++{
++ bh->b_reqnext = NULL;
++
++ if (bl->tail) {
++ bl->tail->b_reqnext = bh;
++ bl->tail = bh;
++ } else
++ bl->head = bl->tail = bh;
++}
++
++static struct buffer_head *buffer_list_pop(struct buffer_list *bl)
++{
++ struct buffer_head *bh = bl->head;
++
++ if (bh) {
++ bl->head = bl->head->b_reqnext;
++ if (!bl->head)
++ bl->tail = NULL;
++
++ bh->b_reqnext = NULL;
++ }
++
++ return bh;
++}
++
++/*-----------------------------------------------------------------
++ * Region hash
++ *
++ * The mirror splits itself up into discrete regions. Each
++ * region can be in one of three states: clean, dirty,
++ * nosync. There is no need to put clean regions in the hash.
++ *
++ * In addition to being present in the hash table a region _may_
++ * be present on one of three lists.
++ *
++ * clean_regions: Regions on this list have no io pending to
++ * them, they are in sync, we are no longer interested in them,
++ * they are dull. rh_update_states() will remove them from the
++ * hash table.
++ *
++ * quiesced_regions: These regions have been spun down, ready
++ * for recovery. rh_recovery_start() will remove regions from
++ * this list and hand them to kmirrord, which will schedule the
++ * recovery io with kcopyd.
++ *
++ * recovered_regions: Regions that kcopyd has successfully
++ * recovered. rh_update_states() will now schedule any delayed
++ * io, up the recovery_count, and remove the region from the
++ * hash.
++ *
++ * There are 2 locks:
++ * A rw spin lock 'hash_lock' protects just the hash table,
++ * this is never held in write mode from interrupt context,
++ * which I believe means that we only have to disable irqs when
++ * doing a write lock.
++ *
++ * An ordinary spin lock 'region_lock' that protects the three
++ * lists in the region_hash, with the 'state', 'list' and
++ * 'bhs_delayed' fields of the regions. This is used from irq
++ * context, so all other uses will have to suspend local irqs.
++ *---------------------------------------------------------------*/
++struct mirror_set;
++struct region_hash {
++ struct mirror_set *ms;
++ sector_t region_size;
++
++ /* holds persistent region state */
++ struct dirty_log *log;
++
++ /* hash table */
++ rwlock_t hash_lock;
++ mempool_t *region_pool;
++ unsigned int mask;
++ unsigned int nr_buckets;
++ struct list_head *buckets;
++
++ spinlock_t region_lock;
++ struct semaphore recovery_count;
++ struct list_head clean_regions;
++ struct list_head quiesced_regions;
++ struct list_head recovered_regions;
++};
++
++enum {
++ RH_CLEAN,
++ RH_DIRTY,
++ RH_NOSYNC,
++ RH_RECOVERING
++};
++
++struct region {
++ struct region_hash *rh; /* FIXME: can we get rid of this ? */
++ region_t key;
++ int state;
++
++ struct list_head hash_list;
++ struct list_head list;
++
++ atomic_t pending;
++ struct buffer_head *delayed_bhs;
++};
++
++/*
++ * Conversion fns
++ */
++static inline region_t bh_to_region(struct region_hash *rh,
++ struct buffer_head *bh)
++{
++ return bh->b_rsector / rh->region_size;
++}
++
++static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
++{
++ return region * rh->region_size;
++}
++
++/* FIXME move this */
++static void queue_bh(struct mirror_set *ms, struct buffer_head *bh, int rw);
++
++static void *region_alloc(int gfp_mask, void *pool_data)
++{
++ return kmalloc(sizeof(struct region), gfp_mask);
++}
++
++static void region_free(void *element, void *pool_data)
++{
++ kfree(element);
++}
++
++#define MIN_REGIONS 64
++#define MAX_RECOVERY 1
++static int rh_init(struct region_hash *rh, struct mirror_set *ms,
++ struct dirty_log *log, sector_t region_size,
++ region_t nr_regions)
++{
++ unsigned int nr_buckets, max_buckets;
++ size_t i;
++
++ /*
++ * Calculate a suitable number of buckets for our hash
++ * table.
++ */
++ max_buckets = nr_regions >> 6;
++ for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
++ ;
++ nr_buckets >>= 1;
++
++ rh->ms = ms;
++ rh->log = log;
++ rh->region_size = region_size;
++ rwlock_init(&rh->hash_lock);
++ rh->mask = nr_buckets - 1;
++ rh->nr_buckets = nr_buckets;
++
++ rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
++ if (!rh->buckets) {
++ DMERR("unable to allocate region hash memory");
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < nr_buckets; i++)
++ INIT_LIST_HEAD(rh->buckets + i);
++
++ spin_lock_init(&rh->region_lock);
++ sema_init(&rh->recovery_count, 0);
++ INIT_LIST_HEAD(&rh->clean_regions);
++ INIT_LIST_HEAD(&rh->quiesced_regions);
++ INIT_LIST_HEAD(&rh->recovered_regions);
++
++ rh->region_pool = mempool_create(MIN_REGIONS, region_alloc,
++ region_free, NULL);
++ if (!rh->region_pool) {
++ vfree(rh->buckets);
++ rh->buckets = NULL;
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static void rh_exit(struct region_hash *rh)
++{
++ unsigned int h;
++ struct region *reg;
++ struct list_head *tmp, *tmp2;
++
++ BUG_ON(!list_empty(&rh->quiesced_regions));
++ for (h = 0; h < rh->nr_buckets; h++) {
++ list_for_each_safe (tmp, tmp2, rh->buckets + h) {
++ reg = list_entry(tmp, struct region, hash_list);
++ BUG_ON(atomic_read(&reg->pending));
++ mempool_free(reg, rh->region_pool);
++ }
++ }
++
++ if (rh->log)
++ dm_destroy_dirty_log(rh->log);
++ if (rh->region_pool)
++ mempool_destroy(rh->region_pool);
++ vfree(rh->buckets);
++}
++
++#define RH_HASH_MULT 2654435387U
++
++static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
++{
++ return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
++}
++
++static struct region *__rh_lookup(struct region_hash *rh, region_t region)
++{
++ struct region *reg;
++
++ list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
++ if (reg->key == region)
++ return reg;
++
++ return NULL;
++}
++
++static void __rh_insert(struct region_hash *rh, struct region *reg)
++{
++ unsigned int h = rh_hash(rh, reg->key);
++ list_add(&reg->hash_list, rh->buckets + h);
++}
++
++static struct region *__rh_alloc(struct region_hash *rh, region_t region)
++{
++ struct region *reg, *nreg;
++
++ read_unlock(&rh->hash_lock);
++ nreg = mempool_alloc(rh->region_pool, GFP_NOIO);
++ nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
++ RH_CLEAN : RH_NOSYNC;
++ nreg->rh = rh;
++ nreg->key = region;
++
++ INIT_LIST_HEAD(&nreg->list);
++
++ atomic_set(&nreg->pending, 0);
++ nreg->delayed_bhs = NULL;
++ write_lock_irq(&rh->hash_lock);
++
++ reg = __rh_lookup(rh, region);
++ if (reg)
++ /* we lost the race */
++ mempool_free(nreg, rh->region_pool);
++
++ else {
++ __rh_insert(rh, nreg);
++ if (nreg->state == RH_CLEAN) {
++ spin_lock_irq(&rh->region_lock);
++ list_add(&nreg->list, &rh->clean_regions);
++ spin_unlock_irq(&rh->region_lock);
++ }
++ reg = nreg;
++ }
++ write_unlock_irq(&rh->hash_lock);
++ read_lock(&rh->hash_lock);
++
++ return reg;
++}
++
++static inline struct region *__rh_find(struct region_hash *rh, region_t region)
++{
++ struct region *reg;
++
++ reg = __rh_lookup(rh, region);
++ if (!reg)
++ reg = __rh_alloc(rh, region);
++
++ return reg;
++}
++
++static int rh_state(struct region_hash *rh, region_t region, int may_block)
++{
++ int r;
++ struct region *reg;
++
++ read_lock(&rh->hash_lock);
++ reg = __rh_lookup(rh, region);
++ read_unlock(&rh->hash_lock);
++
++ if (reg)
++ return reg->state;
++
++ /*
++ * The region wasn't in the hash, so we fall back to the
++ * dirty log.
++ */
++ r = rh->log->type->in_sync(rh->log, region, may_block);
++
++ /*
++ * Any error from the dirty log (eg. -EWOULDBLOCK) gets
++ * taken as a RH_NOSYNC
++ */
++ return r == 1 ? RH_CLEAN : RH_NOSYNC;
++}
++
++static inline int rh_in_sync(struct region_hash *rh,
++ region_t region, int may_block)
++{
++ int state = rh_state(rh, region, may_block);
++ return state == RH_CLEAN || state == RH_DIRTY;
++}
++
++static void dispatch_buffers(struct mirror_set *ms, struct buffer_head *bh)
++{
++ struct buffer_head *nbh;
++
++ while (bh) {
++ nbh = bh->b_reqnext;
++ queue_bh(ms, bh, WRITE);
++ bh = nbh;
++ }
++}
++
++static void rh_update_states(struct region_hash *rh)
++{
++ struct list_head *tmp, *tmp2;
++ struct region *reg;
++
++ LIST_HEAD(clean);
++ LIST_HEAD(recovered);
++
++ /*
++ * Quickly grab the lists.
++ */
++ write_lock_irq(&rh->hash_lock);
++ spin_lock(&rh->region_lock);
++ if (!list_empty(&rh->clean_regions)) {
++ list_splice(&rh->clean_regions, &clean);
++ INIT_LIST_HEAD(&rh->clean_regions);
++
++ list_for_each_entry (reg, &clean, list) {
++ rh->log->type->clear_region(rh->log, reg->key);
++ list_del(&reg->hash_list);
++ }
++ }
++
++ if (!list_empty(&rh->recovered_regions)) {
++ list_splice(&rh->recovered_regions, &recovered);
++ INIT_LIST_HEAD(&rh->recovered_regions);
++
++ list_for_each_entry (reg, &recovered, list)
++ list_del(&reg->hash_list);
++ }
++ spin_unlock(&rh->region_lock);
++ write_unlock_irq(&rh->hash_lock);
++
++ /*
++ * All the regions on the recovered and clean lists have
++ * now been pulled out of the system, so no need to do
++ * any more locking.
++ */
++ list_for_each_safe (tmp, tmp2, &recovered) {
++ reg = list_entry(tmp, struct region, list);
++
++ rh->log->type->complete_resync_work(rh->log, reg->key, 1);
++ dispatch_buffers(rh->ms, reg->delayed_bhs);
++ up(&rh->recovery_count);
++ mempool_free(reg, rh->region_pool);
++ }
++
++ list_for_each_safe (tmp, tmp2, &clean) {
++ reg = list_entry(tmp, struct region, list);
++ mempool_free(reg, rh->region_pool);
++ }
++}
++
++static void rh_inc(struct region_hash *rh, region_t region)
++{
++ struct region *reg;
++
++ read_lock(&rh->hash_lock);
++ reg = __rh_find(rh, region);
++ if (reg->state == RH_CLEAN) {
++ rh->log->type->mark_region(rh->log, reg->key);
++
++ spin_lock_irq(&rh->region_lock);
++ reg->state = RH_DIRTY;
++ list_del_init(&reg->list); /* take off the clean list */
++ spin_unlock_irq(&rh->region_lock);
++ }
++
++ atomic_inc(&reg->pending);
++ read_unlock(&rh->hash_lock);
++}
++
++static void rh_inc_pending(struct region_hash *rh, struct buffer_list *buffers)
++{
++ struct buffer_head *bh;
++
++ for (bh = buffers->head; bh; bh = bh->b_reqnext)
++ rh_inc(rh, bh_to_region(rh, bh));
++}
++
++static void rh_dec(struct region_hash *rh, region_t region)
++{
++ unsigned long flags;
++ struct region *reg;
++ int wake = 0;
++
++ read_lock(&rh->hash_lock);
++ reg = __rh_lookup(rh, region);
++ read_unlock(&rh->hash_lock);
++
++ if (atomic_dec_and_test(&reg->pending)) {
++ spin_lock_irqsave(&rh->region_lock, flags);
++ if (reg->state == RH_RECOVERING) {
++ list_add_tail(&reg->list, &rh->quiesced_regions);
++ } else {
++ reg->state = RH_CLEAN;
++ list_add(&reg->list, &rh->clean_regions);
++ }
++ spin_unlock_irqrestore(&rh->region_lock, flags);
++ wake = 1;
++ }
++
++ if (wake)
++ dm_daemon_wake(&_kmirrord);
++}
++
++/*
++ * Starts quiescing a region in preparation for recovery.
++ */
++static int __rh_recovery_prepare(struct region_hash *rh)
++{
++ int r;
++ struct region *reg;
++ region_t region;
++
++ /*
++ * Ask the dirty log what's next.
++ */
++ r = rh->log->type->get_resync_work(rh->log, &region);
++ if (r <= 0)
++ return r;
++
++ /*
++ * Get this region, and start it quiescing by setting the
++ * recovering flag.
++ */
++ read_lock(&rh->hash_lock);
++ reg = __rh_find(rh, region);
++ read_unlock(&rh->hash_lock);
++
++ spin_lock_irq(&rh->region_lock);
++ reg->state = RH_RECOVERING;
++
++ /* Already quiesced ? */
++ if (atomic_read(&reg->pending))
++ list_del_init(&reg->list);
++
++ else {
++ list_del_init(&reg->list);
++ list_add(&reg->list, &rh->quiesced_regions);
++ }
++ spin_unlock_irq(&rh->region_lock);
++
++ return 1;
++}
++
++static void rh_recovery_prepare(struct region_hash *rh)
++{
++ while (!down_trylock(&rh->recovery_count))
++ if (__rh_recovery_prepare(rh) <= 0) {
++ up(&rh->recovery_count);
++ break;
++ }
++}
++
++/*
++ * Returns any quiesced regions.
++ */
++static struct region *rh_recovery_start(struct region_hash *rh)
++{
++ struct region *reg = NULL;
++
++ spin_lock_irq(&rh->region_lock);
++ if (!list_empty(&rh->quiesced_regions)) {
++ reg = list_entry(rh->quiesced_regions.next,
++ struct region, list);
++ list_del_init(&reg->list); /* remove from the quiesced list */
++ }
++ spin_unlock_irq(&rh->region_lock);
++
++ return reg;
++}
++
++/* FIXME: success ignored for now */
++static void rh_recovery_end(struct region *reg, int success)
++{
++ struct region_hash *rh = reg->rh;
++
++ spin_lock_irq(&rh->region_lock);
++ list_add(&reg->list, &reg->rh->recovered_regions);
++ spin_unlock_irq(&rh->region_lock);
++
++ dm_daemon_wake(&_kmirrord);
++}
++
++static void rh_flush(struct region_hash *rh)
++{
++ rh->log->type->flush(rh->log);
++}
++
++static void rh_delay(struct region_hash *rh, struct buffer_head *bh)
++{
++ struct region *reg;
++
++ read_lock(&rh->hash_lock);
++ reg = __rh_find(rh, bh_to_region(rh, bh));
++ bh->b_reqnext = reg->delayed_bhs;
++ reg->delayed_bhs = bh;
++ read_unlock(&rh->hash_lock);
++}
++
++static void rh_stop_recovery(struct region_hash *rh)
++{
++ int i;
++
++ /* wait for any recovering regions */
++ for (i = 0; i < MAX_RECOVERY; i++)
++ down(&rh->recovery_count);
++}
++
++static void rh_start_recovery(struct region_hash *rh)
++{
++ int i;
++
++ for (i = 0; i < MAX_RECOVERY; i++)
++ up(&rh->recovery_count);
++
++ dm_daemon_wake(&_kmirrord);
++}
++
++/*-----------------------------------------------------------------
++ * Mirror set structures.
++ *---------------------------------------------------------------*/
++struct mirror {
++ atomic_t error_count;
++ struct dm_dev *dev;
++ sector_t offset;
++};
++
++struct mirror_set {
++ struct dm_target *ti;
++ struct list_head list;
++ struct region_hash rh;
++ struct kcopyd_client *kcopyd_client;
++
++ spinlock_t lock; /* protects the next two lists */
++ struct buffer_list reads;
++ struct buffer_list writes;
++
++ /* recovery */
++ region_t nr_regions;
++ region_t sync_count;
++
++ unsigned int nr_mirrors;
++ struct mirror mirror[0];
++};
++
++/*
++ * Every mirror should look like this one.
++ */
++#define DEFAULT_MIRROR 0
++
++/*
++ * This is yucky. We squirrel the mirror_set struct away inside
++ * b_reqnext for write buffers. This is safe since the bh
++ * doesn't get submitted to the lower levels of block layer.
++ */
++static struct mirror_set *bh_get_ms(struct buffer_head *bh)
++{
++ return (struct mirror_set *) bh->b_reqnext;
++}
++
++static void bh_set_ms(struct buffer_head *bh, struct mirror_set *ms)
++{
++ bh->b_reqnext = (struct buffer_head *) ms;
++}
++
++/*-----------------------------------------------------------------
++ * Recovery.
++ *
++ * When a mirror is first activated we may find that some regions
++ * are in the no-sync state. We have to recover these by
++ * recopying from the default mirror to all the others.
++ *---------------------------------------------------------------*/
++static void recovery_complete(int read_err, unsigned int write_err,
++ void *context)
++{
++ struct region *reg = (struct region *) context;
++ struct mirror_set *ms = reg->rh->ms;
++
++ /* FIXME: better error handling */
++ rh_recovery_end(reg, read_err || write_err);
++ if (++ms->sync_count == ms->nr_regions)
++ /* the sync is complete */
++ dm_table_event(ms->ti->table);
++}
++
++static int recover(struct mirror_set *ms, struct region *reg)
++{
++ int r;
++ unsigned int i;
++ struct io_region from, to[ms->nr_mirrors - 1], *dest;
++ struct mirror *m;
++ unsigned int flags = 0;
++
++ /* fill in the source */
++ m = ms->mirror + DEFAULT_MIRROR;
++ from.dev = m->dev->dev;
++ from.sector = m->offset + region_to_sector(reg->rh, reg->key);
++ if (reg->key == (ms->nr_regions - 1)) {
++ /*
++ * The final region may be smaller than
++ * region_size.
++ */
++ from.count = ms->ti->len & (reg->rh->region_size - 1);
++ if (!from.count)
++ from.count = reg->rh->region_size;
++ } else
++ from.count = reg->rh->region_size;
++
++ /* fill in the destinations */
++ for (i = 1; i < ms->nr_mirrors; i++) {
++ m = ms->mirror + i;
++ dest = to + (i - 1);
++
++ dest->dev = m->dev->dev;
++ dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
++ dest->count = from.count;
++ }
++
++ /* hand to kcopyd */
++ set_bit(KCOPYD_IGNORE_ERROR, &flags);
++ r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
++ recovery_complete, reg);
++
++ return r;
++}
++
++static void do_recovery(struct mirror_set *ms)
++{
++ int r;
++ struct region *reg;
++
++ /*
++ * Start quiescing some regions.
++ */
++ rh_recovery_prepare(&ms->rh);
++
++ /*
++ * Copy any already quiesced regions.
++ */
++ while ((reg = rh_recovery_start(&ms->rh))) {
++ r = recover(ms, reg);
++ if (r)
++ rh_recovery_end(reg, 0);
++ }
++}
++
++/*-----------------------------------------------------------------
++ * Reads
++ *---------------------------------------------------------------*/
++static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
++{
++ /* FIXME: add read balancing */
++ return ms->mirror + DEFAULT_MIRROR;
++}
++
++/*
++ * remap a buffer to a particular mirror.
++ */
++static void map_buffer(struct mirror_set *ms,
++ struct mirror *m, struct buffer_head *bh)
++{
++ sector_t bsize = bh->b_size >> 9;
++ sector_t rsector = bh->b_blocknr * bsize;
++
++ bh->b_rdev = m->dev->dev;
++ bh->b_rsector = m->offset + (rsector - ms->ti->begin);
++}
++
++static void do_reads(struct mirror_set *ms, struct buffer_list *reads)
++{
++ region_t region;
++ struct buffer_head *bh;
++ struct mirror *m;
++
++ while ((bh = buffer_list_pop(reads))) {
++ region = bh_to_region(&ms->rh, bh);
++
++ /*
++ * We can only read balance if the region is in sync.
++ */
++ if (rh_in_sync(&ms->rh, region, 0))
++ m = choose_mirror(ms, bh->b_rsector);
++ else
++ m = ms->mirror + DEFAULT_MIRROR;
++
++ map_buffer(ms, m, bh);
++ generic_make_request(READ, bh);
++ }
++}
++
++/*-----------------------------------------------------------------
++ * Writes.
++ *
++ * We do different things with the write io depending on the
++ * state of the region that it's in:
++ *
++ * SYNC: increment pending, use kcopyd to write to *all* mirrors
++ * RECOVERING: delay the io until recovery completes
++ * NOSYNC: increment pending, just write to the default mirror
++ *---------------------------------------------------------------*/
++static void write_callback(unsigned int error, void *context)
++{
++ unsigned int i;
++ int uptodate = 1;
++ struct buffer_head *bh = (struct buffer_head *) context;
++ struct mirror_set *ms;
++
++ ms = bh_get_ms(bh);
++ bh_set_ms(bh, NULL);
++
++ /*
++ * NOTE: We don't decrement the pending count here,
++ * instead it is done by the targets endio function.
++ * This way we handle both writes to SYNC and NOSYNC
++ * regions with the same code.
++ */
++
++ if (error) {
++ /*
++ * only error the io if all mirrors failed.
++ * FIXME: bogus
++ */
++ uptodate = 0;
++ for (i = 0; i < ms->nr_mirrors; i++)
++ if (!test_bit(i, &error)) {
++ uptodate = 1;
++ break;
++ }
++ }
++ bh->b_end_io(bh, uptodate);
++}
++
++static void do_write(struct mirror_set *ms, struct buffer_head *bh)
++{
++ unsigned int i;
++ struct io_region io[ms->nr_mirrors];
++ struct mirror *m;
++
++ for (i = 0; i < ms->nr_mirrors; i++) {
++ m = ms->mirror + i;
++
++ io[i].dev = m->dev->dev;
++ io[i].sector = m->offset + (bh->b_rsector - ms->ti->begin);
++ io[i].count = bh->b_size >> 9;
++ }
++
++ bh_set_ms(bh, ms);
++ dm_io_async(ms->nr_mirrors, io, WRITE, bh->b_page,
++ (unsigned int) bh->b_data & ~PAGE_MASK, write_callback, bh);
++}
++
++static void do_writes(struct mirror_set *ms, struct buffer_list *writes)
++{
++ int state;
++ struct buffer_head *bh;
++ struct buffer_list sync, nosync, recover, *this_list = NULL;
++
++ if (!writes->head)
++ return;
++
++ /*
++ * Classify each write.
++ */
++ buffer_list_init(&sync);
++ buffer_list_init(&nosync);
++ buffer_list_init(&recover);
++
++ while ((bh = buffer_list_pop(writes))) {
++ state = rh_state(&ms->rh, bh_to_region(&ms->rh, bh), 1);
++ switch (state) {
++ case RH_CLEAN:
++ case RH_DIRTY:
++ this_list = &sync;
++ break;
++
++ case RH_NOSYNC:
++ this_list = &nosync;
++ break;
++
++ case RH_RECOVERING:
++ this_list = &recover;
++ break;
++ }
++
++ buffer_list_add(this_list, bh);
++ }
++
++ /*
++ * Increment the pending counts for any regions that will
++ * be written to (writes to recover regions are going to
++ * be delayed).
++ */
++ rh_inc_pending(&ms->rh, &sync);
++ rh_inc_pending(&ms->rh, &nosync);
++ rh_flush(&ms->rh);
++
++ /*
++ * Dispatch io.
++ */
++ while ((bh = buffer_list_pop(&sync)))
++ do_write(ms, bh);
++
++ while ((bh = buffer_list_pop(&recover)))
++ rh_delay(&ms->rh, bh);
++
++ while ((bh = buffer_list_pop(&nosync))) {
++ map_buffer(ms, ms->mirror + DEFAULT_MIRROR, bh);
++ generic_make_request(WRITE, bh);
++ }
++}
++
++/*-----------------------------------------------------------------
++ * kmirrord
++ *---------------------------------------------------------------*/
++static LIST_HEAD(_mirror_sets);
++static DECLARE_RWSEM(_mirror_sets_lock);
++
++static void do_mirror(struct mirror_set *ms)
++{
++ struct buffer_list reads, writes;
++
++ spin_lock(&ms->lock);
++ memcpy(&reads, &ms->reads, sizeof(reads));
++ buffer_list_init(&ms->reads);
++ memcpy(&writes, &ms->writes, sizeof(writes));
++ buffer_list_init(&ms->writes);
++ spin_unlock(&ms->lock);
++
++ rh_update_states(&ms->rh);
++ do_recovery(ms);
++ do_reads(ms, &reads);
++ do_writes(ms, &writes);
++ run_task_queue(&tq_disk);
++}
++
++static void do_work(void)
++{
++ struct mirror_set *ms;
++
++ down_read(&_mirror_sets_lock);
++ list_for_each_entry (ms, &_mirror_sets, list)
++ do_mirror(ms);
++ up_read(&_mirror_sets_lock);
++}
++
++/*-----------------------------------------------------------------
++ * Target functions
++ *---------------------------------------------------------------*/
++static struct mirror_set *alloc_context(unsigned int nr_mirrors,
++ sector_t region_size,
++ struct dm_target *ti,
++ struct dirty_log *dl)
++{
++ size_t len;
++ struct mirror_set *ms = NULL;
++
++ if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
++ return NULL;
++
++ len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
++
++ ms = kmalloc(len, GFP_KERNEL);
++ if (!ms) {
++ ti->error = "dm-mirror: Cannot allocate mirror context";
++ return NULL;
++ }
++
++ memset(ms, 0, len);
++ spin_lock_init(&ms->lock);
++
++ ms->ti = ti;
++ ms->nr_mirrors = nr_mirrors;
++ ms->nr_regions = dm_div_up(ti->len, region_size);
++
++ if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
++ ti->error = "dm-mirror: Error creating dirty region hash";
++ kfree(ms);
++ return NULL;
++ }
++
++ return ms;
++}
++
++static void free_context(struct mirror_set *ms, struct dm_target *ti,
++ unsigned int m)
++{
++ while (m--)
++ dm_put_device(ti, ms->mirror[m].dev);
++
++ rh_exit(&ms->rh);
++ kfree(ms);
++}
++
++static inline int _check_region_size(struct dm_target *ti, sector_t size)
++{
++ return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
++ size > ti->len);
++}
++
++static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
++ unsigned int mirror, char **argv)
++{
++ sector_t offset;
++
++ if (sscanf(argv[1], SECTOR_FORMAT, &offset) != 1) {
++ ti->error = "dm-mirror: Invalid offset";
++ return -EINVAL;
++ }
++
++ if (dm_get_device(ti, argv[0], offset, ti->len,
++ dm_table_get_mode(ti->table),
++ &ms->mirror[mirror].dev)) {
++ ti->error = "dm-mirror: Device lookup failure";
++ return -ENXIO;
++ }
++
++ ms->mirror[mirror].offset = offset;
++
++ return 0;
++}
++
++static int add_mirror_set(struct mirror_set *ms)
++{
++ down_write(&_mirror_sets_lock);
++ list_add_tail(&ms->list, &_mirror_sets);
++ up_write(&_mirror_sets_lock);
++ dm_daemon_wake(&_kmirrord);
++
++ return 0;
++}
++
++static void del_mirror_set(struct mirror_set *ms)
++{
++ down_write(&_mirror_sets_lock);
++ list_del(&ms->list);
++ up_write(&_mirror_sets_lock);
++}
++
++/*
++ * Create dirty log: log_type #log_params <log_params>
++ */
++static struct dirty_log *create_dirty_log(struct dm_target *ti,
++ unsigned int argc, char **argv,
++ unsigned int *args_used)
++{
++ unsigned int param_count;
++ struct dirty_log *dl;
++
++ if (argc < 2) {
++ ti->error = "dm-mirror: Insufficient mirror log arguments";
++ return NULL;
++ }
++
++ if (sscanf(argv[1], "%u", &param_count) != 1 || param_count != 1) {
++ ti->error = "dm-mirror: Invalid mirror log argument count";
++ return NULL;
++ }
++
++ *args_used = 2 + param_count;
++
++ if (argc < *args_used) {
++ ti->error = "dm-mirror: Insufficient mirror log arguments";
++ return NULL;
++ }
++
++ dl = dm_create_dirty_log(argv[0], ti->len, param_count, argv + 2);
++ if (!dl) {
++ ti->error = "dm-mirror: Error creating mirror dirty log";
++ return NULL;
++ }
++
++ if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
++ ti->error = "dm-mirror: Invalid region size";
++ dm_destroy_dirty_log(dl);
++ return NULL;
++ }
++
++ return dl;
++}
++
++/*
++ * Construct a mirror mapping:
++ *
++ * log_type #log_params <log_params>
++ * #mirrors [mirror_path offset]{2,}
++ *
++ * For now, #log_params = 1, log_type = "core"
++ *
++ */
++#define DM_IO_PAGES 64
++static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
++{
++ int r;
++ unsigned int nr_mirrors, m, args_used;
++ struct mirror_set *ms;
++ struct dirty_log *dl;
++
++ dl = create_dirty_log(ti, argc, argv, &args_used);
++ if (!dl)
++ return -EINVAL;
++
++ argv += args_used;
++ argc -= args_used;
++
++ if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
++ nr_mirrors < 2) {
++ ti->error = "dm-mirror: Invalid number of mirrors";
++ dm_destroy_dirty_log(dl);
++ return -EINVAL;
++ }
++
++ argv++, argc--;
++
++ if (argc != nr_mirrors * 2) {
++ ti->error = "dm-mirror: Wrong number of mirror arguments";
++ dm_destroy_dirty_log(dl);
++ return -EINVAL;
++ }
++
++ ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
++ if (!ms) {
++ dm_destroy_dirty_log(dl);
++ return -ENOMEM;
++ }
++
++ /* Get the mirror parameter sets */
++ for (m = 0; m < nr_mirrors; m++) {
++ r = get_mirror(ms, ti, m, argv);
++ if (r) {
++ free_context(ms, ti, m);
++ return r;
++ }
++ argv += 2;
++ argc -= 2;
++ }
++
++ ti->private = ms;
++
++ r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
++ if (r) {
++ free_context(ms, ti, ms->nr_mirrors);
++ return r;
++ }
++
++ add_mirror_set(ms);
++ return 0;
++}
++
++static void mirror_dtr(struct dm_target *ti)
++{
++ struct mirror_set *ms = (struct mirror_set *) ti->private;
++
++ del_mirror_set(ms);
++ kcopyd_client_destroy(ms->kcopyd_client);
++ free_context(ms, ti, ms->nr_mirrors);
++}
++
++static void queue_bh(struct mirror_set *ms, struct buffer_head *bh, int rw)
++{
++ int wake = 0;
++ struct buffer_list *bl;
++
++ bl = (rw == WRITE) ? &ms->writes : &ms->reads;
++ spin_lock(&ms->lock);
++ wake = !(bl->head);
++ buffer_list_add(bl, bh);
++ spin_unlock(&ms->lock);
++
++ if (wake)
++ dm_daemon_wake(&_kmirrord);
++}
++
++/*
++ * Mirror mapping function
++ */
++static int mirror_map(struct dm_target *ti, struct buffer_head *bh,
++ int rw, union map_info *map_context)
++{
++ int r;
++ struct mirror *m;
++ struct mirror_set *ms = ti->private;
++
++ /* FIXME: nasty hack, 32 bit sector_t only */
++ map_context->ll = bh->b_rsector / ms->rh.region_size;
++
++ if (rw == WRITE) {
++ queue_bh(ms, bh, rw);
++ return 0;
++ }
++
++ r = ms->rh.log->type->in_sync(ms->rh.log, bh_to_region(&ms->rh, bh), 0);
++ if (r < 0 && r != -EWOULDBLOCK)
++ return r;
++
++ if (r == -EWOULDBLOCK) /* FIXME: ugly */
++ r = 0;
++
++ /*
++ * We don't want to fast track a recovery just for a read
++ * ahead. So we just let it silently fail.
++ * FIXME: get rid of this.
++ */
++ if (!r && rw == READA)
++ return -EIO;
++
++ if (!r) {
++ /* Pass this io over to the daemon */
++ queue_bh(ms, bh, rw);
++ return 0;
++ }
++
++ m = choose_mirror(ms, bh->b_rsector);
++ if (!m)
++ return -EIO;
++
++ map_buffer(ms, m, bh);
++ return 1;
++}
++
++static int mirror_end_io(struct dm_target *ti, struct buffer_head *bh,
++ int rw, int error, union map_info *map_context)
++{
++ struct mirror_set *ms = (struct mirror_set *) ti->private;
++ region_t region = map_context->ll;
++
++ /*
++ * We need to dec pending if this was a write.
++ */
++ if (rw == WRITE)
++ rh_dec(&ms->rh, region);
++
++ return 0;
++}
++
++static void mirror_suspend(struct dm_target *ti)
++{
++ struct mirror_set *ms = (struct mirror_set *) ti->private;
++ rh_stop_recovery(&ms->rh);
++}
++
++static void mirror_resume(struct dm_target *ti)
++{
++ struct mirror_set *ms = (struct mirror_set *) ti->private;
++ rh_start_recovery(&ms->rh);
++}
++
++static int mirror_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned int maxlen)
++{
++ unsigned int m, sz = 0;
++ struct mirror_set *ms = (struct mirror_set *) ti->private;
++
++ switch (type) {
++ case STATUSTYPE_INFO:
++ sz += snprintf(result + sz, maxlen - sz, "%d ", ms->nr_mirrors);
++
++ for (m = 0; m < ms->nr_mirrors; m++)
++ sz += snprintf(result + sz, maxlen - sz, "%s ",
++ dm_kdevname(ms->mirror[m].dev->dev));
++
++ sz += snprintf(result + sz, maxlen - sz, "%lu/%lu",
++ ms->sync_count, ms->nr_regions);
++ break;
++
++ case STATUSTYPE_TABLE:
++ sz += snprintf(result + sz, maxlen - sz,
++ "%s 1 " SECTOR_FORMAT " %d ",
++ ms->rh.log->type->name, ms->rh.region_size,
++ ms->nr_mirrors);
++
++ for (m = 0; m < ms->nr_mirrors; m++)
++ sz += snprintf(result + sz, maxlen - sz, "%s %ld ",
++ dm_kdevname(ms->mirror[m].dev->dev),
++ ms->mirror[m].offset);
++ }
++
++ return 0;
++}
++
++static struct target_type mirror_target = {
++ .name = "mirror",
++ .module = THIS_MODULE,
++ .ctr = mirror_ctr,
++ .dtr = mirror_dtr,
++ .map = mirror_map,
++ .end_io = mirror_end_io,
++ .suspend = mirror_suspend,
++ .resume = mirror_resume,
++ .status = mirror_status,
++};
++
++static int __init dm_mirror_init(void)
++{
++ int r;
++
++ r = dm_dirty_log_init();
++ if (r)
++ return r;
++
++ r = dm_daemon_start(&_kmirrord, "kmirrord", do_work);
++ if (r) {
++ DMERR("couldn't start kmirrord");
++ dm_dirty_log_exit();
++ return r;
++ }
++
++ r = dm_register_target(&mirror_target);
++ if (r < 0) {
++ DMERR("%s: Failed to register mirror target",
++ mirror_target.name);
++ dm_dirty_log_exit();
++ dm_daemon_stop(&_kmirrord);
++ }
++
++ return r;
++}
++
++static void __exit dm_mirror_exit(void)
++{
++ int r;
++
++ r = dm_unregister_target(&mirror_target);
++ if (r < 0)
++ DMERR("%s: unregister failed %d", mirror_target.name, r);
++
++ dm_daemon_stop(&_kmirrord);
++ dm_dirty_log_exit();
++}
++
++/* Module hooks */
++module_init(dm_mirror_init);
++module_exit(dm_mirror_exit);
++
++MODULE_DESCRIPTION(DM_NAME " mirror target");
++MODULE_AUTHOR("Heinz Mauelshagen <mge@sistina.com>");
++MODULE_LICENSE("GPL");
+diff -urN linux-2.4.22/drivers/md/dm-snapshot.c linux-2.4.22-dm/drivers/md/dm-snapshot.c
+--- linux-2.4.22/drivers/md/dm-snapshot.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-snapshot.c 2003-09-15 17:02:30.000000000 +0200
+@@ -0,0 +1,1232 @@
++/*
++ * dm-snapshot.c
++ *
++ * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#include <linux/config.h>
++#include <linux/ctype.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/list.h>
++#include <linux/fs.h>
++#include <linux/blkdev.h>
++#include <linux/mempool.h>
++#include <linux/device-mapper.h>
++#include <linux/vmalloc.h>
++
++#include "dm-snapshot.h"
++#include "kcopyd.h"
++
++/*
++ * FIXME: Remove this before release.
++ */
++#if 0
++#define DMDEBUG(x...) DMWARN( ## x)
++#else
++#define DMDEBUG(x...)
++#endif
++
++/*
++ * The percentage increment we will wake up users at
++ */
++#define WAKE_UP_PERCENT 5
++
++/*
++ * kcopyd priority of snapshot operations
++ */
++#define SNAPSHOT_COPY_PRIORITY 2
++
++/*
++ * Each snapshot reserves this many pages for io
++ * FIXME: calculate this
++ */
++#define SNAPSHOT_PAGES 256
++
++struct pending_exception {
++ struct exception e;
++
++ /*
++ * Origin buffers waiting for this to complete are held
++ * in a list (using b_reqnext).
++ */
++ struct buffer_head *origin_bhs;
++ struct buffer_head *snapshot_bhs;
++
++ /*
++ * Other pending_exceptions that are processing this
++ * chunk. When this list is empty, we know we can
++ * complete the origins.
++ */
++ struct list_head siblings;
++
++ /* Pointer back to snapshot context */
++ struct dm_snapshot *snap;
++
++ /*
++ * 1 indicates the exception has already been sent to
++ * kcopyd.
++ */
++ int started;
++};
++
++/*
++ * Hash table mapping origin volumes to lists of snapshots and
++ * a lock to protect it
++ */
++static kmem_cache_t *exception_cache;
++static kmem_cache_t *pending_cache;
++static mempool_t *pending_pool;
++
++/*
++ * One of these per registered origin, held in the snapshot_origins hash
++ */
++struct origin {
++ /* The origin device */
++ kdev_t dev;
++
++ struct list_head hash_list;
++
++ /* List of snapshots for this origin */
++ struct list_head snapshots;
++};
++
++/*
++ * Size of the hash table for origin volumes. If we make this
++ * the size of the minors list then it should be nearly perfect
++ */
++#define ORIGIN_HASH_SIZE 256
++#define ORIGIN_MASK 0xFF
++static struct list_head *_origins;
++static struct rw_semaphore _origins_lock;
++
++static int init_origin_hash(void)
++{
++ int i;
++
++ _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
++ GFP_KERNEL);
++ if (!_origins) {
++ DMERR("Device mapper: Snapshot: unable to allocate memory");
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < ORIGIN_HASH_SIZE; i++)
++ INIT_LIST_HEAD(_origins + i);
++ init_rwsem(&_origins_lock);
++
++ return 0;
++}
++
++static void exit_origin_hash(void)
++{
++ kfree(_origins);
++}
++
++static inline unsigned int origin_hash(kdev_t dev)
++{
++ return MINOR(dev) & ORIGIN_MASK;
++}
++
++static struct origin *__lookup_origin(kdev_t origin)
++{
++ struct list_head *slist;
++ struct list_head *ol;
++ struct origin *o;
++
++ ol = &_origins[origin_hash(origin)];
++ list_for_each(slist, ol) {
++ o = list_entry(slist, struct origin, hash_list);
++
++ if (o->dev == origin)
++ return o;
++ }
++
++ return NULL;
++}
++
++static void __insert_origin(struct origin *o)
++{
++ struct list_head *sl = &_origins[origin_hash(o->dev)];
++ list_add_tail(&o->hash_list, sl);
++}
++
++/*
++ * Make a note of the snapshot and its origin so we can look it
++ * up when the origin has a write on it.
++ */
++static int register_snapshot(struct dm_snapshot *snap)
++{
++ struct origin *o;
++ kdev_t dev = snap->origin->dev;
++
++ down_write(&_origins_lock);
++ o = __lookup_origin(dev);
++
++ if (!o) {
++ /* New origin */
++ o = kmalloc(sizeof(*o), GFP_KERNEL);
++ if (!o) {
++ up_write(&_origins_lock);
++ return -ENOMEM;
++ }
++
++ /* Initialise the struct */
++ INIT_LIST_HEAD(&o->snapshots);
++ o->dev = dev;
++
++ __insert_origin(o);
++ }
++
++ list_add_tail(&snap->list, &o->snapshots);
++
++ up_write(&_origins_lock);
++ return 0;
++}
++
++static void unregister_snapshot(struct dm_snapshot *s)
++{
++ struct origin *o;
++
++ down_write(&_origins_lock);
++ o = __lookup_origin(s->origin->dev);
++
++ list_del(&s->list);
++ if (list_empty(&o->snapshots)) {
++ list_del(&o->hash_list);
++ kfree(o);
++ }
++
++ up_write(&_origins_lock);
++}
++
++/*
++ * Implementation of the exception hash tables.
++ */
++static int init_exception_table(struct exception_table *et, uint32_t size)
++{
++ unsigned int i;
++
++ et->hash_mask = size - 1;
++ et->table = vcalloc(size, sizeof(struct list_head));
++ if (!et->table)
++ return -ENOMEM;
++
++ for (i = 0; i < size; i++)
++ INIT_LIST_HEAD(et->table + i);
++
++ return 0;
++}
++
++static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem)
++{
++ struct list_head *slot, *entry, *temp;
++ struct exception *ex;
++ int i, size;
++
++ size = et->hash_mask + 1;
++ for (i = 0; i < size; i++) {
++ slot = et->table + i;
++
++ list_for_each_safe(entry, temp, slot) {
++ ex = list_entry(entry, struct exception, hash_list);
++ kmem_cache_free(mem, ex);
++ }
++ }
++
++ vfree(et->table);
++}
++
++/*
++ * FIXME: check how this hash fn is performing.
++ */
++static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
++{
++ return chunk & et->hash_mask;
++}
++
++static void insert_exception(struct exception_table *eh, struct exception *e)
++{
++ struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
++ list_add(&e->hash_list, l);
++}
++
++static inline void remove_exception(struct exception *e)
++{
++ list_del(&e->hash_list);
++}
++
++/*
++ * Return the exception data for a sector, or NULL if not
++ * remapped.
++ */
++static struct exception *lookup_exception(struct exception_table *et,
++ chunk_t chunk)
++{
++ struct list_head *slot, *el;
++ struct exception *e;
++
++ slot = &et->table[exception_hash(et, chunk)];
++ list_for_each(el, slot) {
++ e = list_entry(el, struct exception, hash_list);
++ if (e->old_chunk == chunk)
++ return e;
++ }
++
++ return NULL;
++}
++
++static inline struct exception *alloc_exception(void)
++{
++ struct exception *e;
++
++ e = kmem_cache_alloc(exception_cache, GFP_NOIO);
++ if (!e)
++ e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
++
++ return e;
++}
++
++static inline void free_exception(struct exception *e)
++{
++ kmem_cache_free(exception_cache, e);
++}
++
++static inline struct pending_exception *alloc_pending_exception(void)
++{
++ return mempool_alloc(pending_pool, GFP_NOIO);
++}
++
++static inline void free_pending_exception(struct pending_exception *pe)
++{
++ mempool_free(pe, pending_pool);
++}
++
++int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
++{
++ struct exception *e;
++
++ e = alloc_exception();
++ if (!e)
++ return -ENOMEM;
++
++ e->old_chunk = old;
++ e->new_chunk = new;
++ insert_exception(&s->complete, e);
++ return 0;
++}
++
++/*
++ * Hard coded magic.
++ */
++static int calc_max_buckets(void)
++{
++ unsigned long mem;
++
++ mem = num_physpages << PAGE_SHIFT;
++ mem /= 50;
++ mem /= sizeof(struct list_head);
++
++ return mem;
++}
++
++/*
++ * Rounds a number down to a power of 2.
++ */
++static inline uint32_t round_down(uint32_t n)
++{
++ while (n & (n - 1))
++ n &= (n - 1);
++ return n;
++}
++
++/*
++ * Allocate room for a suitable hash table.
++ */
++static int init_hash_tables(struct dm_snapshot *s)
++{
++ sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
++
++ /*
++ * Calculate based on the size of the original volume or
++ * the COW volume...
++ */
++ cow_dev_size = get_dev_size(s->cow->dev);
++ origin_dev_size = get_dev_size(s->origin->dev);
++ max_buckets = calc_max_buckets();
++
++ hash_size = min(origin_dev_size, cow_dev_size) / s->chunk_size;
++ hash_size = min(hash_size, max_buckets);
++
++ /* Round it down to a power of 2 */
++ hash_size = round_down(hash_size);
++ if (init_exception_table(&s->complete, hash_size))
++ return -ENOMEM;
++
++ /*
++ * Allocate hash table for in-flight exceptions
++ * Make this smaller than the real hash table
++ */
++ hash_size >>= 3;
++ if (!hash_size)
++ hash_size = 64;
++
++ if (init_exception_table(&s->pending, hash_size)) {
++ exit_exception_table(&s->complete, exception_cache);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++/*
++ * Round a number up to the nearest 'size' boundary. size must
++ * be a power of 2.
++ */
++static inline ulong round_up(ulong n, ulong size)
++{
++ size--;
++ return (n + size) & ~size;
++}
++
++/*
++ * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
++ */
++static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
++{
++ struct dm_snapshot *s;
++ unsigned long chunk_size;
++ int r = -EINVAL;
++ char persistent;
++ char *origin_path;
++ char *cow_path;
++ char *value;
++ int blocksize;
++
++ /*
++ * NOTE(review): the message says "exactly 4 arguments" but the
++ * test is 'argc < 4', so extra arguments are silently ignored.
++ */
++ if (argc < 4) {
++ ti->error = "dm-snapshot: requires exactly 4 arguments";
++ r = -EINVAL;
++ goto bad1;
++ }
++
++ origin_path = argv[0];
++ cow_path = argv[1];
++ persistent = toupper(*argv[2]);
++
++ if (persistent != 'P' && persistent != 'N') {
++ ti->error = "Persistent flag is not P or N";
++ r = -EINVAL;
++ goto bad1;
++ }
++
++ /*
++ * NOTE(review): simple_strtoul never leaves 'value' NULL, so the
++ * second test is a no-op; trailing garbage ("64x") is accepted.
++ */
++ chunk_size = simple_strtoul(argv[3], &value, 10);
++ if (chunk_size == 0 || value == NULL) {
++ ti->error = "Invalid chunk size";
++ r = -EINVAL;
++ goto bad1;
++ }
++
++ s = kmalloc(sizeof(*s), GFP_KERNEL);
++ if (s == NULL) {
++ ti->error = "Cannot allocate snapshot context private "
++ "structure";
++ r = -ENOMEM;
++ goto bad1;
++ }
++
++ r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
++ if (r) {
++ ti->error = "Cannot get origin device";
++ goto bad2;
++ }
++
++ /* FIXME: get cow length */
++ r = dm_get_device(ti, cow_path, 0, 0,
++ FMODE_READ | FMODE_WRITE, &s->cow);
++ if (r) {
++ dm_put_device(ti, s->origin);
++ ti->error = "Cannot get COW device";
++ goto bad2;
++ }
++
++ /*
++ * Chunk size must be multiple of page size. Silently
++ * round up if it's not.
++ */
++ chunk_size = round_up(chunk_size, PAGE_SIZE / SECTOR_SIZE);
++
++ /* Validate the chunk size against the device block size */
++ blocksize = get_hardsect_size(s->cow->dev);
++ if (chunk_size % (blocksize / SECTOR_SIZE)) {
++ ti->error = "Chunk size is not a multiple of device blocksize";
++ r = -EINVAL;
++ goto bad3;
++ }
++
++ /* Check the sizes are small enough to fit in one kiovec */
++ if (chunk_size > KIO_MAX_SECTORS) {
++ ti->error = "Chunk size is too big";
++ r = -EINVAL;
++ goto bad3;
++ }
++
++ /* Check chunk_size is a power of 2 */
++ if (chunk_size & (chunk_size - 1)) {
++ ti->error = "Chunk size is not a power of 2";
++ r = -EINVAL;
++ goto bad3;
++ }
++
++ s->chunk_size = chunk_size;
++ s->chunk_mask = chunk_size - 1;
++ s->type = persistent;
++ /* Compute chunk_shift = log2(chunk_size); clobbers chunk_size. */
++ for (s->chunk_shift = 0; chunk_size;
++ s->chunk_shift++, chunk_size >>= 1)
++ ;
++ s->chunk_shift--;
++
++ s->valid = 1;
++ s->have_metadata = 0;
++ s->last_percent = 0;
++ init_rwsem(&s->lock);
++ s->table = ti->table;
++
++ /* Allocate hash table for COW data */
++ if (init_hash_tables(s)) {
++ ti->error = "Unable to allocate hash table space";
++ r = -ENOMEM;
++ goto bad3;
++ }
++
++ /*
++ * Check the persistent flag - done here because we need the iobuf
++ * to check the LV header
++ */
++ s->store.snap = s;
++
++ if (persistent == 'P')
++ r = dm_create_persistent(&s->store, s->chunk_size);
++ else
++ r = dm_create_transient(&s->store, s, blocksize);
++
++ if (r) {
++ ti->error = "Couldn't create exception store";
++ r = -EINVAL;
++ goto bad4;
++ }
++
++ r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
++ if (r) {
++ ti->error = "Could not create kcopyd client";
++ goto bad5;
++ }
++
++ /* Add snapshot to the list of snapshots for this origin */
++ if (register_snapshot(s)) {
++ r = -EINVAL;
++ ti->error = "Cannot register snapshot origin";
++ goto bad6;
++ }
++
++ ti->private = s;
++ return 0;
++
++ /* Unwind in strict reverse order of construction. */
++ bad6:
++ kcopyd_client_destroy(s->kcopyd_client);
++
++ bad5:
++ s->store.destroy(&s->store);
++
++ bad4:
++ exit_exception_table(&s->pending, pending_cache);
++ exit_exception_table(&s->complete, exception_cache);
++
++ bad3:
++ dm_put_device(ti, s->cow);
++ dm_put_device(ti, s->origin);
++
++ bad2:
++ kfree(s);
++
++ bad1:
++ return r;
++}
++
++/* Tear down a snapshot target: reverse of snapshot_ctr. */
++static void snapshot_dtr(struct dm_target *ti)
++{
++ struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
++
++ /* Let userspace know the table is going away. */
++ dm_table_event(ti->table);
++
++ /* Detach from the origin first so no new exceptions arrive. */
++ unregister_snapshot(s);
++
++ exit_exception_table(&s->pending, pending_cache);
++ exit_exception_table(&s->complete, exception_cache);
++
++ /* Deallocate memory used */
++ s->store.destroy(&s->store);
++
++ dm_put_device(ti, s->origin);
++ dm_put_device(ti, s->cow);
++ kcopyd_client_destroy(s->kcopyd_client);
++ kfree(s);
++}
++
++/*
++ * We hold lists of buffer_heads, using the b_reqnext field.
++ */
++/* Push one buffer_head onto the front of a b_reqnext-linked list. */
++static void queue_buffer(struct buffer_head **queue, struct buffer_head *bh)
++{
++ bh->b_reqnext = *queue;
++ *queue = bh;
++}
++
++/*
++ * FIXME: inefficient.
++ */
++static void queue_buffers(struct buffer_head **queue, struct buffer_head *bhs)
++{
++ while (*queue)
++ queue = &((*queue)->b_reqnext);
++
++ *queue = bhs;
++}
++
++/*
++ * Flush a list of buffers.
++ */
++static void flush_buffers(struct buffer_head *bh)
++{
++ struct buffer_head *n;
++
++ DMDEBUG("begin flush");
++ /* Detach each bh from the list before submitting it as a WRITE. */
++ while (bh) {
++ n = bh->b_reqnext;
++ bh->b_reqnext = NULL;
++ DMDEBUG("flushing %p", bh);
++ generic_make_request(WRITE, bh);
++ bh = n;
++ }
++
++ /* Kick the block layer so the queued requests actually start. */
++ run_task_queue(&tq_disk);
++}
++
++/*
++ * Error a list of buffers.
++ */
++static void error_buffers(struct buffer_head *bh)
++{
++ struct buffer_head *n;
++
++ /* Complete every buffer on the list with an I/O error. */
++ while (bh) {
++ n = bh->b_reqnext;
++ bh->b_reqnext = NULL;
++ buffer_IO_error(bh);
++ bh = n;
++ }
++}
++
++/*
++ * Called with the snapshot lock held.  If this pe is the last of its
++ * sibling ring, return its origin bh list for the caller to flush;
++ * otherwise hand the bhs to a remaining sibling and return NULL.
++ */
++static struct buffer_head *__flush_bhs(struct pending_exception *pe)
++{
++ struct pending_exception *sibling;
++
++ if (list_empty(&pe->siblings))
++ return pe->origin_bhs;
++
++ sibling = list_entry(pe->siblings.next,
++ struct pending_exception, siblings);
++
++ list_del(&pe->siblings);
++
++ /* FIXME: I think there's a race on SMP machines here, add spin lock */
++ queue_buffers(&sibling->origin_bhs, pe->origin_bhs);
++
++ return NULL;
++}
++
++/*
++ * Complete a pending exception: on success install the completed
++ * exception and release the held buffers; on any failure invalidate
++ * the whole snapshot and error the held buffers.
++ */
++static void pending_complete(struct pending_exception *pe, int success)
++{
++ struct exception *e;
++ struct dm_snapshot *s = pe->snap;
++ struct buffer_head *flush = NULL;
++
++ if (success) {
++ e = alloc_exception();
++ if (!e) {
++ /* Can't record the remap - invalidate the snapshot. */
++ DMWARN("Unable to allocate exception.");
++ down_write(&s->lock);
++ s->store.drop_snapshot(&s->store);
++ s->valid = 0;
++ flush = __flush_bhs(pe);
++ up_write(&s->lock);
++
++ error_buffers(pe->snapshot_bhs);
++ goto out;
++ }
++
++ /*
++ * Add a proper exception, and remove the
++ * in-flight exception from the list.
++ */
++ down_write(&s->lock);
++
++ memcpy(e, &pe->e, sizeof(*e));
++ insert_exception(&s->complete, e);
++ remove_exception(&pe->e);
++ flush = __flush_bhs(pe);
++
++ /* Submit any pending write BHs */
++ up_write(&s->lock);
++
++ flush_buffers(pe->snapshot_bhs);
++ DMDEBUG("Exception completed successfully.");
++
++ /* Notify any interested parties */
++ if (s->store.fraction_full) {
++ sector_t numerator, denominator;
++ int pc;
++
++ s->store.fraction_full(&s->store, &numerator,
++ &denominator);
++ pc = numerator * 100 / denominator;
++
++ /* Raise an event each additional WAKE_UP_PERCENT. */
++ if (pc >= s->last_percent + WAKE_UP_PERCENT) {
++ dm_table_event(s->table);
++ s->last_percent = pc - pc % WAKE_UP_PERCENT;
++ }
++ }
++
++ } else {
++ /* Read/write error - snapshot is unusable */
++ down_write(&s->lock);
++ if (s->valid)
++ DMERR("Error reading/writing snapshot");
++ s->store.drop_snapshot(&s->store);
++ s->valid = 0;
++ remove_exception(&pe->e);
++ flush = __flush_bhs(pe);
++ up_write(&s->lock);
++
++ error_buffers(pe->snapshot_bhs);
++
++ dm_table_event(s->table);
++ DMDEBUG("Exception failed.");
++ }
++
++ out:
++ /* Origin writes held for this chunk can go ahead now. */
++ if (flush)
++ flush_buffers(flush);
++
++ free_pending_exception(pe);
++}
++
++/* Exception-store commit completion: finish the pending exception. */
++static void commit_callback(void *context, int success)
++{
++ struct pending_exception *pe = (struct pending_exception *) context;
++ pending_complete(pe, success);
++}
++
++/*
++ * Called when the copy I/O has finished. kcopyd actually runs
++ * this code so don't block.
++ */
++static void copy_callback(int read_err, unsigned int write_err, void *context)
++{
++ struct pending_exception *pe = (struct pending_exception *) context;
++ struct dm_snapshot *s = pe->snap;
++
++ if (read_err || write_err)
++ pending_complete(pe, 0);
++
++ else
++ /* Update the metadata if we are persistent */
++ s->store.commit_exception(&s->store, &pe->e, commit_callback,
++ pe);
++}
++
++/*
++ * Dispatches the copy operation to kcopyd.
++ */
++static inline void start_copy(struct pending_exception *pe)
++{
++ struct dm_snapshot *s = pe->snap;
++ struct io_region src, dest;
++ kdev_t dev = s->origin->dev;
++ int *sizes = blk_size[major(dev)];
++ /* Default to "unbounded" if the device size is unknown. */
++ sector_t dev_size = (sector_t) -1;
++
++ /* Each pe is copied at most once. */
++ if (pe->started)
++ return;
++
++ /* this is protected by snap->lock */
++ pe->started = 1;
++
++ /* blk_size[] entries are in KiB; << 1 converts to 512B sectors. */
++ if (sizes && sizes[minor(dev)])
++ dev_size = sizes[minor(dev)] << 1;
++
++ src.dev = dev;
++ src.sector = chunk_to_sector(s, pe->e.old_chunk);
++ /* Clamp the last chunk so we don't read past end of device. */
++ src.count = min(s->chunk_size, dev_size - src.sector);
++
++ dest.dev = s->cow->dev;
++ dest.sector = chunk_to_sector(s, pe->e.new_chunk);
++ dest.count = src.count;
++
++ /* Hand over to kcopyd */
++ kcopyd_copy(s->kcopyd_client,
++ &src, 1, &dest, 0, copy_callback, pe);
++}
++
++/*
++ * Looks to see if this snapshot already has a pending exception
++ * for this chunk, otherwise it allocates a new one and inserts
++ * it into the pending table.
++ */
++static struct pending_exception *find_pending_exception(struct dm_snapshot *s,
++ struct buffer_head *bh)
++{
++ struct exception *e;
++ struct pending_exception *pe;
++ chunk_t chunk = sector_to_chunk(s, bh->b_rsector);
++
++ /*
++ * Is there a pending exception for this already ?
++ */
++ e = lookup_exception(&s->pending, chunk);
++ if (e) {
++ /* cast the exception to a pending exception */
++ pe = list_entry(e, struct pending_exception, e);
++
++ } else {
++ /*
++ * Create a new pending exception.  NOTE(review): return
++ * value is not checked - presumably alloc comes from the
++ * mempool and cannot fail; confirm.
++ */
++ pe = alloc_pending_exception();
++ pe->e.old_chunk = chunk;
++ pe->origin_bhs = pe->snapshot_bhs = NULL;
++ INIT_LIST_HEAD(&pe->siblings);
++ pe->snap = s;
++ pe->started = 0;
++
++ /* No room left on the COW device: snapshot is now invalid. */
++ if (s->store.prepare_exception(&s->store, &pe->e)) {
++ free_pending_exception(pe);
++ s->valid = 0;
++ return NULL;
++ }
++
++ insert_exception(&s->pending, &pe->e);
++ }
++
++ return pe;
++}
++
++/* Redirect bh to the COW device, preserving the offset inside the chunk. */
++static inline void remap_exception(struct dm_snapshot *s, struct exception *e,
++ struct buffer_head *bh)
++{
++ bh->b_rdev = s->cow->dev;
++ bh->b_rsector = chunk_to_sector(s, e->new_chunk) +
++ (bh->b_rsector & s->chunk_mask);
++}
++
++/*
++ * Map I/O to the snapshot device.  Returns 1 to let dm submit the
++ * remapped bh, 0 if we queued the bh ourselves, <0 on error.
++ */
++static int snapshot_map(struct dm_target *ti, struct buffer_head *bh, int rw,
++ union map_info *map_context)
++{
++ struct exception *e;
++ struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
++ int r = 1;
++ chunk_t chunk;
++ struct pending_exception *pe;
++
++ chunk = sector_to_chunk(s, bh->b_rsector);
++
++ /*
++ * Full snapshots are not usable.
++ * NOTE(review): s->valid is read without s->lock here - confirm
++ * a racy read is acceptable (it is re-checked under the lock
++ * indirectly via the exception tables).
++ */
++ if (!s->valid)
++ return -1;
++
++ /*
++ * Write to snapshot - higher level takes care of RW/RO
++ * flags so we should only get this if we are
++ * writeable.
++ */
++ if (rw == WRITE) {
++
++ down_write(&s->lock);
++
++ /* If the block is already remapped - use that, else remap it */
++ e = lookup_exception(&s->complete, chunk);
++ if (e)
++ remap_exception(s, e, bh);
++
++ else {
++ pe = find_pending_exception(s, bh);
++
++ if (!pe) {
++ s->store.drop_snapshot(&s->store);
++ s->valid = 0;
++ r = -EIO;
++ } else {
++ /* Hold the bh until the copy completes. */
++ remap_exception(s, &pe->e, bh);
++ queue_buffer(&pe->snapshot_bhs, bh);
++ start_copy(pe);
++ r = 0;
++ }
++ }
++
++ up_write(&s->lock);
++
++ } else {
++ /*
++ * FIXME: this read path scares me because we
++ * always use the origin when we have a pending
++ * exception. However I can't think of a
++ * situation where this is wrong - ejt.
++ */
++
++ /* Do reads */
++ down_read(&s->lock);
++
++ /* See if it has been remapped */
++ e = lookup_exception(&s->complete, chunk);
++ if (e)
++ remap_exception(s, e, bh);
++ else
++ bh->b_rdev = s->origin->dev;
++
++ up_read(&s->lock);
++ }
++
++ return r;
++}
++
++/*
++ * Resume hook: load the exception-store metadata on first resume.
++ * NOTE(review): not declared static although it is only referenced
++ * via snapshot_target in this file - confirm and consider static.
++ */
++void snapshot_resume(struct dm_target *ti)
++{
++ struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
++
++ /* Only read the metadata once. */
++ if (s->have_metadata)
++ return;
++
++ if (s->store.read_metadata(&s->store)) {
++ down_write(&s->lock);
++ s->valid = 0;
++ up_write(&s->lock);
++ }
++
++ s->have_metadata = 1;
++}
++
++/*
++ * Report status: INFO gives used/total sectors (or Invalid/Unknown),
++ * TABLE reconstructs the constructor line.
++ */
++static int snapshot_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned int maxlen)
++{
++ struct dm_snapshot *snap = (struct dm_snapshot *) ti->private;
++ char cow[16];
++ char org[16];
++
++ switch (type) {
++ case STATUSTYPE_INFO:
++ if (!snap->valid)
++ snprintf(result, maxlen, "Invalid");
++ else {
++ if (snap->store.fraction_full) {
++ sector_t numerator, denominator;
++ snap->store.fraction_full(&snap->store,
++ &numerator,
++ &denominator);
++ snprintf(result, maxlen,
++ SECTOR_FORMAT "/" SECTOR_FORMAT,
++ numerator, denominator);
++ }
++ else
++ snprintf(result, maxlen, "Unknown");
++ }
++ break;
++
++ case STATUSTYPE_TABLE:
++ /*
++ * kdevname returns a static pointer so we need
++ * to make private copies if the output is to
++ * make sense.
++ * NOTE(review): strncpy does not guarantee NUL
++ * termination if the name is >= 16 chars - confirm
++ * dm_kdevname output is always shorter.
++ */
++ strncpy(cow, dm_kdevname(snap->cow->dev), sizeof(cow));
++ strncpy(org, dm_kdevname(snap->origin->dev), sizeof(org));
++ snprintf(result, maxlen, "%s %s %c %ld", org, cow,
++ snap->type, snap->chunk_size);
++ break;
++ }
++
++ return 0;
++}
++
++/*-----------------------------------------------------------------
++ * Origin methods
++ *---------------------------------------------------------------*/
++/*
++ * Splice the two circular lists l1 and l2 into a single ring by raw
++ * pointer surgery (no list head is "the" head afterwards).
++ */
++static void list_merge(struct list_head *l1, struct list_head *l2)
++{
++ struct list_head *l1_n, *l2_p;
++
++ l1_n = l1->next;
++ l2_p = l2->prev;
++
++ l1->next = l2;
++ l2->prev = l1;
++
++ l2_p->next = l1_n;
++ l1_n->prev = l2_p;
++}
++
++/*
++ * An origin write must trigger an exception in every snapshot of that
++ * origin that has not already remapped the chunk.  The pending
++ * exceptions are linked into a sibling ring so the bh is released only
++ * when all copies complete.  Returns 0 if the bh was queued (caller
++ * must not submit it), 1 if it can proceed immediately.
++ */
++static int __origin_write(struct list_head *snapshots, struct buffer_head *bh)
++{
++ int r = 1, first = 1;
++ struct list_head *sl;
++ struct dm_snapshot *snap;
++ struct exception *e;
++ struct pending_exception *pe, *last = NULL;
++ chunk_t chunk;
++
++ /* Do all the snapshots on this origin */
++ list_for_each(sl, snapshots) {
++ snap = list_entry(sl, struct dm_snapshot, list);
++
++ /* Only deal with valid snapshots */
++ if (!snap->valid)
++ continue;
++
++ down_write(&snap->lock);
++
++ /*
++ * Remember, different snapshots can have
++ * different chunk sizes.
++ */
++ chunk = sector_to_chunk(snap, bh->b_rsector);
++
++ /*
++ * Check exception table to see if block
++ * is already remapped in this snapshot
++ * and trigger an exception if not.
++ */
++ e = lookup_exception(&snap->complete, chunk);
++ if (!e) {
++ pe = find_pending_exception(snap, bh);
++ if (!pe) {
++ snap->store.drop_snapshot(&snap->store);
++ snap->valid = 0;
++
++ } else {
++ /* Link this pe into the sibling ring. */
++ if (last)
++ list_merge(&pe->siblings,
++ &last->siblings);
++
++ last = pe;
++ r = 0;
++ }
++ }
++
++ up_write(&snap->lock);
++ }
++
++ /*
++ * Now that we have a complete pe list we can start the copying.
++ */
++ if (last) {
++ pe = last;
++ do {
++ down_write(&pe->snap->lock);
++ /* The bh is queued on exactly one sibling. */
++ if (first)
++ queue_buffer(&pe->origin_bhs, bh);
++ start_copy(pe);
++ up_write(&pe->snap->lock);
++ first = 0;
++ pe = list_entry(pe->siblings.next,
++ struct pending_exception, siblings);
++
++ } while (pe != last);
++ }
++
++ return r;
++}
++
++/*
++ * Called on a write from the origin driver.
++ */
++/*
++ * Called on a write from the origin driver.
++ * Returns the __origin_write result (0 = bh held, 1 = go ahead).
++ */
++int do_origin(struct dm_dev *origin, struct buffer_head *bh)
++{
++ struct origin *o;
++ int r;
++
++ down_read(&_origins_lock);
++ o = __lookup_origin(origin->dev);
++ /* The origin target registered itself, so this cannot miss. */
++ if (!o)
++ BUG();
++
++ r = __origin_write(&o->snapshots, bh);
++ up_read(&_origins_lock);
++
++ return r;
++}
++
++/*
++ * Origin: maps a linear range of a device, with hooks for snapshotting.
++ */
++
++/*
++ * Construct an origin mapping: <dev_path>
++ * The context for an origin is merely a 'struct dm_dev *'
++ * pointing to the real device.
++ */
++static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
++{
++ int r;
++ struct dm_dev *dev;
++
++ if (argc != 1) {
++ ti->error = "dm-origin: incorrect number of arguments";
++ return -EINVAL;
++ }
++
++ /* Open the real device with the same mode as the dm table. */
++ r = dm_get_device(ti, argv[0], 0, ti->len,
++ dm_table_get_mode(ti->table), &dev);
++ if (r) {
++ ti->error = "Cannot get target device";
++ return r;
++ }
++
++ ti->private = dev;
++ return 0;
++}
++
++/* Release the underlying device taken in origin_ctr. */
++static void origin_dtr(struct dm_target *ti)
++{
++ struct dm_dev *dev = (struct dm_dev *) ti->private;
++ dm_put_device(ti, dev);
++}
++
++/* Linear 1:1 remap to the real device; writes also notify snapshots. */
++static int origin_map(struct dm_target *ti, struct buffer_head *bh, int rw,
++ union map_info *map_context)
++{
++ struct dm_dev *dev = (struct dm_dev *) ti->private;
++ bh->b_rdev = dev->dev;
++
++ /* Only tell snapshots if this is a write */
++ return (rw == WRITE) ? do_origin(dev, bh) : 1;
++}
++
++/* Status: INFO is empty; TABLE prints the backing device name. */
++static int origin_status(struct dm_target *ti, status_type_t type, char *result,
++ unsigned int maxlen)
++{
++ struct dm_dev *dev = (struct dm_dev *) ti->private;
++
++ switch (type) {
++ case STATUSTYPE_INFO:
++ result[0] = '\0';
++ break;
++
++ case STATUSTYPE_TABLE:
++ snprintf(result, maxlen, "%s", dm_kdevname(dev->dev));
++ break;
++ }
++
++ return 0;
++}
++
++/* "snapshot-origin" target (old GCC 'field:' initializer syntax). */
++static struct target_type origin_target = {
++ name: "snapshot-origin",
++ module: THIS_MODULE,
++ ctr: origin_ctr,
++ dtr: origin_dtr,
++ map: origin_map,
++ status: origin_status,
++};
++
++/* "snapshot" target; resume hook loads exception-store metadata. */
++static struct target_type snapshot_target = {
++ name: "snapshot",
++ module: THIS_MODULE,
++ ctr: snapshot_ctr,
++ dtr: snapshot_dtr,
++ map: snapshot_map,
++ resume: snapshot_resume,
++ status: snapshot_status,
++};
++
++/*
++ * Module init: register both targets, then set up the origin hash
++ * and the slab caches / mempool used for exceptions.  Unwinds in
++ * reverse order on failure.
++ */
++int __init dm_snapshot_init(void)
++{
++ int r;
++
++ r = dm_register_target(&snapshot_target);
++ if (r) {
++ DMERR("snapshot target register failed %d", r);
++ return r;
++ }
++
++ r = dm_register_target(&origin_target);
++ if (r < 0) {
++ DMERR("Device mapper: Origin: register failed %d\n", r);
++ goto bad1;
++ }
++
++ r = init_origin_hash();
++ if (r) {
++ DMERR("init_origin_hash failed.");
++ goto bad2;
++ }
++
++ exception_cache = kmem_cache_create("dm-snapshot-ex",
++ sizeof(struct exception),
++ __alignof__(struct exception),
++ 0, NULL, NULL);
++ if (!exception_cache) {
++ DMERR("Couldn't create exception cache.");
++ r = -ENOMEM;
++ goto bad3;
++ }
++
++ pending_cache =
++ kmem_cache_create("dm-snapshot-in",
++ sizeof(struct pending_exception),
++ __alignof__(struct pending_exception),
++ 0, NULL, NULL);
++ if (!pending_cache) {
++ DMERR("Couldn't create pending cache.");
++ r = -ENOMEM;
++ goto bad4;
++ }
++
++ /* Mempool guarantees forward progress for pending exceptions. */
++ pending_pool = mempool_create(128, mempool_alloc_slab,
++ mempool_free_slab, pending_cache);
++ if (!pending_pool) {
++ DMERR("Couldn't create pending pool.");
++ r = -ENOMEM;
++ goto bad5;
++ }
++
++ return 0;
++
++ bad5:
++ kmem_cache_destroy(pending_cache);
++ bad4:
++ kmem_cache_destroy(exception_cache);
++ bad3:
++ exit_origin_hash();
++ bad2:
++ dm_unregister_target(&origin_target);
++ bad1:
++ dm_unregister_target(&snapshot_target);
++ return r;
++}
++
++/* Module exit: mirror of dm_snapshot_init. */
++void dm_snapshot_exit(void)
++{
++ int r;
++
++ r = dm_unregister_target(&snapshot_target);
++ if (r)
++ DMERR("snapshot unregister failed %d", r);
++
++ r = dm_unregister_target(&origin_target);
++ if (r)
++ DMERR("origin unregister failed %d", r);
++
++ exit_origin_hash();
++ mempool_destroy(pending_pool);
++ kmem_cache_destroy(pending_cache);
++ kmem_cache_destroy(exception_cache);
++}
+diff -urN linux-2.4.22/drivers/md/dm-snapshot.h linux-2.4.22-dm/drivers/md/dm-snapshot.h
+--- linux-2.4.22/drivers/md/dm-snapshot.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-snapshot.h 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,158 @@
++/*
++ * dm-snapshot.c
++ *
++ * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#ifndef DM_SNAPSHOT_H
++#define DM_SNAPSHOT_H
++
++#include "dm.h"
++#include <linux/blkdev.h>
++
++/* Hash table of exceptions, bucketed by chunk number. */
++struct exception_table {
++ uint32_t hash_mask;
++ struct list_head *table;
++};
++
++/*
++ * The snapshot code deals with largish chunks of the disk at a
++ * time. Typically 64k - 256k.
++ */
++/* FIXME: can we get away with limiting these to a uint32_t ? */
++typedef sector_t chunk_t;
++
++/*
++ * An exception is used where an old chunk of data has been
++ * replaced by a new one.
++ */
++struct exception {
++ struct list_head hash_list;
++
++ chunk_t old_chunk;
++ chunk_t new_chunk;
++};
++
++/*
++ * Abstraction to handle the meta/layout of exception stores (the
++ * COW device).
++ */
++struct exception_store {
++
++ /*
++ * Destroys this object when you've finished with it.
++ */
++ void (*destroy) (struct exception_store *store);
++
++ /*
++ * The target shouldn't read the COW device until this is
++ * called.
++ */
++ int (*read_metadata) (struct exception_store *store);
++
++ /*
++ * Find somewhere to store the next exception.
++ */
++ int (*prepare_exception) (struct exception_store *store,
++ struct exception *e);
++
++ /*
++ * Update the metadata with this exception.
++ */
++ void (*commit_exception) (struct exception_store *store,
++ struct exception *e,
++ void (*callback) (void *, int success),
++ void *callback_context);
++
++ /*
++ * The snapshot is invalid, note this in the metadata.
++ */
++ void (*drop_snapshot) (struct exception_store *store);
++
++ /*
++ * Return how full the snapshot is.
++ */
++ void (*fraction_full) (struct exception_store *store,
++ sector_t *numerator,
++ sector_t *denominator);
++
++ struct dm_snapshot *snap;
++ void *context;
++};
++
++struct dm_snapshot {
++ struct rw_semaphore lock;
++ struct dm_table *table;
++
++ struct dm_dev *origin;
++ struct dm_dev *cow;
++
++ /* List of snapshots per Origin */
++ struct list_head list;
++
++ /* Size of data blocks saved - must be a power of 2 */
++ chunk_t chunk_size;
++ chunk_t chunk_mask;
++ chunk_t chunk_shift;
++
++ /* You can't use a snapshot if this is 0 (e.g. if full) */
++ int valid;
++ int have_metadata;
++
++ /* Used for display of table */
++ char type;
++
++ /* The last percentage we notified */
++ int last_percent;
++
++ struct exception_table pending;
++ struct exception_table complete;
++
++ /* The on disk metadata handler */
++ struct exception_store store;
++
++ struct kcopyd_client *kcopyd_client;
++};
++
++/*
++ * Used by the exception stores to load exceptions when
++ * initialising.
++ */
++int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new);
++
++/*
++ * Constructor and destructor for the default persistent
++ * store.
++ */
++int dm_create_persistent(struct exception_store *store, uint32_t chunk_size);
++
++int dm_create_transient(struct exception_store *store,
++ struct dm_snapshot *s, int blocksize);
++
++/*
++ * Return the number of sectors in the device.
++ */
++static inline sector_t get_dev_size(kdev_t dev)
++{
++ int *sizes;
++
++ /* blk_size[] entries are in KiB; << 1 converts to 512B sectors. */
++ sizes = blk_size[MAJOR(dev)];
++ if (sizes)
++ return sizes[MINOR(dev)] << 1;
++
++ return 0;
++}
++
++static inline chunk_t sector_to_chunk(struct dm_snapshot *s, sector_t sector)
++{
++ return (sector & ~s->chunk_mask) >> s->chunk_shift;
++}
++
++static inline sector_t chunk_to_sector(struct dm_snapshot *s, chunk_t chunk)
++{
++ return chunk << s->chunk_shift;
++}
++
++#endif
+diff -urN linux-2.4.22/drivers/md/dm-stripe.c linux-2.4.22-dm/drivers/md/dm-stripe.c
+--- linux-2.4.22/drivers/md/dm-stripe.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-stripe.c 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,258 @@
++/*
++ * Copyright (C) 2001 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#include "dm.h"
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/blkdev.h>
++#include <linux/slab.h>
++
++/* One leg of the stripe set: backing device plus its start offset. */
++struct stripe {
++ struct dm_dev *dev;
++ sector_t physical_start;
++};
++
++/* Per-target context; 'stripe' is a trailing variable-length array. */
++struct stripe_c {
++ uint32_t stripes;
++
++ /* The size of this target / num. stripes */
++ uint32_t stripe_width;
++
++ /* stripe chunk size */
++ uint32_t chunk_shift;
++ sector_t chunk_mask;
++
++ struct stripe stripe[0];
++};
++
++/* Allocate a stripe_c with room for 'stripes' trailing entries. */
++static inline struct stripe_c *alloc_context(unsigned int stripes)
++{
++ size_t len;
++
++ /* Guard the size computation against overflow. */
++ if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
++ stripes))
++ return NULL;
++
++ len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);
++
++ return kmalloc(len, GFP_KERNEL);
++}
++
++/*
++ * Parse a single <dev> <sector> pair
++ */
++static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
++ unsigned int stripe, char **argv)
++{
++ sector_t start;
++
++ if (sscanf(argv[1], SECTOR_FORMAT, &start) != 1)
++ return -EINVAL;
++
++ if (dm_get_device(ti, argv[0], start, sc->stripe_width,
++ dm_table_get_mode(ti->table),
++ &sc->stripe[stripe].dev))
++ return -ENXIO;
++
++ sc->stripe[stripe].physical_start = start;
++ return 0;
++}
++
++/*
++ * FIXME: Nasty function, only present because we can't link
++ * against __moddi3 and __divdi3.
++ *
++ * returns a == b * n
++ */
++static int multiple(sector_t a, sector_t b, sector_t *n)
++{
++ sector_t acc, prev, i;
++
++ /* Long division by repeated doubling: subtract the largest b<<k. */
++ *n = 0;
++ while (a >= b) {
++ for (acc = b, prev = 0, i = 1;
++ acc <= a;
++ prev = acc, acc <<= 1, i <<= 1)
++ ;
++
++ a -= prev;
++ *n += i >> 1;
++ }
++
++ /* True iff the division left no remainder. */
++ return a == 0;
++}
++
++/*
++ * Construct a striped mapping.
++ * <number of stripes> <chunk size (2^^n)> [<dev_path> <offset>]+
++ */
++static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
++{
++ struct stripe_c *sc;
++ sector_t width;
++ uint32_t stripes;
++ uint32_t chunk_size;
++ char *end;
++ int r;
++ unsigned int i;
++
++ if (argc < 2) {
++ ti->error = "dm-stripe: Not enough arguments";
++ return -EINVAL;
++ }
++
++ stripes = simple_strtoul(argv[0], &end, 10);
++ if (*end) {
++ ti->error = "dm-stripe: Invalid stripe count";
++ return -EINVAL;
++ }
++
++ chunk_size = simple_strtoul(argv[1], &end, 10);
++ if (*end) {
++ ti->error = "dm-stripe: Invalid chunk_size";
++ return -EINVAL;
++ }
++
++ /*
++ * chunk_size is a power of two
++ */
++ if (!chunk_size || (chunk_size & (chunk_size - 1))) {
++ ti->error = "dm-stripe: Invalid chunk size";
++ return -EINVAL;
++ }
++
++ /* width = ti->len / stripes; must divide evenly. */
++ if (!multiple(ti->len, stripes, &width)) {
++ ti->error = "dm-stripe: Target length not divisable by "
++ "number of stripes";
++ return -EINVAL;
++ }
++
++ /*
++ * Do we have enough arguments for that many stripes ?
++ */
++ if (argc != (2 + 2 * stripes)) {
++ ti->error = "dm-stripe: Not enough destinations specified";
++ return -EINVAL;
++ }
++
++ sc = alloc_context(stripes);
++ if (!sc) {
++ ti->error = "dm-stripe: Memory allocation for striped context "
++ "failed";
++ return -ENOMEM;
++ }
++
++ sc->stripes = stripes;
++ sc->stripe_width = width;
++
++ /* Derive chunk_shift = log2(chunk_size); clobbers chunk_size. */
++ sc->chunk_mask = ((sector_t) chunk_size) - 1;
++ for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++)
++ chunk_size >>= 1;
++ sc->chunk_shift--;
++
++ /*
++ * Get the stripe destinations.
++ */
++ for (i = 0; i < stripes; i++) {
++ argv += 2;
++
++ r = get_stripe(ti, sc, i, argv);
++ if (r < 0) {
++ ti->error = "dm-stripe: Couldn't parse stripe "
++ "destination";
++ /* Release the devices acquired so far. */
++ while (i--)
++ dm_put_device(ti, sc->stripe[i].dev);
++ kfree(sc);
++ return r;
++ }
++ }
++
++ ti->private = sc;
++ return 0;
++}
++
++/* Release every stripe leg and the context itself. */
++static void stripe_dtr(struct dm_target *ti)
++{
++ unsigned int i;
++ struct stripe_c *sc = (struct stripe_c *) ti->private;
++
++ for (i = 0; i < sc->stripes; i++)
++ dm_put_device(ti, sc->stripe[i].dev);
++
++ kfree(sc);
++}
++
++/* Map a bh: pick a stripe round-robin by chunk, then offset within it. */
++static int stripe_map(struct dm_target *ti, struct buffer_head *bh, int rw,
++ union map_info *context)
++{
++ struct stripe_c *sc = (struct stripe_c *) ti->private;
++
++ sector_t offset = bh->b_rsector - ti->begin;
++ uint32_t chunk = (uint32_t) (offset >> sc->chunk_shift);
++ uint32_t stripe = chunk % sc->stripes; /* 32bit modulus */
++ chunk = chunk / sc->stripes;
++
++ bh->b_rdev = sc->stripe[stripe].dev->dev;
++ bh->b_rsector = sc->stripe[stripe].physical_start +
++ (chunk << sc->chunk_shift) + (offset & sc->chunk_mask);
++ return 1;
++}
++
++/* Status: INFO is empty; TABLE reconstructs the constructor line. */
++static int stripe_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned int maxlen)
++{
++ struct stripe_c *sc = (struct stripe_c *) ti->private;
++ int offset;
++ unsigned int i;
++
++ switch (type) {
++ case STATUSTYPE_INFO:
++ result[0] = '\0';
++ break;
++
++ case STATUSTYPE_TABLE:
++ /* chunk_mask + 1 recovers the chunk size. */
++ offset = snprintf(result, maxlen, "%d " SECTOR_FORMAT,
++ sc->stripes, sc->chunk_mask + 1);
++ for (i = 0; i < sc->stripes; i++) {
++ offset +=
++ snprintf(result + offset, maxlen - offset,
++ " %s " SECTOR_FORMAT,
++ dm_kdevname(to_kdev_t(sc->stripe[i].dev->bdev->bd_dev)),
++ sc->stripe[i].physical_start);
++ }
++ break;
++ }
++ return 0;
++}
++
++/* "striped" target definition. */
++static struct target_type stripe_target = {
++ .name = "striped",
++ .module = THIS_MODULE,
++ .ctr = stripe_ctr,
++ .dtr = stripe_dtr,
++ .map = stripe_map,
++ .status = stripe_status,
++};
++
++/* Register the "striped" target with device-mapper. */
++int __init dm_stripe_init(void)
++{
++ int r;
++
++ r = dm_register_target(&stripe_target);
++ if (r < 0)
++ DMWARN("striped target registration failed");
++
++ return r;
++}
++
++/* Unregister the "striped" target. */
++void dm_stripe_exit(void)
++{
++ if (dm_unregister_target(&stripe_target))
++ DMWARN("striped target unregistration failed");
++
++ return;
++}
+diff -urN linux-2.4.22/drivers/md/dm-table.c linux-2.4.22-dm/drivers/md/dm-table.c
+--- linux-2.4.22/drivers/md/dm-table.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-table.c 2003-09-15 17:03:21.000000000 +0200
+@@ -0,0 +1,674 @@
++/*
++ * Copyright (C) 2001 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#include "dm.h"
++
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/blkdev.h>
++#include <linux/ctype.h>
++#include <linux/slab.h>
++#include <asm/atomic.h>
++
++#define MAX_DEPTH 16
++#define NODE_SIZE L1_CACHE_BYTES
++#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
++#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
++
++/*
++ * A reference-counted table of targets, indexed by a btree of
++ * sector "highs" for fast sector -> target lookup.
++ */
++struct dm_table {
++ atomic_t holders;
++
++ /* btree table */
++ unsigned int depth;
++ unsigned int counts[MAX_DEPTH]; /* in nodes */
++ sector_t *index[MAX_DEPTH];
++
++ unsigned int num_targets;
++ unsigned int num_allocated;
++ sector_t *highs;
++ struct dm_target *targets;
++
++ /*
++ * Indicates the rw permissions for the new logical
++ * device. This should be a combination of FMODE_READ
++ * and FMODE_WRITE.
++ */
++ int mode;
++
++ /* a list of devices used by this table */
++ struct list_head devices;
++
++ /* events get handed up using this callback */
++ void (*event_fn)(void *);
++ void *event_context;
++};
++
++/*
++ * Similar to ceiling(log_size(n))
++ */
++static unsigned int int_log(unsigned long n, unsigned long base)
++{
++ int result = 0;
++
++ /* Count how many times n must be divided (rounding up) by base. */
++ while (n > 1) {
++ n = dm_div_up(n, base);
++ result++;
++ }
++
++ return result;
++}
++
++/*
++ * Calculate the index of the child node of the n'th node k'th key.
++ */
++static inline unsigned int get_child(unsigned int n, unsigned int k)
++{
++ /* Children of node n are stored contiguously on the next level. */
++ return (n * CHILDREN_PER_NODE) + k;
++}
++
++/*
++ * Return the n'th node of level l from table t.
++ */
++static inline sector_t *get_node(struct dm_table *t, unsigned int l,
++ unsigned int n)
++{
++ /* Nodes on a level are packed KEYS_PER_NODE keys apart. */
++ return t->index[l] + (n * KEYS_PER_NODE);
++}
++
++/*
++ * Return the highest key that you could lookup from the n'th
++ * node on level l of the btree.
++ */
++static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
++{
++ /* Descend to the rightmost leaf reachable from (l, n). */
++ for (; l < t->depth - 1; l++)
++ n = get_child(n, CHILDREN_PER_NODE - 1);
++
++ /* Past the end of the level: sentinel of all-ones. */
++ if (n >= t->counts[l])
++ return (sector_t) - 1;
++
++ return get_node(t, l, n)[KEYS_PER_NODE - 1];
++}
++
++/*
++ * Fills in a level of the btree based on the highs of the level
++ * below it.
++ */
++static int setup_btree_index(unsigned int l, struct dm_table *t)
++{
++ unsigned int n, k;
++ sector_t *node;
++
++ /* Each key on level l is the max key of the child subtree below. */
++ for (n = 0U; n < t->counts[l]; n++) {
++ node = get_node(t, l, n);
++
++ for (k = 0U; k < KEYS_PER_NODE; k++)
++ node[k] = high(t, l + 1, get_child(n, k));
++ }
++
++ return 0;
++}
++
++/* Allocate an empty table (holders = 1) sized for num_targets. */
++int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
++{
++ struct dm_table *t = kmalloc(sizeof(*t), GFP_NOIO);
++
++ if (!t)
++ return -ENOMEM;
++
++ memset(t, 0, sizeof(*t));
++ INIT_LIST_HEAD(&t->devices);
++ atomic_set(&t->holders, 1);
++
++
++ /* allocate both the target array and offset array at once */
++ t->highs = (sector_t *) vcalloc(sizeof(struct dm_target) +
++ sizeof(sector_t), num_targets);
++ if (!t->highs) {
++ kfree(t);
++ return -ENOMEM;
++ }
++
++ /* Targets live directly after the highs array. */
++ t->targets = (struct dm_target *) (t->highs + num_targets);
++ t->num_allocated = num_targets;
++ t->mode = mode;
++ *result = t;
++ return 0;
++}
++
++/* Free every dm_dev entry left on the list (safe manual iteration). */
++static void free_devices(struct list_head *devices)
++{
++ struct list_head *tmp, *next;
++
++ for (tmp = devices->next; tmp != devices; tmp = next) {
++ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
++ next = tmp->next;
++ kfree(dd);
++ }
++}
++
++/* Final teardown once the last holder drops its reference. */
++void table_destroy(struct dm_table *t)
++{
++ unsigned int i;
++
++ /* free the indexes (see dm_table_complete) */
++ if (t->depth >= 2)
++ vfree(t->index[t->depth - 2]);
++
++ /* free the targets */
++ for (i = 0; i < t->num_targets; i++) {
++ struct dm_target *tgt = t->targets + i;
++
++ if (tgt->type->dtr)
++ tgt->type->dtr(tgt);
++
++ dm_put_target_type(tgt->type);
++ }
++
++ /* t->targets shares this allocation (see dm_table_create). */
++ vfree(t->highs);
++
++ /* free the device list */
++ if (t->devices.next != &t->devices) {
++ DMWARN("devices still present during destroy: "
++ "dm_table_remove_device calls missing");
++
++ free_devices(&t->devices);
++ }
++
++ kfree(t);
++}
++
++/* Take a reference on the table. */
++void dm_table_get(struct dm_table *t)
++{
++ atomic_inc(&t->holders);
++}
++
++/* Drop a reference; destroy the table when the last one goes. */
++void dm_table_put(struct dm_table *t)
++{
++ if (atomic_dec_and_test(&t->holders))
++ table_destroy(t);
++}
++
++/*
++ * Convert a device path to a dev_t.
++ */
++/*
++ * Convert a device path to a dev_t.
++ * NOTE(review): returns 0 both on path_init failure and on success -
++ * confirm callers distinguish these via *dev.
++ */
++static int lookup_device(const char *path, kdev_t *dev)
++{
++ int r;
++ struct nameidata nd;
++ struct inode *inode;
++
++ if (!path_init(path, LOOKUP_FOLLOW, &nd))
++ return 0;
++
++ if ((r = path_walk(path, &nd)))
++ goto out;
++
++ inode = nd.dentry->d_inode;
++ if (!inode) {
++ r = -ENOENT;
++ goto out;
++ }
++
++ if (!S_ISBLK(inode->i_mode)) {
++ r = -ENOTBLK;
++ goto out;
++ }
++
++ *dev = inode->i_rdev;
++
++ out:
++ path_release(&nd);
++ return r;
++}
++
++/*
++ * See if we've already got a device in the list.
++ */
++static struct dm_dev *find_device(struct list_head *l, kdev_t dev)
++{
++ struct list_head *tmp;
++
++ /* Linear scan of the table's device list. */
++ list_for_each(tmp, l) {
++ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
++ if (kdev_same(dd->dev, dev))
++ return dd;
++ }
++
++ return NULL;
++}
++
++/*
++ * Open a device so we can use it as a map destination.
++ */
++static int open_dev(struct dm_dev *dd)
++{
++ if (dd->bdev)
++ BUG();
++
++ dd->bdev = bdget(kdev_t_to_nr(dd->dev));
++ if (!dd->bdev)
++ return -ENOMEM;
++
++ return blkdev_get(dd->bdev, dd->mode, 0, BDEV_RAW);
++}
++
++/*
++ * Close a device that we've been using.
++ */
++static void close_dev(struct dm_dev *dd)
++{
++ if (!dd->bdev)
++ return;
++
++ blkdev_put(dd->bdev, BDEV_RAW);
++ dd->bdev = NULL;
++}
++
++/*
++ * If possible (i.e. blk_size[major] is set), this checks that an
++ * of a destination device is valid.
++ */
++static int check_device_area(kdev_t dev, sector_t start, sector_t len)
++{
++ int *sizes;
++ sector_t dev_size;
++
++ if (!(sizes = blk_size[major(dev)]) || !(dev_size = sizes[minor(dev)]))
++ /* we don't know the device details,
++ * so give the benefit of the doubt */
++ return 1;
++
++ /* convert to 512-byte sectors */
++ dev_size <<= 1;
++
++ return ((start < dev_size) && (len <= (dev_size - start)));
++}
++
++/*
++ * This upgrades the mode on an already open dm_dev. Being
++ * careful to leave things as they were if we fail to reopen the
++ * device.
++ */
++static int upgrade_mode(struct dm_dev *dd, int new_mode)
++{
++ int r;
++ struct dm_dev dd_copy;
++
++ memcpy(&dd_copy, dd, sizeof(dd_copy));
++
++ dd->mode |= new_mode;
++ dd->bdev = NULL;
++ r = open_dev(dd);
++ if (!r)
++ close_dev(&dd_copy);
++ else
++ memcpy(dd, &dd_copy, sizeof(dd_copy));
++
++ return r;
++}
++
++/*
++ * Add a device to the list, or just increment the usage count if
++ * it's already present.
++ */
++int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
++ sector_t len, int mode, struct dm_dev **result)
++{
++ int r;
++ kdev_t dev;
++ struct dm_dev *dd;
++ unsigned major, minor;
++ struct dm_table *t = ti->table;
++
++ if (!t)
++ BUG();
++
++ if (sscanf(path, "%u:%u", &major, &minor) == 2) {
++ /* Extract the major/minor numbers */
++ dev = mk_kdev(major, minor);
++ } else {
++ /* convert the path to a device */
++ if ((r = lookup_device(path, &dev)))
++ return r;
++ }
++
++ dd = find_device(&t->devices, dev);
++ if (!dd) {
++ dd = kmalloc(sizeof(*dd), GFP_KERNEL);
++ if (!dd)
++ return -ENOMEM;
++
++ dd->dev = dev;
++ dd->mode = mode;
++ dd->bdev = NULL;
++
++ if ((r = open_dev(dd))) {
++ kfree(dd);
++ return r;
++ }
++
++ atomic_set(&dd->count, 0);
++ list_add(&dd->list, &t->devices);
++
++ } else if (dd->mode != (mode | dd->mode)) {
++ r = upgrade_mode(dd, mode);
++ if (r)
++ return r;
++ }
++ atomic_inc(&dd->count);
++
++ if (!check_device_area(dd->dev, start, len)) {
++ DMWARN("device %s too small for target", path);
++ dm_put_device(ti, dd);
++ return -EINVAL;
++ }
++
++ *result = dd;
++
++ return 0;
++}
++
++/*
++ * Decrement a device's use count and remove it if necessary.
++ */
++void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
++{
++ if (atomic_dec_and_test(&dd->count)) {
++ close_dev(dd);
++ list_del(&dd->list);
++ kfree(dd);
++ }
++}
++
++/*
++ * Checks to see if the target joins onto the end of the table.
++ */
++static int adjoin(struct dm_table *table, struct dm_target *ti)
++{
++ struct dm_target *prev;
++
++ if (!table->num_targets)
++ return !ti->begin;
++
++ prev = &table->targets[table->num_targets - 1];
++ return (ti->begin == (prev->begin + prev->len));
++}
++
++/*
++ * Used to dynamically allocate the arg array.
++ */
++static char **realloc_argv(unsigned *array_size, char **old_argv)
++{
++ char **argv;
++ unsigned new_size;
++
++ new_size = *array_size ? *array_size * 2 : 64;
++ argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
++ if (argv) {
++ memcpy(argv, old_argv, *array_size * sizeof(*argv));
++ *array_size = new_size;
++ }
++
++ kfree(old_argv);
++ return argv;
++}
++
++/*
++ * Destructively splits up the argument list to pass to ctr.
++ */
++static int split_args(int *argc, char ***argvp, char *input)
++{
++ char *start, *end = input, *out, **argv = NULL;
++ unsigned array_size = 0;
++
++ *argc = 0;
++ argv = realloc_argv(&array_size, argv);
++ if (!argv)
++ return -ENOMEM;
++
++ while (1) {
++ start = end;
++
++ /* Skip whitespace */
++ while (*start && isspace(*start))
++ start++;
++
++ if (!*start)
++ break; /* success, we hit the end */
++
++ /* 'out' is used to remove any back-quotes */
++ end = out = start;
++ while (*end) {
++ /* Everything apart from '\0' can be quoted */
++ if (*end == '\\' && *(end + 1)) {
++ *out++ = *(end + 1);
++ end += 2;
++ continue;
++ }
++
++ if (isspace(*end))
++ break; /* end of token */
++
++ *out++ = *end++;
++ }
++
++ /* have we already filled the array ? */
++ if ((*argc + 1) > array_size) {
++ argv = realloc_argv(&array_size, argv);
++ if (!argv)
++ return -ENOMEM;
++ }
++
++ /* we know this is whitespace */
++ if (*end)
++ end++;
++
++ /* terminate the string and put it in the array */
++ *out = '\0';
++ argv[*argc] = start;
++ (*argc)++;
++ }
++
++ *argvp = argv;
++ return 0;
++}
++
++int dm_table_add_target(struct dm_table *t, const char *type,
++ sector_t start, sector_t len, char *params)
++{
++ int r = -EINVAL, argc;
++ char **argv;
++ struct dm_target *tgt;
++
++ if (t->num_targets >= t->num_allocated)
++ return -ENOMEM;
++
++ tgt = t->targets + t->num_targets;
++ memset(tgt, 0, sizeof(*tgt));
++
++ tgt->type = dm_get_target_type(type);
++ if (!tgt->type) {
++ tgt->error = "unknown target type";
++ return -EINVAL;
++ }
++
++ tgt->table = t;
++ tgt->begin = start;
++ tgt->len = len;
++ tgt->error = "Unknown error";
++
++ /*
++ * Does this target adjoin the previous one ?
++ */
++ if (!adjoin(t, tgt)) {
++ tgt->error = "Gap in table";
++ r = -EINVAL;
++ goto bad;
++ }
++
++ r = split_args(&argc, &argv, params);
++ if (r) {
++ tgt->error = "couldn't split parameters (insufficient memory)";
++ goto bad;
++ }
++
++ r = tgt->type->ctr(tgt, argc, argv);
++ kfree(argv);
++ if (r)
++ goto bad;
++
++ t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
++ return 0;
++
++ bad:
++ printk(KERN_ERR DM_NAME ": %s\n", tgt->error);
++ dm_put_target_type(tgt->type);
++ return r;
++}
++
++static int setup_indexes(struct dm_table *t)
++{
++ int i;
++ unsigned int total = 0;
++ sector_t *indexes;
++
++ /* allocate the space for *all* the indexes */
++ for (i = t->depth - 2; i >= 0; i--) {
++ t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
++ total += t->counts[i];
++ }
++
++ indexes = (sector_t *) vcalloc(total, (unsigned long) NODE_SIZE);
++ if (!indexes)
++ return -ENOMEM;
++
++ /* set up internal nodes, bottom-up */
++ for (i = t->depth - 2, total = 0; i >= 0; i--) {
++ t->index[i] = indexes;
++ indexes += (KEYS_PER_NODE * t->counts[i]);
++ setup_btree_index(i, t);
++ }
++
++ return 0;
++}
++
++/*
++ * Builds the btree to index the map.
++ */
++int dm_table_complete(struct dm_table *t)
++{
++ int r = 0;
++ unsigned int leaf_nodes;
++
++ /* how many indexes will the btree have ? */
++ leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
++ t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
++
++ /* leaf layer has already been set up */
++ t->counts[t->depth - 1] = leaf_nodes;
++ t->index[t->depth - 1] = t->highs;
++
++ if (t->depth >= 2)
++ r = setup_indexes(t);
++
++ return r;
++}
++
++static spinlock_t _event_lock = SPIN_LOCK_UNLOCKED;
++void dm_table_event_callback(struct dm_table *t,
++ void (*fn)(void *), void *context)
++{
++ spin_lock_irq(&_event_lock);
++ t->event_fn = fn;
++ t->event_context = context;
++ spin_unlock_irq(&_event_lock);
++}
++
++void dm_table_event(struct dm_table *t)
++{
++ spin_lock(&_event_lock);
++ if (t->event_fn)
++ t->event_fn(t->event_context);
++ spin_unlock(&_event_lock);
++}
++
++sector_t dm_table_get_size(struct dm_table *t)
++{
++ return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
++}
++
++struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
++{
++ if (index > t->num_targets)
++ return NULL;
++
++ return t->targets + index;
++}
++
++/*
++ * Search the btree for the correct target.
++ */
++struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
++{
++ unsigned int l, n = 0, k = 0;
++ sector_t *node;
++
++ for (l = 0; l < t->depth; l++) {
++ n = get_child(n, k);
++ node = get_node(t, l, n);
++
++ for (k = 0; k < KEYS_PER_NODE; k++)
++ if (node[k] >= sector)
++ break;
++ }
++
++ return &t->targets[(KEYS_PER_NODE * n) + k];
++}
++
++unsigned int dm_table_get_num_targets(struct dm_table *t)
++{
++ return t->num_targets;
++}
++
++struct list_head *dm_table_get_devices(struct dm_table *t)
++{
++ return &t->devices;
++}
++
++int dm_table_get_mode(struct dm_table *t)
++{
++ return t->mode;
++}
++
++void dm_table_suspend_targets(struct dm_table *t)
++{
++ int i;
++
++ for (i = 0; i < t->num_targets; i++) {
++ struct dm_target *ti = t->targets + i;
++
++ if (ti->type->suspend)
++ ti->type->suspend(ti);
++ }
++}
++
++void dm_table_resume_targets(struct dm_table *t)
++{
++ int i;
++
++ for (i = 0; i < t->num_targets; i++) {
++ struct dm_target *ti = t->targets + i;
++
++ if (ti->type->resume)
++ ti->type->resume(ti);
++ }
++}
++
++EXPORT_SYMBOL(dm_get_device);
++EXPORT_SYMBOL(dm_put_device);
++EXPORT_SYMBOL(dm_table_event);
++EXPORT_SYMBOL(dm_table_get_mode);
+diff -urN linux-2.4.22/drivers/md/dm-target.c linux-2.4.22-dm/drivers/md/dm-target.c
+--- linux-2.4.22/drivers/md/dm-target.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm-target.c 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,188 @@
++/*
++ * Copyright (C) 2001 Sistina Software (UK) Limited
++ *
++ * This file is released under the GPL.
++ */
++
++#include "dm.h"
++
++#include <linux/module.h>
++#include <linux/kmod.h>
++#include <linux/slab.h>
++
++struct tt_internal {
++ struct target_type tt;
++
++ struct list_head list;
++ long use;
++};
++
++static LIST_HEAD(_targets);
++static DECLARE_RWSEM(_lock);
++
++#define DM_MOD_NAME_SIZE 32
++
++static inline struct tt_internal *__find_target_type(const char *name)
++{
++ struct list_head *tih;
++ struct tt_internal *ti;
++
++ list_for_each(tih, &_targets) {
++ ti = list_entry(tih, struct tt_internal, list);
++
++ if (!strcmp(name, ti->tt.name))
++ return ti;
++ }
++
++ return NULL;
++}
++
++static struct tt_internal *get_target_type(const char *name)
++{
++ struct tt_internal *ti;
++
++ down_read(&_lock);
++ ti = __find_target_type(name);
++
++ if (ti) {
++ if (ti->use == 0 && ti->tt.module)
++ __MOD_INC_USE_COUNT(ti->tt.module);
++ ti->use++;
++ }
++ up_read(&_lock);
++
++ return ti;
++}
++
++static void load_module(const char *name)
++{
++ char module_name[DM_MOD_NAME_SIZE] = "dm-";
++
++ /* Length check for strcat() below */
++ if (strlen(name) > (DM_MOD_NAME_SIZE - 4))
++ return;
++
++ strcat(module_name, name);
++ request_module(module_name);
++}
++
++struct target_type *dm_get_target_type(const char *name)
++{
++ struct tt_internal *ti = get_target_type(name);
++
++ if (!ti) {
++ load_module(name);
++ ti = get_target_type(name);
++ }
++
++ return ti ? &ti->tt : NULL;
++}
++
++void dm_put_target_type(struct target_type *t)
++{
++ struct tt_internal *ti = (struct tt_internal *) t;
++
++ down_read(&_lock);
++ if (--ti->use == 0 && ti->tt.module)
++ __MOD_DEC_USE_COUNT(ti->tt.module);
++
++ if (ti->use < 0)
++ BUG();
++ up_read(&_lock);
++
++ return;
++}
++
++static struct tt_internal *alloc_target(struct target_type *t)
++{
++ struct tt_internal *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
++
++ if (ti) {
++ memset(ti, 0, sizeof(*ti));
++ ti->tt = *t;
++ }
++
++ return ti;
++}
++
++int dm_register_target(struct target_type *t)
++{
++ int rv = 0;
++ struct tt_internal *ti = alloc_target(t);
++
++ if (!ti)
++ return -ENOMEM;
++
++ down_write(&_lock);
++ if (__find_target_type(t->name)) {
++ kfree(ti);
++ rv = -EEXIST;
++ } else
++ list_add(&ti->list, &_targets);
++
++ up_write(&_lock);
++ return rv;
++}
++
++int dm_unregister_target(struct target_type *t)
++{
++ struct tt_internal *ti;
++
++ down_write(&_lock);
++ if (!(ti = __find_target_type(t->name))) {
++ up_write(&_lock);
++ return -EINVAL;
++ }
++
++ if (ti->use) {
++ up_write(&_lock);
++ return -ETXTBSY;
++ }
++
++ list_del(&ti->list);
++ kfree(ti);
++
++ up_write(&_lock);
++ return 0;
++}
++
++/*
++ * io-err: always fails an io, useful for bringing
++ * up LVs that have holes in them.
++ */
++static int io_err_ctr(struct dm_target *ti, unsigned int argc, char **args)
++{
++ return 0;
++}
++
++static void io_err_dtr(struct dm_target *ti)
++{
++ /* empty */
++}
++
++static int io_err_map(struct dm_target *ti, struct buffer_head *bh, int rw,
++ union map_info *map_context)
++{
++ return -EIO;
++}
++
++static struct target_type error_target = {
++ .name = "error",
++ .ctr = io_err_ctr,
++ .dtr = io_err_dtr,
++ .map = io_err_map,
++};
++
++int dm_target_init(void)
++{
++ return dm_register_target(&error_target);
++}
++
++void dm_target_exit(void)
++{
++ if (dm_unregister_target(&error_target))
++ DMWARN("error target unregistration failed");
++}
++
++EXPORT_SYMBOL(dm_register_target);
++EXPORT_SYMBOL(dm_unregister_target);
+diff -urN linux-2.4.22/drivers/md/dm.c linux-2.4.22-dm/drivers/md/dm.c
+--- linux-2.4.22/drivers/md/dm.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm.c 2003-09-15 17:02:30.000000000 +0200
+@@ -0,0 +1,1127 @@
++/*
++ * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#include "dm.h"
++#include "kcopyd.h"
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/blk.h>
++#include <linux/blkpg.h>
++#include <linux/mempool.h>
++#include <linux/slab.h>
++#include <linux/major.h>
++#include <linux/kdev_t.h>
++#include <linux/lvm.h>
++
++#include <asm/uaccess.h>
++
++static const char *_name = DM_NAME;
++#define DEFAULT_READ_AHEAD 64
++
++struct dm_io {
++ struct mapped_device *md;
++
++ struct dm_target *ti;
++ int rw;
++ union map_info map_context;
++ void (*end_io) (struct buffer_head * bh, int uptodate);
++ void *context;
++};
++
++struct deferred_io {
++ int rw;
++ struct buffer_head *bh;
++ struct deferred_io *next;
++};
++
++/*
++ * Bits for the md->flags field.
++ */
++#define DMF_BLOCK_IO 0
++#define DMF_SUSPENDED 1
++
++struct mapped_device {
++ struct rw_semaphore lock;
++ atomic_t holders;
++
++ kdev_t dev;
++ unsigned long flags;
++
++ /*
++ * A list of ios that arrived while we were suspended.
++ */
++ atomic_t pending;
++ wait_queue_head_t wait;
++ struct deferred_io *deferred;
++
++ /*
++ * The current mapping.
++ */
++ struct dm_table *map;
++
++ /*
++ * io objects are allocated from here.
++ */
++ mempool_t *io_pool;
++
++ /*
++ * Event handling.
++ */
++ uint32_t event_nr;
++ wait_queue_head_t eventq;
++};
++
++#define MIN_IOS 256
++static kmem_cache_t *_io_cache;
++
++static struct mapped_device *get_kdev(kdev_t dev);
++static int dm_request(request_queue_t *q, int rw, struct buffer_head *bh);
++static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb);
++
++/*-----------------------------------------------------------------
++ * In order to avoid the 256 minor number limit we are going to
++ * register more major numbers as necessary.
++ *---------------------------------------------------------------*/
++#define MAX_MINORS (1 << MINORBITS)
++
++struct major_details {
++ unsigned int major;
++
++ int transient;
++ struct list_head transient_list;
++
++ unsigned int first_free_minor;
++ int nr_free_minors;
++
++ struct mapped_device *mds[MAX_MINORS];
++ int blk_size[MAX_MINORS];
++ int blksize_size[MAX_MINORS];
++ int hardsect_size[MAX_MINORS];
++};
++
++static struct rw_semaphore _dev_lock;
++static struct major_details *_majors[MAX_BLKDEV];
++
++/*
++ * This holds a list of majors that non-specified device numbers
++ * may be allocated from. Only majors with free minors appear on
++ * this list.
++ */
++static LIST_HEAD(_transients_free);
++
++static int __alloc_major(unsigned int major, struct major_details **result)
++{
++ int r;
++ unsigned int transient = !major;
++ struct major_details *maj;
++
++ /* Major already allocated? */
++ if (major && _majors[major])
++ return 0;
++
++ maj = kmalloc(sizeof(*maj), GFP_KERNEL);
++ if (!maj)
++ return -ENOMEM;
++
++ memset(maj, 0, sizeof(*maj));
++ INIT_LIST_HEAD(&maj->transient_list);
++
++ maj->nr_free_minors = MAX_MINORS;
++
++ r = register_blkdev(major, _name, &dm_blk_dops);
++ if (r < 0) {
++ DMERR("register_blkdev failed for %d", major);
++ kfree(maj);
++ return r;
++ }
++ if (r > 0)
++ major = r;
++
++ maj->major = major;
++
++ if (transient) {
++ maj->transient = transient;
++ list_add_tail(&maj->transient_list, &_transients_free);
++ }
++
++ _majors[major] = maj;
++
++ blk_size[major] = maj->blk_size;
++ blksize_size[major] = maj->blksize_size;
++ hardsect_size[major] = maj->hardsect_size;
++ read_ahead[major] = DEFAULT_READ_AHEAD;
++
++ blk_queue_make_request(BLK_DEFAULT_QUEUE(major), dm_request);
++
++ *result = maj;
++ return 0;
++}
++
++static void __free_major(struct major_details *maj)
++{
++ unsigned int major = maj->major;
++
++ list_del(&maj->transient_list);
++
++ read_ahead[major] = 0;
++ blk_size[major] = NULL;
++ blksize_size[major] = NULL;
++ hardsect_size[major] = NULL;
++
++ _majors[major] = NULL;
++ kfree(maj);
++
++ if (unregister_blkdev(major, _name) < 0)
++ DMERR("devfs_unregister_blkdev failed");
++}
++
++static void free_all_majors(void)
++{
++ unsigned int major = ARRAY_SIZE(_majors);
++
++ down_write(&_dev_lock);
++
++ while (major--)
++ if (_majors[major])
++ __free_major(_majors[major]);
++
++ up_write(&_dev_lock);
++}
++
++static void free_dev(kdev_t dev)
++{
++ unsigned int major = major(dev);
++ unsigned int minor = minor(dev);
++ struct major_details *maj;
++
++ down_write(&_dev_lock);
++
++ maj = _majors[major];
++ if (!maj)
++ goto out;
++
++ maj->mds[minor] = NULL;
++ maj->nr_free_minors++;
++
++ if (maj->nr_free_minors == MAX_MINORS) {
++ __free_major(maj);
++ goto out;
++ }
++
++ if (!maj->transient)
++ goto out;
++
++ if (maj->nr_free_minors == 1)
++ list_add_tail(&maj->transient_list, &_transients_free);
++
++ if (minor < maj->first_free_minor)
++ maj->first_free_minor = minor;
++
++ out:
++ up_write(&_dev_lock);
++}
++
++static void __alloc_minor(struct major_details *maj, unsigned int minor,
++ struct mapped_device *md)
++{
++ maj->mds[minor] = md;
++ md->dev = mk_kdev(maj->major, minor);
++ maj->nr_free_minors--;
++
++ if (maj->transient && !maj->nr_free_minors)
++ list_del_init(&maj->transient_list);
++}
++
++/*
++ * See if requested kdev_t is available.
++ */
++static int specific_dev(kdev_t dev, struct mapped_device *md)
++{
++ int r = 0;
++ unsigned int major = major(dev);
++ unsigned int minor = minor(dev);
++ struct major_details *maj;
++
++ if (!major || (major > MAX_BLKDEV) || (minor >= MAX_MINORS)) {
++ DMWARN("device number requested out of range (%d, %d)",
++ major, minor);
++ return -EINVAL;
++ }
++
++ down_write(&_dev_lock);
++ maj = _majors[major];
++
++ /* Register requested major? */
++ if (!maj) {
++ r = __alloc_major(major, &maj);
++ if (r)
++ goto out;
++
++ major = maj->major;
++ }
++
++ if (maj->mds[minor]) {
++ r = -EBUSY;
++ goto out;
++ }
++
++ __alloc_minor(maj, minor, md);
++
++ out:
++ up_write(&_dev_lock);
++
++ return r;
++}
++
++/*
++ * Find first unused device number, requesting a new major number if required.
++ */
++static int first_free_dev(struct mapped_device *md)
++{
++ int r = 0;
++ struct major_details *maj;
++
++ down_write(&_dev_lock);
++
++ if (list_empty(&_transients_free)) {
++ r = __alloc_major(0, &maj);
++ if (r)
++ goto out;
++ } else
++ maj = list_entry(_transients_free.next, struct major_details,
++ transient_list);
++
++ while (maj->mds[maj->first_free_minor++])
++ ;
++
++ __alloc_minor(maj, maj->first_free_minor - 1, md);
++
++ out:
++ up_write(&_dev_lock);
++
++ return r;
++}
++
++static struct mapped_device *get_kdev(kdev_t dev)
++{
++ struct mapped_device *md;
++ struct major_details *maj;
++
++ down_read(&_dev_lock);
++ maj = _majors[major(dev)];
++ if (!maj) {
++ md = NULL;
++ goto out;
++ }
++ md = maj->mds[minor(dev)];
++ if (md)
++ dm_get(md);
++ out:
++ up_read(&_dev_lock);
++
++ return md;
++}
++
++/*-----------------------------------------------------------------
++ * init/exit code
++ *---------------------------------------------------------------*/
++
++static __init int local_init(void)
++{
++ init_rwsem(&_dev_lock);
++
++ /* allocate a slab for the dm_ios */
++ _io_cache = kmem_cache_create("dm io",
++ sizeof(struct dm_io), 0, 0, NULL, NULL);
++
++ if (!_io_cache)
++ return -ENOMEM;
++
++ return 0;
++}
++
++static void local_exit(void)
++{
++ kmem_cache_destroy(_io_cache);
++ free_all_majors();
++
++ DMINFO("cleaned up");
++}
++
++/*
++ * We have a lot of init/exit functions, so it seems easier to
++ * store them in an array. The disposable macro 'xx'
++ * expands a prefix into a pair of function names.
++ */
++static struct {
++ int (*init) (void);
++ void (*exit) (void);
++
++} _inits[] = {
++#define xx(n) {n ## _init, n ## _exit},
++ xx(local)
++ xx(kcopyd)
++ xx(dm_target)
++ xx(dm_linear)
++ xx(dm_stripe)
++ xx(dm_snapshot)
++ xx(dm_interface)
++#undef xx
++};
++
++static int __init dm_init(void)
++{
++ const int count = ARRAY_SIZE(_inits);
++
++ int r, i;
++
++ for (i = 0; i < count; i++) {
++ r = _inits[i].init();
++ if (r)
++ goto bad;
++ }
++
++ return 0;
++
++ bad:
++ while (i--)
++ _inits[i].exit();
++
++ return r;
++}
++
++static void __exit dm_exit(void)
++{
++ int i = ARRAY_SIZE(_inits);
++
++ while (i--)
++ _inits[i].exit();
++}
++
++/*
++ * Block device functions
++ */
++static int dm_blk_open(struct inode *inode, struct file *file)
++{
++ struct mapped_device *md;
++
++ md = get_kdev(inode->i_rdev);
++ if (!md)
++ return -ENXIO;
++
++ return 0;
++}
++
++static int dm_blk_close(struct inode *inode, struct file *file)
++{
++ struct mapped_device *md;
++
++ md = get_kdev(inode->i_rdev);
++ dm_put(md); /* put the reference gained by dm_blk_open */
++ dm_put(md);
++ return 0;
++}
++
++static inline struct dm_io *alloc_io(struct mapped_device *md)
++{
++ return mempool_alloc(md->io_pool, GFP_NOIO);
++}
++
++static inline void free_io(struct mapped_device *md, struct dm_io *io)
++{
++ mempool_free(io, md->io_pool);
++}
++
++static inline struct deferred_io *alloc_deferred(void)
++{
++ return kmalloc(sizeof(struct deferred_io), GFP_NOIO);
++}
++
++static inline void free_deferred(struct deferred_io *di)
++{
++ kfree(di);
++}
++
++static inline sector_t volume_size(kdev_t dev)
++{
++ return blk_size[major(dev)][minor(dev)] << 1;
++}
++
++/* FIXME: check this */
++static int dm_blk_ioctl(struct inode *inode, struct file *file,
++ unsigned int command, unsigned long a)
++{
++ kdev_t dev = inode->i_rdev;
++ long size;
++
++ switch (command) {
++ case BLKROSET:
++ case BLKROGET:
++ case BLKRASET:
++ case BLKRAGET:
++ case BLKFLSBUF:
++ case BLKSSZGET:
++ //case BLKRRPART: /* Re-read partition tables */
++ //case BLKPG:
++ case BLKELVGET:
++ case BLKELVSET:
++ case BLKBSZGET:
++ case BLKBSZSET:
++ return blk_ioctl(dev, command, a);
++ break;
++
++ case BLKGETSIZE:
++ size = volume_size(dev);
++ if (copy_to_user((void *) a, &size, sizeof(long)))
++ return -EFAULT;
++ break;
++
++ case BLKGETSIZE64:
++ size = volume_size(dev);
++ if (put_user((u64) ((u64) size) << 9, (u64 *) a))
++ return -EFAULT;
++ break;
++
++ case BLKRRPART:
++ return -ENOTTY;
++
++ case LV_BMAP:
++ return dm_user_bmap(inode, (struct lv_bmap *) a);
++
++ default:
++ DMWARN("unknown block ioctl 0x%x", command);
++ return -ENOTTY;
++ }
++
++ return 0;
++}
++
++/*
++ * Add the buffer to the list of deferred io.
++ */
++static int queue_io(struct mapped_device *md, struct buffer_head *bh, int rw)
++{
++ struct deferred_io *di;
++
++ di = alloc_deferred();
++ if (!di)
++ return -ENOMEM;
++
++ down_write(&md->lock);
++
++ if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
++ up_write(&md->lock);
++ free_deferred(di);
++ return 1;
++ }
++
++ di->bh = bh;
++ di->rw = rw;
++ di->next = md->deferred;
++ md->deferred = di;
++
++ up_write(&md->lock);
++ return 0; /* deferred successfully */
++}
++
++/*
++ * bh->b_end_io routine that decrements the pending count
++ * and then calls the original bh->b_end_io fn.
++ */
++static void dec_pending(struct buffer_head *bh, int uptodate)
++{
++ int r;
++ struct dm_io *io = bh->b_private;
++ dm_endio_fn endio = io->ti->type->end_io;
++
++ if (endio) {
++ r = endio(io->ti, bh, io->rw, uptodate ? 0 : -EIO,
++ &io->map_context);
++ if (r < 0)
++ uptodate = 0;
++
++ else if (r > 0)
++ /* the target wants another shot at the io */
++ return;
++ }
++
++ if (atomic_dec_and_test(&io->md->pending))
++ /* nudge anyone waiting on suspend queue */
++ wake_up(&io->md->wait);
++
++ bh->b_end_io = io->end_io;
++ bh->b_private = io->context;
++ free_io(io->md, io);
++
++ bh->b_end_io(bh, uptodate);
++}
++
++/*
++ * Do the bh mapping for a given leaf
++ */
++static inline int __map_buffer(struct mapped_device *md, int rw,
++ struct buffer_head *bh, struct dm_io *io)
++{
++ struct dm_target *ti;
++
++ if (!md->map)
++ return -EINVAL;
++
++ ti = dm_table_find_target(md->map, bh->b_rsector);
++ if (!ti->type)
++ return -EINVAL;
++
++ /* hook the end io request fn */
++ atomic_inc(&md->pending);
++ io->md = md;
++ io->ti = ti;
++ io->rw = rw;
++ io->end_io = bh->b_end_io;
++ io->context = bh->b_private;
++ bh->b_end_io = dec_pending;
++ bh->b_private = io;
++
++ return ti->type->map(ti, bh, rw, &io->map_context);
++}
++
++/*
++ * Checks to see if we should be deferring io, if so it queues it
++ * and returns 1.
++ */
++static inline int __deferring(struct mapped_device *md, int rw,
++ struct buffer_head *bh)
++{
++ int r;
++
++ /*
++ * If we're suspended we have to queue this io for later.
++ */
++ while (test_bit(DMF_BLOCK_IO, &md->flags)) {
++ up_read(&md->lock);
++
++ /*
++ * There's no point deferring a read ahead
++ * request, just drop it.
++ */
++ if (rw == READA) {
++ down_read(&md->lock);
++ return -EIO;
++ }
++
++ r = queue_io(md, bh, rw);
++ down_read(&md->lock);
++
++ if (r < 0)
++ return r;
++
++ if (r == 0)
++ return 1; /* deferred successfully */
++
++ }
++
++ return 0;
++}
++
++static int dm_request(request_queue_t *q, int rw, struct buffer_head *bh)
++{
++ int r;
++ struct dm_io *io;
++ struct mapped_device *md;
++
++ md = get_kdev(bh->b_rdev);
++ if (!md) {
++ buffer_IO_error(bh);
++ return 0;
++ }
++
++ io = alloc_io(md);
++ down_read(&md->lock);
++
++ r = __deferring(md, rw, bh);
++ if (r < 0)
++ goto bad;
++
++ else if (!r) {
++ /* not deferring */
++ r = __map_buffer(md, rw, bh, io);
++ if (r < 0)
++ goto bad;
++ } else
++ r = 0;
++
++ up_read(&md->lock);
++ dm_put(md);
++ return r;
++
++ bad:
++ buffer_IO_error(bh);
++ up_read(&md->lock);
++ dm_put(md);
++ return 0;
++}
++
++static int check_dev_size(kdev_t dev, unsigned long block)
++{
++ unsigned int major = major(dev);
++ unsigned int minor = minor(dev);
++
++ /* FIXME: check this */
++ unsigned long max_sector = (blk_size[major][minor] << 1) + 1;
++ unsigned long sector = (block + 1) * (blksize_size[major][minor] >> 9);
++
++ return (sector > max_sector) ? 0 : 1;
++}
++
++/*
++ * Creates a dummy buffer head and maps it (for lilo).
++ */
++static int __bmap(struct mapped_device *md, kdev_t dev, unsigned long block,
++ kdev_t *r_dev, unsigned long *r_block)
++{
++ struct buffer_head bh;
++ struct dm_target *ti;
++ union map_info map_context;
++ int r;
++
++ if (test_bit(DMF_BLOCK_IO, &md->flags)) {
++ return -EPERM;
++ }
++
++ if (!check_dev_size(dev, block)) {
++ return -EINVAL;
++ }
++
++ if (!md->map)
++ return -EINVAL;
++
++ /* setup dummy bh */
++ memset(&bh, 0, sizeof(bh));
++ bh.b_blocknr = block;
++ bh.b_dev = bh.b_rdev = dev;
++ bh.b_size = blksize_size[major(dev)][minor(dev)];
++ bh.b_rsector = block * (bh.b_size >> 9);
++
++ /* find target */
++ ti = dm_table_find_target(md->map, bh.b_rsector);
++
++ /* do the mapping */
++ r = ti->type->map(ti, &bh, READ, &map_context);
++ ti->type->end_io(ti, &bh, READ, 0, &map_context);
++
++ if (!r) {
++ *r_dev = bh.b_rdev;
++ *r_block = bh.b_rsector / (bh.b_size >> 9);
++ }
++
++ return r;
++}
++
++/*
++ * Marshals arguments and results between user and kernel space.
++ */
++static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb)
++{
++ struct mapped_device *md;
++ unsigned long block, r_block;
++ kdev_t r_dev;
++ int r;
++
++ if (get_user(block, &lvb->lv_block))
++ return -EFAULT;
++
++ md = get_kdev(inode->i_rdev);
++ if (!md)
++ return -ENXIO;
++
++ down_read(&md->lock);
++ r = __bmap(md, inode->i_rdev, block, &r_dev, &r_block);
++ up_read(&md->lock);
++ dm_put(md);
++
++ if (!r && (put_user(kdev_t_to_nr(r_dev), &lvb->lv_dev) ||
++ put_user(r_block, &lvb->lv_block)))
++ r = -EFAULT;
++
++ return r;
++}
++
++static void free_md(struct mapped_device *md)
++{
++ free_dev(md->dev);
++ mempool_destroy(md->io_pool);
++ kfree(md);
++}
++
++/*
++ * Allocate and initialise a blank device with a given minor.
++ */
++static struct mapped_device *alloc_md(kdev_t dev)
++{
++ int r;
++ struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
++
++ if (!md) {
++ DMWARN("unable to allocate device, out of memory.");
++ return NULL;
++ }
++
++ memset(md, 0, sizeof(*md));
++
++ /* Allocate suitable device number */
++ if (!dev)
++ r = first_free_dev(md);
++ else
++ r = specific_dev(dev, md);
++
++ if (r) {
++ kfree(md);
++ return NULL;
++ }
++
++ md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
++ mempool_free_slab, _io_cache);
++ if (!md->io_pool) {
++ free_md(md);
++ kfree(md);
++ return NULL;
++ }
++
++ init_rwsem(&md->lock);
++ atomic_set(&md->holders, 1);
++ atomic_set(&md->pending, 0);
++ init_waitqueue_head(&md->wait);
++ init_waitqueue_head(&md->eventq);
++
++ return md;
++}
++
++/*
++ * The hardsect size for a mapped device is the largest hardsect size
++ * from the devices it maps onto.
++ */
++static int __find_hardsect_size(struct list_head *devices)
++{
++ int result = 512, size;
++ struct list_head *tmp;
++
++ list_for_each (tmp, devices) {
++ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
++ size = get_hardsect_size(dd->dev);
++ if (size > result)
++ result = size;
++ }
++
++ return result;
++}
++
++/*
++ * Bind a table to the device.
++ */
++static void event_callback(void *context)
++{
++ struct mapped_device *md = (struct mapped_device *) context;
++
++ down_write(&md->lock);
++ md->event_nr++;
++ wake_up_interruptible(&md->eventq);
++ up_write(&md->lock);
++}
++
++static int __bind(struct mapped_device *md, struct dm_table *t)
++{
++ unsigned int minor = minor(md->dev);
++ unsigned int major = major(md->dev);
++ md->map = t;
++
++ /* in k */
++ blk_size[major][minor] = dm_table_get_size(t) >> 1;
++ blksize_size[major][minor] = BLOCK_SIZE;
++ hardsect_size[major][minor] =
++ __find_hardsect_size(dm_table_get_devices(t));
++ register_disk(NULL, md->dev, 1, &dm_blk_dops, blk_size[major][minor]);
++
++ dm_table_event_callback(md->map, event_callback, md);
++ dm_table_get(t);
++ return 0;
++}
++
++static void __unbind(struct mapped_device *md)
++{
++ unsigned int minor = minor(md->dev);
++ unsigned int major = major(md->dev);
++
++ if (md->map) {
++ dm_table_event_callback(md->map, NULL, NULL);
++ dm_table_put(md->map);
++ md->map = NULL;
++
++ }
++
++ blk_size[major][minor] = 0;
++ blksize_size[major][minor] = 0;
++ hardsect_size[major][minor] = 0;
++}
++
++/*
++ * Constructor for a new device.
++ */
++int dm_create(kdev_t dev, struct mapped_device **result)
++{
++ struct mapped_device *md;
++
++ md = alloc_md(dev);
++ if (!md)
++ return -ENXIO;
++
++ __unbind(md); /* Ensure zero device size */
++
++ *result = md;
++ return 0;
++}
++
++void dm_get(struct mapped_device *md)
++{
++ atomic_inc(&md->holders);
++}
++
++void dm_put(struct mapped_device *md)
++{
++ if (atomic_dec_and_test(&md->holders)) {
++ if (md->map)
++ dm_table_suspend_targets(md->map);
++ __unbind(md);
++ free_md(md);
++ }
++}
++
++/*
++ * Requeue the deferred io by calling generic_make_request.
++ */
++static void flush_deferred_io(struct deferred_io *c)
++{
++ struct deferred_io *n;
++
++ while (c) {
++ n = c->next;
++ generic_make_request(c->rw, c->bh);
++ free_deferred(c);
++ c = n;
++ }
++}
++
++/*
++ * Swap in a new table (destroying old one).
++ */
++int dm_swap_table(struct mapped_device *md, struct dm_table *table)
++{
++ int r;
++
++ down_write(&md->lock);
++
++ /*
++ * The device must be suspended, or have no table bound yet.
++ */
++ if (md->map && !test_bit(DMF_SUSPENDED, &md->flags)) {
++ up_write(&md->lock);
++ return -EPERM;
++ }
++
++ __unbind(md);
++	r = __bind(md, table);
++
++	/* release md->lock even if __bind() failed */
++	up_write(&md->lock);
++
++	return r;
++}
++
++/*
++ * We need to be able to change a mapping table under a mounted
++ * filesystem. For example we might want to move some data in
++ * the background. Before the table can be swapped with
++ * dm_bind_table, dm_suspend must be called to flush any in
++ * flight io and ensure that any further io gets deferred.
++ */
++int dm_suspend(struct mapped_device *md)
++{
++ int r = 0;
++ DECLARE_WAITQUEUE(wait, current);
++
++ /* Flush IO to the origin device */
++ down_read(&md->lock);
++ if (test_bit(DMF_BLOCK_IO, &md->flags)) {
++ up_read(&md->lock);
++ return -EINVAL;
++ }
++
++ fsync_dev_lockfs(md->dev);
++ up_read(&md->lock);
++
++
++ /*
++ * Set the BLOCK_IO flag so no more ios will be mapped.
++ */
++ down_write(&md->lock);
++ if (test_bit(DMF_BLOCK_IO, &md->flags)) {
++ unlockfs(md->dev);
++ up_write(&md->lock);
++ return -EINVAL;
++ }
++
++ set_bit(DMF_BLOCK_IO, &md->flags);
++ add_wait_queue(&md->wait, &wait);
++ up_write(&md->lock);
++
++ /*
++ * Then we wait for the already mapped ios to
++ * complete.
++ */
++ run_task_queue(&tq_disk);
++ while (1) {
++ set_current_state(TASK_INTERRUPTIBLE);
++
++ if (!atomic_read(&md->pending) || signal_pending(current))
++ break;
++
++ schedule();
++ }
++ set_current_state(TASK_RUNNING);
++
++ down_write(&md->lock);
++ remove_wait_queue(&md->wait, &wait);
++
++ /* did we flush everything ? */
++ if (atomic_read(&md->pending)) {
++ unlockfs(md->dev);
++ clear_bit(DMF_BLOCK_IO, &md->flags);
++ r = -EINTR;
++ } else {
++ set_bit(DMF_SUSPENDED, &md->flags);
++ if (md->map)
++ dm_table_suspend_targets(md->map);
++ }
++ up_write(&md->lock);
++
++ return r;
++}
++
++int dm_resume(struct mapped_device *md)
++{
++ struct deferred_io *def;
++
++ down_write(&md->lock);
++ if (!test_bit(DMF_SUSPENDED, &md->flags)) {
++ up_write(&md->lock);
++ return -EINVAL;
++ }
++
++ if (md->map)
++ dm_table_resume_targets(md->map);
++
++ clear_bit(DMF_SUSPENDED, &md->flags);
++ clear_bit(DMF_BLOCK_IO, &md->flags);
++ def = md->deferred;
++ md->deferred = NULL;
++ up_write(&md->lock);
++
++ unlockfs(md->dev);
++ flush_deferred_io(def);
++ run_task_queue(&tq_disk);
++
++ return 0;
++}
++
++struct dm_table *dm_get_table(struct mapped_device *md)
++{
++ struct dm_table *t;
++
++ down_read(&md->lock);
++ t = md->map;
++ if (t)
++ dm_table_get(t);
++ up_read(&md->lock);
++
++ return t;
++}
++
++/*-----------------------------------------------------------------
++ * Event notification.
++ *---------------------------------------------------------------*/
++uint32_t dm_get_event_nr(struct mapped_device *md)
++{
++ uint32_t r;
++
++ down_read(&md->lock);
++ r = md->event_nr;
++ up_read(&md->lock);
++
++ return r;
++}
++
++int dm_add_wait_queue(struct mapped_device *md, wait_queue_t *wq,
++ uint32_t event_nr)
++{
++ down_write(&md->lock);
++ if (event_nr != md->event_nr) {
++ up_write(&md->lock);
++ return 1;
++ }
++
++ add_wait_queue(&md->eventq, wq);
++ up_write(&md->lock);
++
++ return 0;
++}
++
++const char *dm_kdevname(kdev_t dev)
++{
++ static char buffer[32];
++ sprintf(buffer, "%03d:%03d", MAJOR(dev), MINOR(dev));
++ return buffer;
++}
++
++void dm_remove_wait_queue(struct mapped_device *md, wait_queue_t *wq)
++{
++ down_write(&md->lock);
++ remove_wait_queue(&md->eventq, wq);
++ up_write(&md->lock);
++}
++
++kdev_t dm_kdev(struct mapped_device *md)
++{
++ kdev_t dev;
++
++ down_read(&md->lock);
++ dev = md->dev;
++ up_read(&md->lock);
++
++ return dev;
++}
++
++int dm_suspended(struct mapped_device *md)
++{
++ return test_bit(DMF_SUSPENDED, &md->flags);
++}
++
++struct block_device_operations dm_blk_dops = {
++ .open = dm_blk_open,
++ .release = dm_blk_close,
++ .ioctl = dm_blk_ioctl,
++ .owner = THIS_MODULE
++};
++
++/*
++ * module hooks
++ */
++module_init(dm_init);
++module_exit(dm_exit);
++
++MODULE_DESCRIPTION(DM_NAME " driver");
++MODULE_AUTHOR("Joe Thornber <thornber@sistina.com>");
++MODULE_LICENSE("GPL");
++
++EXPORT_SYMBOL(dm_kdevname);
+diff -urN linux-2.4.22/drivers/md/dm.h linux-2.4.22-dm/drivers/md/dm.h
+--- linux-2.4.22/drivers/md/dm.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/dm.h 2003-09-15 17:03:21.000000000 +0200
+@@ -0,0 +1,175 @@
++/*
++ * Internal header file for device mapper
++ *
++ * Copyright (C) 2001, 2002 Sistina Software
++ *
++ * This file is released under the LGPL.
++ */
++
++#ifndef DM_INTERNAL_H
++#define DM_INTERNAL_H
++
++#include <linux/fs.h>
++#include <linux/device-mapper.h>
++#include <linux/list.h>
++#include <linux/blkdev.h>
++
++#define DM_NAME "device-mapper"
++#define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x)
++#define DMERR(f, x...) printk(KERN_ERR DM_NAME ": " f "\n" , ## x)
++#define DMINFO(f, x...) printk(KERN_INFO DM_NAME ": " f "\n" , ## x)
++
++/*
++ * FIXME: I think this should be with the definition of sector_t
++ * in types.h.
++ */
++#ifdef CONFIG_LBD
++#define SECTOR_FORMAT "%Lu"
++#else
++#define SECTOR_FORMAT "%lu"
++#endif
++
++#define SECTOR_SHIFT 9
++#define SECTOR_SIZE (1 << SECTOR_SHIFT)
++
++extern struct block_device_operations dm_blk_dops;
++
++/*
++ * List of devices that a metadevice uses and should open/close.
++ */
++struct dm_dev {
++ struct list_head list;
++
++ atomic_t count;
++ int mode;
++ kdev_t dev;
++ struct block_device *bdev;
++};
++
++struct dm_table;
++struct mapped_device;
++
++/*-----------------------------------------------------------------
++ * Functions for manipulating a struct mapped_device.
++ * Drop the reference with dm_put when you finish with the object.
++ *---------------------------------------------------------------*/
++int dm_create(kdev_t dev, struct mapped_device **md);
++
++/*
++ * Reference counting for md.
++ */
++void dm_get(struct mapped_device *md);
++void dm_put(struct mapped_device *md);
++
++/*
++ * A device can still be used while suspended, but I/O is deferred.
++ */
++int dm_suspend(struct mapped_device *md);
++int dm_resume(struct mapped_device *md);
++
++/*
++ * The device must be suspended before calling this method.
++ */
++int dm_swap_table(struct mapped_device *md, struct dm_table *t);
++
++/*
++ * Drop a reference on the table when you've finished with the
++ * result.
++ */
++struct dm_table *dm_get_table(struct mapped_device *md);
++
++/*
++ * Event functions.
++ */
++uint32_t dm_get_event_nr(struct mapped_device *md);
++int dm_add_wait_queue(struct mapped_device *md, wait_queue_t *wq,
++ uint32_t event_nr);
++void dm_remove_wait_queue(struct mapped_device *md, wait_queue_t *wq);
++
++/*
++ * Info functions.
++ */
++kdev_t dm_kdev(struct mapped_device *md);
++int dm_suspended(struct mapped_device *md);
++
++/*-----------------------------------------------------------------
++ * Functions for manipulating a table. Tables are also reference
++ * counted.
++ *---------------------------------------------------------------*/
++int dm_table_create(struct dm_table **result, int mode, unsigned num_targets);
++
++void dm_table_get(struct dm_table *t);
++void dm_table_put(struct dm_table *t);
++
++int dm_table_add_target(struct dm_table *t, const char *type,
++ sector_t start, sector_t len, char *params);
++int dm_table_complete(struct dm_table *t);
++void dm_table_event_callback(struct dm_table *t,
++ void (*fn)(void *), void *context);
++void dm_table_event(struct dm_table *t);
++sector_t dm_table_get_size(struct dm_table *t);
++struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
++struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
++unsigned int dm_table_get_num_targets(struct dm_table *t);
++struct list_head *dm_table_get_devices(struct dm_table *t);
++int dm_table_get_mode(struct dm_table *t);
++void dm_table_suspend_targets(struct dm_table *t);
++void dm_table_resume_targets(struct dm_table *t);
++
++/*-----------------------------------------------------------------
++ * A registry of target types.
++ *---------------------------------------------------------------*/
++int dm_target_init(void);
++void dm_target_exit(void);
++struct target_type *dm_get_target_type(const char *name);
++void dm_put_target_type(struct target_type *t);
++
++
++/*-----------------------------------------------------------------
++ * Useful inlines.
++ *---------------------------------------------------------------*/
++static inline int array_too_big(unsigned long fixed, unsigned long obj,
++ unsigned long num)
++{
++ return (num > (ULONG_MAX - fixed) / obj);
++}
++
++/*
++ * ceiling(n / size) * size
++ */
++static inline unsigned long dm_round_up(unsigned long n, unsigned long size)
++{
++ unsigned long r = n % size;
++ return n + (r ? (size - r) : 0);
++}
++
++/*
++ * Ceiling(n / size)
++ */
++static inline unsigned long dm_div_up(unsigned long n, unsigned long size)
++{
++ return dm_round_up(n, size) / size;
++}
++
++const char *dm_kdevname(kdev_t dev);
++
++/*
++ * The device-mapper can be driven through one of two interfaces;
++ * ioctl or filesystem, depending which patch you have applied.
++ */
++int dm_interface_init(void);
++void dm_interface_exit(void);
++
++/*
++ * Targets for linear and striped mappings
++ */
++int dm_linear_init(void);
++void dm_linear_exit(void);
++
++int dm_stripe_init(void);
++void dm_stripe_exit(void);
++
++int dm_snapshot_init(void);
++void dm_snapshot_exit(void);
++
++#endif
+diff -urN linux-2.4.22/drivers/md/kcopyd.c linux-2.4.22-dm/drivers/md/kcopyd.c
+--- linux-2.4.22/drivers/md/kcopyd.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/kcopyd.c 2003-09-15 17:02:08.000000000 +0200
+@@ -0,0 +1,650 @@
++/*
++ * Copyright (C) 2002 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#include <asm/atomic.h>
++
++#include <linux/blkdev.h>
++#include <linux/config.h>
++#include <linux/device-mapper.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/locks.h>
++#include <linux/mempool.h>
++#include <linux/module.h>
++#include <linux/pagemap.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++
++#include "kcopyd.h"
++#include "dm-daemon.h"
++
++/* FIXME: this is only needed for the DMERR macros */
++#include "dm.h"
++
++static struct dm_daemon _kcopyd;
++
++/*-----------------------------------------------------------------
++ * Each kcopyd client has its own little pool of preallocated
++ * pages for kcopyd io.
++ *---------------------------------------------------------------*/
++struct kcopyd_client {
++ struct list_head list;
++
++ spinlock_t lock;
++ struct list_head pages;
++ unsigned int nr_pages;
++ unsigned int nr_free_pages;
++};
++
++static inline void __push_page(struct kcopyd_client *kc, struct page *p)
++{
++ list_add(&p->list, &kc->pages);
++ kc->nr_free_pages++;
++}
++
++static inline struct page *__pop_page(struct kcopyd_client *kc)
++{
++ struct page *p;
++
++ p = list_entry(kc->pages.next, struct page, list);
++ list_del(&p->list);
++ kc->nr_free_pages--;
++
++ return p;
++}
++
++static int kcopyd_get_pages(struct kcopyd_client *kc,
++ unsigned int nr, struct list_head *pages)
++{
++ struct page *p;
++ INIT_LIST_HEAD(pages);
++
++ spin_lock(&kc->lock);
++ if (kc->nr_free_pages < nr) {
++ spin_unlock(&kc->lock);
++ return -ENOMEM;
++ }
++
++ while (nr--) {
++ p = __pop_page(kc);
++ list_add(&p->list, pages);
++ }
++ spin_unlock(&kc->lock);
++
++ return 0;
++}
++
++static void kcopyd_put_pages(struct kcopyd_client *kc, struct list_head *pages)
++{
++ struct list_head *tmp, *tmp2;
++
++ spin_lock(&kc->lock);
++ list_for_each_safe (tmp, tmp2, pages)
++ __push_page(kc, list_entry(tmp, struct page, list));
++ spin_unlock(&kc->lock);
++}
++
++/*
++ * These three functions resize the page pool.
++ */
++static void release_pages(struct list_head *pages)
++{
++ struct page *p;
++ struct list_head *tmp, *tmp2;
++
++ list_for_each_safe (tmp, tmp2, pages) {
++ p = list_entry(tmp, struct page, list);
++ UnlockPage(p);
++ __free_page(p);
++ }
++}
++
++static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
++{
++ unsigned int i;
++ struct page *p;
++ LIST_HEAD(new);
++
++ for (i = 0; i < nr; i++) {
++ p = alloc_page(GFP_KERNEL);
++ if (!p) {
++ release_pages(&new);
++ return -ENOMEM;
++ }
++
++ LockPage(p);
++ list_add(&p->list, &new);
++ }
++
++ kcopyd_put_pages(kc, &new);
++ kc->nr_pages += nr;
++ return 0;
++}
++
++static void client_free_pages(struct kcopyd_client *kc)
++{
++ BUG_ON(kc->nr_free_pages != kc->nr_pages);
++ release_pages(&kc->pages);
++ kc->nr_free_pages = kc->nr_pages = 0;
++}
++
++/*-----------------------------------------------------------------
++ * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
++ * for this reason we use a mempool to prevent the client from
++ * ever having to do io (which could cause a deadlock).
++ *---------------------------------------------------------------*/
++struct kcopyd_job {
++ struct kcopyd_client *kc;
++ struct list_head list;
++ unsigned int flags;
++
++ /*
++ * Error state of the job.
++ */
++ int read_err;
++ unsigned int write_err;
++
++ /*
++ * Either READ or WRITE
++ */
++ int rw;
++ struct io_region source;
++
++ /*
++ * The destinations for the transfer.
++ */
++ unsigned int num_dests;
++ struct io_region dests[KCOPYD_MAX_REGIONS];
++
++ sector_t offset;
++ unsigned int nr_pages;
++ struct list_head pages;
++
++ /*
++ * Set this to ensure you are notified when the job has
++ * completed. 'context' is for callback to use.
++ */
++ kcopyd_notify_fn fn;
++ void *context;
++
++ /*
++ * These fields are only used if the job has been split
++ * into more manageable parts.
++ */
++ struct semaphore lock;
++ atomic_t sub_jobs;
++ sector_t progress;
++};
++
++/* FIXME: this should scale with the number of pages */
++#define MIN_JOBS 512
++
++static kmem_cache_t *_job_cache;
++static mempool_t *_job_pool;
++
++/*
++ * We maintain three lists of jobs:
++ *
++ * i) jobs waiting for pages
++ * ii) jobs that have pages, and are waiting for the io to be issued.
++ * iii) jobs that have completed.
++ *
++ * All three of these are protected by job_lock.
++ */
++static spinlock_t _job_lock = SPIN_LOCK_UNLOCKED;
++
++static LIST_HEAD(_complete_jobs);
++static LIST_HEAD(_io_jobs);
++static LIST_HEAD(_pages_jobs);
++
++static int jobs_init(void)
++{
++ INIT_LIST_HEAD(&_complete_jobs);
++ INIT_LIST_HEAD(&_io_jobs);
++ INIT_LIST_HEAD(&_pages_jobs);
++
++ _job_cache = kmem_cache_create("kcopyd-jobs",
++ sizeof(struct kcopyd_job),
++ __alignof__(struct kcopyd_job),
++ 0, NULL, NULL);
++ if (!_job_cache)
++ return -ENOMEM;
++
++ _job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab,
++ mempool_free_slab, _job_cache);
++ if (!_job_pool) {
++ kmem_cache_destroy(_job_cache);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static void jobs_exit(void)
++{
++ BUG_ON(!list_empty(&_complete_jobs));
++ BUG_ON(!list_empty(&_io_jobs));
++ BUG_ON(!list_empty(&_pages_jobs));
++
++ mempool_destroy(_job_pool);
++ kmem_cache_destroy(_job_cache);
++}
++
++/*
++ * Functions to push and pop a job onto the head of a given job
++ * list.
++ */
++static inline struct kcopyd_job *pop(struct list_head *jobs)
++{
++ struct kcopyd_job *job = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&_job_lock, flags);
++
++ if (!list_empty(jobs)) {
++ job = list_entry(jobs->next, struct kcopyd_job, list);
++ list_del(&job->list);
++ }
++ spin_unlock_irqrestore(&_job_lock, flags);
++
++ return job;
++}
++
++static inline void push(struct list_head *jobs, struct kcopyd_job *job)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&_job_lock, flags);
++ list_add_tail(&job->list, jobs);
++ spin_unlock_irqrestore(&_job_lock, flags);
++}
++
++/*
++ * These three functions process 1 item from the corresponding
++ * job list.
++ *
++ * They return:
++ * < 0: error
++ * 0: success
++ * > 0: can't process yet.
++ */
++static int run_complete_job(struct kcopyd_job *job)
++{
++ void *context = job->context;
++ int read_err = job->read_err;
++ unsigned int write_err = job->write_err;
++ kcopyd_notify_fn fn = job->fn;
++
++ kcopyd_put_pages(job->kc, &job->pages);
++ mempool_free(job, _job_pool);
++ fn(read_err, write_err, context);
++ return 0;
++}
++
++static void complete_io(unsigned int error, void *context)
++{
++ struct kcopyd_job *job = (struct kcopyd_job *) context;
++
++ if (error) {
++ if (job->rw == WRITE)
++			job->write_err |= error;
++ else
++ job->read_err = 1;
++
++ if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
++ push(&_complete_jobs, job);
++ dm_daemon_wake(&_kcopyd);
++ return;
++ }
++ }
++
++ if (job->rw == WRITE)
++ push(&_complete_jobs, job);
++
++ else {
++ job->rw = WRITE;
++ push(&_io_jobs, job);
++ }
++
++ dm_daemon_wake(&_kcopyd);
++}
++
++/*
++ * Request io on as many buffer heads as we can currently get for
++ * a particular job.
++ */
++static int run_io_job(struct kcopyd_job *job)
++{
++ int r;
++
++ if (job->rw == READ)
++ r = dm_io_async(1, &job->source, job->rw,
++ list_entry(job->pages.next, struct page, list),
++ job->offset, complete_io, job);
++
++ else
++ r = dm_io_async(job->num_dests, job->dests, job->rw,
++ list_entry(job->pages.next, struct page, list),
++ job->offset, complete_io, job);
++
++ return r;
++}
++
++#define SECTORS_PER_PAGE (PAGE_SIZE / SECTOR_SIZE)
++static int run_pages_job(struct kcopyd_job *job)
++{
++ int r;
++
++ job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
++ SECTORS_PER_PAGE);
++ r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
++ if (!r) {
++ /* this job is ready for io */
++ push(&_io_jobs, job);
++ return 0;
++ }
++
++ if (r == -ENOMEM)
++ /* can't complete now */
++ return 1;
++
++ return r;
++}
++
++/*
++ * Run through a list for as long as possible. Returns the count
++ * of successful jobs.
++ */
++static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
++{
++ struct kcopyd_job *job;
++ int r, count = 0;
++
++ while ((job = pop(jobs))) {
++
++ r = fn(job);
++
++ if (r < 0) {
++ /* error this rogue job */
++ if (job->rw == WRITE)
++ job->write_err = (unsigned int) -1;
++ else
++ job->read_err = 1;
++ push(&_complete_jobs, job);
++ break;
++ }
++
++ if (r > 0) {
++ /*
++ * We couldn't service this job ATM, so
++ * push this job back onto the list.
++ */
++ push(jobs, job);
++ break;
++ }
++
++ count++;
++ }
++
++ return count;
++}
++
++/*
++ * kcopyd does this every time it's woken up.
++ */
++static void do_work(void)
++{
++ /*
++ * The order that these are called is *very* important.
++ * complete jobs can free some pages for pages jobs.
++ * Pages jobs when successful will jump onto the io jobs
++ * list. io jobs call wake when they complete and it all
++ * starts again.
++ */
++ process_jobs(&_complete_jobs, run_complete_job);
++ process_jobs(&_pages_jobs, run_pages_job);
++ process_jobs(&_io_jobs, run_io_job);
++ run_task_queue(&tq_disk);
++}
++
++/*
++ * If we are copying a small region we just dispatch a single job
++ * to do the copy, otherwise the io has to be split up into many
++ * jobs.
++ */
++static void dispatch_job(struct kcopyd_job *job)
++{
++ push(&_pages_jobs, job);
++ dm_daemon_wake(&_kcopyd);
++}
++
++#define SUB_JOB_SIZE 128
++static void segment_complete(int read_err,
++ unsigned int write_err, void *context)
++{
++ /* FIXME: tidy this function */
++ sector_t progress = 0;
++ sector_t count = 0;
++ struct kcopyd_job *job = (struct kcopyd_job *) context;
++
++ down(&job->lock);
++
++ /* update the error */
++ if (read_err)
++ job->read_err = 1;
++
++ if (write_err)
++		job->write_err |= write_err;
++
++ /*
++ * Only dispatch more work if there hasn't been an error.
++ */
++ if ((!job->read_err && !job->write_err) ||
++ test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
++ /* get the next chunk of work */
++ progress = job->progress;
++ count = job->source.count - progress;
++ if (count) {
++ if (count > SUB_JOB_SIZE)
++ count = SUB_JOB_SIZE;
++
++ job->progress += count;
++ }
++ }
++ up(&job->lock);
++
++ if (count) {
++ int i;
++ struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);
++
++ memcpy(sub_job, job, sizeof(*job));
++ sub_job->source.sector += progress;
++ sub_job->source.count = count;
++
++ for (i = 0; i < job->num_dests; i++) {
++ sub_job->dests[i].sector += progress;
++ sub_job->dests[i].count = count;
++ }
++
++ sub_job->fn = segment_complete;
++ sub_job->context = job;
++ dispatch_job(sub_job);
++
++ } else if (atomic_dec_and_test(&job->sub_jobs)) {
++
++ /*
++ * To avoid a race we must keep the job around
++ * until after the notify function has completed.
++ * Otherwise the client may try and stop the job
++ * after we've completed.
++ */
++ job->fn(read_err, write_err, job->context);
++ mempool_free(job, _job_pool);
++ }
++}
++
++/*
++ * Create some little jobs that will do the move between
++ * them.
++ */
++#define SPLIT_COUNT 8
++static void split_job(struct kcopyd_job *job)
++{
++ int i;
++
++ atomic_set(&job->sub_jobs, SPLIT_COUNT);
++ for (i = 0; i < SPLIT_COUNT; i++)
++ segment_complete(0, 0u, job);
++}
++
++#define SUB_JOB_THRESHOLD (SPLIT_COUNT * SUB_JOB_SIZE)
++int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
++ unsigned int num_dests, struct io_region *dests,
++ unsigned int flags, kcopyd_notify_fn fn, void *context)
++{
++ struct kcopyd_job *job;
++
++ /*
++ * Allocate a new job.
++ */
++ job = mempool_alloc(_job_pool, GFP_NOIO);
++
++ /*
++ * set up for the read.
++ */
++ job->kc = kc;
++ job->flags = flags;
++ job->read_err = 0;
++ job->write_err = 0;
++ job->rw = READ;
++
++ memcpy(&job->source, from, sizeof(*from));
++
++ job->num_dests = num_dests;
++ memcpy(&job->dests, dests, sizeof(*dests) * num_dests);
++
++ job->offset = 0;
++ job->nr_pages = 0;
++ INIT_LIST_HEAD(&job->pages);
++
++ job->fn = fn;
++ job->context = context;
++
++ if (job->source.count < SUB_JOB_THRESHOLD)
++ dispatch_job(job);
++
++ else {
++ init_MUTEX(&job->lock);
++ job->progress = 0;
++ split_job(job);
++ }
++
++ return 0;
++}
++
++/*
++ * Cancels a kcopyd job, eg. someone might be deactivating a
++ * mirror.
++ */
++int kcopyd_cancel(struct kcopyd_job *job, int block)
++{
++ /* FIXME: finish */
++ return -1;
++}
++
++/*-----------------------------------------------------------------
++ * Unit setup
++ *---------------------------------------------------------------*/
++static DECLARE_MUTEX(_client_lock);
++static LIST_HEAD(_clients);
++
++static int client_add(struct kcopyd_client *kc)
++{
++ down(&_client_lock);
++ list_add(&kc->list, &_clients);
++ up(&_client_lock);
++ return 0;
++}
++
++static void client_del(struct kcopyd_client *kc)
++{
++ down(&_client_lock);
++ list_del(&kc->list);
++ up(&_client_lock);
++}
++
++int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
++{
++ int r = 0;
++ struct kcopyd_client *kc;
++
++ kc = kmalloc(sizeof(*kc), GFP_KERNEL);
++ if (!kc)
++ return -ENOMEM;
++
++ kc->lock = SPIN_LOCK_UNLOCKED;
++ INIT_LIST_HEAD(&kc->pages);
++ kc->nr_pages = kc->nr_free_pages = 0;
++ r = client_alloc_pages(kc, nr_pages);
++ if (r) {
++ kfree(kc);
++ return r;
++ }
++
++ r = dm_io_get(nr_pages);
++ if (r) {
++ client_free_pages(kc);
++ kfree(kc);
++ return r;
++ }
++
++ r = client_add(kc);
++ if (r) {
++ dm_io_put(nr_pages);
++ client_free_pages(kc);
++ kfree(kc);
++ return r;
++ }
++
++ *result = kc;
++ return 0;
++}
++
++void kcopyd_client_destroy(struct kcopyd_client *kc)
++{
++ dm_io_put(kc->nr_pages);
++ client_free_pages(kc);
++ client_del(kc);
++ kfree(kc);
++}
++
++
++int __init kcopyd_init(void)
++{
++ int r;
++
++ r = jobs_init();
++ if (r)
++ return r;
++
++ r = dm_daemon_start(&_kcopyd, "kcopyd", do_work);
++ if (r)
++ jobs_exit();
++
++ return r;
++}
++
++void kcopyd_exit(void)
++{
++ jobs_exit();
++ dm_daemon_stop(&_kcopyd);
++}
++
++EXPORT_SYMBOL(kcopyd_client_create);
++EXPORT_SYMBOL(kcopyd_client_destroy);
++EXPORT_SYMBOL(kcopyd_copy);
++EXPORT_SYMBOL(kcopyd_cancel);
+diff -urN linux-2.4.22/drivers/md/kcopyd.h linux-2.4.22-dm/drivers/md/kcopyd.h
+--- linux-2.4.22/drivers/md/kcopyd.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/drivers/md/kcopyd.h 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,47 @@
++/*
++ * Copyright (C) 2001 Sistina Software
++ *
++ * This file is released under the GPL.
++ */
++
++#ifndef DM_KCOPYD_H
++#define DM_KCOPYD_H
++
++/*
++ * Needed for the definition of offset_t.
++ */
++#include <linux/device-mapper.h>
++#include <linux/iobuf.h>
++
++#include "dm-io.h"
++
++int kcopyd_init(void);
++void kcopyd_exit(void);
++
++/* FIXME: make this configurable */
++#define KCOPYD_MAX_REGIONS 8
++
++#define KCOPYD_IGNORE_ERROR 1
++
++/*
++ * To use kcopyd you must first create a kcopyd client object.
++ */
++struct kcopyd_client;
++int kcopyd_client_create(unsigned int num_pages, struct kcopyd_client **result);
++void kcopyd_client_destroy(struct kcopyd_client *kc);
++
++/*
++ * Submit a copy job to kcopyd. This is built on top of the
++ * previous three fns.
++ *
++ * read_err is a boolean,
++ * write_err is a bitset, with 1 bit for each destination region
++ */
++typedef void (*kcopyd_notify_fn)(int read_err,
++ unsigned int write_err, void *context);
++
++int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
++ unsigned int num_dests, struct io_region *dests,
++ unsigned int flags, kcopyd_notify_fn fn, void *context);
++
++#endif
+diff -urN linux-2.4.22/drivers/md/lvm.c linux-2.4.22-dm/drivers/md/lvm.c
+--- linux-2.4.22/drivers/md/lvm.c 2003-06-13 16:51:34.000000000 +0200
++++ linux-2.4.22-dm/drivers/md/lvm.c 2003-09-15 17:02:19.000000000 +0200
+@@ -229,9 +229,6 @@
+ #define DEVICE_OFF(device)
+ #define LOCAL_END_REQUEST
+
+-/* lvm_do_lv_create calls fsync_dev_lockfs()/unlockfs() */
+-/* #define LVM_VFS_ENHANCEMENT */
+-
+ #include <linux/config.h>
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -2171,12 +2168,8 @@
+ if (lv_ptr->lv_access & LV_SNAPSHOT) {
+ lv_t *org = lv_ptr->lv_snapshot_org, *last;
+
+- /* sync the original logical volume */
+- fsync_dev(org->lv_dev);
+-#ifdef LVM_VFS_ENHANCEMENT
+ /* VFS function call to sync and lock the filesystem */
+ fsync_dev_lockfs(org->lv_dev);
+-#endif
+
+ down_write(&org->lv_lock);
+ org->lv_access |= LV_SNAPSHOT_ORG;
+@@ -2201,11 +2194,9 @@
+ else
+ set_device_ro(lv_ptr->lv_dev, 1);
+
+-#ifdef LVM_VFS_ENHANCEMENT
+ /* VFS function call to unlock the filesystem */
+ if (lv_ptr->lv_access & LV_SNAPSHOT)
+ unlockfs(lv_ptr->lv_snapshot_org->lv_dev);
+-#endif
+
+ lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de =
+ lvm_fs_create_lv(vg_ptr, lv_ptr);
+diff -urN linux-2.4.22/fs/buffer.c linux-2.4.22-dm/fs/buffer.c
+--- linux-2.4.22/fs/buffer.c 2003-09-15 16:54:17.000000000 +0200
++++ linux-2.4.22-dm/fs/buffer.c 2003-09-15 17:03:15.000000000 +0200
+@@ -412,6 +412,34 @@
+ fsync_dev(dev);
+ }
+
++int fsync_dev_lockfs(kdev_t dev)
++{
++ /* you are not allowed to try locking all the filesystems
++ ** on the system, your chances of getting through without
++ ** total deadlock are slim to none.
++ */
++ if (!dev)
++ return fsync_dev(dev) ;
++
++ sync_buffers(dev, 0);
++
++ lock_kernel();
++ /* note, the FS might need to start transactions to
++ ** sync the inodes, or the quota, no locking until
++ ** after these are done
++ */
++ sync_inodes(dev);
++ DQUOT_SYNC_DEV(dev);
++ /* if inodes or quotas could be dirtied during the
++ ** sync_supers_lockfs call, the FS is responsible for getting
++ ** them on disk, without deadlocking against the lock
++ */
++ sync_supers_lockfs(dev) ;
++ unlock_kernel();
++
++ return sync_buffers(dev, 1) ;
++}
++
+ asmlinkage long sys_sync(void)
+ {
+ fsync_dev(0);
+diff -urN linux-2.4.22/fs/reiserfs/super.c linux-2.4.22-dm/fs/reiserfs/super.c
+--- linux-2.4.22/fs/reiserfs/super.c 2003-09-15 16:54:17.000000000 +0200
++++ linux-2.4.22-dm/fs/reiserfs/super.c 2003-09-15 17:02:19.000000000 +0200
+@@ -84,7 +84,7 @@
+ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
+ journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
+ reiserfs_block_writes(&th) ;
+- journal_end(&th, s, 1) ;
++ journal_end_sync(&th, s, 1) ;
+ }
+ s->s_dirt = 0;
+ unlock_kernel() ;
+diff -urN linux-2.4.22/fs/super.c linux-2.4.22-dm/fs/super.c
+--- linux-2.4.22/fs/super.c 2003-09-15 16:54:15.000000000 +0200
++++ linux-2.4.22-dm/fs/super.c 2003-09-15 17:02:19.000000000 +0200
+@@ -39,6 +39,12 @@
+ spinlock_t sb_lock = SPIN_LOCK_UNLOCKED;
+
+ /*
++ * stub of a filesystem used to make sure an FS isn't mounted
++ * in the middle of a lockfs call
++ */
++static DECLARE_FSTYPE_DEV(lockfs_fs_type, "lockfs", NULL);
++
++/*
+ * Handling of filesystem drivers list.
+ * Rules:
+ * Inclusion to/removals from/scanning of list are protected by spinlock.
+@@ -442,6 +448,25 @@
+ put_super(sb);
+ }
+
++static void write_super_lockfs(struct super_block *sb)
++{
++ lock_super(sb);
++ if (sb->s_root && sb->s_op) {
++ if (sb->s_dirt && sb->s_op->write_super)
++ sb->s_op->write_super(sb);
++ if (sb->s_op->write_super_lockfs)
++ sb->s_op->write_super_lockfs(sb);
++ }
++ unlock_super(sb);
++
++ /*
++ * if no lockfs call is provided, use the sync_fs call instead.
++ * this must be done without the super lock held
++ */
++ if (!sb->s_op->write_super_lockfs && sb->s_op->sync_fs)
++ sb->s_op->sync_fs(sb);
++}
++
+ static inline void write_super(struct super_block *sb)
+ {
+ lock_super(sb);
+@@ -489,6 +514,119 @@
+ spin_unlock(&sb_lock);
+ }
+
++static struct super_block *find_super_for_lockfs(kdev_t dev)
++{
++	struct super_block *lockfs_sb, *s;
++
++	if (!dev)
++		return NULL;
++	lockfs_sb = alloc_super();
++restart:
++ spin_lock(&sb_lock);
++ s = find_super(dev);
++ if (s) {
++ spin_unlock(&sb_lock);
++ down_read(&s->s_umount);
++ if (s->s_root) {
++ destroy_super(lockfs_sb);
++ return s;
++ }
++ drop_super(s);
++ goto restart;
++ }
++ /* if (s) we either return or goto, so we know s == NULL here.
++ * At this point, there are no mounted filesystems on this device,
++ * so we pretend to mount one.
++ */
++ if (!lockfs_sb) {
++ spin_unlock(&sb_lock);
++ return NULL;
++ }
++ s = lockfs_sb;
++ s->s_dev = dev;
++ if (lockfs_fs_type.fs_supers.prev == NULL)
++ INIT_LIST_HEAD(&lockfs_fs_type.fs_supers);
++ insert_super(s, &lockfs_fs_type);
++ s->s_root = (struct dentry *)1;
++ /* alloc_super gives us a write lock on s_umount, this
++ * way we know there are no concurrent lockfs holders for this dev.
++ * It allows us to remove the temp super from the list of supers
++ * immediately when unlockfs is called
++ */
++ return s;
++}
++/*
++ * Note: don't check the dirty flag before waiting, we want the lock
++ * to happen every time this is called. dev must be non-zero
++ */
++void sync_supers_lockfs(kdev_t dev)
++{
++ struct super_block *sb;
++ sb = find_super_for_lockfs(dev);
++ if (sb) {
++ write_super_lockfs(sb);
++ /* the drop_super is done by unlockfs */
++ }
++}
++
++static void drop_super_lockfs(struct super_block *s)
++{
++ if (s->s_type == &lockfs_fs_type) {
++ struct file_system_type *fs = s->s_type;
++
++ /*
++ * nobody else is allowed to grab_super() on our temp
++ */
++ if (!deactivate_super(s))
++ BUG();
++
++ spin_lock(&sb_lock);
++ s->s_root = NULL;
++ list_del(&s->s_list);
++ list_del(&s->s_instances);
++ spin_unlock(&sb_lock);
++
++ up_write(&s->s_umount);
++ put_super(s);
++ put_filesystem(fs);
++ } else
++ drop_super(s);
++}
++
++void unlockfs(kdev_t dev)
++{
++ struct super_block *s;
++ if (!dev)
++ return;
++
++ spin_lock(&sb_lock);
++ s = find_super(dev);
++ if (s) {
++ /*
++ * find_super and the original lockfs call both incremented
++ * the reference count. drop one of them
++ */
++ s->s_count--;
++ spin_unlock(&sb_lock);
++ if (s->s_root) {
++ if (s->s_op->unlockfs)
++ s->s_op->unlockfs(s);
++ drop_super_lockfs(s);
++ goto out;
++ } else {
++ printk("unlockfs: no s_root, dev %s\n", kdevname(dev));
++ BUG();
++ }
++ } else {
++ printk("unlockfs: no super found, dev %s\n", kdevname(dev));
++ BUG();
++ }
++
++ spin_unlock(&sb_lock);
++out:
++ return;
++}
++
+ /**
+ * get_super - get the superblock of a device
+ * @dev: device to get the superblock for
+diff -urN linux-2.4.22/include/linux/device-mapper.h linux-2.4.22-dm/include/linux/device-mapper.h
+--- linux-2.4.22/include/linux/device-mapper.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/include/linux/device-mapper.h 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,104 @@
++/*
++ * Copyright (C) 2001 Sistina Software (UK) Limited.
++ *
++ * This file is released under the LGPL.
++ */
++
++#ifndef _LINUX_DEVICE_MAPPER_H
++#define _LINUX_DEVICE_MAPPER_H
++
++typedef unsigned long sector_t;
++
++struct dm_target;
++struct dm_table;
++struct dm_dev;
++
++typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
++
++union map_info {
++ void *ptr;
++ unsigned long long ll;
++};
++
++/*
++ * In the constructor the target parameter will already have the
++ * table, type, begin and len fields filled in.
++ */
++typedef int (*dm_ctr_fn) (struct dm_target * target, unsigned int argc,
++ char **argv);
++
++/*
++ * The destructor doesn't need to free the dm_target, just
++ * anything hidden in ti->private.
++ */
++typedef void (*dm_dtr_fn) (struct dm_target * ti);
++
++/*
++ * The map function must return:
++ * < 0: error
++ * = 0: The target will handle the io by resubmitting it later
++ * > 0: simple remap complete
++ */
++typedef int (*dm_map_fn) (struct dm_target * ti, struct buffer_head * bh,
++ int rw, union map_info *map_context);
++
++/*
++ * Returns:
++ * < 0 : error (currently ignored)
++ * 0 : ended successfully
++ * 1 : for some reason the io has still not completed (eg,
++ * multipath target might want to requeue a failed io).
++ */
++typedef int (*dm_endio_fn) (struct dm_target * ti,
++ struct buffer_head * bh, int rw, int error,
++ union map_info *map_context);
++typedef void (*dm_suspend_fn) (struct dm_target *ti);
++typedef void (*dm_resume_fn) (struct dm_target *ti);
++typedef int (*dm_status_fn) (struct dm_target * ti, status_type_t status_type,
++ char *result, unsigned int maxlen);
++
++void dm_error(const char *message);
++
++/*
++ * Constructors should call these functions to ensure destination devices
++ * are opened/closed correctly.
++ * FIXME: too many arguments.
++ */
++int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
++ sector_t len, int mode, struct dm_dev **result);
++void dm_put_device(struct dm_target *ti, struct dm_dev *d);
++
++/*
++ * Information about a target type
++ */
++struct target_type {
++ const char *name;
++ struct module *module;
++ dm_ctr_fn ctr;
++ dm_dtr_fn dtr;
++ dm_map_fn map;
++ dm_endio_fn end_io;
++ dm_suspend_fn suspend;
++ dm_resume_fn resume;
++ dm_status_fn status;
++};
++
++struct dm_target {
++ struct dm_table *table;
++ struct target_type *type;
++
++ /* target limits */
++ sector_t begin;
++ sector_t len;
++
++ /* target specific data */
++ void *private;
++
++ /* Used to provide an error string from the ctr */
++ char *error;
++};
++
++int dm_register_target(struct target_type *t);
++int dm_unregister_target(struct target_type *t);
++
++#endif /* _LINUX_DEVICE_MAPPER_H */
+diff -urN linux-2.4.22/include/linux/dm-ioctl.h linux-2.4.22-dm/include/linux/dm-ioctl.h
+--- linux-2.4.22/include/linux/dm-ioctl.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/include/linux/dm-ioctl.h 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,237 @@
++/*
++ * Copyright (C) 2001 - 2003 Sistina Software (UK) Limited.
++ *
++ * This file is released under the LGPL.
++ */
++
++#ifndef _LINUX_DM_IOCTL_H
++#define _LINUX_DM_IOCTL_H
++
++#include <linux/types.h>
++
++#define DM_DIR "mapper" /* Slashes not supported */
++#define DM_MAX_TYPE_NAME 16
++#define DM_NAME_LEN 128
++#define DM_UUID_LEN 129
++
++/*
++ * A traditional ioctl interface for the device mapper.
++ *
++ * Each device can have two tables associated with it, an
++ * 'active' table which is the one currently used by io passing
++ * through the device, and an 'inactive' one which is a table
++ * that is being prepared as a replacement for the 'active' one.
++ *
++ * DM_VERSION:
++ * Just get the version information for the ioctl interface.
++ *
++ * DM_REMOVE_ALL:
++ * Remove all dm devices, destroy all tables. Only really used
++ * for debug.
++ *
++ * DM_LIST_DEVICES:
++ * Get a list of all the dm device names.
++ *
++ * DM_DEV_CREATE:
++ * Create a new device, neither the 'active' or 'inactive' table
++ * slots will be filled. The device will be in suspended state
++ * after creation, however any io to the device will get errored
++ * since it will be out-of-bounds.
++ *
++ * DM_DEV_REMOVE:
++ * Remove a device, destroy any tables.
++ *
++ * DM_DEV_RENAME:
++ * Rename a device.
++ *
++ * DM_SUSPEND:
++ * This performs both suspend and resume, depending which flag is
++ * passed in.
++ * Suspend: This command will not return until all pending io to
++ * the device has completed. Further io will be deferred until
++ * the device is resumed.
++ * Resume: It is no longer an error to issue this command on an
++ * unsuspended device. If a table is present in the 'inactive'
++ * slot, it will be moved to the active slot, then the old table
++ * from the active slot will be _destroyed_. Finally the device
++ * is resumed.
++ *
++ * DM_DEV_STATUS:
++ * Retrieves the status for the table in the 'active' slot.
++ *
++ * DM_DEV_WAIT:
++ * Wait for a significant event to occur to the device. This
++ * could either be caused by an event triggered by one of the
++ * targets of the table in the 'active' slot, or a table change.
++ *
++ * DM_TABLE_LOAD:
++ * Load a table into the 'inactive' slot for the device. The
++ * device does _not_ need to be suspended prior to this command.
++ *
++ * DM_TABLE_CLEAR:
++ * Destroy any table in the 'inactive' slot (ie. abort).
++ *
++ * DM_TABLE_DEPS:
++ * Return a set of device dependencies for the 'active' table.
++ *
++ * DM_TABLE_STATUS:
++ * Return the targets status for the 'active' table.
++ */
++
++/*
++ * All ioctl arguments consist of a single chunk of memory, with
++ * this structure at the start. If a uuid is specified any
++ * lookup (eg. for a DM_INFO) will be done on that, *not* the
++ * name.
++ */
++struct dm_ioctl {
++ /*
++ * The version number is made up of three parts:
++ * major - no backward or forward compatibility,
++ * minor - only backwards compatible,
++ * patch - both backwards and forwards compatible.
++ *
++ * All clients of the ioctl interface should fill in the
++ * version number of the interface that they were
++ * compiled with.
++ *
++ * All recognised ioctl commands (ie. those that don't
++ * return -ENOTTY) fill out this field, even if the
++ * command failed.
++ */
++ uint32_t version[3]; /* in/out */
++ uint32_t data_size; /* total size of data passed in
++ * including this struct */
++
++ uint32_t data_start; /* offset to start of data
++ * relative to start of this struct */
++
++ uint32_t target_count; /* in/out */
++ int32_t open_count; /* out */
++ uint32_t flags; /* in/out */
++ uint32_t event_nr; /* in/out */
++ uint32_t padding;
++
++ uint64_t dev; /* in/out */
++
++ char name[DM_NAME_LEN]; /* device name */
++ char uuid[DM_UUID_LEN]; /* unique identifier for
++ * the block device */
++};
++
++/*
++ * Used to specify tables. These structures appear after the
++ * dm_ioctl.
++ */
++struct dm_target_spec {
++ uint64_t sector_start;
++ uint64_t length;
++ int32_t status; /* used when reading from kernel only */
++
++ /*
++ * Offset in bytes (from the start of this struct) to
++ * next target_spec.
++ */
++ uint32_t next;
++
++ char target_type[DM_MAX_TYPE_NAME];
++
++ /*
++ * Parameter string starts immediately after this object.
++ * Be careful to add padding after string to ensure correct
++ * alignment of subsequent dm_target_spec.
++ */
++};
++
++/*
++ * Used to retrieve the target dependencies.
++ */
++struct dm_target_deps {
++ uint32_t count; /* Array size */
++ uint32_t padding; /* unused */
++ uint64_t dev[0]; /* out */
++};
++
++/*
++ * Used to get a list of all dm devices.
++ */
++struct dm_name_list {
++ uint64_t dev;
++ uint32_t next; /* offset to the next record from
++ the _start_ of this */
++ char name[0];
++};
++
++/*
++ * If you change this make sure you make the corresponding change
++ * to dm-ioctl.c:lookup_ioctl()
++ */
++enum {
++ /* Top level cmds */
++ DM_VERSION_CMD = 0,
++ DM_REMOVE_ALL_CMD,
++ DM_LIST_DEVICES_CMD,
++
++ /* device level cmds */
++ DM_DEV_CREATE_CMD,
++ DM_DEV_REMOVE_CMD,
++ DM_DEV_RENAME_CMD,
++ DM_DEV_SUSPEND_CMD,
++ DM_DEV_STATUS_CMD,
++ DM_DEV_WAIT_CMD,
++
++ /* Table level cmds */
++ DM_TABLE_LOAD_CMD,
++ DM_TABLE_CLEAR_CMD,
++ DM_TABLE_DEPS_CMD,
++ DM_TABLE_STATUS_CMD,
++};
++
++#define DM_IOCTL 0xfd
++
++#define DM_VERSION _IOWR(DM_IOCTL, DM_VERSION_CMD, struct dm_ioctl)
++#define DM_REMOVE_ALL _IOWR(DM_IOCTL, DM_REMOVE_ALL_CMD, struct dm_ioctl)
++#define DM_LIST_DEVICES _IOWR(DM_IOCTL, DM_LIST_DEVICES_CMD, struct dm_ioctl)
++
++#define DM_DEV_CREATE _IOWR(DM_IOCTL, DM_DEV_CREATE_CMD, struct dm_ioctl)
++#define DM_DEV_REMOVE _IOWR(DM_IOCTL, DM_DEV_REMOVE_CMD, struct dm_ioctl)
++#define DM_DEV_RENAME _IOWR(DM_IOCTL, DM_DEV_RENAME_CMD, struct dm_ioctl)
++#define DM_DEV_SUSPEND _IOWR(DM_IOCTL, DM_DEV_SUSPEND_CMD, struct dm_ioctl)
++#define DM_DEV_STATUS _IOWR(DM_IOCTL, DM_DEV_STATUS_CMD, struct dm_ioctl)
++#define DM_DEV_WAIT _IOWR(DM_IOCTL, DM_DEV_WAIT_CMD, struct dm_ioctl)
++
++#define DM_TABLE_LOAD _IOWR(DM_IOCTL, DM_TABLE_LOAD_CMD, struct dm_ioctl)
++#define DM_TABLE_CLEAR _IOWR(DM_IOCTL, DM_TABLE_CLEAR_CMD, struct dm_ioctl)
++#define DM_TABLE_DEPS _IOWR(DM_IOCTL, DM_TABLE_DEPS_CMD, struct dm_ioctl)
++#define DM_TABLE_STATUS _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, struct dm_ioctl)
++
++#define DM_VERSION_MAJOR 4
++#define DM_VERSION_MINOR 0
++#define DM_VERSION_PATCHLEVEL 1
++#define DM_VERSION_EXTRA "-ioctl (2003-07-12)"
++
++/* Status bits */
++#define DM_READONLY_FLAG (1 << 0) /* In/Out */
++#define DM_SUSPEND_FLAG (1 << 1) /* In/Out */
++#define DM_PERSISTENT_DEV_FLAG (1 << 3) /* In */
++
++/*
++ * Flag passed into ioctl STATUS command to get table information
++ * rather than current status.
++ */
++#define DM_STATUS_TABLE_FLAG (1 << 4) /* In */
++
++/*
++ * Flags that indicate whether a table is present in either of
++ * the two table slots that a device has.
++ */
++#define DM_ACTIVE_PRESENT_FLAG (1 << 5) /* Out */
++#define DM_INACTIVE_PRESENT_FLAG (1 << 6) /* Out */
++
++/*
++ * Indicates that the buffer passed in wasn't big enough for the
++ * results.
++ */
++#define DM_BUFFER_FULL_FLAG (1 << 8) /* Out */
++
++#endif /* _LINUX_DM_IOCTL_H */
+diff -urN linux-2.4.22/include/linux/fs.h linux-2.4.22-dm/include/linux/fs.h
+--- linux-2.4.22/include/linux/fs.h 2003-09-15 16:54:17.000000000 +0200
++++ linux-2.4.22-dm/include/linux/fs.h 2003-09-15 17:02:19.000000000 +0200
+@@ -1288,6 +1288,7 @@
+ extern int sync_buffers(kdev_t, int);
+ extern void sync_dev(kdev_t);
+ extern int fsync_dev(kdev_t);
++extern int fsync_dev_lockfs(kdev_t);
+ extern int fsync_super(struct super_block *);
+ extern int fsync_no_super(kdev_t);
+ extern void sync_inodes_sb(struct super_block *);
+@@ -1305,6 +1306,8 @@
+ extern int filemap_fdatasync(struct address_space *);
+ extern int filemap_fdatawait(struct address_space *);
+ extern void sync_supers(kdev_t dev, int wait);
++extern void sync_supers_lockfs(kdev_t);
++extern void unlockfs(kdev_t);
+ extern int bmap(struct inode *, int);
+ extern int notify_change(struct dentry *, struct iattr *);
+ extern int permission(struct inode *, int);
+diff -urN linux-2.4.22/include/linux/mempool.h linux-2.4.22-dm/include/linux/mempool.h
+--- linux-2.4.22/include/linux/mempool.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/include/linux/mempool.h 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,31 @@
++/*
++ * memory buffer pool support
++ */
++#ifndef _LINUX_MEMPOOL_H
++#define _LINUX_MEMPOOL_H
++
++#include <linux/list.h>
++#include <linux/wait.h>
++
++struct mempool_s;
++typedef struct mempool_s mempool_t;
++
++typedef void * (mempool_alloc_t)(int gfp_mask, void *pool_data);
++typedef void (mempool_free_t)(void *element, void *pool_data);
++
++extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
++ mempool_free_t *free_fn, void *pool_data);
++extern int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask);
++extern void mempool_destroy(mempool_t *pool);
++extern void * mempool_alloc(mempool_t *pool, int gfp_mask);
++extern void mempool_free(void *element, mempool_t *pool);
++
++/*
++ * A mempool_alloc_t and mempool_free_t that get the memory from
++ * a slab that is passed in through pool_data.
++ */
++void *mempool_alloc_slab(int gfp_mask, void *pool_data);
++void mempool_free_slab(void *element, void *pool_data);
++
++
++#endif /* _LINUX_MEMPOOL_H */
+diff -urN linux-2.4.22/include/linux/vmalloc.h linux-2.4.22-dm/include/linux/vmalloc.h
+--- linux-2.4.22/include/linux/vmalloc.h 2003-08-25 13:44:44.000000000 +0200
++++ linux-2.4.22-dm/include/linux/vmalloc.h 2003-09-15 17:01:29.000000000 +0200
+@@ -29,6 +29,7 @@
+ extern void vmfree_area_pages(unsigned long address, unsigned long size);
+ extern int vmalloc_area_pages(unsigned long address, unsigned long size,
+ int gfp_mask, pgprot_t prot);
++extern void *vcalloc(unsigned long nmemb, unsigned long elem_size);
+
+ /*
+ * Allocate any pages
+diff -urN linux-2.4.22/kernel/ksyms.c linux-2.4.22-dm/kernel/ksyms.c
+--- linux-2.4.22/kernel/ksyms.c 2003-09-15 16:54:18.000000000 +0200
++++ linux-2.4.22-dm/kernel/ksyms.c 2003-09-15 17:02:19.000000000 +0200
+@@ -117,6 +117,7 @@
+ EXPORT_SYMBOL(__vmalloc);
+ EXPORT_SYMBOL(vmap);
+ EXPORT_SYMBOL(vmalloc_to_page);
++EXPORT_SYMBOL(vcalloc);
+ EXPORT_SYMBOL(mem_map);
+ EXPORT_SYMBOL(remap_page_range);
+ EXPORT_SYMBOL(max_mapnr);
+@@ -201,6 +202,8 @@
+ EXPORT_SYMBOL(invalidate_inode_pages);
+ EXPORT_SYMBOL(truncate_inode_pages);
+ EXPORT_SYMBOL(fsync_dev);
++EXPORT_SYMBOL(fsync_dev_lockfs);
++EXPORT_SYMBOL(unlockfs);
+ EXPORT_SYMBOL(fsync_no_super);
+ EXPORT_SYMBOL(permission);
+ EXPORT_SYMBOL(vfs_permission);
+diff -urN linux-2.4.22/mm/Makefile linux-2.4.22-dm/mm/Makefile
+--- linux-2.4.22/mm/Makefile 2003-09-15 16:54:07.000000000 +0200
++++ linux-2.4.22-dm/mm/Makefile 2003-09-15 17:01:29.000000000 +0200
+@@ -9,12 +9,12 @@
+
+ O_TARGET := mm.o
+
+-export-objs := shmem.o filemap.o memory.o page_alloc.o
++export-objs := shmem.o filemap.o memory.o page_alloc.o mempool.o
+
+ obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
+ vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
+ page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \
+- shmem.o
++ shmem.o mempool.o
+
+ obj-$(CONFIG_HIGHMEM) += highmem.o
+ obj-$(CONFIG_PROC_MM) += proc_mm.o
+diff -urN linux-2.4.22/mm/filemap.c linux-2.4.22-dm/mm/filemap.c
+--- linux-2.4.22/mm/filemap.c 2003-09-15 16:54:17.000000000 +0200
++++ linux-2.4.22-dm/mm/filemap.c 2003-09-15 17:01:29.000000000 +0200
+@@ -1742,7 +1742,8 @@
+ }
+ up(&inode->i_sem);
+ up_read(&inode->i_alloc_sem);
+- UPDATE_ATIME(filp->f_dentry->d_inode);
++ if (!S_ISBLK(inode->i_mode))
++ UPDATE_ATIME(filp->f_dentry->d_inode);
+ goto out;
+ }
+ }
+@@ -3128,8 +3129,12 @@
+ goto out;
+
+ remove_suid(inode);
+- inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+- mark_inode_dirty_sync(inode);
++
++ /* Don't update times for block devices using O_DIRECT */
++ if (!(file->f_flags & O_DIRECT) || !S_ISBLK(inode->i_mode)) {
++ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
++ mark_inode_dirty_sync(inode);
++ }
+
+ do {
+ unsigned long index, offset;
+diff -urN linux-2.4.22/mm/mempool.c linux-2.4.22-dm/mm/mempool.c
+--- linux-2.4.22/mm/mempool.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-dm/mm/mempool.c 2003-09-15 17:01:29.000000000 +0200
+@@ -0,0 +1,299 @@
++/*
++ * linux/mm/mempool.c
++ *
++ * memory buffer pool support. Such pools are mostly used
++ * for guaranteed, deadlock-free memory allocations during
++ * extreme VM load.
++ *
++ * started by Ingo Molnar, Copyright (C) 2001
++ */
++
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/mempool.h>
++
++struct mempool_s {
++ spinlock_t lock;
++ int min_nr; /* nr of elements at *elements */
++ int curr_nr; /* Current nr of elements at *elements */
++ void **elements;
++
++ void *pool_data;
++ mempool_alloc_t *alloc;
++ mempool_free_t *free;
++ wait_queue_head_t wait;
++};
++
++static void add_element(mempool_t *pool, void *element)
++{
++ BUG_ON(pool->curr_nr >= pool->min_nr);
++ pool->elements[pool->curr_nr++] = element;
++}
++
++static void *remove_element(mempool_t *pool)
++{
++ BUG_ON(pool->curr_nr <= 0);
++ return pool->elements[--pool->curr_nr];
++}
++
++static void free_pool(mempool_t *pool)
++{
++ while (pool->curr_nr) {
++ void *element = remove_element(pool);
++ pool->free(element, pool->pool_data);
++ }
++ kfree(pool->elements);
++ kfree(pool);
++}
++
++/**
++ * mempool_create - create a memory pool
++ * @min_nr: the minimum number of elements guaranteed to be
++ * allocated for this pool.
++ * @alloc_fn: user-defined element-allocation function.
++ * @free_fn: user-defined element-freeing function.
++ * @pool_data: optional private data available to the user-defined functions.
++ *
++ * this function creates and allocates a guaranteed size, preallocated
++ * memory pool. The pool can be used from the mempool_alloc and mempool_free
++ * functions. This function might sleep. Both the alloc_fn() and the free_fn()
++ * functions might sleep - as long as the mempool_alloc function is not called
++ * from IRQ contexts.
++ */
++mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
++ mempool_free_t *free_fn, void *pool_data)
++{
++ mempool_t *pool;
++
++ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
++ if (!pool)
++ return NULL;
++ memset(pool, 0, sizeof(*pool));
++ pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL);
++ if (!pool->elements) {
++ kfree(pool);
++ return NULL;
++ }
++ spin_lock_init(&pool->lock);
++ pool->min_nr = min_nr;
++ pool->pool_data = pool_data;
++ init_waitqueue_head(&pool->wait);
++ pool->alloc = alloc_fn;
++ pool->free = free_fn;
++
++ /*
++ * First pre-allocate the guaranteed number of buffers.
++ */
++ while (pool->curr_nr < pool->min_nr) {
++ void *element;
++
++ element = pool->alloc(GFP_KERNEL, pool->pool_data);
++ if (unlikely(!element)) {
++ free_pool(pool);
++ return NULL;
++ }
++ add_element(pool, element);
++ }
++ return pool;
++}
++
++/**
++ * mempool_resize - resize an existing memory pool
++ * @pool: pointer to the memory pool which was allocated via
++ * mempool_create().
++ * @new_min_nr: the new minimum number of elements guaranteed to be
++ * allocated for this pool.
++ * @gfp_mask: the usual allocation bitmask.
++ *
++ * This function shrinks/grows the pool. In the case of growing,
++ * it cannot be guaranteed that the pool will be grown to the new
++ * size immediately, but new mempool_free() calls will refill it.
++ *
++ * Note, the caller must guarantee that no mempool_destroy is called
++ * while this function is running. mempool_alloc() & mempool_free()
++ * might be called (eg. from IRQ contexts) while this function executes.
++ */
++int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask)
++{
++ void *element;
++ void **new_elements;
++ unsigned long flags;
++
++ BUG_ON(new_min_nr <= 0);
++
++ spin_lock_irqsave(&pool->lock, flags);
++ if (new_min_nr < pool->min_nr) {
++ while (pool->curr_nr > new_min_nr) {
++ element = remove_element(pool);
++ spin_unlock_irqrestore(&pool->lock, flags);
++ pool->free(element, pool->pool_data);
++ spin_lock_irqsave(&pool->lock, flags);
++ }
++ pool->min_nr = new_min_nr;
++ goto out_unlock;
++ }
++ spin_unlock_irqrestore(&pool->lock, flags);
++
++ /* Grow the pool */
++ new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
++ if (!new_elements)
++ return -ENOMEM;
++
++ spin_lock_irqsave(&pool->lock, flags);
++ memcpy(new_elements, pool->elements,
++ pool->curr_nr * sizeof(*new_elements));
++ kfree(pool->elements);
++ pool->elements = new_elements;
++ pool->min_nr = new_min_nr;
++
++ while (pool->curr_nr < pool->min_nr) {
++ spin_unlock_irqrestore(&pool->lock, flags);
++ element = pool->alloc(gfp_mask, pool->pool_data);
++ if (!element)
++ goto out;
++ spin_lock_irqsave(&pool->lock, flags);
++ if (pool->curr_nr < pool->min_nr)
++ add_element(pool, element);
++ else
++ kfree(element); /* Raced */
++ }
++out_unlock:
++ spin_unlock_irqrestore(&pool->lock, flags);
++out:
++ return 0;
++}
++
++/**
++ * mempool_destroy - deallocate a memory pool
++ * @pool: pointer to the memory pool which was allocated via
++ * mempool_create().
++ *
++ * this function only sleeps if the free_fn() function sleeps. The caller
++ * has to guarantee that all elements have been returned to the pool (ie:
++ * freed) prior to calling mempool_destroy().
++ */
++void mempool_destroy(mempool_t *pool)
++{
++ if (pool->curr_nr != pool->min_nr)
++ BUG(); /* There were outstanding elements */
++ free_pool(pool);
++}
++
++/**
++ * mempool_alloc - allocate an element from a specific memory pool
++ * @pool: pointer to the memory pool which was allocated via
++ * mempool_create().
++ * @gfp_mask: the usual allocation bitmask.
++ *
++ * this function only sleeps if the alloc_fn function sleeps or
++ * returns NULL. Note that due to preallocation, this function
++ * *never* fails when called from process contexts. (it might
++ * fail if called from an IRQ context.)
++ */
++void * mempool_alloc(mempool_t *pool, int gfp_mask)
++{
++ void *element;
++ unsigned long flags;
++ int curr_nr;
++ DECLARE_WAITQUEUE(wait, current);
++ int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
++
++repeat_alloc:
++ element = pool->alloc(gfp_nowait, pool->pool_data);
++ if (likely(element != NULL))
++ return element;
++
++ /*
++ * If the pool is less than 50% full then try harder
++ * to allocate an element:
++ */
++ if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) {
++ element = pool->alloc(gfp_mask, pool->pool_data);
++ if (likely(element != NULL))
++ return element;
++ }
++
++ /*
++ * Kick the VM at this point.
++ */
++ wakeup_bdflush();
++
++ spin_lock_irqsave(&pool->lock, flags);
++ if (likely(pool->curr_nr)) {
++ element = remove_element(pool);
++ spin_unlock_irqrestore(&pool->lock, flags);
++ return element;
++ }
++ spin_unlock_irqrestore(&pool->lock, flags);
++
++ /* We must not sleep in the GFP_ATOMIC case */
++ if (gfp_mask == gfp_nowait)
++ return NULL;
++
++ run_task_queue(&tq_disk);
++
++ add_wait_queue_exclusive(&pool->wait, &wait);
++ set_task_state(current, TASK_UNINTERRUPTIBLE);
++
++ spin_lock_irqsave(&pool->lock, flags);
++ curr_nr = pool->curr_nr;
++ spin_unlock_irqrestore(&pool->lock, flags);
++
++ if (!curr_nr)
++ schedule();
++
++ current->state = TASK_RUNNING;
++ remove_wait_queue(&pool->wait, &wait);
++
++ goto repeat_alloc;
++}
++
++/**
++ * mempool_free - return an element to the pool.
++ * @element: pool element pointer.
++ * @pool: pointer to the memory pool which was allocated via
++ * mempool_create().
++ *
++ * this function only sleeps if the free_fn() function sleeps.
++ */
++void mempool_free(void *element, mempool_t *pool)
++{
++ unsigned long flags;
++
++ if (pool->curr_nr < pool->min_nr) {
++ spin_lock_irqsave(&pool->lock, flags);
++ if (pool->curr_nr < pool->min_nr) {
++ add_element(pool, element);
++ spin_unlock_irqrestore(&pool->lock, flags);
++ wake_up(&pool->wait);
++ return;
++ }
++ spin_unlock_irqrestore(&pool->lock, flags);
++ }
++ pool->free(element, pool->pool_data);
++}
++
++/*
++ * A commonly used alloc and free fn.
++ */
++void *mempool_alloc_slab(int gfp_mask, void *pool_data)
++{
++ kmem_cache_t *mem = (kmem_cache_t *) pool_data;
++ return kmem_cache_alloc(mem, gfp_mask);
++}
++
++void mempool_free_slab(void *element, void *pool_data)
++{
++ kmem_cache_t *mem = (kmem_cache_t *) pool_data;
++ kmem_cache_free(mem, element);
++}
++
++
++EXPORT_SYMBOL(mempool_create);
++EXPORT_SYMBOL(mempool_resize);
++EXPORT_SYMBOL(mempool_destroy);
++EXPORT_SYMBOL(mempool_alloc);
++EXPORT_SYMBOL(mempool_free);
++EXPORT_SYMBOL(mempool_alloc_slab);
++EXPORT_SYMBOL(mempool_free_slab);
+diff -urN linux-2.4.22/mm/vmalloc.c linux-2.4.22-dm/mm/vmalloc.c
+--- linux-2.4.22/mm/vmalloc.c 2003-08-25 13:44:44.000000000 +0200
++++ linux-2.4.22-dm/mm/vmalloc.c 2003-09-15 17:01:29.000000000 +0200
+@@ -374,3 +374,22 @@
+ read_unlock(&vmlist_lock);
+ return buf - buf_start;
+ }
++
++void *vcalloc(unsigned long nmemb, unsigned long elem_size)
++{
++ unsigned long size;
++ void *addr;
++
++ /*
++ * Check that we're not going to overflow.
++ */
++ if (nmemb > (ULONG_MAX / elem_size))
++ return NULL;
++
++ size = nmemb * elem_size;
++ addr = vmalloc(size);
++ if (addr)
++ memset(addr, 0, size);
++
++ return addr;
++}
--- /dev/null
+diff -urN linux-2.4.22/drivers/md/Config.in linux-2.4.22-evms/drivers/md/Config.in
+--- linux-2.4.22/drivers/md/Config.in 2003-09-15 17:07:45.000000000 +0200
++++ linux-2.4.22-evms/drivers/md/Config.in 2003-09-15 17:09:48.000000000 +0200
+@@ -16,5 +16,9 @@
+ dep_tristate ' Logical volume manager (LVM) support' CONFIG_BLK_DEV_LVM $CONFIG_MD
+ dep_tristate ' Device-mapper support' CONFIG_BLK_DEV_DM $CONFIG_MD
+ dep_tristate ' Mirror (RAID-1) support' CONFIG_BLK_DEV_DM_MIRROR $CONFIG_BLK_DEV_DM
++if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
++ dep_tristate ' Bad Block Relocation Device Target' CONFIG_BLK_DEV_DM_BBR $CONFIG_BLK_DEV_DM
++ dep_tristate ' Sparse Device Target' CONFIG_BLK_DEV_DM_SPARSE $CONFIG_BLK_DEV_DM
++fi
+
+ endmenu
+diff -urN linux-2.4.22/drivers/md/Makefile linux-2.4.22-evms/drivers/md/Makefile
+--- linux-2.4.22/drivers/md/Makefile 2003-09-15 17:07:45.000000000 +0200
++++ linux-2.4.22-evms/drivers/md/Makefile 2003-09-15 17:09:48.000000000 +0200
+@@ -30,6 +30,8 @@
+
+ obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
+ obj-$(CONFIG_BLK_DEV_DM_MIRROR) += dm-mirror.o
++obj-$(CONFIG_BLK_DEV_DM_BBR) += dm-bbr.o
++obj-$(CONFIG_BLK_DEV_DM_SPARSE) += dm-sparse.o
+
+ include $(TOPDIR)/Rules.make
+
+diff -urN linux-2.4.22/drivers/md/dm-bbr.c linux-2.4.22-evms/drivers/md/dm-bbr.c
+--- linux-2.4.22/drivers/md/dm-bbr.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-evms/drivers/md/dm-bbr.c 2003-09-15 17:08:42.000000000 +0200
+@@ -0,0 +1,1228 @@
++/*
++ * Copyright (c) International Business Machines Corp., 2002-2003
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
++ * the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * linux/drivers/md/dm-bbr.c
++ *
++ * Bad-block-relocation (BBR) target for device-mapper.
++ *
++ * The BBR target is designed to remap I/O write failures to another safe
++ * location on disk. Note that most disk drives have BBR built into them,
++ * this means that our software BBR will only be activated when all hardware
++ * BBR replacement sectors have been used.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/blkdev.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/slab.h>
++#include <linux/mempool.h>
++#include "dm.h"
++#include "dm-bbr.h"
++#include "dm-daemon.h"
++#include "dm-io.h"
++
++/* Number of active BBR devices. */
++static int bbr_instances = 0;
++static DECLARE_MUTEX(bbr_instances_lock);
++
++/* Data pertaining to the I/O thread. */
++static struct dm_daemon * bbr_io_thread = NULL;
++static spinlock_t bbr_io_list_lock = SPIN_LOCK_UNLOCKED;
++static LIST_HEAD(bbr_io_list);
++static void bbr_io_handler(void);
++
++/* Global pools for bbr_io_buf's and bbr_remap's. */
++static kmem_cache_t * bbr_io_buf_cache;
++static mempool_t * bbr_io_buf_pool;
++static kmem_cache_t * bbr_remap_cache;
++static mempool_t * bbr_remap_pool;
++
++static void bbr_free_remap(struct bbr_private * bbr_id);
++
++/**
++ * destroy_pools
++ *
++ * Delete the pools for the remap list and I/O anchors.
++ **/
++static void destroy_pools(void)
++{
++ if (bbr_io_buf_pool) {
++ mempool_destroy(bbr_io_buf_pool);
++ bbr_io_buf_pool = NULL;
++ }
++ if (bbr_io_buf_cache) {
++ kmem_cache_destroy(bbr_io_buf_cache);
++ bbr_io_buf_cache = NULL;
++ }
++ if (bbr_remap_pool) {
++ mempool_destroy(bbr_remap_pool);
++ bbr_remap_pool = NULL;
++ }
++ if (bbr_remap_cache) {
++ kmem_cache_destroy(bbr_remap_cache);
++ bbr_remap_cache = NULL;
++ }
++}
++
++/**
++ * create_pools
++ *
++ * Create mempools for the remap list and I/O anchors.
++ **/
++static int create_pools(void)
++{
++ if (!bbr_remap_cache) {
++ bbr_remap_cache = kmem_cache_create("BBR_Remap_Cache",
++ sizeof(struct bbr_runtime_remap),
++ 0, SLAB_HWCACHE_ALIGN,
++ NULL, NULL);
++ if (!bbr_remap_cache) {
++ DMERR("Unable to create BBR remap cache.");
++ goto out;
++ }
++ }
++ if (!bbr_remap_pool) {
++ bbr_remap_pool = mempool_create(64, mempool_alloc_slab,
++ mempool_free_slab,
++ bbr_remap_cache);
++ if (!bbr_remap_pool) {
++ DMERR("Unable to create BBR remap mempool.");
++ goto out;
++ }
++ }
++
++ if (!bbr_io_buf_cache) {
++ bbr_io_buf_cache = kmem_cache_create("BBR_IO_Buf_Cache",
++ sizeof(struct bbr_io_buffer),
++ 0, SLAB_HWCACHE_ALIGN,
++ NULL, NULL);
++ if (!bbr_io_buf_cache) {
++ DMERR("Unable to create BBR I/O buffer cache.");
++ goto out;
++ }
++ }
++ if (!bbr_io_buf_pool) {
++ bbr_io_buf_pool = mempool_create(256, mempool_alloc_slab,
++ mempool_free_slab,
++ bbr_io_buf_cache);
++ if (!bbr_io_buf_pool) {
++ DMERR("Unable to create BBR I/O buffer mempool.");
++ goto out;
++ }
++ }
++
++out:
++ if (!bbr_remap_cache || !bbr_remap_pool ||
++ !bbr_io_buf_cache || !bbr_io_buf_pool ) {
++ destroy_pools();
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++/**
++ * stop_io_thread
++ *
++ * Use the dm-daemon services to stop the BBR I/O thread.
++ **/
++static void stop_io_thread(void)
++{
++ if (bbr_io_thread) {
++ dm_daemon_stop(bbr_io_thread);
++ kfree(bbr_io_thread);
++ bbr_io_thread = NULL;
++ }
++}
++
++/**
++ * start_io_thread
++ *
++ * Use the dm-daemon services to start the BBR I/O thread.
++ **/
++static int start_io_thread(void)
++{
++ int rc;
++
++ if (!bbr_io_thread) {
++ bbr_io_thread = kmalloc(sizeof(*bbr_io_thread), GFP_KERNEL);
++ if (!bbr_io_thread) {
++ return -ENOMEM;
++ }
++
++ rc = dm_daemon_start(bbr_io_thread, "bbr_io", bbr_io_handler);
++ if (rc) {
++ kfree(bbr_io_thread);
++ return rc;
++ }
++ }
++
++ return 0;
++}
++
++/**
++ * bbr_global_init
++ *
++ * Set up the mempools, I/O thread, and sync-I/O service. This should
++ * be called only when the first bbr device is created.
++ **/
++static int bbr_global_init(void)
++{
++ int rc;
++
++ rc = create_pools();
++ if (rc) {
++ goto out;
++ }
++
++ rc = start_io_thread();
++ if (rc) {
++ destroy_pools();
++ goto out;
++ }
++
++ rc = dm_io_get(1);
++ if (rc) {
++ destroy_pools();
++ stop_io_thread();
++ goto out;
++ }
++
++out:
++ return rc;
++}
++
++/**
++ * bbr_global_cleanup
++ *
++ * Cleanup the mempools, I/O thread and sync-I/O service. This should
++ * be called only when the last bbr device is removed.
++ **/
++static void bbr_global_cleanup(void)
++{
++ destroy_pools();
++ stop_io_thread();
++ dm_io_put(1);
++}
++
++static struct bbr_private * bbr_alloc_private(void)
++{
++ struct bbr_private * bbr_id;
++
++ bbr_id = kmalloc(sizeof(*bbr_id), GFP_KERNEL);
++ if (bbr_id) {
++ memset(bbr_id, 0, sizeof(*bbr_id));
++ bbr_id->in_use_replacement_blks = (atomic_t)ATOMIC_INIT(0);
++ bbr_id->bbr_id_lock = SPIN_LOCK_UNLOCKED;
++ }
++
++ return bbr_id;
++}
++
++static void bbr_free_private(struct bbr_private * bbr_id)
++{
++ if (bbr_id->bbr_table) {
++ kfree(bbr_id->bbr_table);
++ }
++ bbr_free_remap(bbr_id);
++ kfree(bbr_id);
++}
++
++static u32 crc_table[256];
++static u32 crc_table_built = 0;
++
++static void build_crc_table(void)
++{
++ u32 i, j, crc;
++
++ for (i = 0; i <= 255; i++) {
++ crc = i;
++ for (j = 8; j > 0; j--) {
++ if (crc & 1)
++ crc = (crc >> 1) ^ CRC_POLYNOMIAL;
++ else
++ crc >>= 1;
++ }
++ crc_table[i] = crc;
++ }
++ crc_table_built = 1;
++}
++
++static u32 calculate_crc(u32 crc, void * buffer, u32 buffersize)
++{
++ unsigned char * current_byte;
++ u32 temp1, temp2, i;
++
++ current_byte = (unsigned char *) buffer;
++ /* Make sure the crc table is available */
++ if (!crc_table_built)
++ build_crc_table();
++ /* Process each byte in the buffer. */
++ for (i = 0; i < buffersize; i++) {
++ temp1 = (crc >> 8) & 0x00FFFFFF;
++ temp2 = crc_table[(crc ^ (u32) * current_byte) &
++ (u32) 0xff];
++ current_byte++;
++ crc = temp1 ^ temp2;
++ }
++ return crc;
++}
++
++/**
++ * le_bbr_table_sector_to_cpu
++ *
++ * Convert bbr meta data from on-disk (LE) format
++ * to the native cpu endian format.
++ **/
++static void le_bbr_table_sector_to_cpu(struct bbr_table * p)
++{
++ int i;
++ p->signature = le32_to_cpup(&p->signature);
++ p->crc = le32_to_cpup(&p->crc);
++ p->sequence_number = le32_to_cpup(&p->sequence_number);
++ p->in_use_cnt = le32_to_cpup(&p->in_use_cnt);
++ for (i = 0; i < BBR_ENTRIES_PER_SECT; i++) {
++ p->entries[i].bad_sect =
++ le64_to_cpup(&p->entries[i].bad_sect);
++ p->entries[i].replacement_sect =
++ le64_to_cpup(&p->entries[i].replacement_sect);
++ }
++}
++
++/**
++ * cpu_bbr_table_sector_to_le
++ *
++ * Convert bbr meta data from cpu endian format to on-disk (LE) format
++ **/
++static void cpu_bbr_table_sector_to_le(struct bbr_table * p,
++ struct bbr_table * le)
++{
++ int i;
++ le->signature = cpu_to_le32p(&p->signature);
++ le->crc = cpu_to_le32p(&p->crc);
++ le->sequence_number = cpu_to_le32p(&p->sequence_number);
++ le->in_use_cnt = cpu_to_le32p(&p->in_use_cnt);
++ for (i = 0; i < BBR_ENTRIES_PER_SECT; i++) {
++ le->entries[i].bad_sect =
++ cpu_to_le64p(&p->entries[i].bad_sect);
++ le->entries[i].replacement_sect =
++ cpu_to_le64p(&p->entries[i].replacement_sect);
++ }
++}
++
++/**
++ * validate_bbr_table_sector
++ *
++ * Check the specified BBR table sector for a valid signature and CRC. If it's
++ * valid, endian-convert the table sector.
++ **/
++static int validate_bbr_table_sector(struct bbr_table * p)
++{
++ int rc = 0;
++ int org_crc, final_crc;
++
++ if (le32_to_cpup(&p->signature) != BBR_TABLE_SIGNATURE) {
++ DMERR("BBR table signature doesn't match!");
++ DMERR("Found 0x%x. Expecting 0x%x",
++ le32_to_cpup(&p->signature), BBR_TABLE_SIGNATURE);
++ rc = -EINVAL;
++ goto out;
++ }
++
++ if (!p->crc) {
++ DMERR("BBR table sector has no CRC!");
++ rc = -EINVAL;
++ goto out;
++ }
++
++ org_crc = le32_to_cpup(&p->crc);
++ p->crc = 0;
++ final_crc = calculate_crc(INITIAL_CRC, (void *)p, sizeof(*p));
++ if (final_crc != org_crc) {
++ DMERR("CRC failed!");
++ DMERR("Found 0x%x. Expecting 0x%x",
++ org_crc, final_crc);
++ rc = -EINVAL;
++ goto out;
++ }
++
++ p->crc = cpu_to_le32p(&org_crc);
++ le_bbr_table_sector_to_cpu(p);
++
++out:
++ return rc;
++}
++
++/**
++ * bbr_binary_tree_insert
++ *
++ * Insert a node into the binary tree.
++ **/
++static void bbr_binary_tree_insert(struct bbr_runtime_remap ** root,
++ struct bbr_runtime_remap * newnode)
++{
++ struct bbr_runtime_remap ** node = root;
++ while (node && *node) {
++ if (newnode->remap.bad_sect > (*node)->remap.bad_sect) {
++ node = &((*node)->right);
++ } else {
++ node = &((*node)->left);
++ }
++ }
++
++ newnode->left = newnode->right = NULL;
++ *node = newnode;
++}
++
++/**
++ * bbr_binary_search
++ *
++ * Search for a node that contains bad_sect == lsn.
++ **/
++static struct bbr_runtime_remap * bbr_binary_search(
++ struct bbr_runtime_remap * root,
++ u64 lsn)
++{
++ struct bbr_runtime_remap * node = root;
++ while (node) {
++ if (node->remap.bad_sect == lsn) {
++ break;
++ }
++ if (lsn > node->remap.bad_sect) {
++ node = node->right;
++ } else {
++ node = node->left;
++ }
++ }
++ return node;
++}
++
++/**
++ * bbr_binary_tree_destroy
++ *
++ * Destroy the binary tree.
++ **/
++static void bbr_binary_tree_destroy(struct bbr_runtime_remap * root,
++ struct bbr_private * bbr_id)
++{
++ struct bbr_runtime_remap ** link = NULL;
++ struct bbr_runtime_remap * node = root;
++
++ while (node) {
++ if (node->left) {
++ link = &(node->left);
++ node = node->left;
++ continue;
++ }
++ if (node->right) {
++ link = &(node->right);
++ node = node->right;
++ continue;
++ }
++
++ mempool_free(node, bbr_remap_pool);
++ if (node == root) {
++ /* If root is deleted, we're done. */
++ break;
++ }
++
++ /* Back to root. */
++ node = root;
++ *link = NULL;
++ }
++}
++
++static void bbr_free_remap(struct bbr_private * bbr_id)
++{
++ spin_lock_irq(&bbr_id->bbr_id_lock);
++ bbr_binary_tree_destroy(bbr_id->remap_root, bbr_id);
++ bbr_id->remap_root = NULL;
++ spin_unlock_irq(&bbr_id->bbr_id_lock);
++}
++
++/**
++ * bbr_insert_remap_entry
++ *
++ * Create a new remap entry and add it to the binary tree for this node.
++ **/
++static int bbr_insert_remap_entry(struct bbr_private * bbr_id,
++ struct bbr_table_entry * new_bbr_entry)
++{
++ struct bbr_runtime_remap * newnode;
++
++ newnode = mempool_alloc(bbr_remap_pool, GFP_NOIO);
++ if (!newnode) {
++ DMERR("Could not allocate from remap mempool!");
++ return -ENOMEM;
++ }
++ newnode->remap.bad_sect = new_bbr_entry->bad_sect;
++ newnode->remap.replacement_sect = new_bbr_entry->replacement_sect;
++ spin_lock_irq(&bbr_id->bbr_id_lock);
++ bbr_binary_tree_insert(&bbr_id->remap_root, newnode);
++ spin_unlock_irq(&bbr_id->bbr_id_lock);
++ return 0;
++}
++
++/**
++ * bbr_table_to_remap_list
++ *
++ * The on-disk bbr table is sorted by the replacement sector LBA. In order to
++ * improve run time performance, the in memory remap list must be sorted by
++ * the bad sector LBA. This function is called at discovery time to initialize
++ * the remap list. This function assumes that at least one copy of meta data
++ * is valid.
++ **/
++static u32 bbr_table_to_remap_list(struct bbr_private * bbr_id)
++{
++ u32 in_use_blks = 0;
++ int i, j;
++ struct bbr_table * p;
++
++
++ for (i = 0, p = bbr_id->bbr_table;
++ i < bbr_id->nr_sects_bbr_table;
++ i++, p++ ) {
++ if (!p->in_use_cnt) {
++ break;
++ }
++ in_use_blks += p->in_use_cnt;
++ for (j = 0; j < p->in_use_cnt; j++) {
++ bbr_insert_remap_entry(bbr_id, &p->entries[j]);
++ }
++ }
++ if (in_use_blks)
++ DMWARN("There are %u BBR entries for device %u:%u",
++ in_use_blks, MAJOR(bbr_id->dev->dev),
++ MINOR(bbr_id->dev->dev));
++
++ return in_use_blks;
++}
++
++/**
++ * bbr_search_remap_entry
++ *
++ * Search remap entry for the specified sector. If found, return a pointer to
++ * the table entry. Otherwise, return NULL.
++ **/
++static struct bbr_table_entry * bbr_search_remap_entry(
++ struct bbr_private * bbr_id,
++ u64 lsn)
++{
++ struct bbr_runtime_remap * p;
++
++ spin_lock_irq(&bbr_id->bbr_id_lock);
++ p = bbr_binary_search(bbr_id->remap_root, lsn);
++ spin_unlock_irq(&bbr_id->bbr_id_lock);
++ if (p) {
++ return (&p->remap);
++ } else {
++ return NULL;
++ }
++}
++
++/**
++ * bbr_remap
++ *
++ * If *lsn is in the remap table, return TRUE and modify *lsn,
++ * else, return FALSE.
++ **/
++static inline int bbr_remap(struct bbr_private * bbr_id,
++ u64 * lsn)
++{
++ struct bbr_table_entry * e;
++
++ if (atomic_read(&bbr_id->in_use_replacement_blks)) {
++ e = bbr_search_remap_entry(bbr_id, *lsn);
++ if (e) {
++ *lsn = e->replacement_sect;
++ return 1;
++ }
++ }
++ return 0;
++}
++
++/**
++ * bbr_remap_probe
++ *
++ * If any of the probed sectors in the range [lsn, lsn+nr_sects) are in the
++ * remap table, return TRUE; else, return FALSE.
++ **/
++static inline int bbr_remap_probe(struct bbr_private * bbr_id,
++ u64 lsn, u64 nr_sects)
++{
++ u64 tmp, cnt;
++
++ if (atomic_read(&bbr_id->in_use_replacement_blks)) {
++ for (cnt = 0, tmp = lsn;
++ cnt < nr_sects;
++ cnt += bbr_id->blksize_in_sects, tmp = lsn + cnt) {
++ if (bbr_remap(bbr_id,&tmp)) {
++ return 1;
++ }
++ }
++ }
++ return 0;
++}
++
++/**
++ * bbr_setup
++ *
++ * Read the remap tables from disk and set up the initial remap tree.
++ **/
++static int bbr_setup(struct bbr_private * bbr_id)
++{
++ struct bbr_table * table = bbr_id->bbr_table;
++ struct page * page;
++ struct io_region job;
++ unsigned int error, offset;
++ int i, rc = 0;
++
++ job.dev = bbr_id->dev->dev;
++ job.count = 1;
++
++ /* Read and verify each BBR table sector individually. */
++ for (i = 0; i < bbr_id->nr_sects_bbr_table; i++, table++) {
++ job.sector = bbr_id->lba_table1 + i;
++ page = virt_to_page(table);
++ offset = (unsigned long)table & ~PAGE_MASK;
++ rc = dm_io_sync(1, &job, READ, page, offset, &error);
++ if (rc && bbr_id->lba_table2) {
++ job.sector = bbr_id->lba_table2 + i;
++ rc = dm_io_sync(1, &job, READ, page, offset, &error);
++ }
++ if (rc) {
++ goto out;
++ }
++
++ rc = validate_bbr_table_sector(table);
++ if (rc) {
++ goto out;
++ }
++ }
++ atomic_set(&bbr_id->in_use_replacement_blks,
++ bbr_table_to_remap_list(bbr_id));
++
++out:
++ if (rc) {
++ DMERR("dm-bbr: error during device setup: %d", rc);
++ }
++ return rc;
++}
++
++static struct bbr_io_buffer * allocate_bbr_io_buf(struct bbr_private * bbr_id,
++ struct buffer_head * bh,
++ int rw)
++{
++ struct bbr_io_buffer * bbr_io_buf;
++
++ bbr_io_buf = mempool_alloc(bbr_io_buf_pool, GFP_NOIO);
++ if (bbr_io_buf) {
++ memset(bbr_io_buf, 0, sizeof(struct bbr_io_buffer));
++ INIT_LIST_HEAD(&bbr_io_buf->bbr_io_list);
++ bbr_io_buf->bbr_id = bbr_id;
++ bbr_io_buf->sector = bh->b_rsector;
++ bbr_io_buf->bh = bh;
++ bbr_io_buf->rw = rw;
++ } else {
++ DMWARN("Could not allocate from BBR I/O buffer pool!");
++ }
++ return bbr_io_buf;
++}
++
++static void free_bbr_io_buf(struct bbr_io_buffer * bbr_io_buf)
++{
++ mempool_free(bbr_io_buf, bbr_io_buf_pool);
++}
++
++/**
++ * bbr_io_remap_error
++ * @bbr_id: Private data for the BBR node.
++ * @rw: READ or WRITE.
++ * @starting_lsn: Starting sector of request to remap.
++ * @count: Number of sectors in the request.
++ * @buffer: Data buffer for the request.
++ *
++ * For the requested range, try to write each sector individually. For each
++ * sector that fails, find the next available remap location and write the
++ * data to that new location. Then update the table and write both copies
++ * of the table to disk. Finally, update the in-memory mapping and do any
++ * other necessary bookkeeping.
++ **/
++static int bbr_io_remap_error(struct bbr_private * bbr_id,
++ int rw,
++ u64 starting_lsn,
++ u64 count,
++ char * buffer)
++{
++ struct bbr_table * bbr_table;
++ struct io_region job;
++ struct page * page;
++ unsigned long table_sector_index;
++ unsigned long table_sector_offset;
++ unsigned long index;
++ unsigned int offset_in_page, error;
++ u64 lsn, new_lsn;
++ int rc;
++
++ if (rw == READ) {
++ /* Nothing can be done about read errors. */
++ return -EIO;
++ }
++
++ job.dev = bbr_id->dev->dev;
++
++ /* For each sector in the request. */
++ for (lsn = 0; lsn < count; lsn++, buffer += SECTOR_SIZE) {
++ job.sector = starting_lsn + lsn;
++ job.count = 1;
++ page = virt_to_page(buffer);
++ offset_in_page = (unsigned long)buffer & ~PAGE_MASK;
++ rc = dm_io_sync(1, &job, rw, page, offset_in_page, &error);
++ while (rc) {
++ /* Find the next available relocation sector. */
++ new_lsn = atomic_read(&bbr_id->in_use_replacement_blks);
++ if (new_lsn >= bbr_id->nr_replacement_blks) {
++ /* No more replacement sectors available. */
++ return -EIO;
++ }
++ new_lsn += bbr_id->start_replacement_sect;
++
++ /* Write the data to its new location. */
++ DMWARN("dm-bbr: device %u:%u: Trying to remap bad sector "PFU64" to sector "PFU64,
++ MAJOR(bbr_id->dev->dev), MINOR(bbr_id->dev->dev),
++ starting_lsn + lsn, new_lsn);
++ job.sector = new_lsn;
++ rc = dm_io_sync(1, &job, rw, page, offset_in_page, &error);
++ if (rc) {
++ /* This replacement sector is bad.
++ * Try the next one.
++ */
++ DMERR("dm-bbr: device %u:%u: replacement sector "PFU64" is bad. Skipping.",
++ MAJOR(bbr_id->dev->dev), MINOR(bbr_id->dev->dev), new_lsn);
++ atomic_inc(&bbr_id->in_use_replacement_blks);
++ continue;
++ }
++
++ /* Add this new entry to the on-disk table. */
++ table_sector_index = new_lsn -
++ bbr_id->start_replacement_sect;
++ table_sector_offset = table_sector_index /
++ BBR_ENTRIES_PER_SECT;
++ index = table_sector_index % BBR_ENTRIES_PER_SECT;
++
++ bbr_table = &bbr_id->bbr_table[table_sector_offset];
++ bbr_table->entries[index].bad_sect = starting_lsn + lsn;
++ bbr_table->entries[index].replacement_sect = new_lsn;
++ bbr_table->in_use_cnt++;
++ bbr_table->sequence_number++;
++ bbr_table->crc = 0;
++ bbr_table->crc = calculate_crc(INITIAL_CRC,
++ bbr_table,
++ sizeof(struct bbr_table));
++
++ /* Write the table to disk. */
++ cpu_bbr_table_sector_to_le(bbr_table, bbr_table);
++ page = virt_to_page(bbr_table);
++ offset_in_page = (unsigned long)bbr_table & ~PAGE_MASK;
++ if (bbr_id->lba_table1) {
++ job.sector = bbr_id->lba_table1 + table_sector_offset;
++ job.count = 1;
++ rc = dm_io_sync(1, &job, WRITE, page, offset_in_page, &error);
++ }
++ if (bbr_id->lba_table2) {
++ job.sector = bbr_id->lba_table2 + table_sector_offset;
++ rc |= dm_io_sync(1, &job, WRITE, page, offset_in_page, &error);
++ }
++ le_bbr_table_sector_to_cpu(bbr_table);
++
++ if (rc) {
++ /* Error writing one of the tables to disk. */
++ DMERR("dm-bbr: device %u:%u: error updating BBR tables on disk.",
++ MAJOR(bbr_id->dev->dev), MINOR(bbr_id->dev->dev));
++ return rc;
++ }
++
++ /* Insert a new entry in the remapping binary-tree. */
++ rc = bbr_insert_remap_entry(bbr_id,
++ &bbr_table->entries[index]);
++ if (rc) {
++ DMERR("dm-bbr: device %u:%u: error adding new entry to remap tree.",
++ MAJOR(bbr_id->dev->dev), MINOR(bbr_id->dev->dev));
++ return rc;
++ }
++
++ atomic_inc(&bbr_id->in_use_replacement_blks);
++ }
++ }
++
++ return 0;
++}
++
++/**
++ * bbr_io_process_request
++ *
++ * For each sector in this request, check if the sector has already
++ * been remapped. If so, process all previous sectors in the request,
++ * followed by the remapped sector. Then reset the starting lsn and
++ * count, and keep going with the rest of the request as if it were
++ * a whole new request. If any of the sync_io's return an error,
++ * call the remapper to relocate the bad sector(s).
++ **/
++static int bbr_io_process_request(struct bbr_io_buffer * bbr_io_buf)
++{
++ struct bbr_private * bbr_id = bbr_io_buf->bbr_id;
++ struct io_region job;
++ u64 starting_lsn = bbr_io_buf->sector;
++ u64 count = bbr_io_buf->bh->b_size >> SECTOR_SHIFT;
++ u64 lsn, remapped_lsn;
++ char * buffer = bbr_io_buf->bh->b_data;
++ struct page * page = virt_to_page(buffer);
++ unsigned int offset_in_page = (unsigned long)buffer & ~PAGE_MASK;
++ unsigned int error;
++ int rw = bbr_io_buf->rw;
++ int rc = 0;
++
++ job.dev = bbr_id->dev->dev;
++
++ /* For each sector in this request, check if this sector has already
++ * been remapped. If so, process all previous sectors in this request,
++ * followed by the remapped sector. Then reset the starting lsn and
++ * count and keep going with the rest of the request as if it were
++ * a whole new request.
++ */
++ for (lsn = 0; lsn < count; lsn++) {
++ remapped_lsn = starting_lsn + lsn;
++ rc = bbr_remap(bbr_id, &remapped_lsn);
++ if (!rc) {
++ /* This sector is fine. */
++ continue;
++ }
++
++ /* Process all sectors in the request up to this one. */
++ if (lsn > 0) {
++ job.sector = starting_lsn;
++ job.count = lsn;
++ rc = dm_io_sync(1, &job, rw, page, offset_in_page, &error);
++ if (rc) {
++ /* If this I/O failed, then one of the sectors
++ * in this request needs to be relocated.
++ */
++ rc = bbr_io_remap_error(bbr_id, bbr_io_buf->rw, starting_lsn,
++ lsn, buffer);
++ if (rc) {
++ return rc;
++ }
++ }
++ buffer += (lsn << SECTOR_SHIFT);
++ page = virt_to_page(buffer);
++ offset_in_page = (unsigned long)buffer & ~PAGE_MASK;
++ }
++
++ /* Process the remapped sector. */
++ job.sector = remapped_lsn;
++ job.count = 1;
++ rc = dm_io_sync(1, &job, rw, page, offset_in_page, &error);
++ if (rc) {
++ /* BUGBUG - Need more processing if this caused an
++ * an error. If this I/O failed, then the existing
++ * remap is now bad, and we need to find a new remap.
++ * Can't use bbr_io_remap_error(), because the existing
++ * map entry needs to be changed, not added again, and
++ * the original table entry also needs to be changed.
++ */
++ return rc;
++ }
++
++ buffer += SECTOR_SIZE;
++ starting_lsn += (lsn + 1);
++ count -= (lsn + 1);
++ lsn = -1;
++ page = virt_to_page(buffer);
++ offset_in_page = (unsigned long)buffer & ~PAGE_MASK;
++ }
++
++ /* Check for any remaining sectors after the last split. This could
++ * potentially be the whole request, but that should be a rare case
++ * because requests should only be processed by the thread if we know
++ * an error occurred or they contained one or more remapped sectors.
++ */
++ if (count) {
++ job.sector = starting_lsn;
++ job.count = count;
++ rc = dm_io_sync(1, &job, rw, page, offset_in_page, &error);
++ if (rc) {
++ /* If this I/O failed, then one of the sectors in this
++ * request needs to be relocated.
++ */
++ rc = bbr_io_remap_error(bbr_id, bbr_io_buf->rw, starting_lsn,
++ count, buffer);
++ if (rc) {
++ return rc;
++ }
++ }
++ }
++
++ return 0;
++}
++
++/**
++ * bbr_io_handler
++ *
++ * This is the handler for the bbr_io_thread. It continuously loops,
++ * taking I/O requests off its list and processing them. If nothing
++ * is on the list, the thread goes back to sleep until specifically
++ * woken up.
++ *
++ * I/O requests should only be sent to this thread if we know that:
++ * a) the request contains at least one remapped sector.
++ * or
++ * b) the request caused an error on the normal I/O path.
++ * This function uses synchronous I/O, so sending a request to this
++ * thread that doesn't need special processing will cause severe
++ * performance degradation.
++ **/
++static void bbr_io_handler(void)
++{
++ struct bbr_io_buffer * bbr_io_buf;
++ struct buffer_head * bh;
++ unsigned long flags;
++ int rc;
++
++ while (1) {
++ /* Process bbr_io_list, one entry at a time. */
++ spin_lock_irqsave(&bbr_io_list_lock, flags);
++ if (list_empty(&bbr_io_list)) {
++ /* No more items on the list. */
++ spin_unlock_irqrestore(&bbr_io_list_lock, flags);
++ break;
++ }
++ bbr_io_buf = list_entry(bbr_io_list.next,
++ struct bbr_io_buffer, bbr_io_list);
++ list_del_init(&bbr_io_buf->bbr_io_list);
++ spin_unlock_irqrestore(&bbr_io_list_lock, flags);
++
++ rc = bbr_io_process_request(bbr_io_buf);
++
++ /* Clean up and complete the original I/O. */
++ bbr_io_buf->flags |= BBR_IO_HANDLED;
++ bh = bbr_io_buf->bh;
++ if (bh->b_end_io) {
++ /* If this was the bbr_io_buf for an error on the
++ * normal WRITE, don't free it here. It will be
++ * freed later in bbr_callback()
++ */
++ if (!(bbr_io_buf->flags & BBR_IO_RELOCATE))
++ free_bbr_io_buf(bbr_io_buf);
++ bh->b_end_io(bh, rc ? 0 : 1);
++ }
++ }
++}
++
++/**
++ * bbr_schedule_io
++ *
++ * Place the specified bbr_io_buf on the thread's processing list.
++ **/
++static void bbr_schedule_io(struct bbr_io_buffer * bbr_io_buf)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&bbr_io_list_lock, flags);
++ list_add_tail(&bbr_io_buf->bbr_io_list, &bbr_io_list);
++ spin_unlock_irqrestore(&bbr_io_list_lock, flags);
++ dm_daemon_wake(bbr_io_thread);
++}
++
++/**
++ * bbr_read
++ *
++ * If there are any remapped sectors on this object, send this request over
++ * to the thread for processing. Otherwise send it down the stack normally.
++ **/
++static int bbr_read(struct bbr_private * bbr_id,
++ struct buffer_head * bh)
++{
++ struct bbr_io_buffer * bbr_io_buf;
++
++
++ if (atomic_read(&bbr_id->in_use_replacement_blks) == 0 ||
++ !bbr_remap_probe(bbr_id, bh->b_rsector,
++ bh->b_size >> SECTOR_SHIFT)) {
++ /* No existing remaps or this request doesn't
++ * contain any remapped sectors.
++ */
++ bh->b_rdev = bbr_id->dev->dev;
++ return 1;
++ }
++
++ /* This request has at least one remapped sector. */
++ bbr_io_buf = allocate_bbr_io_buf(bbr_id, bh, READ);
++ if (!bbr_io_buf) {
++ /* Can't get memory to track the I/O. */
++ bh->b_end_io(bh, 0);
++ return -ENOMEM;
++ }
++
++ bbr_schedule_io(bbr_io_buf);
++ return 0;
++}
++
++/**
++ * bbr_callback
++ *
++ * This is the callback for normal write requests. Check for an error
++ * during the I/O, and send to the thread for processing if necessary.
++ **/
++static int bbr_callback(struct dm_target * ti,
++ struct buffer_head * bh,
++ int rw,
++ int error,
++ union map_info * map_context)
++{
++ struct bbr_io_buffer * bbr_io_buf = (struct bbr_io_buffer *) map_context->ptr;
++
++ if (!bbr_io_buf)
++ return error;
++
++ /* Will try to relocate the WRITE if:
++ * - It is an error, and
++ * - It is not an error of BBR relocation, and
++ */
++ if (error && !(bbr_io_buf->flags & BBR_IO_HANDLED)) {
++ DMERR("dm-bbr: device %u:%u: Write failure on sector %lu. Scheduling for retry.",
++ MAJOR(bh->b_rdev), MINOR(bh->b_rdev),
++ (unsigned long)bbr_io_buf->sector);
++ /* Indicate this bbr_io_buf is for an error on normal WRITE */
++ bbr_io_buf->flags |= BBR_IO_RELOCATE;
++ bbr_schedule_io(bbr_io_buf);
++ /* Returns >0 so that DM will let us retry the I/O */
++ return 1;
++ }
++
++ free_bbr_io_buf(bbr_io_buf);
++ return error;
++}
++
++/**
++ * bbr_write
++ *
++ * If there are any remapped sectors on this object, send the request over
++ * to the thread for processing. Otherwise, register for callback
++ * notification, and send the request down normally.
++ **/
++static int bbr_write(struct bbr_private * bbr_id,
++ struct buffer_head * bh,
++ union map_info * map_context)
++{
++ struct bbr_io_buffer * bbr_io_buf;
++
++ bbr_io_buf = allocate_bbr_io_buf(bbr_id, bh, WRITE);
++ if (!bbr_io_buf) {
++ /* Can't get memory to track the I/O. */
++ bh->b_end_io(bh, 0);
++ return -ENOMEM;
++ }
++
++ if (atomic_read(&bbr_id->in_use_replacement_blks) == 0 ||
++ !bbr_remap_probe(bbr_id, bh->b_rsector,
++ bh->b_size >> SECTOR_SHIFT)) {
++ /* No existing remaps or this request
++ * contains no remapped sectors.
++ */
++ bh->b_rdev = bbr_id->dev->dev;
++ map_context->ptr = bbr_io_buf;
++ return 1;
++ } else {
++ /* This request contains at least one remapped sector. */
++ map_context->ptr = NULL;
++ bbr_schedule_io(bbr_io_buf);
++ }
++ return 0;
++}
++
++/**
++ * Construct a bbr mapping
++ **/
++static int bbr_ctr(struct dm_target * ti, unsigned int argc, char ** argv)
++{
++ struct bbr_private * bbr_id;
++ u32 block_size;
++ char * end;
++ int rc = -EINVAL;
++
++ if (argc != 8) {
++ ti->error = "dm-bbr requires exactly 8 arguments: "
++ "device offset table1_lsn table2_lsn table_size start_replacement nr_replacement_blks block_size";
++ goto out1;
++ }
++
++ bbr_id = bbr_alloc_private();
++ if (!bbr_id) {
++ ti->error = "dm-bbr: Error allocating bbr private data.";
++ goto out1;
++ }
++
++ bbr_id->offset = simple_strtoull(argv[1], &end, 10);
++ bbr_id->lba_table1 = simple_strtoull(argv[2], &end, 10);
++ bbr_id->lba_table2 = simple_strtoull(argv[3], &end, 10);
++ bbr_id->nr_sects_bbr_table = simple_strtoull(argv[4], &end, 10);
++ bbr_id->start_replacement_sect = simple_strtoull(argv[5], &end, 10);
++ bbr_id->nr_replacement_blks = simple_strtoull(argv[6], &end, 10);
++ block_size = simple_strtoul(argv[7], &end, 10);
++ bbr_id->blksize_in_sects = (block_size >> SECTOR_SHIFT);
++
++ bbr_id->bbr_table = kmalloc(bbr_id->nr_sects_bbr_table << SECTOR_SHIFT,
++ GFP_KERNEL);
++ if (!bbr_id->bbr_table) {
++ ti->error = "dm-bbr: Error allocating bbr table.";
++ goto out2;
++ }
++
++ if (dm_get_device(ti, argv[0], 0, ti->len,
++ dm_table_get_mode(ti->table), &bbr_id->dev)) {
++ ti->error = "dm-bbr: Device lookup failed";
++ goto out2;
++ }
++
++ /* Using a semaphore here is probably overkill,
++ * but at least it will be correct.
++ */
++ down(&bbr_instances_lock);
++ if (bbr_instances == 0) {
++ rc = bbr_global_init();
++ if (rc) {
++ up(&bbr_instances_lock);
++ goto out3;
++ }
++ }
++ bbr_instances++;
++ up(&bbr_instances_lock);
++
++ rc = bbr_setup(bbr_id);
++ if (rc) {
++ ti->error = "dm-bbr: Device setup failed";
++ goto out4;
++ }
++
++ ti->private = bbr_id;
++ return 0;
++
++out4:
++ down(&bbr_instances_lock);
++ bbr_instances--;
++ if (bbr_instances == 0) {
++ bbr_global_cleanup();
++ }
++ up(&bbr_instances_lock);
++
++out3:
++ dm_put_device(ti, bbr_id->dev);
++out2:
++ bbr_free_private(bbr_id);
++out1:
++ return rc;
++}
++
++static void bbr_dtr(struct dm_target * ti)
++{
++ struct bbr_private * bbr_id = (struct bbr_private *) ti->private;
++
++ dm_put_device(ti, bbr_id->dev);
++ bbr_free_private(bbr_id);
++
++ down(&bbr_instances_lock);
++ bbr_instances--;
++ if (bbr_instances == 0) {
++ bbr_global_cleanup();
++ }
++ up(&bbr_instances_lock);
++}
++
++static int bbr_map(struct dm_target * ti, struct buffer_head * bh, int rw,
++ union map_info * map_context)
++{
++ struct bbr_private * bbr_id = (struct bbr_private *) ti->private;
++
++ bh->b_rsector += bbr_id->offset;
++ switch (rw) {
++ case READ:
++ case READA:
++ map_context->ptr = NULL;
++ return bbr_read(bbr_id, bh);
++ case WRITE:
++ return bbr_write(bbr_id, bh, map_context);
++ default:
++ return -EIO;
++ }
++}
++
++static int bbr_status(struct dm_target * ti, status_type_t type,
++ char * result, unsigned int maxlen)
++{
++ struct bbr_private * bbr_id = (struct bbr_private *) ti->private;
++
++ switch (type) {
++ case STATUSTYPE_INFO:
++ result[0] = '\0';
++ break;
++
++ case STATUSTYPE_TABLE:
++ snprintf(result, maxlen, "%s "PFU64" "PFU64" "PFU64" "PFU64" "PFU64" "PFU64" %u",
++ dm_kdevname(bbr_id->dev->dev), bbr_id->offset,
++ bbr_id->lba_table1, bbr_id->lba_table2,
++ bbr_id->nr_sects_bbr_table,
++ bbr_id->start_replacement_sect,
++ bbr_id->nr_replacement_blks,
++ bbr_id->blksize_in_sects << SECTOR_SHIFT);
++ break;
++ }
++ return 0;
++}
++
++static struct target_type bbr_target = {
++ name: "bbr",
++ module: THIS_MODULE,
++ ctr: bbr_ctr,
++ dtr: bbr_dtr,
++ map: bbr_map,
++ end_io: bbr_callback,
++ status: bbr_status,
++};
++
++int __init dm_bbr_init(void)
++{
++ int r = dm_register_target(&bbr_target);
++
++ if (r < 0)
++ DMERR("dm-bbr: register failed %d", r);
++
++ return r;
++}
++
++void __exit dm_bbr_exit(void)
++{
++ int r = dm_unregister_target(&bbr_target);
++
++ if (r < 0)
++ DMERR("dm-bbr: unregister failed %d", r);
++}
++
++module_init(dm_bbr_init);
++module_exit(dm_bbr_exit);
++MODULE_LICENSE("GPL");
+diff -urN linux-2.4.22/drivers/md/dm-bbr.h linux-2.4.22-evms/drivers/md/dm-bbr.h
+--- linux-2.4.22/drivers/md/dm-bbr.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-evms/drivers/md/dm-bbr.h 2003-09-15 17:08:42.000000000 +0200
+@@ -0,0 +1,148 @@
++/*
++ * Copyright (c) International Business Machines Corp., 2002-2003
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
++ * the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * linux/drivers/md/dm-bbr.h
++ *
++ * Bad-block-relocation (BBR) target for device-mapper.
++ *
++ * The BBR target is designed to remap I/O write failures to another safe
++ * location on disk. Note that most disk drives have BBR built into them,
++ * this means that our software BBR will be only activated when all hardware
++ * BBR replacement sectors have been used.
++ */
++
++#ifndef _DM_BBR_H_
++#define _DM_BBR_H_
++
++#define BBR_TABLE_SIGNATURE 0x42627254 /* BbrT */
++#define BBR_ENTRIES_PER_SECT 31
++#define BBR_NR_BUFS 128
++#define INITIAL_CRC 0xFFFFFFFF
++#define CRC_POLYNOMIAL 0xEDB88320L
++
++/**
++ * Macros to cleanly print 64-bit numbers on both 32-bit and 64-bit machines.
++ * Use these in place of %Ld, %Lu, and %Lx.
++ **/
++#if BITS_PER_LONG > 32
++#define PFU64 "%lu"
++#else
++#define PFU64 "%Lu"
++#endif
++
++/**
++ * struct bbr_table_entry
++ * @bad_sect: LBA of bad location.
++ * @replacement_sect: LBA of new location.
++ *
++ * Structure to describe one BBR remap.
++ **/
++struct bbr_table_entry {
++ u64 bad_sect;
++ u64 replacement_sect;
++};
++
++/**
++ * struct bbr_table
++ * @signature: Signature on each BBR table sector.
++ * @crc: CRC for this table sector.
++ * @sequence_number: Used to resolve conflicts when primary and secondary
++ * tables do not match.
++ * @in_use_cnt: Number of in-use table entries.
++ * @entries: Actual table of remaps.
++ *
++ * Structure to describe each sector of the metadata table. Each sector in this
++ * table can describe 31 remapped sectors.
++ **/
++struct bbr_table {
++ u32 signature;
++ u32 crc;
++ u32 sequence_number;
++ u32 in_use_cnt;
++ struct bbr_table_entry entries[BBR_ENTRIES_PER_SECT];
++};
++
++/**
++ * struct bbr_runtime_remap
++ *
++ * Node in the binary tree used to keep track of remaps.
++ **/
++struct bbr_runtime_remap {
++ struct bbr_table_entry remap;
++ struct bbr_runtime_remap *left;
++ struct bbr_runtime_remap *right;
++};
++
++/**
++ * struct bbr_private
++ * @dev: Info about underlying device.
++ * @bbr_table: Copy of metadata table.
++ * @offset: LBA of data area.
++ * @lba_table1: LBA of primary BBR table.
++ * @lba_table2: LBA of secondary BBR table.
++ * @nr_sects_bbr_table: Size of each BBR table.
++ * @nr_replacement_blks: Number of replacement blocks.
++ * @start_replacement_sect: LBA of start of replacement blocks.
++ * @blksize_in_sects: Size of each block.
++ * @in_use_replacement_blks: Current number of remapped blocks.
++ * @remap_root: Binary tree containing all remaps.
++ * @bbr_id_lock: Lock for the binary tree.
++ *
++ * Private data for each BBR target.
++ **/
++struct bbr_private {
++ struct dm_dev * dev;
++ struct bbr_table * bbr_table;
++ struct bbr_runtime_remap * remap_root;
++ u64 offset;
++ u64 lba_table1;
++ u64 lba_table2;
++ u64 nr_sects_bbr_table;
++ u64 start_replacement_sect;
++ u64 nr_replacement_blks;
++ u32 blksize_in_sects;
++ atomic_t in_use_replacement_blks;
++ spinlock_t bbr_id_lock;
++};
++
++#define BBR_IO_HANDLED (1<<0)
++#define BBR_IO_RELOCATE (1<<1)
++
++/**
++ * struct bbr_io_buffer
++ * @bbr_io_list: Thread's list of bbr_io_buf's.
++ * @bbr_id: Object for this request.
++ * @bh: Original buffer_head.
++ * @sector: Original sector
++ * @flags: Operation flag (BBR_IO_*)
++ * @rw: READ or WRITE.
++ * @rc: Return code from bbr_io_handler.
++ *
++ * Structure used to track each write request.
++ **/
++struct bbr_io_buffer {
++ struct list_head bbr_io_list;
++ struct bbr_private *bbr_id;
++ struct buffer_head *bh;
++ u64 sector;
++ u32 flags;
++ s32 rw;
++ s32 rc;
++};
++
++#endif
++
+diff -urN linux-2.4.22/drivers/md/dm-snapshot.c linux-2.4.22-evms/drivers/md/dm-snapshot.c
+--- linux-2.4.22/drivers/md/dm-snapshot.c 2003-09-15 17:07:45.000000000 +0200
++++ linux-2.4.22-evms/drivers/md/dm-snapshot.c 2003-09-15 17:08:35.000000000 +0200
+@@ -92,6 +92,9 @@
+
+ /* List of snapshots for this origin */
+ struct list_head snapshots;
++
++ /* Count of snapshots and origins referencing this structure. */
++ unsigned int count;
+ };
+
+ /*
+@@ -155,6 +158,35 @@
+ }
+
+ /*
++ * Allocate and initialize an origin structure.
++ */
++static struct origin * __alloc_origin(kdev_t dev)
++{
++ struct origin *o = kmalloc(sizeof(*o), GFP_KERNEL);
++ if (o) {
++ o->dev = dev;
++ INIT_LIST_HEAD(&o->hash_list);
++ INIT_LIST_HEAD(&o->snapshots);
++ __insert_origin(o);
++ }
++ return o;
++}
++
++static void __get_origin(struct origin *o)
++{
++ o->count++;
++}
++
++static void __put_origin(struct origin *o)
++{
++ o->count--;
++ if (o->count == 0) {
++ list_del(&o->hash_list);
++ kfree(o);
++ }
++}
++
++/*
+ * Make a note of the snapshot and its origin so we can look it
+ * up when the origin has a write on it.
+ */
+@@ -168,20 +200,37 @@
+
+ if (!o) {
+ /* New origin */
+- o = kmalloc(sizeof(*o), GFP_KERNEL);
++ o = __alloc_origin(dev);
+ if (!o) {
+ up_write(&_origins_lock);
+ return -ENOMEM;
+ }
++ }
+
+- /* Initialise the struct */
+- INIT_LIST_HEAD(&o->snapshots);
+- o->dev = dev;
++ __get_origin(o);
++ list_add_tail(&snap->list, &o->snapshots);
+
+- __insert_origin(o);
++ up_write(&_origins_lock);
++ return 0;
++}
++
++static int register_origin(kdev_t dev)
++{
++ struct origin *o;
++
++ down_write(&_origins_lock);
++ o = __lookup_origin(dev);
++
++ if (!o) {
++ /* New origin */
++ o = __alloc_origin(dev);
++ if (!o) {
++ up_write(&_origins_lock);
++ return -ENOMEM;
++ }
+ }
+
+- list_add_tail(&snap->list, &o->snapshots);
++ __get_origin(o);
+
+ up_write(&_origins_lock);
+ return 0;
+@@ -195,11 +244,18 @@
+ o = __lookup_origin(s->origin->dev);
+
+ list_del(&s->list);
+- if (list_empty(&o->snapshots)) {
+- list_del(&o->hash_list);
+- kfree(o);
+- }
++ __put_origin(o);
++
++ up_write(&_origins_lock);
++}
++
++static void unregister_origin(kdev_t dev)
++{
++ struct origin *o;
+
++ down_write(&_origins_lock);
++ o = __lookup_origin(dev);
++ __put_origin(o);
+ up_write(&_origins_lock);
+ }
+
+@@ -1090,6 +1146,13 @@
+ return r;
+ }
+
++ r = register_origin(dev->dev);
++ if (r) {
++ ti->error = "Cannot register origin";
++ dm_put_device(ti, dev);
++ return r;
++ }
++
+ ti->private = dev;
+ return 0;
+ }
+@@ -1097,6 +1160,7 @@
+ static void origin_dtr(struct dm_target *ti)
+ {
+ struct dm_dev *dev = (struct dm_dev *) ti->private;
++ unregister_origin(dev->dev);
+ dm_put_device(ti, dev);
+ }
+
+diff -urN linux-2.4.22/drivers/md/dm-sparse.c linux-2.4.22-evms/drivers/md/dm-sparse.c
+--- linux-2.4.22/drivers/md/dm-sparse.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.22-evms/drivers/md/dm-sparse.c 2003-09-15 17:09:48.000000000 +0200
+@@ -0,0 +1,713 @@
++/* -*- linux-c -*- */
++
++/*
++ * Copyright (c) International Business Machines Corp., 2002
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
++ * the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * linux/drivers/md/dm-sparse.c
++ *
++ * Sparse target for device-mapper.
++ *
++ * This target provides the ability to create a sparse device. This
++ * allows a device to pretend to be larger than it really is.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/blkdev.h>
++#include <linux/slab.h>
++#include <linux/mempool.h>
++#include <linux/vmalloc.h>
++
++#include "dm.h"
++#include "dm-io.h"
++
++#define MAX_HASH_CHAIN_ENTRIES 10
++#define NAME_SIZE 127
++
++/* Sparse Ioctl
++ device
++ start
++ chunk_size
++ chunks
++ */
++
++// Entries in the sparse remapping structure
++struct sparse_hash_entry {
++ u64 org_chunk; // Chunk number, not LBA.
++ u64 sparse_chunk; // Chunk number, not LBA.
++ struct sparse_hash_entry * next;
++ struct sparse_hash_entry * prev;
++};
++
++//Private data structure
++struct sparse_volume {
++ struct dm_dev *dev;
++ struct rw_semaphore sparse_semaphore;
++ struct sparse_hash_entry ** sparse_map; // Hash table of remappings
++ struct sparse_hash_entry * free_hash_list;
++ kmem_cache_t * hash_slab;
++ mempool_t * hash_pool;
++ u32 dm_io_flag;
++ u32 chunk_size; // Sectors.
++ u32 chunk_shift; // Shift value for chunk size.
++ u32 num_chunks; // In this volume.
++ u32 next_cow_entry; // Index into current COW table.
++ u64 current_cow_sector; // LOGICAL sector of current COW table.
++ u32 next_free_chunk; // Index of next free chunk (not LBA!).
++ u32 hash_table_size; // Size of the hash table for the remap.
++ u64 start;
++ u64 cow_table[64]; // One sector's worth of COW tables.
++};
++
++/*************************** OLD SERVICES ****************************/
++
++/* computes log base 2 of value */
++inline int log2(u32 value) //ok to change to u32?
++{
++ int result = -1;
++ long tmp; //ok to change to long?
++
++ if (value) {
++ tmp = value;
++ result++;
++ while (!(tmp & 1)) {
++ result++;
++ tmp >>= 1;
++ }
++ if (tmp != 1) {
++ result = -2;
++ }
++ }
++ return result;
++}
++
++/********************************* Functions *********************************/
++
++/***************************** Hash Functions *****************************/
++
++/* Take and initialize from the free hash list */
++static struct sparse_hash_entry *
++allocate_sparse_hash_entry( struct sparse_volume * volume,
++ u64 org_chunk,
++ u64 sparse_chunk )
++{
++ struct sparse_hash_entry * hash_entry;
++
++ hash_entry = volume->free_hash_list;
++ if ( hash_entry ) { //should always be the case b/c preallocate these
++ volume->free_hash_list = hash_entry->next;
++ hash_entry->org_chunk = org_chunk;
++ hash_entry->sparse_chunk = sparse_chunk;
++ hash_entry->next = NULL;
++ hash_entry->prev = NULL;
++ }
++
++ return hash_entry;
++}
++
++/*
++ * This function inserts a new entry into a sparse hash chain, immediately
++ * following the specified entry. This function should not be used to add
++ * an entry into an empty list, or as the first entry in an existing list.
++ * For that case, use insert_sparse_map_entry_at_head().
++ */
++static int insert_sparse_hash_entry( struct sparse_hash_entry * entry,
++ struct sparse_hash_entry * base )
++{
++ entry->next = base->next;
++ entry->prev = base;
++ base->next = entry;
++ if ( entry->next ) {
++ entry->next->prev = entry;
++ }
++ return 0;
++}
++
++/*
++ * This function inserts a new entry into a sparse chain as the first
++ * entry in the chain.
++ */
++static int insert_sparse_hash_entry_at_head( struct sparse_hash_entry * entry,
++ struct sparse_hash_entry ** head )
++{
++ entry->next = *head;
++ entry->prev = NULL;
++ *head = entry;
++ if ( entry->next ) {
++ entry->next->prev = entry;
++ }
++ return 0;
++}
++
++/*
++ * Delete all items in a single chain in the hash table.
++ */
++static int delete_sparse_hash_chain( struct sparse_volume * vol,
++ struct sparse_hash_entry * head )
++{
++ struct sparse_hash_entry * next;
++
++ while ( head ) {
++ next = head->next;
++ mempool_free( head, vol->hash_pool );
++ head = next;
++ }
++ return 0;
++}
++
++/*
++ * This function will search the hash chain that is anchored at the
++ * specified head pointer. If the chunk number is found, a pointer to that
++ * entry in the chain is set, and a 1 is returned. If the chunk is not
++ * found, a pointer to the previous entry is set and 0 is returned. If the
++ * return pointer is NULL, this means either the list is empty, or the
++ * specified sector should become the first list item.
++ */
++static int search_sparse_hash_chain( u64 chunk,
++ struct sparse_hash_entry * head,
++ struct sparse_hash_entry ** result )
++{
++ struct sparse_hash_entry * curr = head;
++ struct sparse_hash_entry * prev = head;
++ while ( curr && curr->org_chunk < chunk ) {
++ prev = curr;
++ curr = curr->next;
++ }
++ if (!curr) { // Either an empty chain or went off the end of the chain.
++ *result = prev;
++ return 0;
++ }
++ else if ( curr->org_chunk != chunk ) {
++ *result = curr->prev;
++ return 0;
++ }
++ else {
++ *result = curr;
++ return 1;
++ }
++}
++
++/*
++ * This function takes a cow table entry (from the on-disk data), and
++ * converts it into an appropriate entry for the sparse map, and
++ * inserts it into the appropriate map for the specified volume.
++ */
++static int add_cow_entry_to_sparse_map( u64 org_chunk,
++ u64 sparse_chunk,
++ struct sparse_volume * volume )
++{
++ struct sparse_hash_entry * new_entry;
++ struct sparse_hash_entry * target_entry;
++ u32 hash_value;
++ int rc = -EINVAL;
++
++ new_entry = allocate_sparse_hash_entry(volume, org_chunk, sparse_chunk);
++ if (!new_entry) {
++ return -ENOMEM;
++ }
++
++ hash_value = (long)org_chunk % volume->hash_table_size;
++
++ if (! search_sparse_hash_chain( org_chunk,
++ volume->sparse_map[hash_value],
++ &target_entry ) ) {
++ //should always take this path
++
++ if ( target_entry ) {
++ insert_sparse_hash_entry( new_entry, target_entry );
++ }
++ else {
++ insert_sparse_hash_entry_at_head
++ ( new_entry, &(volume->sparse_map[hash_value]) );
++ }
++ rc = 0;
++ }
++ return rc;
++}
++
++/*
++ * Construct the initial hash table state based on
++ * existing COW tables on the disk.
++ */
++static int build_sparse_maps(struct sparse_volume * volume)
++{
++ int rc = 0, done = 0;
++ struct io_region job;
++ struct page * page;
++ unsigned int error, offset;
++
++ while (!done) {
++
++ // Read in one sector's worth of COW tables.
++ job.dev = volume->dev->dev;
++ job.sector = volume->current_cow_sector;
++ job.count = 1;
++ page = virt_to_page(volume->cow_table);
++ offset = (unsigned long)volume->cow_table & ~PAGE_MASK;
++ rc = dm_io_sync(1, &job, READ, page, offset, &error);
++ if (rc) {
++ return rc;
++ }
++
++ // Translate every valid COW table entry into
++ // a sparse map entry.
++ for ( volume->next_cow_entry = 0;
++
++ volume->next_cow_entry < (SECTOR_SIZE/sizeof(u64)) &&
++ volume->cow_table[volume->next_cow_entry] !=
++ 0xffffffffffffffff;
++
++ volume->next_cow_entry++, volume->next_free_chunk++ ) {
++
++ if ( (rc = add_cow_entry_to_sparse_map
++ ( le64_to_cpu( volume->cow_table[volume->next_cow_entry] ),
++ volume->next_free_chunk, volume ))) {
++ return( rc );
++ }
++ }
++ // Move on to the next sector if necessary.
++ if ( volume->next_cow_entry == (SECTOR_SIZE/sizeof(u64)) ) {
++ volume->current_cow_sector++;
++ }
++ else {
++ done = 1;
++ }
++ }
++ return 0;
++}
++
++/************************* Other Functions ************************/
++
++/*
++ * Function: sparse_remap_chunk
++ *
++ * This function performs a sector remap on a sparse volume. This should
++ * be called from the I/O path. It first determines the base sector
++ * of the chunk containing the specified sector, and saves the remainder.
++ * Then it performs a search through the sparse map for the specified
++ * volume. If a match is found, the sector number is changed to the new
++ * value. If no match is found, the value is left the same, meaning the
++ * chunk has not been remapped.
++ */
++static int sparse_remap_chunk( struct sparse_volume * sparse_volume,
++ u64 * sector )
++{
++ struct sparse_hash_entry * result;
++ u64 chunk;
++ u32 hash_value;
++ u32 remainder;
++ int rc = 1;
++
++ down_read(&sparse_volume->sparse_semaphore);
++
++ remainder = *sector & (u64)(sparse_volume->chunk_size - 1);
++ chunk = *sector >> sparse_volume->chunk_shift;
++ hash_value = ((u32)chunk) % sparse_volume->hash_table_size;
++
++ if ( search_sparse_hash_chain( chunk,
++ sparse_volume->sparse_map[hash_value],
++ &result) ) {
++ *sector = ( result->sparse_chunk << sparse_volume->chunk_shift )
++ + remainder;
++ rc = 0;
++ }
++ up_read(&sparse_volume->sparse_semaphore);
++ return rc;
++}
++
++/* Function: sparse_cow_write
++ *
++ * Check this sparse node to see if the given sector/chunk has been
++ * remapped yet. If it hasn't, create a new hash table entry, update the
++ * in-memory COW table, write the COW table to disk.
++ */
++
++static int sparse_cow_write( struct sparse_volume * sparse_volume,
++ u64 * sector )
++{
++ struct sparse_hash_entry * target_entry, * new_map_entry;
++ struct io_region job;
++ struct page * page;
++ char * cow = NULL;
++ unsigned int error, offset;
++ u64 chunk;
++ u32 hash_value = 0;
++ u32 remainder;
++ int rc;
++
++ down_write(&sparse_volume->sparse_semaphore);
++
++ remainder = *sector & (u64)(sparse_volume->chunk_size - 1);
++ chunk = *sector >> sparse_volume->chunk_shift;
++ hash_value = ((u32)chunk) % sparse_volume->hash_table_size;
++
++ if ( search_sparse_hash_chain( chunk,
++ sparse_volume->sparse_map[hash_value],
++ &target_entry) ) {
++ *sector =
++ ( target_entry->sparse_chunk << sparse_volume->chunk_shift )
++ + remainder;
++ rc = 0;
++ goto out;
++ }
++
++ // Is there enough room left on this sparse to remap this chunk?
++ if ( sparse_volume->next_free_chunk >= sparse_volume->num_chunks ) {
++ DMERR("dm-sparse: full no new remaps allowed\n");
++ rc = -ENOSPC;
++ goto out;
++ }
++
++ // Create and initialize a new hash table entry for the new remap.
++ new_map_entry = allocate_sparse_hash_entry
++ (sparse_volume, chunk, sparse_volume->next_free_chunk);
++ if ( ! new_map_entry ) {
++ // Can't get memory for map entry. Disable this sparse.
++ DMERR("dm-sparse: memory error allocating hash entry\n");
++ rc = -ENOMEM;
++ goto out;
++ }
++
++ //Always write cow table so it's safe
++ cow = kmalloc( SECTOR_SIZE, GFP_KERNEL );
++ if (! cow ) {
++ // Can't get I/O buffer. Disable this sparse.
++ DMERR("dm-sparse: memory error allocating COW table buffer");
++ rc = -ENOMEM;
++ goto out;
++ }
++
++ // Add the entry to the hash table.
++ if ( target_entry ) {
++ insert_sparse_hash_entry( new_map_entry, target_entry );
++ }
++ else {
++ insert_sparse_hash_entry_at_head
++ ( new_map_entry,
++ &(sparse_volume->sparse_map[hash_value]) );
++ }
++
++ sparse_volume->next_free_chunk++;
++
++ // Update the appropriate entry in the COW table.
++ sparse_volume->cow_table[sparse_volume->next_cow_entry] =
++ cpu_to_le64(chunk);
++ sparse_volume->next_cow_entry++;
++
++ memcpy(cow, sparse_volume->cow_table, SECTOR_SIZE);
++
++ //because of ordering issues needs to be synchronous
++ job.dev = sparse_volume->dev->dev;
++ job.sector = sparse_volume->current_cow_sector;
++ job.count = 1;
++ page = virt_to_page(cow);
++ offset = (unsigned long)cow & ~PAGE_MASK;
++ dm_io_sync(1, &job, WRITE, page, offset, &error);
++
++ // Update the in-memory COW table values.
++ if ( sparse_volume->next_cow_entry >= (SECTOR_SIZE/sizeof(u64)) )
++ {
++ sparse_volume->next_cow_entry = 0;
++ sparse_volume->current_cow_sector++;
++ memset(sparse_volume->cow_table, 0xff, SECTOR_SIZE);
++ }
++
++ *sector = ( new_map_entry->sparse_chunk << sparse_volume->chunk_shift )
++ + remainder;
++
++ rc = 0;
++
++ out:
++ up_write(&sparse_volume->sparse_semaphore);
++ if ( cow ) {
++ kfree( cow );
++ }
++
++ return rc;
++}
++
++/************************ EXPORT FUNCTIONS ************************/
++
++/*
++ * Function: sparse_dtr
++ */
++static void sparse_dtr( struct dm_target *ti )
++{
++ struct sparse_volume * vol = (struct sparse_volume *)ti->private;
++ int i;
++
++ if (vol) {
++
++ if (vol->sparse_map) {
++ for ( i = 0; i < vol->hash_table_size; i++ ) {
++ delete_sparse_hash_chain( vol, vol->sparse_map[i] );
++ }
++ delete_sparse_hash_chain( vol, vol->free_hash_list );
++ vfree(vol->sparse_map);
++ }
++
++ if (vol->hash_pool)
++ mempool_destroy(vol->hash_pool);
++
++ if (vol->hash_slab)
++ kmem_cache_destroy(vol->hash_slab);
++
++ dm_put_device(ti, vol->dev);
++
++ if (vol->dm_io_flag) {
++ dm_io_put(1);
++ }
++
++ kfree( vol );
++ }
++}
++
++/*
++ * Function: sparse_ctr
++ */
++static int sparse_ctr( struct dm_target *ti, unsigned int argc, char** argv )
++{
++ int i, rc = -EINVAL;
++ struct sparse_hash_entry *new_entry;
++ struct sparse_volume *vol;
++ struct dm_dev *dev;
++ u32 chunk_size, chunks;
++ u64 start;
++ char* end, slab_name[NAME_SIZE+1];
++
++ if ( argc != 4 ) {
++ ti->error="dm-sparse: wrong number of arguments";
++ return rc;
++ }
++
++ start = simple_strtoull(argv[1], &end, 10);
++ if (*end) {
++ ti->error="dm-sparse: Invalid first chunk lba";
++ return rc;
++ }
++
++ chunk_size = simple_strtoul(argv[2], &end, 10);
++ if (*end) {
++ ti->error="dm-sparse: Invalid chunk_size";
++ return rc;
++ }
++
++ chunks = simple_strtoul(argv[3], &end, 10);
++ if (*end) {
++ ti->error="dm-sparse: Invalid number of chunks";
++ return rc;
++ }
++
++ if ( dm_get_device( ti, argv[0], ti->begin, start + chunks * chunk_size,
++ dm_table_get_mode(ti->table), &dev ) ) {
++ ti->error = "dm-sparse: Device lookup failed";
++ return rc;
++ }
++
++ vol = kmalloc(sizeof(struct sparse_volume), GFP_KERNEL);
++ if ( !vol ) {
++ ti->error = "dm-sparse: Memory allocation for private-data failed";
++ rc = -ENOMEM;
++ goto out;
++ }
++
++ memset( vol, 0, sizeof(struct sparse_volume) );
++
++ rc = dm_io_get(1);
++ if (rc) {
++ ti->error = "dm-sparse: failed to initialize dm-io.";
++ sparse_dtr(ti);
++ return rc;
++ }
++
++ // Initialize
++ vol->dm_io_flag = 1;
++ vol->chunk_size = chunk_size;
++ vol->chunk_shift = log2(chunk_size);
++ vol->num_chunks = chunks;
++ vol->current_cow_sector = 1;
++ vol->hash_table_size = chunks / MAX_HASH_CHAIN_ENTRIES + 1;
++ vol->start = start;
++ vol->dev = dev;
++ init_rwsem(&vol->sparse_semaphore);
++
++ snprintf(slab_name, NAME_SIZE, "sparse-%p", vol);
++ vol->hash_slab = kmem_cache_create(slab_name,
++ sizeof(struct sparse_hash_entry),
++ 0, SLAB_HWCACHE_ALIGN,
++ NULL, NULL);
++ if ( ! vol->hash_slab ) {
++ ti->error = "dm-sparse: memory allocation error in hash slab create";
++ sparse_dtr(ti);
++ return -ENOMEM;
++ }
++ vol->hash_pool = mempool_create(1, mempool_alloc_slab,
++ mempool_free_slab,
++ vol->hash_slab);
++ if ( ! vol->hash_pool ) {
++ ti->error = "dm-sparse: memory allocation error in hash pool create";
++ sparse_dtr(ti);
++ return -ENOMEM;
++ }
++
++ // Sparse hash table
++ vol->sparse_map = vmalloc( vol->hash_table_size *
++ sizeof( struct sparse_hash_entry * ) );
++ if ( ! vol->sparse_map ) {
++ ti->error = "dm-sparse: Memory allocation error in sparse_map create";
++ sparse_dtr(ti);
++ return -ENOMEM;
++ }
++
++ memset( vol->sparse_map, 0, vol->hash_table_size *
++ sizeof( struct sparse_hash_entry * ) );
++
++ for ( i = 0; i < chunks; i++ ) {
++
++ new_entry = mempool_alloc(vol->hash_pool, GFP_KERNEL );
++ if ( ! new_entry ) {
++ ti->error="dm-sparse: memory allocation error in hash table setup";
++ sparse_dtr(ti);
++ return -ENOMEM;
++ }
++
++ new_entry->next = vol->free_hash_list;
++ vol->free_hash_list = new_entry;
++ }
++
++ rc = build_sparse_maps(vol);
++ if (rc) {
++ ti->error = "dm-sparse: error building hash tables";
++ sparse_dtr(ti);
++ return rc;
++ }
++
++ ti->private = vol;
++ return rc;
++
++ out:
++ dm_put_device(ti, dev);
++ return rc;
++}
++
++/*
++ * Function: sparse_map
++ */
++static int sparse_map( struct dm_target * ti, struct buffer_head * bh, int rw,
++ union map_info *map_context )
++{
++ struct sparse_volume * volume = (struct sparse_volume*)ti->private;
++ u64 sector = bh->b_rsector;
++ int rc;
++
++
++
++ // Check if this sector has been remapped
++ rc = sparse_remap_chunk( volume, &sector );
++
++ if ( rc < 0 ) { //Error
++ bh->b_end_io(bh, 0);
++ return rc;
++ }
++
++ if ( rc == 0 ) { // Remapped I/O : read or write same logic
++ bh->b_rsector = volume->start + sector;
++ bh->b_rdev = volume->dev->dev;
++ return 1;
++ }
++
++ // ( Previously )Un-mapped: read / write different logic
++
++ if ( rw ) { //write :
++ rc = sparse_cow_write( volume, &sector );
++
++ if ( rc < 0 ) { //Error
++ bh->b_end_io(bh, 0);
++ return rc;
++ }
++ //Send write on
++ bh->b_rsector = volume->start + sector;
++ bh->b_rdev = volume->dev->dev;
++ return 1;
++ }
++
++ //Reading something that was never written
++ //return zeros and indicate complete
++ memset(bh->b_data, 0x0, bh->b_size);
++ bh->b_end_io(bh, 1);
++ return 0;
++}
++
++static int sparse_status( struct dm_target *ti, status_type_t type,
++ char *result, unsigned int maxlen )
++{
++ struct sparse_volume * vol = (struct sparse_volume * )ti->private;
++
++ switch(type) {
++
++ case STATUSTYPE_INFO:
++ snprintf( result, maxlen, "%d%%",
++ ( vol->next_free_chunk * 100 ) / vol->num_chunks );
++ break;
++
++ case STATUSTYPE_TABLE:
++ snprintf( result, maxlen, "%s %Lu %u %u",
++ dm_kdevname(vol->dev->dev), vol->start,
++ vol->chunk_size, vol->num_chunks );
++ break;
++
++ default:
++ break;
++ }
++
++ return 0;
++}
++
++/****************** FUNCTION TABLE **********************/
++
++static struct target_type sparse_target = {
++ .name = "sparse",
++ .module = THIS_MODULE,
++ .ctr = sparse_ctr,
++ .dtr = sparse_dtr,
++ .map = sparse_map,
++ .status = sparse_status,
++};
++
++/********************* REGISTRATION *****************/
++
++int __init sparse_init(void)
++{
++ int rc = dm_register_target(&sparse_target);
++
++ if ( rc < 0 )
++ DMWARN("sparse target registration failed");
++
++ return rc;
++}
++
++void __exit sparse_exit(void)
++{
++ if (dm_unregister_target(&sparse_target) )
++ DMWARN("sparse target unregistration failed");
++
++ return;
++}
++
++module_init(sparse_init);
++module_exit(sparse_exit);
++MODULE_LICENSE("GPL");
+diff -urN linux-2.4.22/drivers/md/multipath.c linux-2.4.22-evms/drivers/md/multipath.c
+--- linux-2.4.22/drivers/md/multipath.c 2003-06-13 16:51:34.000000000 +0200
++++ linux-2.4.22-evms/drivers/md/multipath.c 2003-09-15 17:09:36.000000000 +0200
+@@ -139,15 +139,16 @@
+ static int multipath_map (mddev_t *mddev, kdev_t *rdev)
+ {
+ multipath_conf_t *conf = mddev_to_conf(mddev);
+- int i, disks = MD_SB_DISKS;
++ int i;
+
+ /*
+ * Later we do read balancing on the read side
+ * now we use the first available disk.
+ */
+
+- for (i = 0; i < disks; i++) {
++ for (i = 0; i < conf->nr_disks; i++) {
+ if (conf->multipaths[i].operational) {
++ /* first operational is winner! */
+ *rdev = conf->multipaths[i].dev;
+ return (0);
+ }
+@@ -191,6 +192,8 @@
+ {
+ struct multipath_bh * mp_bh = (struct multipath_bh *)(bh->b_private);
+
++ atomic_dec(&mp_bh->multipath->nr_pending);
++
+ /*
+ * this branch is our 'one multipath IO has finished' event handler:
+ */
+@@ -223,19 +226,39 @@
+ }
+
+ /*
+- * This routine returns the disk from which the requested read should
+- * be done.
++ * Multipath read balance ...
++ *
++ * Returns:
++ *
++ * If no active paths
++ *
++ * - Error ( -1 )
++ *
++ * If active paths == 1
++ *
++ * - 1st active path encountered
++ *
++ * If active paths > 1
++ *
++ * - 1st idle active path encountered
++ * - else ... the active path doing the least amount of work.
+ */
+-
+ static int multipath_read_balance (multipath_conf_t *conf)
+ {
+- int disk;
+-
+- for (disk = 0; disk < conf->raid_disks; disk++)
+- if (conf->multipaths[disk].operational)
+- return disk;
+- BUG();
+- return 0;
++ int i, disk=-1, nr_pending, least_pending=0;
++
++ for (i=0; i<conf->nr_disks; i++) {
++ if (conf->multipaths[i].operational) {
++ nr_pending = atomic_read(&conf->multipaths[i].nr_pending);
++ if (nr_pending==0 || conf->working_disks==1)
++ return i;
++ if (least_pending==0 || nr_pending<least_pending) {
++ disk = i;
++ least_pending = nr_pending;
++ }
++ }
++ }
++ return disk;
+ }
+
+ static int multipath_make_request (mddev_t *mddev, int rw,
+@@ -245,6 +268,7 @@
+ struct buffer_head *bh_req;
+ struct multipath_bh * mp_bh;
+ struct multipath_info *multipath;
++ int disk;
+
+ if (!buffer_locked(bh))
+ BUG();
+@@ -267,7 +291,16 @@
+ /*
+ * read balancing logic:
+ */
+- multipath = conf->multipaths + multipath_read_balance(conf);
++ disk = multipath_read_balance(conf);
++ if (disk==-1) {
++ printk (KERN_ERR "multipath_make_request: no more operational IO paths.\n");
++ buffer_IO_error(bh);
++ return 0;
++ }
++
++ multipath = conf->multipaths + disk;
++ mp_bh->multipath = multipath;
++ atomic_inc(&multipath->nr_pending);
+
+ bh_req = &mp_bh->bh_req;
+ memcpy(bh_req, bh, sizeof(*bh));
+@@ -331,13 +364,14 @@
+ {
+ multipath_conf_t *conf = mddev_to_conf(mddev);
+ struct multipath_info * multipaths = conf->multipaths;
+- int disks = MD_SB_DISKS;
+ int other_paths = 1;
+- int i;
++ int i, first = 1;
++ mdk_rdev_t *rdev;
++ struct md_list_head *tmp;
+
+ if (conf->working_disks == 1) {
+ other_paths = 0;
+- for (i = 0; i < disks; i++) {
++ for (i = 0; i < MD_SB_DISKS; i++) {
+ if (multipaths[i].spare) {
+ other_paths = 1;
+ break;
+@@ -351,16 +385,17 @@
+ * first check if this is a queued request for a device
+ * which has just failed.
+ */
+- for (i = 0; i < disks; i++) {
++ for (i = 0; i < MD_SB_DISKS; i++) {
+ if (multipaths[i].dev==dev && !multipaths[i].operational)
+ return 0;
+ }
+ printk (LAST_DISK);
+ } else {
++ mdp_super_t *sb = mddev->sb;
+ /*
+ * Mark disk as unusable
+ */
+- for (i = 0; i < disks; i++) {
++ for (i = 0; i < MD_SB_DISKS; i++) {
+ if (multipaths[i].dev==dev && multipaths[i].operational) {
+ mark_disk_bad(mddev, i);
+ break;
+@@ -369,7 +404,6 @@
+ if (!conf->working_disks) {
+ int err = 1;
+ mdp_disk_t *spare;
+- mdp_super_t *sb = mddev->sb;
+
+ spare = get_spare(mddev);
+ if (spare) {
+@@ -384,6 +418,21 @@
+ sb->spare_disks--;
+ }
+ }
++ /* prevent unnecessary work in md_do_recovery() */
++ if (conf->working_disks) {
++ conf->raid_disks = conf->working_disks
++ = sb->raid_disks = sb->active_disks;
++ }
++ /* update alias disk info to insure we can do sb commit. */
++ ITERATE_RDEV(mddev,rdev,tmp) {
++ if (first && disk_active(&sb->disks[rdev->desc_nr])) {
++ rdev->alias_device = 0;
++ first = 0;
++ } else {
++ if (!disk_faulty(&sb->disks[rdev->desc_nr]))
++ rdev->alias_device = 1;
++ }
++ }
+ }
+ return 0;
+ }
+@@ -677,9 +726,8 @@
+ /*
+ * This is a kernel thread which:
+ *
+- * 1. Retries failed read operations on working multipaths.
++ * 1. Retries failed operations on working multipaths.
+ * 2. Updates the raid superblock when problems encounter.
+- * 3. Performs writes following reads for array syncronising.
+ */
+
+ static void multipathd (void *data)
+@@ -833,6 +881,7 @@
+ mdk_rdev_t *rdev, *def_rdev = NULL;
+ struct md_list_head *tmp;
+ int num_rdevs = 0;
++ int active_disks = 0, spare_disks = 0, faulty_disks = 0;
+
+ MOD_INC_USE_COUNT;
+
+@@ -881,9 +930,7 @@
+ printk(NOT_IN_SYNC, partition_name(rdev->dev));
+
+ /*
+- * Mark all disks as spare to start with, then pick our
+- * active disk. If we have a disk that is marked active
+- * in the sb, then use it, else use the first rdev.
++ * Mark all disks as spare to start with.
+ */
+ disk->number = desc->number;
+ disk->raid_disk = desc->raid_disk;
+@@ -894,20 +941,21 @@
+ mark_disk_sync(desc);
+
+ if (disk_active(desc)) {
+- if(!conf->working_disks) {
+- printk(OPERATIONAL, partition_name(rdev->dev),
+- desc->raid_disk);
+- disk->operational = 1;
+- disk->spare = 0;
+- conf->working_disks++;
+- def_rdev = rdev;
+- } else {
+- mark_disk_spare(desc);
+- }
+- } else
+- mark_disk_spare(desc);
++ printk(OPERATIONAL, partition_name(rdev->dev),
++ desc->raid_disk);
++ disk->operational = 1;
++ disk->spare = 0;
++ conf->working_disks++;
++ def_rdev = rdev;
++ active_disks++;
++ } else if (disk_faulty(desc)) {
++ disk->spare = 0;
++ faulty_disks++;
++ } else {
++ spare_disks++;
++ }
+
+- if(!num_rdevs++) def_rdev = rdev;
++ num_rdevs++;
+ }
+ if(!conf->working_disks && num_rdevs) {
+ desc = &sb->disks[def_rdev->desc_nr];
+@@ -918,11 +966,12 @@
+ disk->spare = 0;
+ conf->working_disks++;
+ mark_disk_active(desc);
++ active_disks++;
+ }
+ /*
+- * Make sure our active path is in desc spot 0
++ * If there is only 1 active path ... make sure it is in desc spot 0
+ */
+- if(def_rdev->desc_nr != 0) {
++ if (active_disks == 1 && def_rdev->desc_nr != 0) {
+ rdev = find_rdev_nr(mddev, 0);
+ desc = &sb->disks[def_rdev->desc_nr];
+ desc2 = sb->disks;
+@@ -940,10 +989,10 @@
+ def_rdev->desc_nr = 0;
+ }
+ }
+- conf->raid_disks = sb->raid_disks = sb->active_disks = 1;
++ conf->raid_disks = sb->raid_disks = sb->active_disks = active_disks;
+ conf->nr_disks = sb->nr_disks = sb->working_disks = num_rdevs;
+- sb->failed_disks = 0;
+- sb->spare_disks = num_rdevs - 1;
++ sb->failed_disks = faulty_disks;
++ sb->spare_disks = spare_disks;
+ mddev->sb_dirty = 1;
+ conf->mddev = mddev;
+ conf->device_lock = MD_SPIN_LOCK_UNLOCKED;
+diff -urN linux-2.4.22/fs/jfs/super.c linux-2.4.22-evms/fs/jfs/super.c
+--- linux-2.4.22/fs/jfs/super.c 2003-09-15 16:54:15.000000000 +0200
++++ linux-2.4.22-evms/fs/jfs/super.c 2003-09-15 17:09:07.000000000 +0200
+@@ -379,6 +379,7 @@
+ if (!(sb->s_flags & MS_RDONLY)) {
+ txQuiesce(sb);
+ lmLogShutdown(log);
++ updateSuper(sb, FM_CLEAN);
+ }
+ }
+
+@@ -389,6 +390,7 @@
+ int rc = 0;
+
+ if (!(sb->s_flags & MS_RDONLY)) {
++ updateSuper(sb, FM_MOUNT);
+ if ((rc = lmLogInit(log)))
+ jfs_err("jfs_unlock failed with return code %d", rc);
+ else
+diff -urN linux-2.4.22/include/linux/raid/multipath.h linux-2.4.22-evms/include/linux/raid/multipath.h
+--- linux-2.4.22/include/linux/raid/multipath.h 2001-11-12 18:51:56.000000000 +0100
++++ linux-2.4.22-evms/include/linux/raid/multipath.h 2003-09-15 17:09:36.000000000 +0200
+@@ -15,6 +15,7 @@
+ int spare;
+
+ int used_slot;
++ atomic_t nr_pending; /* number of pending requests */
+ };
+
+ struct multipath_private_data {
+@@ -63,6 +64,7 @@
+ struct buffer_head *master_bh;
+ struct buffer_head bh_req;
+ struct multipath_bh *next_mp; /* next for retry or in free list */
++ struct multipath_info *multipath; /* allows end_request to easily dec pending buffer count */
+ };
+ /* bits for multipath_bh.state */
+ #define MPBH_Uptodate 1
--- /dev/null
+diff -ruN linux-2.4.22-orig/drivers/acorn/char/i2c.c linux-2.4.22-i2c/drivers/acorn/char/i2c.c
+--- linux-2.4.22-orig/drivers/acorn/char/i2c.c Wed Aug 27 18:11:45 2003
++++ linux-2.4.22-i2c/drivers/acorn/char/i2c.c Thu Aug 28 18:13:23 2003
+@@ -303,11 +303,12 @@
+ }
+
+ static struct i2c_adapter ioc_ops = {
+- name: "IOC/IOMD",
+- id: I2C_HW_B_IOC,
+- algo_data: &ioc_data,
+- client_register: ioc_client_reg,
+- client_unregister: ioc_client_unreg
++ .owner = THIS_MODULE,
++ .name = "IOC/IOMD",
++ .id = I2C_HW_B_IOC,
++ .algo_data = &ioc_data,
++ .client_register = ioc_client_reg,
++ .client_unregister = ioc_client_unreg
+ };
+
+ static int __init i2c_ioc_init(void)
+diff -ruN linux-2.4.22-orig/drivers/acorn/char/pcf8583.c linux-2.4.22-i2c/drivers/acorn/char/pcf8583.c
+--- linux-2.4.22-orig/drivers/acorn/char/pcf8583.c Tue Jul 15 12:23:25 2003
++++ linux-2.4.22-i2c/drivers/acorn/char/pcf8583.c Thu Aug 28 18:13:23 2003
+@@ -225,12 +225,13 @@
+ }
+
+ static struct i2c_driver pcf8583_driver = {
+- name: "PCF8583",
+- id: I2C_DRIVERID_PCF8583,
+- flags: I2C_DF_NOTIFY,
+- attach_adapter: pcf8583_probe,
+- detach_client: pcf8583_detach,
+- command: pcf8583_command
++ .owner = THIS_MODULE,
++ .name = "PCF8583",
++ .id = I2C_DRIVERID_PCF8583,
++ .flags = I2C_DF_NOTIFY,
++ .attach_adapter = pcf8583_probe,
++ .detach_client = pcf8583_detach,
++ .command = pcf8583_command
+ };
+
+ static __init int pcf8583_init(void)
+--- linux-2.4.22-orig/drivers/i2c/Config.in Wed Aug 27 18:11:46 2003
++++ linux-2.4.22-i2c/drivers/i2c/Config.in Sun Sep 7 17:07:13 2003
+@@ -13,17 +13,23 @@
+ dep_tristate ' Philips style parallel port adapter' CONFIG_I2C_PHILIPSPAR $CONFIG_I2C_ALGOBIT $CONFIG_PARPORT
+ dep_tristate ' ELV adapter' CONFIG_I2C_ELV $CONFIG_I2C_ALGOBIT
+ dep_tristate ' Velleman K9000 adapter' CONFIG_I2C_VELLEMAN $CONFIG_I2C_ALGOBIT
+- dep_tristate ' NatSemi SCx200 I2C using GPIO pins' CONFIG_SCx200_I2C $CONFIG_SCx200 $CONFIG_I2C_ALGOBIT
++ dep_tristate ' Basic I2C on Parallel Port' CONFIG_I2C_PPORT $CONFIG_I2C_ALGOBIT
++ if [ "$CONFIG_ARCH_SA1100" = "y" ]; then
++ dep_tristate 'SA1100 I2C Adapter' CONFIG_I2C_FRODO $CONFIG_I2C_ALGOBIT
++ fi
++ dep_tristate ' NatSemi SCx200 I2C using GPIO pins' CONFIG_SCx200_I2C $CONFIG_SCx200_GPIO $CONFIG_I2C_ALGOBIT
+ if [ "$CONFIG_SCx200_I2C" != "n" ]; then
+ int ' GPIO pin used for SCL' CONFIG_SCx200_I2C_SCL 12
+ int ' GPIO pin used for SDA' CONFIG_SCx200_I2C_SDA 13
+ fi
+- dep_tristate ' NatSemi SCx200 ACCESS.bus' CONFIG_SCx200_ACB $CONFIG_I2C
+ fi
+
++ dep_tristate 'NatSemi SCx200 ACCESS.bus' CONFIG_SCx200_ACB $CONFIG_I2C
++
+ dep_tristate 'I2C PCF 8584 interfaces' CONFIG_I2C_ALGOPCF $CONFIG_I2C
+ if [ "$CONFIG_I2C_ALGOPCF" != "n" ]; then
+ dep_tristate ' Elektor ISA card' CONFIG_I2C_ELEKTOR $CONFIG_I2C_ALGOPCF
++ dep_tristate ' PCF on EPP port' CONFIG_I2C_PCFEPP $CONFIG_I2C_ALGOPCF
+ fi
+
+ if [ "$CONFIG_MIPS_ITE8172" = "y" ]; then
+@@ -35,13 +41,13 @@
+ if [ "$CONFIG_8xx" = "y" ]; then
+ dep_tristate 'MPC8xx CPM I2C interface' CONFIG_I2C_ALGO8XX $CONFIG_I2C
+ if [ "$CONFIG_RPXLITE" = "y" -o "$CONFIG_RPXCLASSIC" = "y" ]; then
+- dep_tristate ' Embedded Planet RPX Lite/Classic suppoort' CONFIG_I2C_RPXLITE $CONFIG_I2C_ALGO8XX
++ dep_tristate ' Embedded Planet RPX Lite/Classic support' CONFIG_I2C_RPXLITE $CONFIG_I2C_ALGO8XX
+ fi
+ fi
+- if [ "$CONFIG_405" = "y" ]; then
+- dep_tristate 'PPC 405 I2C Algorithm' CONFIG_I2C_PPC405_ALGO $CONFIG_I2C
+- if [ "$CONFIG_I2C_PPC405_ALGO" != "n" ]; then
+- dep_tristate ' PPC 405 I2C Adapter' CONFIG_I2C_PPC405_ADAP $CONFIG_I2C_PPC405_ALGO
++ if [ "$CONFIG_IBM_OCP" = "y" ]; then
++ dep_tristate 'IBM on-chip I2C Algorithm' CONFIG_I2C_IBM_OCP_ALGO $CONFIG_I2C
++ if [ "$CONFIG_I2C_IBM_OCP_ALGO" != "n" ]; then
++ dep_tristate ' IBM on-chip I2C Adapter' CONFIG_I2C_IBM_OCP_ADAP $CONFIG_I2C_IBM_OCP_ALGO
+ fi
+ fi
+
+@@ -49,16 +55,11 @@
+ dep_tristate 'Keywest I2C interface in Apple Core99 machines' CONFIG_I2C_KEYWEST $CONFIG_I2C
+ fi
+
+- if [ "$CONFIG_SIBYTE_SB1xxx_SOC" = "y" ]; then
+- dep_tristate 'SiByte SMBus interface' CONFIG_I2C_ALGO_SIBYTE $CONFIG_I2C
+- dep_tristate ' MAX1617 Temperature Sensor' CONFIG_I2C_MAX1617 $CONFIG_I2C_ALGO_SIBYTE
+- fi
+-
+ # This is needed for automatic patch generation: sensors code starts here
+ # This is needed for automatic patch generation: sensors code ends here
+
+ dep_tristate 'I2C device interface' CONFIG_I2C_CHARDEV $CONFIG_I2C
+- dep_tristate 'I2C /proc interface (required for hardware sensors)' CONFIG_I2C_PROC $CONFIG_I2C
++ dep_tristate 'I2C /proc interface (required for hardware sensors)' CONFIG_I2C_PROC $CONFIG_I2C $CONFIG_SYSCTL
+
+ fi
+ endmenu
+--- linux-2.4.22-orig/drivers/i2c/Makefile Wed Aug 27 18:11:46 2003
++++ linux-2.4.22-i2c/drivers/i2c/Makefile Sun Aug 31 14:46:51 2003
+@@ -5,7 +5,8 @@
+ O_TARGET := i2c.o
+
+ export-objs := i2c-core.o i2c-algo-bit.o i2c-algo-pcf.o \
+- i2c-algo-ite.o i2c-proc.o i2c-algo-sibyte.o
++ i2c-algo-ite.o i2c-proc.o i2c-algo-8xx.o \
++ i2c-algo-ibm_ocp.o
+
+ obj-$(CONFIG_I2C) += i2c-core.o
+ obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
+@@ -13,16 +14,21 @@
+ obj-$(CONFIG_I2C_PHILIPSPAR) += i2c-philips-par.o
+ obj-$(CONFIG_I2C_ELV) += i2c-elv.o
+ obj-$(CONFIG_I2C_VELLEMAN) += i2c-velleman.o
++obj-$(CONFIG_I2C_PPORT) += i2c-pport.o
++obj-$(CONFIG_I2C_FRODO) += i2c-frodo.o
+ obj-$(CONFIG_I2C_ALGOPCF) += i2c-algo-pcf.o
+ obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
++obj-$(CONFIG_I2C_PCFEPP) += i2c-pcf-epp.o
+ obj-$(CONFIG_ITE_I2C_ALGO) += i2c-algo-ite.o
+ obj-$(CONFIG_ITE_I2C_ADAP) += i2c-adap-ite.o
++obj-$(CONFIG_I2C_ALGO8XX) += i2c-algo-8xx.o
++obj-$(CONFIG_I2C_RPXLITE) += i2c-rpx.o
++obj-$(CONFIG_I2C_IBM_OCP_ALGO) += i2c-algo-ibm_ocp.o
++obj-$(CONFIG_I2C_IBM_OCP_ADAP) += i2c-adap-ibm_ocp.o
+ obj-$(CONFIG_I2C_PROC) += i2c-proc.o
+ obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o
+ obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
+ obj-$(CONFIG_I2C_KEYWEST) += i2c-keywest.o
+-obj-$(CONFIG_I2C_ALGO_SIBYTE) += i2c-algo-sibyte.o i2c-sibyte.o
+-obj-$(CONFIG_I2C_MAX1617) += i2c-max1617.o
+
+ # This is needed for automatic patch generation: sensors code starts here
+ # This is needed for automatic patch generation: sensors code ends here
+--- linux-old/Documentation/i2c/dev-interface Sun Aug 31 14:51:49 CEST 2003
++++ linux/Documentation/i2c/dev-interface Sun Aug 31 14:51:49 CEST 2003
+@@ -88,5 +88,10 @@
+ ioctl(file,I2C_TENBIT,long select)
+ Selects ten bit addresses if select not equals 0, selects normal 7 bit
+- addresses if select equals 0.
++ addresses if select equals 0. Default 0.
++
++ioctl(file,I2C_PEC,long select)
++ Selects SMBus PEC (packet error checking) generation and verification
++ if select not equals 0, disables if select equals 0. Default 0.
++ Used only for SMBus transactions.
+
+ ioctl(file,I2C_FUNCS,unsigned long *funcs)
+--- linux-old/Documentation/i2c/functionality Sun Aug 31 14:51:49 CEST 2003
++++ linux/Documentation/i2c/functionality Sun Aug 31 14:51:49 CEST 2003
+--- linux-old/Documentation/i2c/i2c-pport Sun Aug 31 14:51:49 CEST 2003
++++ linux/Documentation/i2c/i2c-pport Sun Aug 31 14:51:49 CEST 2003
+@@ -0,0 +1,67 @@
++Parallel Port Adapters
++----------------------
++If you are installing parallel port adapters it means you are probably messing
++around with wires and IC's and the like. If you have purchased a card that
++provides an external i2c/smbus this will require combined algorithm and
++adapter code in a single module.
++If you are doing it yourself by using the parallel port there
++are basically 2 options.
++
++1) Using the parallel port and using the i2c-pport adapter module and the
++i2c-algo-bit algorithm module together to enable you to wire up your parallel
++port to act as an i2c/smbus. This provides a bus that will enable most
++sensors to work but doesn't support the entire i2c/smbus capability.
++
++2) Using the parallel port to interface to a Philips PCF8584 parallel to i2c
++adapter chip. You will need to build a bit of a circuit to do this. This
++configuration needs the i2c-pcf-epp adapter module and the i2c-algo-pcf
++algorithm module. This supports almost all of the i2c/smbus capabilities.
++
++
++i2c-pport Documentation
++-----------------------
++This is a primitive parallel port driver for the i2c bus, which exploits
++features of modern bidirectional parallel ports.
++
++Bidirectional ports have particular bits connected in following way:
++
++ |
++ /-----| R
++ --o| |-----|
++ read \-----| /------- Out pin
++ |/
++ - -|\
++ write V
++ |
++ ---
++
++
++It means when output is set to 1 we can read the port. Therefore
++we can use 2 pins of parallel port as SDA and SCL for i2c bus. It
++is not necessary to add any external - additional parts, we can
++read and write the same port simultaneously.
++ I only use register base+2 so it is possible to use all
++8 data bits of parallel port for other applications (I have
++connected EEPROM and LCD display). I do not use bit Enable Bi-directional
++ Port. The only disadvantage is we can only support 5V chips.
++
++Layout:
++
++Cannon 25 pin
++
++SDA - connect to pin 14 (Auto Linefeed)
++SCL - connect to pin 16 (Initialize Printer)
++GND - connect to pin 18-25
+++5V - use external supply (I use 5V from 3.5" floppy connector)
++
++no pullups required
++
++Module parameters:
++
++base = 0xXXX
++XXX - 278 or 378
++
++That's all.
++
++Daniel Smolik
++marvin@sitour.cz
+--- linux-old/Documentation/i2c/i2c-protocol Sun Aug 31 14:51:49 CEST 2003
++++ linux/Documentation/i2c/i2c-protocol Sun Aug 31 14:51:49 CEST 2003
+@@ -53,8 +53,8 @@
+
+ Flag I2C_M_NOSTART:
+- In a combined transaction, no 'S Addr' is generated at some point.
+- For example, setting I2C_M_NOSTART on the second partial message
++ In a combined transaction, no 'S Addr Wr/Rd [A]' is generated at some
++ point. For example, setting I2C_M_NOSTART on the second partial message
+ generates something like:
+- S Addr Rd [A] [Data] NA Wr [A] Data [A] P
++ S Addr Rd [A] [Data] NA Data [A] P
+ If you set the I2C_M_NOSTART variable for the first partial message,
+ we do not generate Addr, but we do generate the startbit S. This will
+@@ -66,3 +66,11 @@
+ flag. For example:
+ S Addr Rd [A] Data [A] Data [A] ... [A] Data [A] P
+-
++
++ Flags I2C_M_IGNORE_NAK
++ Normally message is interrupted immediately if there is [NA] from the
++ client. Setting this flag treats any [NA] as [A], and all of
++ message is sent.
++ These messages may still fail to SCL lo->hi timeout.
++
++ Flags I2C_M_NO_RD_ACK
++ In a read message, master A/NA bit is skipped.
+--- linux-old/Documentation/i2c/i2c-velleman Sun Aug 31 14:51:49 CEST 2003
++++ linux/Documentation/i2c/i2c-velleman Sun Aug 31 14:51:49 CEST 2003
+@@ -0,0 +1,27 @@
++i2c-velleman driver
++-------------------
++This is a driver for i2c-hw access for Velleman K9000 and other adapters.
++
++Useful links
++------------
++Velleman:
++ http://www.velleman.be/
++
++Velleman K8000 Howto:
++ http://howto.htlw16.ac.at/k8000-howto.html
++
++
++K8000 and K8005 libraries
++-------------------------
++The project has led to new libs for the Velleman K8000 and K8005.
++LIBK8000 v1.99.1 and LIBK8005 v0.21
++
++With these libs you can control the K8000 and K8005 with the original
++simple commands which are in the original Velleman software.
++Like SetIOchannel, ReadADchannel, SendStepCCWFull and many more.
++Via i2c kernel device /dev/velleman
++
++The libs can be found on http://groups.yahoo.com/group/k8000/files/linux/
++
++The Velleman K8000 interface card on http://www.velleman.be/kits/k8000.htm
++The Velleman K8005 steppermotorcard on http://www.velleman.be/kits/k8005.htm
+--- linux-old/Documentation/i2c/proc-interface Sun Aug 31 14:51:49 CEST 2003
++++ linux/Documentation/i2c/proc-interface Sun Aug 31 14:51:49 CEST 2003
+--- linux-old/Documentation/i2c/smbus-protocol Sun Aug 31 14:51:50 CEST 2003
++++ linux/Documentation/i2c/smbus-protocol Sun Aug 31 14:51:50 CEST 2003
+@@ -1,2 +1,9 @@
++SMBus Protocol Summary
++======================
++The following is a summary of the SMBus protocol. It applies to
++all revisions of the protocol (1.0, 1.1, and 2.0).
++Certain protocol features which are not supported by
++this package are briefly described at the end of this document.
++
+ Some adapters understand only the SMBus (System Management Bus) protocol,
+ which is a subset from the I2C protocol. Fortunately, many devices use
+@@ -7,5 +14,5 @@
+ SMBus adapters and I2C adapters (the SMBus command set is automatically
+ translated to I2C on I2C adapters, but plain I2C commands can not be
+-handled at all on a pure SMBus adapter).
++handled at all on most pure SMBus adapters).
+
+ Below is a list of SMBus commands.
+@@ -55,5 +62,5 @@
+ See Read Byte for more information.
+
+-S Addr Wr [A] Data NA P
++S Addr Wr [A] Data [A] P
+
+
+@@ -110,5 +117,5 @@
+ ================
+
+-This command reads a block of upto 32 bytes from a device, from a
++This command reads a block of up to 32 bytes from a device, from a
+ designated register that is specified through the Comm byte. The amount
+ of data is specified by the device in the Count byte.
+@@ -121,7 +128,89 @@
+ =================
+
+-The opposite of the Block Read command, this writes upto 32 bytes to
++The opposite of the Block Read command, this writes up to 32 bytes to
+ a device, to a designated register that is specified through the
+ Comm byte. The amount of data is specified in the Count byte.
+
+ S Addr Wr [A] Comm [A] Count [A] Data [A] Data [A] ... [A] Data [A] P
++
++
++SMBus Block Process Call
++========================
++
++SMBus Block Process Call was introduced in Revision 2.0 of the specification.
++
++This command selects a device register (through the Comm byte), sends
++1 to 31 bytes of data to it, and reads 1 to 31 bytes of data in return.
++
++S Addr Wr [A] Comm [A] Count [A] Data [A] ...
++ S Addr Rd [A] [Count] A [Data] ... NA P
++
++
++SMBus Host Notify
++=================
++
++This command is sent from a SMBus device acting as a master to the
++SMBus host acting as a slave.
++It is the same form as Write Word, with the command code replaced by the
++alerting device's address.
++
++[S] [HostAddr] [Wr] A [DevAddr] A [DataLow] A [DataHigh] A [P]
++
++
++Packet Error Checking (PEC)
++===========================
++Packet Error Checking was introduced in Revision 1.1 of the specification.
++
++PEC adds a CRC-8 error-checking byte to all transfers.
++
++
++Address Resolution Protocol (ARP)
++=================================
++The Address Resolution Protocol was introduced in Revision 2.0 of
++the specification. It is a higher-layer protocol which uses the
++messages above.
++
++ARP adds device enumeration and dynamic address assignment to
++the protocol. All ARP communications use slave address 0x61 and
++require PEC checksums.
++
++
++I2C Block Transactions
++======================
++The following I2C block transactions are supported by the
++SMBus layer and are described here for completeness.
++I2C block transactions do not limit the number of bytes transferred
++but the SMBus layer places a limit of 32 bytes.
++
++
++I2C Block Read
++==============
++
++This command reads a block of bytes from a device, from a
++designated register that is specified through the Comm byte.
++
++S Addr Wr [A] Comm [A]
++ S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
++
++
++I2C Block Read (2 Comm bytes)
++=============================
++
++This command reads a block of bytes from a device, from a
++designated register that is specified through the two Comm bytes.
++
++S Addr Wr [A] Comm1 [A] Comm2 [A]
++ S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
++
++
++I2C Block Write
++===============
++
++The opposite of the Block Read command, this writes bytes to
++a device, to a designated register that is specified through the
++Comm byte. Note that command lengths of 0, 2, or more bytes are
++supported as they are indistinguishable from data.
++
++S Addr Wr [A] Comm [A] Data [A] Data [A] ... [A] Data [A] P
++
++
+--- linux-old/Documentation/i2c/summary Sun Aug 31 14:51:50 CEST 2003
++++ linux/Documentation/i2c/summary Sun Aug 31 14:51:50 CEST 2003
+@@ -5,5 +5,5 @@
+
+ I2C (pronounce: I squared C) is a protocol developed by Philips. It is a
+-slow two-wire protocol (10-100 kHz), but it suffices for many types of
++slow two-wire protocol (10-400 kHz), but it suffices for many types of
+ devices.
+
+@@ -44,5 +44,5 @@
+ Included Bus Drivers
+ ====================
+-Note that not only stable drivers are patched into the kernel by 'mkpatch'.
++Note that only stable drivers are patched into the kernel by 'mkpatch'.
+
+
+@@ -50,7 +50,7 @@
+ ------------
+
+-i2c-core: The basic I2C code, including the /proc interface
+-i2c-dev: The /dev interface
+-i2c-proc: The /proc interface for device (client) drivers
++i2c-core: The basic I2C code, including the /proc/bus/i2c* interface
++i2c-dev: The /dev/i2c-* interface
++i2c-proc: The /proc/sys/dev/sensors interface for device (client) drivers
+
+ Algorithm drivers
+@@ -60,5 +60,5 @@
+ i2c-algo-bit: A bit-banging algorithm
+ i2c-algo-pcf: A PCF 8584 style algorithm
+-i2c-algo-ppc405: An algorithm for the I2C device in IBM 405xx processors (NOT BUILT BY DEFAULT)
++i2c-algo-ibm_ocp: An algorithm for the I2C device in IBM 4xx processors (NOT BUILT BY DEFAULT)
+
+ Adapter drivers
+@@ -67,7 +67,7 @@
+ i2c-elektor: Elektor ISA card (uses i2c-algo-pcf)
+ i2c-elv: ELV parallel port adapter (uses i2c-algo-bit)
+-i2c-pcf-epp: PCF8584 on a EPP parallel port (uses i2c-algo-pcf) (BROKEN - missing i2c-pcf-epp.h)
++i2c-pcf-epp: PCF8584 on a EPP parallel port (uses i2c-algo-pcf) (NOT mkpatched)
+ i2c-philips-par: Philips style parallel port adapter (uses i2c-algo-bit)
+-i2c-ppc405: IBM 405xx processor I2C device (uses i2c-algo-ppc405) (NOT BUILT BY DEFAULT)
++i2c-adap-ibm_ocp: IBM 4xx processor I2C device (uses i2c-algo-ibm_ocp) (NOT BUILT BY DEFAULT)
+ i2c-pport: Primitive parallel port adapter (uses i2c-algo-bit)
+ i2c-rpx: RPX board Motorola 8xx I2C device (uses i2c-algo-8xx) (NOT BUILT BY DEFAULT)
+--- linux-old/Documentation/i2c/ten-bit-addresses Sun Aug 31 14:51:50 CEST 2003
++++ linux/Documentation/i2c/ten-bit-addresses Sun Aug 31 14:51:50 CEST 2003
+--- linux-old/Documentation/i2c/writing-clients Sun Aug 31 14:51:50 CEST 2003
++++ linux/Documentation/i2c/writing-clients Sun Aug 31 14:51:50 CEST 2003
+@@ -25,14 +25,12 @@
+ address.
+
+- struct i2c_driver foo_driver
+- {
+- /* name */ "Foo version 2.3 and later driver",
+- /* id */ I2C_DRIVERID_FOO,
+- /* flags */ I2C_DF_NOTIFY,
+- /* attach_adapter */ &foo_attach_adapter,
+- /* detach_client */ &foo_detach_client,
+- /* command */ &foo_command, /* May be NULL */
+- /* inc_use */ &foo_inc_use, /* May be NULL */
+- /* dec_use */ &foo_dec_use /* May be NULL */
++ static struct i2c_driver foo_driver = {
++ .owner = THIS_MODULE,
++ .name = "Foo version 2.3 driver",
++ .id = I2C_DRIVERID_FOO, /* usually from i2c-id.h */
++ .flags = I2C_DF_NOTIFY,
++ .attach_adapter = &foo_attach_adapter,
++ .detach_client = &foo_detach_client,
++ .command = &foo_command /* may be NULL */
+ }
+
+@@ -51,41 +49,6 @@
+ below.
+
+-
+-Module usage count
+-==================
+-
+-If your driver can also be compiled as a module, there are moments at
+-which the module can not be removed from memory. For example, when you
+-are doing a lengthy transaction, or when you create a /proc directory,
+-and some process has entered that directory (this last case is the
+-main reason why these call-backs were introduced).
+-
+-To increase or decrease the module usage count, you can use the
+-MOD_{INC,DEC}_USE_COUNT macros. They must be called from the module
+-which needs to get its usage count changed; that is why each driver
+-module has to implement its own callback.
+-
+- void foo_inc_use (struct i2c_client *client)
+- {
+- #ifdef MODULE
+- MOD_INC_USE_COUNT;
+- #endif
+- }
+-
+- void foo_dec_use (struct i2c_client *client)
+- {
+- #ifdef MODULE
+- MOD_DEC_USE_COUNT;
+- #endif
+- }
+-
+-Do not call these call-back functions directly; instead, use one of the
+-following functions defined in i2c.h:
+- void i2c_inc_use_client(struct i2c_client *);
+- void i2c_dec_use_client(struct i2c_client *);
+-
+-You should *not* increase the module count just because a device is
+-detected and a client created. This would make it impossible to remove
+-an adapter driver!
++There used to be two additional fields in this structure, inc_use and dec_use,
++for module usage count, but these fields were obsoleted and removed.
+
+
+@@ -366,5 +329,5 @@
+ The detect client function is called by i2c_probe or i2c_detect.
+ The `kind' parameter contains 0 if this call is due to a `force'
+-parameter, and 0 otherwise (for i2c_detect, it contains 0 if
++parameter, and -1 otherwise (for i2c_detect, it contains 0 if
+ this call is due to the generic `force' parameter, and the chip type
+ number if it is due to a specific `force' parameter).
+@@ -449,7 +412,7 @@
+ need it, remove it. We do it here to help to lessen memory
+ fragmentation. */
+- if (! (new_client = kmalloc(sizeof(struct i2c_client)) +
++ if (! (new_client = kmalloc(sizeof(struct i2c_client) +
+ sizeof(struct foo_data),
+- GFP_KERNEL)) {
++ GFP_KERNEL))) {
+ err = -ENOMEM;
+ goto ERROR0;
+--- linux-old/drivers/i2c/i2c-adap-ibm_ocp.c Sun Aug 31 14:51:51 CEST 2003
++++ linux/drivers/i2c/i2c-adap-ibm_ocp.c Sun Aug 31 14:51:51 CEST 2003
+@@ -0,0 +1,346 @@
++/*
++ -------------------------------------------------------------------------
++ i2c-adap-ibm_ocp.c i2c-hw access for the IIC peripheral on the IBM PPC 405
++ -------------------------------------------------------------------------
++
++ Ian DaSilva, MontaVista Software, Inc.
++ idasilva@mvista.com or source@mvista.com
++
++ Copyright 2000 MontaVista Software Inc.
++
++ Changes made to support the IIC peripheral on the IBM PPC 405
++
++
++ ----------------------------------------------------------------------------
++ This file was highly leveraged from i2c-elektor.c, which was created
++ by Simon G. Vogl and Hans Berglund:
++
++
++ Copyright (C) 1995-97 Simon G. Vogl
++ 1998-99 Hans Berglund
++
++ With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
++ Frodo Looijaard <frodol@dds.nl>
++
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ ----------------------------------------------------------------------------
++
++ History: 01/20/12 - Armin
++ akuster@mvista.com
++ ported up to 2.4.16+
++
++ Version 02/03/25 - Armin
++ converted to ocp format
++ removed commented out or #if 0 code
++
++ TODO: convert to ocp_register
++ add PM hooks
++
++*/
++
++
++#include <linux/kernel.h>
++#include <linux/ioport.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/i2c.h>
++#include <linux/i2c-algo-ibm_ocp.h>
++#include <asm/irq.h>
++#include <asm/io.h>
++#include <asm/ocp.h>
++
++/*
++ * This next section is configurable, and it is used to set the number
++ * of i2c controllers in the system. The default number of instances is 1,
++ * however, this should be changed to reflect your system's configuration.
++ */
++
++/*
++ * The STB03xxx, with a PPC405 core, has two i2c controllers.
++ */
++//(sizeof(IIC_ADDR)/sizeof(struct iic_regs))
++extern iic_t *IIC_ADDR[];
++static struct iic_ibm iic_ibmocp_adaps[IIC_NUMS][5];
++
++static struct i2c_algo_iic_data *iic_ibmocp_data[IIC_NUMS];
++static struct i2c_adapter *iic_ibmocp_ops[IIC_NUMS];
++
++static int i2c_debug=0;
++static wait_queue_head_t iic_wait[IIC_NUMS];
++static int iic_pending;
++static spinlock_t irq_driver_lock = SPIN_LOCK_UNLOCKED;
++
++
++/* ----- global defines ----------------------------------------------- */
++#define DEB(x) if (i2c_debug>=1) x
++#define DEB2(x) if (i2c_debug>=2) x
++#define DEB3(x) if (i2c_debug>=3) x
++#define DEBE(x) x /* error messages */
++
++/* ----- local functions ---------------------------------------------- */
++
++//
++// Description: Write a byte to IIC hardware
++//
++static void iic_ibmocp_setbyte(void *data, int ctl, int val)
++{
++ // writeb resolves to a write to the specified memory location
++ // plus a call to eieio. eieio ensures that all instructions
++ // preceding it are completed before any further stores are
++ // completed.
++ // Delays at this level (to protect writes) are not needed here.
++ writeb(val, ctl);
++}
++
++
++//
++// Description: Read a byte from IIC hardware
++//
++static int iic_ibmocp_getbyte(void *data, int ctl)
++{
++ int val;
++
++ val = readb(ctl);
++ return (val);
++}
++
++
++//
++// Description: Return our slave address. This is the address
++// put on the I2C bus when another master on the bus wants to address us
++// as a slave
++//
++static int iic_ibmocp_getown(void *data)
++{
++ return(((struct iic_ibm *)(data))->iic_own);
++}
++
++
++//
++// Description: Return the clock rate
++//
++static int iic_ibmocp_getclock(void *data)
++{
++ return(((struct iic_ibm *)(data))->iic_clock);
++}
++
++
++
++//
++// Description: Put this process to sleep. We will wake up when the
++// IIC controller interrupts.
++//
++static void iic_ibmocp_waitforpin(void *data) {
++
++ int timeout = 2;
++ struct iic_ibm *priv_data = data;
++
++ //
++ // If interrupts are enabled (which they are), then put the process to
++ // sleep. This process will be awakened by two events -- either the
++ // the IIC peripheral interrupts or the timeout expires.
++ //
++ if (priv_data->iic_irq > 0) {
++ spin_lock_irq(&irq_driver_lock);
++ if (iic_pending == 0) {
++ interruptible_sleep_on_timeout(&(iic_wait[priv_data->index]), timeout*HZ );
++ } else
++ iic_pending = 0;
++ spin_unlock_irq(&irq_driver_lock);
++ } else {
++ //
++ // If interrupts are not enabled then delay for a reasonable amount
++ // of time and return. We expect that by time we return to the calling
++ // function that the IIC has finished our requested transaction and
++ // the status bit reflects this.
++ //
++ // udelay is probably not the best choice for this since it is
++ // the equivalent of a busy wait
++ //
++ udelay(100);
++ }
++ //printk("iic_ibmocp_waitforpin: exitting\n");
++}
++
++
++//
++// Description: The registered interrupt handler
++//
++static void iic_ibmocp_handler(int this_irq, void *dev_id, struct pt_regs *regs)
++{
++ int ret;
++ struct iic_regs *iic;
++ struct iic_ibm *priv_data = dev_id;
++ iic = (struct iic_regs *) priv_data->iic_base;
++ iic_pending = 1;
++ DEB2(printk("iic_ibmocp_handler: in interrupt handler\n"));
++ // Read status register
++ ret = readb((int) &(iic->sts));
++ DEB2(printk("iic_ibmocp_handler: status = %x\n", ret));
++ // Clear status register. See IBM PPC 405 reference manual for details
++ writeb(0x0a, (int) &(iic->sts));
++ wake_up_interruptible(&(iic_wait[priv_data->index]));
++}
++
++
++//
++// Description: This function is very hardware dependent. First, we lock
++// the region of memory where out registers exist. Next, we request our
++// interrupt line and register its associated handler. Our IIC peripheral
++// uses interrupt number 2, as specified by the 405 reference manual.
++//
++static int iic_hw_resrc_init(int instance)
++{
++
++ DEB(printk("iic_hw_resrc_init: Physical Base address: 0x%x\n", (u32) IIC_ADDR[instance] ));
++ iic_ibmocp_adaps[instance]->iic_base = (u32)ioremap((unsigned long)IIC_ADDR[instance],PAGE_SIZE);
++
++ DEB(printk("iic_hw_resrc_init: ioremapped base address: 0x%x\n", iic_ibmocp_adaps[instance]->iic_base));
++
++ if (iic_ibmocp_adaps[instance]->iic_irq > 0) {
++
++ if (request_irq(iic_ibmocp_adaps[instance]->iic_irq, iic_ibmocp_handler,
++ 0, "IBM OCP IIC", iic_ibmocp_adaps[instance]) < 0) {
++ printk(KERN_ERR "iic_hw_resrc_init: Request irq%d failed\n",
++ iic_ibmocp_adaps[instance]->iic_irq);
++ iic_ibmocp_adaps[instance]->iic_irq = 0;
++ } else {
++ DEB3(printk("iic_hw_resrc_init: Enabled interrupt\n"));
++ }
++ }
++ return 0;
++}
++
++
++//
++// Description: Release irq and memory
++//
++static void iic_ibmocp_release(void)
++{
++ int i;
++
++ for(i=0; i<IIC_NUMS; i++) {
++ struct iic_ibm *priv_data = (struct iic_ibm *)iic_ibmocp_data[i]->data;
++ if (priv_data->iic_irq > 0) {
++ disable_irq(priv_data->iic_irq);
++ free_irq(priv_data->iic_irq, 0);
++ }
++ kfree(iic_ibmocp_data[i]);
++ kfree(iic_ibmocp_ops[i]);
++ }
++}
++
++
++//
++// Description: Called when the module is loaded. This function starts the
++// cascade of calls up through the hierarchy of i2c modules (i.e. up to the
++// algorithm layer and into to the core layer)
++//
++static int __init iic_ibmocp_init(void)
++{
++ int i;
++
++ printk(KERN_INFO "iic_ibmocp_init: IBM on-chip iic adapter module\n");
++
++ for(i=0; i<IIC_NUMS; i++) {
++ iic_ibmocp_data[i] = kmalloc(sizeof(struct i2c_algo_iic_data),GFP_KERNEL);
++ if(iic_ibmocp_data[i] == NULL) {
++ return -ENOMEM;
++ }
++ memset(iic_ibmocp_data[i], 0, sizeof(struct i2c_algo_iic_data));
++
++ switch (i) {
++ case 0:
++ iic_ibmocp_adaps[i]->iic_irq = IIC_IRQ(0);
++ break;
++ case 1:
++ iic_ibmocp_adaps[i]->iic_irq = IIC_IRQ(1);
++ break;
++ }
++ iic_ibmocp_adaps[i]->iic_clock = IIC_CLOCK;
++ iic_ibmocp_adaps[i]->iic_own = IIC_OWN;
++ iic_ibmocp_adaps[i]->index = i;
++
++ DEB(printk("irq %x\n", iic_ibmocp_adaps[i]->iic_irq));
++ DEB(printk("clock %x\n", iic_ibmocp_adaps[i]->iic_clock));
++ DEB(printk("own %x\n", iic_ibmocp_adaps[i]->iic_own));
++ DEB(printk("index %x\n", iic_ibmocp_adaps[i]->index));
++
++
++ iic_ibmocp_data[i]->data = (struct iic_regs *)iic_ibmocp_adaps[i];
++ iic_ibmocp_data[i]->setiic = iic_ibmocp_setbyte;
++ iic_ibmocp_data[i]->getiic = iic_ibmocp_getbyte;
++ iic_ibmocp_data[i]->getown = iic_ibmocp_getown;
++ iic_ibmocp_data[i]->getclock = iic_ibmocp_getclock;
++ iic_ibmocp_data[i]->waitforpin = iic_ibmocp_waitforpin;
++ iic_ibmocp_data[i]->udelay = 80;
++ iic_ibmocp_data[i]->mdelay = 80;
++ iic_ibmocp_data[i]->timeout = HZ;
++
++ iic_ibmocp_ops[i] = kmalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
++ if(iic_ibmocp_ops[i] == NULL) {
++ return -ENOMEM;
++ }
++ memset(iic_ibmocp_ops[i], 0, sizeof(struct i2c_adapter));
++ strcpy(iic_ibmocp_ops[i]->name, "IBM OCP IIC adapter");
++ iic_ibmocp_ops[i]->owner = THIS_MODULE;
++ iic_ibmocp_ops[i]->id = I2C_HW_OCP;
++ iic_ibmocp_ops[i]->algo = NULL;
++ iic_ibmocp_ops[i]->algo_data = iic_ibmocp_data[i];
++
++
++ init_waitqueue_head(&(iic_wait[i]));
++ if (iic_hw_resrc_init(i) == 0) {
++ if (i2c_ocp_add_bus(iic_ibmocp_ops[i]) < 0)
++ return -ENODEV;
++ } else {
++ return -ENODEV;
++ }
++ DEB(printk(KERN_INFO "iic_ibmocp_init: found device at %#x.\n\n", iic_ibmocp_adaps[i]->iic_base));
++ }
++ return 0;
++}
++
++
++static void __exit iic_ibmocp_exit(void)
++{
++ int i;
++
++ for(i=0; i<IIC_NUMS; i++) {
++ i2c_ocp_del_bus(iic_ibmocp_ops[i]);
++ }
++ iic_ibmocp_release();
++}
++
++//
++// If modules is NOT defined when this file is compiled, then the MODULE_*
++// macros will resolve to nothing
++//
++MODULE_AUTHOR("MontaVista Software <www.mvista.com>");
++MODULE_DESCRIPTION("I2C-Bus adapter routines for PPC 405 IIC bus adapter");
++MODULE_LICENSE("GPL");
++
++MODULE_PARM(base, "i");
++MODULE_PARM(irq, "i");
++MODULE_PARM(clock, "i");
++MODULE_PARM(own, "i");
++MODULE_PARM(i2c_debug,"i");
++
++
++module_init(iic_ibmocp_init);
++module_exit(iic_ibmocp_exit);
+--- linux-old/drivers/i2c/i2c-algo-8xx.c Sun Aug 31 14:51:51 CEST 2003
++++ linux/drivers/i2c/i2c-algo-8xx.c Sun Aug 31 14:51:51 CEST 2003
+@@ -0,0 +1,616 @@
++/*
++ * i2c-algo-8xx.c i2x driver algorithms for MPC8XX CPM
++ * Copyright (c) 1999 Dan Malek (dmalek@jlc.net).
++ *
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ * moved into proper i2c interface; separated out platform specific
++ * parts into i2c-rpx.c
++ * Brad Parker (brad@heeltoe.com)
++ */
++
++// XXX todo
++// timeout sleep?
++
++/* $Id$ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/i2c.h>
++#include <linux/i2c-algo-8xx.h>
++#include <asm/mpc8xx.h>
++#include <asm/commproc.h>
++
++
++#define CPM_MAX_READ 513
++/* #define I2C_CHIP_ERRATA */ /* Try uncomment this if you have an older CPU(earlier than rev D4) */
++static wait_queue_head_t iic_wait;
++static ushort r_tbase, r_rbase;
++
++int cpm_debug = 0;
++
++static void
++cpm_iic_interrupt(void *dev_id, struct pt_regs *regs)
++{
++ volatile i2c8xx_t *i2c = (i2c8xx_t *)dev_id;
++ if (cpm_debug > 1)
++ printk("cpm_iic_interrupt(dev_id=%p)\n", dev_id);
++#if 0
++ /* Chip errata, clear enable. This is not needed on rev D4 CPUs */
++ /* This should probably be removed and replaced by I2C_CHIP_ERRATA stuff */
++ /* Someone with a buggy CPU needs to confirm that */
++ i2c->i2c_i2mod &= ~1;
++#endif
++ /* Clear interrupt.
++ */
++ i2c->i2c_i2cer = 0xff;
++
++ /* Get 'me going again.
++ */
++ wake_up_interruptible(&iic_wait);
++}
++
++static void
++cpm_iic_init(struct i2c_algo_8xx_data *cpm)
++{
++ volatile iic_t *iip = cpm->iip;
++ volatile i2c8xx_t *i2c = cpm->i2c;
++ unsigned char brg;
++ bd_t *bd = (bd_t *)__res;
++
++ if (cpm_debug) printk(KERN_DEBUG "cpm_iic_init()\n");
++
++ /* Initialize the parameter ram.
++ * We need to make sure many things are initialized to zero,
++ * especially in the case of a microcode patch.
++ */
++ iip->iic_rstate = 0;
++ iip->iic_rdp = 0;
++ iip->iic_rbptr = 0;
++ iip->iic_rbc = 0;
++ iip->iic_rxtmp = 0;
++ iip->iic_tstate = 0;
++ iip->iic_tdp = 0;
++ iip->iic_tbptr = 0;
++ iip->iic_tbc = 0;
++ iip->iic_txtmp = 0;
++
++ /* Set up the IIC parameters in the parameter ram.
++ */
++ iip->iic_tbase = r_tbase = cpm->dp_addr;
++ iip->iic_rbase = r_rbase = cpm->dp_addr + sizeof(cbd_t)*2;
++
++ iip->iic_tfcr = SMC_EB;
++ iip->iic_rfcr = SMC_EB;
++
++ /* Set maximum receive size.
++ */
++ iip->iic_mrblr = CPM_MAX_READ;
++
++ /* Initialize Tx/Rx parameters.
++ */
++ if (cpm->reloc == 0) {
++ volatile cpm8xx_t *cp = cpm->cp;
++
++ cp->cp_cpcr =
++ mk_cr_cmd(CPM_CR_CH_I2C, CPM_CR_INIT_TRX) | CPM_CR_FLG;
++ while (cp->cp_cpcr & CPM_CR_FLG);
++ } else {
++ iip->iic_rbptr = iip->iic_rbase;
++ iip->iic_tbptr = iip->iic_tbase;
++ iip->iic_rstate = 0;
++ iip->iic_tstate = 0;
++ }
++
++ /* Select an arbitrary address. Just make sure it is unique.
++ */
++ i2c->i2c_i2add = 0xfe;
++
++ /* Make clock run at 60 KHz.
++ */
++ brg = (unsigned char) (bd->bi_intfreq/(32*2*60000) -3);
++ i2c->i2c_i2brg = brg;
++
++ i2c->i2c_i2mod = 0x00;
++ i2c->i2c_i2com = 0x01; /* Master mode */
++
++ /* Disable interrupts.
++ */
++ i2c->i2c_i2cmr = 0;
++ i2c->i2c_i2cer = 0xff;
++
++ init_waitqueue_head(&iic_wait);
++
++ /* Install interrupt handler.
++ */
++ if (cpm_debug) {
++ printk ("%s[%d] Install ISR for IRQ %d\n",
++ __func__,__LINE__, CPMVEC_I2C);
++ }
++ (*cpm->setisr)(CPMVEC_I2C, cpm_iic_interrupt, (void *)i2c);
++}
++
++
++static int
++cpm_iic_shutdown(struct i2c_algo_8xx_data *cpm)
++{
++ volatile i2c8xx_t *i2c = cpm->i2c;
++
++ /* Shut down IIC.
++ */
++ i2c->i2c_i2mod &= ~1;
++ i2c->i2c_i2cmr = 0;
++ i2c->i2c_i2cer = 0xff;
++
++ return(0);
++}
++
++static void
++cpm_reset_iic_params(volatile iic_t *iip)
++{
++ iip->iic_tbase = r_tbase;
++ iip->iic_rbase = r_rbase;
++
++ iip->iic_tfcr = SMC_EB;
++ iip->iic_rfcr = SMC_EB;
++
++ iip->iic_mrblr = CPM_MAX_READ;
++
++ iip->iic_rstate = 0;
++ iip->iic_rdp = 0;
++ iip->iic_rbptr = iip->iic_rbase;
++ iip->iic_rbc = 0;
++ iip->iic_rxtmp = 0;
++ iip->iic_tstate = 0;
++ iip->iic_tdp = 0;
++ iip->iic_tbptr = iip->iic_tbase;
++ iip->iic_tbc = 0;
++ iip->iic_txtmp = 0;
++}
++
++#define BD_SC_NAK ((ushort)0x0004) /* NAK - did not respond */
++#define BD_SC_OV ((ushort)0x0002) /* OV - receive overrun */
++#define CPM_CR_CLOSE_RXBD ((ushort)0x0007)
++
++static void force_close(struct i2c_algo_8xx_data *cpm)
++{
++ volatile i2c8xx_t *i2c = cpm->i2c;
++ if (cpm->reloc == 0) { /* micro code disabled */
++ volatile cpm8xx_t *cp = cpm->cp;
++
++ if (cpm_debug) printk("force_close()\n");
++ cp->cp_cpcr =
++ mk_cr_cmd(CPM_CR_CH_I2C, CPM_CR_CLOSE_RXBD) |
++ CPM_CR_FLG;
++
++ while (cp->cp_cpcr & CPM_CR_FLG);
++ }
++ i2c->i2c_i2cmr = 0x00; /* Disable all interrupts */
++ i2c->i2c_i2cer = 0xff;
++}
++
++
++/* Read from IIC...
++ * abyte = address byte, with r/w flag already set
++ */
++static int
++cpm_iic_read(struct i2c_algo_8xx_data *cpm, u_char abyte, char *buf, int count)
++{
++ volatile iic_t *iip = cpm->iip;
++ volatile i2c8xx_t *i2c = cpm->i2c;
++ volatile cpm8xx_t *cp = cpm->cp;
++ volatile cbd_t *tbdf, *rbdf;
++ u_char *tb;
++ unsigned long flags, tmo;
++
++ if (count >= CPM_MAX_READ)
++ return -EINVAL;
++
++ /* check for and use a microcode relocation patch */
++ if (cpm->reloc) {
++ cpm_reset_iic_params(iip);
++ }
++
++ tbdf = (cbd_t *)&cp->cp_dpmem[iip->iic_tbase];
++ rbdf = (cbd_t *)&cp->cp_dpmem[iip->iic_rbase];
++
++ /* To read, we need an empty buffer of the proper length.
++ * All that is used is the first byte for address, the remainder
++ * is just used for timing (and doesn't really have to exist).
++ */
++ tb = cpm->temp;
++ tb = (u_char *)(((uint)tb + 15) & ~15);
++ tb[0] = abyte; /* Device address byte w/rw flag */
++
++ flush_dcache_range((unsigned long) tb, (unsigned long) (tb+1));
++
++ if (cpm_debug) printk("cpm_iic_read(abyte=0x%x)\n", abyte);
++
++ tbdf->cbd_bufaddr = __pa(tb);
++ tbdf->cbd_datlen = count + 1;
++ tbdf->cbd_sc =
++ BD_SC_READY | BD_SC_LAST |
++ BD_SC_WRAP | BD_IIC_START;
++
++ iip->iic_mrblr = count +1; /* prevent excessive read, +1
++ is needed otherwise will the
++ RXB interrupt come too early */
++
++ /* flush will invalidate too. */
++ flush_dcache_range((unsigned long) buf, (unsigned long) (buf+count));
++
++ rbdf->cbd_datlen = 0;
++ rbdf->cbd_bufaddr = __pa(buf);
++ rbdf->cbd_sc = BD_SC_EMPTY | BD_SC_WRAP| BD_SC_INTRPT;
++ if(count > 16){
++ /* Chip bug, set enable here */
++ local_irq_save(flags);
++ i2c->i2c_i2cmr = 0x13; /* Enable some interupts */
++ i2c->i2c_i2cer = 0xff;
++ i2c->i2c_i2mod |= 1; /* Enable */
++ i2c->i2c_i2com |= 0x80; /* Begin transmission */
++
++ /* Wait for IIC transfer */
++ tmo = interruptible_sleep_on_timeout(&iic_wait,1*HZ);
++ local_irq_restore(flags);
++ } else { /* busy wait for small transfers, its faster */
++ i2c->i2c_i2cmr = 0x00; /* Disable I2C interupts */
++ i2c->i2c_i2cer = 0xff;
++ i2c->i2c_i2mod |= 1; /* Enable */
++ i2c->i2c_i2com |= 0x80; /* Begin transmission */
++ tmo = jiffies + 1*HZ;
++ while(!(i2c->i2c_i2cer & 0x11 || time_after(jiffies, tmo))); /* Busy wait, with a timeout */
++ }
++
++ if (signal_pending(current) || !tmo){
++ force_close(cpm);
++ if(cpm_debug)
++ printk("IIC read: timeout!\n");
++ return -EIO;
++ }
++#ifdef I2C_CHIP_ERRATA
++ /* Chip errata, clear enable. This is not needed on rev D4 CPUs.
++ Disabling I2C too early may cause too short stop condition */
++ udelay(4);
++ i2c->i2c_i2mod &= ~1;
++#endif
++ if (cpm_debug) {
++ printk("tx sc %04x, rx sc %04x\n",
++ tbdf->cbd_sc, rbdf->cbd_sc);
++ }
++
++ if (tbdf->cbd_sc & BD_SC_READY) {
++ printk("IIC read; complete but tbuf ready\n");
++ force_close(cpm);
++ printk("tx sc %04x, rx sc %04x\n",
++ tbdf->cbd_sc, rbdf->cbd_sc);
++ }
++
++ if (tbdf->cbd_sc & BD_SC_NAK) {
++ if (cpm_debug)
++ printk("IIC read; no ack\n");
++ return -EREMOTEIO;
++ }
++
++ if (rbdf->cbd_sc & BD_SC_EMPTY) {
++ /* force_close(cpm); */
++ if (cpm_debug){
++ printk("IIC read; complete but rbuf empty\n");
++ printk("tx sc %04x, rx sc %04x\n",
++ tbdf->cbd_sc, rbdf->cbd_sc);
++ }
++ return -EREMOTEIO;
++ }
++
++ if (rbdf->cbd_sc & BD_SC_OV) {
++ if (cpm_debug)
++ printk("IIC read; Overrun\n");
++ return -EREMOTEIO;
++ }
++
++ if (cpm_debug) printk("read %d bytes\n", rbdf->cbd_datlen);
++
++ if (rbdf->cbd_datlen < count) {
++ if (cpm_debug)
++ printk("IIC read; short, wanted %d got %d\n",
++ count, rbdf->cbd_datlen);
++ return 0;
++ }
++
++ return count;
++}
++
++/* Write to IIC...
++ * addr = address byte, with r/w flag already set
++ */
++static int
++cpm_iic_write(struct i2c_algo_8xx_data *cpm, u_char abyte, char *buf,int count)
++{
++ volatile iic_t *iip = cpm->iip;
++ volatile i2c8xx_t *i2c = cpm->i2c;
++ volatile cpm8xx_t *cp = cpm->cp;
++ volatile cbd_t *tbdf;
++ u_char *tb;
++ unsigned long flags, tmo;
++
++ /* check for and use a microcode relocation patch */
++ if (cpm->reloc) {
++ cpm_reset_iic_params(iip);
++ }
++ tb = cpm->temp;
++ tb = (u_char *)(((uint)tb + 15) & ~15);
++ *tb = abyte; /* Device address byte w/rw flag */
++
++ flush_dcache_range((unsigned long) tb, (unsigned long) (tb+1));
++ flush_dcache_range((unsigned long) buf, (unsigned long) (buf+count));
++
++ if (cpm_debug) printk("cpm_iic_write(abyte=0x%x)\n", abyte);
++
++ /* set up 2 descriptors */
++ tbdf = (cbd_t *)&cp->cp_dpmem[iip->iic_tbase];
++
++ tbdf[0].cbd_bufaddr = __pa(tb);
++ tbdf[0].cbd_datlen = 1;
++ tbdf[0].cbd_sc = BD_SC_READY | BD_IIC_START;
++
++ tbdf[1].cbd_bufaddr = __pa(buf);
++ tbdf[1].cbd_datlen = count;
++ tbdf[1].cbd_sc = BD_SC_READY | BD_SC_INTRPT | BD_SC_LAST | BD_SC_WRAP;
++
++ if(count > 16){
++ /* Chip bug, set enable here */
++ local_irq_save(flags);
++ i2c->i2c_i2cmr = 0x13; /* Enable some interupts */
++ i2c->i2c_i2cer = 0xff;
++ i2c->i2c_i2mod |= 1; /* Enable */
++ i2c->i2c_i2com |= 0x80; /* Begin transmission */
++
++ /* Wait for IIC transfer */
++ tmo = interruptible_sleep_on_timeout(&iic_wait,1*HZ);
++ local_irq_restore(flags);
++ } else { /* busy wait for small transfers, its faster */
++ i2c->i2c_i2cmr = 0x00; /* Disable I2C interupts */
++ i2c->i2c_i2cer = 0xff;
++ i2c->i2c_i2mod |= 1; /* Enable */
++ i2c->i2c_i2com |= 0x80; /* Begin transmission */
++ tmo = jiffies + 1*HZ;
++ while(!(i2c->i2c_i2cer & 0x12 || time_after(jiffies, tmo))); /* Busy wait, with a timeout */
++ }
++
++ if (signal_pending(current) || !tmo){
++ force_close(cpm);
++ if(cpm_debug && !tmo)
++ printk("IIC write: timeout!\n");
++ return -EIO;
++ }
++
++#ifdef I2C_CHIP_ERRATA
++ /* Chip errata, clear enable. This is not needed on rev D4 CPUs.
++ Disabling I2C too early may cause too short stop condition */
++ udelay(4);
++ i2c->i2c_i2mod &= ~1;
++#endif
++ if (cpm_debug) {
++ printk("tx0 sc %04x, tx1 sc %04x\n",
++ tbdf[0].cbd_sc, tbdf[1].cbd_sc);
++ }
++
++ if (tbdf->cbd_sc & BD_SC_NAK) {
++ if (cpm_debug)
++ printk("IIC write; no ack\n");
++ return 0;
++ }
++
++ if (tbdf->cbd_sc & BD_SC_READY) {
++ if (cpm_debug)
++ printk("IIC write; complete but tbuf ready\n");
++ return 0;
++ }
++
++ return count;
++}
++
++/* See if an IIC address exists..
++ * addr = 7 bit address, unshifted
++ */
++static int
++cpm_iic_tryaddress(struct i2c_algo_8xx_data *cpm, int addr)
++{
++ volatile iic_t *iip = cpm->iip;
++ volatile i2c8xx_t *i2c = cpm->i2c;
++ volatile cpm8xx_t *cp = cpm->cp;
++ volatile cbd_t *tbdf, *rbdf;
++ u_char *tb;
++ unsigned long flags, len, tmo;
++
++ if (cpm_debug > 1)
++ printk("cpm_iic_tryaddress(cpm=%p,addr=%d)\n", cpm, addr);
++
++ /* check for and use a microcode relocation patch */
++ if (cpm->reloc) {
++ cpm_reset_iic_params(iip);
++ }
++
++ if (cpm_debug && addr == 0) {
++ printk("iip %p, dp_addr 0x%x\n", cpm->iip, cpm->dp_addr);
++ printk("iic_tbase %d, r_tbase %d\n", iip->iic_tbase, r_tbase);
++ }
++
++ tbdf = (cbd_t *)&cp->cp_dpmem[iip->iic_tbase];
++ rbdf = (cbd_t *)&cp->cp_dpmem[iip->iic_rbase];
++
++ tb = cpm->temp;
++ tb = (u_char *)(((uint)tb + 15) & ~15);
++
++ /* do a simple read */
++ tb[0] = (addr << 1) | 1; /* device address (+ read) */
++ len = 2;
++
++ flush_dcache_range((unsigned long) tb, (unsigned long) (tb+2));
++
++ tbdf->cbd_bufaddr = __pa(tb);
++ tbdf->cbd_datlen = len;
++ tbdf->cbd_sc =
++ BD_SC_READY | BD_SC_LAST |
++ BD_SC_WRAP | BD_IIC_START;
++
++ rbdf->cbd_datlen = 0;
++ rbdf->cbd_bufaddr = __pa(tb+2);
++ rbdf->cbd_sc = BD_SC_EMPTY | BD_SC_WRAP | BD_SC_INTRPT;
++
++ local_irq_save(flags);
++ i2c->i2c_i2cmr = 0x13; /* Enable some interupts */
++ i2c->i2c_i2cer = 0xff;
++ i2c->i2c_i2mod |= 1; /* Enable */
++ i2c->i2c_i2com |= 0x80; /* Begin transmission */
++
++ if (cpm_debug > 1) printk("about to sleep\n");
++
++ /* wait for IIC transfer */
++ tmo = interruptible_sleep_on_timeout(&iic_wait,1*HZ);
++ local_irq_restore(flags);
++
++#ifdef I2C_CHIP_ERRATA
++ /* Chip errata, clear enable. This is not needed on rev D4 CPUs.
++ Disabling I2C too early may cause too short stop condition */
++ udelay(4);
++ i2c->i2c_i2mod &= ~1;
++#endif
++
++ if (signal_pending(current) || !tmo){
++ force_close(cpm);
++ if(cpm_debug && !tmo)
++ printk("IIC tryaddress: timeout!\n");
++ return -EIO;
++ }
++
++ if (cpm_debug > 1) printk("back from sleep\n");
++
++ if (tbdf->cbd_sc & BD_SC_NAK) {
++ if (cpm_debug > 1) printk("IIC try; no ack\n");
++ return 0;
++ }
++
++ if (tbdf->cbd_sc & BD_SC_READY) {
++ printk("IIC try; complete but tbuf ready\n");
++ }
++
++ return 1;
++}
++
++static int cpm_xfer(struct i2c_adapter *adap,
++ struct i2c_msg msgs[],
++ int num)
++{
++ struct i2c_algo_8xx_data *cpm = adap->algo_data;
++ struct i2c_msg *pmsg;
++ int i, ret;
++ u_char addr;
++
++ for (i = 0; i < num; i++) {
++ pmsg = &msgs[i];
++
++ if (cpm_debug)
++ printk("i2c-algo-8xx.o: "
++ "#%d addr=0x%x flags=0x%x len=%d\n buf=%lx\n",
++ i, pmsg->addr, pmsg->flags, pmsg->len, (unsigned long)pmsg->buf);
++
++ addr = pmsg->addr << 1;
++ if (pmsg->flags & I2C_M_RD )
++ addr |= 1;
++ if (pmsg->flags & I2C_M_REV_DIR_ADDR )
++ addr ^= 1;
++
++ if (!(pmsg->flags & I2C_M_NOSTART)) {
++ }
++ if (pmsg->flags & I2C_M_RD ) {
++ /* read bytes into buffer*/
++ ret = cpm_iic_read(cpm, addr, pmsg->buf, pmsg->len);
++ if (cpm_debug)
++ printk("i2c-algo-8xx.o: read %d bytes\n", ret);
++ if (ret < pmsg->len ) {
++ return (ret<0)? ret : -EREMOTEIO;
++ }
++ } else {
++ /* write bytes from buffer */
++ ret = cpm_iic_write(cpm, addr, pmsg->buf, pmsg->len);
++ if (cpm_debug)
++ printk("i2c-algo-8xx.o: wrote %d\n", ret);
++ if (ret < pmsg->len ) {
++ return (ret<0) ? ret : -EREMOTEIO;
++ }
++ }
++ }
++ return (num);
++}
++
++static u32 cpm_func(struct i2c_adapter *adap)
++{
++ return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR |
++ I2C_FUNC_PROTOCOL_MANGLING;
++}
++
++/* -----exported algorithm data: ------------------------------------- */
++
++static struct i2c_algorithm cpm_algo = {
++ .owner = THIS_MODULE,
++ .name = "MPC8xx CPM algorithm",
++ .id = I2C_ALGO_MPC8XX,
++ .master_xfer = cpm_xfer,
++ .functionality = cpm_func,
++};
++
++/*
++ * registering functions to load algorithms at runtime
++ */
++int i2c_8xx_add_bus(struct i2c_adapter *adap)
++{
++ int i;
++ struct i2c_algo_8xx_data *cpm = adap->algo_data;
++
++ if (cpm_debug)
++ printk("i2c-algo-8xx.o: hw routines for %s registered.\n",
++ adap->name);
++
++ /* register new adapter to i2c module... */
++
++ adap->id |= cpm_algo.id;
++ adap->algo = &cpm_algo;
++
++ cpm_iic_init(cpm);
++ return i2c_add_adapter(adap);
++}
++
++
++int i2c_8xx_del_bus(struct i2c_adapter *adap)
++{
++ struct i2c_algo_8xx_data *cpm = adap->algo_data;
++
++ cpm_iic_shutdown(cpm);
++
++ return i2c_del_adapter(adap);
++}
++
++EXPORT_SYMBOL(i2c_8xx_add_bus);
++EXPORT_SYMBOL(i2c_8xx_del_bus);
++
++MODULE_AUTHOR("Brad Parker <brad@heeltoe.com>");
++MODULE_DESCRIPTION("I2C-Bus MPC8XX algorithm");
++MODULE_LICENSE("GPL");
+--- linux-old/include/linux/i2c-algo-8xx.h Sun Aug 31 14:51:51 CEST 2003
++++ linux/include/linux/i2c-algo-8xx.h Sun Aug 31 14:51:51 CEST 2003
+@@ -0,0 +1,43 @@
++/* ------------------------------------------------------------------------- */
++/* i2c-algo-8xx.h i2c driver algorithms for MPX8XX CPM */
++/*
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
++/* ------------------------------------------------------------------------- */
++
++/* $Id$ */
++
++#ifndef _LINUX_I2C_ALGO_8XX_H
++#define _LINUX_I2C_ALGO_8XX_H
++
++#include "asm/commproc.h"
++
++struct i2c_algo_8xx_data {
++ uint dp_addr;
++ int reloc;
++ volatile i2c8xx_t *i2c;
++ volatile iic_t *iip;
++ volatile cpm8xx_t *cp;
++
++ int (*setisr) (int irq,
++ void (*func)(void *, void *),
++ void *data);
++
++ u_char temp[513];
++};
++
++int i2c_8xx_add_bus(struct i2c_adapter *);
++int i2c_8xx_del_bus(struct i2c_adapter *);
++
++#endif /* _LINUX_I2C_ALGO_8XX_H */
+--- linux-old/drivers/i2c/i2c-algo-bit.c Sun Aug 31 14:51:52 CEST 2003
++++ linux/drivers/i2c/i2c-algo-bit.c Sun Aug 31 14:51:52 CEST 2003
+@@ -22,5 +22,5 @@
+ Frodo Looijaard <frodol@dds.nl> */
+
+-/* $Id$ */
++/* $Id$ */
+
+ #include <linux/kernel.h>
+@@ -28,14 +28,11 @@
+ #include <linux/delay.h>
+ #include <linux/slab.h>
+-#include <linux/version.h>
+ #include <linux/init.h>
+-#include <asm/uaccess.h>
+-#include <linux/ioport.h>
+ #include <linux/errno.h>
+ #include <linux/sched.h>
+-
+ #include <linux/i2c.h>
+ #include <linux/i2c-algo-bit.h>
+
++
+ /* ----- global defines ----------------------------------------------- */
+ #define DEB(x) if (i2c_debug>=1) x;
+@@ -45,25 +42,11 @@
+ /* debug the protocol by showing transferred bits */
+
+-/* debugging - slow down transfer to have a look at the data .. */
+-/* I use this with two leds&resistors, each one connected to sda,scl */
+-/* respectively. This makes sure that the algorithm works. Some chips */
+-/* might not like this, as they have an internal timeout of some mils */
+-/*
+-#define SLO_IO jif=jiffies;while(time_before_eq(jiffies, jif+i2c_table[minor].veryslow))\
+- if (need_resched) schedule();
+-*/
+-
+
+ /* ----- global variables --------------------------------------------- */
+
+-#ifdef SLO_IO
+- int jif;
+-#endif
+-
+ /* module parameters:
+ */
+ static int i2c_debug;
+ static int bit_test; /* see if the line-setting functions work */
+-static int bit_scan; /* have a look at what's hanging 'round */
+
+ /* --- setting states on the bus with the right timing: --------------- */
+@@ -90,7 +73,4 @@
+ setscl(adap,0);
+ udelay(adap->udelay);
+-#ifdef SLO_IO
+- SLO_IO
+-#endif
+ }
+
+@@ -101,15 +81,14 @@
+ static inline int sclhi(struct i2c_algo_bit_data *adap)
+ {
+- int start=jiffies;
++ int start;
+
+ setscl(adap,1);
+
+- udelay(adap->udelay);
+-
+ /* Not all adapters have scl sense line... */
+ if (adap->getscl == NULL )
+ return 0;
+
+- while (! getscl(adap) ) {
++ start=jiffies;
++ while (! getscl(adap) ) {
+ /* the hw knows how to read the clock line,
+ * so we wait until it actually gets high.
+@@ -117,15 +96,16 @@
+ * while they are processing data internally.
+ */
+- setscl(adap,1);
+ if (time_after_eq(jiffies, start+adap->timeout)) {
+ return -ETIMEDOUT;
+ }
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ if (current->need_resched)
+ schedule();
+- }
+- DEBSTAT(printk("needed %ld jiffies\n", jiffies-start));
+-#ifdef SLO_IO
+- SLO_IO
++#else
++ cond_resched();
+ #endif
++ }
++ DEBSTAT(printk(KERN_DEBUG "needed %ld jiffies\n", jiffies-start));
++ udelay(adap->udelay);
+ return 0;
+ }
+@@ -146,5 +126,5 @@
+ DEBPROTO(printk(" Sr "));
+ setsda(adap,1);
+- setscl(adap,1);
++ sclhi(adap);
+ udelay(adap->udelay);
+
+@@ -180,12 +160,12 @@
+
+ /* assert: scl is low */
+- DEB2(printk(" i2c_outb:%2.2X\n",c&0xff));
+ for ( i=7 ; i>=0 ; i-- ) {
+ sb = c & ( 1 << i );
+ setsda(adap,sb);
+ udelay(adap->udelay);
+- DEBPROTO(printk("%d",sb!=0));
++ DEBPROTO(printk(KERN_DEBUG "%d",sb!=0));
+ if (sclhi(adap)<0) { /* timed out */
+ sdahi(adap); /* we don't want to block the net */
++ DEB2(printk(KERN_DEBUG " i2c_outb: 0x%02x, timeout at bit #%d\n", c&0xff, i));
+ return -ETIMEDOUT;
+ };
+@@ -198,12 +178,13 @@
+ sdahi(adap);
+ if (sclhi(adap)<0){ /* timeout */
+- return -ETIMEDOUT;
++ DEB2(printk(KERN_DEBUG " i2c_outb: 0x%02x, timeout at ack\n", c&0xff));
++ return -ETIMEDOUT;
+ };
+ /* read ack: SDA should be pulled down by slave */
+ ack=getsda(adap); /* ack: sda is pulled low ->success. */
+- DEB2(printk(" i2c_outb: getsda() = 0x%2.2x\n", ~ack ));
++ DEB2(printk(KERN_DEBUG " i2c_outb: 0x%02x , getsda() = %d\n", c & 0xff, ack));
+
+- DEBPROTO( printk("[%2.2x]",c&0xff) );
+- DEBPROTO(if (0==ack){ printk(" A ");} else printk(" NA ") );
++ DEBPROTO( printk(KERN_DEBUG "[%2.2x]",c&0xff) );
++ DEBPROTO(if (0==ack){ printk(KERN_DEBUG " A ");} else printk(KERN_DEBUG " NA ") );
+ scllo(adap);
+ return 0==ack; /* return 1 if device acked */
+@@ -221,9 +202,8 @@
+
+ /* assert: scl is low */
+- DEB2(printk("i2c_inb.\n"));
+-
+ sdahi(adap);
+ for (i=0;i<8;i++) {
+ if (sclhi(adap)<0) { /* timeout */
++ DEB2(printk(KERN_DEBUG " i2c_inb: timeout at bit #%d\n", 7-i));
+ return -ETIMEDOUT;
+ };
+@@ -234,5 +214,7 @@
+ }
+ /* assert: scl is low */
+- DEBPROTO(printk(" %2.2x", indata & 0xff));
++ DEB2(printk(KERN_DEBUG "i2c_inb: 0x%02x\n", indata & 0xff));
++
++ DEBPROTO(printk(KERN_DEBUG " 0x%02x", indata & 0xff));
+ return (int) (indata & 0xff);
+ }
+@@ -246,67 +228,67 @@
+ sda=getsda(adap);
+ if (adap->getscl==NULL) {
+- printk("i2c-algo-bit.o: Warning: Adapter can't read from clock line - skipping test.\n");
++ printk(KERN_WARNING "i2c-algo-bit.o: Warning: Adapter can't read from clock line - skipping test.\n");
+ return 0;
+ }
+ scl=getscl(adap);
+- printk("i2c-algo-bit.o: Adapter: %s scl: %d sda: %d -- testing...\n",
++ printk(KERN_INFO "i2c-algo-bit.o: Adapter: %s scl: %d sda: %d -- testing...\n",
+ name,getscl(adap),getsda(adap));
+ if (!scl || !sda ) {
+- printk("i2c-algo-bit.o: %s seems to be busy.\n",name);
++ printk(KERN_INFO " i2c-algo-bit.o: %s seems to be busy.\n",name);
+ goto bailout;
+ }
+ sdalo(adap);
+- printk("i2c-algo-bit.o:1 scl: %d sda: %d \n",getscl(adap),
++ printk(KERN_DEBUG "i2c-algo-bit.o:1 scl: %d sda: %d \n",getscl(adap),
+ getsda(adap));
+ if ( 0 != getsda(adap) ) {
+- printk("i2c-algo-bit.o: %s SDA stuck high!\n",name);
++ printk(KERN_WARNING "i2c-algo-bit.o: %s SDA stuck high!\n",name);
+ sdahi(adap);
+ goto bailout;
+ }
+ if ( 0 == getscl(adap) ) {
+- printk("i2c-algo-bit.o: %s SCL unexpected low while pulling SDA low!\n",
++ printk(KERN_WARNING "i2c-algo-bit.o: %s SCL unexpected low while pulling SDA low!\n",
+ name);
+ goto bailout;
+ }
+ sdahi(adap);
+- printk("i2c-algo-bit.o:2 scl: %d sda: %d \n",getscl(adap),
++ printk(KERN_DEBUG "i2c-algo-bit.o:2 scl: %d sda: %d \n",getscl(adap),
+ getsda(adap));
+ if ( 0 == getsda(adap) ) {
+- printk("i2c-algo-bit.o: %s SDA stuck low!\n",name);
++ printk(KERN_WARNING "i2c-algo-bit.o: %s SDA stuck low!\n",name);
+ sdahi(adap);
+ goto bailout;
+ }
+ if ( 0 == getscl(adap) ) {
+- printk("i2c-algo-bit.o: %s SCL unexpected low while SDA high!\n",
++ printk(KERN_WARNING "i2c-algo-bit.o: %s SCL unexpected low while SDA high!\n",
+ name);
+ goto bailout;
+ }
+ scllo(adap);
+- printk("i2c-algo-bit.o:3 scl: %d sda: %d \n",getscl(adap),
++ printk(KERN_DEBUG "i2c-algo-bit.o:3 scl: %d sda: %d \n",getscl(adap),
+ getsda(adap));
+ if ( 0 != getscl(adap) ) {
+- printk("i2c-algo-bit.o: %s SCL stuck high!\n",name);
++ printk(KERN_WARNING "i2c-algo-bit.o: %s SCL stuck high!\n",name);
+ sclhi(adap);
+ goto bailout;
+ }
+ if ( 0 == getsda(adap) ) {
+- printk("i2c-algo-bit.o: %s SDA unexpected low while pulling SCL low!\n",
++ printk(KERN_WARNING "i2c-algo-bit.o: %s SDA unexpected low while pulling SCL low!\n",
+ name);
+ goto bailout;
+ }
+ sclhi(adap);
+- printk("i2c-algo-bit.o:4 scl: %d sda: %d \n",getscl(adap),
++ printk(KERN_DEBUG "i2c-algo-bit.o:4 scl: %d sda: %d \n",getscl(adap),
+ getsda(adap));
+ if ( 0 == getscl(adap) ) {
+- printk("i2c-algo-bit.o: %s SCL stuck low!\n",name);
++ printk(KERN_WARNING "i2c-algo-bit.o: %s SCL stuck low!\n",name);
+ sclhi(adap);
+ goto bailout;
+ }
+ if ( 0 == getsda(adap) ) {
+- printk("i2c-algo-bit.o: %s SDA unexpected low while SCL high!\n",
++ printk(KERN_WARNING "i2c-algo-bit.o: %s SDA unexpected low while SCL high!\n",
+ name);
+ goto bailout;
+ }
+- printk("i2c-algo-bit.o: %s passed test.\n",name);
++ printk(KERN_INFO "i2c-algo-bit.o: %s passed test.\n",name);
+ return 0;
+ bailout:
+@@ -342,14 +324,19 @@
+ udelay(adap->udelay);
+ }
+- DEB2(if (i) printk("i2c-algo-bit.o: needed %d retries for %d\n",
+- i,addr));
++ DEB2(if (i)
++ printk(KERN_DEBUG "i2c-algo-bit.o: Used %d tries to %s client at 0x%02x : %s\n",
++ i+1, addr & 1 ? "read" : "write", addr>>1,
++ ret==1 ? "success" : ret==0 ? "no ack" : "failed, timeout?" )
++ );
+ return ret;
+ }
+
+-static int sendbytes(struct i2c_adapter *i2c_adap,const char *buf, int count)
++static int sendbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
+ {
+ struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+ char c;
+- const char *temp = buf;
++ const char *temp = msg->buf;
++ int count = msg->len;
++ unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK;
+ int retval;
+ int wrcount=0;
+@@ -357,13 +344,13 @@
+ while (count > 0) {
+ c = *temp;
+- DEB2(printk("i2c-algo-bit.o: %s i2c_write: writing %2.2X\n",
++ DEB2(printk(KERN_DEBUG "i2c-algo-bit.o: %s sendbytes: writing %2.2X\n",
+ i2c_adap->name, c&0xff));
+ retval = i2c_outb(i2c_adap,c);
+- if (retval>0) {
++ if ((retval>0) || (nak_ok && (retval==0))) { /* ok or ignored NAK */
+ count--;
+ temp++;
+ wrcount++;
+ } else { /* arbitration or no acknowledge */
+- printk("i2c-algo-bit.o: %s i2c_write: error - bailout.\n",
++ printk(KERN_ERR "i2c-algo-bit.o: %s sendbytes: error - bailout.\n",
+ i2c_adap->name);
+ i2c_stop(adap);
+@@ -379,10 +366,11 @@
+ }
+
+-static inline int readbytes(struct i2c_adapter *i2c_adap,char *buf,int count)
++static inline int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
+ {
+- char *temp = buf;
+ int inval;
+ int rdcount=0; /* counts bytes read */
+ struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
++ char *temp = msg->buf;
++ int count = msg->len;
+
+ while (count > 0) {
+@@ -393,5 +381,5 @@
+ rdcount++;
+ } else { /* read timed out */
+- printk("i2c-algo-bit.o: i2c_read: i2c_inb timed out.\n");
++ printk(KERN_ERR "i2c-algo-bit.o: readbytes: i2c_inb timed out.\n");
+ break;
+ }
+@@ -406,5 +394,5 @@
+ if (sclhi(adap)<0) { /* timeout */
+ sdahi(adap);
+- printk("i2c-algo-bit.o: i2c_read: Timeout at ack\n");
++ printk(KERN_ERR "i2c-algo-bit.o: readbytes: Timeout at ack\n");
+ return -ETIMEDOUT;
+ };
+@@ -421,31 +409,34 @@
+ * reads, writes as well as 10bit-addresses.
+ * returns:
+- * 0 everything went okay, the chip ack'ed
++ * 0 everything went okay, the chip ack'ed, or IGNORE_NAK flag was set
+ * -x an error occurred (like: -EREMOTEIO if the device did not answer, or
+ * -ETIMEDOUT, for example if the lines are stuck...)
+ */
+-static inline int bit_doAddress(struct i2c_adapter *i2c_adap,
+- struct i2c_msg *msg, int retries)
++static inline int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
+ {
+ unsigned short flags = msg->flags;
++ unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK;
+ struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+
+ unsigned char addr;
+- int ret;
++ int ret, retries;
++
++ retries = nak_ok ? 0 : i2c_adap->retries;
++
+ if ( (flags & I2C_M_TEN) ) {
+ /* a ten bit address */
+ addr = 0xf0 | (( msg->addr >> 7) & 0x03);
+- DEB2(printk("addr0: %d\n",addr));
++ DEB2(printk(KERN_DEBUG "addr0: %d\n",addr));
+ /* try extended address code...*/
+ ret = try_address(i2c_adap, addr, retries);
+- if (ret!=1) {
+- printk("died at extended address code.\n");
++ if ((ret != 1) && !nak_ok) {
++ printk(KERN_ERR "died at extended address code.\n");
+ return -EREMOTEIO;
+ }
+ /* the remaining 8 bit address */
+ ret = i2c_outb(i2c_adap,msg->addr & 0x7f);
+- if (ret != 1) {
++ if ((ret != 1) && !nak_ok) {
+ /* the chip did not ack / xmission error occurred */
+- printk("died at 2nd address code.\n");
++ printk(KERN_ERR "died at 2nd address code.\n");
+ return -EREMOTEIO;
+ }
+@@ -455,6 +446,6 @@
+ addr |= 0x01;
+ ret = try_address(i2c_adap, addr, retries);
+- if (ret!=1) {
+- printk("died at extended address code.\n");
++ if ((ret!=1) && !nak_ok) {
++ printk(KERN_ERR "died at extended address code.\n");
+ return -EREMOTEIO;
+ }
+@@ -467,8 +458,8 @@
+ addr ^= 1;
+ ret = try_address(i2c_adap, addr, retries);
+- if (ret!=1) {
++ if ((ret!=1) && !nak_ok)
+ return -EREMOTEIO;
+- }
+ }
++
+ return 0;
+ }
+@@ -481,23 +472,25 @@
+
+ int i,ret;
++ unsigned short nak_ok;
+
+ i2c_start(adap);
+ for (i=0;i<num;i++) {
+ pmsg = &msgs[i];
++ nak_ok = pmsg->flags & I2C_M_IGNORE_NAK;
+ if (!(pmsg->flags & I2C_M_NOSTART)) {
+ if (i) {
+ i2c_repstart(adap);
+ }
+- ret = bit_doAddress(i2c_adap,pmsg,i2c_adap->retries);
+- if (ret != 0) {
+- DEB2(printk("i2c-algo-bit.o: NAK from device adr %#2x msg #%d\n"
+- ,msgs[i].addr,i));
+- return (ret<0) ? ret : -EREMOTEIO;
++ ret = bit_doAddress(i2c_adap, pmsg);
++ if ((ret != 0) && !nak_ok) {
++ DEB2(printk(KERN_DEBUG "i2c-algo-bit.o: NAK from device addr %2.2x msg #%d\n"
++ ,msgs[i].addr,i));
++ return (ret<0) ? ret : -EREMOTEIO;
+ }
+ }
+ if (pmsg->flags & I2C_M_RD ) {
+ /* read bytes into buffer*/
+- ret = readbytes(i2c_adap,pmsg->buf,pmsg->len);
+- DEB2(printk("i2c-algo-bit.o: read %d bytes.\n",ret));
++ ret = readbytes(i2c_adap, pmsg);
++ DEB2(printk(KERN_DEBUG "i2c-algo-bit.o: read %d bytes.\n",ret));
+ if (ret < pmsg->len ) {
+ return (ret<0)? ret : -EREMOTEIO;
+@@ -505,6 +498,6 @@
+ } else {
+ /* write bytes from buffer */
+- ret = sendbytes(i2c_adap,pmsg->buf,pmsg->len);
+- DEB2(printk("i2c-algo-bit.o: wrote %d bytes.\n",ret));
++ ret = sendbytes(i2c_adap, pmsg);
++ DEB2(printk(KERN_DEBUG "i2c-algo-bit.o: wrote %d bytes.\n",ret));
+ if (ret < pmsg->len ) {
+ return (ret<0) ? ret : -EREMOTEIO;
+@@ -516,11 +509,5 @@
+ }
+
+-static int algo_control(struct i2c_adapter *adapter,
+- unsigned int cmd, unsigned long arg)
+-{
+- return 0;
+-}
+-
+-static u32 bit_func(struct i2c_adapter *adap)
++static u32 bit_func(struct i2c_adapter *i2c_adap)
+ {
+ return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR |
+@@ -532,12 +519,9 @@
+
+ static struct i2c_algorithm i2c_bit_algo = {
+- "Bit-shift algorithm",
+- I2C_ALGO_BIT,
+- bit_xfer,
+- NULL,
+- NULL, /* slave_xmit */
+- NULL, /* slave_recv */
+- algo_control, /* ioctl */
+- bit_func, /* functionality */
++ .owner = THIS_MODULE,
++ .name = "Bit-shift algorithm",
++ .id = I2C_ALGO_BIT,
++ .master_xfer = bit_xfer,
++ .functionality = bit_func,
+ };
+
+@@ -547,5 +531,4 @@
+ int i2c_bit_add_bus(struct i2c_adapter *adap)
+ {
+- int i;
+ struct i2c_algo_bit_data *bit_adap = adap->algo_data;
+
+@@ -556,5 +539,5 @@
+ }
+
+- DEB2(printk("i2c-algo-bit.o: hw routines for %s registered.\n",
++ DEB2(printk(KERN_DEBUG "i2c-algo-bit.o: hw routines for %s registered.\n",
+ adap->name));
+
+@@ -567,26 +550,5 @@
+ adap->retries = 3; /* be replaced by defines */
+
+- /* scan bus */
+- if (bit_scan) {
+- int ack;
+- printk(KERN_INFO " i2c-algo-bit.o: scanning bus %s.\n",
+- adap->name);
+- for (i = 0x00; i < 0xff; i+=2) {
+- i2c_start(bit_adap);
+- ack = i2c_outb(adap,i);
+- i2c_stop(bit_adap);
+- if (ack>0) {
+- printk("(%02x)",i>>1);
+- } else
+- printk(".");
+- }
+- printk("\n");
+- }
+-
+-#ifdef MODULE
+- MOD_INC_USE_COUNT;
+-#endif
+ i2c_add_adapter(adap);
+-
+ return 0;
+ }
+@@ -595,29 +557,10 @@
+ int i2c_bit_del_bus(struct i2c_adapter *adap)
+ {
+- int res;
+-
+- if ((res = i2c_del_adapter(adap)) < 0)
+- return res;
+-
+- DEB2(printk("i2c-algo-bit.o: adapter unregistered: %s\n",adap->name));
+-
+-#ifdef MODULE
+- MOD_DEC_USE_COUNT;
+-#endif
+- return 0;
+-}
+-
+-int __init i2c_algo_bit_init (void)
+-{
+- printk(KERN_INFO "i2c-algo-bit.o: i2c bit algorithm module\n");
+- return 0;
++ return i2c_del_adapter(adap);
+ }
+
+-
+-
+ EXPORT_SYMBOL(i2c_bit_add_bus);
+ EXPORT_SYMBOL(i2c_bit_del_bus);
+
+-#ifdef MODULE
+ MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
+ MODULE_DESCRIPTION("I2C-Bus bit-banging algorithm");
+@@ -625,19 +568,7 @@
+
+ MODULE_PARM(bit_test, "i");
+-MODULE_PARM(bit_scan, "i");
+ MODULE_PARM(i2c_debug,"i");
+
+ MODULE_PARM_DESC(bit_test, "Test the lines of the bus to see if it is stuck");
+-MODULE_PARM_DESC(bit_scan, "Scan for active chips on the bus");
+ MODULE_PARM_DESC(i2c_debug,
+- "debug level - 0 off; 1 normal; 2,3 more verbose; 9 bit-protocol");
+-
+-int init_module(void)
+-{
+- return i2c_algo_bit_init();
+-}
+-
+-void cleanup_module(void)
+-{
+-}
+-#endif
++ "debug level - 0 off; 1 normal; 2,3 more verbose; 9 bit-protocol");
+--- linux-old/include/linux/i2c-algo-bit.h Sun Aug 31 14:51:52 CEST 2003
++++ linux/include/linux/i2c-algo-bit.h Sun Aug 31 14:51:52 CEST 2003
+@@ -22,10 +22,8 @@
+ Frodo Looijaard <frodol@dds.nl> */
+
+-/* $Id$ */
++/* $Id$ */
+
+-#ifndef I2C_ALGO_BIT_H
+-#define I2C_ALGO_BIT_H 1
+-
+-#include <linux/i2c.h>
++#ifndef _LINUX_I2C_ALGO_BIT_H
++#define _LINUX_I2C_ALGO_BIT_H
+
+ /* --- Defines for bit-adapters --------------------------------------- */
+@@ -43,7 +41,8 @@
+
+ /* local settings */
+- int udelay;
+- int mdelay;
+- int timeout;
++ int udelay; /* half-clock-cycle time in microsecs */
++ /* i.e. clock is (500 / udelay) KHz */
++ int mdelay; /* in millisecs, unused */
++ int timeout; /* in jiffies */
+ };
+
+@@ -53,3 +52,3 @@
+ int i2c_bit_del_bus(struct i2c_adapter *);
+
+-#endif /* I2C_ALGO_BIT_H */
++#endif /* _LINUX_I2C_ALGO_BIT_H */
+--- linux-old/drivers/i2c/i2c-algo-ibm_ocp.c Sun Aug 31 14:51:52 CEST 2003
++++ linux/drivers/i2c/i2c-algo-ibm_ocp.c Sun Aug 31 14:51:52 CEST 2003
+@@ -0,0 +1,901 @@
++/*
++ -------------------------------------------------------------------------
++ i2c-algo-ibm_ocp.c i2c driver algorithms for IBM PPC 405 adapters
++ -------------------------------------------------------------------------
++
++ Ian DaSilva, MontaVista Software, Inc.
++ idasilva@mvista.com or source@mvista.com
++
++ Copyright 2000 MontaVista Software Inc.
++
++ Changes made to support the IIC peripheral on the IBM PPC 405
++
++
++ ---------------------------------------------------------------------------
++ This file was highly leveraged from i2c-algo-pcf.c, which was created
++ by Simon G. Vogl and Hans Berglund:
++
++
++ Copyright (C) 1995-1997 Simon G. Vogl
++ 1998-2000 Hans Berglund
++
++ With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
++ Frodo Looijaard <frodol@dds.nl> ,and also from Martin Bailey
++ <mbailey@littlefeet-inc.com>
++
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ ---------------------------------------------------------------------------
++
++ History: 01/20/12 - Armin
++ akuster@mvista.com
++ ported up to 2.4.16+
++
++ Version 02/03/25 - Armin
++ converted to ocp format
++ removed commented out or #if 0 code
++ added Gérard Basler's fix to iic_combined_transaction() such that it
++ returns the number of successfully completed transfers.
++*/
++
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/i2c.h>
++#include <linux/i2c-algo-ibm_ocp.h>
++#include <asm/ocp.h>
++
++
++/* ----- global defines ----------------------------------------------- */
++#define DEB(x) if (i2c_debug>=1) x
++#define DEB2(x) if (i2c_debug>=2) x
++#define DEB3(x) if (i2c_debug>=3) x /* print several statistical values*/
++#define DEBPROTO(x) if (i2c_debug>=9) x;
++ /* debug the protocol by showing transferred bits */
++#define DEF_TIMEOUT 5
++
++
++/* ----- global variables --------------------------------------------- */
++
++
++/* module parameters:
++ */
++static int i2c_debug=0;
++
++/* --- setting states on the bus with the right timing: --------------- */
++
++#define iic_outb(adap, reg, val) adap->setiic(adap->data, (int) &(reg), val)
++#define iic_inb(adap, reg) adap->getiic(adap->data, (int) &(reg))
++
++#define IICO_I2C_SDAHIGH 0x0780
++#define IICO_I2C_SDALOW 0x0781
++#define IICO_I2C_SCLHIGH 0x0782
++#define IICO_I2C_SCLLOW 0x0783
++#define IICO_I2C_LINEREAD 0x0784
++
++#define IIC_SINGLE_XFER 0
++#define IIC_COMBINED_XFER 1
++
++#define IIC_ERR_LOST_ARB -2
++#define IIC_ERR_INCOMPLETE_XFR -3
++#define IIC_ERR_NACK -1
++
++/* --- other auxiliary functions -------------------------------------- */
++
++
++//
++// Description: Puts this process to sleep for a period equal to timeout
++//
++static inline void iic_sleep(unsigned long timeout)
++{
++ schedule_timeout( timeout * HZ);
++}
++
++
++//
++// Description: This performs the IBM PPC 405 IIC initialization sequence
++// as described in the PPC405GP data book.
++//
++static int iic_init (struct i2c_algo_iic_data *adap)
++{
++ struct iic_regs *iic;
++ struct iic_ibm *adap_priv_data = adap->data;
++ unsigned short retval;
++ iic = (struct iic_regs *) adap_priv_data->iic_base;
++
++ /* Clear master low master address */
++ iic_outb(adap,iic->lmadr, 0);
++
++ /* Clear high master address */
++ iic_outb(adap,iic->hmadr, 0);
++
++ /* Clear low slave address */
++ iic_outb(adap,iic->lsadr, 0);
++
++ /* Clear high slave address */
++ iic_outb(adap,iic->hsadr, 0);
++
++ /* Clear status */
++ iic_outb(adap,iic->sts, 0x0a);
++
++ /* Clear extended status */
++ iic_outb(adap,iic->extsts, 0x8f);
++
++ /* Set clock division */
++ iic_outb(adap,iic->clkdiv, 0x04);
++
++ retval = iic_inb(adap, iic->clkdiv);
++ DEB(printk("iic_init: CLKDIV register = %x\n", retval));
++
++ /* Enable interrupts on Requested Master Transfer Complete */
++ iic_outb(adap,iic->intmsk, 0x01);
++
++ /* Clear transfer count */
++ iic_outb(adap,iic->xfrcnt, 0x0);
++
++ /* Clear extended control and status */
++ iic_outb(adap,iic->xtcntlss, 0xf0);
++
++ /* Set mode control (flush master data buf, enable hold SCL, exit */
++ /* unknown state. */
++ iic_outb(adap,iic->mdcntl, 0x47);
++
++ /* Clear control register */
++ iic_outb(adap,iic->cntl, 0x0);
++
++ DEB2(printk(KERN_DEBUG "iic_init: Initialized IIC on PPC 405\n"));
++ return 0;
++}
++
++
++//
++// Description: After we issue a transaction on the IIC bus, this function
++ // is called. It puts this process to sleep until we get an interrupt from
++ // the controller telling us that the transaction we requested is complete.
++//
++static int wait_for_pin(struct i2c_algo_iic_data *adap, int *status)
++{
++
++ int timeout = DEF_TIMEOUT;
++ int retval;
++ struct iic_regs *iic;
++ struct iic_ibm *adap_priv_data = adap->data;
++ iic = (struct iic_regs *) adap_priv_data->iic_base;
++
++
++ *status = iic_inb(adap, iic->sts);
++#ifndef STUB_I2C
++
++ while (timeout-- && (*status & 0x01)) {
++ adap->waitforpin(adap->data);
++ *status = iic_inb(adap, iic->sts);
++ }
++#endif
++ if (timeout <= 0) {
++ /* Issue stop signal on the bus, and force an interrupt */
++ retval = iic_inb(adap, iic->cntl);
++ iic_outb(adap, iic->cntl, retval | 0x80);
++ /* Clear status register */
++ iic_outb(adap, iic->sts, 0x0a);
++ /* Exit unknown bus state */
++ retval = iic_inb(adap, iic->mdcntl);
++ iic_outb(adap, iic->mdcntl, (retval | 0x02));
++
++ // Check the status of the controller. Does it still see a
++ // pending transfer, even though we've tried to stop any
++ // ongoing transaction?
++ retval = iic_inb(adap, iic->sts);
++ retval = retval & 0x01;
++ if(retval) {
++ // The iic controller is hosed. It is not responding to any
++ // of our commands. We have already tried to force it into
++ // a known state, but it has not worked. Our only choice now
++ // is a soft reset, which will clear all registers, and force
++ // us to re-initialize the controller.
++ /* Soft reset */
++ iic_outb(adap, iic->xtcntlss, 0x01);
++ udelay(500);
++ iic_init(adap);
++ /* Is the pending transfer bit in the sts reg finally cleared? */
++ retval = iic_inb(adap, iic->sts);
++ retval = retval & 0x01;
++ if(retval) {
++ printk(KERN_CRIT "The IIC Controller is hosed. A processor reset is required\n");
++ }
++ // For some reason, even though the interrupt bit in this
++ // register was set during iic_init, it didn't take. We
++ // need to set it again. Don't ask me why....this is just what
++ // I saw when testing timeouts.
++ iic_outb(adap, iic->intmsk, 0x01);
++ }
++ return(-1);
++ }
++ else
++ return(0);
++}
++
++
++//------------------------------------
++// Utility functions
++//
++
++
++//
++// Description: Look at the status register to see if there was an error
++// in the requested transaction. If there is, look at the extended status
++// register and determine the exact cause.
++//
++int analyze_status(struct i2c_algo_iic_data *adap, int *error_code)
++{
++ int ret;
++ struct iic_regs *iic;
++ struct iic_ibm *adap_priv_data = adap->data;
++ iic = (struct iic_regs *) adap_priv_data->iic_base;
++
++
++ ret = iic_inb(adap, iic->sts);
++ if(ret & 0x04) {
++ // Error occurred
++ ret = iic_inb(adap, iic->extsts);
++ if(ret & 0x04) {
++ // Lost arbitration
++ *error_code = IIC_ERR_LOST_ARB;
++ }
++ if(ret & 0x02) {
++ // Incomplete transfer
++ *error_code = IIC_ERR_INCOMPLETE_XFR;
++ }
++ if(ret & 0x01) {
++ // Master transfer aborted by a NACK during the transfer of the
++ // address byte
++ *error_code = IIC_ERR_NACK;
++ }
++ return -1;
++ }
++ return 0;
++}
++
++
++//
++// Description: This function is called by the upper layers to do the
++// grunt work for a master send transaction
++//
++static int iic_sendbytes(struct i2c_adapter *i2c_adap,const char *buf,
++ int count, int xfer_flag)
++{
++ struct iic_regs *iic;
++ struct i2c_algo_iic_data *adap = i2c_adap->algo_data;
++ struct iic_ibm *adap_priv_data = adap->data;
++ int wrcount, status, timeout;
++ int loops, remainder, i, j;
++ int ret, error_code;
++ iic = (struct iic_regs *) adap_priv_data->iic_base;
++
++
++ if( count == 0 ) return 0;
++ wrcount = 0;
++ loops = count / 4;
++ remainder = count % 4;
++
++ if((loops > 1) && (remainder == 0)) {
++ for(i=0; i<(loops-1); i++) {
++ //
++ // Write four bytes to master data buffer
++ //
++ for(j=0; j<4; j++) {
++ iic_outb(adap, iic->mdbuf,
++ buf[wrcount++]);
++ }
++ //
++ // Issue command to IICO device to begin transmission
++ //
++ iic_outb(adap, iic->cntl, 0x35);
++ //
++ // Wait for transmission to complete. When it does,
++ //loop to the top of the for statement and write the
++ // next four bytes.
++ //
++ timeout = wait_for_pin(adap, &status);
++ if(timeout < 0) {
++ //
++ // Error handling
++ //
++ //printk(KERN_ERR "Error: write timeout\n");
++ return wrcount;
++ }
++ ret = analyze_status(adap, &error_code);
++ if(ret < 0) {
++ if(error_code == IIC_ERR_INCOMPLETE_XFR) {
++ // Return the number of bytes transferred
++ ret = iic_inb(adap, iic->xfrcnt);
++ ret = ret & 0x07;
++ return (wrcount-4+ret);
++ }
++ else return error_code;
++ }
++ }
++ }
++ else if((loops >= 1) && (remainder > 0)){
++ //printk(KERN_DEBUG "iic_sendbytes: (loops >= 1)\n");
++ for(i=0; i<loops; i++) {
++ //
++ // Write four bytes to master data buffer
++ //
++ for(j=0; j<4; j++) {
++ iic_outb(adap, iic->mdbuf,
++ buf[wrcount++]);
++ }
++ //
++ // Issue command to IICO device to begin transmission
++ //
++ iic_outb(adap, iic->cntl, 0x35);
++ //
++ // Wait for transmission to complete. When it does,
++ //loop to the top of the for statement and write the
++ // next four bytes.
++ //
++ timeout = wait_for_pin(adap, &status);
++ if(timeout < 0) {
++ //
++ // Error handling
++ //
++ //printk(KERN_ERR "Error: write timeout\n");
++ return wrcount;
++ }
++ ret = analyze_status(adap, &error_code);
++ if(ret < 0) {
++ if(error_code == IIC_ERR_INCOMPLETE_XFR) {
++ // Return the number of bytes transferred
++ ret = iic_inb(adap, iic->xfrcnt);
++ ret = ret & 0x07;
++ return (wrcount-4+ret);
++ }
++ else return error_code;
++ }
++ }
++ }
++
++ //printk(KERN_DEBUG "iic_sendbytes: expedite write\n");
++ if(remainder == 0) remainder = 4;
++ // remainder = remainder - 1;
++ //
++ // Write the remaining bytes (less than or equal to 4)
++ //
++ for(i=0; i<remainder; i++) {
++ iic_outb(adap, iic->mdbuf, buf[wrcount++]);
++ //printk(KERN_DEBUG "iic_sendbytes: data transferred = %x, wrcount = %d\n", buf[wrcount-1], (wrcount-1));
++ }
++ //printk(KERN_DEBUG "iic_sendbytes: Issuing write\n");
++
++ if(xfer_flag == IIC_COMBINED_XFER) {
++ iic_outb(adap, iic->cntl, (0x09 | ((remainder-1) << 4)));
++ }
++ else {
++ iic_outb(adap, iic->cntl, (0x01 | ((remainder-1) << 4)));
++ }
++ DEB2(printk(KERN_DEBUG "iic_sendbytes: Waiting for interrupt\n"));
++ timeout = wait_for_pin(adap, &status);
++ if(timeout < 0) {
++ //
++ // Error handling
++ //
++ //printk(KERN_ERR "Error: write timeout\n");
++ return wrcount;
++ }
++ ret = analyze_status(adap, &error_code);
++ if(ret < 0) {
++ if(error_code == IIC_ERR_INCOMPLETE_XFR) {
++ // Return the number of bytes transferred
++ ret = iic_inb(adap, iic->xfrcnt);
++ ret = ret & 0x07;
++ return (wrcount-4+ret);
++ }
++ else return error_code;
++ }
++ DEB2(printk(KERN_DEBUG "iic_sendbytes: Got interrupt\n"));
++ return wrcount;
++}
++
++
++//
++// Description: Called by the upper layers to do the grunt work for
++// a master read transaction.
++//
++static int iic_readbytes(struct i2c_adapter *i2c_adap, char *buf, int count, int xfer_type)
++{
++ struct iic_regs *iic;
++ int rdcount=0, i, status, timeout;
++ struct i2c_algo_iic_data *adap = i2c_adap->algo_data;
++ struct iic_ibm *adap_priv_data = adap->data;
++ int loops, remainder, j;
++ int ret, error_code;
++ iic = (struct iic_regs *) adap_priv_data->iic_base;
++
++ if(count == 0) return 0;
++ loops = count / 4;
++ remainder = count % 4;
++
++ //printk(KERN_DEBUG "iic_readbytes: loops = %d, remainder = %d\n", loops, remainder);
++
++ if((loops > 1) && (remainder == 0)) {
++ //printk(KERN_DEBUG "iic_readbytes: (loops > 1) && (remainder == 0)\n");
++ for(i=0; i<(loops-1); i++) {
++ //
++ // Issue command to begin master read (4 bytes maximum)
++ //
++ //printk(KERN_DEBUG "--->Issued read command\n");
++ iic_outb(adap, iic->cntl, 0x37);
++ //
++ // Wait for transmission to complete. When it does,
++ // loop to the top of the for statement and write the
++ // next four bytes.
++ //
++ //printk(KERN_DEBUG "--->Waiting for interrupt\n");
++ timeout = wait_for_pin(adap, &status);
++ if(timeout < 0) {
++ // Error Handler
++ //printk(KERN_ERR "Error: read timed out\n");
++ return rdcount;
++ }
++ //printk(KERN_DEBUG "--->Got interrupt\n");
++
++ ret = analyze_status(adap, &error_code);
++ if(ret < 0) {
++ if(error_code == IIC_ERR_INCOMPLETE_XFR)
++ return rdcount;
++ else
++ return error_code;
++ }
++
++ for(j=0; j<4; j++) {
++ // Wait for data to shuffle to top of data buffer
++ // This value needs to be optimized.
++ udelay(1);
++ buf[rdcount] = iic_inb(adap, iic->mdbuf);
++ rdcount++;
++ //printk(KERN_DEBUG "--->Read one byte\n");
++ }
++ }
++ }
++
++ else if((loops >= 1) && (remainder > 0)){
++ //printk(KERN_DEBUG "iic_readbytes: (loops >=1) && (remainder > 0)\n");
++ for(i=0; i<loops; i++) {
++ //
++ // Issue command to begin master read (4 bytes maximum)
++ //
++ //printk(KERN_DEBUG "--->Issued read command\n");
++ iic_outb(adap, iic->cntl, 0x37);
++ //
++ // Wait for transmission to complete. When it does,
++ // loop to the top of the for statement and write the
++ // next four bytes.
++ //
++ //printk(KERN_DEBUG "--->Waiting for interrupt\n");
++ timeout = wait_for_pin(adap, &status);
++ if(timeout < 0) {
++ // Error Handler
++ //printk(KERN_ERR "Error: read timed out\n");
++ return rdcount;
++ }
++ //printk(KERN_DEBUG "--->Got interrupt\n");
++
++ ret = analyze_status(adap, &error_code);
++ if(ret < 0) {
++ if(error_code == IIC_ERR_INCOMPLETE_XFR)
++ return rdcount;
++ else
++ return error_code;
++ }
++
++ for(j=0; j<4; j++) {
++ // Wait for data to shuffle to top of data buffer
++ // This value needs to be optimized.
++ udelay(1);
++ buf[rdcount] = iic_inb(adap, iic->mdbuf);
++ rdcount++;
++ //printk(KERN_DEBUG "--->Read one byte\n");
++ }
++ }
++ }
++
++ //printk(KERN_DEBUG "iic_readbytes: expedite read\n");
++ if(remainder == 0) remainder = 4;
++ DEB2(printk(KERN_DEBUG "iic_readbytes: writing %x to IICO_CNTL\n", (0x03 | ((remainder-1) << 4))));
++
++ if(xfer_type == IIC_COMBINED_XFER) {
++ iic_outb(adap, iic->cntl, (0x0b | ((remainder-1) << 4)));
++ }
++ else {
++ iic_outb(adap, iic->cntl, (0x03 | ((remainder-1) << 4)));
++ }
++ DEB2(printk(KERN_DEBUG "iic_readbytes: Wait for pin\n"));
++ timeout = wait_for_pin(adap, &status);
++ DEB2(printk(KERN_DEBUG "iic_readbytes: Got the interrupt\n"));
++ if(timeout < 0) {
++ // Error Handler
++ //printk(KERN_ERR "Error: read timed out\n");
++ return rdcount;
++ }
++
++ ret = analyze_status(adap, &error_code);
++ if(ret < 0) {
++ if(error_code == IIC_ERR_INCOMPLETE_XFR)
++ return rdcount;
++ else
++ return error_code;
++ }
++
++ //printk(KERN_DEBUG "iic_readbyte: Begin reading data buffer\n");
++ for(i=0; i<remainder; i++) {
++ buf[rdcount] = iic_inb(adap, iic->mdbuf);
++ // printk(KERN_DEBUG "iic_readbytes: Character read = %x\n", buf[rdcount]);
++ rdcount++;
++ }
++
++ return rdcount;
++}
++
++
++//
++// Description: This function implements combined transactions. Combined
++// transactions consist of combinations of reading and writing blocks of data.
++// Each transfer (i.e. a read or a write) is separated by a repeated start
++// condition.
++//
++static int iic_combined_transaction(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num)
++{
++ int i;
++ struct i2c_msg *pmsg;
++ int ret;
++
++ DEB2(printk(KERN_DEBUG "Beginning combined transaction\n"));
++ for(i=0; i < num; i++) {
++ pmsg = &msgs[i];
++ if(pmsg->flags & I2C_M_RD) {
++
++ // Last read or write segment needs to be terminated with a stop
++ if(i < num-1) {
++ DEB2(printk(KERN_DEBUG "This one is a read\n"));
++ }
++ else {
++ DEB2(printk(KERN_DEBUG "Doing the last read\n"));
++ }
++ ret = iic_readbytes(i2c_adap, pmsg->buf, pmsg->len, (i < num-1) ? IIC_COMBINED_XFER : IIC_SINGLE_XFER);
++
++ if (ret != pmsg->len) {
++ DEB2(printk("i2c-algo-ppc405.o: fail: "
++ "only read %d bytes.\n",ret));
++ return i;
++ }
++ else {
++ DEB2(printk("i2c-algo-ppc405.o: read %d bytes.\n",ret));
++ }
++ }
++ else if(!(pmsg->flags & I2C_M_RD)) {
++
++ // Last read or write segment needs to be terminated with a stop
++ if(i < num-1) {
++ DEB2(printk(KERN_DEBUG "This one is a write\n"));
++ }
++ else {
++ DEB2(printk(KERN_DEBUG "Doing the last write\n"));
++ }
++ ret = iic_sendbytes(i2c_adap, pmsg->buf, pmsg->len, (i < num-1) ? IIC_COMBINED_XFER : IIC_SINGLE_XFER);
++
++ if (ret != pmsg->len) {
++ DEB2(printk("i2c-algo-ppc405.o: fail: "
++ "only wrote %d bytes.\n",ret));
++ return i;
++ }
++ else {
++ DEB2(printk("i2c-algo-ppc405.o: wrote %d bytes.\n",ret));
++ }
++ }
++ }
++
++ return num;
++}
++
++
++//
++// Description: Whenever we initiate a transaction, the first byte clocked
++// onto the bus after the start condition is the address (7 bit) of the
++// device we want to talk to. This function manipulates the address specified
++// so that it makes sense to the hardware when written to the IIC peripheral.
++//
++// Note: 10 bit addresses are not supported in this driver, although they are
++// supported by the hardware. This functionality needs to be implemented.
++//
++static inline int iic_doAddress(struct i2c_algo_iic_data *adap,
++ struct i2c_msg *msg, int retries)
++{
++ struct iic_regs *iic;
++ unsigned short flags = msg->flags;
++ unsigned char addr;
++ struct iic_ibm *adap_priv_data = adap->data;
++ iic = (struct iic_regs *) adap_priv_data->iic_base;
++
++//
++// The following segment for 10 bit addresses needs to be ported
++//
++/* Ten bit addresses not supported right now
++ if ( (flags & I2C_M_TEN) ) {
++ // a ten bit address
++ addr = 0xf0 | (( msg->addr >> 7) & 0x03);
++ DEB2(printk(KERN_DEBUG "addr0: %d\n",addr));
++ // try extended address code...
++ ret = try_address(adap, addr, retries);
++ if (ret!=1) {
++ printk(KERN_ERR "iic_doAddress: died at extended address code.\n");
++ return -EREMOTEIO;
++ }
++ // the remaining 8 bit address
++ iic_outb(adap,msg->addr & 0x7f);
++ // Status check comes here
++ if (ret != 1) {
++ printk(KERN_ERR "iic_doAddress: died at 2nd address code.\n");
++ return -EREMOTEIO;
++ }
++ if ( flags & I2C_M_RD ) {
++ i2c_repstart(adap);
++ // okay, now switch into reading mode
++ addr |= 0x01;
++ ret = try_address(adap, addr, retries);
++ if (ret!=1) {
++ printk(KERN_ERR "iic_doAddress: died at extended address code.\n");
++ return -EREMOTEIO;
++ }
++ }
++ } else ----------> // normal 7 bit address
++
++Ten bit addresses not supported yet */
++
++ addr = ( msg->addr << 1 );
++ if (flags & I2C_M_RD )
++ addr |= 1;
++ if (flags & I2C_M_REV_DIR_ADDR )
++ addr ^= 1;
++ //
++ // Write to the low slave address
++ //
++ iic_outb(adap, iic->lmadr, addr);
++ //
++ // Write zero to the high slave register since we are
++ // only using 7 bit addresses
++ //
++ iic_outb(adap, iic->hmadr, 0);
++
++ return 0;
++}
++
++
++//
++// Description: Prepares the controller for a transaction (clearing status
++// registers, data buffers, etc), and then calls either iic_readbytes or
++// iic_sendbytes to do the actual transaction.
++//
++static int iic_xfer(struct i2c_adapter *i2c_adap,
++ struct i2c_msg msgs[],
++ int num)
++{
++ struct iic_regs *iic;
++ struct i2c_algo_iic_data *adap = i2c_adap->algo_data;
++ struct iic_ibm *adap_priv_data = adap->data;
++ struct i2c_msg *pmsg;
++ int i = 0;
++ int ret;
++ iic = (struct iic_regs *) adap_priv_data->iic_base;
++
++ pmsg = &msgs[i];
++
++ //
++ // Clear status register
++ //
++ DEB2(printk(KERN_DEBUG "iic_xfer: iic_xfer: Clearing status register\n"));
++ iic_outb(adap, iic->sts, 0x0a);
++
++ //
++ // Wait for any pending transfers to complete
++ //
++ DEB2(printk(KERN_DEBUG "iic_xfer: Waiting for any pending transfers to complete\n"));
++ while((ret = iic_inb(adap, iic->sts)) == 0x01) {
++ ;
++ }
++
++ //
++ // Flush master data buf
++ //
++ DEB2(printk(KERN_DEBUG "iic_xfer: Clearing master data buffer\n"));
++ ret = iic_inb(adap, iic->mdcntl);
++ iic_outb(adap, iic->mdcntl, ret | 0x40);
++
++ //
++ // Load slave address
++ //
++ DEB2(printk(KERN_DEBUG "iic_xfer: Loading slave address\n"));
++ ret = iic_doAddress(adap, pmsg, i2c_adap->retries);
++
++ //
++ // Check to see if the bus is busy
++ //
++ ret = iic_inb(adap, iic->extsts);
++ // Mask off the irrelevant bits
++ ret = ret & 0x70;
++ // When the bus is free, the BCS bits in the EXTSTS register are 0b100
++ if(ret != 0x40) return IIC_ERR_LOST_ARB;
++
++ //
++ // Combined transaction (read and write)
++ //
++ if(num > 1) {
++ DEB2(printk(KERN_DEBUG "iic_xfer: Call combined transaction\n"));
++ ret = iic_combined_transaction(i2c_adap, msgs, num);
++ }
++ //
++ // Read only
++ //
++ else if((num == 1) && (pmsg->flags & I2C_M_RD)) {
++ //
++ // Tell device to begin reading data from the master data
++ //
++ DEB2(printk(KERN_DEBUG "iic_xfer: Call adapter's read\n"));
++ ret = iic_readbytes(i2c_adap, pmsg->buf, pmsg->len, IIC_SINGLE_XFER);
++ }
++ //
++ // Write only
++ //
++ else if((num == 1 ) && (!(pmsg->flags & I2C_M_RD))) {
++ //
++ // Write data to master data buffers and tell our device
++ // to begin transmitting
++ //
++ DEB2(printk(KERN_DEBUG "iic_xfer: Call adapter's write\n"));
++ ret = iic_sendbytes(i2c_adap, pmsg->buf, pmsg->len, IIC_SINGLE_XFER);
++ }
++
++ return ret;
++}
++
++
++//
++// Description: Implements device specific ioctls. Higher level ioctls can
++// be found in i2c-core.c and are typical of any i2c controller (specifying
++// slave address, timeouts, etc). These ioctls take advantage of any hardware
++// features built into the controller for which this algorithm-adapter set
++// was written. These ioctls allow you to take control of the data and clock
++// lines on the IBM PPC 405 IIC controller and set the either high or low,
++// similar to a GPIO pin.
++//
++static int algo_control(struct i2c_adapter *adapter,
++ unsigned int cmd, unsigned long arg)
++{
++ struct iic_regs *iic;
++ struct i2c_algo_iic_data *adap = adapter->algo_data;
++ struct iic_ibm *adap_priv_data = adap->data;
++ int ret=0;
++ int lines;
++ iic = (struct iic_regs *) adap_priv_data->iic_base;
++
++ lines = iic_inb(adap, iic->directcntl);
++
++ if (cmd == IICO_I2C_SDAHIGH) {
++ lines = lines & 0x01;
++ if( lines ) lines = 0x04;
++ else lines = 0;
++ iic_outb(adap, iic->directcntl,(0x08|lines));
++ }
++ else if (cmd == IICO_I2C_SDALOW) {
++ lines = lines & 0x01;
++ if( lines ) lines = 0x04;
++ else lines = 0;
++ iic_outb(adap, iic->directcntl,(0x00|lines));
++ }
++ else if (cmd == IICO_I2C_SCLHIGH) {
++ lines = lines & 0x02;
++ if( lines ) lines = 0x08;
++ else lines = 0;
++ iic_outb(adap, iic->directcntl,(0x04|lines));
++ }
++ else if (cmd == IICO_I2C_SCLLOW) {
++ lines = lines & 0x02;
++ if( lines ) lines = 0x08;
++ else lines = 0;
++ iic_outb(adap, iic->directcntl,(0x00|lines));
++ }
++ else if (cmd == IICO_I2C_LINEREAD) {
++ ret = lines;
++ }
++ return ret;
++}
++
++
++static u32 iic_func(struct i2c_adapter *adap)
++{
++ return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR |
++ I2C_FUNC_PROTOCOL_MANGLING;
++}
++
++
++/* -----exported algorithm data: ------------------------------------- */
++
++static struct i2c_algorithm iic_algo = {
++ .owner = THIS_MODULE,
++ .name = "IBM on-chip IIC algorithm",
++ .id = I2C_ALGO_OCP,
++ .master_xfer = iic_xfer,
++ .algo_control = algo_control,
++ .functionality = iic_func,
++};
++
++/*
++ * registering functions to load algorithms at runtime
++ */
++
++
++//
++// Description: Register bus structure
++//
++int i2c_ocp_add_bus(struct i2c_adapter *adap)
++{
++ struct i2c_algo_iic_data *iic_adap = adap->algo_data;
++
++ DEB2(printk(KERN_DEBUG "i2c-algo-iic.o: hw routines for %s registered.\n",
++ adap->name));
++
++ /* register new adapter to i2c module... */
++
++ adap->id |= iic_algo.id;
++ adap->algo = &iic_algo;
++
++ adap->timeout = 100; /* default values, should */
++ adap->retries = 3; /* be replaced by defines */
++
++ iic_init(iic_adap);
++ i2c_add_adapter(adap);
++ return 0;
++}
++
++
++//
++// Done
++//
++int i2c_ocp_del_bus(struct i2c_adapter *adap)
++{
++ return i2c_del_adapter(adap);
++}
++
++
++EXPORT_SYMBOL(i2c_ocp_add_bus);
++EXPORT_SYMBOL(i2c_ocp_del_bus);
++
++//
++// The MODULE_* macros resolve to nothing if MODULES is not defined
++// when this file is compiled.
++//
++MODULE_AUTHOR("MontaVista Software <www.mvista.com>");
++MODULE_DESCRIPTION("PPC 405 iic algorithm");
++MODULE_LICENSE("GPL");
++
++MODULE_PARM(i2c_debug,"i");
++
++MODULE_PARM_DESC(i2c_debug,
++ "debug level - 0 off; 1 normal; 2,3 more verbose; 9 iic-protocol");
++
+--- linux-old/include/linux/i2c-algo-ibm_ocp.h Sun Aug 31 14:51:52 CEST 2003
++++ linux/include/linux/i2c-algo-ibm_ocp.h Sun Aug 31 14:51:52 CEST 2003
+@@ -0,0 +1,52 @@
++/* ------------------------------------------------------------------------- */
++/* i2c-algo-ibm_ocp.h i2c driver algorithms for IBM PPC 405 IIC adapters */
++/* ------------------------------------------------------------------------- */
++/* Copyright (C) 1995-97 Simon G. Vogl
++ 1998-99 Hans Berglund
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
++/* ------------------------------------------------------------------------- */
++
++/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
++ Frodo Looijaard <frodol@dds.nl> */
++
++/* Modifications by MontaVista Software, August 2000
++ Changes made to support the IIC peripheral on the IBM PPC 405 */
++
++#ifndef _LINUX_I2C_ALGO_IBM_OCP_H
++#define _LINUX_I2C_ALGO_IBM_OCP_H
++
++struct i2c_algo_iic_data {
++ struct iic_regs *data; /* private data for lolevel routines */
++ void (*setiic) (void *data, int ctl, int val);
++ int (*getiic) (void *data, int ctl);
++ int (*getown) (void *data);
++ int (*getclock) (void *data);
++ void (*waitforpin) (void *data);
++
++ /* local settings */
++ int udelay;
++ int mdelay;
++ int timeout;
++};
++
++
++#define I2C_IIC_ADAP_MAX 16
++
++
++int i2c_ocp_add_bus(struct i2c_adapter *);
++int i2c_ocp_del_bus(struct i2c_adapter *);
++
++#endif /* _LINUX_I2C_ALGO_IBM_OCP_H */
+--- linux-old/drivers/i2c/i2c-algo-pcf.c Sun Aug 31 14:51:53 CEST 2003
++++ linux/drivers/i2c/i2c-algo-pcf.c Sun Aug 31 14:51:53 CEST 2003
+@@ -32,14 +32,9 @@
+ #include <linux/delay.h>
+ #include <linux/slab.h>
+-#include <linux/version.h>
+ #include <linux/init.h>
+-#include <asm/uaccess.h>
+-#include <linux/ioport.h>
+ #include <linux/errno.h>
+-#include <linux/sched.h>
+-
+ #include <linux/i2c.h>
+ #include <linux/i2c-algo-pcf.h>
+-#include "i2c-pcf8584.h"
++
+
+ /* ----- global defines ----------------------------------------------- */
+@@ -54,5 +49,4 @@
+ */
+ static int i2c_debug=0;
+-static int pcf_scan=0; /* have a look at what's hanging 'round */
+
+ /* --- setting states on the bus with the right timing: --------------- */
+@@ -100,5 +94,5 @@
+ #endif
+ if (timeout <= 0) {
+- printk("Timeout waiting for Bus Busy\n");
++ printk(KERN_ERR "Timeout waiting for Bus Busy\n");
+ }
+
+@@ -145,5 +139,5 @@
+ unsigned char temp;
+
+- DEB3(printk("i2c-algo-pcf.o: PCF state 0x%02x\n", get_pcf(adap, 1)));
++ DEB3(printk(KERN_DEBUG "i2c-algo-pcf.o: PCF state 0x%02x\n", get_pcf(adap, 1)));
+
+ /* S1=0x80: S0 selected, serial interface off */
+@@ -151,7 +145,6 @@
+ /* check to see S1 now used as R/W ctrl -
+ PCF8584 does that when ESO is zero */
+- /* PCF also resets PIN bit */
+- if ((temp = get_pcf(adap, 1)) != (0)) {
+- DEB2(printk("i2c-algo-pcf.o: PCF detection failed -- can't select S0 (0x%02x).\n", temp));
++ if (((temp = get_pcf(adap, 1)) & 0x7f) != (0)) {
++ DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S0 (0x%02x).\n", temp));
+ return -ENXIO; /* definetly not PCF8584 */
+ }
+@@ -161,5 +154,5 @@
+ /* check it's realy writen */
+ if ((temp = i2c_inb(adap)) != get_own(adap)) {
+- DEB2(printk("i2c-algo-pcf.o: PCF detection failed -- can't set S0 (0x%02x).\n", temp));
++ DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't set S0 (0x%02x).\n", temp));
+ return -ENXIO;
+ }
+@@ -168,6 +161,6 @@
+ set_pcf(adap, 1, I2C_PCF_PIN | I2C_PCF_ES1);
+ /* check to see S2 now selected */
+- if ((temp = get_pcf(adap, 1)) != I2C_PCF_ES1) {
+- DEB2(printk("i2c-algo-pcf.o: PCF detection failed -- can't select S2 (0x%02x).\n", temp));
++ if (((temp = get_pcf(adap, 1)) & 0x7f) != I2C_PCF_ES1) {
++ DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S2 (0x%02x).\n", temp));
+ return -ENXIO;
+ }
+@@ -177,5 +170,5 @@
+ /* check it's realy writen, the only 5 lowest bits does matter */
+ if (((temp = i2c_inb(adap)) & 0x1f) != get_clock(adap)) {
+- DEB2(printk("i2c-algo-pcf.o: PCF detection failed -- can't set S2 (0x%02x).\n", temp));
++ DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't set S2 (0x%02x).\n", temp));
+ return -ENXIO;
+ }
+@@ -186,9 +179,9 @@
+ /* check to see PCF is realy idled and we can access status register */
+ if ((temp = get_pcf(adap, 1)) != (I2C_PCF_PIN | I2C_PCF_BB)) {
+- DEB2(printk("i2c-algo-pcf.o: PCF detection failed -- can't select S1` (0x%02x).\n", temp));
++ DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S1` (0x%02x).\n", temp));
+ return -ENXIO;
+ }
+
+- printk("i2c-algo-pcf.o: deteted and initialized PCF8584.\n");
++ printk(KERN_DEBUG "i2c-algo-pcf.o: deteted and initialized PCF8584.\n");
+
+ return 0;
+@@ -216,5 +209,5 @@
+ udelay(adap->udelay);
+ }
+- DEB2(if (i) printk("i2c-algo-pcf.o: needed %d retries for %d\n",i,
++ DEB2(if (i) printk(KERN_DEBUG "i2c-algo-pcf.o: needed %d retries for %d\n",i,
+ addr));
+ return ret;
+@@ -229,5 +222,5 @@
+
+ for (wrcount=0; wrcount<count; ++wrcount) {
+- DEB2(printk("i2c-algo-pcf.o: %s i2c_write: writing %2.2X\n",
++ DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: %s i2c_write: writing %2.2X\n",
+ i2c_adap->name, buf[wrcount]&0xff));
+ i2c_outb(adap, buf[wrcount]);
+@@ -235,5 +228,5 @@
+ if (timeout) {
+ i2c_stop(adap);
+- printk("i2c-algo-pcf.o: %s i2c_write: "
++ printk(KERN_ERR "i2c-algo-pcf.o: %s i2c_write: "
+ "error - timeout.\n", i2c_adap->name);
+ return -EREMOTEIO; /* got a better one ?? */
+@@ -242,5 +235,5 @@
+ if (status & I2C_PCF_LRB) {
+ i2c_stop(adap);
+- printk("i2c-algo-pcf.o: %s i2c_write: "
++ printk(KERN_ERR "i2c-algo-pcf.o: %s i2c_write: "
+ "error - no ack.\n", i2c_adap->name);
+ return -EREMOTEIO; /* got a better one ?? */
+@@ -270,5 +263,5 @@
+ if (wait_for_pin(adap, &status)) {
+ i2c_stop(adap);
+- printk("i2c-algo-pcf.o: pcf_readbytes timed out.\n");
++ printk(KERN_ERR "i2c-algo-pcf.o: pcf_readbytes timed out.\n");
+ return (-1);
+ }
+@@ -277,5 +270,5 @@
+ if ((status & I2C_PCF_LRB) && (i != count)) {
+ i2c_stop(adap);
+- printk("i2c-algo-pcf.o: i2c_read: i2c_inb, No ack.\n");
++ printk(KERN_ERR "i2c-algo-pcf.o: i2c_read: i2c_inb, No ack.\n");
+ return (-1);
+ }
+@@ -313,9 +306,9 @@
+ /* a ten bit address */
+ addr = 0xf0 | (( msg->addr >> 7) & 0x03);
+- DEB2(printk("addr0: %d\n",addr));
++ DEB2(printk(KERN_DEBUG "addr0: %d\n",addr));
+ /* try extended address code...*/
+ ret = try_address(adap, addr, retries);
+ if (ret!=1) {
+- printk("died at extended address code.\n");
++ printk(KERN_ERR "died at extended address code.\n");
+ return -EREMOTEIO;
+ }
+@@ -324,5 +317,5 @@
+ /* Status check comes here */
+ if (ret != 1) {
+- printk("died at 2nd address code.\n");
++ printk(KERN_ERR "died at 2nd address code.\n");
+ return -EREMOTEIO;
+ }
+@@ -333,5 +326,5 @@
+ ret = try_address(adap, addr, retries);
+ if (ret!=1) {
+- printk("died at extended address code.\n");
++ printk(KERN_ERR "died at extended address code.\n");
+ return -EREMOTEIO;
+ }
+@@ -361,5 +354,5 @@
+ timeout = wait_for_bb(adap);
+ if (timeout) {
+- DEB2(printk("i2c-algo-pcf.o: "
++ DEB2(printk(KERN_ERR "i2c-algo-pcf.o: "
+ "Timeout waiting for BB in pcf_xfer\n");)
+ return -EIO;
+@@ -369,5 +362,5 @@
+ pmsg = &msgs[i];
+
+- DEB2(printk("i2c-algo-pcf.o: Doing %s %d bytes to 0x%02x - %d of %d messages\n",
++ DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: Doing %s %d bytes to 0x%02x - %d of %d messages\n",
+ pmsg->flags & I2C_M_RD ? "read" : "write",
+ pmsg->len, pmsg->addr, i + 1, num);)
+@@ -384,5 +377,5 @@
+ if (timeout) {
+ i2c_stop(adap);
+- DEB2(printk("i2c-algo-pcf.o: Timeout waiting "
++ DEB2(printk(KERN_ERR "i2c-algo-pcf.o: Timeout waiting "
+ "for PIN(1) in pcf_xfer\n");)
+ return (-EREMOTEIO);
+@@ -393,10 +386,10 @@
+ if (status & I2C_PCF_LRB) {
+ i2c_stop(adap);
+- DEB2(printk("i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");)
++ DEB2(printk(KERN_ERR "i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");)
+ return (-EREMOTEIO);
+ }
+ #endif
+
+- DEB3(printk("i2c-algo-pcf.o: Msg %d, addr=0x%x, flags=0x%x, len=%d\n",
++ DEB3(printk(KERN_DEBUG "i2c-algo-pcf.o: Msg %d, addr=0x%x, flags=0x%x, len=%d\n",
+ i, msgs[i].addr, msgs[i].flags, msgs[i].len);)
+
+@@ -408,8 +401,8 @@
+
+ if (ret != pmsg->len) {
+- DEB2(printk("i2c-algo-pcf.o: fail: "
++ DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: fail: "
+ "only read %d bytes.\n",ret));
+ } else {
+- DEB2(printk("i2c-algo-pcf.o: read %d bytes.\n",ret));
++ DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: read %d bytes.\n",ret));
+ }
+ } else { /* Write */
+@@ -418,8 +411,8 @@
+
+ if (ret != pmsg->len) {
+- DEB2(printk("i2c-algo-pcf.o: fail: "
++ DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: fail: "
+ "only wrote %d bytes.\n",ret));
+ } else {
+- DEB2(printk("i2c-algo-pcf.o: wrote %d bytes.\n",ret));
++ DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: wrote %d bytes.\n",ret));
+ }
+ }
+@@ -429,10 +422,4 @@
+ }
+
+-static int algo_control(struct i2c_adapter *adapter,
+- unsigned int cmd, unsigned long arg)
+-{
+- return 0;
+-}
+-
+ static u32 pcf_func(struct i2c_adapter *adap)
+ {
+@@ -444,12 +431,9 @@
+
+ static struct i2c_algorithm pcf_algo = {
+- "PCF8584 algorithm",
+- I2C_ALGO_PCF,
+- pcf_xfer,
+- NULL,
+- NULL, /* slave_xmit */
+- NULL, /* slave_recv */
+- algo_control, /* ioctl */
+- pcf_func, /* functionality */
++ .owner = THIS_MODULE,
++ .name = "PCF8584 algorithm",
++ .id = I2C_ALGO_PCF,
++ .master_xfer = pcf_xfer,
++ .functionality = pcf_func,
+ };
+
+@@ -459,8 +443,8 @@
+ int i2c_pcf_add_bus(struct i2c_adapter *adap)
+ {
+- int i, status;
++ int i;
+ struct i2c_algo_pcf_data *pcf_adap = adap->algo_data;
+
+- DEB2(printk("i2c-algo-pcf.o: hw routines for %s registered.\n",
++ DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: hw routines for %s registered.\n",
+ adap->name));
+
+@@ -477,33 +461,5 @@
+ }
+
+-#ifdef MODULE
+- MOD_INC_USE_COUNT;
+-#endif
+-
+ i2c_add_adapter(adap);
+-
+- /* scan bus */
+- if (pcf_scan) {
+- printk(KERN_INFO " i2c-algo-pcf.o: scanning bus %s.\n",
+- adap->name);
+- for (i = 0x00; i < 0xff; i+=2) {
+- if (wait_for_bb(pcf_adap)) {
+- printk(KERN_INFO " i2c-algo-pcf.o: scanning bus %s - TIMEOUTed.\n",
+- adap->name);
+- break;
+- }
+- i2c_outb(pcf_adap, i);
+- i2c_start(pcf_adap);
+- if ((wait_for_pin(pcf_adap, &status) >= 0) &&
+- ((status & I2C_PCF_LRB) == 0)) {
+- printk("(%02x)",i>>1);
+- } else {
+- printk(".");
+- }
+- i2c_stop(pcf_adap);
+- udelay(pcf_adap->udelay);
+- }
+- printk("\n");
+- }
+ return 0;
+ }
+@@ -512,45 +468,15 @@
+ int i2c_pcf_del_bus(struct i2c_adapter *adap)
+ {
+- int res;
+- if ((res = i2c_del_adapter(adap)) < 0)
+- return res;
+- DEB2(printk("i2c-algo-pcf.o: adapter unregistered: %s\n",adap->name));
+-
+-#ifdef MODULE
+- MOD_DEC_USE_COUNT;
+-#endif
+- return 0;
+-}
+-
+-int __init i2c_algo_pcf_init (void)
+-{
+- printk("i2c-algo-pcf.o: i2c pcf8584 algorithm module\n");
+- return 0;
++ return i2c_del_adapter(adap);
+ }
+
+-
+ EXPORT_SYMBOL(i2c_pcf_add_bus);
+ EXPORT_SYMBOL(i2c_pcf_del_bus);
+
+-#ifdef MODULE
+ MODULE_AUTHOR("Hans Berglund <hb@spacetec.no>");
+ MODULE_DESCRIPTION("I2C-Bus PCF8584 algorithm");
+ MODULE_LICENSE("GPL");
+
+-MODULE_PARM(pcf_scan, "i");
+ MODULE_PARM(i2c_debug,"i");
+-
+-MODULE_PARM_DESC(pcf_scan, "Scan for active chips on the bus");
+ MODULE_PARM_DESC(i2c_debug,
+ "debug level - 0 off; 1 normal; 2,3 more verbose; 9 pcf-protocol");
+-
+-
+-int init_module(void)
+-{
+- return i2c_algo_pcf_init();
+-}
+-
+-void cleanup_module(void)
+-{
+-}
+-#endif
+--- linux-old/include/linux/i2c-algo-pcf.h Sun Aug 31 14:51:53 CEST 2003
++++ linux/include/linux/i2c-algo-pcf.h Sun Aug 31 14:51:53 CEST 2003
+@@ -23,11 +23,10 @@
+ Frodo Looijaard <frodol@dds.nl> */
+
+-/* $Id$ */
++/* $Id$ */
+
+-#ifndef I2C_ALGO_PCF_H
+-#define I2C_ALGO_PCF_H 1
++#ifndef _LINUX_I2C_ALGO_PCF_H
++#define _LINUX_I2C_ALGO_PCF_H
+
+-/* --- Defines for pcf-adapters --------------------------------------- */
+-#include <linux/i2c.h>
++#include <linux/i2c-pcf8584.h>
+
+ struct i2c_algo_pcf_data {
+@@ -50,3 +49,3 @@
+ int i2c_pcf_del_bus(struct i2c_adapter *);
+
+-#endif /* I2C_ALGO_PCF_H */
++#endif /* _LINUX_I2C_ALGO_PCF_H */
+--- linux-old/drivers/i2c/i2c-algo-sibyte.c Sun Aug 31 14:51:53 CEST 2003
++++ linux/drivers/i2c/i2c-algo-sibyte.c Sun Aug 31 14:51:53 CEST 2003
+@@ -1,231 +0,0 @@
+-/* ------------------------------------------------------------------------- */
+-/* i2c-algo-sibyte.c i2c driver algorithms for bit-shift adapters */
+-/* ------------------------------------------------------------------------- */
+-/* Copyright (C) 2001,2002,2003 Broadcom Corporation
+- Copyright (C) 1995-2000 Simon G. Vogl
+-
+- This program is free software; you can redistribute it and/or modify
+- it under the terms of the GNU General Public License as published by
+- the Free Software Foundation; either version 2 of the License, or
+- (at your option) any later version.
+-
+- This program is distributed in the hope that it will be useful,
+- but WITHOUT ANY WARRANTY; without even the implied warranty of
+- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- GNU General Public License for more details.
+-
+- You should have received a copy of the GNU General Public License
+- along with this program; if not, write to the Free Software
+- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+-/* ------------------------------------------------------------------------- */
+-
+-/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
+- Frodo Looijaard <frodol@dds.nl>. */
+-
+-/* Ported for SiByte SOCs by Broadcom Corporation. */
+-
+-#include <linux/config.h>
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/init.h>
+-
+-#include <asm/sibyte/64bit.h>
+-#include <asm/sibyte/sb1250_regs.h>
+-#include <asm/sibyte/sb1250_smbus.h>
+-
+-#include <linux/i2c.h>
+-#include <linux/i2c-algo-sibyte.h>
+-
+-/* ----- global defines ----------------------------------------------- */
+-#define SMB_CSR(a,r) ((long)(a->reg_base + r))
+-
+-/* ----- global variables --------------------------------------------- */
+-
+-/* module parameters:
+- */
+-static int bit_scan=0; /* have a look at what's hanging 'round */
+-
+-
+-static int smbus_xfer(struct i2c_adapter *i2c_adap, u16 addr,
+- unsigned short flags, char read_write,
+- u8 command, int size, union i2c_smbus_data * data)
+-{
+- struct i2c_algo_sibyte_data *adap = i2c_adap->algo_data;
+- int data_bytes = 0;
+- int error;
+-
+- while (csr_in32(SMB_CSR(adap, R_SMB_STATUS)) & M_SMB_BUSY)
+- ;
+-
+- switch (size) {
+- case I2C_SMBUS_QUICK:
+- csr_out32((V_SMB_ADDR(addr) | (read_write == I2C_SMBUS_READ ? M_SMB_QDATA : 0) |
+- V_SMB_TT_QUICKCMD), SMB_CSR(adap, R_SMB_START));
+- break;
+- case I2C_SMBUS_BYTE:
+- if (read_write == I2C_SMBUS_READ) {
+- csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_RD1BYTE),
+- SMB_CSR(adap, R_SMB_START));
+- data_bytes = 1;
+- } else {
+- csr_out32(V_SMB_CMD(command), SMB_CSR(adap, R_SMB_CMD));
+- csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_WR1BYTE),
+- SMB_CSR(adap, R_SMB_START));
+- }
+- break;
+- case I2C_SMBUS_BYTE_DATA:
+- csr_out32(V_SMB_CMD(command), SMB_CSR(adap, R_SMB_CMD));
+- if (read_write == I2C_SMBUS_READ) {
+- csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_CMD_RD1BYTE),
+- SMB_CSR(adap, R_SMB_START));
+- data_bytes = 1;
+- } else {
+- csr_out32(V_SMB_LB(data->byte), SMB_CSR(adap, R_SMB_DATA));
+- csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_WR2BYTE),
+- SMB_CSR(adap, R_SMB_START));
+- }
+- break;
+- case I2C_SMBUS_WORD_DATA:
+- csr_out32(V_SMB_CMD(command), SMB_CSR(adap, R_SMB_CMD));
+- if (read_write == I2C_SMBUS_READ) {
+- csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_CMD_RD2BYTE),
+- SMB_CSR(adap, R_SMB_START));
+- data_bytes = 2;
+- } else {
+- csr_out32(V_SMB_LB(data->word & 0xff), SMB_CSR(adap, R_SMB_DATA));
+- csr_out32(V_SMB_MB(data->word >> 8), SMB_CSR(adap, R_SMB_DATA));
+- csr_out32((V_SMB_ADDR(addr) | V_SMB_TT_WR2BYTE),
+- SMB_CSR(adap, R_SMB_START));
+- }
+- break;
+- default:
+- return -1; /* XXXKW better error code? */
+- }
+-
+- while (csr_in32(SMB_CSR(adap, R_SMB_STATUS)) & M_SMB_BUSY)
+- ;
+-
+- error = csr_in32(SMB_CSR(adap, R_SMB_STATUS));
+- if (error & M_SMB_ERROR) {
+- /* Clear error bit by writing a 1 */
+- csr_out32(M_SMB_ERROR, SMB_CSR(adap, R_SMB_STATUS));
+- return -1; /* XXXKW better error code? */
+- }
+-
+- if (data_bytes == 1)
+- data->byte = csr_in32(SMB_CSR(adap, R_SMB_DATA)) & 0xff;
+- if (data_bytes == 2)
+- data->word = csr_in32(SMB_CSR(adap, R_SMB_DATA)) & 0xffff;
+-
+- return 0;
+-}
+-
+-static int algo_control(struct i2c_adapter *adapter,
+- unsigned int cmd, unsigned long arg)
+-{
+- return 0;
+-}
+-
+-static u32 bit_func(struct i2c_adapter *adap)
+-{
+- return (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA);
+-}
+-
+-
+-/* -----exported algorithm data: ------------------------------------- */
+-
+-static struct i2c_algorithm i2c_sibyte_algo = {
+- "SiByte algorithm",
+- I2C_ALGO_SIBYTE,
+- NULL, /* master_xfer */
+- smbus_xfer, /* smbus_xfer */
+- NULL, /* slave_xmit */
+- NULL, /* slave_recv */
+- algo_control, /* ioctl */
+- bit_func, /* functionality */
+-};
+-
+-/*
+- * registering functions to load algorithms at runtime
+- */
+-int i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed)
+-{
+- int i;
+- struct i2c_algo_sibyte_data *adap = i2c_adap->algo_data;
+-
+- /* register new adapter to i2c module... */
+-
+- i2c_adap->id |= i2c_sibyte_algo.id;
+- i2c_adap->algo = &i2c_sibyte_algo;
+-
+- /* Set the frequency to 100 kHz */
+- csr_out32(speed, SMB_CSR(adap,R_SMB_FREQ));
+- csr_out32(0, SMB_CSR(adap,R_SMB_CONTROL));
+-
+- /* scan bus */
+- if (bit_scan) {
+- union i2c_smbus_data data;
+- int rc;
+- printk(KERN_INFO " i2c-algo-sibyte.o: scanning bus %s.\n",
+- i2c_adap->name);
+- for (i = 0x00; i < 0x7f; i++) {
+- /* XXXKW is this a realistic probe? */
+- rc = smbus_xfer(i2c_adap, i, 0, I2C_SMBUS_READ, 0,
+- I2C_SMBUS_BYTE_DATA, &data);
+- if (!rc) {
+- printk("(%02x)",i);
+- } else
+- printk(".");
+- }
+- printk("\n");
+- }
+-
+-#ifdef MODULE
+- MOD_INC_USE_COUNT;
+-#endif
+- i2c_add_adapter(i2c_adap);
+-
+- return 0;
+-}
+-
+-
+-int i2c_sibyte_del_bus(struct i2c_adapter *adap)
+-{
+- int res;
+-
+- if ((res = i2c_del_adapter(adap)) < 0)
+- return res;
+-
+-#ifdef MODULE
+- MOD_DEC_USE_COUNT;
+-#endif
+- return 0;
+-}
+-
+-int __init i2c_algo_sibyte_init (void)
+-{
+- printk("i2c-algo-sibyte.o: i2c SiByte algorithm module\n");
+- return 0;
+-}
+-
+-
+-EXPORT_SYMBOL(i2c_sibyte_add_bus);
+-EXPORT_SYMBOL(i2c_sibyte_del_bus);
+-
+-#ifdef MODULE
+-MODULE_AUTHOR("Kip Walker, Broadcom Corp.");
+-MODULE_DESCRIPTION("SiByte I2C-Bus algorithm");
+-MODULE_PARM(bit_scan, "i");
+-MODULE_PARM_DESC(bit_scan, "Scan for active chips on the bus");
+-MODULE_LICENSE("GPL");
+-
+-int init_module(void)
+-{
+- return i2c_algo_sibyte_init();
+-}
+-
+-void cleanup_module(void)
+-{
+-}
+-#endif
+--- linux-old/include/linux/i2c-algo-sibyte.h Sun Aug 31 14:51:53 CEST 2003
++++ linux/include/linux/i2c-algo-sibyte.h Sun Aug 31 14:51:53 CEST 2003
+@@ -1,33 +0,0 @@
+-/*
+- * Copyright (C) 2001,2002,2003 Broadcom Corporation
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License
+- * as published by the Free Software Foundation; either version 2
+- * of the License, or (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+- */
+-
+-#ifndef I2C_ALGO_SIBYTE_H
+-#define I2C_ALGO_SIBYTE_H 1
+-
+-#include <linux/i2c.h>
+-
+-struct i2c_algo_sibyte_data {
+- void *data; /* private data */
+- int bus; /* which bus */
+- void *reg_base; /* CSR base */
+-};
+-
+-int i2c_sibyte_add_bus(struct i2c_adapter *, int speed);
+-int i2c_sibyte_del_bus(struct i2c_adapter *);
+-
+-#endif /* I2C_ALGO_SIBYTE_H */
+--- linux-old/drivers/i2c/i2c-core.c Sun Aug 31 14:51:54 CEST 2003
++++ linux/drivers/i2c/i2c-core.c Sun Aug 31 14:51:54 CEST 2003
+@@ -19,7 +19,8 @@
+
+ /* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>.
+- All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl> */
++ All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl>
++ SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> */
+
+-/* $Id$ */
++/* i2c-core.c,v 1.91.2.2 2003/01/21 10:00:19 kmalkki Exp */
+
+ #include <linux/module.h>
+@@ -28,31 +29,10 @@
+ #include <linux/slab.h>
+ #include <linux/proc_fs.h>
+-#include <linux/config.h>
+-
+-#include <linux/i2c.h>
+-
+-/* ----- compatibility stuff ----------------------------------------------- */
+-
+-#include <linux/version.h>
+ #include <linux/init.h>
+-
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
+-#define init_MUTEX(s) do { *(s) = MUTEX; } while(0)
+-#endif
+-
++#include <linux/i2c.h>
+ #include <asm/uaccess.h>
+
+ /* ----- global defines ---------------------------------------------------- */
+
+-/* exclusive access to the bus */
+-#define I2C_LOCK(adap) down(&adap->lock)
+-#define I2C_UNLOCK(adap) up(&adap->lock)
+-
+-#define ADAP_LOCK() down(&adap_lock)
+-#define ADAP_UNLOCK() up(&adap_lock)
+-
+-#define DRV_LOCK() down(&driver_lock)
+-#define DRV_UNLOCK() up(&driver_lock)
+-
+ #define DEB(x) if (i2c_debug>=1) x;
+ #define DEB2(x) if (i2c_debug>=2) x;
+@@ -60,18 +40,10 @@
+ /* ----- global variables -------------------------------------------------- */
+
+-/**** lock for writing to global variables: the adapter & driver list */
+-struct semaphore adap_lock;
+-struct semaphore driver_lock;
+-
+-/**** adapter list */
++DECLARE_MUTEX(core_lists);
+ static struct i2c_adapter *adapters[I2C_ADAP_MAX];
+-static int adap_count;
+-
+-/**** drivers list */
+ static struct i2c_driver *drivers[I2C_DRIVER_MAX];
+-static int driver_count;
+
+ /**** debug level */
+-static int i2c_debug=1;
++static int i2c_debug;
+
+ /* ---------------------------------------------------
+@@ -81,12 +53,4 @@
+
+ #ifdef CONFIG_PROC_FS
+-
+-static int i2cproc_init(void);
+-static int i2cproc_cleanup(void);
+-
+-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,3,27))
+-static void monitor_bus_i2c(struct inode *inode, int fill);
+-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,58)) */
+-
+ static ssize_t i2cproc_bus_read(struct file * file, char * buf,size_t count,
+ loff_t *ppos);
+@@ -97,19 +61,9 @@
+ implementation of the read hook */
+ static struct file_operations i2cproc_operations = {
+- read: i2cproc_bus_read,
++ .read = i2cproc_bus_read,
+ };
+
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,48))
+-static struct inode_operations i2cproc_inode_operations = {
+- &i2cproc_operations
+-};
+-#endif
+-
+-static int i2cproc_initialized = 0;
+-
+-#else /* undef CONFIG_PROC_FS */
+-
+-#define i2cproc_init() 0
+-#define i2cproc_cleanup() 0
++static int i2cproc_register(struct i2c_adapter *adap, int bus);
++static void i2cproc_remove(int bus);
+
+ #endif /* CONFIG_PROC_FS */
+@@ -128,7 +82,7 @@
+ int i2c_add_adapter(struct i2c_adapter *adap)
+ {
+- int i,j,res;
++ int i,j,res = 0;
+
+- ADAP_LOCK();
++ down(&core_lists);
+ for (i = 0; i < I2C_ADAP_MAX; i++)
+ if (NULL == adapters[i])
+@@ -141,45 +95,18 @@
+ goto ERROR0;
+ }
++
++#ifdef CONFIG_PROC_FS
++ res = i2cproc_register(adap, i);
++ if (res<0)
++ goto ERROR0;
++#endif /* def CONFIG_PROC_FS */
+
+ adapters[i] = adap;
+- adap_count++;
+- ADAP_UNLOCK();
+
+ /* init data types */
+- init_MUTEX(&adap->lock);
+-
+-#ifdef CONFIG_PROC_FS
+-
+- if (i2cproc_initialized) {
+- char name[8];
+- struct proc_dir_entry *proc_entry;
+-
+- sprintf(name,"i2c-%d", i);
+-
+- proc_entry = create_proc_entry(name,0,proc_bus);
+- if (! proc_entry) {
+- printk("i2c-core.o: Could not create /proc/bus/%s\n",
+- name);
+- res = -ENOENT;
+- goto ERROR1;
+- }
+-
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,48))
+- proc_entry->proc_fops = &i2cproc_operations;
+-#else
+- proc_entry->ops = &i2cproc_inode_operations;
+-#endif
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,27))
+- proc_entry->owner = THIS_MODULE;
+-#else
+- proc_entry->fill_inode = &monitor_bus_i2c;
+-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,58)) */
+- adap->inode = proc_entry->low_ino;
+- }
+-
+-#endif /* def CONFIG_PROC_FS */
++ init_MUTEX(&adap->bus);
++ init_MUTEX(&adap->list);
+
+ /* inform drivers of new adapters */
+- DRV_LOCK();
+ for (j=0;j<I2C_DRIVER_MAX;j++)
+ if (drivers[j]!=NULL &&
+@@ -187,18 +114,9 @@
+ /* We ignore the return code; if it fails, too bad */
+ drivers[j]->attach_adapter(adap);
+- DRV_UNLOCK();
+
+- DEB(printk("i2c-core.o: adapter %s registered as adapter %d.\n",
++ DEB(printk(KERN_DEBUG "i2c-core.o: adapter %s registered as adapter %d.\n",
+ adap->name,i));
+-
+- return 0;
+-
+-
+-ERROR1:
+- ADAP_LOCK();
+- adapters[i] = NULL;
+- adap_count--;
+-ERROR0:
+- ADAP_UNLOCK();
++ ERROR0:
++ up(&core_lists);
+ return res;
+ }
+@@ -207,13 +125,12 @@
+ int i2c_del_adapter(struct i2c_adapter *adap)
+ {
+- int i,j,res;
+-
+- ADAP_LOCK();
++ int i,j, res=0;
+
++ down(&core_lists);
+ for (i = 0; i < I2C_ADAP_MAX; i++)
+ if (adap == adapters[i])
+ break;
+ if (I2C_ADAP_MAX == i) {
+- printk( "i2c-core.o: unregister_adapter adap [%s] not found.\n",
++ printk( KERN_WARNING "i2c-core.o: unregister_adapter adap [%s] not found.\n",
+ adap->name);
+ res = -ENODEV;
+@@ -226,15 +143,12 @@
+ * this or hell will break loose...
+ */
+- DRV_LOCK();
+ for (j = 0; j < I2C_DRIVER_MAX; j++)
+ if (drivers[j] && (drivers[j]->flags & I2C_DF_DUMMY))
+ if ((res = drivers[j]->attach_adapter(adap))) {
+- printk("i2c-core.o: can't detach adapter %s "
++ printk(KERN_WARNING "i2c-core.o: can't detach adapter %s "
+ "while detaching driver %s: driver not "
+ "detached!",adap->name,drivers[j]->name);
+- goto ERROR1;
++ goto ERROR0;
+ }
+- DRV_UNLOCK();
+-
+
+ /* detach any active clients. This must be done first, because
+@@ -248,5 +162,5 @@
+ */
+ if ((res=client->driver->detach_client(client))) {
+- printk("i2c-core.o: adapter %s not "
++ printk(KERN_ERR "i2c-core.o: adapter %s not "
+ "unregistered, because client at "
+ "address %02x can't be detached. ",
+@@ -255,24 +169,13 @@
+ }
+ }
++
+ #ifdef CONFIG_PROC_FS
+- if (i2cproc_initialized) {
+- char name[8];
+- sprintf(name,"i2c-%d", i);
+- remove_proc_entry(name,proc_bus);
+- }
++ i2cproc_remove(i);
+ #endif /* def CONFIG_PROC_FS */
+
+ adapters[i] = NULL;
+- adap_count--;
+-
+- ADAP_UNLOCK();
+- DEB(printk("i2c-core.o: adapter unregistered: %s\n",adap->name));
+- return 0;
+-
+-ERROR0:
+- ADAP_UNLOCK();
+- return res;
+-ERROR1:
+- DRV_UNLOCK();
++ DEB(printk(KERN_DEBUG "i2c-core.o: adapter unregistered: %s\n",adap->name));
++ ERROR0:
++ up(&core_lists);
+ return res;
+ }
+@@ -288,5 +191,6 @@
+ {
+ int i;
+- DRV_LOCK();
++
++ down(&core_lists);
+ for (i = 0; i < I2C_DRIVER_MAX; i++)
+ if (NULL == drivers[i])
+@@ -297,17 +201,10 @@
+ "- enlarge I2C_DRIVER_MAX.\n",
+ driver->name);
+- DRV_UNLOCK();
++ up(&core_lists);
+ return -ENOMEM;
+ }
+-
+- drivers[i] = driver;
+- driver_count++;
+-
+- DRV_UNLOCK(); /* driver was successfully added */
+-
+- DEB(printk("i2c-core.o: driver %s registered.\n",driver->name));
++ drivers[i] = driver;
++ DEB(printk(KERN_DEBUG "i2c-core.o: driver %s registered.\n",driver->name));
+
+- ADAP_LOCK();
+-
+ /* now look for instances of driver on our adapters
+ */
+@@ -318,5 +215,5 @@
+ driver->attach_adapter(adapters[i]);
+ }
+- ADAP_UNLOCK();
++ up(&core_lists);
+ return 0;
+ }
+@@ -324,7 +221,7 @@
+ int i2c_del_driver(struct i2c_driver *driver)
+ {
+- int i,j,k,res;
++ int i,j,k,res = 0;
+
+- DRV_LOCK();
++ down(&core_lists);
+ for (i = 0; i < I2C_DRIVER_MAX; i++)
+ if (driver == drivers[i])
+@@ -334,5 +231,5 @@
+ "[%s] not found\n",
+ driver->name);
+- DRV_UNLOCK();
++ up(&core_lists);
+ return -ENODEV;
+ }
+@@ -341,15 +238,14 @@
+ * afterwards.
+ */
+- DEB2(printk("i2c-core.o: unregister_driver - looking for clients.\n"));
++ DEB2(printk(KERN_DEBUG "i2c-core.o: unregister_driver - looking for clients.\n"));
+ /* removing clients does not depend on the notify flag, else
+ * invalid operation might (will!) result, when using stale client
+ * pointers.
+ */
+- ADAP_LOCK(); /* should be moved inside the if statement... */
+ for (k=0;k<I2C_ADAP_MAX;k++) {
+ struct i2c_adapter *adap = adapters[k];
+ if (adap == NULL) /* skip empty entries. */
+ continue;
+- DEB2(printk("i2c-core.o: examining adapter %s:\n",
++ DEB2(printk(KERN_DEBUG "i2c-core.o: examining adapter %s:\n",
+ adap->name));
+ if (driver->flags & I2C_DF_DUMMY) {
+@@ -360,11 +256,10 @@
+ */
+ if ((res = driver->attach_adapter(adap))) {
+- printk("i2c-core.o: while unregistering "
++ printk(KERN_WARNING "i2c-core.o: while unregistering "
+ "dummy driver %s, adapter %s could "
+ "not be detached properly; driver "
+ "not unloaded!",driver->name,
+ adap->name);
+- ADAP_UNLOCK();
+- return res;
++ goto ERROR0;
+ }
+ } else {
+@@ -373,5 +268,5 @@
+ if (client != NULL &&
+ client->driver == driver) {
+- DEB2(printk("i2c-core.o: "
++ DEB2(printk(KERN_DEBUG "i2c-core.o: "
+ "detaching client %s:\n",
+ client->name));
+@@ -379,16 +274,15 @@
+ detach_client(client)))
+ {
+- printk("i2c-core.o: while "
++ printk(KERN_ERR "i2c-core.o: while "
+ "unregistering driver "
+ "`%s', the client at "
+ "address %02x of "
+- "adapter `%s' could not"
+- "be detached; driver"
++ "adapter `%s' could not "
++ "be detached; driver "
+ "not unloaded!",
+ driver->name,
+ client->addr,
+ adap->name);
+- ADAP_UNLOCK();
+- return res;
++ goto ERROR0;
+ }
+ }
+@@ -396,14 +290,13 @@
+ }
+ }
+- ADAP_UNLOCK();
+ drivers[i] = NULL;
+- driver_count--;
+- DRV_UNLOCK();
+-
+- DEB(printk("i2c-core.o: driver unregistered: %s\n",driver->name));
+- return 0;
++ DEB(printk(KERN_DEBUG "i2c-core.o: driver unregistered: %s\n",driver->name));
++
++ ERROR0:
++ up(&core_lists);
++ return res;
+ }
+
+-int i2c_check_addr (struct i2c_adapter *adapter, int addr)
++static int __i2c_check_addr (struct i2c_adapter *adapter, int addr)
+ {
+ int i;
+@@ -411,7 +304,19 @@
+ if (adapter->clients[i] && (adapter->clients[i]->addr == addr))
+ return -EBUSY;
++
+ return 0;
+ }
+
++int i2c_check_addr (struct i2c_adapter *adapter, int addr)
++{
++ int rval;
++
++ down(&adapter->list);
++ rval = __i2c_check_addr(adapter, addr);
++ up(&adapter->list);
++
++ return rval;
++}
++
+ int i2c_attach_client(struct i2c_client *client)
+ {
+@@ -422,4 +327,5 @@
+ return -EBUSY;
+
++ down(&adapter->list);
+ for (i = 0; i < I2C_CLIENT_MAX; i++)
+ if (NULL == adapter->clients[i])
+@@ -429,16 +335,16 @@
+ " i2c-core.o: attach_client(%s) - enlarge I2C_CLIENT_MAX.\n",
+ client->name);
++ up(&adapter->list);
+ return -ENOMEM;
+ }
+-
+ adapter->clients[i] = client;
+- adapter->client_count++;
++ up(&adapter->list);
+
+ if (adapter->client_register)
+ if (adapter->client_register(client))
+- printk("i2c-core.o: warning: client_register seems "
++ printk(KERN_DEBUG "i2c-core.o: warning: client_register seems "
+ "to have failed for client %02x at adapter %s\n",
+ client->addr,adapter->name);
+- DEB(printk("i2c-core.o: client [%s] registered to adapter [%s](pos. %d).\n",
++ DEB(printk(KERN_DEBUG "i2c-core.o: client [%s] registered to adapter [%s](pos. %d).\n",
+ client->name, adapter->name,i));
+
+@@ -455,14 +361,4 @@
+ int i,res;
+
+- for (i = 0; i < I2C_CLIENT_MAX; i++)
+- if (client == adapter->clients[i])
+- break;
+- if (I2C_CLIENT_MAX == i) {
+- printk(KERN_WARNING " i2c-core.o: unregister_client "
+- "[%s] not found\n",
+- client->name);
+- return -ENODEV;
+- }
+-
+ if( (client->flags & I2C_CLIENT_ALLOW_USE) &&
+ (client->usage_count>0))
+@@ -471,36 +367,44 @@
+ if (adapter->client_unregister != NULL)
+ if ((res = adapter->client_unregister(client))) {
+- printk("i2c-core.o: client_unregister [%s] failed, "
++ printk(KERN_ERR "i2c-core.o: client_unregister [%s] failed, "
+ "client not detached",client->name);
+ return res;
+ }
+
++ down(&adapter->list);
++ for (i = 0; i < I2C_CLIENT_MAX; i++)
++ if (client == adapter->clients[i])
++ break;
++ if (I2C_CLIENT_MAX == i) {
++ printk(KERN_WARNING " i2c-core.o: unregister_client "
++ "[%s] not found\n",
++ client->name);
++ up(&adapter->list);
++ return -ENODEV;
++ }
+ adapter->clients[i] = NULL;
+- adapter->client_count--;
++ up(&adapter->list);
+
+- DEB(printk("i2c-core.o: client [%s] unregistered.\n",client->name));
++ DEB(printk(KERN_DEBUG "i2c-core.o: client [%s] unregistered.\n",client->name));
+ return 0;
+ }
+
+-void i2c_inc_use_client(struct i2c_client *client)
++static void i2c_inc_use_client(struct i2c_client *client)
+ {
+-
+- if (client->driver->inc_use != NULL)
+- client->driver->inc_use(client);
+-
+- if (client->adapter->inc_use != NULL)
+- client->adapter->inc_use(client->adapter);
++ if(client->driver->owner)
++ __MOD_INC_USE_COUNT(client->driver->owner);
++ if(client->adapter->owner)
++ __MOD_INC_USE_COUNT(client->adapter->owner);
+ }
+
+-void i2c_dec_use_client(struct i2c_client *client)
++static void i2c_dec_use_client(struct i2c_client *client)
+ {
+-
+- if (client->driver->dec_use != NULL)
+- client->driver->dec_use(client);
+-
+- if (client->adapter->dec_use != NULL)
+- client->adapter->dec_use(client->adapter);
++ if(client->driver->owner)
++ __MOD_DEC_USE_COUNT(client->driver->owner);
++ if(client->adapter->owner)
++ __MOD_DEC_USE_COUNT(client->adapter->owner);
+ }
+
++#if 0 /* just forget about this for now --km */
+ struct i2c_client *i2c_get_client(int driver_id, int adapter_id,
+ struct i2c_client *prev)
+@@ -569,16 +473,15 @@
+ return 0;
+ }
++#endif
+
+ int i2c_use_client(struct i2c_client *client)
+ {
+- if(client->flags & I2C_CLIENT_ALLOW_USE) {
+- if (client->flags & I2C_CLIENT_ALLOW_MULTIPLE_USE)
++ if (client->flags & I2C_CLIENT_ALLOW_USE) {
++ if (client->flags & I2C_CLIENT_ALLOW_MULTIPLE_USE)
++ client->usage_count++;
++ else if (client->usage_count > 0)
++ return -EBUSY;
++ else
+ client->usage_count++;
+- else {
+- if(client->usage_count > 0)
+- return -EBUSY;
+- else
+- client->usage_count++;
+- }
+ }
+
+@@ -612,18 +515,6 @@
+ #ifdef CONFIG_PROC_FS
+
+-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,3,27))
+-/* Monitor access to /proc/bus/i2c*; make unloading i2c-proc impossible
+- if some process still uses it or some file in it */
+-void monitor_bus_i2c(struct inode *inode, int fill)
+-{
+- if (fill)
+- MOD_INC_USE_COUNT;
+- else
+- MOD_DEC_USE_COUNT;
+-}
+-#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2,3,37)) */
+-
+ /* This function generates the output for /proc/bus/i2c */
+-int read_bus_i2c(char *buf, char **start, off_t offset, int len, int *eof,
++static int read_bus_i2c(char *buf, char **start, off_t offset, int len, int *eof,
+ void *private)
+ {
+@@ -631,4 +522,5 @@
+ int nr = 0;
+ /* Note that it is safe to write a `little' beyond len. Yes, really. */
++ down(&core_lists);
+ for (i = 0; (i < I2C_ADAP_MAX) && (nr < len); i++)
+ if (adapters[i]) {
+@@ -647,4 +539,5 @@
+ adapters[i]->algo->name);
+ }
++ up(&core_lists);
+ return nr;
+ }
+@@ -657,100 +550,123 @@
+ char *kbuf;
+ struct i2c_client *client;
++ struct i2c_adapter *adap;
+ int i,j,k,order_nr,len=0;
+ size_t len_total;
+ int order[I2C_CLIENT_MAX];
++#define OUTPUT_LENGTH_PER_LINE 70
+
+- if (count > 4000)
+- return -EINVAL;
+ len_total = file->f_pos + count;
+- /* Too bad if this gets longer (unlikely) */
+- if (len_total > 4000)
+- len_total = 4000;
+- for (i = 0; i < I2C_ADAP_MAX; i++)
+- if (adapters[i]->inode == inode->i_ino) {
+- /* We need a bit of slack in the kernel buffer; this makes the
+- sprintf safe. */
+- if (! (kbuf = kmalloc(count + 80,GFP_KERNEL)))
+- return -ENOMEM;
+- /* Order will hold the indexes of the clients
+- sorted by address */
+- order_nr=0;
+- for (j = 0; j < I2C_CLIENT_MAX; j++) {
+- if ((client = adapters[i]->clients[j]) &&
+- (client->driver->id != I2C_DRIVERID_I2CDEV)) {
+- for(k = order_nr;
+- (k > 0) &&
+- adapters[i]->clients[order[k-1]]->
+- addr > client->addr;
+- k--)
+- order[k] = order[k-1];
+- order[k] = j;
+- order_nr++;
+- }
+- }
+-
+-
+- for (j = 0; (j < order_nr) && (len < len_total); j++) {
+- client = adapters[i]->clients[order[j]];
+- len += sprintf(kbuf+len,"%02x\t%-32s\t%-32s\n",
+- client->addr,
+- client->name,
+- client->driver->name);
+- }
+- len = len - file->f_pos;
+- if (len > count)
+- len = count;
+- if (len < 0)
+- len = 0;
+- if (copy_to_user (buf,kbuf+file->f_pos, len)) {
+- kfree(kbuf);
+- return -EFAULT;
+- }
+- file->f_pos += len;
+- kfree(kbuf);
+- return len;
+- }
+- return -ENOENT;
++ if (len_total > (I2C_CLIENT_MAX * OUTPUT_LENGTH_PER_LINE) )
++ /* adjust to maximum file size */
++ len_total = (I2C_CLIENT_MAX * OUTPUT_LENGTH_PER_LINE);
++
++ down(&core_lists);
++ /* adap = file->private_data; ?? --km */
++ for (i = 0; i < I2C_ADAP_MAX; i++) {
++ adap = adapters[i];
++ if (adap && (adap->inode == inode->i_ino))
++ break;
++ }
++ if ( I2C_ADAP_MAX == i ) {
++ up(&core_lists);
++ return -ENOENT;
++ }
++
++ /* We need a bit of slack in the kernel buffer; this makes the
++ sprintf safe. */
++	if (!(kbuf = kmalloc(len_total + OUTPUT_LENGTH_PER_LINE, GFP_KERNEL))) {
++		up(&core_lists);
++		return -ENOMEM;
++	}
++
++ /* Order will hold the indexes of the clients
++ sorted by address */
++ order_nr=0;
++ down(&adap->list);
++ for (j = 0; j < I2C_CLIENT_MAX; j++) {
++ if ((client = adap->clients[j]) &&
++ (client->driver->id != I2C_DRIVERID_I2CDEV)) {
++ for(k = order_nr;
++ (k > 0) &&
++ adap->clients[order[k-1]]->
++ addr > client->addr;
++ k--)
++ order[k] = order[k-1];
++ order[k] = j;
++ order_nr++;
++ }
++ }
++
++
++ for (j = 0; (j < order_nr) && (len < len_total); j++) {
++ client = adap->clients[order[j]];
++ len += sprintf(kbuf+len,"%02x\t%-32s\t%-32s\n",
++ client->addr,
++ client->name,
++ client->driver->name);
++ }
++ up(&adap->list);
++ up(&core_lists);
++
++ len = len - file->f_pos;
++ if (len > count)
++ len = count;
++ if (len < 0)
++ len = 0;
++ if (copy_to_user (buf,kbuf+file->f_pos, len)) {
++ kfree(kbuf);
++ return -EFAULT;
++ }
++ file->f_pos += len;
++ kfree(kbuf);
++ return len;
++}
++
++static int i2cproc_register(struct i2c_adapter *adap, int bus)
++{
++ char name[8];
++ struct proc_dir_entry *proc_entry;
++
++ sprintf(name,"i2c-%d", bus);
++ proc_entry = create_proc_entry(name,0,proc_bus);
++ if (! proc_entry) {
++ printk(KERN_ERR "i2c-core.o: Could not create /proc/bus/%s\n",
++ name);
++ return -ENOENT;
++ }
++
++ proc_entry->proc_fops = &i2cproc_operations;
++ proc_entry->owner = adap->owner;
++ adap->inode = proc_entry->low_ino;
++ return 0;
+ }
+
+-int i2cproc_init(void)
++static void i2cproc_remove(int bus)
+ {
++ char name[8];
++ sprintf(name,"i2c-%d", bus);
++ remove_proc_entry(name, proc_bus);
++}
+
++static int __init i2cproc_init(void)
++{
+ struct proc_dir_entry *proc_bus_i2c;
+
+- i2cproc_initialized = 0;
+-
+- if (! proc_bus) {
+- printk("i2c-core.o: /proc/bus/ does not exist");
+- i2cproc_cleanup();
+- return -ENOENT;
+- }
+ proc_bus_i2c = create_proc_entry("i2c",0,proc_bus);
+ if (!proc_bus_i2c) {
+- printk("i2c-core.o: Could not create /proc/bus/i2c");
+- i2cproc_cleanup();
++ printk(KERN_ERR "i2c-core.o: Could not create /proc/bus/i2c");
+ return -ENOENT;
+ }
++
+ proc_bus_i2c->read_proc = &read_bus_i2c;
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,27))
+ proc_bus_i2c->owner = THIS_MODULE;
+-#else
+- proc_bus_i2c->fill_inode = &monitor_bus_i2c;
+-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,27)) */
+- i2cproc_initialized += 2;
+ return 0;
+ }
+
+-int i2cproc_cleanup(void)
++static void __exit i2cproc_cleanup(void)
+ {
+-
+- if (i2cproc_initialized >= 1) {
+- remove_proc_entry("i2c",proc_bus);
+- i2cproc_initialized -= 2;
+- }
+- return 0;
++ remove_proc_entry("i2c",proc_bus);
+ }
+
+-
+ #endif /* def CONFIG_PROC_FS */
+
+@@ -765,14 +681,14 @@
+
+ if (adap->algo->master_xfer) {
+- DEB2(printk("i2c-core.o: master_xfer: %s with %d msgs.\n",
++ DEB2(printk(KERN_DEBUG "i2c-core.o: master_xfer: %s with %d msgs.\n",
+ adap->name,num));
+
+- I2C_LOCK(adap);
++ down(&adap->bus);
+ ret = adap->algo->master_xfer(adap,msgs,num);
+- I2C_UNLOCK(adap);
++ up(&adap->bus);
+
+ return ret;
+ } else {
+- printk("i2c-core.o: I2C adapter %04x: I2C level transfers not supported\n",
++ printk(KERN_ERR "i2c-core.o: I2C adapter %04x: I2C level transfers not supported\n",
+ adap->id);
+ return -ENOSYS;
+@@ -792,10 +708,10 @@
+ (const char *)msg.buf = buf;
+
+- DEB2(printk("i2c-core.o: master_send: writing %d bytes on %s.\n",
++ DEB2(printk(KERN_DEBUG "i2c-core.o: master_send: writing %d bytes on %s.\n",
+ count,client->adapter->name));
+
+- I2C_LOCK(adap);
++ down(&adap->bus);
+ ret = adap->algo->master_xfer(adap,&msg,1);
+- I2C_UNLOCK(adap);
++ up(&adap->bus);
+
+ /* if everything went ok (i.e. 1 msg transmitted), return #bytes
+@@ -804,5 +720,5 @@
+ return (ret == 1 )? count : ret;
+ } else {
+- printk("i2c-core.o: I2C adapter %04x: I2C level transfers not supported\n",
++ printk(KERN_ERR "i2c-core.o: I2C adapter %04x: I2C level transfers not supported\n",
+ client->adapter->id);
+ return -ENOSYS;
+@@ -822,12 +738,12 @@
+ msg.buf = buf;
+
+- DEB2(printk("i2c-core.o: master_recv: reading %d bytes on %s.\n",
++ DEB2(printk(KERN_DEBUG "i2c-core.o: master_recv: reading %d bytes on %s.\n",
+ count,client->adapter->name));
+
+- I2C_LOCK(adap);
++ down(&adap->bus);
+ ret = adap->algo->master_xfer(adap,&msg,1);
+- I2C_UNLOCK(adap);
++ up(&adap->bus);
+
+- DEB2(printk("i2c-core.o: master_recv: return:%d (count:%d, addr:0x%02x)\n",
++ DEB2(printk(KERN_DEBUG "i2c-core.o: master_recv: return:%d (count:%d, addr:0x%02x)\n",
+ ret, count, client->addr));
+
+@@ -837,5 +753,5 @@
+ return (ret == 1 )? count : ret;
+ } else {
+- printk("i2c-core.o: I2C adapter %04x: I2C level transfers not supported\n",
++		printk(KERN_ERR "i2c-core.o: I2C adapter %04x: I2C level transfers not supported\n",
+ client->adapter->id);
+ return -ENOSYS;
+@@ -850,5 +766,5 @@
+ struct i2c_adapter *adap = client->adapter;
+
+- DEB2(printk("i2c-core.o: i2c ioctl, cmd: 0x%x, arg: %#lx\n", cmd, arg));
++ DEB2(printk(KERN_DEBUG "i2c-core.o: i2c ioctl, cmd: 0x%x, arg: %#lx\n", cmd, arg));
+ switch ( cmd ) {
+ case I2C_RETRIES:
+@@ -895,5 +811,5 @@
+ (address_data->force[i] == ANY_I2C_BUS)) &&
+ (addr == address_data->force[i+1])) {
+- DEB2(printk("i2c-core.o: found force parameter for adapter %d, addr %04x\n",
++ DEB2(printk(KERN_DEBUG "i2c-core.o: found force parameter for adapter %d, addr %04x\n",
+ adap_id,addr));
+ if ((err = found_proc(adapter,addr,0,0)))
+@@ -913,5 +829,5 @@
+ ((address_data->ignore[i] == ANY_I2C_BUS))) &&
+ (addr == address_data->ignore[i+1])) {
+- DEB2(printk("i2c-core.o: found ignore parameter for adapter %d, "
++ DEB2(printk(KERN_DEBUG "i2c-core.o: found ignore parameter for adapter %d, "
+ "addr %04x\n", adap_id ,addr));
+ found = 1;
+@@ -925,5 +841,5 @@
+ (addr >= address_data->ignore_range[i+1]) &&
+ (addr <= address_data->ignore_range[i+2])) {
+- DEB2(printk("i2c-core.o: found ignore_range parameter for adapter %d, "
++ DEB2(printk(KERN_DEBUG "i2c-core.o: found ignore_range parameter for adapter %d, "
+ "addr %04x\n", adap_id,addr));
+ found = 1;
+@@ -940,5 +856,5 @@
+ if (addr == address_data->normal_i2c[i]) {
+ found = 1;
+- DEB2(printk("i2c-core.o: found normal i2c entry for adapter %d, "
++ DEB2(printk(KERN_DEBUG "i2c-core.o: found normal i2c entry for adapter %d, "
+ "addr %02x", adap_id,addr));
+ }
+@@ -951,5 +867,5 @@
+ (addr <= address_data->normal_i2c_range[i+1])) {
+ found = 1;
+- DEB2(printk("i2c-core.o: found normal i2c_range entry for adapter %d, "
++ DEB2(printk(KERN_DEBUG "i2c-core.o: found normal i2c_range entry for adapter %d, "
+ "addr %04x\n", adap_id,addr));
+ }
+@@ -963,5 +879,5 @@
+ (addr == address_data->probe[i+1])) {
+ found = 1;
+- DEB2(printk("i2c-core.o: found probe parameter for adapter %d, "
++ DEB2(printk(KERN_DEBUG "i2c-core.o: found probe parameter for adapter %d, "
+ "addr %04x\n", adap_id,addr));
+ }
+@@ -975,5 +891,5 @@
+ (addr <= address_data->probe_range[i+2])) {
+ found = 1;
+- DEB2(printk("i2c-core.o: found probe_range parameter for adapter %d, "
++ DEB2(printk(KERN_DEBUG "i2c-core.o: found probe_range parameter for adapter %d, "
+ "addr %04x\n", adap_id,addr));
+ }
+@@ -1005,4 +921,121 @@
+ /* The SMBus parts */
+
++#define POLY (0x1070U << 3)
++static u8
++crc8(u16 data)
++{
++ int i;
++
++ for(i = 0; i < 8; i++) {
++ if (data & 0x8000)
++ data = data ^ POLY;
++ data = data << 1;
++ }
++ return (u8)(data >> 8);
++}
++
++/* CRC over count bytes in the first array plus the bytes in the rest
++ array if it is non-null. rest[0] is the (length of rest) - 1
++ and is included. */
++u8 i2c_smbus_partial_pec(u8 crc, int count, u8 *first, u8 *rest)
++{
++ int i;
++
++ for(i = 0; i < count; i++)
++ crc = crc8((crc ^ first[i]) << 8);
++ if(rest != NULL)
++ for(i = 0; i <= rest[0]; i++)
++ crc = crc8((crc ^ rest[i]) << 8);
++ return crc;
++}
++
++u8 i2c_smbus_pec(int count, u8 *first, u8 *rest)
++{
++ return i2c_smbus_partial_pec(0, count, first, rest);
++}
++
++/* Returns new "size" (transaction type)
++ Note that we convert byte to byte_data and byte_data to word_data
++ rather than invent new xxx_PEC transactions. */
++int i2c_smbus_add_pec(u16 addr, u8 command, int size,
++ union i2c_smbus_data *data)
++{
++ u8 buf[3];
++
++ buf[0] = addr << 1;
++ buf[1] = command;
++ switch(size) {
++ case I2C_SMBUS_BYTE:
++ data->byte = i2c_smbus_pec(2, buf, NULL);
++ size = I2C_SMBUS_BYTE_DATA;
++ break;
++ case I2C_SMBUS_BYTE_DATA:
++ buf[2] = data->byte;
++		data->word = buf[2] |
++			(i2c_smbus_pec(3, buf, NULL) << 8);
++ size = I2C_SMBUS_WORD_DATA;
++ break;
++ case I2C_SMBUS_WORD_DATA:
++ /* unsupported */
++ break;
++ case I2C_SMBUS_BLOCK_DATA:
++ data->block[data->block[0] + 1] =
++ i2c_smbus_pec(2, buf, data->block);
++ size = I2C_SMBUS_BLOCK_DATA_PEC;
++ break;
++ }
++ return size;
++}
++
++int i2c_smbus_check_pec(u16 addr, u8 command, int size, u8 partial,
++ union i2c_smbus_data *data)
++{
++ u8 buf[3], rpec, cpec;
++
++ buf[1] = command;
++ switch(size) {
++ case I2C_SMBUS_BYTE_DATA:
++ buf[0] = (addr << 1) | 1;
++ cpec = i2c_smbus_pec(2, buf, NULL);
++ rpec = data->byte;
++ break;
++ case I2C_SMBUS_WORD_DATA:
++ buf[0] = (addr << 1) | 1;
++ buf[2] = data->word & 0xff;
++ cpec = i2c_smbus_pec(3, buf, NULL);
++ rpec = data->word >> 8;
++ break;
++ case I2C_SMBUS_WORD_DATA_PEC:
++ /* unsupported */
++ cpec = rpec = 0;
++ break;
++ case I2C_SMBUS_PROC_CALL_PEC:
++ /* unsupported */
++ cpec = rpec = 0;
++ break;
++ case I2C_SMBUS_BLOCK_DATA_PEC:
++ buf[0] = (addr << 1);
++ buf[2] = (addr << 1) | 1;
++ cpec = i2c_smbus_pec(3, buf, data->block);
++ rpec = data->block[data->block[0] + 1];
++ break;
++ case I2C_SMBUS_BLOCK_PROC_CALL_PEC:
++ buf[0] = (addr << 1) | 1;
++ rpec = i2c_smbus_partial_pec(partial, 1,
++ buf, data->block);
++ cpec = data->block[data->block[0] + 1];
++ break;
++ default:
++ cpec = rpec = 0;
++ break;
++ }
++ if(rpec != cpec) {
++ DEB(printk(KERN_DEBUG "i2c-core.o: Bad PEC 0x%02x vs. 0x%02x\n",
++ rpec, cpec));
++ return -1;
++ }
++ return 0;
++}
++
+ extern s32 i2c_smbus_write_quick(struct i2c_client * client, u8 value)
+ {
+@@ -1023,6 +1056,7 @@
+ extern s32 i2c_smbus_write_byte(struct i2c_client * client, u8 value)
+ {
++ union i2c_smbus_data data; /* only for PEC */
+ return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
+- I2C_SMBUS_WRITE,value, I2C_SMBUS_BYTE,NULL);
++ I2C_SMBUS_WRITE,value, I2C_SMBUS_BYTE,&data);
+ }
+
+@@ -1102,6 +1136,6 @@
+ union i2c_smbus_data data;
+ int i;
+- if (length > 32)
+- length = 32;
++ if (length > I2C_SMBUS_BLOCK_MAX)
++ length = I2C_SMBUS_BLOCK_MAX;
+ for (i = 1; i <= length; i++)
+ data.block[i] = values[i-1];
+@@ -1112,4 +1146,41 @@
+ }
+
++/* Returns the number of read bytes */
++extern s32 i2c_smbus_block_process_call(struct i2c_client * client,
++ u8 command, u8 length, u8 *values)
++{
++ union i2c_smbus_data data;
++ int i;
++ if (length > I2C_SMBUS_BLOCK_MAX - 1)
++ return -1;
++ data.block[0] = length;
++ for (i = 1; i <= length; i++)
++ data.block[i] = values[i-1];
++ if(i2c_smbus_xfer(client->adapter,client->addr,client->flags,
++ I2C_SMBUS_WRITE, command,
++ I2C_SMBUS_BLOCK_PROC_CALL, &data))
++ return -1;
++ for (i = 1; i <= data.block[0]; i++)
++ values[i-1] = data.block[i];
++ return data.block[0];
++}
++
++/* Returns the number of read bytes */
++extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client * client,
++ u8 command, u8 *values)
++{
++ union i2c_smbus_data data;
++ int i;
++ if (i2c_smbus_xfer(client->adapter,client->addr,client->flags,
++ I2C_SMBUS_READ,command,
++ I2C_SMBUS_I2C_BLOCK_DATA,&data))
++ return -1;
++ else {
++ for (i = 1; i <= data.block[0]; i++)
++ values[i-1] = data.block[i];
++ return data.block[0];
++ }
++}
++
+ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client,
+ u8 command, u8 length, u8 *values)
+@@ -1117,6 +1188,6 @@
+ union i2c_smbus_data data;
+ int i;
+- if (length > 32)
+- length = 32;
++ if (length > I2C_SMBUS_I2C_BLOCK_MAX)
++ length = I2C_SMBUS_I2C_BLOCK_MAX;
+ for (i = 1; i <= length; i++)
+ data.block[i] = values[i-1];
+@@ -1180,4 +1251,5 @@
+ case I2C_SMBUS_PROC_CALL:
+ num = 2; /* Special case */
++ read_write = I2C_SMBUS_READ;
+ msg[0].len = 3;
+ msg[1].len = 2;
+@@ -1186,22 +1258,45 @@
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
++ case I2C_SMBUS_BLOCK_DATA_PEC:
+ if (read_write == I2C_SMBUS_READ) {
+- printk("i2c-core.o: Block read not supported under "
+- "I2C emulation!\n");
+- return -1;
++ printk(KERN_ERR "i2c-core.o: Block read not supported "
++ "under I2C emulation!\n");
++ return -1;
+ } else {
+ msg[0].len = data->block[0] + 2;
+- if (msg[0].len > 34) {
+- printk("i2c-core.o: smbus_access called with "
++ if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 2) {
++ printk(KERN_ERR "i2c-core.o: smbus_access called with "
+ "invalid block write size (%d)\n",
+- msg[0].len);
++ data->block[0]);
+ return -1;
+ }
++ if(size == I2C_SMBUS_BLOCK_DATA_PEC)
++ (msg[0].len)++;
+ for (i = 1; i <= msg[0].len; i++)
+ msgbuf0[i] = data->block[i-1];
+ }
+ break;
++ case I2C_SMBUS_BLOCK_PROC_CALL:
++ case I2C_SMBUS_BLOCK_PROC_CALL_PEC:
++ printk(KERN_ERR "i2c-core.o: Block process call not supported "
++ "under I2C emulation!\n");
++ return -1;
++ case I2C_SMBUS_I2C_BLOCK_DATA:
++ if (read_write == I2C_SMBUS_READ) {
++ msg[1].len = I2C_SMBUS_I2C_BLOCK_MAX;
++ } else {
++ msg[0].len = data->block[0] + 1;
++ if (msg[0].len > I2C_SMBUS_I2C_BLOCK_MAX + 1) {
++				printk(KERN_ERR "i2c-core.o: i2c_smbus_xfer_emulated called with "
++ "invalid block write size (%d)\n",
++ data->block[0]);
++ return -1;
++ }
++ for (i = 1; i <= data->block[0]; i++)
++ msgbuf0[i] = data->block[i];
++ }
++ break;
+ default:
+- printk("i2c-core.o: smbus_access called with invalid size (%d)\n",
++ printk(KERN_ERR "i2c-core.o: smbus_access called with invalid size (%d)\n",
+ size);
+ return -1;
+@@ -1223,4 +1318,10 @@
+ data->word = msgbuf1[0] | (msgbuf1[1] << 8);
+ break;
++ case I2C_SMBUS_I2C_BLOCK_DATA:
++ /* fixed at 32 for now */
++ data->block[0] = I2C_SMBUS_I2C_BLOCK_MAX;
++ for (i = 0; i < I2C_SMBUS_I2C_BLOCK_MAX; i++)
++ data->block[i+1] = msgbuf1[i];
++ break;
+ }
+ return 0;
+@@ -1228,18 +1329,48 @@
+
+
+-s32 i2c_smbus_xfer(struct i2c_adapter * adapter, u16 addr, unsigned short flags,
++s32 i2c_smbus_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
+ char read_write, u8 command, int size,
+ union i2c_smbus_data * data)
+ {
+ s32 res;
+- flags = flags & I2C_M_TEN;
+- if (adapter->algo->smbus_xfer) {
+- I2C_LOCK(adapter);
+- res = adapter->algo->smbus_xfer(adapter,addr,flags,read_write,
++ int swpec = 0;
++ u8 partial = 0;
++
++ flags &= I2C_M_TEN | I2C_CLIENT_PEC;
++ if((flags & I2C_CLIENT_PEC) &&
++ !(i2c_check_functionality(adap, I2C_FUNC_SMBUS_HWPEC_CALC))) {
++ swpec = 1;
++ if(read_write == I2C_SMBUS_READ &&
++ size == I2C_SMBUS_BLOCK_DATA)
++ size = I2C_SMBUS_BLOCK_DATA_PEC;
++ else if(size == I2C_SMBUS_PROC_CALL)
++ size = I2C_SMBUS_PROC_CALL_PEC;
++ else if(size == I2C_SMBUS_BLOCK_PROC_CALL) {
++ i2c_smbus_add_pec(addr, command,
++ I2C_SMBUS_BLOCK_DATA, data);
++ partial = data->block[data->block[0] + 1];
++ size = I2C_SMBUS_BLOCK_PROC_CALL_PEC;
++ } else if(read_write == I2C_SMBUS_WRITE &&
++ size != I2C_SMBUS_QUICK &&
++ size != I2C_SMBUS_I2C_BLOCK_DATA)
++ size = i2c_smbus_add_pec(addr, command, size, data);
++ }
++
++ if (adap->algo->smbus_xfer) {
++ down(&adap->bus);
++ res = adap->algo->smbus_xfer(adap,addr,flags,read_write,
+ command,size,data);
+- I2C_UNLOCK(adapter);
++ up(&adap->bus);
+ } else
+- res = i2c_smbus_xfer_emulated(adapter,addr,flags,read_write,
++ res = i2c_smbus_xfer_emulated(adap,addr,flags,read_write,
+ command,size,data);
++
++ if(res >= 0 && swpec &&
++ size != I2C_SMBUS_QUICK && size != I2C_SMBUS_I2C_BLOCK_DATA &&
++ (read_write == I2C_SMBUS_READ || size == I2C_SMBUS_PROC_CALL_PEC ||
++ size == I2C_SMBUS_BLOCK_PROC_CALL_PEC)) {
++ if(i2c_smbus_check_pec(addr, command, size, partial, data))
++ return -1;
++ }
+ return res;
+ }
+@@ -1265,128 +1396,30 @@
+ static int __init i2c_init(void)
+ {
+- printk(KERN_INFO "i2c-core.o: i2c core module\n");
++ printk(KERN_INFO "i2c-core.o: i2c core module version %s (%s)\n", I2C_VERSION, I2C_DATE);
+ memset(adapters,0,sizeof(adapters));
+ memset(drivers,0,sizeof(drivers));
+- adap_count=0;
+- driver_count=0;
+
+- init_MUTEX(&adap_lock);
+- init_MUTEX(&driver_lock);
+-
+- i2cproc_init();
+-
++#ifdef CONFIG_PROC_FS
++ return i2cproc_init();
++#else
+ return 0;
+-}
+-
+-#ifndef MODULE
+-#ifdef CONFIG_I2C_CHARDEV
+- extern int i2c_dev_init(void);
+-#endif
+-#ifdef CONFIG_I2C_ALGOBIT
+- extern int i2c_algo_bit_init(void);
+-#endif
+-#ifdef CONFIG_I2C_PHILIPSPAR
+- extern int i2c_bitlp_init(void);
+-#endif
+-#ifdef CONFIG_I2C_ELV
+- extern int i2c_bitelv_init(void);
+-#endif
+-#ifdef CONFIG_I2C_VELLEMAN
+- extern int i2c_bitvelle_init(void);
+-#endif
+-#ifdef CONFIG_I2C_BITVIA
+- extern int i2c_bitvia_init(void);
+-#endif
+-
+-#ifdef CONFIG_I2C_ALGOPCF
+- extern int i2c_algo_pcf_init(void);
+-#endif
+-#ifdef CONFIG_I2C_ELEKTOR
+- extern int i2c_pcfisa_init(void);
+-#endif
+-
+-#ifdef CONFIG_I2C_ALGO8XX
+- extern int i2c_algo_8xx_init(void);
+-#endif
+-#ifdef CONFIG_I2C_RPXLITE
+- extern int i2c_rpx_init(void);
+ #endif
+
+-#ifdef CONFIG_I2C_ALGO_SIBYTE
+- extern int i2c_algo_sibyte_init(void);
+- extern int i2c_sibyte_init(void);
+-#endif
+-#ifdef CONFIG_I2C_MAX1617
+- extern int i2c_max1617_init(void);
+-#endif
++}
+
+-#ifdef CONFIG_I2C_PROC
+- extern int sensors_init(void);
++static void __exit i2c_exit(void)
++{
++#ifdef CONFIG_PROC_FS
++ i2cproc_cleanup();
+ #endif
++}
+
+-/* This is needed for automatic patch generation: sensors code starts here */
+-/* This is needed for automatic patch generation: sensors code ends here */
+-
++/* leave this in for now simply to make patching easier so we don't have
++ to remove the call in drivers/char/mem.c */
+ int __init i2c_init_all(void)
+ {
+- /* --------------------- global ----- */
+- i2c_init();
+-
+-#ifdef CONFIG_I2C_CHARDEV
+- i2c_dev_init();
+-#endif
+- /* --------------------- bit -------- */
+-#ifdef CONFIG_I2C_ALGOBIT
+- i2c_algo_bit_init();
+-#endif
+-#ifdef CONFIG_I2C_PHILIPSPAR
+- i2c_bitlp_init();
+-#endif
+-#ifdef CONFIG_I2C_ELV
+- i2c_bitelv_init();
+-#endif
+-#ifdef CONFIG_I2C_VELLEMAN
+- i2c_bitvelle_init();
+-#endif
+-
+- /* --------------------- pcf -------- */
+-#ifdef CONFIG_I2C_ALGOPCF
+- i2c_algo_pcf_init();
+-#endif
+-#ifdef CONFIG_I2C_ELEKTOR
+- i2c_pcfisa_init();
+-#endif
+-
+- /* --------------------- 8xx -------- */
+-#ifdef CONFIG_I2C_ALGO8XX
+- i2c_algo_8xx_init();
+-#endif
+-#ifdef CONFIG_I2C_RPXLITE
+- i2c_rpx_init();
+-#endif
+-
+- /* --------------------- SiByte -------- */
+-#ifdef CONFIG_I2C_ALGO_SIBYTE
+- i2c_algo_sibyte_init();
+- i2c_sibyte_init();
+-#endif
+-#ifdef CONFIG_I2C_MAX1617
+- i2c_max1617_init();
+-#endif
+-
+- /* -------------- proc interface ---- */
+-#ifdef CONFIG_I2C_PROC
+- sensors_init();
+-#endif
+-/* This is needed for automatic patch generation: sensors code starts here */
+-/* This is needed for automatic patch generation: sensors code ends here */
+-
+ return 0;
+ }
+
+-#endif
+-
+-
+-
+ EXPORT_SYMBOL(i2c_add_adapter);
+ EXPORT_SYMBOL(i2c_del_adapter);
+@@ -1395,7 +1428,4 @@
+ EXPORT_SYMBOL(i2c_attach_client);
+ EXPORT_SYMBOL(i2c_detach_client);
+-EXPORT_SYMBOL(i2c_inc_use_client);
+-EXPORT_SYMBOL(i2c_dec_use_client);
+-EXPORT_SYMBOL(i2c_get_client);
+ EXPORT_SYMBOL(i2c_use_client);
+ EXPORT_SYMBOL(i2c_release_client);
+@@ -1421,23 +1451,17 @@
+ EXPORT_SYMBOL(i2c_smbus_read_block_data);
+ EXPORT_SYMBOL(i2c_smbus_write_block_data);
++EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data);
++EXPORT_SYMBOL(i2c_smbus_write_i2c_block_data);
+
+ EXPORT_SYMBOL(i2c_get_functionality);
+ EXPORT_SYMBOL(i2c_check_functionality);
+
+-#ifdef MODULE
+ MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
+ MODULE_DESCRIPTION("I2C-Bus main module");
+-MODULE_PARM(i2c_debug, "i");
+-MODULE_PARM_DESC(i2c_debug,"debug level");
+ MODULE_LICENSE("GPL");
+
+-int init_module(void)
+-{
+- return i2c_init();
+-}
++MODULE_PARM(i2c_debug, "i");
++MODULE_PARM_DESC(i2c_debug,"debug level");
+
+-void cleanup_module(void)
+-{
+- i2cproc_cleanup();
+-}
+-#endif
++module_init(i2c_init);
++module_exit(i2c_exit);
+--- linux-old/drivers/i2c/i2c-dev.c Sun Aug 31 14:51:55 CEST 2003
++++ linux/drivers/i2c/i2c-dev.c Sun Aug 31 14:51:55 CEST 2003
+@@ -29,39 +29,25 @@
+ <pmhahn@titan.lahn.de> */
+
+-/* $Id$ */
++/* $Id$ */
+
+-#include <linux/config.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/fs.h>
+ #include <linux/slab.h>
+-#include <linux/version.h>
+-#if LINUX_KERNEL_VERSION >= KERNEL_VERSION(2,4,0)
+ #include <linux/smp_lock.h>
+-#endif /* LINUX_KERNEL_VERSION >= KERNEL_VERSION(2,4,0) */
+ #ifdef CONFIG_DEVFS_FS
+ #include <linux/devfs_fs_kernel.h>
+ #endif
+-
+-
+-/* If you want debugging uncomment: */
+-/* #define DEBUG */
+-
+ #include <linux/init.h>
+-#include <asm/uaccess.h>
+-
+ #include <linux/i2c.h>
+ #include <linux/i2c-dev.h>
++#include <asm/uaccess.h>
++
++/* If you want debugging uncomment: */
++/* #define DEBUG */
+
+-#ifdef MODULE
+-extern int init_module(void);
+-extern int cleanup_module(void);
+-#endif /* def MODULE */
+
+ /* struct file_operations changed too often in the 2.1 series for nice code */
+
+-#if LINUX_KERNEL_VERSION < KERNEL_VERSION(2,4,9)
+-static loff_t i2cdev_lseek (struct file *file, loff_t offset, int origin);
+-#endif
+ static ssize_t i2cdev_read (struct file *file, char *buf, size_t count,
+ loff_t *offset);
+@@ -80,26 +66,12 @@
+ void *arg);
+
+-#ifdef MODULE
+-static
+-#else
+-extern
+-#endif
+- int __init i2c_dev_init(void);
+-static int i2cdev_cleanup(void);
+-
+ static struct file_operations i2cdev_fops = {
+-#if LINUX_KERNEL_VERSION >= KERNEL_VERSION(2,4,0)
+- owner: THIS_MODULE,
+-#endif /* LINUX_KERNEL_VERSION >= KERNEL_VERSION(2,4,0) */
+-#if LINUX_KERNEL_VERSION < KERNEL_VERSION(2,4,9)
+- llseek: i2cdev_lseek,
+-#else
+- llseek: no_llseek,
+-#endif
+- read: i2cdev_read,
+- write: i2cdev_write,
+- ioctl: i2cdev_ioctl,
+- open: i2cdev_open,
+- release: i2cdev_release,
++ .owner = THIS_MODULE,
++ .llseek = no_llseek,
++ .read = i2cdev_read,
++ .write = i2cdev_write,
++ .ioctl = i2cdev_ioctl,
++ .open = i2cdev_open,
++ .release = i2cdev_release,
+ };
+
+@@ -112,40 +84,20 @@
+
+ static struct i2c_driver i2cdev_driver = {
+- name: "i2c-dev dummy driver",
+- id: I2C_DRIVERID_I2CDEV,
+- flags: I2C_DF_DUMMY,
+- attach_adapter: i2cdev_attach_adapter,
+- detach_client: i2cdev_detach_client,
+- command: i2cdev_command,
+-/* inc_use: NULL,
+- dec_use: NULL, */
++ .owner = THIS_MODULE, /* not really used */
++ .name = "i2c-dev dummy driver",
++ .id = I2C_DRIVERID_I2CDEV,
++ .flags = I2C_DF_DUMMY,
++ .attach_adapter = i2cdev_attach_adapter,
++ .detach_client = i2cdev_detach_client,
++ .command = i2cdev_command,
+ };
+
+ static struct i2c_client i2cdev_client_template = {
+- name: "I2C /dev entry",
+- id: 1,
+- flags: 0,
+- addr: -1,
+-/* adapter: NULL, */
+- driver: &i2cdev_driver,
+-/* data: NULL */
++ .name = "I2C /dev entry",
++ .id = 1,
++ .addr = -1,
++ .driver = &i2cdev_driver,
+ };
+
+-static int i2cdev_initialized;
+-
+-#if LINUX_KERNEL_VERSION < KERNEL_VERSION(2,4,9)
+-/* Note that the lseek function is called llseek in 2.1 kernels. But things
+- are complicated enough as is. */
+-loff_t i2cdev_lseek (struct file *file, loff_t offset, int origin)
+-{
+-#ifdef DEBUG
+- struct inode *inode = file->f_dentry->d_inode;
+- printk("i2c-dev.o: i2c-%d lseek to %ld bytes relative to %d.\n",
+- MINOR(inode->i_rdev),(long) offset,origin);
+-#endif /* DEBUG */
+- return -ESPIPE;
+-}
+-#endif
+-
+ static ssize_t i2cdev_read (struct file *file, char *buf, size_t count,
+ loff_t *offset)
+@@ -160,7 +112,7 @@
+ struct i2c_client *client = (struct i2c_client *)file->private_data;
+
+- if(count > 8192)
++ if (count > 8192)
+ count = 8192;
+-
++
+ /* copy user space data to kernel space. */
+ tmp = kmalloc(count,GFP_KERNEL);
+@@ -169,5 +121,5 @@
+
+ #ifdef DEBUG
+- printk("i2c-dev.o: i2c-%d reading %d bytes.\n",MINOR(inode->i_rdev),
++ printk(KERN_DEBUG "i2c-dev.o: i2c-%d reading %d bytes.\n",minor(inode->i_rdev),
+ count);
+ #endif
+@@ -191,7 +143,7 @@
+ #endif /* DEBUG */
+
+- if(count > 8192)
++ if (count > 8192)
+ count = 8192;
+-
++
+ /* copy user space data to kernel space. */
+ tmp = kmalloc(count,GFP_KERNEL);
+@@ -204,5 +156,5 @@
+
+ #ifdef DEBUG
+- printk("i2c-dev.o: i2c-%d writing %d bytes.\n",MINOR(inode->i_rdev),
++ printk(KERN_DEBUG "i2c-dev.o: i2c-%d writing %d bytes.\n",minor(inode->i_rdev),
+ count);
+ #endif
+@@ -225,6 +177,6 @@
+
+ #ifdef DEBUG
+- printk("i2c-dev.o: i2c-%d ioctl, cmd: 0x%x, arg: %lx.\n",
+- MINOR(inode->i_rdev),cmd, arg);
++ printk(KERN_DEBUG "i2c-dev.o: i2c-%d ioctl, cmd: 0x%x, arg: %lx.\n",
++ minor(inode->i_rdev),cmd, arg);
+ #endif /* DEBUG */
+
+@@ -245,4 +197,10 @@
+ client->flags &= ~I2C_M_TEN;
+ return 0;
++ case I2C_PEC:
++ if (arg)
++ client->flags |= I2C_CLIENT_PEC;
++ else
++ client->flags &= ~I2C_CLIENT_PEC;
++ return 0;
+ case I2C_FUNCS:
+ funcs = i2c_get_functionality(client->adapter);
+@@ -260,5 +218,5 @@
+ if (rdwr_arg.nmsgs > 42)
+ return -EINVAL;
+-
++
+ rdwr_pa = (struct i2c_msg *)
+ kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg),
+@@ -345,7 +303,8 @@
+ (data_arg.size != I2C_SMBUS_PROC_CALL) &&
+ (data_arg.size != I2C_SMBUS_BLOCK_DATA) &&
+- (data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA)) {
++ (data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA) &&
++ (data_arg.size != I2C_SMBUS_BLOCK_PROC_CALL)) {
+ #ifdef DEBUG
+- printk("i2c-dev.o: size out of range (%x) in ioctl I2C_SMBUS.\n",
++ printk(KERN_DEBUG "i2c-dev.o: size out of range (%x) in ioctl I2C_SMBUS.\n",
+ data_arg.size);
+ #endif
+@@ -357,5 +316,5 @@
+ (data_arg.read_write != I2C_SMBUS_WRITE)) {
+ #ifdef DEBUG
+- printk("i2c-dev.o: read_write out of range (%x) in ioctl I2C_SMBUS.\n",
++ printk(KERN_DEBUG "i2c-dev.o: read_write out of range (%x) in ioctl I2C_SMBUS.\n",
+ data_arg.read_write);
+ #endif
+@@ -377,5 +336,5 @@
+ if (data_arg.data == NULL) {
+ #ifdef DEBUG
+- printk("i2c-dev.o: data is NULL pointer in ioctl I2C_SMBUS.\n");
++ printk(KERN_DEBUG "i2c-dev.o: data is NULL pointer in ioctl I2C_SMBUS.\n");
+ #endif
+ return -EINVAL;
+@@ -388,8 +347,9 @@
+ (data_arg.size == I2C_SMBUS_PROC_CALL))
+ datasize = sizeof(data_arg.data->word);
+- else /* size == I2C_SMBUS_BLOCK_DATA */
++ else /* size == smbus block, i2c block, or block proc. call */
+ datasize = sizeof(data_arg.data->block);
+
+ if ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
++ (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
+ (data_arg.read_write == I2C_SMBUS_WRITE)) {
+ if (copy_from_user(&temp, data_arg.data, datasize))
+@@ -400,4 +360,5 @@
+ data_arg.command,data_arg.size,&temp);
+ if (! res && ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
++ (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
+ (data_arg.read_write == I2C_SMBUS_READ))) {
+ if (copy_to_user(data_arg.data, &temp, datasize))
+@@ -414,10 +375,10 @@
+ int i2cdev_open (struct inode *inode, struct file *file)
+ {
+- unsigned int minor = MINOR(inode->i_rdev);
++ unsigned int minor = minor(inode->i_rdev);
+ struct i2c_client *client;
+
+ if ((minor >= I2CDEV_ADAPS_MAX) || ! (i2cdev_adaps[minor])) {
+ #ifdef DEBUG
+- printk("i2c-dev.o: Trying to open unattached adapter i2c-%d\n",
++ printk(KERN_DEBUG "i2c-dev.o: Trying to open unattached adapter i2c-%d\n",
+ minor);
+ #endif
+@@ -430,15 +391,14 @@
+ return -ENOMEM;
+ memcpy(client,&i2cdev_client_template,sizeof(struct i2c_client));
++
++ /* registered with adapter, passed as client to user */
+ client->adapter = i2cdev_adaps[minor];
+ file->private_data = client;
+
+- if (i2cdev_adaps[minor]->inc_use)
+- i2cdev_adaps[minor]->inc_use(i2cdev_adaps[minor]);
+-#if LINUX_KERNEL_VERSION < KERNEL_VERSION(2,4,0)
+- MOD_INC_USE_COUNT;
+-#endif /* LINUX_KERNEL_VERSION < KERNEL_VERSION(2,4,0) */
++ if(client->adapter->owner)
++ __MOD_INC_USE_COUNT(client->adapter->owner);
+
+ #ifdef DEBUG
+- printk("i2c-dev.o: opened i2c-%d\n",minor);
++ printk(KERN_DEBUG "i2c-dev.o: opened i2c-%d\n",minor);
+ #endif
+ return 0;
+@@ -447,20 +407,17 @@
+ static int i2cdev_release (struct inode *inode, struct file *file)
+ {
+- unsigned int minor = MINOR(inode->i_rdev);
+- kfree(file->private_data);
+- file->private_data=NULL;
+-#ifdef DEBUG
+- printk("i2c-dev.o: Closed: i2c-%d\n", minor);
+-#endif
+-#if LINUX_KERNEL_VERSION < KERNEL_VERSION(2,4,0)
+- MOD_DEC_USE_COUNT;
+-#else /* LINUX_KERNEL_VERSION >= KERNEL_VERSION(2,4,0) */
+- lock_kernel();
+-#endif /* LINUX_KERNEL_VERSION < KERNEL_VERSION(2,4,0) */
+- if (i2cdev_adaps[minor]->dec_use)
+- i2cdev_adaps[minor]->dec_use(i2cdev_adaps[minor]);
+-#if LINUX_KERNEL_VERSION >= KERNEL_VERSION(2,4,0)
+- unlock_kernel();
+-#endif /* LINUX_KERNEL_VERSION >= KERNEL_VERSION(2,4,0) */
++ struct i2c_client *client;
++#ifdef DEBUG
++ unsigned int minor = minor(inode->i_rdev);
++#endif
++
++ client = file->private_data;
++ file->private_data = NULL;
++ if(client->adapter->owner)
++ __MOD_DEC_USE_COUNT(client->adapter->owner);
++ kfree(client);
++#ifdef DEBUG
++ printk(KERN_DEBUG "i2c-dev.o: Closed: i2c-%d\n", minor);
++#endif
+ return 0;
+ }
+@@ -472,9 +429,9 @@
+
+ if ((i = i2c_adapter_id(adap)) < 0) {
+- printk("i2c-dev.o: Unknown adapter ?!?\n");
++ printk(KERN_DEBUG "i2c-dev.o: Unknown adapter ?!?\n");
+ return -ENODEV;
+ }
+ if (i >= I2CDEV_ADAPS_MAX) {
+- printk("i2c-dev.o: Adapter number too large?!? (%d)\n",i);
++ printk(KERN_DEBUG "i2c-dev.o: Adapter number too large?!? (%d)\n",i);
+ return -ENODEV;
+ }
+@@ -487,7 +444,7 @@
+ DEVFS_FL_DEFAULT, I2C_MAJOR, i,
+ S_IFCHR | S_IRUSR | S_IWUSR,
+- &i2cdev_fops, NULL);
++ &i2cdev_fops, adap);
+ #endif
+- printk("i2c-dev.o: Registered '%s' as minor %d\n",adap->name,i);
++ printk(KERN_DEBUG "i2c-dev.o: Registered '%s' as minor %d\n",adap->name,i);
+ } else {
+ /* This is actually a detach_adapter call! */
+@@ -497,5 +454,5 @@
+ i2cdev_adaps[i] = NULL;
+ #ifdef DEBUG
+- printk("i2c-dev.o: Adapter unregistered: %s\n",adap->name);
++ printk(KERN_DEBUG "i2c-dev.o: Adapter unregistered: %s\n",adap->name);
+ #endif
+ }
+@@ -519,7 +476,6 @@
+ int res;
+
+- printk("i2c-dev.o: i2c /dev entries driver module\n");
++ printk(KERN_INFO "i2c-dev.o: i2c /dev entries driver module version %s (%s)\n", I2C_VERSION, I2C_DATE);
+
+- i2cdev_initialized = 0;
+ #ifdef CONFIG_DEVFS_FS
+ if (devfs_register_chrdev(I2C_MAJOR, "i2c", &i2cdev_fops)) {
+@@ -527,5 +483,5 @@
+ if (register_chrdev(I2C_MAJOR,"i2c",&i2cdev_fops)) {
+ #endif
+- printk("i2c-dev.o: unable to get major %d for i2c bus\n",
++ printk(KERN_ERR "i2c-dev.o: unable to get major %d for i2c bus\n",
+ I2C_MAJOR);
+ return -EIO;
+@@ -534,62 +490,28 @@
+ devfs_handle = devfs_mk_dir(NULL, "i2c", NULL);
+ #endif
+- i2cdev_initialized ++;
+-
+ if ((res = i2c_add_driver(&i2cdev_driver))) {
+- printk("i2c-dev.o: Driver registration failed, module not inserted.\n");
+- i2cdev_cleanup();
++ printk(KERN_ERR "i2c-dev.o: Driver registration failed, module not inserted.\n");
++#ifdef CONFIG_DEVFS_FS
++ devfs_unregister(devfs_handle);
++#endif
++ unregister_chrdev(I2C_MAJOR,"i2c");
+ return res;
+ }
+- i2cdev_initialized ++;
+ return 0;
+ }
+
+-int i2cdev_cleanup(void)
++static void __exit i2c_dev_exit(void)
+ {
+- int res;
+-
+- if (i2cdev_initialized >= 2) {
+- if ((res = i2c_del_driver(&i2cdev_driver))) {
+- printk("i2c-dev.o: Driver deregistration failed, "
+- "module not removed.\n");
+- return res;
+- }
+- i2cdev_initialized --;
+- }
+-
+- if (i2cdev_initialized >= 1) {
++ i2c_del_driver(&i2cdev_driver);
+ #ifdef CONFIG_DEVFS_FS
+- devfs_unregister(devfs_handle);
+- if ((res = devfs_unregister_chrdev(I2C_MAJOR, "i2c"))) {
+-#else
+- if ((res = unregister_chrdev(I2C_MAJOR,"i2c"))) {
++ devfs_unregister(devfs_handle);
+ #endif
+- printk("i2c-dev.o: unable to release major %d for i2c bus\n",
+- I2C_MAJOR);
+- return res;
+- }
+- i2cdev_initialized --;
+- }
+- return 0;
++ unregister_chrdev(I2C_MAJOR,"i2c");
+ }
+
+-EXPORT_NO_SYMBOLS;
+-
+-#ifdef MODULE
+-
+ MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and Simon G. Vogl <simon@tk.uni-linz.ac.at>");
+ MODULE_DESCRIPTION("I2C /dev entries driver");
+ MODULE_LICENSE("GPL");
+
+-int init_module(void)
+-{
+- return i2c_dev_init();
+-}
+-
+-int cleanup_module(void)
+-{
+- return i2cdev_cleanup();
+-}
+-
+-#endif /* def MODULE */
+-
++module_init(i2c_dev_init);
++module_exit(i2c_dev_exit);
+--- linux-old/include/linux/i2c-dev.h Sun Aug 31 14:51:55 CEST 2003
++++ linux/include/linux/i2c-dev.h Sun Aug 31 14:51:55 CEST 2003
+@@ -20,12 +20,14 @@
+ */
+
+-/* $Id$ */
+-
+-#ifndef I2C_DEV_H
+-#define I2C_DEV_H
++/* $Id$ */
+
++#ifndef _LINUX_I2C_DEV_H
++#define _LINUX_I2C_DEV_H
+
+ #include <linux/types.h>
+-#include <linux/i2c.h>
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)
++#define minor(d) MINOR(d)
++#endif
+
+ /* Some IOCTL commands are defined in <linux/i2c.h> */
+@@ -46,136 +48,3 @@
+ };
+
+-#ifndef __KERNEL__
+-
+-#include <sys/ioctl.h>
+-
+-static inline __s32 i2c_smbus_access(int file, char read_write, __u8 command,
+- int size, union i2c_smbus_data *data)
+-{
+- struct i2c_smbus_ioctl_data args;
+-
+- args.read_write = read_write;
+- args.command = command;
+- args.size = size;
+- args.data = data;
+- return ioctl(file,I2C_SMBUS,&args);
+-}
+-
+-
+-static inline __s32 i2c_smbus_write_quick(int file, __u8 value)
+-{
+- return i2c_smbus_access(file,value,0,I2C_SMBUS_QUICK,NULL);
+-}
+-
+-static inline __s32 i2c_smbus_read_byte(int file)
+-{
+- union i2c_smbus_data data;
+- if (i2c_smbus_access(file,I2C_SMBUS_READ,0,I2C_SMBUS_BYTE,&data))
+- return -1;
+- else
+- return 0x0FF & data.byte;
+-}
+-
+-static inline __s32 i2c_smbus_write_byte(int file, __u8 value)
+-{
+- return i2c_smbus_access(file,I2C_SMBUS_WRITE,value,
+- I2C_SMBUS_BYTE,NULL);
+-}
+-
+-static inline __s32 i2c_smbus_read_byte_data(int file, __u8 command)
+-{
+- union i2c_smbus_data data;
+- if (i2c_smbus_access(file,I2C_SMBUS_READ,command,
+- I2C_SMBUS_BYTE_DATA,&data))
+- return -1;
+- else
+- return 0x0FF & data.byte;
+-}
+-
+-static inline __s32 i2c_smbus_write_byte_data(int file, __u8 command,
+- __u8 value)
+-{
+- union i2c_smbus_data data;
+- data.byte = value;
+- return i2c_smbus_access(file,I2C_SMBUS_WRITE,command,
+- I2C_SMBUS_BYTE_DATA, &data);
+-}
+-
+-static inline __s32 i2c_smbus_read_word_data(int file, __u8 command)
+-{
+- union i2c_smbus_data data;
+- if (i2c_smbus_access(file,I2C_SMBUS_READ,command,
+- I2C_SMBUS_WORD_DATA,&data))
+- return -1;
+- else
+- return 0x0FFFF & data.word;
+-}
+-
+-static inline __s32 i2c_smbus_write_word_data(int file, __u8 command,
+- __u16 value)
+-{
+- union i2c_smbus_data data;
+- data.word = value;
+- return i2c_smbus_access(file,I2C_SMBUS_WRITE,command,
+- I2C_SMBUS_WORD_DATA, &data);
+-}
+-
+-static inline __s32 i2c_smbus_process_call(int file, __u8 command, __u16 value)
+-{
+- union i2c_smbus_data data;
+- data.word = value;
+- if (i2c_smbus_access(file,I2C_SMBUS_WRITE,command,
+- I2C_SMBUS_PROC_CALL,&data))
+- return -1;
+- else
+- return 0x0FFFF & data.word;
+-}
+-
+-
+-/* Returns the number of read bytes */
+-static inline __s32 i2c_smbus_read_block_data(int file, __u8 command,
+- __u8 *values)
+-{
+- union i2c_smbus_data data;
+- int i;
+- if (i2c_smbus_access(file,I2C_SMBUS_READ,command,
+- I2C_SMBUS_BLOCK_DATA,&data))
+- return -1;
+- else {
+- for (i = 1; i <= data.block[0]; i++)
+- values[i-1] = data.block[i];
+- return data.block[0];
+- }
+-}
+-
+-static inline __s32 i2c_smbus_write_block_data(int file, __u8 command,
+- __u8 length, __u8 *values)
+-{
+- union i2c_smbus_data data;
+- int i;
+- if (length > 32)
+- length = 32;
+- for (i = 1; i <= length; i++)
+- data.block[i] = values[i-1];
+- data.block[0] = length;
+- return i2c_smbus_access(file,I2C_SMBUS_WRITE,command,
+- I2C_SMBUS_BLOCK_DATA, &data);
+-}
+-
+-static inline __s32 i2c_smbus_write_i2c_block_data(int file, __u8 command,
+- __u8 length, __u8 *values)
+-{
+- union i2c_smbus_data data;
+- int i;
+- if (length > 32)
+- length = 32;
+- for (i = 1; i <= length; i++)
+- data.block[i] = values[i-1];
+- data.block[0] = length;
+- return i2c_smbus_access(file,I2C_SMBUS_WRITE,command,
+- I2C_SMBUS_I2C_BLOCK_DATA, &data);
+-}
+-
+-#endif /* ndef __KERNEL__ */
+-
+-#endif
++#endif /* _LINUX_I2C_DEV_H */
+--- linux-old/drivers/i2c/i2c-elektor.c Sun Aug 31 14:51:55 CEST 2003
++++ linux/drivers/i2c/i2c-elektor.c Sun Aug 31 14:51:55 CEST 2003
+@@ -31,23 +31,21 @@
+ #include <linux/delay.h>
+ #include <linux/slab.h>
+-#include <linux/version.h>
+ #include <linux/init.h>
++#include <linux/interrupt.h>
+ #include <linux/pci.h>
+-#include <asm/irq.h>
+-#include <asm/io.h>
+-
++#include <linux/wait.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c-algo-pcf.h>
+-#include <linux/i2c-elektor.h>
+-#include "i2c-pcf8584.h"
++#include <asm/io.h>
++#include <asm/irq.h>
+
+ #define DEFAULT_BASE 0x330
+
+-static int base = 0;
+-static int irq = 0;
++static int base;
++static int irq;
+ static int clock = 0x1c;
+ static int own = 0x55;
+-static int mmapped = 0;
+-static int i2c_debug = 0;
++static int mmapped;
++static int i2c_debug;
+
+ /* vdovikin: removed static struct i2c_pcf_isa gpi; code -
+@@ -56,10 +54,7 @@
+ need to be rewriten - but for now just remove this for simpler reading */
+
+-#if (LINUX_VERSION_CODE < 0x020301)
+-static struct wait_queue *pcf_wait = NULL;
+-#else
+ static wait_queue_head_t pcf_wait;
+-#endif
+ static int pcf_pending;
++static spinlock_t irq_driver_lock = SPIN_LOCK_UNLOCKED;
+
+ /* ----- global defines ----------------------------------------------- */
+@@ -69,4 +64,14 @@
+ #define DEBE(x) x /* error messages */
+
++
++/* compatibility */
++#ifndef isa_readb
++#define isa_readb readb
++#endif
++
++#ifndef isa_writeb
++#define isa_writeb writeb
++#endif
++
+ /* ----- local functions ---------------------------------------------- */
+
+@@ -75,9 +80,10 @@
+ int address = ctl ? (base + 1) : base;
+
+- if (ctl && irq) {
++ /* enable irq if any specified for serial operation */
++ if (ctl && irq && (val & I2C_PCF_ESO)) {
+ val |= I2C_PCF_ENI;
+ }
+
+- DEB3(printk("i2c-elektor.o: Write 0x%X 0x%02X\n", address, val & 255));
++ DEB3(printk(KERN_DEBUG "i2c-elektor.o: Write 0x%X 0x%02X\n", address, val & 255));
+
+ switch (mmapped) {
+@@ -87,8 +93,8 @@
+ case 2: /* double mapped I/O needed for UP2000 board,
+ I don't know why this... */
+- writeb(val, address);
++ isa_writeb(val, address);
+ /* fall */
+ case 1: /* memory mapped I/O */
+- writeb(val, address);
++ isa_writeb(val, address);
+ break;
+ }
+@@ -98,7 +104,7 @@
+ {
+ int address = ctl ? (base + 1) : base;
+- int val = mmapped ? readb(address) : inb(address);
++ int val = mmapped ? isa_readb(address) : inb(address);
+
+- DEB3(printk("i2c-elektor.o: Read 0x%X 0x%02X\n", address, val));
++ DEB3(printk(KERN_DEBUG "i2c-elektor.o: Read 0x%X 0x%02X\n", address, val));
+
+ return (val);
+@@ -121,10 +127,10 @@
+
+ if (irq > 0) {
+- cli();
++ spin_lock_irq(&irq_driver_lock);
+ if (pcf_pending == 0) {
+ interruptible_sleep_on_timeout(&pcf_wait, timeout*HZ );
+ } else
+ pcf_pending = 0;
+- sti();
++ spin_unlock_irq(&irq_driver_lock);
+ } else {
+ udelay(100);
+@@ -142,14 +148,14 @@
+ {
+ if (!mmapped) {
+- if (check_region(base, 2) < 0 ) {
+- printk("i2c-elektor.o: requested I/O region (0x%X:2) is in use.\n", base);
++ if (!request_region(base, 2, "i2c (isa bus adapter)")) {
++ printk(KERN_ERR
++ "i2c-elektor.o: requested I/O region (0x%X:2) "
++ "is in use.\n", base);
+ return -ENODEV;
+- } else {
+- request_region(base, 2, "i2c (isa bus adapter)");
+ }
+ }
+ if (irq > 0) {
+ if (request_irq(irq, pcf_isa_handler, 0, "PCF8584", 0) < 0) {
+- printk("i2c-elektor.o: Request irq%d failed\n", irq);
++ printk(KERN_ERR "i2c-elektor.o: Request irq%d failed\n", irq);
+ irq = 0;
+ } else
+@@ -160,42 +166,4 @@
+
+
+-static void __exit pcf_isa_exit(void)
+-{
+- if (irq > 0) {
+- disable_irq(irq);
+- free_irq(irq, 0);
+- }
+- if (!mmapped) {
+- release_region(base , 2);
+- }
+-}
+-
+-
+-static int pcf_isa_reg(struct i2c_client *client)
+-{
+- return 0;
+-}
+-
+-
+-static int pcf_isa_unreg(struct i2c_client *client)
+-{
+- return 0;
+-}
+-
+-static void pcf_isa_inc_use(struct i2c_adapter *adap)
+-{
+-#ifdef MODULE
+- MOD_INC_USE_COUNT;
+-#endif
+-}
+-
+-static void pcf_isa_dec_use(struct i2c_adapter *adap)
+-{
+-#ifdef MODULE
+- MOD_DEC_USE_COUNT;
+-#endif
+-}
+-
+-
+ /* ------------------------------------------------------------------------
+ * Encapsulate the above functions in the correct operations structure.
+@@ -203,25 +171,22 @@
+ */
+ static struct i2c_algo_pcf_data pcf_isa_data = {
+- NULL,
+- pcf_isa_setbyte,
+- pcf_isa_getbyte,
+- pcf_isa_getown,
+- pcf_isa_getclock,
+- pcf_isa_waitforpin,
+- 10, 10, 100, /* waits, timeout */
++ .setpcf = pcf_isa_setbyte,
++ .getpcf = pcf_isa_getbyte,
++ .getown = pcf_isa_getown,
++ .getclock = pcf_isa_getclock,
++ .waitforpin = pcf_isa_waitforpin,
++ .udelay = 10,
++ .mdelay = 10,
++ .timeout = HZ,
+ };
+
+ static struct i2c_adapter pcf_isa_ops = {
+- "PCF8584 ISA adapter",
+- I2C_HW_P_ELEK,
+- NULL,
+- &pcf_isa_data,
+- pcf_isa_inc_use,
+- pcf_isa_dec_use,
+- pcf_isa_reg,
+- pcf_isa_unreg,
++ .owner = THIS_MODULE,
++ .name = "PCF8584 ISA adapter",
++ .id = I2C_HW_P_ELEK,
++ .algo_data = &pcf_isa_data,
+ };
+
+-int __init i2c_pcfisa_init(void)
++static int __init i2c_pcfisa_init(void)
+ {
+ #ifdef __alpha__
+@@ -239,5 +204,5 @@
+ if (!pci_read_config_byte(cy693_dev, 0x47, &config)) {
+
+- DEB3(printk("i2c-elektor.o: found cy82c693, config register 0x47 = 0x%02x.\n", config));
++ DEB3(printk(KERN_DEBUG "i2c-elektor.o: found cy82c693, config register 0x47 = 0x%02x.\n", config));
+
+ /* UP2000 board has this register set to 0xe1,
+@@ -261,5 +226,5 @@
+ (this can be read from cypress) */
+ clock = I2C_PCF_CLK | I2C_PCF_TRNS90;
+- printk("i2c-elektor.o: found API UP2000 like board, will probe PCF8584 later.\n");
++ printk(KERN_INFO "i2c-elektor.o: found API UP2000 like board, will probe PCF8584 later.\n");
+ }
+ }
+@@ -270,9 +235,9 @@
+ /* sanity checks for mmapped I/O */
+ if (mmapped && base < 0xc8000) {
+- printk("i2c-elektor.o: incorrect base address (0x%0X) specified for mmapped I/O.\n", base);
++ printk(KERN_ERR "i2c-elektor.o: incorrect base address (0x%0X) specified for mmapped I/O.\n", base);
+ return -ENODEV;
+ }
+
+- printk("i2c-elektor.o: i2c pcf8584-isa adapter module\n");
++ printk(KERN_INFO "i2c-elektor.o: i2c pcf8584-isa adapter module version %s (%s)\n", I2C_VERSION, I2C_DATE);
+
+ if (base == 0) {
+@@ -280,23 +245,39 @@
+ }
+
+-#if (LINUX_VERSION_CODE >= 0x020301)
+ init_waitqueue_head(&pcf_wait);
+-#endif
+- if (pcf_isa_init() == 0) {
+- if (i2c_pcf_add_bus(&pcf_isa_ops) < 0)
+- return -ENODEV;
+- } else {
++ if (pcf_isa_init())
+ return -ENODEV;
+- }
++ if (i2c_pcf_add_bus(&pcf_isa_ops) < 0)
++ goto fail;
+
+- printk("i2c-elektor.o: found device at %#x.\n", base);
++	printk(KERN_INFO "i2c-elektor.o: found device at %#x.\n", base);
+
+ return 0;
++
++ fail:
++ if (irq > 0) {
++ disable_irq(irq);
++ free_irq(irq, 0);
++ }
++
++ if (!mmapped)
++ release_region(base , 2);
++ return -ENODEV;
+ }
+
+
+-EXPORT_NO_SYMBOLS;
++static void i2c_pcfisa_exit(void)
++{
++ i2c_pcf_del_bus(&pcf_isa_ops);
++
++ if (irq > 0) {
++ disable_irq(irq);
++ free_irq(irq, 0);
++ }
++
++ if (!mmapped)
++ release_region(base , 2);
++}
+
+-#ifdef MODULE
+ MODULE_AUTHOR("Hans Berglund <hb@spacetec.no>");
+ MODULE_DESCRIPTION("I2C-Bus adapter routines for PCF8584 ISA bus adapter");
+@@ -310,14 +291,4 @@
+ MODULE_PARM(i2c_debug, "i");
+
+-int init_module(void)
+-{
+- return i2c_pcfisa_init();
+-}
+-
+-void cleanup_module(void)
+-{
+- i2c_pcf_del_bus(&pcf_isa_ops);
+- pcf_isa_exit();
+-}
+-
+-#endif
++module_init(i2c_pcfisa_init);
++module_exit(i2c_pcfisa_exit);
+--- linux-old/include/linux/i2c-elektor.h Sun Aug 31 14:51:55 CEST 2003
++++ linux/include/linux/i2c-elektor.h Sun Aug 31 14:51:55 CEST 2003
+@@ -1,47 +0,0 @@
+-/* ------------------------------------------------------------------------- */
+-/* i2c-elektor.c i2c-hw access for PCF8584 style isa bus adaptes */
+-/* ------------------------------------------------------------------------- */
+-/* Copyright (C) 1995-97 Simon G. Vogl
+- 1998-99 Hans Berglund
+-
+- This program is free software; you can redistribute it and/or modify
+- it under the terms of the GNU General Public License as published by
+- the Free Software Foundation; either version 2 of the License, or
+- (at your option) any later version.
+-
+- This program is distributed in the hope that it will be useful,
+- but WITHOUT ANY WARRANTY; without even the implied warranty of
+- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- GNU General Public License for more details.
+-
+- You should have received a copy of the GNU General Public License
+- along with this program; if not, write to the Free Software
+- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+-/* ------------------------------------------------------------------------- */
+-
+-/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
+- Frodo Looijaard <frodol@dds.nl> */
+-
+-/* $Id$ */
+-
+-#ifndef I2C_PCF_ELEKTOR_H
+-#define I2C_PCF_ELEKTOR_H 1
+-
+-/*
+- * This struct contains the hw-dependent functions of PCF8584 adapters to
+- * manipulate the registers, and to init any hw-specific features.
+- * vdovikin: removed: this module in real supports only one device,
+- * due to missing arguments in some functions, called from the algo-pcf module.
+- * Sometimes it's need to be rewriten -
+- * but for now just remove this for simpler reading */
+-
+-/*
+-struct i2c_pcf_isa {
+- int pi_base;
+- int pi_irq;
+- int pi_clock;
+- int pi_own;
+-};
+-*/
+-
+-#endif /* PCF_ELEKTOR_H */
+--- linux-old/drivers/i2c/i2c-elv.c Sun Aug 31 14:51:55 CEST 2003
++++ linux/drivers/i2c/i2c-elv.c Sun Aug 31 14:51:55 CEST 2003
+@@ -22,5 +22,5 @@
+ Frodo Looijaard <frodol@dds.nl> */
+
+-/* $Id$ */
++/* $Id$ */
+
+ #include <linux/kernel.h>
+@@ -28,14 +28,10 @@
+ #include <linux/delay.h>
+ #include <linux/slab.h>
+-#include <linux/version.h>
+ #include <linux/init.h>
+-
+-#include <asm/uaccess.h>
+-
+ #include <linux/ioport.h>
+-#include <asm/io.h>
+ #include <linux/errno.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c-algo-bit.h>
++#include <asm/io.h>
+
+ #define DEFAULT_BASE 0x378
+@@ -91,56 +87,29 @@
+ static int bit_elv_init(void)
+ {
+- if (check_region(base,(base == 0x3bc)? 3 : 8) < 0 ) {
+- return -ENODEV;
+- } else {
+- /* test for ELV adap. */
+- if (inb(base+1) & 0x80) { /* BUSY should be high */
+- DEBINIT(printk("i2c-elv.o: Busy was low.\n"));
+- return -ENODEV;
+- } else {
+- outb(0x0c,base+2); /* SLCT auf low */
+- udelay(400);
+- if ( !(inb(base+1) && 0x10) ) {
+- outb(0x04,base+2);
+- DEBINIT(printk("i2c-elv.o: Select was high.\n"));
+- return -ENODEV;
+- }
+- }
+- request_region(base,(base == 0x3bc)? 3 : 8,
+- "i2c (ELV adapter)");
+- PortData = 0;
+- bit_elv_setsda((void*)base,1);
+- bit_elv_setscl((void*)base,1);
++ if (!request_region(base, (base == 0x3bc) ? 3 : 8,
++ "i2c (ELV adapter)"))
++ return -ENODEV;
++
++ if (inb(base+1) & 0x80) { /* BUSY should be high */
++ DEBINIT(printk(KERN_DEBUG "i2c-elv.o: Busy was low.\n"));
++ goto fail;
++ }
++
++ outb(0x0c,base+2); /* SLCT auf low */
++ udelay(400);
++	if (!(inb(base+1) & 0x10)) {
++ outb(0x04,base+2);
++ DEBINIT(printk(KERN_DEBUG "i2c-elv.o: Select was high.\n"));
++ goto fail;
+ }
+- return 0;
+-}
+-
+-static void __exit bit_elv_exit(void)
+-{
+- release_region( base , (base == 0x3bc)? 3 : 8 );
+-}
+
+-static int bit_elv_reg(struct i2c_client *client)
+-{
++ PortData = 0;
++ bit_elv_setsda((void*)base,1);
++ bit_elv_setscl((void*)base,1);
+ return 0;
+-}
+
+-static int bit_elv_unreg(struct i2c_client *client)
+-{
+- return 0;
+-}
+-
+-static void bit_elv_inc_use(struct i2c_adapter *adap)
+-{
+-#ifdef MODULE
+- MOD_INC_USE_COUNT;
+-#endif
+-}
+-
+-static void bit_elv_dec_use(struct i2c_adapter *adap)
+-{
+-#ifdef MODULE
+- MOD_DEC_USE_COUNT;
+-#endif
++fail:
++ release_region(base , (base == 0x3bc) ? 3 : 8);
++ return -ENODEV;
+ }
+
+@@ -150,26 +119,23 @@
+ */
+ static struct i2c_algo_bit_data bit_elv_data = {
+- NULL,
+- bit_elv_setsda,
+- bit_elv_setscl,
+- bit_elv_getsda,
+- bit_elv_getscl,
+- 80, 80, 100, /* waits, timeout */
++ .setsda = bit_elv_setsda,
++ .setscl = bit_elv_setscl,
++ .getsda = bit_elv_getsda,
++ .getscl = bit_elv_getscl,
++ .udelay = 80,
++ .mdelay = 80,
++ .timeout = HZ
+ };
+
+ static struct i2c_adapter bit_elv_ops = {
+- "ELV Parallel port adaptor",
+- I2C_HW_B_ELV,
+- NULL,
+- &bit_elv_data,
+- bit_elv_inc_use,
+- bit_elv_dec_use,
+- bit_elv_reg,
+- bit_elv_unreg,
++ .owner = THIS_MODULE,
++ .name = "ELV Parallel port adaptor",
++ .id = I2C_HW_B_ELV,
++ .algo_data = &bit_elv_data,
+ };
+
+-int __init i2c_bitelv_init(void)
++static int __init i2c_bitelv_init(void)
+ {
+- printk("i2c-elv.o: i2c ELV parallel port adapter module\n");
++ printk(KERN_INFO "i2c-elv.o: i2c ELV parallel port adapter module version %s (%s)\n", I2C_VERSION, I2C_DATE);
+ if (base==0) {
+ /* probe some values */
+@@ -191,29 +157,22 @@
+ }
+ }
+- printk("i2c-elv.o: found device at %#x.\n",base);
++ printk(KERN_DEBUG "i2c-elv.o: found device at %#x.\n",base);
+ return 0;
+ }
+
++static void __exit i2c_bitelv_exit(void)
++{
++ i2c_bit_del_bus(&bit_elv_ops);
++ release_region(base , (base == 0x3bc) ? 3 : 8);
++}
+
+ EXPORT_NO_SYMBOLS;
+
+-#ifdef MODULE
+ MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
+ MODULE_DESCRIPTION("I2C-Bus adapter routines for ELV parallel port adapter");
+ MODULE_LICENSE("GPL");
+
+-
+ MODULE_PARM(base, "i");
+
+-int init_module(void)
+-{
+- return i2c_bitelv_init();
+-}
+-
+-void cleanup_module(void)
+-{
+- i2c_bit_del_bus(&bit_elv_ops);
+- bit_elv_exit();
+-}
+-
+-#endif
++module_init(i2c_bitelv_init);
++module_exit(i2c_bitelv_exit);
+--- linux-old/drivers/i2c/i2c-frodo.c Sun Aug 31 14:51:55 CEST 2003
++++ linux/drivers/i2c/i2c-frodo.c Sun Aug 31 14:51:55 CEST 2003
+@@ -0,0 +1,83 @@
++
++/*
++ * linux/drivers/i2c/i2c-frodo.c
++ *
++ * Author: Abraham van der Merwe <abraham@2d3d.co.za>
++ *
++ * An I2C adapter driver for the 2d3D, Inc. StrongARM SA-1110
++ * Development board (Frodo).
++ *
++ * This source code is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/i2c-algo-bit.h>
++#include <asm/hardware.h>
++
++
++static void frodo_setsda (void *data,int state)
++{
++ if (state)
++ FRODO_CPLD_I2C |= FRODO_I2C_SDA_OUT;
++ else
++ FRODO_CPLD_I2C &= ~FRODO_I2C_SDA_OUT;
++}
++
++static void frodo_setscl (void *data,int state)
++{
++ if (state)
++ FRODO_CPLD_I2C |= FRODO_I2C_SCL_OUT;
++ else
++ FRODO_CPLD_I2C &= ~FRODO_I2C_SCL_OUT;
++}
++
++static int frodo_getsda (void *data)
++{
++ return ((FRODO_CPLD_I2C & FRODO_I2C_SDA_IN) != 0);
++}
++
++static int frodo_getscl (void *data)
++{
++ return ((FRODO_CPLD_I2C & FRODO_I2C_SCL_IN) != 0);
++}
++
++static struct i2c_algo_bit_data bit_frodo_data = {
++ .setsda = frodo_setsda,
++ .setscl = frodo_setscl,
++ .getsda = frodo_getsda,
++ .getscl = frodo_getscl,
++ .udelay = 80,
++ .mdelay = 80,
++ .timeout = HZ
++};
++
++static struct i2c_adapter frodo_ops = {
++ .owner = THIS_MODULE,
++ .name = "Frodo adapter driver",
++ .id = I2C_HW_B_FRODO,
++ .algo_data = &bit_frodo_data,
++};
++
++static int __init i2c_frodo_init (void)
++{
++ return i2c_bit_add_bus(&frodo_ops);
++}
++
++static void __exit i2c_frodo_exit (void)
++{
++ i2c_bit_del_bus(&frodo_ops);
++}
++
++MODULE_AUTHOR ("Abraham van der Merwe <abraham@2d3d.co.za>");
++MODULE_DESCRIPTION ("I2C-Bus adapter routines for Frodo");
++MODULE_LICENSE ("GPL");
++
++module_init (i2c_frodo_init);
++module_exit (i2c_frodo_exit);
++
+--- linux-old/include/linux/i2c-id.h Sun Aug 31 14:51:55 CEST 2003
++++ linux/include/linux/i2c-id.h Sun Aug 31 14:51:55 CEST 2003
+@@ -21,8 +21,9 @@
+ /* ------------------------------------------------------------------------- */
+
+-/* $Id$ */
++/* $Id$ */
++
++#ifndef LINUX_I2C_ID_H
++#define LINUX_I2C_ID_H
+
+-#ifndef I2C_ID_H
+-#define I2C_ID_H
+ /*
+ * This file is part of the i2c-bus package and contains the identifier
+@@ -91,5 +92,16 @@
+ #define I2C_DRIVERID_SP5055 44 /* Satellite tuner */
+ #define I2C_DRIVERID_STV0030 45 /* Multipurpose switch */
+-#define I2C_DRIVERID_ADV717X 48 /* video encoder */
++#define I2C_DRIVERID_SAA7108 46 /* video decoder, image scaler */
++#define I2C_DRIVERID_DS1307 47 /* DS1307 real time clock */
++#define I2C_DRIVERID_ADV717x 48 /* ADV 7175/7176 video encoder */
++#define I2C_DRIVERID_ZR36067 49 /* Zoran 36067 video encoder */
++#define I2C_DRIVERID_ZR36120 50 /* Zoran 36120 video encoder */
++#define I2C_DRIVERID_24LC32A 51 /* Microchip 24LC32A 32k EEPROM */
++#define I2C_DRIVERID_STM41T00 52 /* real time clock */
++#define I2C_DRIVERID_UDA1342 53 /* UDA1342 audio codec */
++#define I2C_DRIVERID_ADV7170 54 /* video encoder */
++#define I2C_DRIVERID_RADEON 55 /* I2C bus on Radeon boards */
++
++
+
+ #define I2C_DRIVERID_EXP0 0xF0 /* experimental use id's */
+@@ -100,4 +112,6 @@
+ #define I2C_DRIVERID_I2CDEV 900
+ #define I2C_DRIVERID_I2CPROC 901
++#define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */
++#define I2C_DRIVERID_ALERT 903 /* SMBus Alert Responder Client */
+
+ /* IDs -- Use DRIVERIDs 1000-1999 for sensors.
+@@ -129,4 +143,22 @@
+ #define I2C_DRIVERID_IT87 1026
+ #define I2C_DRIVERID_CH700X 1027 /* single driver for CH7003-7009 digital pc to tv encoders */
++#define I2C_DRIVERID_FSCPOS 1028
++#define I2C_DRIVERID_FSCSCY 1029
++#define I2C_DRIVERID_PCF8591 1030
++#define I2C_DRIVERID_SMSC47M1 1031
++#define I2C_DRIVERID_VT1211 1032
++#define I2C_DRIVERID_LM92 1033
++#define I2C_DRIVERID_VT8231 1034
++#define I2C_DRIVERID_SMARTBATT 1035
++#define I2C_DRIVERID_BMCSENSORS 1036
++#define I2C_DRIVERID_FS451 1037
++#define I2C_DRIVERID_W83627HF 1038
++#define I2C_DRIVERID_LM85 1039
++#define I2C_DRIVERID_LM83 1040
++#define I2C_DRIVERID_SAA1064 1041
++#define I2C_DRIVERID_LM90 1042
++#define I2C_DRIVERID_ASB100 1043
++#define I2C_DRIVERID_MAX6650 1044
++#define I2C_DRIVERID_XEONTEMP 1045
+
+ /*
+@@ -145,8 +177,16 @@
+ #define I2C_ALGO_SAA7146 0x060000 /* SAA 7146 video decoder bus */
+ #define I2C_ALGO_ACB 0x070000 /* ACCESS.bus algorithm */
+-
++#define I2C_ALGO_IIC 0x080000 /* ITE IIC bus */
++#define I2C_ALGO_SAA7134 0x090000
++#define I2C_ALGO_MPC824X 0x0a0000 /* Motorola 8240 / 8245 */
++#define I2C_ALGO_IPMI 0x0b0000 /* IPMI dummy adapter */
++#define I2C_ALGO_IPMB 0x0c0000 /* IPMB adapter */
++#define I2C_ALGO_MPC107 0x0d0000
+ #define I2C_ALGO_EC 0x100000 /* ACPI embedded controller */
+
+ #define I2C_ALGO_MPC8XX 0x110000 /* MPC8xx PowerPC I2C algorithm */
++#define I2C_ALGO_OCP 0x120000 /* IBM or otherwise On-chip I2C algorithm */
++#define I2C_ALGO_BITHS 0x130000 /* enhanced bit style adapters */
++#define I2C_ALGO_OCP_IOP3XX 0x140000 /* XSCALE IOP3XX On-chip I2C alg */
+
+ #define I2C_ALGO_EXP 0x800000 /* experimental */
+@@ -176,7 +216,15 @@
+ #define I2C_HW_B_VOO 0x0b /* 3dfx Voodoo 3 / Banshee */
+ #define I2C_HW_B_PPORT 0x0c /* Primitive parallel port adapter */
++#define I2C_HW_B_SAVG 0x0d /* Savage 4 */
++#define I2C_HW_B_SCX200 0x0e /* Nat'l Semi SCx200 I2C */
+ #define I2C_HW_B_RIVA 0x10 /* Riva based graphics cards */
+ #define I2C_HW_B_IOC 0x11 /* IOC bit-wiggling */
+ #define I2C_HW_B_TSUNA 0x12 /* DEC Tsunami chipset */
++#define I2C_HW_B_FRODO 0x13 /* 2d3D, Inc. SA-1110 Development Board */
++#define I2C_HW_B_OMAHA 0x14 /* Omaha I2C interface (ARM) */
++#define I2C_HW_B_GUIDE 0x15 /* Guide bit-basher */
++#define I2C_HW_B_IXP2000 0x16 /* GPIO on IXP2000 systems */
++#define I2C_HW_B_IXP425 0x17 /* GPIO on IXP425 systems */
++#define I2C_HW_B_S3VIA 0x18 /* S3Via ProSavage adapter */
+
+ /* --- PCF 8584 based algorithms */
+@@ -188,7 +236,19 @@
+ #define I2C_HW_ACPI_EC 0x00
+
++/* --- MPC824x PowerPC adapters */
++#define I2C_HW_MPC824X 0x00 /* Motorola 8240 / 8245 */
++
+ /* --- MPC8xx PowerPC adapters */
+ #define I2C_HW_MPC8XX_EPON 0x00 /* Eponymous MPC8xx I2C adapter */
+
++/* --- ITE based algorithms */
++#define I2C_HW_I_IIC 0x00 /* controller on the ITE */
++
++/* --- PowerPC on-chip adapters */
++#define I2C_HW_OCP 0x00 /* IBM on-chip I2C adapter */
++
++/* --- XSCALE on-chip adapters */
++#define I2C_HW_IOP321 0x00
++
+ /* --- SMBus only adapters */
+ #define I2C_HW_SMBUS_PIIX4 0x00
+@@ -200,7 +260,21 @@
+ #define I2C_HW_SMBUS_SIS5595 0x06
+ #define I2C_HW_SMBUS_ALI1535 0x07
++#define I2C_HW_SMBUS_SIS630 0x08
++#define I2C_HW_SMBUS_SIS645 0x09
++#define I2C_HW_SMBUS_AMD8111 0x0a
++#define I2C_HW_SMBUS_SCX200 0x0b
++#define I2C_HW_SMBUS_NFORCE2 0x0c
+
+ /* --- ISA pseudo-adapter */
+ #define I2C_HW_ISA 0x00
+
+-#endif /* I2C_ID_H */
++/* --- IPMI pseudo-adapter */
++#define I2C_HW_IPMI 0x00
++
++/* --- IPMB adapter */
++#define I2C_HW_IPMB 0x00
++
++/* --- MCP107 adapter */
++#define I2C_HW_MPC107 0x00
++
++#endif /* LINUX_I2C_ID_H */
+--- linux-old/drivers/i2c/i2c-max1617.c Sun Aug 31 14:51:56 CEST 2003
++++ linux/drivers/i2c/i2c-max1617.c Sun Aug 31 14:51:56 CEST 2003
+@@ -1,225 +0,0 @@
+-/*
+- * Copyright (C) 2001,2002,2003 Broadcom Corporation
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License
+- * as published by the Free Software Foundation; either version 2
+- * of the License, or (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+- */
+-
+-/*
+- * SMBus/I2C device driver for the MAX1617 temperature sensor
+- */
+-
+-#include <linux/config.h>
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/slab.h>
+-#include <linux/timer.h>
+-#include <linux/errno.h>
+-
+-#include <linux/i2c.h>
+-#include <linux/i2c-algo-sibyte.h>
+-
+-#define IF_NAME "max1617"
+-
+-#define MAX1617_SMBUS_DEV 0x2A
+-#define MAX1617_LOCAL 0
+-#define MAX1617_REMOTE 1
+-#define MAX1617_STATUS 2
+-#define MAX1617_POLL_PERIOD 10
+-
+-static int max1617_verbose = 0;
+-static int max1617_polling = 1;
+-
+-/* Addresses to scan */
+-static unsigned short normal_i2c[] = {MAX1617_SMBUS_DEV, I2C_CLIENT_END};
+-static unsigned short normal_i2c_range[] = {I2C_CLIENT_END};
+-static unsigned short probe[2] = { I2C_CLIENT_END, I2C_CLIENT_END };
+-static unsigned short probe_range[2] = { I2C_CLIENT_END, I2C_CLIENT_END };
+-static unsigned short ignore[2] = { I2C_CLIENT_END, I2C_CLIENT_END };
+-static unsigned short ignore_range[2] = { I2C_CLIENT_END, I2C_CLIENT_END };
+-static unsigned short force[2] = { I2C_CLIENT_END, I2C_CLIENT_END };
+-
+-static struct i2c_client_address_data addr_data = {
+- normal_i2c, normal_i2c_range,
+- probe, probe_range,
+- ignore, ignore_range,
+- force
+-};
+-
+-struct max1617_info {
+- struct i2c_client *client;
+- struct timer_list timer;
+- int local;
+- int remote;
+-};
+-
+-static int max1617_probe(struct i2c_adapter *adap);
+-static int max1617_detach(struct i2c_client *device);
+-static int max1617_command(struct i2c_client *device, unsigned int cmd, void *arg);
+-static void max1617_inc_use(struct i2c_client *device);
+-static void max1617_dec_use(struct i2c_client *device);
+-
+-struct i2c_driver i2c_driver_max1617 = {
+- name: IF_NAME,
+- id: I2C_DRIVERID_MAX1617,
+- flags: I2C_DF_NOTIFY,
+- attach_adapter: max1617_probe,
+- detach_client: max1617_detach,
+- command: max1617_command,
+- inc_use: max1617_inc_use,
+- dec_use: max1617_dec_use
+-};
+-\
+-static int max1617_read(struct i2c_client *client, unsigned char subaddr)
+-{
+- return i2c_smbus_read_byte_data(client, subaddr);
+-}
+-
+-/* poll the device, check for temperature/status changes */
+-static void max1617_update(unsigned long arg)
+-{
+- struct max1617_info *m = (struct max1617_info *)arg;
+- int status, remote, local;
+- char statstr[50];
+-
+- status = max1617_read(m->client, MAX1617_STATUS);
+- remote = max1617_read(m->client, MAX1617_REMOTE);
+- local = max1617_read(m->client, MAX1617_LOCAL);
+- if (status < 0 || remote < 0 || local < 0) {
+- printk(KERN_ERR IF_NAME ": sensor device did not respond.\n");
+- } else {
+- statstr[0] = 0;
+- if (status & 0x80) strcat(statstr,"Busy ");
+- if (status & 0x40) strcat(statstr,"HiTempLcl ");
+- if (status & 0x20) strcat(statstr,"LoTempLcl ");
+- if (status & 0x10) strcat(statstr,"HiTempRem ");
+- if (status & 0x08) strcat(statstr,"LoTempRem ");
+- if (status & 0x04) strcat(statstr,"Fault ");
+-
+- if (max1617_verbose || (local != m->local) || (remote != m->remote)) {
+- printk(KERN_DEBUG IF_NAME ": Temperature - CPU: %dC Board: %dC Status:%02X [ %s]\n",
+- remote, local, status, statstr);
+- }
+- m->local = local;
+- m->remote = remote;
+- mod_timer(&m->timer, jiffies + (HZ * MAX1617_POLL_PERIOD));
+- }
+-}
+-
+-/* attach to an instance of the device that was probed on a bus */
+-static int max1617_attach(struct i2c_adapter *adap, int addr, unsigned short flags, int kind)
+-{
+- struct max1617_info *m;
+- struct i2c_client *client;
+- int err;
+-
+- client = kmalloc(sizeof(*client), GFP_KERNEL);
+- if (client == NULL)
+- return -ENOMEM;
+- client->adapter = adap;
+- client->addr = addr;
+- client->driver = &i2c_driver_max1617;
+- sprintf(client->name, "%s-%x", IF_NAME, addr);
+- if ((err = i2c_attach_client(client)) < 0) {
+- kfree(client);
+- return err;
+- }
+-
+- m = kmalloc(sizeof(*m), GFP_KERNEL);
+- if (m == NULL) {
+- i2c_detach_client(client);
+- kfree(client);
+- return -ENOMEM;
+- }
+- m->client = client;
+- m->remote = m->local = 0;
+- init_timer(&m->timer);
+- m->timer.data = (unsigned long)m;
+- m->timer.function = max1617_update;
+- if (max1617_polling) {
+- m->timer.expires = jiffies + (HZ * MAX1617_POLL_PERIOD);
+- add_timer(&m->timer);
+- }
+- client->data = m;
+- return 0;
+-}
+-
+-/* initiate probing on a particular bus */
+-static int max1617_probe(struct i2c_adapter *adap)
+-{
+- /* Look for this device on the given adapter (bus) */
+- if (adap->id == (I2C_ALGO_SIBYTE | I2C_HW_SIBYTE))
+- return i2c_probe(adap, &addr_data, &max1617_attach);
+- else
+- return 0;
+-}
+-
+-static int max1617_detach(struct i2c_client *device)
+-{
+- struct max1617_info *m = (struct max1617_info *)device->data;
+- int rc = 0;
+-
+- if ((rc = i2c_detach_client(device)) != 0) {
+- printk(IF_NAME "detach failed: %d\n", rc);
+- } else {
+- kfree(device);
+- if (max1617_polling)
+- del_timer(&m->timer);
+- kfree(m);
+- }
+- return rc;
+-}
+-
+-static int max1617_command(struct i2c_client *device, unsigned int cmd, void *arg)
+-{
+- return 0;
+-}
+-
+-static void max1617_inc_use(struct i2c_client *client)
+-{
+-#ifdef MODULE
+- MOD_INC_USE_COUNT;
+-#endif
+-}
+-
+-static void max1617_dec_use(struct i2c_client *client)
+-{
+-#ifdef MODULE
+- MOD_DEC_USE_COUNT;
+-#endif
+-}
+-
+-void i2c_max1617_init(void)
+-{
+- i2c_add_driver(&i2c_driver_max1617);
+-}
+-
+-EXPORT_NO_SYMBOLS;
+-
+-#ifdef MODULE
+-MODULE_AUTHOR("Kip Walker, Broadcom Corp.");
+-MODULE_DESCRIPTION("Max 1617 temperature sensor for SiByte SOC boards");
+-MODULE_LICENSE("GPL");
+-
+-int init_module(void)
+-{
+- i2c_max1617_init();
+- return 0;
+-}
+-
+-void cleanup_module(void)
+-{
+- i2c_del_driver(&i2c_driver_max1617);
+-}
+-#endif
+--- linux-old/drivers/i2c/i2c-pcf-epp.c Sun Aug 31 14:51:56 CEST 2003
++++ linux/drivers/i2c/i2c-pcf-epp.c Sun Aug 31 14:51:56 CEST 2003
+@@ -0,0 +1,281 @@
++/* ------------------------------------------------------------------------- */
++/* i2c-pcf-epp.c i2c-hw access for PCF8584 style EPP parallel port adapters */
++/* ------------------------------------------------------------------------- */
++/* Copyright (C) 1998-99 Hans Berglund
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
++/* ------------------------------------------------------------------------- */
++
++/* With some changes from Ryosuke Tajima <rosk@jsk.t.u-tokyo.ac.jp> */
++
++#include <linux/kernel.h>
++#include <linux/ioport.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/parport.h>
++#include <linux/i2c.h>
++#include <linux/i2c-algo-pcf.h>
++#include <asm/irq.h>
++#include <asm/io.h>
++
++
++struct i2c_pcf_epp {
++ int pe_base;
++ int pe_irq;
++ int pe_clock;
++ int pe_own;
++} ;
++
++#define DEFAULT_BASE 0x378
++#define DEFAULT_IRQ 7
++#define DEFAULT_CLOCK 0x1c
++#define DEFAULT_OWN 0x55
++
++static int base = 0;
++static int irq = 0;
++static int clock = 0;
++static int own = 0;
++static int i2c_debug=0;
++static struct i2c_pcf_epp gpe;
++static wait_queue_head_t pcf_wait;
++static int pcf_pending;
++static spinlock_t irq_driver_lock = SPIN_LOCK_UNLOCKED;
++
++/* ----- global defines ----------------------------------------------- */
++#define DEB(x) if (i2c_debug>=1) x
++#define DEB2(x) if (i2c_debug>=2) x
++#define DEB3(x) if (i2c_debug>=3) x
++#define DEBE(x) x /* error messages */
++
++/* --- Convenience defines for the EPP/SPP port: */
++#define BASE ((struct i2c_pcf_epp *)(data))->pe_base
++// #define DATA BASE /* SPP data port */
++#define STAT (BASE+1) /* SPP status port */
++#define CTRL (BASE+2) /* SPP control port */
++#define EADD (BASE+3) /* EPP address port */
++#define EDAT (BASE+4) /* EPP data port */
++
++/* ----- local functions ---------------------------------------------- */
++
++static void pcf_epp_setbyte(void *data, int ctl, int val)
++{
++ if (ctl) {
++ if (gpe.pe_irq > 0) {
++ DEB3(printk(KERN_DEBUG "i2c-pcf-epp.o: Write control 0x%x\n",
++ val|I2C_PCF_ENI));
++ // set A0 pin HIGH
++ outb(inb(CTRL) | PARPORT_CONTROL_INIT, CTRL);
++ // DEB3(printk(KERN_DEBUG "i2c-pcf-epp.o: CTRL port = 0x%x\n", inb(CTRL)));
++ // DEB3(printk(KERN_DEBUG "i2c-pcf-epp.o: STAT port = 0x%x\n", inb(STAT)));
++
++ // EPP write data cycle
++ outb(val | I2C_PCF_ENI, EDAT);
++ } else {
++ DEB3(printk(KERN_DEBUG "i2c-pcf-epp.o: Write control 0x%x\n", val));
++ // set A0 pin HIGH
++ outb(inb(CTRL) | PARPORT_CONTROL_INIT, CTRL);
++ outb(val, CTRL);
++ }
++ } else {
++ DEB3(printk(KERN_DEBUG "i2c-pcf-epp.o: Write data 0x%x\n", val));
++		// set A0 pin LOW
++ outb(inb(CTRL) & ~PARPORT_CONTROL_INIT, CTRL);
++ // DEB3(printk(KERN_DEBUG "i2c-pcf-epp.o: CTRL port = 0x%x\n", inb(CTRL)));
++ // DEB3(printk(KERN_DEBUG "i2c-pcf-epp.o: STAT port = 0x%x\n", inb(STAT)));
++ outb(val, EDAT);
++ }
++}
++
++static int pcf_epp_getbyte(void *data, int ctl)
++{
++ int val;
++
++ if (ctl) {
++ // set A0 pin HIGH
++ outb(inb(CTRL) | PARPORT_CONTROL_INIT, CTRL);
++ val = inb(EDAT);
++ DEB3(printk(KERN_DEBUG "i2c-pcf-epp.o: Read control 0x%x\n", val));
++ } else {
++ // set A0 pin LOW
++ outb(inb(CTRL) & ~PARPORT_CONTROL_INIT, CTRL);
++ val = inb(EDAT);
++ DEB3(printk(KERN_DEBUG "i2c-pcf-epp.o: Read data 0x%x\n", val));
++ }
++ return (val);
++}
++
++static int pcf_epp_getown(void *data)
++{
++ return (gpe.pe_own);
++}
++
++
++static int pcf_epp_getclock(void *data)
++{
++ return (gpe.pe_clock);
++}
++
++#if 0
++static void pcf_epp_sleep(unsigned long timeout)
++{
++ schedule_timeout( timeout * HZ);
++}
++#endif
++
++static void pcf_epp_waitforpin(void) {
++ int timeout = 10;
++
++ if (gpe.pe_irq > 0) {
++ spin_lock_irq(&irq_driver_lock);
++ if (pcf_pending == 0) {
++ interruptible_sleep_on_timeout(&pcf_wait, timeout*HZ);
++ //udelay(100);
++ } else {
++ pcf_pending = 0;
++ }
++ spin_unlock_irq(&irq_driver_lock);
++ } else {
++ udelay(100);
++ }
++}
++
++static void pcf_epp_handler(int this_irq, void *dev_id, struct pt_regs *regs) {
++ pcf_pending = 1;
++ wake_up_interruptible(&pcf_wait);
++ DEB3(printk(KERN_DEBUG "i2c-pcf-epp.o: in interrupt handler.\n"));
++}
++
++
++static int pcf_epp_init(void *data)
++{
++ if (check_region(gpe.pe_base, 5) < 0 ) {
++
++ printk(KERN_WARNING "Could not request port region with base 0x%x\n", gpe.pe_base);
++ return -ENODEV;
++ } else {
++ request_region(gpe.pe_base, 5, "i2c (EPP parallel port adapter)");
++ }
++
++ DEB3(printk(KERN_DEBUG "i2c-pcf-epp.o: init status port = 0x%x\n", inb(0x379)));
++
++ if (gpe.pe_irq > 0) {
++ if (request_irq(gpe.pe_irq, pcf_epp_handler, 0, "PCF8584", 0) < 0) {
++ printk(KERN_NOTICE "i2c-pcf-epp.o: Request irq%d failed\n", gpe.pe_irq);
++ gpe.pe_irq = 0;
++ } else
++ disable_irq(gpe.pe_irq);
++ enable_irq(gpe.pe_irq);
++ }
++ // EPP mode initialize
++ // enable interrupt from nINTR pin
++ outb(inb(CTRL)|0x14, CTRL);
++ // clear ERROR bit of STAT
++ outb(inb(STAT)|0x01, STAT);
++ outb(inb(STAT)&~0x01,STAT);
++
++ return 0;
++}
++
++/* ------------------------------------------------------------------------
++ * Encapsulate the above functions in the correct operations structure.
++ * This is only done when more than one hardware adapter is supported.
++ */
++static struct i2c_algo_pcf_data pcf_epp_data = {
++ .setpcf = pcf_epp_setbyte,
++ .getpcf = pcf_epp_getbyte,
++ .getown = pcf_epp_getown,
++ .getclock = pcf_epp_getclock,
++ .waitforpin = pcf_epp_waitforpin,
++ .udelay = 80,
++ .mdelay = 80,
++ .timeout = HZ,
++};
++
++static struct i2c_adapter pcf_epp_ops = {
++ .owner = THIS_MODULE,
++ .name = "PCF8584 EPP adapter",
++ .id = I2C_HW_P_LP,
++ .algo_data = &pcf_epp_data,
++};
++
++static int __init i2c_pcfepp_init(void)
++{
++ struct i2c_pcf_epp *pepp = &gpe;
++
++ printk(KERN_DEBUG "i2c-pcf-epp.o: i2c pcf8584-epp adapter module version %s (%s)\n", I2C_VERSION, I2C_DATE);
++ if (base == 0)
++ pepp->pe_base = DEFAULT_BASE;
++ else
++ pepp->pe_base = base;
++
++ if (irq == 0)
++ pepp->pe_irq = DEFAULT_IRQ;
++ else if (irq<0) {
++ // switch off irq
++ pepp->pe_irq=0;
++ } else {
++ pepp->pe_irq = irq;
++ }
++ if (clock == 0)
++ pepp->pe_clock = DEFAULT_CLOCK;
++ else
++ pepp->pe_clock = clock;
++
++ if (own == 0)
++ pepp->pe_own = DEFAULT_OWN;
++ else
++ pepp->pe_own = own;
++
++ pcf_epp_data.data = (void *)pepp;
++ init_waitqueue_head(&pcf_wait);
++ if (pcf_epp_init(pepp) == 0) {
++ int ret;
++ if ( (ret = i2c_pcf_add_bus(&pcf_epp_ops)) < 0) {
++ printk(KERN_WARNING "i2c_pcf_add_bus caused an error: %d\n",ret);
++ release_region(pepp->pe_base , 5);
++ return ret;
++ }
++ } else {
++
++ return -ENODEV;
++ }
++ printk(KERN_DEBUG "i2c-pcf-epp.o: found device at %#x.\n", pepp->pe_base);
++ return 0;
++}
++
++static void __exit pcf_epp_exit(void)
++{
++ i2c_pcf_del_bus(&pcf_epp_ops);
++ if (gpe.pe_irq > 0) {
++ disable_irq(gpe.pe_irq);
++ free_irq(gpe.pe_irq, 0);
++ }
++ release_region(gpe.pe_base , 5);
++}
++
++MODULE_AUTHOR("Hans Berglund <hb@spacetec.no> \n modified by Ryosuke Tajima <rosk@jsk.t.u-tokyo.ac.jp>");
++MODULE_DESCRIPTION("I2C-Bus adapter routines for PCF8584 EPP parallel port adapter");
++MODULE_LICENSE("GPL");
++
++MODULE_PARM(base, "i");
++MODULE_PARM(irq, "i");
++MODULE_PARM(clock, "i");
++MODULE_PARM(own, "i");
++MODULE_PARM(i2c_debug, "i");
++
++module_init(i2c_pcfepp_init);
++module_exit(pcf_epp_exit);
+--- linux-old/include/linux/i2c-pcf8584.h Sun Aug 31 14:51:56 CEST 2003
++++ linux/include/linux/i2c-pcf8584.h Sun Aug 31 14:51:56 CEST 2003
+@@ -0,0 +1,78 @@
++/* -------------------------------------------------------------------- */
++/* i2c-pcf8584.h: PCF 8584 global defines */
++/* -------------------------------------------------------------------- */
++/* Copyright (C) 1996 Simon G. Vogl
++ 1999 Hans Berglund
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
++/* -------------------------------------------------------------------- */
++
++/* With some changes from Frodo Looijaard <frodol@dds.nl> */
++
++/* $Id$ */
++
++#ifndef _LINUX_I2C_PCF8584_H
++#define _LINUX_I2C_PCF8584_H
++
++/* ----- Control register bits ---------------------------------------- */
++#define I2C_PCF_PIN 0x80
++#define I2C_PCF_ESO 0x40
++#define I2C_PCF_ES1 0x20
++#define I2C_PCF_ES2 0x10
++#define I2C_PCF_ENI 0x08
++#define I2C_PCF_STA 0x04
++#define I2C_PCF_STO 0x02
++#define I2C_PCF_ACK 0x01
++
++#define I2C_PCF_START (I2C_PCF_PIN | I2C_PCF_ESO | I2C_PCF_STA | I2C_PCF_ACK)
++#define I2C_PCF_STOP (I2C_PCF_PIN | I2C_PCF_ESO | I2C_PCF_STO | I2C_PCF_ACK)
++#define I2C_PCF_REPSTART ( I2C_PCF_ESO | I2C_PCF_STA | I2C_PCF_ACK)
++#define I2C_PCF_IDLE (I2C_PCF_PIN | I2C_PCF_ESO | I2C_PCF_ACK)
++
++/* ----- Status register bits ----------------------------------------- */
++/*#define I2C_PCF_PIN 0x80 as above*/
++
++#define I2C_PCF_INI 0x40 /* 1 if not initialized */
++#define I2C_PCF_STS 0x20
++#define I2C_PCF_BER 0x10
++#define I2C_PCF_AD0 0x08
++#define I2C_PCF_LRB 0x08
++#define I2C_PCF_AAS 0x04
++#define I2C_PCF_LAB 0x02
++#define I2C_PCF_BB 0x01
++
++/* ----- Chip clock frequencies --------------------------------------- */
++#define I2C_PCF_CLK3 0x00
++#define I2C_PCF_CLK443 0x10
++#define I2C_PCF_CLK6 0x14
++#define I2C_PCF_CLK 0x18
++#define I2C_PCF_CLK12 0x1c
++
++/* ----- transmission frequencies ------------------------------------- */
++#define I2C_PCF_TRNS90 0x00 /* 90 kHz */
++#define I2C_PCF_TRNS45 0x01 /* 45 kHz */
++#define I2C_PCF_TRNS11 0x02 /* 11 kHz */
++#define I2C_PCF_TRNS15 0x03 /* 1.5 kHz */
++
++
++/* ----- Access to internal registers according to ES1,ES2 ------------ */
++/* they are mapped to the data port ( a0 = 0 ) */
++/* available when ESO == 0 : */
++
++#define I2C_PCF_OWNADR 0
++#define I2C_PCF_INTREG I2C_PCF_ES2
++#define I2C_PCF_CLKREG I2C_PCF_ES1
++
++#endif /* _LINUX_I2C_PCF8584_H */
+--- linux-old/drivers/i2c/i2c-philips-par.c Sun Aug 31 14:51:56 CEST 2003
++++ linux/drivers/i2c/i2c-philips-par.c Sun Aug 31 14:51:56 CEST 2003
+@@ -22,5 +22,5 @@
+ Frodo Looijaard <frodol@dds.nl> */
+
+-/* $Id$ */
++/* $Id$ */
+
+ #include <linux/kernel.h>
+@@ -30,12 +30,8 @@
+ #include <linux/stddef.h>
+ #include <linux/parport.h>
+-
++#include <linux/slab.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c-algo-bit.h>
+
+-#ifndef __exit
+-#define __exit __init
+-#endif
+-
+ static int type;
+
+@@ -131,24 +127,4 @@
+ }
+
+-static int bit_lp_reg(struct i2c_client *client)
+-{
+- return 0;
+-}
+-
+-static int bit_lp_unreg(struct i2c_client *client)
+-{
+- return 0;
+-}
+-
+-static void bit_lp_inc_use(struct i2c_adapter *adap)
+-{
+- MOD_INC_USE_COUNT;
+-}
+-
+-static void bit_lp_dec_use(struct i2c_adapter *adap)
+-{
+- MOD_DEC_USE_COUNT;
+-}
+-
+ /* ------------------------------------------------------------------------
+ * Encapsulate the above functions in the correct operations structure.
+@@ -157,31 +133,26 @@
+
+ static struct i2c_algo_bit_data bit_lp_data = {
+- NULL,
+- bit_lp_setsda,
+- bit_lp_setscl,
+- bit_lp_getsda,
+- bit_lp_getscl,
+- 80, 80, 100, /* waits, timeout */
++ .setsda = bit_lp_setsda,
++ .setscl = bit_lp_setscl,
++ .getsda = bit_lp_getsda,
++ .getscl = bit_lp_getscl,
++ .udelay = 80,
++ .mdelay = 80,
++ .timeout = HZ
+ };
+
+ static struct i2c_algo_bit_data bit_lp_data2 = {
+- NULL,
+- bit_lp_setsda2,
+- bit_lp_setscl2,
+- bit_lp_getsda2,
+- NULL,
+- 80, 80, 100, /* waits, timeout */
++ .setsda = bit_lp_setsda2,
++ .setscl = bit_lp_setscl2,
++ .getsda = bit_lp_getsda2,
++ .udelay = 80,
++ .mdelay = 80,
++ .timeout = HZ
+ };
+
+ static struct i2c_adapter bit_lp_ops = {
+- "Philips Parallel port adapter",
+- I2C_HW_B_LP,
+- NULL,
+- NULL,
+- bit_lp_inc_use,
+- bit_lp_dec_use,
+- bit_lp_reg,
+-
+- bit_lp_unreg,
++ .owner = THIS_MODULE,
++ .name = "Philips Parallel port adapter",
++ .id = I2C_HW_B_LP,
+ };
+
+@@ -191,9 +162,9 @@
+ GFP_KERNEL);
+ if (!adapter) {
+- printk("i2c-philips-par: Unable to malloc.\n");
++ printk(KERN_ERR "i2c-philips-par: Unable to malloc.\n");
+ return;
+ }
+
+- printk("i2c-philips-par.o: attaching to %s\n", port->name);
++ printk(KERN_DEBUG "i2c-philips-par.o: attaching to %s\n", port->name);
+
+ adapter->pdev = parport_register_device(port, "i2c-philips-par",
+@@ -202,5 +173,6 @@
+ NULL);
+ if (!adapter->pdev) {
+- printk("i2c-philips-par: Unable to register with parport.\n");
++ printk(KERN_ERR "i2c-philips-par: Unable to register with parport.\n");
++ kfree(adapter);
+ return;
+ }
+@@ -211,6 +183,10 @@
+ adapter->bit_lp_data.data = port;
+
++ if (parport_claim_or_block(adapter->pdev) < 0 ) {
++ printk(KERN_ERR "i2c-philips-par: Could not claim parallel port.\n");
++ kfree(adapter);
++ return;
++ }
+ /* reset hardware to sane state */
+- parport_claim_or_block(adapter->pdev);
+ bit_lp_setsda(port, 1);
+ bit_lp_setscl(port, 1);
+@@ -219,5 +195,5 @@
+ if (i2c_bit_add_bus(&adapter->adapter) < 0)
+ {
+- printk("i2c-philips-par: Unable to register with I2C.\n");
++ printk(KERN_ERR "i2c-philips-par: Unable to register with I2C.\n");
+ parport_unregister_device(adapter->pdev);
+ kfree(adapter);
+@@ -251,5 +227,4 @@
+
+
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,4)
+ static struct parport_driver i2c_driver = {
+ "i2c-philips-par",
+@@ -258,19 +233,10 @@
+ NULL
+ };
+-#endif
+
+ int __init i2c_bitlp_init(void)
+ {
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,4)
+- struct parport *port;
+-#endif
+- printk("i2c-philips-par.o: i2c Philips parallel port adapter module\n");
++ printk(KERN_INFO "i2c-philips-par.o: i2c Philips parallel port adapter module version %s (%s)\n", I2C_VERSION, I2C_DATE);
+
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,4)
+ parport_register_driver(&i2c_driver);
+-#else
+- for (port = parport_enumerate(); port; port=port->next)
+- i2c_parport_attach(port);
+-#endif
+
+ return 0;
+@@ -279,11 +245,5 @@
+ void __exit i2c_bitlp_exit(void)
+ {
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,4)
+ parport_unregister_driver(&i2c_driver);
+-#else
+- struct parport *port;
+- for (port = parport_enumerate(); port; port=port->next)
+- i2c_parport_detach(port);
+-#endif
+ }
+
+@@ -296,13 +256,4 @@
+ MODULE_PARM(type, "i");
+
+-#ifdef MODULE
+-int init_module(void)
+-{
+- return i2c_bitlp_init();
+-}
+-
+-void cleanup_module(void)
+-{
+- i2c_bitlp_exit();
+-}
+-#endif
++module_init(i2c_bitlp_init);
++module_exit(i2c_bitlp_exit);
+--- linux-old/drivers/i2c/i2c-pport.c Sun Aug 31 14:51:56 CEST 2003
++++ linux/drivers/i2c/i2c-pport.c Sun Aug 31 14:51:56 CEST 2003
+@@ -0,0 +1,205 @@
++/* ------------------------------------------------------------------------- */
++/* i2c-pport.c i2c-hw access for primitive i2c par. port adapter */
++/* ------------------------------------------------------------------------- */
++/* Copyright (C) 2001 Daniel Smolik
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
++/* ------------------------------------------------------------------------- */
++
++/*
++ See doc/i2c-pport for instructions on wiring to the
++ parallel port connector.
++
++ Cut & paste :-) based on Velleman K9000 driver by Simon G. Vogl
++*/
++
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/ioport.h>
++#include <linux/errno.h>
++#include <linux/i2c.h>
++#include <linux/i2c-algo-bit.h>
++#include <asm/io.h>
++
++
++#define DEFAULT_BASE 0x378
++static int base=0;
++static unsigned char PortData = 0;
++
++/* ----- global defines ----------------------------------------------- */
++#define DEB(x) /* should be reasonable open, close &c. */
++#define DEB2(x) /* low level debugging - very slow */
++#define DEBE(x) x /* error messages */
++#define DEBINIT(x) x /* detection status messages */
++
++/* --- Convenience defines for the parallel port: */
++#define BASE (unsigned int)(data)
++#define DATA BASE /* Centronics data port */
++#define STAT (BASE+1) /* Centronics status port */
++#define CTRL (BASE+2) /* Centronics control port */
++
++/* we will use SDA - Auto Linefeed (pin 14), control register bit 1 */
++/* we will use SCL - Initialize Printer (pin 16), control register bit 2 */
++
++#define SET_SCL | 0x04
++#define CLR_SCL & 0xFB
++
++
++
++
++#define SET_SDA & 0x04
++#define CLR_SDA | 0x02
++
++
++/* ----- local functions ---------------------------------------------- */
++
++
++static void bit_pport_setscl(void *data, int state)
++{
++ if (state) {
++ //high
++ PortData = PortData SET_SCL;
++ } else {
++ //low
++ PortData = PortData CLR_SCL;
++ }
++ outb(PortData, CTRL);
++}
++
++static void bit_pport_setsda(void *data, int state)
++{
++ if (state) {
++
++ PortData = PortData SET_SDA;
++ } else {
++
++ PortData = PortData CLR_SDA;
++ }
++ outb(PortData, CTRL);
++}
++
++static int bit_pport_getscl(void *data)
++{
++
++ return ( 4 == ( (inb_p(CTRL)) & 0x04 ) );
++}
++
++static int bit_pport_getsda(void *data)
++{
++ return ( 0 == ( (inb_p(CTRL)) & 0x02 ) );
++}
++
++static int bit_pport_init(void)
++{
++ if (!request_region((base+2),1, "i2c (PPORT adapter)")) {
++ return -ENODEV;
++ } else {
++ /* test for PPORT adap. */
++
++
++ PortData=inb(base+2);
++ PortData= (PortData SET_SDA) SET_SCL;
++ outb(PortData,base+2);
++
++ if (!(inb(base+2) | 0x06)) { /* SDA and SCL will be high */
++ DEBINIT(printk("i2c-pport.o: SDA and SCL was low.\n"));
++ return -ENODEV;
++ } else {
++
++ /*SCL high and SDA low*/
++ PortData = PortData SET_SCL CLR_SDA;
++ outb(PortData,base+2);
++ schedule_timeout(400);
++ if ( !(inb(base+2) | 0x4) ) {
++ //outb(0x04,base+2);
++ DEBINIT(printk("i2c-port.o: SDA was high.\n"));
++ return -ENODEV;
++ }
++ }
++ bit_pport_setsda((void*)base,1);
++ bit_pport_setscl((void*)base,1);
++ }
++ return 0;
++}
++
++
++/* ------------------------------------------------------------------------
++ * Encapsulate the above functions in the correct operations structure.
++ * This is only done when more than one hardware adapter is supported.
++ */
++static struct i2c_algo_bit_data bit_pport_data = {
++ .setsda = bit_pport_setsda,
++ .setscl = bit_pport_setscl,
++ .getsda = bit_pport_getsda,
++ .getscl = bit_pport_getscl,
++ .udelay = 40,
++ .mdelay = 80,
++ .timeout = HZ
++};
++
++static struct i2c_adapter bit_pport_ops = {
++ .owner = THIS_MODULE,
++ .name = "Primitive Parallel port adaptor",
++ .id = I2C_HW_B_PPORT,
++ .algo_data = &bit_pport_data,
++};
++
++int __init i2c_bitpport_init(void)
++{
++ printk("i2c-pport.o: i2c Primitive parallel port adapter module version %s (%s)\n", I2C_VERSION, I2C_DATE);
++
++ if (base==0) {
++ /* probe some values */
++ base=DEFAULT_BASE;
++ bit_pport_data.data=(void*)DEFAULT_BASE;
++ if (bit_pport_init()==0) {
++ if(i2c_bit_add_bus(&bit_pport_ops) < 0)
++ return -ENODEV;
++ } else {
++ return -ENODEV;
++ }
++ } else {
++ bit_pport_data.data=(void*)base;
++ if (bit_pport_init()==0) {
++ if(i2c_bit_add_bus(&bit_pport_ops) < 0)
++ return -ENODEV;
++ } else {
++ return -ENODEV;
++ }
++ }
++ printk("i2c-pport.o: found device at %#x.\n",base);
++ return 0;
++}
++
++static void __exit i2c_bitpport_exit(void)
++{
++ i2c_bit_del_bus(&bit_pport_ops);
++ release_region((base+2),1);
++}
++
++EXPORT_NO_SYMBOLS;
++
++MODULE_AUTHOR("Daniel Smolik <marvin@sitour.cz>");
++MODULE_DESCRIPTION("I2C-Bus adapter routines for Primitive parallel port adapter");
++MODULE_LICENSE("GPL");
++
++MODULE_PARM(base, "i");
++
++module_init(i2c_bitpport_init);
++module_exit(i2c_bitpport_exit);
+--- linux-old/drivers/i2c/i2c-proc.c Sun Aug 31 14:51:57 CEST 2003
++++ linux/drivers/i2c/i2c-proc.c Sun Aug 31 14:51:57 CEST 2003
+@@ -24,5 +24,4 @@
+ */
+
+-#include <linux/version.h>
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -31,25 +30,13 @@
+ #include <linux/sysctl.h>
+ #include <linux/proc_fs.h>
++#include <linux/init.h>
+ #include <linux/ioport.h>
+-#include <asm/uaccess.h>
+-
+ #include <linux/i2c.h>
+ #include <linux/i2c-proc.h>
++#include <asm/uaccess.h>
+
+-#include <linux/init.h>
+-
+-/* FIXME need i2c versioning */
+-#define LM_DATE "20010825"
+-#define LM_VERSION "2.6.1"
+-
+-#ifndef THIS_MODULE
+-#define THIS_MODULE NULL
+-#endif
+-
+-static int i2c_create_name(char **name, const char *prefix,
+- struct i2c_adapter *adapter, int addr);
+ static int i2c_parse_reals(int *nrels, void *buffer, int bufsize,
+ long *results, int magnitude);
+-static int i2c_write_reals(int nrels, void *buffer, int *bufsize,
++static int i2c_write_reals(int nrels, void *buffer, size_t *bufsize,
+ long *results, int magnitude);
+ static int i2c_proc_chips(ctl_table * ctl, int write,
+@@ -61,24 +48,8 @@
+ void **context);
+
+-int __init sensors_init(void);
+-
+ #define SENSORS_ENTRY_MAX 20
+ static struct ctl_table_header *i2c_entries[SENSORS_ENTRY_MAX];
+
+ static struct i2c_client *i2c_clients[SENSORS_ENTRY_MAX];
+-static unsigned short i2c_inodes[SENSORS_ENTRY_MAX];
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
+-static void i2c_fill_inode(struct inode *inode, int fill);
+-static void i2c_dir_fill_inode(struct inode *inode, int fill);
+-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1) */
+-
+-static ctl_table sysctl_table[] = {
+- {CTL_DEV, "dev", NULL, 0, 0555},
+- {0},
+- {DEV_SENSORS, "sensors", NULL, 0, 0555},
+- {0},
+- {0, NULL, NULL, 0, 0555},
+- {0}
+-};
+
+ static ctl_table i2c_proc_dev_sensors[] = {
+@@ -101,5 +72,4 @@
+
+ static struct ctl_table_header *i2c_proc_header;
+-static int i2c_initialized;
+
+ /* This returns a nice name for a new directory; for example lm78-isa-0310
+@@ -107,23 +77,38 @@
+ a LM75 chip on the third i2c bus at address 0x4e).
+ name is allocated first. */
+-int i2c_create_name(char **name, const char *prefix,
+- struct i2c_adapter *adapter, int addr)
++static char *generate_name(struct i2c_client *client, const char *prefix)
+ {
+- char name_buffer[50];
+- int id;
+- if (i2c_is_isa_adapter(adapter))
++ struct i2c_adapter *adapter = client->adapter;
++ int addr = client->addr;
++ char name_buffer[50], *name;
++
++ if (i2c_is_isa_adapter(adapter)) {
+ sprintf(name_buffer, "%s-isa-%04x", prefix, addr);
+- else {
+- if ((id = i2c_adapter_id(adapter)) < 0)
+- return -ENOENT;
++ } else if (adapter->algo->smbus_xfer || adapter->algo->master_xfer) {
++ int id = i2c_adapter_id(adapter);
++ if (id < 0)
++ return ERR_PTR(-ENOENT);
+ sprintf(name_buffer, "%s-i2c-%d-%02x", prefix, id, addr);
++ } else { /* dummy adapter, generate prefix */
++ int end, i;
++
++ sprintf(name_buffer, "%s-", prefix);
++ end = strlen(name_buffer);
++
++ for (i = 0; i < 32; i++) {
++ if (adapter->algo->name[i] == ' ')
++ break;
++ name_buffer[end++] = tolower(adapter->algo->name[i]);
++ }
++
++ name_buffer[end] = 0;
++ sprintf(name_buffer + end, "-%04x", addr);
+ }
+- *name = kmalloc(strlen(name_buffer) + 1, GFP_KERNEL);
+- if (!*name) {
+- printk (KERN_WARNING "i2c_create_name: not enough memory\n");
+- return -ENOMEM;
+- }
+- strcpy(*name, name_buffer);
+- return 0;
++
++ name = kmalloc(strlen(name_buffer) + 1, GFP_KERNEL);
++ if (!name)
++ return ERR_PTR(-ENOMEM);
++ strcpy(name, name_buffer);
++ return name;
+ }
+
+@@ -134,142 +119,89 @@
+ copied in memory. The extra2 field of each file is set to point to client.
+ If any driver wants subdirectories within the newly created directory,
+- this function must be updated!
+- controlling_mod is the controlling module. It should usually be
+- THIS_MODULE when calling. Note that this symbol is not defined in
+- kernels before 2.3.13; define it to NULL in that case. We will not use it
+- for anything older than 2.3.27 anyway. */
++ this function must be updated! */
+ int i2c_register_entry(struct i2c_client *client, const char *prefix,
+- ctl_table * ctl_template,
+- struct module *controlling_mod)
++ struct ctl_table *ctl_template)
+ {
+- int i, res, len, id;
+- ctl_table *new_table;
+- char *name;
+- struct ctl_table_header *new_header;
++ struct { struct ctl_table root[2], dev[2], sensors[2]; } *tbl;
++ struct ctl_table_header *hdr;
++ struct ctl_table *tmp, *leaf;
++ const char *name;
++ int id, len = 0;
+
+- if ((res = i2c_create_name(&name, prefix, client->adapter,
+- client->addr))) return res;
++ name = generate_name(client, prefix);
++ if (IS_ERR(name))
++ return PTR_ERR(name);
+
+- for (id = 0; id < SENSORS_ENTRY_MAX; id++)
+- if (!i2c_entries[id]) {
+- break;
+- }
+- if (id == SENSORS_ENTRY_MAX) {
+- kfree(name);
+- return -ENOMEM;
++ for (id = 0; id < SENSORS_ENTRY_MAX; id++) {
++ if (!i2c_entries[id])
++ goto free_slot;
+ }
+- id += 256;
+
+- len = 0;
++ goto out_free_name;
++
++ free_slot:
+ while (ctl_template[len].procname)
+ len++;
+- len += 7;
+- if (!(new_table = kmalloc(sizeof(ctl_table) * len, GFP_KERNEL))) {
+- kfree(name);
+- return -ENOMEM;
+- }
+-
+- memcpy(new_table, sysctl_table, 6 * sizeof(ctl_table));
+- new_table[0].child = &new_table[2];
+- new_table[2].child = &new_table[4];
+- new_table[4].child = &new_table[6];
+- new_table[4].procname = name;
+- new_table[4].ctl_name = id;
+- memcpy(new_table + 6, ctl_template, (len - 6) * sizeof(ctl_table));
+- for (i = 6; i < len; i++)
+- new_table[i].extra2 = client;
+-
+- if (!(new_header = register_sysctl_table(new_table, 0))) {
+- kfree(new_table);
+- kfree(name);
+- return -ENOMEM;
+- }
+-
+- i2c_entries[id - 256] = new_header;
+-
+- i2c_clients[id - 256] = client;
+-#ifdef DEBUG
+- if (!new_header || !new_header->ctl_table ||
+- !new_header->ctl_table->child ||
+- !new_header->ctl_table->child->child ||
+- !new_header->ctl_table->child->child->de) {
+- printk
+- ("i2c-proc.o: NULL pointer when trying to install fill_inode fix!\n");
+- return id;
+- }
+-#endif /* DEBUG */
+- i2c_inodes[id - 256] =
+- new_header->ctl_table->child->child->de->low_ino;
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,27))
+- new_header->ctl_table->child->child->de->owner = controlling_mod;
+-#else
+- new_header->ctl_table->child->child->de->fill_inode =
+- &i2c_dir_fill_inode;
+-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,27)) */
+-
+- return id;
++ tbl = kmalloc(sizeof(*tbl) + sizeof(ctl_table) * (len + 1),
++ GFP_KERNEL);
++ if (!tbl)
++ goto out_free_name;
++ memset(tbl, 0, sizeof(*tbl));
++
++ /* The client sysctls */
++ leaf = (struct ctl_table *) (tbl + 1);
++ memcpy(leaf, ctl_template, sizeof(ctl_table) * (len+1));
++ for (tmp = leaf; tmp->ctl_name; tmp++)
++ tmp->extra2 = client;
++
++ tbl->sensors->ctl_name = id+256;
++ tbl->sensors->procname = name;
++ tbl->sensors->mode = 0555;
++ tbl->sensors->child = leaf;
++
++ tbl->dev->ctl_name = DEV_SENSORS;
++ tbl->dev->procname = "sensors";
++ tbl->dev->mode = 0555;
++ tbl->dev->child = tbl->sensors;
++
++ tbl->root->ctl_name = CTL_DEV;
++ tbl->root->procname = "dev";
++ tbl->root->mode = 0555;
++ tbl->root->child = tbl->dev;
++
++ hdr = register_sysctl_table(tbl->root, 0);
++ if (!hdr)
++ goto out_free_tbl;
++
++ i2c_entries[id] = hdr;
++ i2c_clients[id] = client;
++
++ return (id + 256); /* XXX(hch) why?? */
++
++ out_free_tbl:
++ kfree(tbl);
++ out_free_name:
++ kfree(name);
++ return -ENOMEM;
+ }
+
+ void i2c_deregister_entry(int id)
+ {
+- ctl_table *table;
+- char *temp;
+ id -= 256;
+- if (i2c_entries[id]) {
+- table = i2c_entries[id]->ctl_table;
+- unregister_sysctl_table(i2c_entries[id]);
+- /* 2-step kfree needed to keep gcc happy about const points */
+- (const char *) temp = table[4].procname;
+- kfree(temp);
+- kfree(table);
+- i2c_entries[id] = NULL;
+- i2c_clients[id] = NULL;
+- }
+-}
+
+-/* Monitor access for /proc/sys/dev/sensors; make unloading i2c-proc.o
+- impossible if some process still uses it or some file in it */
+-void i2c_fill_inode(struct inode *inode, int fill)
+-{
+- if (fill)
+- MOD_INC_USE_COUNT;
+- else
+- MOD_DEC_USE_COUNT;
+-}
+-
+-/* Monitor access for /proc/sys/dev/sensors/ directories; make unloading
+- the corresponding module impossible if some process still uses it or
+- some file in it */
+-void i2c_dir_fill_inode(struct inode *inode, int fill)
+-{
+- int i;
+- struct i2c_client *client;
++ if (i2c_entries[id]) {
++ struct ctl_table_header *hdr = i2c_entries[id];
++ struct ctl_table *tbl = hdr->ctl_table;
+
+-#ifdef DEBUG
+- if (!inode) {
+- printk("i2c-proc.o: Warning: inode NULL in fill_inode()\n");
+- return;
++ unregister_sysctl_table(hdr);
++ kfree(tbl->child->child->procname);
++ kfree(tbl); /* actually the whole anonymous struct */
+ }
+-#endif /* def DEBUG */
+
+- for (i = 0; i < SENSORS_ENTRY_MAX; i++)
+- if (i2c_clients[i]
+- && (i2c_inodes[i] == inode->i_ino)) break;
+-#ifdef DEBUG
+- if (i == SENSORS_ENTRY_MAX) {
+- printk
+- ("i2c-proc.o: Warning: inode (%ld) not found in fill_inode()\n",
+- inode->i_ino);
+- return;
+- }
+-#endif /* def DEBUG */
+- client = i2c_clients[i];
+- if (fill)
+- client->driver->inc_use(client);
+- else
+- client->driver->dec_use(client);
++ i2c_entries[id] = NULL;
++ i2c_clients[id] = NULL;
+ }
+
+-int i2c_proc_chips(ctl_table * ctl, int write, struct file *filp,
++static int i2c_proc_chips(ctl_table * ctl, int write, struct file *filp,
+ void *buffer, size_t * lenp)
+ {
+@@ -309,5 +241,5 @@
+ }
+
+-int i2c_sysctl_chips(ctl_table * table, int *name, int nlen,
++static int i2c_sysctl_chips(ctl_table * table, int *name, int nlen,
+ void *oldval, size_t * oldlenp, void *newval,
+ size_t newlen, void **context)
+@@ -471,5 +403,5 @@
+ hidden bugs in it, even leading to crashes and things!
+ */
+-int i2c_parse_reals(int *nrels, void *buffer, int bufsize,
++static int i2c_parse_reals(int *nrels, void *buffer, int bufsize,
+ long *results, int magnitude)
+ {
+@@ -572,5 +504,5 @@
+ }
+
+-int i2c_write_reals(int nrels, void *buffer, int *bufsize,
++static int i2c_write_reals(int nrels, void *buffer, size_t *bufsize,
+ long *results, int magnitude)
+ {
+@@ -661,4 +593,5 @@
+
+ for (addr = 0x00; addr <= (is_isa ? 0xffff : 0x7f); addr++) {
++ /* XXX: WTF is going on here??? */
+ if ((is_isa && check_region(addr, 1)) ||
+ (!is_isa && i2c_check_addr(adapter, addr)))
+@@ -685,5 +618,5 @@
+ #ifdef DEBUG
+ printk
+- ("i2c-proc.o: found force parameter for adapter %d, addr %04x\n",
++ (KERN_DEBUG "i2c-proc.o: found force parameter for adapter %d, addr %04x\n",
+ adapter_id, addr);
+ #endif
+@@ -715,5 +648,5 @@
+ #ifdef DEBUG
+ printk
+- ("i2c-proc.o: found ignore parameter for adapter %d, "
++ (KERN_DEBUG "i2c-proc.o: found ignore parameter for adapter %d, "
+ "addr %04x\n", adapter_id, addr);
+ #endif
+@@ -735,5 +668,5 @@
+ #ifdef DEBUG
+ printk
+- ("i2c-proc.o: found ignore_range parameter for adapter %d, "
++ (KERN_DEBUG "i2c-proc.o: found ignore_range parameter for adapter %d, "
+ "addr %04x\n", adapter_id, addr);
+ #endif
+@@ -754,5 +687,5 @@
+ #ifdef DEBUG
+ printk
+- ("i2c-proc.o: found normal isa entry for adapter %d, "
++ (KERN_DEBUG "i2c-proc.o: found normal isa entry for adapter %d, "
+ "addr %04x\n", adapter_id,
+ addr);
+@@ -776,5 +709,5 @@
+ #ifdef DEBUG
+ printk
+- ("i2c-proc.o: found normal isa_range entry for adapter %d, "
++ (KERN_DEBUG "i2c-proc.o: found normal isa_range entry for adapter %d, "
+ "addr %04x", adapter_id, addr);
+ #endif
+@@ -790,5 +723,5 @@
+ #ifdef DEBUG
+ printk
+- ("i2c-proc.o: found normal i2c entry for adapter %d, "
++ (KERN_DEBUG "i2c-proc.o: found normal i2c entry for adapter %d, "
+ "addr %02x", adapter_id, addr);
+ #endif
+@@ -806,5 +739,5 @@
+ #ifdef DEBUG
+ printk
+- ("i2c-proc.o: found normal i2c_range entry for adapter %d, "
++ (KERN_DEBUG "i2c-proc.o: found normal i2c_range entry for adapter %d, "
+ "addr %04x\n", adapter_id, addr);
+ #endif
+@@ -823,5 +756,5 @@
+ #ifdef DEBUG
+ printk
+- ("i2c-proc.o: found probe parameter for adapter %d, "
++ (KERN_DEBUG "i2c-proc.o: found probe parameter for adapter %d, "
+ "addr %04x\n", adapter_id, addr);
+ #endif
+@@ -842,5 +775,5 @@
+ #ifdef DEBUG
+ printk
+- ("i2c-proc.o: found probe_range parameter for adapter %d, "
++ (KERN_DEBUG "i2c-proc.o: found probe_range parameter for adapter %d, "
+ "addr %04x\n", adapter_id, addr);
+ #endif
+@@ -861,28 +794,27 @@
+ }
+
+-int __init sensors_init(void)
++static int __init i2c_proc_init(void)
+ {
+- printk("i2c-proc.o version %s (%s)\n", LM_VERSION, LM_DATE);
+- i2c_initialized = 0;
++ printk(KERN_INFO "i2c-proc.o version %s (%s)\n", I2C_VERSION, I2C_DATE);
+ if (!
+ (i2c_proc_header =
+- register_sysctl_table(i2c_proc, 0))) return -ENOMEM;
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,1))
++ register_sysctl_table(i2c_proc, 0))) {
++ printk(KERN_ERR "i2c-proc.o: error: sysctl interface not supported by kernel!\n");
++ return -EPERM;
++ }
+ i2c_proc_header->ctl_table->child->de->owner = THIS_MODULE;
+-#else
+- i2c_proc_header->ctl_table->child->de->fill_inode =
+- &i2c_fill_inode;
+-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,1)) */
+- i2c_initialized++;
+ return 0;
+ }
+
++static void __exit i2c_proc_exit(void)
++{
++ unregister_sysctl_table(i2c_proc_header);
++}
++
++EXPORT_SYMBOL(i2c_register_entry);
+ EXPORT_SYMBOL(i2c_deregister_entry);
+-EXPORT_SYMBOL(i2c_detect);
+ EXPORT_SYMBOL(i2c_proc_real);
+-EXPORT_SYMBOL(i2c_register_entry);
+ EXPORT_SYMBOL(i2c_sysctl_real);
+-
+-#ifdef MODULE
++EXPORT_SYMBOL(i2c_detect);
+
+ MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
+@@ -890,21 +822,4 @@
+ MODULE_LICENSE("GPL");
+
+-int i2c_cleanup(void)
+-{
+- if (i2c_initialized >= 1) {
+- unregister_sysctl_table(i2c_proc_header);
+- i2c_initialized--;
+- }
+- return 0;
+-}
+-
+-int init_module(void)
+-{
+- return sensors_init();
+-}
+-
+-int cleanup_module(void)
+-{
+- return i2c_cleanup();
+-}
+-#endif /* MODULE */
++module_init(i2c_proc_init);
++module_exit(i2c_proc_exit);
+--- linux-old/include/linux/i2c-proc.h Sun Aug 31 14:51:57 CEST 2003
++++ linux/include/linux/i2c-proc.h Sun Aug 31 14:51:57 CEST 2003
+@@ -1,5 +1,6 @@
+ /*
+- sensors.h - Part of lm_sensors, Linux kernel modules for hardware
+- monitoring
++ i2c-proc.h - Part of the i2c package
++ was originally sensors.h - Part of lm_sensors, Linux kernel modules
++ for hardware monitoring
+ Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
+
+@@ -19,12 +20,7 @@
+ */
+
+-#ifndef SENSORS_SENSORS_H
+-#define SENSORS_SENSORS_H
++#ifndef _LINUX_I2C_PROC_H
++#define _LINUX_I2C_PROC_H
+
+-#ifdef __KERNEL__
+-
+-/* Next two must be included before sysctl.h can be included, in 2.0 kernels */
+-#include <linux/types.h>
+-#include <linux/fs.h>
+ #include <linux/sysctl.h>
+
+@@ -74,6 +70,5 @@
+ extern int i2c_register_entry(struct i2c_client *client,
+ const char *prefix,
+- ctl_table * ctl_template,
+- struct module *controlling_mod);
++ ctl_table * ctl_template);
+
+ extern void i2c_deregister_entry(int id);
+@@ -348,4 +343,29 @@
+ SENSORS_INSMOD
+
++#define SENSORS_INSMOD_8(chip1,chip2,chip3,chip4,chip5,chip6,chip7,chip8) \
++ enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8 }; \
++ SENSORS_MODULE_PARM(force, \
++ "List of adapter,address pairs to boldly assume " \
++ "to be present"); \
++ SENSORS_MODULE_PARM_FORCE(chip1); \
++ SENSORS_MODULE_PARM_FORCE(chip2); \
++ SENSORS_MODULE_PARM_FORCE(chip3); \
++ SENSORS_MODULE_PARM_FORCE(chip4); \
++ SENSORS_MODULE_PARM_FORCE(chip5); \
++ SENSORS_MODULE_PARM_FORCE(chip6); \
++ SENSORS_MODULE_PARM_FORCE(chip7); \
++ SENSORS_MODULE_PARM_FORCE(chip8); \
++ static struct i2c_force_data forces[] = {{force,any_chip}, \
++ {force_ ## chip1,chip1}, \
++ {force_ ## chip2,chip2}, \
++ {force_ ## chip3,chip3}, \
++ {force_ ## chip4,chip4}, \
++ {force_ ## chip5,chip5}, \
++ {force_ ## chip6,chip6}, \
++ {force_ ## chip7,chip7}, \
++ {force_ ## chip8,chip8}, \
++ {NULL}}; \
++ SENSORS_INSMOD
++
+ typedef int i2c_found_addr_proc(struct i2c_adapter *adapter,
+ int addr, unsigned short flags,
+@@ -363,5 +383,5 @@
+ /* This macro is used to scale user-input to sensible values in almost all
+ chip drivers. */
+-extern inline int SENSORS_LIMIT(long value, long low, long high)
++static inline int SENSORS_LIMIT(long value, long low, long high)
+ {
+ if (value < low)
+@@ -373,6 +393,4 @@
+ }
+
+-#endif /* def __KERNEL__ */
+-
+
+ /* The maximum length of the prefix */
+@@ -393,4 +411,4 @@
+ };
+
+-#endif /* def SENSORS_SENSORS_H */
++#endif /* def _LINUX_I2C_PROC_H */
+
+--- linux-old/drivers/i2c/i2c-rpx.c Sun Aug 31 14:51:57 CEST 2003
++++ linux/drivers/i2c/i2c-rpx.c Sun Aug 31 14:51:57 CEST 2003
+@@ -0,0 +1,103 @@
++/*
++ * Embedded Planet RPX Lite MPC8xx CPM I2C interface.
++ * Copyright (c) 1999 Dan Malek (dmalek@jlc.net).
++ *
++ * moved into proper i2c interface;
++ * Brad Parker (brad@heeltoe.com)
++ *
++ * RPX lite specific parts of the i2c interface
++ * Update: There actually isn't anything RPXLite-specific about this module.
++ * This should work for most any 8xx board. The console messages have been
++ * changed to eliminate RPXLite references.
++ */
++
++#include <linux/kernel.h>
++#include <linux/ioport.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/stddef.h>
++#include <linux/parport.h>
++#include <linux/i2c.h>
++#include <linux/i2c-algo-8xx.h>
++#include <asm/mpc8xx.h>
++#include <asm/commproc.h>
++
++
++static void
++rpx_iic_init(struct i2c_algo_8xx_data *data)
++{
++ volatile cpm8xx_t *cp;
++ volatile immap_t *immap;
++
++ cp = cpmp; /* Get pointer to Communication Processor */
++ immap = (immap_t *)IMAP_ADDR; /* and to internal registers */
++
++ data->iip = (iic_t *)&cp->cp_dparam[PROFF_IIC];
++
++ /* Check for and use a microcode relocation patch.
++ */
++ if ((data->reloc = data->iip->iic_rpbase))
++ data->iip = (iic_t *)&cp->cp_dpmem[data->iip->iic_rpbase];
++
++ data->i2c = (i2c8xx_t *)&(immap->im_i2c);
++ data->cp = cp;
++
++ /* Initialize Port B IIC pins.
++ */
++ cp->cp_pbpar |= 0x00000030;
++ cp->cp_pbdir |= 0x00000030;
++ cp->cp_pbodr |= 0x00000030;
++
++ /* Allocate space for two transmit and two receive buffer
++ * descriptors in the DP ram.
++ */
++ data->dp_addr = m8xx_cpm_dpalloc(sizeof(cbd_t) * 4);
++
++ /* ptr to i2c area */
++ data->i2c = (i2c8xx_t *)&(((immap_t *)IMAP_ADDR)->im_i2c);
++}
++
++static int rpx_install_isr(int irq, void (*func)(void *, void *), void *data)
++{
++ /* install interrupt handler */
++ cpm_install_handler(irq, (void (*)(void *, struct pt_regs *)) func, data);
++
++ return 0;
++}
++
++static struct i2c_algo_8xx_data rpx_data = {
++ .setisr = rpx_install_isr
++};
++
++static struct i2c_adapter rpx_ops = {
++ .owner = THIS_MODULE,
++ .name = "m8xx",
++ .id = I2C_HW_MPC8XX_EPON,
++ .algo_data = &rpx_data,
++};
++
++int __init i2c_rpx_init(void)
++{
++	printk(KERN_INFO "i2c-rpx.o: i2c MPC8xx module version %s (%s)\n", I2C_VERSION, I2C_DATE);
++
++ /* reset hardware to sane state */
++ rpx_iic_init(&rpx_data);
++
++ if (i2c_8xx_add_bus(&rpx_ops) < 0) {
++		printk(KERN_ERR "i2c-rpx: Unable to register with I2C\n");
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++void __exit i2c_rpx_exit(void)
++{
++ i2c_8xx_del_bus(&rpx_ops);
++}
++
++MODULE_AUTHOR("Dan Malek <dmalek@jlc.net>");
++MODULE_DESCRIPTION("I2C-Bus adapter routines for MPC8xx boards");
++
++module_init(i2c_rpx_init);
++module_exit(i2c_rpx_exit);
+--- linux-old/drivers/i2c/i2c-sibyte.c Sun Aug 31 14:51:57 CEST 2003
++++ linux/drivers/i2c/i2c-sibyte.c Sun Aug 31 14:51:57 CEST 2003
+@@ -1,114 +0,0 @@
+-/*
+- * Copyright (C) 2001,2002,2003 Broadcom Corporation
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License
+- * as published by the Free Software Foundation; either version 2
+- * of the License, or (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+- */
+-
+-#include <linux/config.h>
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-
+-#include <asm/sibyte/sb1250_regs.h>
+-#include <asm/sibyte/sb1250_smbus.h>
+-
+-#include <linux/i2c.h>
+-#include <linux/i2c-algo-sibyte.h>
+-
+-static int sibyte_reg(struct i2c_client *client)
+-{
+- return 0;
+-}
+-
+-static int sibyte_unreg(struct i2c_client *client)
+-{
+- return 0;
+-}
+-
+-static void sibyte_inc_use(struct i2c_adapter *adap)
+-{
+-#ifdef MODULE
+- MOD_INC_USE_COUNT;
+-#endif
+-}
+-
+-static void sibyte_dec_use(struct i2c_adapter *adap)
+-{
+-#ifdef MODULE
+- MOD_DEC_USE_COUNT;
+-#endif
+-}
+-
+-static struct i2c_algo_sibyte_data sibyte_board_data[2] = {
+- { NULL, 0, (void *)(KSEG1+A_SMB_BASE(0)) },
+- { NULL, 1, (void *)(KSEG1+A_SMB_BASE(1)) }
+-};
+-
+-static struct i2c_adapter sibyte_board_adapter[2] = {
+- {
+- name: "SiByte SMBus 0",
+- id: I2C_HW_SIBYTE,
+- algo: NULL,
+- algo_data: &sibyte_board_data[0],
+- inc_use: sibyte_inc_use,
+- dec_use: sibyte_dec_use,
+- client_register: sibyte_reg,
+- client_unregister: sibyte_unreg,
+- client_count: 0
+- } ,
+- {
+- name: "SiByte SMBus 1",
+- id: I2C_HW_SIBYTE,
+- algo: NULL,
+- algo_data: &sibyte_board_data[1],
+- inc_use: sibyte_inc_use,
+- dec_use: sibyte_dec_use,
+- client_register: sibyte_reg,
+- client_unregister: sibyte_unreg,
+- client_count: 0
+- }
+-};
+-
+-int __init i2c_sibyte_init(void)
+-{
+- printk("i2c-swarm.o: i2c SMBus adapter module for SiByte board\n");
+- if (i2c_sibyte_add_bus(&sibyte_board_adapter[0], K_SMB_FREQ_100KHZ) < 0)
+- return -ENODEV;
+- if (i2c_sibyte_add_bus(&sibyte_board_adapter[1], K_SMB_FREQ_400KHZ) < 0)
+- return -ENODEV;
+- return 0;
+-}
+-
+-
+-EXPORT_NO_SYMBOLS;
+-
+-#ifdef MODULE
+-MODULE_AUTHOR("Kip Walker, Broadcom Corp.");
+-MODULE_DESCRIPTION("SMBus adapter routines for SiByte boards");
+-MODULE_LICENSE("GPL");
+-
+-int init_module(void)
+-{
+- return i2c_sibyte_init();
+-}
+-
+-void cleanup_module(void)
+-{
+- i2c_sibyte_del_bus(&sibyte_board_adapter[0]);
+- i2c_sibyte_del_bus(&sibyte_board_adapter[1]);
+-}
+-
+-#endif
+--- linux-old/drivers/i2c/i2c-velleman.c Sun Aug 31 14:51:58 CEST 2003
++++ linux/drivers/i2c/i2c-velleman.c Sun Aug 31 14:51:58 CEST 2003
+@@ -19,5 +19,5 @@
+ /* ------------------------------------------------------------------------- */
+
+-/* $Id$ */
++/* $Id$ */
+
+ #include <linux/kernel.h>
+@@ -25,10 +25,10 @@
+ #include <linux/module.h>
+ #include <linux/init.h>
+-#include <linux/string.h> /* for 2.0 kernels to get NULL */
+-#include <asm/errno.h> /* for 2.0 kernels to get ENODEV */
+-#include <asm/io.h>
+-
++#include <linux/errno.h>
++#include <linux/delay.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c-algo-bit.h>
++#include <asm/io.h>
++#include <asm/param.h> /* for HZ */
+
+ /* ----- global defines ----------------------------------------------- */
+@@ -91,47 +91,13 @@
+ static int bit_velle_init(void)
+ {
+- if (check_region(base,(base == 0x3bc)? 3 : 8) < 0 ) {
+- DEBE(printk("i2c-velleman.o: Port %#x already in use.\n",
+- base));
++ if (!request_region(base, (base == 0x3bc) ? 3 : 8,
++ "i2c (Vellemann adapter)"))
+ return -ENODEV;
+- } else {
+- request_region(base, (base == 0x3bc)? 3 : 8,
+- "i2c (Vellemann adapter)");
+- bit_velle_setsda((void*)base,1);
+- bit_velle_setscl((void*)base,1);
+- }
+- return 0;
+-}
+-
+-static void __exit bit_velle_exit(void)
+-{
+- release_region( base , (base == 0x3bc)? 3 : 8 );
+-}
+-
+-
+-static int bit_velle_reg(struct i2c_client *client)
+-{
+- return 0;
+-}
+
+-static int bit_velle_unreg(struct i2c_client *client)
+-{
++ bit_velle_setsda((void*)base,1);
++ bit_velle_setscl((void*)base,1);
+ return 0;
+ }
+
+-static void bit_velle_inc_use(struct i2c_adapter *adap)
+-{
+-#ifdef MODULE
+- MOD_INC_USE_COUNT;
+-#endif
+-}
+-
+-static void bit_velle_dec_use(struct i2c_adapter *adap)
+-{
+-#ifdef MODULE
+- MOD_DEC_USE_COUNT;
+-#endif
+-}
+-
+ /* ------------------------------------------------------------------------
+ * Encapsulate the above functions in the correct operations structure.
+@@ -140,26 +106,23 @@
+
+ static struct i2c_algo_bit_data bit_velle_data = {
+- NULL,
+- bit_velle_setsda,
+- bit_velle_setscl,
+- bit_velle_getsda,
+- bit_velle_getscl,
+- 10, 10, 100, /* waits, timeout */
++ .setsda = bit_velle_setsda,
++ .setscl = bit_velle_setscl,
++ .getsda = bit_velle_getsda,
++ .getscl = bit_velle_getscl,
++ .udelay = 10,
++ .mdelay = 10,
++ .timeout = HZ
+ };
+
+ static struct i2c_adapter bit_velle_ops = {
+- "Velleman K8000",
+- I2C_HW_B_VELLE,
+- NULL,
+- &bit_velle_data,
+- bit_velle_inc_use,
+- bit_velle_dec_use,
+- bit_velle_reg,
+- bit_velle_unreg,
++ .owner = THIS_MODULE,
++ .name = "Velleman K8000",
++ .id = I2C_HW_B_VELLE,
++ .algo_data = &bit_velle_data,
+ };
+
+-int __init i2c_bitvelle_init(void)
++static int __init i2c_bitvelle_init(void)
+ {
+- printk("i2c-velleman.o: i2c Velleman K8000 adapter module\n");
++ printk(KERN_INFO "i2c-velleman.o: i2c Velleman K8000 adapter module version %s (%s)\n", I2C_VERSION, I2C_DATE);
+ if (base==0) {
+ /* probe some values */
+@@ -181,11 +144,14 @@
+ }
+ }
+- printk("i2c-velleman.o: found device at %#x.\n",base);
++ printk(KERN_DEBUG "i2c-velleman.o: found device at %#x.\n",base);
+ return 0;
+ }
+
+-EXPORT_NO_SYMBOLS;
++static void __exit i2c_bitvelle_exit(void)
++{
++ i2c_bit_del_bus(&bit_velle_ops);
++ release_region(base, (base == 0x3bc) ? 3 : 8);
++}
+
+-#ifdef MODULE
+ MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
+ MODULE_DESCRIPTION("I2C-Bus adapter routines for Velleman K8000 adapter");
+@@ -194,14 +160,4 @@
+ MODULE_PARM(base, "i");
+
+-int init_module(void)
+-{
+- return i2c_bitvelle_init();
+-}
+-
+-void cleanup_module(void)
+-{
+- i2c_bit_del_bus(&bit_velle_ops);
+- bit_velle_exit();
+-}
+-
+-#endif
++module_init(i2c_bitvelle_init);
++module_exit(i2c_bitvelle_exit);
+--- linux-old/include/linux/i2c.h Sun Aug 31 14:51:58 CEST 2003
++++ linux/include/linux/i2c.h Sun Aug 31 14:51:58 CEST 2003
+@@ -24,44 +24,31 @@
+ Frodo Looijaard <frodol@dds.nl> */
+
+-/* $Id$ */
++/* $Id$ */
+
+-#ifndef I2C_H
+-#define I2C_H
++#ifndef _LINUX_I2C_H
++#define _LINUX_I2C_H
+
+-#define I2C_DATE "20010830"
+-#define I2C_VERSION "2.6.1"
++#define I2C_DATE "20030714"
++#define I2C_VERSION "2.8.0"
+
+-#include <linux/i2c-id.h> /* id values of adapters et. al. */
++#include <linux/module.h>
+ #include <linux/types.h>
+-
+-
+-struct i2c_msg;
+-
+-
+-#ifdef __KERNEL__
+-
+-/* --- Includes and compatibility declarations ------------------------ */
+-
+-#include <linux/version.h>
+-#ifndef KERNEL_VERSION
+-#define KERNEL_VERSION(a,b,c) (((a) << 16) | ((b) << 8) | (c))
+-#endif
+-
+-#include <asm/page.h> /* for 2.2.xx */
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,0,25)
++#include <linux/errno.h>
+ #include <linux/sched.h>
+-#else
+ #include <asm/semaphore.h>
++#include <linux/i2c-id.h>
++
++#include <linux/version.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10)
++#define MODULE_LICENSE(x)
+ #endif
+-#include <linux/config.h>
+
+ /* --- General options ------------------------------------------------ */
+
+-#define I2C_ALGO_MAX 4 /* control memory consumption */
+-#define I2C_ADAP_MAX 16
++#define I2C_ADAP_MAX 16 /* control memory consumption */
+ #define I2C_DRIVER_MAX 16
+ #define I2C_CLIENT_MAX 32
+-#define I2C_DUMMY_MAX 4
+
++struct i2c_msg;
+ struct i2c_algorithm;
+ struct i2c_adapter;
+@@ -71,5 +58,4 @@
+ union i2c_smbus_data;
+
+-
+ /*
+ * The master routines are the ones normally used to transmit data to devices
+@@ -124,4 +110,6 @@
+ u8 command, u8 length,
+ u8 *values);
++extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client * client,
++ u8 command, u8 *values);
+ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client,
+ u8 command, u8 length,
+@@ -136,4 +124,5 @@
+
+ struct i2c_driver {
++ struct module *owner;
+ char name[32];
+ int id;
+@@ -159,16 +148,4 @@
+ */
+ int (*command)(struct i2c_client *client,unsigned int cmd, void *arg);
+-
+- /* These two are mainly used for bookkeeping & dynamic unloading of
+- * kernel modules. inc_use tells the driver that a client is being
+- * used by another module & that it should increase its ref. counter.
+- * dec_use is the inverse operation.
+- * NB: Make sure you have no circular dependencies, or else you get a
+- * deadlock when trying to unload the modules.
+- * You should use the i2c_{inc,dec}_use_client functions instead of
+- * calling this function directly.
+- */
+- void (*inc_use)(struct i2c_client *client);
+- void (*dec_use)(struct i2c_client *client);
+ };
+
+@@ -203,4 +180,5 @@
+ */
+ struct i2c_algorithm {
++ struct module *owner; /* future use --km */
+ char name[32]; /* textual description */
+ unsigned int id;
+@@ -227,8 +205,4 @@
+ };
+
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,29)
+-struct proc_dir_entry;
+-#endif
+-
+ /*
+ * i2c_adapter is the structure used to identify a physical i2c bus along
+@@ -236,4 +210,5 @@
+ */
+ struct i2c_adapter {
++ struct module *owner;
+ char name[32]; /* some useful name to identify the adapter */
+ unsigned int id;/* == is algo->id | hwdep.struct->id, */
+@@ -242,8 +217,4 @@
+ void *algo_data;
+
+- /* --- These may be NULL, but should increase the module use count */
+- void (*inc_use)(struct i2c_adapter *);
+- void (*dec_use)(struct i2c_adapter *);
+-
+ /* --- administration stuff. */
+ int (*client_register)(struct i2c_client *);
+@@ -256,9 +227,9 @@
+
+ /* data fields that are valid for all devices */
+- struct semaphore lock;
++ struct semaphore bus;
++ struct semaphore list;
+ unsigned int flags;/* flags specifying div. data */
+
+ struct i2c_client *clients[I2C_CLIENT_MAX];
+- int client_count;
+
+ int timeout;
+@@ -268,7 +239,4 @@
+ /* No need to set this when you initialize the adapter */
+ int inode;
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,29)
+- struct proc_dir_entry *proc_entry;
+-#endif
+ #endif /* def CONFIG_PROC_FS */
+ };
+@@ -282,4 +250,7 @@
+ #define I2C_CLIENT_ALLOW_MULTIPLE_USE 0x02 /* Allow multiple access-locks */
+ /* on an i2c_client */
++#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
++#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
++ /* Must equal I2C_M_TEN below */
+
+ /* i2c_client_address_data is the struct for holding default client
+@@ -320,10 +291,4 @@
+ extern int i2c_detach_client(struct i2c_client *);
+
+-/* Only call these if you grab a resource that makes unloading the
+- client and the adapter it is on completely impossible. Like when a
+- /proc directory is entered. */
+-extern void i2c_inc_use_client(struct i2c_client *);
+-extern void i2c_dec_use_client(struct i2c_client *);
+-
+ /* New function: This is to get an i2c_client-struct for controlling the
+ client either by using i2c_control-function or having the
+@@ -359,4 +324,13 @@
+ i2c_client_found_addr_proc *found_proc);
+
++static inline int i2c_client_command(struct i2c_client *client,
++ unsigned int cmd, void *arg)
++{
++ if (client->driver && client->driver->command)
++ return client->driver->command(client, cmd, arg);
++ else
++ return -EINVAL;
++}
++
+ /* An ioctl like call to set div. parameters of the adapter.
+ */
+@@ -376,6 +350,4 @@
+ extern int i2c_check_functionality (struct i2c_adapter *adap, u32 func);
+
+-#endif /* __KERNEL__ */
+-
+ /*
+ * I2C Message - used for pure i2c transaction, also from /dev interface
+@@ -388,6 +360,10 @@
+ #define I2C_M_NOSTART 0x4000
+ #define I2C_M_REV_DIR_ADDR 0x2000
++#define I2C_M_IGNORE_NAK 0x1000
++#define I2C_M_NO_RD_ACK 0x0800
+ __u16 len; /* msg length */
+ __u8 *buf; /* pointer to msg data */
++ int err;
++ short done;
+ };
+
+@@ -396,5 +372,11 @@
+ #define I2C_FUNC_I2C 0x00000001
+ #define I2C_FUNC_10BIT_ADDR 0x00000002
+-#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_{REV_DIR_ADDR,NOSTART} */
++#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_{REV_DIR_ADDR,NOSTART,..} */
++#define I2C_FUNC_SMBUS_HWPEC_CALC 0x00000008 /* SMBus 2.0 */
++#define I2C_FUNC_SMBUS_READ_WORD_DATA_PEC 0x00000800 /* SMBus 2.0 */
++#define I2C_FUNC_SMBUS_WRITE_WORD_DATA_PEC 0x00001000 /* SMBus 2.0 */
++#define I2C_FUNC_SMBUS_PROC_CALL_PEC 0x00002000 /* SMBus 2.0 */
++#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL_PEC 0x00004000 /* SMBus 2.0 */
++#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */
+ #define I2C_FUNC_SMBUS_QUICK 0x00010000
+ #define I2C_FUNC_SMBUS_READ_BYTE 0x00020000
+@@ -407,6 +389,10 @@
+ #define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000
+ #define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000
+-#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* New I2C-like block */
+-#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* transfer */
++#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */
++#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */
++#define I2C_FUNC_SMBUS_READ_I2C_BLOCK_2 0x10000000 /* I2C-like block xfer */
++#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK_2 0x20000000 /* w/ 2-byte reg. addr. */
++#define I2C_FUNC_SMBUS_READ_BLOCK_DATA_PEC 0x40000000 /* SMBus 2.0 */
++#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA_PEC 0x80000000 /* SMBus 2.0 */
+
+ #define I2C_FUNC_SMBUS_BYTE I2C_FUNC_SMBUS_READ_BYTE | \
+@@ -420,4 +406,17 @@
+ #define I2C_FUNC_SMBUS_I2C_BLOCK I2C_FUNC_SMBUS_READ_I2C_BLOCK | \
+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK
++#define I2C_FUNC_SMBUS_I2C_BLOCK_2 I2C_FUNC_SMBUS_READ_I2C_BLOCK_2 | \
++ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK_2
++#define I2C_FUNC_SMBUS_BLOCK_DATA_PEC I2C_FUNC_SMBUS_READ_BLOCK_DATA_PEC | \
++ I2C_FUNC_SMBUS_WRITE_BLOCK_DATA_PEC
++#define I2C_FUNC_SMBUS_WORD_DATA_PEC I2C_FUNC_SMBUS_READ_WORD_DATA_PEC | \
++ I2C_FUNC_SMBUS_WRITE_WORD_DATA_PEC
++
++#define I2C_FUNC_SMBUS_READ_BYTE_PEC I2C_FUNC_SMBUS_READ_BYTE_DATA
++#define I2C_FUNC_SMBUS_WRITE_BYTE_PEC I2C_FUNC_SMBUS_WRITE_BYTE_DATA
++#define I2C_FUNC_SMBUS_READ_BYTE_DATA_PEC I2C_FUNC_SMBUS_READ_WORD_DATA
++#define I2C_FUNC_SMBUS_WRITE_BYTE_DATA_PEC I2C_FUNC_SMBUS_WRITE_WORD_DATA
++#define I2C_FUNC_SMBUS_BYTE_PEC I2C_FUNC_SMBUS_BYTE_DATA
++#define I2C_FUNC_SMBUS_BYTE_DATA_PEC I2C_FUNC_SMBUS_WORD_DATA
+
+ #define I2C_FUNC_SMBUS_EMUL I2C_FUNC_SMBUS_QUICK | \
+@@ -426,13 +425,19 @@
+ I2C_FUNC_SMBUS_WORD_DATA | \
+ I2C_FUNC_SMBUS_PROC_CALL | \
+- I2C_FUNC_SMBUS_WRITE_BLOCK_DATA
++ I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \
++ I2C_FUNC_SMBUS_WRITE_BLOCK_DATA_PEC | \
++ I2C_FUNC_SMBUS_I2C_BLOCK
+
+ /*
+ * Data for SMBus Messages
+ */
++#define I2C_SMBUS_BLOCK_MAX 32 /* As specified in SMBus standard */
++#define I2C_SMBUS_I2C_BLOCK_MAX 32 /* Not specified but we use same structure */
+ union i2c_smbus_data {
+ __u8 byte;
+ __u16 word;
+- __u8 block[33]; /* block[0] is used for length */
++ __u8 block[I2C_SMBUS_BLOCK_MAX + 3]; /* block[0] is used for length */
++ /* one more for read length in block process call */
++ /* and one more for PEC */
+ };
+
+@@ -450,4 +455,9 @@
+ #define I2C_SMBUS_BLOCK_DATA 5
+ #define I2C_SMBUS_I2C_BLOCK_DATA 6
++#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */
++#define I2C_SMBUS_BLOCK_DATA_PEC 8 /* SMBus 2.0 */
++#define I2C_SMBUS_PROC_CALL_PEC 9 /* SMBus 2.0 */
++#define I2C_SMBUS_BLOCK_PROC_CALL_PEC 10 /* SMBus 2.0 */
++#define I2C_SMBUS_WORD_DATA_PEC 11 /* SMBus 2.0 */
+
+
+@@ -475,4 +485,5 @@
+ #define I2C_FUNCS 0x0705 /* Get the adapter functionality */
+ #define I2C_RDWR 0x0707 /* Combined R/W transfer (one stop only)*/
++#define I2C_PEC 0x0708 /* != 0 for SMBus PEC */
+ #if 0
+ #define I2C_ACK_TEST 0x0710 /* See if a slave is at a specific address */
+@@ -490,14 +501,4 @@
+ #define I2C_MAJOR 89 /* Device major number */
+
+-#ifdef __KERNEL__
+-
+-# ifndef NULL
+-# define NULL ( (void *) 0 )
+-# endif
+-
+-# ifndef ENODEV
+-# include <asm/errno.h>
+-# endif
+-
+ /* These defines are used for probing i2c client addresses */
+ /* Default fill of many variables */
+@@ -561,4 +562,10 @@
+ ((adapptr)->algo->id == I2C_ALGO_ISA)
+
+-#endif /* def __KERNEL__ */
+-#endif /* I2C_H */
++/* Tiny delay function used by the i2c bus drivers */
++static inline void i2c_delay(signed long timeout)
++{
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(timeout);
++}
++
++#endif /* _LINUX_I2C_H */
+--- linux-old/Documentation/Configure.help Sun Aug 31 14:51:59 CEST 2003
++++ linux/Documentation/Configure.help Sun Aug 31 14:51:59 CEST 2003
+@@ -18109,4 +18109,45 @@
+ board at <http://www.mvista.com/allies/semiconductor/ite.html>.
+
++UltraSPARC-III bootbus i2c controller driver
++CONFIG_BBC_I2C
++ The BBC devices on the UltraSPARC III have two I2C controllers. The
++ first I2C controller connects mainly to configuration PROMs (NVRAM,
++ CPU configuration, DIMM types, etc.). The second I2C controller
++ connects to environmental control devices such as fans and
++ temperature sensors. The second controller also connects to the
++ smartcard reader, if present. Say Y to enable support for these.
++
++ITE I2C Algorithm
++CONFIG_ITE_I2C_ALGO
++ This supports the use the ITE8172 I2C interface found on some MIPS
++ systems. Say Y if you have one of these. You should also say Y for
++ the ITE I2C peripheral driver support below.
++
++ This support is also available as a module. If you want to compile
++ it as a module, say M here and read
++ <file:Documentation/modules.txt>.
++ The module will be called i2c-algo-ite.o.
++
++ITE I2C Adapter
++CONFIG_ITE_I2C_ADAP
++ This supports the ITE8172 I2C peripheral found on some MIPS
++ systems. Say Y if you have one of these. You should also say Y for
++ the ITE I2C driver algorithm support above.
++
++ This support is also available as a module. If you want to compile
++ it as a module, say M here and read
++ <file:Documentation/modules.txt>.
++ The module will be called i2c-adap-ite.o.
++
++SiByte I2C Algorithm
++CONFIG_I2C_ALGO_SIBYTE
++ Supports the SiByte SOC on-chip I2C interfaces (2 channels).
++
++MAX1617 Temperature Sensor
++CONFIG_I2C_MAX1617
++ This builds a simple polling driver for the Maxim 1617 temperature
++ sensor. Currently the device is only supported on a SiByte I2C
++ adapter, and the driver prints status updates to the system log.
++
+ I2C support
+ CONFIG_I2C
+@@ -18133,13 +18174,4 @@
+ The module will be called i2c-core.o.
+
+-UltraSPARC-III bootbus i2c controller driver
+-CONFIG_BBC_I2C
+- The BBC devices on the UltraSPARC III have two I2C controllers. The
+- first I2C controller connects mainly to configuration PROMs (NVRAM,
+- CPU configuration, DIMM types, etc.). The second I2C controller
+- connects to environmental control devices such as fans and
+- temperature sensors. The second controller also connects to the
+- smartcard reader, if present. Say Y to enable support for these.
+-
+ I2C bit-banging interfaces
+ CONFIG_I2C_ALGOBIT
+@@ -18186,4 +18218,14 @@
+ The module will be called i2c-velleman.o.
+
++Basic I2C on Parallel Port adapter
++CONFIG_I2C_PPORT
++ This supports directly connecting I2C devices to the parallel port.
++ See <file:Documentation/i2c/i2c-pport> for more information.
++
++ This driver is also available as a module. If you want to compile
++ it as a module, say M here and read
++ <file:Documentation/modules.txt>.
++ The module will be called i2c-pport.o.
++
+ I2C PCF 8584 interfaces
+ CONFIG_I2C_ALGOPCF
+@@ -18207,35 +18249,57 @@
+ The module will be called i2c-elektor.o.
+
+-ITE I2C Algorithm
+-CONFIG_ITE_I2C_ALGO
+- This supports the use the ITE8172 I2C interface found on some MIPS
+- systems. Say Y if you have one of these. You should also say Y for
+- the ITE I2C peripheral driver support below.
++PCF on the EPP Parallel Port
++CONFIG_I2C_PCFEPP
++ This supports the PCF8584 connected to the parallel port.
+
+- This support is also available as a module. If you want to compile
+- it as a modules, say M here and read
++ This driver is also available as a module. If you want to compile
++ it as a module, say M here and read
+ <file:Documentation/modules.txt>.
+- The module will be called i2c-algo-ite.o.
++ The module will be called i2c-pcf-epp.o.
+
+-ITE I2C Adapter
+-CONFIG_ITE_I2C_ADAP
+- This supports the ITE8172 I2C peripheral found on some MIPS
+- systems. Say Y if you have one of these. You should also say Y for
+- the ITE I2C driver algorithm support above.
++Motorola 8xx I2C algorithm
++CONFIG_I2C_ALGO8XX
++ This is the algorithm that allows you to use Motorola 8xx I2C adapters.
+
+- This support is also available as a module. If you want to compile
++ This driver is also available as a module. If you want to compile
+ it as a module, say M here and read
+ <file:Documentation/modules.txt>.
+- The module will be called i2c-adap-ite.o.
++ The module will be called i2c-algo-8xx.o.
+
+-SiByte I2C Algorithm
+-CONFIG_I2C_ALGO_SIBYTE
+- Supports the SiByte SOC on-chip I2C interfaces (2 channels).
++Motorola 8xx I2C interface
++CONFIG_I2C_RPXLITE
++ This supports the Motorola 8xx I2C device.
+
+-MAX1617 Temperature Sensor
+-CONFIG_I2C_MAX1617
+- This builds a simple polling driver for the Maxim 1617 temperature
+- sensor. Currently the device is only supported on a SiByte I2C
+- adapter, and the driver prints status updates to the system log.
++ This driver is also available as a module. If you want to compile
++ it as a module, say M here and read
++ <file:Documentation/modules.txt>.
++ The module will be called i2c-rpx.o.
++
++IBM 405 I2C algorithm
++CONFIG_I2C_IBM_OCP_ALGO
++ This is the algorithm that allows you to use IBM 405 I2C adapters.
++
++ This driver is also available as a module. If you want to compile
++ it as a module, say M here and read
++ <file:Documentation/modules.txt>.
++ The module will be called i2c-algo-ibm_ocp.o.
++
++IBM 405 I2C interface
++CONFIG_I2C_IBM_OCP_ADAP
++ This supports the IBM 405 I2C device.
++
++ This driver is also available as a module. If you want to compile
++ it as a module, say M here and read
++ <file:Documentation/modules.txt>.
++ The module will be called i2c-adap-ibm_ocp.o.
++
++StrongARM SA-1110 interface
++CONFIG_I2C_FRODO
++ This supports the StrongARM SA-1110 Development Board.
++
++ This driver is also available as a module. If you want to compile
++ it as a module, say M here and read
++ <file:Documentation/modules.txt>.
++ The module will be called i2c-frodo.o.
+
+ I2C device interface
+--- linux-2.4.22-orig/drivers/i2c/i2c-adap-ite.c Tue Jul 15 12:23:01 2003
++++ linux-2.4.22-i2c/drivers/i2c/i2c-adap-ite.c Thu Aug 28 18:24:16 2003
+@@ -199,22 +199,6 @@
+ }
+
+
+-static void iic_ite_inc_use(struct i2c_adapter *adap)
+-{
+-#ifdef MODULE
+- MOD_INC_USE_COUNT;
+-#endif
+-}
+-
+-
+-static void iic_ite_dec_use(struct i2c_adapter *adap)
+-{
+-#ifdef MODULE
+- MOD_DEC_USE_COUNT;
+-#endif
+-}
+-
+-
+ /* ------------------------------------------------------------------------
+ * Encapsulate the above functions in the correct operations structure.
+ * This is only done when more than one hardware adapter is supported.
+@@ -230,14 +214,13 @@
+ };
+
+ static struct i2c_adapter iic_ite_ops = {
+- "ITE IIC adapter",
+- I2C_HW_I_IIC,
+- NULL,
+- &iic_ite_data,
+- iic_ite_inc_use,
+- iic_ite_dec_use,
+- iic_ite_reg,
+- iic_ite_unreg,
++ .owner = THIS_MODULE,
++ .name = "ITE IIC adapter",
++ .id = I2C_HW_I_IIC,
++ .algo = NULL,
++ .algo_data = &iic_ite_data,
++ .client_register = iic_ite_reg,
++	.client_unregister = iic_ite_unreg,
+ };
+
+ /* Called when the module is loaded. This function starts the
+--- linux-2.4.22-orig/drivers/i2c/i2c-algo-ite.c Thu Oct 11 17:05:47 2001
++++ linux-2.4.22-i2c/drivers/i2c/i2c-algo-ite.c Thu Aug 28 18:39:48 2003
+@@ -742,14 +742,12 @@
+ /* -----exported algorithm data: ------------------------------------- */
+
+ static struct i2c_algorithm iic_algo = {
+- "ITE IIC algorithm",
+- I2C_ALGO_IIC,
+- iic_xfer, /* master_xfer */
+- NULL, /* smbus_xfer */
+- NULL, /* slave_xmit */
+- NULL, /* slave_recv */
+- algo_control, /* ioctl */
+- iic_func, /* functionality */
++ .owner = THIS_MODULE,
++ .name = "ITE IIC algorithm",
++ .id = I2C_ALGO_IIC,
++ .master_xfer = iic_xfer,
++ .algo_control = algo_control,
++ .functionality = iic_func,
+ };
+
+
+--- linux-2.4.22-orig/drivers/i2c/i2c-keywest.c Tue Jul 15 12:23:26 2003
++++ linux-2.4.22-i2c/drivers/i2c/i2c-keywest.c Sun Sep 14 10:33:10 2003
+@@ -412,20 +412,9 @@
+ I2C_FUNC_SMBUS_BLOCK_DATA;
+ }
+
+-static void
+-keywest_inc(struct i2c_adapter *adapter)
+-{
+- MOD_INC_USE_COUNT;
+-}
+-
+-static void
+-keywest_dec(struct i2c_adapter *adapter)
+-{
+- MOD_DEC_USE_COUNT;
+-}
+-
+ /* For now, we only handle combined mode (smbus) */
+ static struct i2c_algorithm keywest_algorithm = {
++ owner: THIS_MODULE,
+ name: "Keywest i2c",
+ id: I2C_ALGO_SMBUS,
+ smbus_xfer: keywest_smbus_xfer,
+@@ -527,14 +516,13 @@
+ struct keywest_chan* chan = &iface->channels[i];
+ u8 addr;
+
++ chan->adapter.owner = THIS_MODULE;
+ sprintf(chan->adapter.name, "%s %d", np->parent->name, i);
+ chan->iface = iface;
+ chan->chan_no = i;
+ chan->adapter.id = I2C_ALGO_SMBUS;
+ chan->adapter.algo = &keywest_algorithm;
+ chan->adapter.algo_data = NULL;
+- chan->adapter.inc_use = keywest_inc;
+- chan->adapter.dec_use = keywest_dec;
+ chan->adapter.client_register = NULL;
+ chan->adapter.client_unregister = NULL;
+ chan->adapter.data = chan;
+--- linux-2.4.22-orig/drivers/i2c/scx200_acb.c Tue Jul 15 12:23:49 2003
++++ linux-2.4.22-i2c/drivers/i2c/scx200_acb.c Sun Aug 31 12:04:13 2003
+@@ -408,16 +408,6 @@
+ return 0;
+ }
+
+-static void scx200_acb_inc_use(struct i2c_adapter *adapter)
+-{
+- MOD_INC_USE_COUNT;
+-}
+-
+-static void scx200_acb_dec_use(struct i2c_adapter *adapter)
+-{
+- MOD_DEC_USE_COUNT;
+-}
+-
+ /* For now, we only handle combined mode (smbus) */
+ static struct i2c_algorithm scx200_acb_algorithm = {
+ name: "NatSemi SCx200 ACCESS.bus",
+@@ -479,11 +469,10 @@
+ memset(iface, 0, sizeof(*iface));
+ adapter = &iface->adapter;
+ adapter->data = iface;
++ adapter->owner = THIS_MODULE;
+ sprintf(adapter->name, "SCx200 ACB%d", index);
+ adapter->id = I2C_ALGO_SMBUS;
+ adapter->algo = &scx200_acb_algorithm;
+- adapter->inc_use = scx200_acb_inc_use;
+- adapter->dec_use = scx200_acb_dec_use;
+ adapter->client_register = scx200_acb_reg;
+ adapter->client_unregister = scx200_acb_unreg;
+
+--- linux-2.4.22-orig/drivers/i2c/scx200_i2c.c Tue Jul 15 12:23:49 2003
++++ linux-2.4.22-i2c/drivers/i2c/scx200_i2c.c Sun Aug 31 12:04:18 2003
+@@ -76,16 +76,6 @@
+ return 0;
+ }
+
+-static void scx200_i2c_inc_use(struct i2c_adapter *adap)
+-{
+- MOD_INC_USE_COUNT;
+-}
+-
+-static void scx200_i2c_dec_use(struct i2c_adapter *adap)
+-{
+- MOD_DEC_USE_COUNT;
+-}
+-
+ /* ------------------------------------------------------------------------
+ * Encapsulate the above functions in the correct operations structure.
+ * This is only done when more than one hardware adapter is supported.
+@@ -101,11 +91,10 @@
+ };
+
+ static struct i2c_adapter scx200_i2c_ops = {
++ .owner = THIS_MODULE,
+ .name = "NatSemi SCx200 I2C",
+ .id = I2C_HW_B_VELLE,
+ .algo_data = &scx200_i2c_data,
+- .inc_use = scx200_i2c_inc_use,
+- .dec_use = scx200_i2c_dec_use,
+ .client_register = scx200_i2c_reg,
+ .client_unregister = scx200_i2c_unreg,
+ };
+diff -ru linux-2.4.22-orig/drivers/ieee1394/pcilynx.c linux-2.4.22-i2c/drivers/ieee1394/pcilynx.c
+--- linux-2.4.22-orig/drivers/ieee1394/pcilynx.c Wed Aug 27 18:11:47 2003
++++ linux-2.4.22-i2c/drivers/ieee1394/pcilynx.c Sun Aug 31 12:04:23 2003
+@@ -139,6 +139,7 @@
+ };
+
+ static struct i2c_adapter bit_ops = {
++ .owner = THIS_MODULE,
+ .id = 0xAA, //FIXME: probably we should get an id in i2c-id.h
+ .client_register = bit_reg,
+ .client_unregister = bit_unreg,
+diff -ru linux-2.4.22-orig/drivers/media/video/Makefile linux-2.4.22-i2c/drivers/media/video/Makefile
+--- linux-2.4.22-orig/drivers/media/video/Makefile Wed Aug 27 18:11:47 2003
++++ linux-2.4.22-i2c/drivers/media/video/Makefile Sun Aug 31 12:29:43 2003
+@@ -40,7 +40,7 @@
+
+ obj-$(CONFIG_VIDEO_ZR36120) += zoran.o
+ obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o i2c-old.o
+-obj-$(CONFIG_VIDEO_SAA5249) += saa5249.o i2c-old.o
++obj-$(CONFIG_VIDEO_SAA5249) += saa5249.o
+ obj-$(CONFIG_VIDEO_CQCAM) += c-qcam.o
+ obj-$(CONFIG_VIDEO_BWQCAM) += bw-qcam.o
+ obj-$(CONFIG_VIDEO_W9966) += w9966.o
+@@ -48,11 +48,10 @@
+ obj-$(CONFIG_VIDEO_ZORAN_BUZ) += saa7111.o saa7185.o
+ obj-$(CONFIG_VIDEO_ZORAN_DC10) += saa7110.o adv7175.o
+ obj-$(CONFIG_VIDEO_ZORAN_LML33) += bt819.o bt856.o
+-obj-$(CONFIG_VIDEO_LML33) += bt856.o bt819.o
+ obj-$(CONFIG_VIDEO_PMS) += pms.o
+ obj-$(CONFIG_VIDEO_PLANB) += planb.o
+ obj-$(CONFIG_VIDEO_VINO) += vino.o
+-obj-$(CONFIG_VIDEO_STRADIS) += stradis.o
++obj-$(CONFIG_VIDEO_STRADIS) += stradis.o i2c-old.o
+ obj-$(CONFIG_VIDEO_CPIA) += cpia.o
+ obj-$(CONFIG_VIDEO_CPIA_PP) += cpia_pp.o
+ obj-$(CONFIG_VIDEO_CPIA_USB) += cpia_usb.o
+@@ -81,8 +80,8 @@
+
+ fastdep:
+
+-zoran.o: zr36120.o zr36120_i2c.o zr36120_mem.o
+- $(LD) $(LD_RFLAG) -r -o $@ zr36120.o zr36120_i2c.o zr36120_mem.o
++zoran.o: $(zoran-objs)
++ $(LD) $(LD_RFLAG) -r -o $@ $(zoran-objs)
+
+ bttv.o: $(bttv-objs)
+ $(LD) $(LD_RFLAG) -r -o $@ $(bttv-objs)
+diff -ru linux-2.4.22-orig/drivers/media/video/bt832.c linux-2.4.22-i2c/drivers/media/video/bt832.c
+--- linux-2.4.22-orig/drivers/media/video/bt832.c Tue Jul 15 12:23:50 2003
++++ linux-2.4.22-i2c/drivers/media/video/bt832.c Sun Aug 31 12:29:43 2003
+@@ -257,6 +257,7 @@
+ /* ----------------------------------------------------------------------- */
+
+ static struct i2c_driver driver = {
++ .owner = THIS_MODULE,
+ .name = "i2c bt832 driver",
+ .id = -1, /* FIXME */
+ .flags = I2C_DF_NOTIFY,
+diff -ru linux-2.4.22-orig/drivers/media/video/bttv-if.c linux-2.4.22-i2c/drivers/media/video/bttv-if.c
+--- linux-2.4.22-orig/drivers/media/video/bttv-if.c Wed Aug 27 18:11:47 2003
++++ linux-2.4.22-i2c/drivers/media/video/bttv-if.c Sun Aug 31 12:30:33 2003
+@@ -190,16 +190,6 @@
+ return state;
+ }
+
+-static void bttv_inc_use(struct i2c_adapter *adap)
+-{
+- MOD_INC_USE_COUNT;
+-}
+-
+-static void bttv_dec_use(struct i2c_adapter *adap)
+-{
+- MOD_DEC_USE_COUNT;
+-}
+-
+ static int attach_inform(struct i2c_client *client)
+ {
+ struct bttv *btv = i2c_get_adapdata(client->adapter);
+@@ -241,8 +231,7 @@
+ };
+
+ static struct i2c_adapter bttv_i2c_adap_template = {
+- .inc_use = bttv_inc_use,
+- .dec_use = bttv_dec_use,
++ .owner = THIS_MODULE,
+ I2C_DEVNAME("bt848"),
+ .id = I2C_HW_B_BT848,
+ .client_register = attach_inform,
+diff -ru linux-2.4.22-orig/drivers/media/video/i2c-old.c linux-2.4.22-i2c/drivers/media/video/i2c-old.c
+--- linux-2.4.22-orig/drivers/media/video/i2c-old.c Sun Sep 30 21:26:06 2001
++++ linux-2.4.22-i2c/drivers/media/video/i2c-old.c Sun Aug 31 12:29:43 2003
+@@ -36,28 +36,11 @@
+ static struct i2c_driver *drivers[I2C_DRIVER_MAX];
+ static int bus_count = 0, driver_count = 0;
+
+-#ifdef CONFIG_VIDEO_BUZ
+-extern int saa7111_init(void);
+-extern int saa7185_init(void);
+-#endif
+-#ifdef CONFIG_VIDEO_LML33
+-extern int bt819_init(void);
+-extern int bt856_init(void);
+-#endif
+-
+ int i2c_init(void)
+ {
+ printk(KERN_INFO "i2c: initialized%s\n",
+ scan ? " (i2c bus scan enabled)" : "");
+ /* anything to do here ? */
+-#ifdef CONFIG_VIDEO_BUZ
+- saa7111_init();
+- saa7185_init();
+-#endif
+-#ifdef CONFIG_VIDEO_LML33
+- bt819_init();
+- bt856_init();
+-#endif
+ return 0;
+ }
+
+diff -ru linux-2.4.22-orig/drivers/media/video/msp3400.c linux-2.4.22-i2c/drivers/media/video/msp3400.c
+--- linux-2.4.22-orig/drivers/media/video/msp3400.c Wed Aug 27 18:11:47 2003
++++ linux-2.4.22-i2c/drivers/media/video/msp3400.c Sun Aug 31 12:29:43 2003
+@@ -1243,6 +1243,7 @@
+ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg);
+
+ static struct i2c_driver driver = {
++ .owner = THIS_MODULE,
+ .name = "i2c msp3400 driver",
+ .id = I2C_DRIVERID_MSP3400,
+ .flags = I2C_DF_NOTIFY,
+diff -ru linux-2.4.22-orig/drivers/media/video/saa5249.c linux-2.4.22-i2c/drivers/media/video/saa5249.c
+--- linux-2.4.22-orig/drivers/media/video/saa5249.c Sun Sep 30 21:26:06 2001
++++ linux-2.4.22-i2c/drivers/media/video/saa5249.c Sun Aug 31 12:29:43 2003
+@@ -258,12 +258,13 @@
+
+ static struct i2c_driver i2c_driver_videotext =
+ {
+- IF_NAME, /* name */
+- I2C_DRIVERID_SAA5249, /* in i2c.h */
+- I2C_DF_NOTIFY,
+- saa5249_probe,
+- saa5249_detach,
+- saa5249_command
++ .owner = THIS_MODULE,
++ .name = IF_NAME,
++ .id = I2C_DRIVERID_SAA5249, /* in i2c-id.h */
++ .flags = I2C_DF_NOTIFY,
++ .attach_adapter = saa5249_probe,
++ .detach_client = saa5249_detach,
++ .command = saa5249_command
+ };
+
+ static struct i2c_client client_template = {
+diff -ru linux-2.4.22-orig/drivers/media/video/saa7146.h linux-2.4.22-i2c/drivers/media/video/saa7146.h
+--- linux-2.4.22-orig/drivers/media/video/saa7146.h Mon Dec 11 22:15:51 2000
++++ linux-2.4.22-i2c/drivers/media/video/saa7146.h Sun Aug 31 12:29:43 2003
+@@ -25,9 +25,6 @@
+ #include <linux/types.h>
+ #include <linux/wait.h>
+
+-#include <linux/i2c.h>
+-#include <linux/videodev.h>
+-
+ #ifndef O_NONCAP
+ #define O_NONCAP O_TRUNC
+ #endif
+diff -ru linux-2.4.22-orig/drivers/media/video/swarm_saa7114h.c linux-2.4.22-i2c/drivers/media/video/swarm_saa7114h.c
+--- linux-2.4.22-orig/drivers/media/video/swarm_saa7114h.c Wed Aug 27 18:11:47 2003
++++ linux-2.4.22-i2c/drivers/media/video/swarm_saa7114h.c Sun Aug 31 12:42:00 2003
+@@ -201,11 +201,12 @@
+
+ struct i2c_driver i2c_driver_saa7114h =
+ {
+- name: "saa7114h", /* name */
+- id: I2C_DRIVERID_SAA7114H, /* ID */
+- flags: I2C_DF_NOTIFY, /* XXXKW do I care? */
+- attach_adapter: saa7114h_probe,
+- detach_client: saa7114h_detach
++ .owner = THIS_MODULE,
++ .name = "saa7114h",
++ .id = I2C_DRIVERID_SAA7114H,
++ .flags = I2C_DF_NOTIFY,
++ .attach_adapter = saa7114h_probe,
++ .detach_client = saa7114h_detach
+ };
+
+ /* -----------------------------------------------------------------------
+diff -ru linux-2.4.22-orig/drivers/media/video/tda7432.c linux-2.4.22-i2c/drivers/media/video/tda7432.c
+--- linux-2.4.22-orig/drivers/media/video/tda7432.c Wed Aug 27 18:11:47 2003
++++ linux-2.4.22-i2c/drivers/media/video/tda7432.c Sun Aug 31 12:29:43 2003
+@@ -515,6 +515,7 @@
+ }
+
+ static struct i2c_driver driver = {
++ .owner = THIS_MODULE,
+ .name = "i2c tda7432 driver",
+ .id = I2C_DRIVERID_TDA7432,
+ .flags = I2C_DF_NOTIFY,
+diff -ru linux-2.4.22-orig/drivers/media/video/tda9875.c linux-2.4.22-i2c/drivers/media/video/tda9875.c
+--- linux-2.4.22-orig/drivers/media/video/tda9875.c Wed Aug 27 18:11:47 2003
++++ linux-2.4.22-i2c/drivers/media/video/tda9875.c Sun Aug 31 12:29:43 2003
+@@ -386,6 +386,7 @@
+
+
+ static struct i2c_driver driver = {
++ .owner = THIS_MODULE,
+ .name = "i2c tda9875 driver",
+ .id = I2C_DRIVERID_TDA9875,
+ .flags = I2C_DF_NOTIFY,
+diff -ru linux-2.4.22-orig/drivers/media/video/tda9887.c linux-2.4.22-i2c/drivers/media/video/tda9887.c
+--- linux-2.4.22-orig/drivers/media/video/tda9887.c Wed Aug 27 18:11:47 2003
++++ linux-2.4.22-i2c/drivers/media/video/tda9887.c Sun Aug 31 12:29:43 2003
+@@ -438,6 +438,7 @@
+ /* ----------------------------------------------------------------------- */
+
+ static struct i2c_driver driver = {
++ .owner = THIS_MODULE,
+ .name = "i2c tda9887 driver",
+ .id = -1, /* FIXME */
+ .flags = I2C_DF_NOTIFY,
+diff -ru linux-2.4.22-orig/drivers/media/video/tuner-3036.c linux-2.4.22-i2c/drivers/media/video/tuner-3036.c
+--- linux-2.4.22-orig/drivers/media/video/tuner-3036.c Sun Sep 30 21:26:06 2001
++++ linux-2.4.22-i2c/drivers/media/video/tuner-3036.c Sun Aug 31 12:29:43 2003
+@@ -185,12 +185,13 @@
+ static struct i2c_driver
+ i2c_driver_tuner =
+ {
+- "sab3036", /* name */
+- I2C_DRIVERID_SAB3036, /* ID */
+- I2C_DF_NOTIFY,
+- tuner_probe,
+- tuner_detach,
+- tuner_command
++ .owner = THIS_MODULE,
++ .name = "sab3036",
++ .id = I2C_DRIVERID_SAB3036,
++ .flags = I2C_DF_NOTIFY,
++ .attach_adapter = tuner_probe,
++ .detach_client = tuner_detach,
++ .command = tuner_command
+ };
+
+ static struct i2c_client client_template =
+diff -ru linux-2.4.22-orig/drivers/media/video/tuner.c linux-2.4.22-i2c/drivers/media/video/tuner.c
+--- linux-2.4.22-orig/drivers/media/video/tuner.c Wed Aug 27 18:11:47 2003
++++ linux-2.4.22-i2c/drivers/media/video/tuner.c Sun Aug 31 12:29:43 2003
+@@ -963,6 +963,7 @@
+ /* ----------------------------------------------------------------------- */
+
+ static struct i2c_driver driver = {
++ .owner = THIS_MODULE,
+ .name = "i2c TV tuner driver",
+ .id = I2C_DRIVERID_TUNER,
+ .flags = I2C_DF_NOTIFY,
+diff -ru linux-2.4.22-orig/drivers/media/video/tvaudio.c linux-2.4.22-i2c/drivers/media/video/tvaudio.c
+--- linux-2.4.22-orig/drivers/media/video/tvaudio.c Wed Aug 27 18:11:47 2003
++++ linux-2.4.22-i2c/drivers/media/video/tvaudio.c Sun Aug 31 12:29:43 2003
+@@ -1644,6 +1644,7 @@
+
+
+ static struct i2c_driver driver = {
++ .owner = THIS_MODULE,
+ .name = "generic i2c audio driver",
+ .id = I2C_DRIVERID_TVAUDIO,
+ .flags = I2C_DF_NOTIFY,
+diff -ru linux-2.4.22-orig/drivers/media/video/tvmixer.c linux-2.4.22-i2c/drivers/media/video/tvmixer.c
+--- linux-2.4.22-orig/drivers/media/video/tvmixer.c Wed Aug 27 18:11:47 2003
++++ linux-2.4.22-i2c/drivers/media/video/tvmixer.c Sun Aug 31 12:29:43 2003
+@@ -192,8 +192,6 @@
+
+ /* lock bttv in memory while the mixer is in use */
+ file->private_data = mix;
+- if (client->adapter->inc_use)
+- client->adapter->inc_use(client->adapter);
+ return 0;
+ }
+
+@@ -207,12 +205,11 @@
+ return -ENODEV;
+ }
+
+- if (client->adapter->dec_use)
+- client->adapter->dec_use(client->adapter);
+ return 0;
+ }
+
+ static struct i2c_driver driver = {
++ .owner = THIS_MODULE,
+ .name = "tv card mixer driver",
+ .id = I2C_DRIVERID_TVMIXER,
+ .flags = I2C_DF_DUMMY,
+diff -ru linux-2.4.22-orig/drivers/pcmcia/sa1100_stork.c linux-2.4.22-i2c/drivers/pcmcia/sa1100_stork.c
+--- linux-2.4.22-orig/drivers/pcmcia/sa1100_stork.c Tue Jul 15 12:23:03 2003
++++ linux-2.4.22-i2c/drivers/pcmcia/sa1100_stork.c Sun Aug 31 12:32:31 2003
+@@ -24,7 +24,6 @@
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+-#include <linux/i2c.h>
+
+ #include <asm/hardware.h>
+ #include <asm/irq.h>
+diff -ru linux-2.4.22-orig/drivers/sound/dmasound/dac3550a.c linux-2.4.22-i2c/drivers/sound/dmasound/dac3550a.c
+--- linux-2.4.22-orig/drivers/sound/dmasound/dac3550a.c Tue Jul 15 12:22:41 2003
++++ linux-2.4.22-i2c/drivers/sound/dmasound/dac3550a.c Sun Aug 31 12:32:41 2003
+@@ -52,14 +52,13 @@
+ };
+
+ struct i2c_driver daca_driver = {
+- name: "DAC3550A driver V " DACA_VERSION,
+- id: I2C_DRIVERID_DACA,
+- flags: I2C_DF_NOTIFY,
+- attach_adapter: &daca_attach_adapter,
+- detach_client: &daca_detach_client,
+- command: NULL,
+- inc_use: NULL, /* &daca_inc_use, */
+- dec_use: NULL /* &daca_dev_use */
++ .owner = THIS_MODULE,
++ .name = "DAC3550A driver V " DACA_VERSION,
++ .id = I2C_DRIVERID_DACA,
++ .flags = I2C_DF_NOTIFY,
++ .attach_adapter = &daca_attach_adapter,
++ .detach_client = &daca_detach_client,
++ .command = NULL
+ };
+
+
+diff -ru linux-2.4.22-orig/drivers/sound/dmasound/tas3001c.c linux-2.4.22-i2c/drivers/sound/dmasound/tas3001c.c
+--- linux-2.4.22-orig/drivers/sound/dmasound/tas3001c.c Tue Jul 15 12:22:41 2003
++++ linux-2.4.22-i2c/drivers/sound/dmasound/tas3001c.c Sun Aug 31 12:32:41 2003
+@@ -90,14 +90,13 @@
+ };
+
+ struct i2c_driver tas_driver = {
+- name: "TAS3001C driver V 0.3",
+- id: I2C_DRIVERID_TAS,
+- flags: I2C_DF_NOTIFY,
+- attach_adapter: &tas_attach_adapter,
+- detach_client: &tas_detach_client,
+- command: NULL,
+- inc_use: NULL, /* &tas_inc_use, */
+- dec_use: NULL /* &tas_dev_use */
++ .owner = THIS_MODULE,
++ .name = "TAS3001C driver V 0.3",
++ .id = I2C_DRIVERID_TAS,
++ .flags = I2C_DF_NOTIFY,
++ .attach_adapter = &tas_attach_adapter,
++ .detach_client = &tas_detach_client,
++ .command = NULL
+ };
+
+ int
+diff -ru linux-2.4.22-orig/drivers/video/matrox/i2c-matroxfb.c linux-2.4.22-i2c/drivers/video/matrox/i2c-matroxfb.c
+--- linux-2.4.22-orig/drivers/video/matrox/i2c-matroxfb.c Tue Jul 15 12:23:53 2003
++++ linux-2.4.22-i2c/drivers/video/matrox/i2c-matroxfb.c Sun Aug 31 12:32:51 2003
+@@ -87,19 +87,10 @@
+ return (matroxfb_read_gpio(b->minfo) & b->mask.clock) ? 1 : 0;
+ }
+
+-static void matroxfb_dh_inc_use(struct i2c_adapter* dummy) {
+- MOD_INC_USE_COUNT;
+-}
+-
+-static void matroxfb_dh_dec_use(struct i2c_adapter* dummy) {
+- MOD_DEC_USE_COUNT;
+-}
+-
+ static struct i2c_adapter matrox_i2c_adapter_template =
+ {
++ .owner = THIS_MODULE,
+ .id = I2C_HW_B_G400,
+- .inc_use = matroxfb_dh_inc_use,
+- .dec_use = matroxfb_dh_dec_use,
+ };
+
+ static struct i2c_algo_bit_data matrox_i2c_algo_template =
+diff -ru linux-2.4.22-orig/drivers/video/matrox/matroxfb_maven.c linux-2.4.22-i2c/drivers/video/matrox/matroxfb_maven.c
+--- linux-2.4.22-orig/drivers/video/matrox/matroxfb_maven.c Tue Jul 15 12:23:53 2003
++++ linux-2.4.22-i2c/drivers/video/matrox/matroxfb_maven.c Sun Aug 31 12:32:51 2003
+@@ -1246,14 +1246,6 @@
+ static unsigned short normal_i2c_range[] = { MAVEN_I2CID, MAVEN_I2CID, I2C_CLIENT_END };
+ I2C_CLIENT_INSMOD;
+
+-static void maven_inc_use(struct i2c_client* clnt) {
+- MOD_INC_USE_COUNT;
+-}
+-
+-static void maven_dec_use(struct i2c_client* clnt) {
+- MOD_DEC_USE_COUNT;
+-}
+-
+ static struct i2c_driver maven_driver;
+
+ static int maven_detect_client(struct i2c_adapter* adapter, int address, unsigned short flags,
+@@ -1320,14 +1312,13 @@
+ static int maven_driver_registered = 0;
+
+ static struct i2c_driver maven_driver={
+- "maven",
+- I2C_DRIVERID_MGATVO,
+- I2C_DF_NOTIFY,
+- maven_attach_adapter,
+- maven_detach_client,
+- maven_command,
+- maven_inc_use,
+- maven_dec_use
++ .owner = THIS_MODULE,
++ .name = "maven",
++ .id = I2C_DRIVERID_MGATVO,
++ .flags = I2C_DF_NOTIFY,
++ .attach_adapter = maven_attach_adapter,
++ .detach_client = maven_detach_client,
++ .command = maven_command
+ };
+
+ /* ************************** */