+++ /dev/null
-Finally, I debugged it today. The problem is in PNP BIOS. pnp_bus_suspend()
-calls pnp_stop_dev() for the device if the device can be disabled according
-to pnp_can_disable(). The problem is that pnpbios_disable_resources()
-returns -EPERM if the device is not dynamic (!pnpbios_is_dynamic()) but
-insert_device() happily sets PNP_DISABLE capability/flag even if the device
-is not dynamic. So we try to disable non-dynamic devices which will fail.
-This patch prevents insert_device() from setting PNP_DISABLE if the device is
-not dynamic and fixes suspend on my system.
-
-Signed-off-by: Ondrej Zary <linux@rainbow-software.org>
-
---- linux-2.6.17.orig/drivers/pnp/pnpbios/core.c 2006-08-15 18:12:15.129352250 +0200
-+++ linux-2.6.17/drivers/pnp/pnpbios/core.c 2006-08-15 18:12:36.366679500 +0200
-@@ -346,7 +346,7 @@
- dev->flags = node->flags;
- if (!(dev->flags & PNPBIOS_NO_CONFIG))
- dev->capabilities |= PNP_CONFIGURABLE;
-- if (!(dev->flags & PNPBIOS_NO_DISABLE))
-+ if (!(dev->flags & PNPBIOS_NO_DISABLE) && pnpbios_is_dynamic(dev))
- dev->capabilities |= PNP_DISABLE;
- dev->capabilities |= PNP_READ;
- if (pnpbios_is_dynamic(dev))
+++ /dev/null
-From: Linus Torvalds <torvalds@linux-foundation.org>
-Date: Thu, 13 Aug 2009 15:28:36 +0000 (-0700)
-Subject: Make sock_sendpage() use kernel_sendpage()
-X-Git-Tag: v2.6.31-rc6~8
-X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=e694958388c50148389b0e9b9e9e8945cf0f1b98
-
-Make sock_sendpage() use kernel_sendpage()
-
-kernel_sendpage() does the proper default case handling for when the
-socket doesn't have a native sendpage implementation.
-
-Now, arguably this might be something that we could instead solve by
-just specifying that all protocols should do it themselves at the
-protocol level, but we really only care about the common protocols.
-Does anybody really care about sendpage on something like Appletalk? Not
-likely.
-
-Acked-by: David S. Miller <davem@davemloft.net>
-Acked-by: Julien TINNES <julien@cr0.org>
-Acked-by: Tavis Ormandy <taviso@sdf.lonestar.org>
-Cc: stable@kernel.org
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
----
-
-diff --git a/net/socket.c b/net/socket.c
-index 791d71a..6d47165 100644
---- a/net/socket.c
-+++ b/net/socket.c
-@@ -736,7 +736,7 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
- if (more)
- flags |= MSG_MORE;
-
-- return sock->ops->sendpage(sock, page, offset, size, flags);
-+ return kernel_sendpage(sock, page, offset, size, flags);
- }
-
- static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
# TODO:
-# - test pax stuff (btw. tested ok in softmode)
-# - prepare config for non SEGMEXEC capable archs (ie not x86/32bit)
-# - patch scripts/Makefile.xen not to require bash
-# - make PAE usage configurable when Xen is on
+# - test pax stuff (btw. tested ok in softmode)
+# - prepare config for non SEGMEXEC capable archs (ie not x86/32bit)
+# - patch scripts/Makefile.xen not to require bash
+# - make PAE usage configurable when Xen is on
# ALL
# - #vserver: try to get a 2.2.x kernel patch or if you like development
# features a 2.3.x one instead of the long discontinued 2.1.x you are using
-# - with xen0/xenU does not compile due to cyrix-specific changes in 2.6.16.61:
-# http://git.kernel.org/?p=linux/kernel/git/stable/linux-2.6.16.y.git;a=commitdiff;h=69731ebbb3d2283c2c33a2bf262d785e2362b876
-#
#
# WARNING: Kernels from 2.6.16.X series not work under OldWorldMac
#
%endif
%if %{with xen0} || %{with xenU}
-%define with_pae 1
-%define with_xen 1
+%define pae 1
%endif
## Programs required by kernel to work.
%define squashfs_version 3.1
%define suspend_version 2.2.5
-%define xen_hv_abi 3.0
+%define xen_version 3.0.2
-%define __alt_kernel %{?with_pax:pax}%{?with_grsec_full:grsecurity}%{?with_xen0:xen0}%{?with_xenU:xenU}%{!?with_xen:%{?with_pae:pae}}
+%define __alt_kernel %{?with_pax:pax}%{?with_grsec_full:grsecurity}%{?with_xen0:xen0}%{?with_xenU:xenU}
%if "%{__alt_kernel}" != ""
%define alt_kernel %{__alt_kernel}
%endif
# Our Kernel ABI, increase this when you want the out of source modules being rebuilt
# Usually same as %{_rel}
-%define KABI 1
+%define KABI 6
# kernel release (used in filesystem and eventually in uname -r)
# modules will be looked from /lib/modules/%{kernel_release}%{?smp}
%define kernel_release %{version}%{?alt_kernel:_%{alt_kernel}}-%{_localversion}
%define _basever 2.6.16
-%define _postver .62
-%define _rel 2
+%define _postver .60
+%define _rel 14
Summary: The Linux kernel (the core of the Linux operating system)
Summary(de.UTF-8): Der Linux-Kernel (Kern des Linux-Betriebssystems)
Summary(et.UTF-8): Linuxi kernel (ehk operatsioonisüsteemi tuum)
Source1: kernel-autoconf.h
Source2: kernel-config.h
Source3: http://www.kernel.org/pub/linux/kernel/v2.6/patch-%{version}.bz2
-# Source3-md5: 65e7b7a48bbef41ea1e3702e139b3411
+# Source3-md5: be03a1889d7c89f208a18b55870e3a6f
Source5: kernel-ppclibs.Makefile
Source7: kernel-module-build.pl
Patch102: linux-2.6-vs2.1-128IPs.patch
Patch103: linux-vcontext-selinux.patch
Patch104: kernel-CVE-2008-0163.patch
-Patch105: kernel-CVE-2009-2692.patch
# from http://www.cl.cam.ac.uk/Research/SRG/netos/xen/downloads/xen-3.0.2-src.tgz
-#Patch120: kernel-xen.patch
Patch120: xen-3.0-2.6.16.patch
Patch121: linux-xen-page_alloc.patch
Patch122: kernel-xen-sparse-nv.patch
Patch201: linux-2.6-x86_64-stack-protector.patch
Patch202: linux-2.6-unwind-through-signal-frames.patch
-# nForce ethernet driver forcedeth and newer nvidia sata drivers from nvidia's website
-Patch250: linux-nvidia.patch
+# Wake-On-Lan patch for nVidia nForce ethernet driver forcedeth
+Patch250: linux-2.6.16-forcedeth-WON.patch
+Patch251: linux-nvidia.patch
# From ALSA 1.0.13 for nVidia
Patch252: linux-alsa-hda.patch
# add tty ioctl to figure physical device of the console. used by showconsole.spec (blogd)
Patch256: kernel-TIOCGDEV.patch
-# HP/Compaq cciss driver
-Patch260: linux-2.6-cciss-3.6.18.patch
-
Patch1000: linux-2.6-grsec-minimal.patch
Patch1001: linux-2.6-grsec-wrong-deref.patch
Provides: %{name}(vermagic) = %{kernel_release}
Provides: %{name}-up = %{epoch}:%{version}-%{release}
%if %{with xen0}
-Requires: xen-hypervisor-abi = %{xen_hv_abi}
+Requires: xen >= %{xen_version}
+Provides: kernel(xen0) = %{xen_version}
%endif
Obsoletes: kernel-misc-fuse
Obsoletes: kernel-modules
Conflicts: udev < %{_udev_ver}
Conflicts: util-linux < %{_util_linux_ver}
Conflicts: xfsprogs < %{_xfsprogs_ver}
-%if %{with xen} || %{with pae}
-ExclusiveArch: %{ix86} %{?with_xen:%{x8664}}
+%if %{with xen0} || %{with xenU}
+ExclusiveArch: %{ix86} %{x8664}
ExcludeArch: i386 i486 i586
%else
ExclusiveArch: %{ix86} alpha %{x8664} ia64 ppc ppc64 sparc sparc64
Provides: %{name}(netfilter) = %{netfilter_snap}
Provides: %{name}-smp(vermagic) = %{kernel_release}
%if %{with xen0}
-Requires: xen-hypervisor-abi = %{xen_hv_abi}
+Requires: xen >= %{xen_version}
+Provides: kernel(xen0) = %{xen_version}
%endif
Obsoletes: kernel-smp-misc-fuse
Obsoletes: kernel-smp-net-hostap
%patch102 -p1
%patch103 -p1
%patch104 -p1
-%patch105 -p1
-%if %{with xen}
+%if %{with xen0} || %{with xenU}
%ifarch %{ix86} %{x8664} ia64
%patch120 -p1
%patch121 -p1
%endif
%patch250 -p1
+%patch251 -p1
%patch252 -p1
%patch256 -p1
-%patch260 -p1
-
# security patches
%patch1200 -p1
cat %{SOURCE51} >> arch/%{_target_base_arch}/defconfig
%endif
-%if %{with xen}
+%if %{with xen0} || %{with xenU}
sed -i "s:CONFIG_X86_PC=y:# CONFIG_X86_PC is not set:" arch/%{_target_base_arch}/defconfig
sed -i "s:CONFIG_RIO=[ym]:# CONFIG_RIO is not set:" arch/%{_target_base_arch}/defconfig
%endif
%else
%{__make} %CrossOpts \
-%if %{with xen}
+%if %{with xen0} || %{with xenU}
SHELL=/bin/bash \
%endif
%{?with_verbose:V=1}
mkdir -p $KERNEL_INSTALL_DIR/boot
install System.map $KERNEL_INSTALL_DIR/boot/System.map-$KernelVer
%ifarch %{ix86} %{x8664}
-%if %{with xen}
+%if %{with xen0} || %{with xenU}
install vmlinuz $KERNEL_INSTALL_DIR/boot/vmlinuz-$KernelVer
%else
install arch/%{_target_base_arch}/boot/bzImage $KERNEL_INSTALL_DIR/boot/vmlinuz-$KernelVer
if [ -x /sbin/new-kernel-pkg ]; then
%if %{with xen0}
- xenimg=%{initrd_dir}/xen.gz
- xenver=
- xen=$(readlink -f $xenimg)
- if [ "$xen" != "$xenimg" ]; then
- xenver=${xen#%{initrd_dir}/xen-}
- xenver=${xenver%.gz}
- fi
+ xen=$(readlink -f /boot/xen.gz)
+ xenver=${xen#/boot/xen-}
+ xenver=${xenver%.gz}
- title="Xen${xenver:+ $xenver} / PLD Linux (%{pld_release})"
+ title="Xen $xenver / PLD Linux (%{pld_release})"
args=--multiboot=$xen
%else
title="PLD Linux (%{pld_release})%{?alt_kernel: / %{alt_kernel}}"
if [ -x /sbin/new-kernel-pkg ]; then
%if %{with xen0}
- xenimg=%{initrd_dir}/xen.gz
- xenver=
- xen=$(readlink -f $xenimg)
- if [ "$xen" != "$xenimg" ]; then
- xenver=${xen#%{initrd_dir}/xen-}
- xenver=${xenver%.gz}
- fi
+ xen=$(readlink -f /boot/xen.gz)
+ xenver=${xen#/boot/xen-}
+ xenver=${xenver%.gz}
- title="Xen${xenver:+ $xenver} / PLD Linux (%{pld_release})"
+ title="Xen $xenver / PLD Linux (%{pld_release})"
args=--multiboot=$xen
%else
title="PLD Linux (%{pld_release})%{?alt_kernel: / %{alt_kernel}}"
%if %{have_drm}
%exclude /lib/modules/%{kernel_release}/kernel/drivers/char/drm
%endif
-%if %{have_oss} && %{have_isa} && %{without xen}
+%if %{have_oss} && %{have_isa} && %{without xen0} && %{without xenU}
%exclude /lib/modules/%{kernel_release}/kernel/drivers/media/radio/miropcm20.ko*
%endif
%if %{with abi}
%files sound-oss
%defattr(644,root,root,755)
/lib/modules/%{kernel_release}/kernel/sound/oss
-%if %{have_isa} && %{without xen}
+%if %{have_isa} && %{without xen0} && %{without xenU}
/lib/modules/%{kernel_release}/kernel/drivers/media/radio/miropcm20.ko*
%endif
%endif
%if %{have_drm}
%exclude /lib/modules/%{kernel_release}smp/kernel/drivers/char/drm
%endif
-%if %{have_oss} && %{have_isa} && %{without xen}
+%if %{have_oss} && %{have_isa} && %{without xen0} && %{without xenU}
%exclude /lib/modules/%{kernel_release}smp/kernel/drivers/media/radio/miropcm20.ko*
%endif
%if %{with abi}
%files smp-sound-oss
%defattr(644,root,root,755)
/lib/modules/%{kernel_release}smp/kernel/sound/oss
-%if %{have_isa} && %{without xen}
+%if %{have_isa} && %{without xen0} && %{without xenU}
/lib/modules/%{kernel_release}smp/kernel/drivers/media/radio/miropcm20.ko*
%endif
%endif
+++ /dev/null
-diff -uNr linux-2.6.16.orig/Documentation/cciss.txt linux-2.6.16/Documentation/cciss.txt
---- linux-2.6.16.orig/Documentation/cciss.txt 2006-03-20 06:53:29.000000000 +0100
-+++ linux-2.6.16/Documentation/cciss.txt 2008-10-03 02:40:19.000000000 +0200
-@@ -16,10 +16,16 @@
- * SA 6i
- * SA P600
- * SA P800
-- * SA E400
-+ * SA P400
- * SA P400i
- * SA E200
- * SA E200i
-+ * SA E500
-+
-+This driver also implements a new method of detecting HP Smart Array
-+controllers. If an "unknown" controller is detected, we will attempt to
-+bind to the controller. On success a message is displayed informing the
-+user to upgrade the driver. On failure we bail.
-
- If nodes are not already created in the /dev/cciss directory, run as root:
-
-@@ -36,8 +42,8 @@
- Major numbers:
- 104 cciss0
- 105 cciss1
-- 106 cciss2
-- 105 cciss3
-+ 106 cciss2
-+ 107 cciss3
- 108 cciss4
- 109 cciss5
- 110 cciss6
-@@ -79,7 +85,7 @@
- the SCSI core may not yet be initialized (because the driver is a block
- driver) and attempting to register it with the SCSI core in such a case
- would cause a hang. This is best done via an initialization script
--(typically in /etc/init.d, but could vary depending on distibution).
-+(typically in /etc/init.d, but could vary depending on distribution).
- For example:
-
- for x in /proc/driver/cciss/cciss[0-9]*
-@@ -145,18 +151,18 @@
- If that doesn't work, the SCSI bus is reset. If that doesn't work
- the host bus adapter is reset. Because the cciss driver is a block
- driver as well as a SCSI driver and only the tape drives and medium
--changers are presented to the SCSI mid layer, and unlike more
-+changers are presented to the SCSI mid layer, and unlike more
- straightforward SCSI drivers, disk i/o continues through the block
- side during the SCSI error recovery process, the cciss driver only
- implements the first two of these actions, aborting the command, and
--resetting the device. Additionally, most tape drives will not oblige
--in aborting commands, and sometimes it appears they will not even
--obey a reset coommand, though in most circumstances they will. In
--the case that the command cannot be aborted and the device cannot be
-+resetting the device. Additionally, most tape drives will not oblige
-+in aborting commands, and sometimes it appears they will not even
-+obey a reset command, though in most circumstances they will. In
-+the case that the command cannot be aborted and the device cannot be
- reset, the device will be set offline.
-
- In the event the error handling code is triggered and a tape drive is
--successfully reset or the tardy command is successfully aborted, the
-+successfully reset or the tardy command is successfully aborted, the
- tape drive may still not allow i/o to continue until some command
- is issued which positions the tape to a known position. Typically you
- must rewind the tape (by issuing "mt -f /dev/st0 rewind" for example)
-diff -uNr linux-2.6.16.orig/drivers/block/cciss.c linux-2.6.16/drivers/block/cciss.c
---- linux-2.6.16.orig/drivers/block/cciss.c 2008-11-02 19:51:53.000000000 +0100
-+++ linux-2.6.16/drivers/block/cciss.c 2008-10-03 02:40:19.000000000 +0200
-@@ -33,6 +33,7 @@
- #include <linux/bio.h>
- #include <linux/blkpg.h>
- #include <linux/timer.h>
-+#include <linux/seq_file.h>
- #include <linux/proc_fs.h>
- #include <linux/init.h>
- #include <linux/hdreg.h>
-@@ -40,111 +41,121 @@
- #include <linux/compat.h>
- #include <asm/uaccess.h>
- #include <asm/io.h>
-+#include <asm/div64.h>
-+
-+#ifdef CONFIG_BLK_DEV_IO_TRACE
-+#include <linux/blktrace_api.h>
-+#endif
-
- #include <linux/dma-mapping.h>
- #include <linux/blkdev.h>
- #include <linux/genhd.h>
- #include <linux/completion.h>
-+#include <scsi/scsi.h>
-+#include <scsi/sg.h>
-+#include <scsi/scsi_ioctl.h>
-+#include <linux/cdrom.h>
-
- #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
--#define DRIVER_NAME "HP CISS Driver (v 2.6.10)"
--#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,10)
-+#define DRIVER_NAME "HP CISS Driver (v 3.6.18)"
-+#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,18)
-
- /* Embedded module documentation macros - see modules.h */
- MODULE_AUTHOR("Hewlett-Packard Company");
--MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.10");
-+MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.18");
- MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
-- " SA6i P600 P800 P400 P400i E200 E200i");
-+ " SA6i P600 P800 P400 P400i E200 E200i E500");
- MODULE_LICENSE("GPL");
-
- #include "cciss_cmd.h"
- #include "cciss.h"
- #include <linux/cciss_ioctl.h>
-
-+#ifndef PCI_DEVICE_ID_COMPAQ_CISSC
-+#define PCI_DEVICE_ID_COMPAQ_CISSC 0x46
-+#endif
-+#ifndef PCI_DEVICE_ID_HP_CISS
-+#define PCI_DEVICE_ID_HP_CISS 0x3210
-+#endif
-+#ifndef PCI_DEVICE_ID_HP_CISSA
-+#define PCI_DEVICE_ID_HP_CISSA 0x3220
-+#endif
-+#ifndef PCI_DEVICE_ID_HP_CISSC
-+#define PCI_DEVICE_ID_HP_CISSC 0x3230
-+#endif
-+#ifndef PCI_DEVICE_ID_HP_CISSD
-+#define PCI_DEVICE_ID_HP_CISSD 0x3238
-+#endif
- /* define the PCI info for the cards we can control */
- static const struct pci_device_id cciss_pci_device_id[] = {
-- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
-- 0x0E11, 0x4070, 0, 0, 0},
-- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
-- 0x0E11, 0x4080, 0, 0, 0},
-- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
-- 0x0E11, 0x4082, 0, 0, 0},
-- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
-- 0x0E11, 0x4083, 0, 0, 0},
-- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
-- 0x0E11, 0x409A, 0, 0, 0},
-- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
-- 0x0E11, 0x409B, 0, 0, 0},
-- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
-- 0x0E11, 0x409C, 0, 0, 0},
-- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
-- 0x0E11, 0x409D, 0, 0, 0},
-- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
-- 0x0E11, 0x4091, 0, 0, 0},
-- { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
-- 0x103C, 0x3225, 0, 0, 0},
-- { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
-- 0x103c, 0x3223, 0, 0, 0},
-- { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
-- 0x103c, 0x3234, 0, 0, 0},
-- { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
-- 0x103c, 0x3235, 0, 0, 0},
-- { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
-- 0x103c, 0x3211, 0, 0, 0},
-- { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
-- 0x103c, 0x3212, 0, 0, 0},
-- { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
-- 0x103c, 0x3213, 0, 0, 0},
-- { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
-- 0x103c, 0x3214, 0, 0, 0},
-- { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
-- 0x103c, 0x3215, 0, 0, 0},
-+ {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
-+ {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
-+ {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
-+ {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
-+ {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
-+ {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
-+ {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
-+ {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
-+ {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
-+ {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409E},
-+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
-+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103c, 0x3234},
-+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103c, 0x3235},
-+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103c, 0x3211},
-+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103c, 0x3212},
-+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103c, 0x3213},
-+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103c, 0x3214},
-+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103c, 0x3215},
-+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
-+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103c, 0x3237},
-+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
-+ {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
-+ PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
- {0,}
- };
- MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
-
--#define NR_PRODUCTS ARRAY_SIZE(products)
--
- /* board_id = Subsystem Device ID & Vendor ID
- * product = Marketing Name for the board
- * access = Address of the struct of function pointers
-+ * nr_cmds = Number of commands supported by controller
- */
- static struct board_type products[] = {
-- { 0x40700E11, "Smart Array 5300", &SA5_access },
-- { 0x40800E11, "Smart Array 5i", &SA5B_access},
-- { 0x40820E11, "Smart Array 532", &SA5B_access},
-- { 0x40830E11, "Smart Array 5312", &SA5B_access},
-- { 0x409A0E11, "Smart Array 641", &SA5_access},
-- { 0x409B0E11, "Smart Array 642", &SA5_access},
-- { 0x409C0E11, "Smart Array 6400", &SA5_access},
-- { 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
-- { 0x40910E11, "Smart Array 6i", &SA5_access},
-- { 0x3225103C, "Smart Array P600", &SA5_access},
-- { 0x3223103C, "Smart Array P800", &SA5_access},
-- { 0x3234103C, "Smart Array P400", &SA5_access},
-- { 0x3235103C, "Smart Array P400i", &SA5_access},
-- { 0x3211103C, "Smart Array E200i", &SA5_access},
-- { 0x3212103C, "Smart Array E200", &SA5_access},
-- { 0x3213103C, "Smart Array E200i", &SA5_access},
-- { 0x3214103C, "Smart Array E200i", &SA5_access},
-- { 0x3215103C, "Smart Array E200i", &SA5_access},
-+ { 0x40700E11, "Smart Array 5300", &SA5_access, 384},
-+ { 0x40800E11, "Smart Array 5i", &SA5B_access, 384},
-+ { 0x40820E11, "Smart Array 532", &SA5B_access, 384},
-+ { 0x40830E11, "Smart Array 5312", &SA5B_access, 384},
-+ { 0x409A0E11, "Smart Array 641", &SA5_access, 384},
-+ { 0x409B0E11, "Smart Array 642", &SA5_access, 384},
-+ { 0x409C0E11, "Smart Array 6400", &SA5_access, 384},
-+ { 0x409D0E11, "Smart Array 6400 EM", &SA5_access, 384},
-+ { 0x40910E11, "Smart Array 6i", &SA5_access, 384},
-+ { 0x409E0E11, "Smart Array 6422", &SA5_access, 384},
-+ { 0x3225103C, "Smart Array P600", &SA5_access, 384},
-+ { 0x3234103C, "Smart Array P400", &SA5_access, 512},
-+ { 0x3235103C, "Smart Array P400i", &SA5_access, 512},
-+ { 0x3211103C, "Smart Array E200i", &SA5_access,120},
-+ { 0x3212103C, "Smart Array E200", &SA5_access,120},
-+ { 0x3213103C, "Smart Array E200i", &SA5_access,120},
-+ { 0x3214103C, "Smart Array E200i", &SA5_access, 120},
-+ { 0x3215103C, "Smart Array E200i", &SA5_access, 120},
-+ { 0x3223103C, "Smart Array P800", &SA5_access, 512},
-+ { 0x3237103C, "Smart Array E500", &SA5_access, 128},
-+ { 0x323D103C, "Smart Array P700m", &SA5_access, 512},
-+ { 0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
- };
-
--/* How long to wait (in millesconds) for board to go into simple mode */
-+/* How long to wait (in millisconds) for board to go into simple mode */
- #define MAX_CONFIG_WAIT 30000
- #define MAX_IOCTL_CONFIG_WAIT 1000
-
- /*define how many times we will try a command because of bus resets */
- #define MAX_CMD_RETRIES 3
--
--#define READ_AHEAD 1024
--#define NR_CMDS 384 /* #commands that can be outstanding */
- #define MAX_CTLR 32
-
- /* Originally cciss driver only supports 8 major numbers */
- #define MAX_CTLR_ORIG 8
-
--
- static ctlr_info_t *hba[MAX_CTLR];
-
- static void do_cciss_request(request_queue_t *q);
-@@ -155,35 +166,40 @@
- unsigned int cmd, unsigned long arg);
- static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-
--static int revalidate_allvol(ctlr_info_t *host);
- static int cciss_revalidate(struct gendisk *disk);
- static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
--static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all);
--
--static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
-- int withirq, unsigned int *total_size, unsigned int *block_size);
-+static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
-+ int clear_all);
-+static void cciss_read_capacity(int ctlr, int logvol, int withirq,
-+ sector_t *total_size, unsigned int *block_size);
-+static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
-+ sector_t *total_size, unsigned int *block_size);
- static void cciss_geometry_inquiry(int ctlr, int logvol,
-- int withirq, unsigned int total_size,
-+ int withirq, sector_t total_size,
- unsigned int block_size, InquiryData_struct *inq_buff,
- drive_info_struct *drv);
- static void cciss_getgeometry(int cntl_num);
--static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, __u32);
-+static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
-+ __u32);
- static void start_io( ctlr_info_t *h);
- static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
-- unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
-- unsigned char *scsi3addr, int cmd_type);
-+ unsigned int use_unit_num, unsigned int log_unit,
-+ __u8 page_code, unsigned char *scsi3addr, int cmd_type);
- static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
-- unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
-- int cmd_type);
-+ unsigned int use_unit_num, unsigned int log_unit,
-+ __u8 page_code, int cmd_type);
-
--static void fail_all_cmds(unsigned long ctlr);
-+static void cciss_shutdown (struct pci_dev *pdev);
-+static void __devexit cciss_remove_one(struct pci_dev *pdev);
-
-+static void fail_all_cmds(unsigned long ctlr);
-+static void print_cmd(CommandList_struct *);
- #ifdef CONFIG_PROC_FS
--static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
-- int length, int *eof, void *data);
- static void cciss_procinit(int i);
- #else
--static void cciss_procinit(int i) {}
-+static void cciss_procinit(int i)
-+{
-+}
- #endif /* CONFIG_PROC_FS */
-
- #ifdef CONFIG_COMPAT
-@@ -222,7 +238,8 @@
- CommandList_struct *c)
- {
- if (c && c->next != c) {
-- if (*Qptr == c) *Qptr = c->next;
-+ if (*Qptr == c)
-+ *Qptr = c->next;
- c->prev->next = c->next;
- c->next->prev = c->prev;
- } else {
-@@ -231,46 +248,492 @@
- return c;
- }
-
--#include "cciss_scsi.c" /* For SCSI tape support */
-+static inline int find_drv_index(int ctlr, drive_info_struct *drv){
-+ int i;
-+ for (i=0; i < CISS_MAX_LUN; i++) {
-+ if (hba[ctlr]->drv[i].LunID == drv->LunID)
-+ return i;
-+ }
-+ return i;
-+}
-
--#ifdef CONFIG_PROC_FS
-+#include "cciss_scsi.c" /* For SCSI tape support */
-
--/*
-- * Report information about this controller.
-- */
- #define ENG_GIG 1000000000
- #define ENG_GIG_FACTOR (ENG_GIG/512)
- #define RAID_UNKNOWN 6
-+#define ENGAGE_SCSI "engage scsi"
-+#define RESCAN_VOLUMES "rescan volumes"
-+
- static const char *raid_label[] = {"0","4","1(1+0)","5","5+1","ADG",
- "UNKNOWN"};
-
--static struct proc_dir_entry *proc_cciss;
-+static spinlock_t sysfs_lock = SPIN_LOCK_UNLOCKED;
-+
-+static void cciss_sysfs_stat_inquiry(int ctlr, int logvol,
-+ int withirq, drive_info_struct *drv)
-+{
-+ int return_code;
-+ InquiryData_struct *inq_buff;
-+
-+ /* If there are no heads then this is the controller disk and
-+ * not a valid logical drive so don't query it.
-+ */
-+ if (!drv->heads)
-+ return;
-+
-+ inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
-+ if (!inq_buff) {
-+ printk(KERN_ERR "cciss: out of memory\n");
-+ goto err;
-+ }
-+
-+ if (withirq)
-+ return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
-+ inq_buff, sizeof(*inq_buff), 1, logvol ,0, TYPE_CMD);
-+ else
-+ return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
-+ sizeof(*inq_buff), 1, logvol , 0, NULL, TYPE_CMD);
-+ if (return_code == IO_OK) {
-+ memcpy(drv->vendor, &inq_buff->data_byte[8], 8);
-+ drv->vendor[8]='\0';
-+ memcpy(drv->model, &inq_buff->data_byte[16], 16);
-+ drv->model[16] = '\0';
-+ memcpy(drv->rev, &inq_buff->data_byte[32], 4);
-+ drv->rev[4] = '\0';
-+ } else { /* Get geometry failed */
-+ printk(KERN_WARNING "cciss: inquiry for VPD page 0 failed\n");
-+ }
-+
-+ if (withirq)
-+ return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
-+ inq_buff, sizeof(*inq_buff), 1, logvol ,0x83, TYPE_CMD);
-+ else
-+ return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
-+ sizeof(*inq_buff), 1, logvol , 0x83, NULL, TYPE_CMD);
-+
-+ if (return_code == IO_OK) {
-+ memcpy(drv->uid, &inq_buff->data_byte[8], 16);
-+ } else { /* Get geometry failed */
-+ printk(KERN_WARNING "cciss: id logical drive failed\n");
-+ }
-+
-+ kfree(inq_buff);
-+err:
-+ drv->vendor[8] = '\0';
-+ drv->model[16] = '\0';
-+ drv->rev[4] = '\0';
-+
-+}
-
--static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
-- int length, int *eof, void *data)
-+static ssize_t cciss_show_raid_level(struct device *dev,
-+ struct device_attribute *attr, char *buf)
- {
-- off_t pos = 0;
-- off_t len = 0;
-- int size, i, ctlr;
-- ctlr_info_t *h = (ctlr_info_t*)data;
-- drive_info_struct *drv;
-+ struct drv_dynamic *d;
-+ drive_info_struct *drv;
-+ ctlr_info_t *h;
- unsigned long flags;
-- sector_t vol_sz, vol_sz_frac;
-+ int raid;
-
-- ctlr = h->ctlr;
-+ d = container_of(dev, struct drv_dynamic, dev);
-+ spin_lock(&sysfs_lock);
-+ if (!d->disk) {
-+ spin_unlock(&sysfs_lock);
-+ return -ENOENT;
-+ }
-
-- /* prevent displaying bogus info during configuration
-- * or deconfiguration of a logical volume
-- */
-- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-+ h = get_host(d->disk);
-+
-+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
- if (h->busy_configuring) {
-- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-- return -EBUSY;
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return snprintf(buf, 30, "Device busy configuring\n");
- }
-- h->busy_configuring = 1;
-- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-
-- size = sprintf(buffer, "%s: HP %s Controller\n"
-+ drv = d->disk->private_data;
-+ if ((drv->raid_level < 0) || (drv->raid_level) > 5)
-+ raid = RAID_UNKNOWN;
-+ else
-+ raid = drv->raid_level;
-+
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return snprintf(buf, 20, "RAID %s\n", raid_label[raid]);
-+}
-+
-+static ssize_t cciss_show_disk_size(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct drv_dynamic *d;
-+ drive_info_struct *drv;
-+ ctlr_info_t *h;
-+ unsigned long flags;
-+ sector_t vol_sz, vol_sz_frac;
-+
-+ d = container_of(dev, struct drv_dynamic, dev);
-+ spin_lock(&sysfs_lock);
-+ if (!d->disk) {
-+ spin_unlock(&sysfs_lock);
-+ return -ENOENT;
-+ }
-+ h = get_host(d->disk);
-+
-+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
-+ if (h->busy_configuring) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return snprintf(buf, 30, "Device busy configuring\n");
-+ }
-+
-+ drv = d->disk->private_data;
-+ vol_sz = drv->nr_blocks;
-+ vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
-+ vol_sz_frac *= 100;
-+ sector_div(vol_sz_frac, ENG_GIG_FACTOR);
-+
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return snprintf(buf, 30, "%4u.%02uGB\n", (int)vol_sz, (int)vol_sz_frac);
-+}
-+
-+static ssize_t cciss_show_usage_count(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct drv_dynamic *d;
-+ drive_info_struct *drv;
-+ int count;
-+
-+ d = container_of(dev, struct drv_dynamic, dev);
-+ spin_lock(&sysfs_lock);
-+ if (!d->disk) {
-+ spin_unlock(&sysfs_lock);
-+ return -ENOENT;
-+ }
-+ drv = d->disk->private_data;
-+ count = drv->usage_count;
-+ spin_unlock(&sysfs_lock);
-+ return snprintf(buf, 20, "%d\n", count);
-+}
-+
-+static ssize_t cciss_show_vendor(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct drv_dynamic *d;
-+ drive_info_struct *drv;
-+ ctlr_info_t *h;
-+ unsigned long flags;
-+ int drv_index;
-+
-+ d = container_of(dev, struct drv_dynamic, dev);
-+ spin_lock(&sysfs_lock);
-+ if (!d->disk) {
-+ spin_unlock(&sysfs_lock);
-+ return -ENOENT;
-+ }
-+
-+ h = get_host(d->disk);
-+
-+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
-+ if (h->busy_configuring) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -EBUSY;
-+ }
-+
-+ drv = d->disk->private_data;
-+
-+ if (!drv->heads) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -ENOTTY;
-+ }
-+
-+ drv_index = find_drv_index(h->ctlr, drv);
-+ if (drv_index != CISS_MAX_LUN) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return snprintf(buf, 20, "%s\n", (char *)drv->vendor);
-+ }
-+
-+ printk(KERN_ERR "cciss: logical drive not found\n");
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -ENOTTY;
-+}
-+
-+static ssize_t cciss_show_model(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct drv_dynamic *d;
-+ drive_info_struct *drv;
-+ ctlr_info_t *h;
-+ unsigned long flags;
-+ int drv_index;
-+
-+ d = container_of(dev, struct drv_dynamic, dev);
-+ spin_lock(&sysfs_lock);
-+ if (!d->disk) {
-+ spin_unlock(&sysfs_lock);
-+ return -ENOENT;
-+ }
-+
-+ h = get_host(d->disk);
-+
-+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
-+ if (h->busy_configuring) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -EBUSY;
-+ }
-+
-+ drv = d->disk->private_data;
-+
-+ if (!drv->heads) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -ENOTTY;
-+ }
-+
-+ drv_index = find_drv_index(h->ctlr, drv);
-+ if (drv_index != CISS_MAX_LUN) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return snprintf(buf, 20, "%s\n", (char *)drv->model);
-+ }
-+ printk(KERN_ERR "cciss: logical drive not found\n");
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -ENOTTY;
-+}
-+
-+static ssize_t cciss_show_rev(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct drv_dynamic *d;
-+ drive_info_struct *drv;
-+ ctlr_info_t *h;
-+ unsigned long flags;
-+ int drv_index;
-+
-+ d = container_of(dev, struct drv_dynamic, dev);
-+ spin_lock(&sysfs_lock);
-+ if (!d->disk) {
-+ spin_unlock(&sysfs_lock);
-+ return -ENOENT;
-+ }
-+
-+ h = get_host(d->disk);
-+
-+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
-+ if (h->busy_configuring) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -EBUSY;
-+ }
-+
-+ drv = d->disk->private_data;
-+
-+ if (!drv->heads) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -ENOTTY;
-+ }
-+
-+ drv_index = find_drv_index(h->ctlr, drv);
-+ if (drv_index != CISS_MAX_LUN) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return snprintf(buf, 20, "%s\n", (char *)drv->rev);
-+ }
-+
-+ printk(KERN_ERR "cciss: logical drive not found\n");
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -ENOTTY;
-+}
-+
-+static ssize_t cciss_show_unique_id(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct drv_dynamic *d;
-+ drive_info_struct *drv;
-+ ctlr_info_t *h;
-+ unsigned long flags;
-+ int drv_index;
-+
-+ d = container_of(dev, struct drv_dynamic, dev);
-+ spin_lock(&sysfs_lock);
-+ if (!d->disk) {
-+ spin_unlock(&sysfs_lock);
-+ return -ENOENT;
-+ }
-+
-+ h = get_host(d->disk);
-+
-+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
-+ if (h->busy_configuring) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return snprintf(buf, 30, "Device busy configuring\n");
-+ }
-+ drv = d->disk->private_data;
-+
-+ if (!drv->heads) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -ENOTTY;
-+ }
-+
-+ drv_index = find_drv_index(h->ctlr, drv);
-+ if (drv_index != CISS_MAX_LUN) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+
-+ return snprintf(buf, 40, "%02X%02X%02X%02X%02X%02X%02X%02X"
-+ "%02X%02X%02X%02X%02X%02X%02X%02X\n",
-+ drv->uid[0], drv->uid[1], drv->uid[2],
-+ drv->uid[3], drv->uid[4], drv->uid[5],
-+ drv->uid[6], drv->uid[7], drv->uid[8],
-+ drv->uid[9], drv->uid[10], drv->uid[11],
-+ drv->uid[12], drv->uid[13], drv->uid[14],
-+ drv->uid[15]);
-+ }
-+
-+ printk(KERN_ERR "cciss: logical drive not found\n");
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -ENOENT;
-+}
-+
-+static ssize_t cciss_show_bus(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ return snprintf(buf, 20, "cciss\n");
-+}
-+
-+static ssize_t cciss_show_lunid(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct drv_dynamic *d;
-+ drive_info_struct *drv;
-+ ctlr_info_t *h;
-+ unsigned long flags;
-+ int drv_index;
-+
-+ d = container_of(dev, struct drv_dynamic, dev);
-+ spin_lock(&sysfs_lock);
-+ if (!d->disk) {
-+ spin_unlock(&sysfs_lock);
-+ return -ENOENT;
-+ }
-+
-+ h = get_host(d->disk);
-+
-+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
-+ if (h->busy_configuring) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -EBUSY;
-+ }
-+
-+ drv = d->disk->private_data;
-+
-+ if (!drv->heads) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -ENOTTY;
-+ }
-+
-+ drv_index = find_drv_index(h->ctlr, drv);
-+ if (drv_index != CISS_MAX_LUN) {
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return snprintf(buf, 20, "%d\n", drv->LunID);
-+ }
-+
-+ printk(KERN_ERR "cciss: logical drive not found\n");
-+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+ spin_unlock(&sysfs_lock);
-+ return -ENOTTY;
-+}
-+
-+DEVICE_ATTR(raid_level, S_IRUGO | S_IWUSR, cciss_show_raid_level, NULL);
-+DEVICE_ATTR(disk_size, S_IRUGO | S_IWUSR, cciss_show_disk_size, NULL);
-+DEVICE_ATTR(usage_count, S_IRUGO | S_IWUSR, cciss_show_usage_count, NULL);
-+DEVICE_ATTR(vendor, S_IRUGO | S_IWUSR, cciss_show_vendor, NULL);
-+DEVICE_ATTR(model, S_IRUGO | S_IWUSR, cciss_show_model, NULL);
-+DEVICE_ATTR(rev, S_IRUGO | S_IWUSR, cciss_show_rev, NULL);
-+DEVICE_ATTR(unique_id, S_IRUGO | S_IWUSR, cciss_show_unique_id, NULL);
-+DEVICE_ATTR(bus, S_IRUGO | S_IWUSR, cciss_show_bus, NULL);
-+DEVICE_ATTR(lunid, S_IRUGO | S_IWUSR, cciss_show_lunid, NULL);
-+
-+static struct attribute *cciss_sysfs_attrs[] = {
-+ &dev_attr_raid_level.attr,
-+ &dev_attr_disk_size.attr,
-+ &dev_attr_usage_count.attr,
-+ &dev_attr_vendor.attr,
-+ &dev_attr_model.attr,
-+ &dev_attr_rev.attr,
-+ &dev_attr_unique_id.attr,
-+ &dev_attr_bus.attr,
-+ &dev_attr_lunid.attr,
-+ NULL
-+};
-+
-+static struct attribute_group cciss_attrs = {.attrs = cciss_sysfs_attrs};
-+
-+static void cciss_add_blk_sysfs_dev(drive_info_struct *drv,
-+ struct gendisk* disk,
-+ struct pci_dev *pdev, int disk_num)
-+{
-+ struct drv_dynamic *d = kmalloc(sizeof(struct drv_dynamic), GFP_KERNEL);
-+ if (!d)
-+ return;
-+ memset(d, 0, sizeof(struct drv_dynamic));
-+ disk->driverfs_dev = &d->dev;
-+ d->dev.parent = &pdev->dev;
-+ d->dev.release = (void (*)(struct device *))kfree;
-+ sprintf(d->dev.bus_id, "disk%d", disk_num);
-+ d->dev.driver_data = "cciss";
-+ if (device_register(&d->dev)) {
-+ put_device(&d->dev);
-+ return;
-+ }
-+ sysfs_create_group(&d->dev.kobj, &cciss_attrs);
-+ d->disk = disk;
-+ drv->dev_info = &d->dev;
-+}
-+
-+static void cciss_remove_blk_sysfs_dev(struct gendisk *disk)
-+{
-+ drive_info_struct *drv = get_drv(disk);
-+ struct drv_dynamic *d;
-+
-+ if (!drv->dev_info)
-+ return;
-+
-+ d = container_of(drv->dev_info, struct drv_dynamic, dev);
-+ spin_lock(&sysfs_lock);
-+ sysfs_remove_group(&d->dev.kobj, &cciss_attrs);
-+ d->disk = NULL;
-+ spin_unlock(&sysfs_lock);
-+ device_unregister(drv->dev_info);
-+ drv->dev_info = NULL;
-+}
-+
-+#ifdef CONFIG_PROC_FS
-+
-+/*
-+ * Report information about this controller.
-+ */
-+static struct proc_dir_entry *proc_cciss;
-+
-+static void cciss_seq_show_header(struct seq_file *seq)
-+{
-+ ctlr_info_t *h = seq->private;
-+
-+ seq_printf(seq, "%s: HP %s Controller\n"
- "Board ID: 0x%08lx\n"
- "Firmware Version: %c%c%c%c\n"
- "IRQ: %d\n"
-@@ -279,94 +742,196 @@
- "Current # commands on controller: %d\n"
- "Max Q depth since init: %d\n"
- "Max # commands on controller since init: %d\n"
-- "Max SG entries since init: %d\n\n",
-- h->devname,
-- h->product_name,
-- (unsigned long)h->board_id,
-- h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
-- (unsigned int)h->intr[SIMPLE_MODE_INT],
-- h->num_luns,
-+ "Max SG entries since init: %d\n",
-+ h->devname,
-+ h->product_name,
-+ (unsigned long)h->board_id,
-+ h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
-+ h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
-+ h->num_luns,
- h->Qdepth, h->commands_outstanding,
- h->maxQsinceinit, h->max_outstanding, h->maxSG);
-+
-+#ifdef CONFIG_CISS_SCSI_TAPE
-+ cciss_seq_tape_report(seq, h->ctlr);
-+#endif /* CONFIG_CISS_SCSI_TAPE */
-+}
-
-- pos += size; len += size;
-- cciss_proc_tape_report(ctlr, buffer, &pos, &len);
-- for(i=0; i<=h->highest_lun; i++) {
-+static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
-+{
-+ ctlr_info_t *h = seq->private;
-+ unsigned ctlr = h->ctlr;
-+ unsigned long flags;
-
-- drv = &h->drv[i];
-- if (drv->heads == 0)
-- continue;
-+ /* prevent displaying bogus info during configuration
-+ * or deconfiguration of a logical volume
-+ */
-+ spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-+ if (h->busy_configuring) {
-+ spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-+ return ERR_PTR(-EBUSY);
-+ }
-+ h->busy_configuring = 1;
-+ spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-
-- vol_sz = drv->nr_blocks;
-- vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
-- vol_sz_frac *= 100;
-- sector_div(vol_sz_frac, ENG_GIG_FACTOR);
-+ if (*pos == 0)
-+ cciss_seq_show_header(seq);
-
-- if (drv->raid_level > 5)
-- drv->raid_level = RAID_UNKNOWN;
-- size = sprintf(buffer+len, "cciss/c%dd%d:"
-- "\t%4u.%02uGB\tRAID %s\n",
-- ctlr, i, (int)vol_sz, (int)vol_sz_frac,
-- raid_label[drv->raid_level]);
-- pos += size; len += size;
-- }
-+ return pos;
-+}
-+
-+static int cciss_seq_show(struct seq_file *seq, void *v)
-+{
-+ sector_t vol_sz, vol_sz_frac;
-+ ctlr_info_t *h = seq->private;
-+ unsigned ctlr = h->ctlr;
-+ loff_t *pos = v;
-+ drive_info_struct *drv = &h->drv[*pos];
-+
-+ if (*pos > h->highest_lun)
-+ return 0;
-+
-+ if (drv->heads == 0)
-+ return 0;
-+
-+ vol_sz = drv->nr_blocks;
-+ vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
-+ vol_sz_frac *= 100;
-+ sector_div(vol_sz_frac, ENG_GIG_FACTOR);
-+
-+ if (drv->raid_level > 5)
-+ drv->raid_level = RAID_UNKNOWN;
-+ seq_printf(seq, "cciss/c%dd%d:"
-+ "\t%4u.%02uGB\tRAID %s\n",
-+ ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
-+ raid_label[drv->raid_level]);
-+ return 0;
-+}
-+
-+static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ ctlr_info_t *h = seq->private;
-+
-+ if (*pos > h->highest_lun)
-+ return NULL;
-+ *pos += 1;
-+
-+ return pos;
-+}
-+
-+static void cciss_seq_stop(struct seq_file *seq, void *v)
-+{
-+ ctlr_info_t *h = seq->private;
-+
-+ /* Only reset h->busy_configuring if we succeeded in setting
-+ * it during cciss_seq_start. */
-+ if (v == ERR_PTR(-EBUSY))
-+ return;
-
-- *eof = 1;
-- *start = buffer+offset;
-- len -= offset;
-- if (len>length)
-- len = length;
- h->busy_configuring = 0;
-- return len;
- }
-
--static int
--cciss_proc_write(struct file *file, const char __user *buffer,
-- unsigned long count, void *data)
-+static struct seq_operations cciss_seq_ops = {
-+ .start = cciss_seq_start,
-+ .show = cciss_seq_show,
-+ .next = cciss_seq_next,
-+ .stop = cciss_seq_stop,
-+};
-+
-+static int cciss_seq_open(struct inode *inode, struct file *file)
-+{
-+ int ret = seq_open(file, &cciss_seq_ops);
-+ struct seq_file *seq = file->private_data;
-+
-+ if (!ret)
-+ seq->private = PDE(inode)->data;
-+
-+ return ret;
-+}
-+
-+static ssize_t
-+cciss_proc_write(struct file *file, const char __user *buf,
-+ size_t length, loff_t *ppos)
- {
-- unsigned char cmd[80];
-- int len;
--#ifdef CONFIG_CISS_SCSI_TAPE
-- ctlr_info_t *h = (ctlr_info_t *) data;
-+ int err;
-+ char *buffer;
-+ struct seq_file *seq = file->private_data;
-+ ctlr_info_t *h = seq->private;
- int rc;
--#endif
-
-- if (count > sizeof(cmd)-1) return -EINVAL;
-- if (copy_from_user(cmd, buffer, count)) return -EFAULT;
-- cmd[count] = '\0';
-- len = strlen(cmd); // above 3 lines ensure safety
-- if (len && cmd[len-1] == '\n')
-- cmd[--len] = '\0';
--# ifdef CONFIG_CISS_SCSI_TAPE
-- if (strcmp("engage scsi", cmd)==0) {
-- rc = cciss_engage_scsi(h->ctlr);
-- if (rc != 0) return -rc;
-- return count;
-- }
-+ if (!buf || length > PAGE_SIZE - 1)
-+ return -EINVAL;
-+
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer)
-+ return -ENOMEM;
-+
-+ err = -EFAULT;
-+ if (copy_from_user(buffer, buf, length))
-+ goto out;
-+ buffer[length] = '\0';
-+
-+ /* For the MSA2000 the firmware cannot tell the driver to
-+ * rescan when new logical volumes are created. We provide
-+ * this interface so users can `echo "rescan volumes" >
-+ * /proc/driver/cciss/ccissN` to accomplish that task. It's not
-+ * the best solution because it must be done on every server
-+ * that connected to the storage.
-+ */
-+ if (strncmp(RESCAN_VOLUMES, buffer, sizeof RESCAN_VOLUMES - 1) == 0) {
-+ /* rebuild_lun_table returns -1 on success to tell ACU
-+ * to quit calling it. In this case we just ignore any
-+ * return code.
-+ */
-+ (void) rebuild_lun_table(h, NULL);
-+ err = length;
-+ goto out;
-+ }
-+
-+#ifdef CONFIG_CISS_SCSI_TAPE
-+ if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
-+ rc = cciss_engage_scsi(h->ctlr);
-+ if (rc != 0)
-+ err = -rc;
-+ else
-+ err = length;
-+ } else
-+#endif /* CONFIG_CISS_SCSI_TAPE */
-+ err = -EINVAL;
- /* might be nice to have "disengage" too, but it's not
- safely possible. (only 1 module use count, lock issues.) */
--# endif
-- return -EINVAL;
-+
-+out:
-+ free_page((unsigned long)buffer);
-+ return err;
- }
-
--/*
-- * Get us a file in /proc/cciss that says something about each controller.
-- * Create /proc/cciss if it doesn't exist yet.
-- */
-+static struct file_operations cciss_proc_fops = {
-+ .owner = THIS_MODULE,
-+ .open = cciss_seq_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release,
-+ .write = cciss_proc_write,
-+};
-+
- static void __devinit cciss_procinit(int i)
- {
- struct proc_dir_entry *pde;
-
-- if (proc_cciss == NULL) {
-+ if (proc_cciss == NULL)
- proc_cciss = proc_mkdir("cciss", proc_root_driver);
-- if (!proc_cciss)
-- return;
-- }
-
-- pde = create_proc_read_entry(hba[i]->devname,
-- S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
-- proc_cciss, cciss_proc_get_info, hba[i]);
-- pde->write_proc = cciss_proc_write;
-+ if (!proc_cciss)
-+ return;
-+
-+ pde = create_proc_entry(hba[i]->devname,
-+ S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH, proc_cciss);
-+ if (!pde)
-+ return;
-+
-+ pde->proc_fops = &cciss_proc_fops;
-+ pde->data = hba[i];
- }
- #endif /* CONFIG_PROC_FS */
-
-@@ -377,52 +942,51 @@
- * to possible sleep, this routine can be called with get_from_pool set to 0.
- * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
- */
--static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
-+static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
- {
- CommandList_struct *c;
- int i;
- u64bit temp64;
- dma_addr_t cmd_dma_handle, err_dma_handle;
-
-- if (!get_from_pool)
-- {
-- c = (CommandList_struct *) pci_alloc_consistent(
-- h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
-- if(c==NULL)
-+ if (!get_from_pool) {
-+ c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
-+ sizeof(CommandList_struct), &cmd_dma_handle);
-+ if(c == NULL)
- return NULL;
- memset(c, 0, sizeof(CommandList_struct));
-
- c->cmdindex = -1;
-
-- c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
-- h->pdev, sizeof(ErrorInfo_struct),
-+ c->err_info = (ErrorInfo_struct *)
-+ pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
- &err_dma_handle);
-
-- if (c->err_info == NULL)
-- {
-+ if (c->err_info == NULL) {
- pci_free_consistent(h->pdev,
- sizeof(CommandList_struct), c, cmd_dma_handle);
- return NULL;
- }
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
-- } else /* get it out of the controllers pool */
-- {
-- do {
-- i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
-- if (i == NR_CMDS)
-- return NULL;
-- } while(test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
-+ } else { /* get it out of the controllers pool */
-+ do {
-+ i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
-+ if (i == h->nr_cmds)
-+ return NULL;
-+ } while(test_and_set_bit
-+ (i & (BITS_PER_LONG - 1),
-+ h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
- #ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
- #endif
- c = h->cmd_pool + i;
- memset(c, 0, sizeof(CommandList_struct));
-- cmd_dma_handle = h->cmd_pool_dhandle
-- + i*sizeof(CommandList_struct);
-+ cmd_dma_handle = h->cmd_pool_dhandle
-+ + i * sizeof(CommandList_struct);
- c->err_info = h->errinfo_pool + i;
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
- err_dma_handle = h->errinfo_pool_dhandle
-- + i*sizeof(ErrorInfo_struct);
-+ + i * sizeof(ErrorInfo_struct);
- h->nr_allocs++;
-
- c->cmdindex = i;
-@@ -436,8 +1000,6 @@
-
- c->ctlr = h->ctlr;
- return c;
--
--
- }
-
- /*
-@@ -448,32 +1010,21 @@
- int i;
- u64bit temp64;
-
-- if( !got_from_pool)
-- {
-+ if (!got_from_pool) {
- temp64.val32.lower = c->ErrDesc.Addr.lower;
- temp64.val32.upper = c->ErrDesc.Addr.upper;
- pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
- c->err_info, (dma_addr_t) temp64.val);
- pci_free_consistent(h->pdev, sizeof(CommandList_struct),
- c, (dma_addr_t) c->busaddr);
-- } else
-- {
-+ } else {
- i = c - h->cmd_pool;
-- clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
-+ clear_bit(i & (BITS_PER_LONG - 1),
-+ h->cmd_pool_bits + (i / BITS_PER_LONG));
- h->nr_frees++;
- }
- }
-
--static inline ctlr_info_t *get_host(struct gendisk *disk)
--{
-- return disk->queue->queuedata;
--}
--
--static inline drive_info_struct *get_drv(struct gendisk *disk)
--{
-- return disk->private_data;
--}
--
- /*
- * Open. Make sure the device is really there.
- */
-@@ -496,7 +1047,7 @@
- * but I'm already using way to many device nodes to claim another one
- * for "raw controller".
- */
-- if (drv->nr_blocks == 0) {
-+ if (drv->heads == 0) {
- if (iminor(inode) != 0) { /* not node 0? */
- /* if not node 0 make sure it is a partition = 0 */
- if (iminor(inode) & 0x0f) {
-@@ -513,6 +1064,7 @@
- host->usage_count++;
- return 0;
- }
-+
- /*
- * Close. Sync first.
- */
-@@ -522,7 +1074,8 @@
- drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
-
- #ifdef CCISS_DEBUG
-- printk(KERN_DEBUG "cciss_release %s\n", inode->i_bdev->bd_disk->disk_name);
-+ printk(KERN_DEBUG "cciss_release %s\n",
-+ inode->i_bdev->bd_disk->disk_name);
- #endif /* CCISS_DEBUG */
-
- drv->usage_count--;
-@@ -541,8 +1094,10 @@
- return ret;
- }
-
--static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg);
--static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, unsigned long arg);
-+static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
-+ unsigned long arg);
-+static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
-+ unsigned long arg);
-
- static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
- {
-@@ -574,7 +1129,8 @@
- }
- }
-
--static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg)
-+static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
-+ unsigned long arg)
- {
- IOCTL32_Command_struct __user *arg32 =
- (IOCTL32_Command_struct __user *) arg;
-@@ -584,9 +1140,12 @@
- u32 cp;
-
- err = 0;
-- err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
-- err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
-- err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
-+ err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
-+ sizeof(arg64.LUN_info));
-+ err |= copy_from_user(&arg64.Request, &arg32->Request,
-+ sizeof(arg64.Request));
-+ err |= copy_from_user(&arg64.error_info, &arg32->error_info,
-+ sizeof(arg64.error_info));
- err |= get_user(arg64.buf_size, &arg32->buf_size);
- err |= get_user(cp, &arg32->buf);
- arg64.buf = compat_ptr(cp);
-@@ -595,28 +1154,34 @@
- if (err)
- return -EFAULT;
-
-- err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long) p);
-+ err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
- if (err)
- return err;
-- err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
-+ err |= copy_in_user(&arg32->error_info, &p->error_info,
-+ sizeof(arg32->error_info));
- if (err)
- return -EFAULT;
- return err;
- }
-
--static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned long arg)
-+static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
-+ unsigned long arg)
- {
- BIG_IOCTL32_Command_struct __user *arg32 =
- (BIG_IOCTL32_Command_struct __user *) arg;
- BIG_IOCTL_Command_struct arg64;
-- BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
-+ BIG_IOCTL_Command_struct __user *p =
-+ compat_alloc_user_space(sizeof(arg64));
- int err;
- u32 cp;
-
- err = 0;
-- err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
-- err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
-- err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
-+ err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
-+ sizeof(arg64.LUN_info));
-+ err |= copy_from_user(&arg64.Request, &arg32->Request,
-+ sizeof(arg64.Request));
-+ err |= copy_from_user(&arg64.error_info, &arg32->error_info,
-+ sizeof(arg64.error_info));
- err |= get_user(arg64.buf_size, &arg32->buf_size);
- err |= get_user(arg64.malloc_size, &arg32->malloc_size);
- err |= get_user(cp, &arg32->buf);
-@@ -626,10 +1191,11 @@
- if (err)
- return -EFAULT;
-
-- err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long) p);
-+ err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
- if (err)
- return err;
-- err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
-+ err |= copy_in_user(&arg32->error_info, &p->error_info,
-+ sizeof(arg32->error_info));
- if (err)
- return -EFAULT;
- return err;
-@@ -666,29 +1232,33 @@
- printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
- #endif /* CCISS_DEBUG */
-
-- switch(cmd) {
-+ switch (cmd) {
- case CCISS_GETPCIINFO:
- {
- cciss_pci_info_struct pciinfo;
-
-- if (!arg) return -EINVAL;
-+ if (!arg)
-+ return -EINVAL;
- pciinfo.domain = pci_domain_nr(host->pdev->bus);
- pciinfo.bus = host->pdev->bus->number;
- pciinfo.dev_fn = host->pdev->devfn;
- pciinfo.board_id = host->board_id;
-- if (copy_to_user(argp, &pciinfo, sizeof( cciss_pci_info_struct )))
-- return -EFAULT;
-- return(0);
-+ if (copy_to_user(argp, &pciinfo,
-+ sizeof( cciss_pci_info_struct)))
-+ return -EFAULT;
-+ return 0;
- }
- case CCISS_GETINTINFO:
- {
- cciss_coalint_struct intinfo;
-- if (!arg) return -EINVAL;
-+ if (!arg)
-+ return -EINVAL;
- intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay);
- intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount);
-- if (copy_to_user(argp, &intinfo, sizeof( cciss_coalint_struct )))
-+ if (copy_to_user(argp, &intinfo,
-+ sizeof( cciss_coalint_struct)))
- return -EFAULT;
-- return(0);
-+ return 0;
- }
- case CCISS_SETINTINFO:
- {
-@@ -696,25 +1266,28 @@
- unsigned long flags;
- int i;
-
-- if (!arg) return -EINVAL;
-- if (!capable(CAP_SYS_ADMIN)) return -EPERM;
-- if (copy_from_user(&intinfo, argp, sizeof( cciss_coalint_struct)))
-+ if (!arg)
-+ return -EINVAL;
-+ if (!capable(CAP_SYS_ADMIN))
-+ return -EPERM;
-+ if (copy_from_user(&intinfo, argp,
-+ sizeof(cciss_coalint_struct)))
- return -EFAULT;
-- if ( (intinfo.delay == 0 ) && (intinfo.count == 0))
-+ if ((intinfo.delay == 0 ) && (intinfo.count == 0))
-
- {
- // printk("cciss_ioctl: delay and count cannot be 0\n");
-- return( -EINVAL);
-+ return -EINVAL;
- }
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
- /* Update the field, and then ring the doorbell */
-- writel( intinfo.delay,
-+ writel(intinfo.delay,
- &(host->cfgtable->HostWrite.CoalIntDelay));
-- writel( intinfo.count,
-+ writel(intinfo.count,
- &(host->cfgtable->HostWrite.CoalIntCount));
-- writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
-+ writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
-
-- for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
-+ for(i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
- if (!(readl(host->vaddr + SA5_DOORBELL)
- & CFGTBL_ChangeReq))
- break;
-@@ -724,19 +1297,21 @@
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
- if (i >= MAX_IOCTL_CONFIG_WAIT)
- return -EAGAIN;
-- return(0);
-+ return 0;
- }
- case CCISS_GETNODENAME:
- {
- NodeName_type NodeName;
- int i;
-
-- if (!arg) return -EINVAL;
-- for(i=0;i<16;i++)
-+ if (!arg)
-+ return -EINVAL;
-+ for(i = 0; i < 16; i++) {
- NodeName[i] = readb(&host->cfgtable->ServerName[i]);
-- if (copy_to_user(argp, NodeName, sizeof( NodeName_type)))
-- return -EFAULT;
-- return(0);
-+ }
-+ if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
-+ return -EFAULT;
-+ return 0;
- }
- case CCISS_SETNODENAME:
- {
-@@ -744,21 +1319,23 @@
- unsigned long flags;
- int i;
-
-- if (!arg) return -EINVAL;
-- if (!capable(CAP_SYS_ADMIN)) return -EPERM;
-+ if (!arg)
-+ return -EINVAL;
-+ if (!capable(CAP_SYS_ADMIN))
-+ return -EPERM;
-
-- if (copy_from_user(NodeName, argp, sizeof( NodeName_type)))
-+ if (copy_from_user(NodeName, argp, sizeof(NodeName_type)))
- return -EFAULT;
-
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-
- /* Update the field, and then ring the doorbell */
-- for(i=0;i<16;i++)
-- writeb( NodeName[i], &host->cfgtable->ServerName[i]);
-+ for(i = 0; i < 16; i++)
-+ writeb(NodeName[i], &host->cfgtable->ServerName[i]);
-
-- writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
-+ writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
-
-- for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
-+ for(i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
- if (!(readl(host->vaddr + SA5_DOORBELL)
- & CFGTBL_ChangeReq))
- break;
-@@ -768,70 +1345,70 @@
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
- if (i >= MAX_IOCTL_CONFIG_WAIT)
- return -EAGAIN;
-- return(0);
-+ return 0;
- }
-
- case CCISS_GETHEARTBEAT:
- {
- Heartbeat_type heartbeat;
-
-- if (!arg) return -EINVAL;
-+ if (!arg)
-+ return -EINVAL;
- heartbeat = readl(&host->cfgtable->HeartBeat);
-- if (copy_to_user(argp, &heartbeat, sizeof( Heartbeat_type)))
-+ if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
- return -EFAULT;
-- return(0);
-+ return 0;
- }
- case CCISS_GETBUSTYPES:
- {
- BusTypes_type BusTypes;
-
-- if (!arg) return -EINVAL;
-+ if (!arg)
-+ return -EINVAL;
- BusTypes = readl(&host->cfgtable->BusTypes);
-- if (copy_to_user(argp, &BusTypes, sizeof( BusTypes_type) ))
-- return -EFAULT;
-- return(0);
-+ if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
-+ return -EFAULT;
-+ return 0;
- }
- case CCISS_GETFIRMVER:
- {
- FirmwareVer_type firmware;
-
-- if (!arg) return -EINVAL;
-+ if (!arg)
-+ return -EINVAL;
- memcpy(firmware, host->firm_ver, 4);
-
-- if (copy_to_user(argp, firmware, sizeof( FirmwareVer_type)))
-+ if (copy_to_user(argp, firmware, sizeof(FirmwareVer_type)))
- return -EFAULT;
-- return(0);
-+ return 0;
- }
- case CCISS_GETDRIVVER:
- {
- DriverVer_type DriverVer = DRIVER_VERSION;
-
-- if (!arg) return -EINVAL;
-+ if (!arg)
-+ return -EINVAL;
-
-- if (copy_to_user(argp, &DriverVer, sizeof( DriverVer_type) ))
-+ if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
- return -EFAULT;
-- return(0);
-+ return 0;
- }
-
-- case CCISS_REVALIDVOLS:
-- if (bdev != bdev->bd_contains || drv != host->drv)
-- return -ENXIO;
-- return revalidate_allvol(host);
--
-- case CCISS_GETLUNINFO: {
-+ case CCISS_GETLUNINFO:
-+ {
- LogvolInfo_struct luninfo;
-
- luninfo.LunID = drv->LunID;
- luninfo.num_opens = drv->usage_count;
-- luninfo.num_parts = 0;
-+
- if (copy_to_user(argp, &luninfo,
- sizeof(LogvolInfo_struct)))
- return -EFAULT;
-- return(0);
-+ return 0;
- }
-- case CCISS_DEREGDISK:
-- return rebuild_lun_table(host, disk);
-
-+ case CCISS_REVALIDVOLS:
-+ case CCISS_DEREGDISK:
- case CCISS_REGNEWD:
- return rebuild_lun_table(host, NULL);
-
-@@ -842,17 +1419,19 @@
- char *buff = NULL;
- u64bit temp64;
- unsigned long flags;
-- DECLARE_COMPLETION(wait);
-+ CCISS_DECLARE_COMPLETION(wait);
-
-- if (!arg) return -EINVAL;
-+ if (!arg)
-+ return -EINVAL;
-
-- if (!capable(CAP_SYS_RAWIO)) return -EPERM;
-+ if (!capable(CAP_SYS_RAWIO))
-+ return -EPERM;
-
-- if (copy_from_user(&iocommand, argp, sizeof( IOCTL_Command_struct) ))
-+ if (copy_from_user(&iocommand, argp,
-+ sizeof(IOCTL_Command_struct)))
- return -EFAULT;
-- if((iocommand.buf_size < 1) &&
-- (iocommand.Request.Type.Direction != XFER_NONE))
-- {
-+ if((iocommand.buf_size < 1) &&
-+ (iocommand.Request.Type.Direction != XFER_NONE)) {
- return -EINVAL;
- }
- #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
-@@ -860,25 +1439,22 @@
- if(iocommand.buf_size > 128000)
- return -EINVAL;
- #endif
-- if(iocommand.buf_size > 0)
-- {
-+ if (iocommand.buf_size > 0) {
- buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
- if( buff == NULL)
-- return -EFAULT;
-+ return -ENOMEM;
- }
-- if (iocommand.Request.Type.Direction == XFER_WRITE)
-- {
-+ if (iocommand.Request.Type.Direction == XFER_WRITE) {
- /* Copy the data into the buffer we created */
-- if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
-- {
-+ if (copy_from_user(buff,
-+ iocommand.buf, iocommand.buf_size)) {
- kfree(buff);
- return -EFAULT;
- }
- } else {
- memset(buff, 0, iocommand.buf_size);
- }
-- if ((c = cmd_alloc(host , 0)) == NULL)
-- {
-+ if ((c = cmd_alloc(host, 0)) == NULL) {
- kfree(buff);
- return -ENOMEM;
- }
-@@ -886,14 +1462,12 @@
- c->cmd_type = CMD_IOCTL_PEND;
- // Fill in Command Header
- c->Header.ReplyQueue = 0; // unused in simple mode
-- if( iocommand.buf_size > 0) // buffer to fill
-- {
-+ if(iocommand.buf_size > 0) { // buffer to fill
- c->Header.SGList = 1;
-- c->Header.SGTotal= 1;
-- } else // no buffers to fill
-- {
-+ c->Header.SGTotal = 1;
-+ } else { // no buffers to fill
- c->Header.SGList = 0;
-- c->Header.SGTotal= 0;
-+ c->Header.SGTotal = 0;
- }
- c->Header.LUN = iocommand.LUN_info;
- c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
-@@ -902,11 +1476,10 @@
- c->Request = iocommand.Request;
-
- // Fill in the scatter gather information
-- if (iocommand.buf_size > 0 )
-- {
-- temp64.val = pci_map_single( host->pdev, buff,
-- iocommand.buf_size,
-- PCI_DMA_BIDIRECTIONAL);
-+ if (iocommand.buf_size > 0) {
-+ temp64.val = pci_map_single(host->pdev, buff,
-+ iocommand.buf_size,
-+ PCI_DMA_BIDIRECTIONAL);
- c->SG[0].Addr.lower = temp64.val32.lower;
- c->SG[0].Addr.upper = temp64.val32.upper;
- c->SG[0].Len = iocommand.buf_size;
-@@ -926,23 +1499,22 @@
- /* unlock the buffers from DMA */
- temp64.val32.lower = c->SG[0].Addr.lower;
- temp64.val32.upper = c->SG[0].Addr.upper;
-- pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
-+ pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
- iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
-
- /* Copy the error information out */
- iocommand.error_info = *(c->err_info);
-- if ( copy_to_user(argp, &iocommand, sizeof( IOCTL_Command_struct) ) )
-- {
-+ if (copy_to_user(argp, &iocommand,
-+ sizeof(IOCTL_Command_struct))) {
- kfree(buff);
- cmd_free(host, c, 0);
-- return( -EFAULT);
-+ return -EFAULT;
- }
-
-- if (iocommand.Request.Type.Direction == XFER_READ)
-- {
-+ if (iocommand.Request.Type.Direction == XFER_READ) {
- /* Copy the data out of the buffer we created */
-- if (copy_to_user(iocommand.buf, buff, iocommand.buf_size))
-- {
-+ if (copy_to_user(iocommand.buf, buff,
-+ iocommand.buf_size)) {
- kfree(buff);
- cmd_free(host, c, 0);
- return -EFAULT;
-@@ -950,9 +1522,10 @@
- }
- kfree(buff);
- cmd_free(host, c, 0);
-- return(0);
-+ return 0;
- }
-- case CCISS_BIG_PASSTHRU: {
-+ case CCISS_BIG_PASSTHRU:
-+ {
- BIG_IOCTL_Command_struct *ioc;
- CommandList_struct *c;
- unsigned char **buff = NULL;
-@@ -962,7 +1535,7 @@
- BYTE sg_used = 0;
- int status = 0;
- int i;
-- DECLARE_COMPLETION(wait);
-+ CCISS_DECLARE_COMPLETION(wait);
- __u32 left;
- __u32 sz;
- BYTE __user *data_ptr;
-@@ -995,14 +1568,13 @@
- status = -EINVAL;
- goto cleanup1;
- }
-- buff = (unsigned char **) kmalloc(MAXSGENTRIES *
-- sizeof(char *), GFP_KERNEL);
-+ buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
-+
- if (!buff) {
- status = -ENOMEM;
- goto cleanup1;
- }
-- memset(buff, 0, MAXSGENTRIES);
-- buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
-+ buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
- GFP_KERNEL);
- if (!buff_size) {
- status = -ENOMEM;
-@@ -1019,8 +1591,8 @@
- goto cleanup1;
- }
- if (ioc->Request.Type.Direction == XFER_WRITE) {
-- if (copy_from_user(buff[sg_used], data_ptr, sz)) {
-- status = -ENOMEM;
-+ if (copy_from_user(buff[sg_used],data_ptr,sz)) {
-+ status = -EFAULT;
- goto cleanup1;
- }
- } else {
-@@ -1030,19 +1602,19 @@
- data_ptr += sz;
- sg_used++;
- }
-- if ((c = cmd_alloc(host , 0)) == NULL) {
-+ if ((c = cmd_alloc(host, 0)) == NULL) {
- status = -ENOMEM;
- goto cleanup1;
- }
- c->cmd_type = CMD_IOCTL_PEND;
- c->Header.ReplyQueue = 0;
-
-- if( ioc->buf_size > 0) {
-+ if (ioc->buf_size > 0) {
- c->Header.SGList = sg_used;
-- c->Header.SGTotal= sg_used;
-+ c->Header.SGTotal = sg_used;
- } else {
- c->Header.SGList = 0;
-- c->Header.SGTotal= 0;
-+ c->Header.SGTotal = 0;
- }
- c->Header.LUN = ioc->LUN_info;
- c->Header.Tag.lower = c->busaddr;
-@@ -1050,8 +1622,9 @@
- c->Request = ioc->Request;
- if (ioc->buf_size > 0 ) {
- int i;
-- for(i=0; i<sg_used; i++) {
-- temp64.val = pci_map_single( host->pdev, buff[i],
-+ for (i = 0; i < sg_used; i++) {
-+ temp64.val =
-+ pci_map_single(host->pdev, buff[i],
- buff_size[i],
- PCI_DMA_BIDIRECTIONAL);
- c->SG[i].Addr.lower = temp64.val32.lower;
-@@ -1069,10 +1642,10 @@
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
- wait_for_completion(&wait);
- /* unlock the buffers from DMA */
-- for(i=0; i<sg_used; i++) {
-+ for (i = 0; i < sg_used; i++) {
- temp64.val32.lower = c->SG[i].Addr.lower;
- temp64.val32.upper = c->SG[i].Addr.upper;
-- pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
-+ pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
- buff_size[i], PCI_DMA_BIDIRECTIONAL);
- }
- /* Copy the error information out */
-@@ -1085,7 +1658,7 @@
- if (ioc->Request.Type.Direction == XFER_READ) {
- /* Copy the data out of the buffer we created */
- BYTE __user *ptr = ioc->buf;
-- for(i=0; i< sg_used; i++) {
-+ for(i=0; i < sg_used; i++) {
- if (copy_to_user(ptr, buff[i], buff_size[i])) {
- cmd_free(host, c, 0);
- status = -EFAULT;
-@@ -1098,101 +1671,103 @@
- status = 0;
- cleanup1:
- if (buff) {
-- for(i=0; i<sg_used; i++)
-+ for (i = 0; i < sg_used; i++)
- kfree(buff[i]);
-+
- kfree(buff);
- }
- kfree(buff_size);
- kfree(ioc);
-- return(status);
-+ return status;
- }
-+
-+ /* scsi_cmd_ioctl handles these, below, though some are not */
-+ /* very meaningful for cciss. SG_IO is the main one people want. */
-+
-+ case SG_GET_VERSION_NUM:
-+ case SG_SET_TIMEOUT:
-+ case SG_GET_TIMEOUT:
-+ case SG_GET_RESERVED_SIZE:
-+ case SG_SET_RESERVED_SIZE:
-+ case SG_EMULATED_HOST:
-+ case SG_IO:
-+ case SCSI_IOCTL_SEND_COMMAND:
-+ return scsi_cmd_ioctl(filep, disk, cmd, argp);
-+
-+ /* scsi_cmd_ioctl would normally handle these, below, but */
-+ /* they aren't a good fit for cciss, as CD-ROMs are */
-+ /* not supported, and we don't have any bus/target/lun */
-+ /* which we present to the kernel. */
-+
-+ case CDROM_SEND_PACKET:
-+ case CDROMCLOSETRAY:
-+ case CDROMEJECT:
-+ case SCSI_IOCTL_GET_IDLUN:
-+ case SCSI_IOCTL_GET_BUS_NUMBER:
- default:
- return -ENOTTY;
- }
-
- }
-
--/*
-- * revalidate_allvol is for online array config utilities. After a
-- * utility reconfigures the drives in the array, it can use this function
-- * (through an ioctl) to make the driver zap any previous disk structs for
-- * that controller and get new ones.
-- *
-- * Right now I'm using the getgeometry() function to do this, but this
-- * function should probably be finer grained and allow you to revalidate one
-- * particualar logical volume (instead of all of them on a particular
-- * controller).
-- */
--static int revalidate_allvol(ctlr_info_t *host)
--{
-- int ctlr = host->ctlr, i;
-- unsigned long flags;
--
-- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-- if (host->usage_count > 1) {
-- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-- printk(KERN_WARNING "cciss: Device busy for volume"
-- " revalidation (usage=%d)\n", host->usage_count);
-- return -EBUSY;
-- }
-- host->usage_count++;
-- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
--
-- for(i=0; i< NWD; i++) {
-- struct gendisk *disk = host->gendisk[i];
-- if (disk) {
-- request_queue_t *q = disk->queue;
--
-- if (disk->flags & GENHD_FL_UP)
-- del_gendisk(disk);
-- if (q)
-- blk_cleanup_queue(q);
-- }
-- }
--
-- /*
-- * Set the partition and block size structures for all volumes
-- * on this controller to zero. We will reread all of this data
-- */
-- memset(host->drv, 0, sizeof(drive_info_struct)
-- * CISS_MAX_LUN);
-- /*
-- * Tell the array controller not to give us any interrupts while
-- * we check the new geometry. Then turn interrupts back on when
-- * we're done.
-- */
-- host->access.set_intr_mask(host, CCISS_INTR_OFF);
-- cciss_getgeometry(ctlr);
-- host->access.set_intr_mask(host, CCISS_INTR_ON);
--
-- /* Loop through each real device */
-- for (i = 0; i < NWD; i++) {
-- struct gendisk *disk = host->gendisk[i];
-- drive_info_struct *drv = &(host->drv[i]);
-- /* we must register the controller even if no disks exist */
-- /* this is for the online array utilities */
-- if (!drv->heads && i)
-- continue;
-- blk_queue_hardsect_size(drv->queue, drv->block_size);
-- set_capacity(disk, drv->nr_blocks);
-- add_disk(disk);
-- }
-- host->usage_count--;
-- return 0;
--}
--
- static inline void complete_buffers(struct bio *bio, int status)
- {
- while (bio) {
- struct bio *xbh = bio->bi_next;
- int nr_sectors = bio_sectors(bio);
--
-+
- bio->bi_next = NULL;
- blk_finished_io(len);
- bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
- bio = xbh;
- }
-+}
-+
-+static void cciss_check_queues(ctlr_info_t *h)
-+{
-+ int start_queue = h->next_to_run;
-+ int i;
-+
-+ /* check to see if we have maxed out the number of commands that can
-+ * be placed on the queue. If so then exit. We do this check here
-+ * in case the interrupt we serviced was from an ioctl and did not
-+ * free any new commands.
-+ */
-+ if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
-+ return;
-+
-+ /* We have room on the queue for more commands. Now we need to queue
-+ * them up. We will also keep track of the next queue to run so
-+ * that every queue gets a chance to be started first.
-+ */
-+ for (i = 0; i < h->highest_lun + 1; i++) {
-+ int curr_queue = (start_queue + i) % (h->highest_lun + 1);
-+ /* make sure the disk has been added and the drive is real
-+ * because this can be called from the middle of init_one.
-+ */
-+ if (!(h->drv[curr_queue].queue) ||
-+ !(h->drv[curr_queue].heads) ||
-+ h->drv[curr_queue].busy_configuring)
-+ continue;
-+
-+ blk_start_queue(h->gendisk[curr_queue]->queue);
-
-+ /* check to see if we have maxed out the number of commands
-+ * that can be placed on the queue.
-+ */
-+ if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
-+ if (curr_queue == start_queue) {
-+ h->next_to_run =
-+ (start_queue + 1) % (h->highest_lun + 1);
-+ break;
-+ } else {
-+ h->next_to_run = curr_queue;
-+ break;
-+ }
-+ } else {
-+ curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
-+ }
-+ }
- }
-
- static void cciss_softirq_done(struct request *rq)
-@@ -1210,17 +1785,16 @@
-
- /* command did not need to be retried */
- /* unmap the DMA mapping for all the scatter gather elements */
-- for(i=0; i<cmd->Header.SGList; i++) {
-+ for (i = 0; i < cmd->Header.SGList; i++) {
- temp64.val32.lower = cmd->SG[i].Addr.lower;
- temp64.val32.upper = cmd->SG[i].Addr.upper;
- pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
- }
-
-- complete_buffers(rq->bio, rq->errors);
-+ complete_buffers(rq->bio, (rq->errors == 0));
-
- if (blk_fs_request(rq)) {
- const int rw = rq_data_dir(rq);
--
- disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
- }
-
-@@ -1228,9 +1802,11 @@
- printk("Done with %p\n", rq);
- #endif /* CCISS_DEBUG */
-
-+ add_disk_randomness(rq->rq_disk);
- spin_lock_irqsave(&h->lock, flags);
-- end_that_request_last(rq, rq->errors);
-- cmd_free(h, cmd,1);
-+ end_that_request_last(rq, (rq->errors == 0));
-+ cmd_free(h, cmd, 1);
-+ cciss_check_queues(h);
- spin_unlock_irqrestore(&h->lock, flags);
- }
-
-@@ -1241,23 +1817,26 @@
- * will always be left registered with the kernel since it is also the
- * controller node. Any changes to disk 0 will show up on the next
- * reboot.
--*/
-+ */
- static void cciss_update_drive_info(int ctlr, int drv_index)
-- {
-+{
- ctlr_info_t *h = hba[ctlr];
- struct gendisk *disk;
-- ReadCapdata_struct *size_buff = NULL;
- InquiryData_struct *inq_buff = NULL;
- unsigned int block_size;
-- unsigned int total_size;
-+ sector_t total_size;
- unsigned long flags = 0;
- int ret = 0;
-
-- /* if the disk already exists then deregister it before proceeding*/
-- if (h->drv[drv_index].raid_level != -1){
-+ /* if the disk already exists then deregister it before proceeding */
-+ if (h->drv[drv_index].raid_level != -1) {
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
- h->drv[drv_index].busy_configuring = 1;
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-+
-+ /* deregister_disk sets h->drv[drv_index].queue = NULL */
-+ /* which keeps the interrupt handler from starting */
-+ /* the queue. */
- ret = deregister_disk(h->gendisk[drv_index],
- &h->drv[drv_index], 0);
- h->drv[drv_index].busy_configuring = 0;
-@@ -1268,16 +1847,38 @@
- return;
-
-
-- /* Get information about the disk and modify the driver sturcture */
-- size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
-- if (size_buff == NULL)
-- goto mem_msg;
-- inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
-+ /* Get information about the disk and modify the driver structure */
-+ inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
- if (inq_buff == NULL)
- goto mem_msg;
-
-- cciss_read_capacity(ctlr, drv_index, size_buff, 1,
-+ /* testing to see if 16-byte CDBs are already
-+ being used */
-+ if (h->cciss_read == CCISS_READ_16) {
-+ cciss_read_capacity_16(h->ctlr, drv_index, 1,
-+ &total_size, &block_size);
-+ goto geo_inq;
-+ }
-+
-+ cciss_read_capacity(ctlr, drv_index, 1,
- &total_size, &block_size);
-+
-+ /*
-+ * If read_capacity returns all F's the logical volume is >2TB
-+ * so we switch to 16-byte CDBs for all read/write ops
-+ */
-+
-+ if (total_size == 0xFFFFFFFFULL) {
-+ cciss_read_capacity_16(ctlr, drv_index, 1,
-+ &total_size, &block_size);
-+ h->cciss_read = CCISS_READ_16;
-+ h->cciss_write = CCISS_WRITE_16;
-+ } else {
-+ h->cciss_read = CCISS_READ_10;
-+ h->cciss_write = CCISS_WRITE_10;
-+ }
-+
-+geo_inq:
- cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
- inq_buff, &h->drv[drv_index]);
-
-@@ -1287,11 +1888,16 @@
-
-
- /* if it's the controller it's already added */
-- if (drv_index){
-+ if (drv_index) {
-+
- disk->queue = blk_init_queue(do_cciss_request, &h->lock);
-+ sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
-+ disk->major = h->major;
-+ disk->first_minor = drv_index << NWD_SHIFT;
-+ disk->fops = &cciss_fops;
-+ disk->private_data = &h->drv[drv_index];
-
- /* Set up queue information */
-- disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
- blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
-
- /* This is a hardware imposed limit. */
-@@ -1304,17 +1910,30 @@
-
- blk_queue_softirq_done(disk->queue, cciss_softirq_done);
-
-+ blk_queue_max_sectors(disk->queue, hba[ctlr]->cciss_sector_size);
-+
- disk->queue->queuedata = hba[ctlr];
-
-+ if (!h->drv[drv_index].heads)
-+ goto freeret;
-+
- blk_queue_hardsect_size(disk->queue,
- hba[ctlr]->drv[drv_index].block_size);
-
-+
-+ cciss_sysfs_stat_inquiry(ctlr, drv_index, 1, &h->drv[drv_index]);
-+
-+ cciss_add_blk_sysfs_dev(&h->drv[drv_index], disk, h->pdev, drv_index);
-+
-+ /* Make sure all queue data is written out before */
-+ /* setting h->drv[drv_index].queue, as setting this */
-+ /* allows the interrupt handler to start the queue */
-+ wmb();
- h->drv[drv_index].queue = disk->queue;
- add_disk(disk);
- }
-
- freeret:
-- kfree(size_buff);
- kfree(inq_buff);
- return;
- mem_msg:
-@@ -1327,13 +1946,13 @@
- * where new drives will be added. If the index to be returned is greater
- * than the highest_lun index for the controller then highest_lun is set
- * to this new index. If there are no available indexes then -1 is returned.
--*/
-+ */
- static int cciss_find_free_drive_index(int ctlr)
- {
- int i;
-
-- for (i=0; i < CISS_MAX_LUN; i++){
-- if (hba[ctlr]->drv[i].raid_level == -1){
-+ for (i = 0; i < CISS_MAX_LUN; i++){
-+ if (hba[ctlr]->drv[i].raid_level == -1) {
- if (i > hba[ctlr]->highest_lun)
- hba[ctlr]->highest_lun = i;
- return i;
-@@ -1343,7 +1962,7 @@
- }
-
- /* This function will add and remove logical drives from the Logical
-- * drive array of the controller and maintain persistancy of ordering
-+ * drive array of the controller and maintain persistency of ordering
- * so that mount points are preserved until the next reboot. This allows
- * for the removal of logical drives in the middle of the drive array
- * without a re-ordering of those drives.
-@@ -1351,7 +1970,7 @@
- * h = The controller to perform the operations on
- * del_disk = The disk to remove if specified. If the value given
- * is NULL then no disk is removed.
--*/
-+ */
- static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
- {
- int ctlr = h->ctlr;
-@@ -1368,12 +1987,7 @@
-
- /* Set busy_configuring flag for this operation */
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
-- if (h->num_luns >= CISS_MAX_LUN){
-- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-- return -EINVAL;
-- }
--
-- if (h->busy_configuring){
-+ if (h->busy_configuring) {
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
- return -EBUSY;
- }
-@@ -1383,7 +1997,7 @@
- * and update the logical drive table. If it is not NULL then
- * we will check if the disk is in use or not.
- */
-- if (del_disk != NULL){
-+ if (del_disk != NULL) {
- drv = get_drv(del_disk);
- drv->busy_configuring = 1;
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-@@ -1405,11 +2019,9 @@
- TYPE_CMD);
-
- if (return_code == IO_OK){
-- listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
-- listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
-- listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
-- listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
-- } else{ /* reading number of logical volumes failed */
-+ listlength =
-+ be32_to_cpu(*(__u32 *) ld_buff->LUNListLength);
-+ } else { /* reading number of logical volumes failed */
- printk(KERN_WARNING "cciss: report logical volume"
- " command failed\n");
- listlength = 0;
-@@ -1417,7 +2029,7 @@
- }
-
- num_luns = listlength / 8; /* 8 bytes per entry */
-- if (num_luns > CISS_MAX_LUN){
-+ if (num_luns > CISS_MAX_LUN) {
- num_luns = CISS_MAX_LUN;
- printk(KERN_WARNING "cciss: more luns configured"
- " on controller than can be handled by"
-@@ -1428,7 +2040,7 @@
- * Check for updates in the drive information and any new drives
- * on the controller.
- */
-- for (i=0; i < num_luns; i++){
-+ for (i = 0; i < num_luns; i++) {
- int j;
-
- drv_found = 0;
-@@ -1447,19 +2059,26 @@
- * if not is use. If it does not exist then find
- * the first free index and add it.
- */
-- for (j=0; j <= h->highest_lun; j++){
-- if (h->drv[j].LunID == lunid){
-+ for (j = 0; j <= h->highest_lun; j++) {
-+ if (h->drv[j].LunID == lunid) {
- drv_index = j;
- drv_found = 1;
- }
- }
-
- /* check if the drive was found already in the array */
-- if (!drv_found){
-+ if (!drv_found) {
- drv_index = cciss_find_free_drive_index(ctlr);
- if (drv_index == -1)
- goto freeret;
--
-+ /*Check if the gendisk needs to be allocated */
-+ if (!h->gendisk[drv_index]) {
-+ h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
-+ if (!h->gendisk[drv_index]) {
-+ printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
-+ goto mem_msg;
-+ }
-+ }
- }
- h->drv[drv_index].LunID = lunid;
- cciss_update_drive_info(ctlr, drv_index);
-@@ -1490,41 +2109,68 @@
- * clear_all = This flag determines whether or not the disk information
- * is going to be completely cleared out and the highest_lun
- * reset. Sometimes we want to clear out information about
-- * the disk in preperation for re-adding it. In this case
-+ * the disk in preparation for re-adding it. In this case
- * the highest_lun should be left unchanged and the LunID
- * should not be cleared.
- */
- static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
- int clear_all)
- {
-+ int i;
- ctlr_info_t *h = get_host(disk);
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- /* make sure logical volume is NOT is use */
-- if(clear_all || (h->gendisk[0] == disk)) {
-+ if (clear_all || (h->gendisk[0] == disk)) {
- if (drv->usage_count > 1)
- return -EBUSY;
-- }
-- else
-- if( drv->usage_count > 0 )
-+ } else if (drv->usage_count > 0)
- return -EBUSY;
-
- /* invalidate the devices and deregister the disk. If it is disk
- * zero do not deregister it but just zero out it's values. This
- * allows us to delete disk zero but keep the controller registered.
- */
-- if (h->gendisk[0] != disk){
-+ if (h->gendisk[0] != disk) {
- if (disk) {
- request_queue_t *q = disk->queue;
-- if (disk->flags & GENHD_FL_UP)
-+ if (disk->flags & GENHD_FL_UP) {
-+ cciss_remove_blk_sysfs_dev(disk);
- del_gendisk(disk);
-+ }
- if (q) {
- blk_cleanup_queue(q);
-+ /* Set drv->queue to NULL so that we do not try
-+ * to call blk_start_queue on this queue in the
-+ * interrupt handler
-+ */
- drv->queue = NULL;
- }
-+ /* If clear_all is set then we are deleting the logical
-+ * drive, not just refreshing its info. For drives
-+ * other than disk 0 we will call put_disk. We do not
-+ * do this for disk 0 as we need it to be able to
-+ * configure the controller.
-+ */
-+ if (clear_all) {
-+ /* This isn't pretty, but we need to find the
-+ * disk in our array and NULL our the pointer.
-+ * This is so that we will call alloc_disk if
-+ * this index is used again later.
-+ */
-+ for (i = 0; i < CISS_MAX_LUN; i++) {
-+ if (h->gendisk[i] == disk) {
-+ h->gendisk[i] = NULL;
-+ break;
-+ }
-+ }
-+ put_disk(disk);
-+ }
- }
-+ } else {
-+ set_capacity(disk, 0);
- }
-
- --h->num_luns;
-@@ -1539,22 +2185,22 @@
- * array is free.
- */
-
-- if (clear_all){
-- /* check to see if it was the last disk */
-- if (drv == h->drv + h->highest_lun) {
-- /* if so, find the new hightest lun */
-- int i, newhighest =-1;
-- for(i=0; i<h->highest_lun; i++) {
-- /* if the disk has size > 0, it is available */
-- if (h->drv[i].heads)
-- newhighest = i;
-+ if (clear_all) {
-+ /* check to see if it was the last disk */
-+ if (drv == h->drv + h->highest_lun) {
-+ /* if so, find the new hightest lun */
-+ int i, newhighest = -1;
-+ for (i = 0; i < h->highest_lun; i++) {
-+ /* if the disk has size > 0, it is available */
-+ if (h->drv[i].heads)
-+ newhighest = i;
-+ }
-+ h->highest_lun = newhighest;
- }
-- h->highest_lun = newhighest;
-- }
-
-- drv->LunID = 0;
-+ drv->LunID = 0;
- }
-- return(0);
-+ return 0;
- }
-
- static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
-@@ -1565,24 +2211,24 @@
- unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr,
- int cmd_type)
- {
-- ctlr_info_t *h= hba[ctlr];
-+ ctlr_info_t *h = hba[ctlr];
- u64bit buff_dma_handle;
- int status = IO_OK;
-
- c->cmd_type = CMD_IOCTL_PEND;
- c->Header.ReplyQueue = 0;
-- if( buff != NULL) {
-+ if (buff != NULL) {
- c->Header.SGList = 1;
-- c->Header.SGTotal= 1;
-+ c->Header.SGTotal = 1;
- } else {
- c->Header.SGList = 0;
-- c->Header.SGTotal= 0;
-+ c->Header.SGTotal = 0;
- }
- c->Header.Tag.lower = c->busaddr;
-
- c->Request.Type.Type = cmd_type;
- if (cmd_type == TYPE_CMD) {
-- switch(cmd) {
-+ switch (cmd) {
- case CISS_INQUIRY:
- /* If the logical unit number is 0 then, this is going
- to controller so It's a physical command
-@@ -1592,15 +2238,16 @@
- otherwise, if use_unit_num == 2,
- mode = 0(periph dev addr) target = scsi3addr */
- if (use_unit_num == 1) {
-- c->Header.LUN.LogDev.VolId=
-+ c->Header.LUN.LogDev.VolId =
- h->drv[log_unit].LunID;
- c->Header.LUN.LogDev.Mode = 1;
- } else if (use_unit_num == 2) {
-- memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
-+ memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
-+ 8);
- c->Header.LUN.LogDev.Mode = 0;
- }
- /* are we trying to read a vital product page */
-- if(page_code != 0) {
-+ if (page_code != 0) {
- c->Request.CDB[1] = 0x01;
- c->Request.CDB[2] = page_code;
- }
-@@ -1636,6 +2283,20 @@
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd;
- break;
-+ case CCISS_READ_CAPACITY_16:
-+ c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
-+ c->Header.LUN.LogDev.Mode = 1;
-+ c->Request.CDBLen = 16;
-+ c->Request.Type.Attribute = ATTR_SIMPLE;
-+ c->Request.Type.Direction = XFER_READ;
-+ c->Request.Timeout = 0;
-+ c->Request.CDB[0] = cmd;
-+ c->Request.CDB[1] = 0x10;
-+ c->Request.CDB[10] = (size >> 24) & 0xFF;
-+ c->Request.CDB[11] = (size >> 16) & 0xFF;
-+ c->Request.CDB[12] = (size >> 8) & 0xFF;
-+ c->Request.CDB[13] = size & 0xFF;
-+ break;
- case CCISS_CACHE_FLUSH:
- c->Request.CDBLen = 12;
- c->Request.Type.Attribute = ATTR_SIMPLE;
-@@ -1647,7 +2308,7 @@
- default:
- printk(KERN_WARNING
- "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
-- return(IO_ERROR);
-+ return IO_ERROR;
- }
- } else if (cmd_type == TYPE_MSG) {
- switch (cmd) {
-@@ -1669,6 +2330,7 @@
- memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
- c->Request.CDB[0] = cmd; /* reset */
- c->Request.CDB[1] = 0x04; /* reset a LUN */
-+ break;
- case 3: /* No-Op message */
- c->Request.CDBLen = 1;
- c->Request.Type.Attribute = ATTR_SIMPLE;
-@@ -1698,6 +2360,7 @@
- }
- return status;
- }
-+
- static int sendcmd_withirq(__u8 cmd,
- int ctlr,
- void *buff,
-@@ -1712,9 +2375,10 @@
- u64bit buff_dma_handle;
- unsigned long flags;
- int return_status;
-- DECLARE_COMPLETION(wait);
-+ int print_this_error = 1; /* by default print this error */
-+ CCISS_DECLARE_COMPLETION(wait);
-
-- if ((c = cmd_alloc(h , 0)) == NULL)
-+ if ((c = cmd_alloc(h, 0)) == NULL)
- return -ENOMEM;
- return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
- log_unit, page_code, NULL, cmd_type);
-@@ -1734,22 +2398,56 @@
-
- wait_for_completion(&wait);
-
-- if(c->err_info->CommandStatus != 0)
-- { /* an error has occurred */
-- switch(c->err_info->CommandStatus)
-- {
-+ if(c->err_info->CommandStatus != 0) { /* an error has occurred */
-+ switch(c->err_info->CommandStatus) {
-+ unsigned char sense_key;
- case CMD_TARGET_STATUS:
-- printk(KERN_WARNING "cciss: cmd %p has "
-- " completed with errors\n", c);
-- if( c->err_info->ScsiStatus)
-- {
-- printk(KERN_WARNING "cciss: cmd %p "
-- "has SCSI Status = %x\n",
-- c,
-- c->err_info->ScsiStatus);
-- }
-+ return_status = IO_ERROR;
-+ switch( c->err_info->ScsiStatus) {
-+ case 0x02: /* Check Condition */
-+ /* Check if condition is Unit
-+ * Attention
-+ */
-+ if((c->err_info->SenseInfo[2] == 6) &&
-+ (c->retry_count < MAX_CMD_RETRIES))
-+ {
-+ printk(KERN_WARNING
-+ "cciss%d: retrying unit "
-+ "attention\n", ctlr);
-+ c->retry_count++;
-+ /* erase old err info */
-+ memset(c->err_info, 0,
-+ sizeof(ErrorInfo_struct));
-+ return_status = IO_OK;
-+ INIT_COMPLETION(wait);
-+ goto resend_cmd2;
-+ } else {
-+ printk(KERN_WARNING "cciss%d: "
-+ "has CHECK CONDITION "
-+ "byte 2 = 0x%x\n", ctlr,
-+ c->err_info->SenseInfo[2]);
-+ }
-+ /* check the sense key */
-+ sense_key = 0xf &
-+ c->err_info->SenseInfo[2];
-+
-+ /* no status or recovered error */
-
-+ if((sense_key == 0x0) ||
-+ (sense_key == 0x1) )
-+ return_status=IO_OK;
-+ break;
-+ case 0x18: /* Reserve Conflict */
-+ return_status=IO_ERROR;
-+ print_this_error=0;
-+ break;
-+ default:
-+ printk(KERN_WARNING "cciss%d:"
-+ " cmd has SCSI Status"
-+ " = %x\n", ctlr,
-+ c->err_info->ScsiStatus);
- break;
-+ }
- case CMD_DATA_UNDERRUN:
- case CMD_DATA_OVERRUN:
- /* expected for inquire and report lun commands */
-@@ -1764,7 +2462,7 @@
- "protocol error \n", c);
- return_status = IO_ERROR;
- break;
--case CMD_HARDWARE_ERR:
-+ case CMD_HARDWARE_ERR:
- printk(KERN_WARNING "cciss: cmd %p had "
- " hardware error\n", c);
- return_status = IO_ERROR;
-@@ -1807,23 +2505,31 @@
- "unknown status %x\n", c,
- c->err_info->CommandStatus);
- return_status = IO_ERROR;
-+
-+
- }
- }
-+ if ((return_status == IO_ERROR) && (print_this_error)) {
-+ print_cmd(c);
-+ }
-+
- /* unlock the buffers from DMA */
- buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
- buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
-- pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
-+ pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
- c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
- cmd_free(h, c, 0);
-- return(return_status);
-+ return return_status;
-
- }
- static void cciss_geometry_inquiry(int ctlr, int logvol,
-- int withirq, unsigned int total_size,
-+ int withirq, sector_t total_size,
- unsigned int block_size, InquiryData_struct *inq_buff,
- drive_info_struct *drv)
- {
- int return_code;
-+ unsigned int t;
-+
- memset(inq_buff, 0, sizeof(InquiryData_struct));
- if (withirq)
- return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
-@@ -1832,58 +2538,98 @@
- return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
- sizeof(*inq_buff), 1, logvol ,0xC1, NULL, TYPE_CMD);
- if (return_code == IO_OK) {
-- if(inq_buff->data_byte[8] == 0xFF) {
-+ if (inq_buff->data_byte[8] == 0xFF) {
- printk(KERN_WARNING
- "cciss: reading geometry failed, volume "
- "does not support reading geometry\n");
-- drv->block_size = block_size;
-- drv->nr_blocks = total_size;
- drv->heads = 255;
- drv->sectors = 32; // Sectors per track
-- drv->cylinders = total_size / 255 / 32;
-+ drv->raid_level = RAID_UNKNOWN;
- } else {
-- unsigned int t;
--
-- drv->block_size = block_size;
-- drv->nr_blocks = total_size;
- drv->heads = inq_buff->data_byte[6];
- drv->sectors = inq_buff->data_byte[7];
- drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
- drv->cylinders += inq_buff->data_byte[5];
- drv->raid_level = inq_buff->data_byte[8];
-- t = drv->heads * drv->sectors;
-- if (t > 1) {
-- drv->cylinders = total_size/t;
-- }
-+ }
-+ drv->block_size = block_size;
-+ drv->nr_blocks = total_size + 1;
-+ t = drv->heads * drv->sectors;
-+ if (t > 1) {
-+ sector_t real_size = total_size+1;
-+ unsigned long rem = sector_div(real_size, t);
-+ if (rem)
-+ real_size++;
-+ drv->cylinders = real_size;
- }
- } else { /* Get geometry failed */
- printk(KERN_WARNING "cciss: reading geometry failed\n");
- }
-- printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
-- drv->heads, drv->sectors, drv->cylinders);
- }
-+
- static void
--cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
-- int withirq, unsigned int *total_size, unsigned int *block_size)
-+cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
-+ unsigned int *block_size)
- {
-+ ReadCapdata_struct *buf;
- int return_code;
-- memset(buf, 0, sizeof(*buf));
-+ buf = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
-+ if (buf == NULL) {
-+ printk(KERN_WARNING "cciss: out of memory\n");
-+ return;
-+ }
-+ memset(buf, 0, sizeof(ReadCapdata_struct));
-+
- if (withirq)
- return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
-- ctlr, buf, sizeof(*buf), 1, logvol, 0, TYPE_CMD);
-+ ctlr, buf, sizeof(ReadCapdata_struct),
-+ 1, logvol, 0, TYPE_CMD);
- else
- return_code = sendcmd(CCISS_READ_CAPACITY,
-- ctlr, buf, sizeof(*buf), 1, logvol, 0, NULL, TYPE_CMD);
-+ ctlr, buf, sizeof(ReadCapdata_struct),
-+ 1, logvol, 0, NULL, TYPE_CMD);
-+ if (return_code == IO_OK) {
-+ *total_size = be32_to_cpu(*(__u32 *) buf->total_size);
-+ *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
-+ } else { /* read capacity command failed */
-+ printk(KERN_WARNING "cciss: read capacity failed\n");
-+ *total_size = 0;
-+ *block_size = BLOCK_SIZE;
-+ }
-+ kfree(buf);
-+ return;
-+}
-+
-+static void
-+cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
-+{
-+ ReadCapdata_struct_16 *buf;
-+ int return_code;
-+ buf = kmalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
-+ if (buf == NULL) {
-+ printk(KERN_WARNING "cciss: out of memory\n");
-+ return;
-+ }
-+ memset(buf, 0, sizeof(ReadCapdata_struct_16));
-+ if (withirq) {
-+ return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
-+ ctlr, buf, sizeof(ReadCapdata_struct_16),
-+ 1, logvol, 0, TYPE_CMD);
-+ }
-+ else {
-+ return_code = sendcmd(CCISS_READ_CAPACITY_16,
-+ ctlr, buf, sizeof(ReadCapdata_struct_16),
-+ 1, logvol, 0, NULL, TYPE_CMD);
-+ }
- if (return_code == IO_OK) {
-- *total_size = be32_to_cpu(*((__be32 *) &buf->total_size[0]))+1;
-- *block_size = be32_to_cpu(*((__be32 *) &buf->block_size[0]));
-+ *total_size = be64_to_cpu(*(__u64 *) buf->total_size);
-+ *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
- } else { /* read capacity command failed */
- printk(KERN_WARNING "cciss: read capacity failed\n");
- *total_size = 0;
- *block_size = BLOCK_SIZE;
- }
-- printk(KERN_INFO " blocks= %u block_size= %d\n",
-- *total_size, *block_size);
-+ kfree(buf);
- return;
- }
-
-@@ -1894,8 +2640,7 @@
- int logvol;
- int FOUND=0;
- unsigned int block_size;
-- unsigned int total_size;
-- ReadCapdata_struct *size_buff = NULL;
-+ sector_t total_size;
- InquiryData_struct *inq_buff = NULL;
-
- for(logvol=0; logvol < CISS_MAX_LUN; logvol++)
-@@ -1908,27 +2653,24 @@
-
- if (!FOUND) return 1;
-
-- size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
-- if (size_buff == NULL)
-- {
-- printk(KERN_WARNING "cciss: out of memory\n");
-- return 1;
-- }
- inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
-- if (inq_buff == NULL)
-- {
-+ if (inq_buff == NULL) {
- printk(KERN_WARNING "cciss: out of memory\n");
-- kfree(size_buff);
- return 1;
- }
--
-- cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
-- cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
-+ if (h->cciss_read == CCISS_READ_10) {
-+ cciss_read_capacity(h->ctlr, logvol, 1,
-+ &total_size, &block_size);
-+ } else {
-+ cciss_read_capacity_16(h->ctlr, logvol, 1,
-+ &total_size, &block_size);
-+ }
-+ cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
-+ inq_buff, drv);
-
- blk_queue_hardsect_size(drv->queue, drv->block_size);
- set_capacity(disk, drv->nr_blocks);
-
-- kfree(size_buff);
- kfree(inq_buff);
- return 0;
- }
-@@ -1943,14 +2685,17 @@
- unsigned long done;
- int i;
-
-- /* Wait (up to 20 seconds) for a command to complete */
-+ /* Increase timeout from 20 to 60 seconds to support a
-+ * large number of logical volumes. Otherwise we may
-+ * timeout during init.
-+ */
-
-- for (i = 20 * HZ; i > 0; i--) {
-+ for (i = 60 * HZ; i > 0; i--) {
- done = hba[ctlr]->access.command_completed(hba[ctlr]);
- if (done == FIFO_EMPTY)
- schedule_timeout_uninterruptible(1);
- else
-- return (done);
-+ return done;
- }
- /* Invalid address to tell caller we ran out of time */
- return 1;
-@@ -1978,7 +2723,7 @@
- /* or reset) then we don't expect anything weird. */
- if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
- #endif
-- printk( KERN_WARNING "cciss cciss%d: SendCmd "
-+ printk(KERN_WARNING "cciss cciss%d: SendCmd "
- "Invalid command list address returned! (%lx)\n",
- ctlr, complete);
- /* not much we can do. */
-@@ -1988,7 +2733,7 @@
-
- /* We've sent down an abort or reset, but something else
- has completed */
-- if (srl->ncompletions >= (NR_CMDS + 2)) {
-+ if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
- /* Uh oh. No room to save it for later... */
- printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
- "reject list overflow, command lost!\n", ctlr);
-@@ -2021,13 +2766,15 @@
- CommandList_struct *c;
- int i;
- unsigned long complete;
-- ctlr_info_t *info_p= hba[ctlr];
-+ ctlr_info_t *info_p = hba[ctlr];
- u64bit buff_dma_handle;
- int status, done = 0;
-+ unsigned char sense_key;
-+ int print_this_error = 1; /* print errors by default */
-
- if ((c = cmd_alloc(info_p, 1)) == NULL) {
- printk(KERN_WARNING "cciss: unable to get memory");
-- return(IO_ERROR);
-+ return IO_ERROR;
- }
- status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
- log_unit, page_code, scsi3addr, cmd_type);
-@@ -2048,12 +2795,9 @@
- /* Actually it should be completely empty at this time */
- /* unless we are in here doing error handling for the scsi */
- /* tape side of the driver. */
-- for (i = 200000; i > 0; i--)
-- {
-+ for (i = 200000; i > 0; i--) {
- /* if fifo isn't full go */
-- if (!(info_p->access.fifo_full(info_p)))
-- {
--
-+ if (!(info_p->access.fifo_full(info_p))) {
- break;
- }
- udelay(10);
-@@ -2081,11 +2825,9 @@
- done = 1;
- break;
- }
--
- /* This will need to change for direct lookup completions */
-- if ( (complete & CISS_ERROR_BIT)
-- && (complete & ~CISS_ERROR_BIT) == c->busaddr)
-- {
-+ if ((complete & CISS_ERROR_BIT)
-+ && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
- /* if data overrun or underun on Report command
- ignore it
- */
-@@ -2095,37 +2837,44 @@
- ((c->err_info->CommandStatus ==
- CMD_DATA_OVERRUN) ||
- (c->err_info->CommandStatus ==
-- CMD_DATA_UNDERRUN)
-- ))
-- {
-+ CMD_DATA_UNDERRUN))) {
- complete = c->busaddr;
- } else {
-- if (c->err_info->CommandStatus ==
-- CMD_UNSOLICITED_ABORT) {
-+ switch(c->err_info->CommandStatus) {
-+ case CMD_UNSOLICITED_ABORT:
- printk(KERN_WARNING "cciss%d: "
-- "unsolicited abort %p\n",
-- ctlr, c);
-- if (c->retry_count < MAX_CMD_RETRIES) {
-+ "unsolicited abort\n",
-+ ctlr);
-+ if (c->retry_count
-+ < MAX_CMD_RETRIES) {
- printk(KERN_WARNING
-- "cciss%d: retrying %p\n",
-- ctlr, c);
-+ "cciss%d:"
-+ " retrying"
-+ " cmd\n",
-+ ctlr);
- c->retry_count++;
-- /* erase the old error */
-- /* information */
-+ /* erase the old
-+ * error info
-+ */
- memset(c->err_info, 0,
-- sizeof(ErrorInfo_struct));
-+ sizeof (ErrorInfo_struct));
- goto resend_cmd1;
- } else {
- printk(KERN_WARNING
-- "cciss%d: retried %p too "
-- "many times\n", ctlr, c);
-+ "cciss%d: retried"
-+ " cmd too many "
-+ " times\n", ctlr);
- status = IO_ERROR;
- goto cleanup1;
- }
-- } else if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
-- printk(KERN_WARNING "cciss%d: command could not be aborted.\n", ctlr);
-+ break;
-+ case CMD_UNABORTABLE:
-+ printk(KERN_WARNING "cciss%d"
-+ " command could not be"
-+ " aborted.\n", ctlr);
- status = IO_ERROR;
- goto cleanup1;
-+ break;
- }
- printk(KERN_WARNING "ciss ciss%d: sendcmd"
- " Error %x \n", ctlr,
-@@ -2140,6 +2889,15 @@
- goto cleanup1;
- }
- }
-+ if ((c->err_info->ScsiStatus) ==
-+ CMD_RESERVATION_CONFLICT) {
-+ printk(KERN_WARNING "cciss:%d"
-+ " device is reserved."
-+ "\n", ctlr);
-+ print_this_error = 0;
-+ status = IO_ERROR;
-+ goto cleanup1;
-+ }
- /* This will need changing for direct lookup completions */
- if (complete != c->busaddr) {
- if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
-@@ -2151,6 +2909,10 @@
- } while (!done);
-
- cleanup1:
-+
-+ if ((status == IO_ERROR) && (print_this_error))
-+ print_cmd(c);
-+
- /* unlock the data buffer from DMA */
- buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
- buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
-@@ -2162,7 +2924,7 @@
- do_cciss_intr(0, info_p, NULL);
- #endif
- cmd_free(info_p, c, 1);
-- return (status);
-+ return status;
- }
- /*
- * Map (physical) PCI mem into (virtual) kernel space
-@@ -2171,7 +2933,7 @@
- {
- ulong page_base = ((ulong) base) & PAGE_MASK;
- ulong page_offs = ((ulong) base) - page_base;
-- void __iomem *page_remapped = ioremap(page_base, page_offs+size);
-+ void __iomem *page_remapped = ioremap(page_base, page_offs + size);
-
- return page_remapped ? (page_remapped + page_offs) : NULL;
- }
-@@ -2180,19 +2942,18 @@
- * Takes jobs of the Q and sends them to the hardware, then puts it on
- * the Q to wait for completion.
- */
--static void start_io( ctlr_info_t *h)
-+static void start_io(ctlr_info_t *h)
- {
- CommandList_struct *c;
-
-- while(( c = h->reqQ) != NULL )
-- {
-+ while(( c = h->reqQ) != NULL ) {
- /* can't do anything if fifo is full */
- if ((h->access.fifo_full(h))) {
- printk(KERN_WARNING "cciss: fifo full\n");
- break;
- }
-
-- /* Get the frist entry from the Request Q */
-+ /* Get the first entry from the Request Q */
- removeQ(&(h->reqQ), c);
- h->Qdepth--;
-
-@@ -2200,145 +2961,220 @@
- h->access.submit_command(h, c);
-
- /* Put job onto the completed Q */
-- addQ (&(h->cmpQ), c);
-+ addQ(&(h->cmpQ), c);
- }
- }
- /* Assumes that CCISS_LOCK(h->ctlr) is held. */
- /* Zeros out the error record and then resends the command back */
- /* to the controller */
--static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
-+static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
- {
- /* erase the old error information */
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
-
- /* add it to software queue and then send it to the controller */
-- addQ(&(h->reqQ),c);
-+ addQ(&(h->reqQ), c);
- h->Qdepth++;
-- if(h->Qdepth > h->maxQsinceinit)
-+ if (h->Qdepth > h->maxQsinceinit)
- h->maxQsinceinit = h->Qdepth;
-
- start_io(h);
- }
-
-+static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
-+ unsigned int msg_byte, unsigned int host_byte,
-+ unsigned int driver_byte)
-+{
-+ /* inverse of macros in scsi.h */
-+ return (scsi_status_byte & 0xff) |
-+ ((msg_byte & 0xff) << 8) |
-+ ((host_byte & 0xff) << 16) |
-+ ((driver_byte & 0xff) << 24);
-+}
-+
-+static inline int evaluate_target_status(CommandList_struct *cmd)
-+{
-+ unsigned char sense_key;
-+ unsigned char status_byte, msg_byte, host_byte, driver_byte;
-+ int error_value;
-+
-+ /* If we get in here, it means we got "target status", that is, scsi status */
-+ status_byte = cmd->err_info->ScsiStatus;
-+ driver_byte = DRIVER_OK;
-+ msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
-+
-+ if (blk_pc_request(cmd->rq))
-+ host_byte = DID_PASSTHROUGH;
-+ else
-+ host_byte = DID_OK;
-+
-+ error_value = make_status_bytes(status_byte, msg_byte,
-+ host_byte, driver_byte);
-+
-+ if (cmd->err_info->ScsiStatus != CMD_RESERVATION_CONFLICT) {
-+ if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
-+ if (!blk_pc_request(cmd->rq))
-+ printk(KERN_WARNING "cciss: cmd %p "
-+ "has SCSI Status 0x%x\n",
-+ cmd, cmd->err_info->ScsiStatus);
-+ return error_value;
-+ }
-+
-+ /* check the sense key */
-+ sense_key = 0xf & cmd->err_info->SenseInfo[2];
-+ /* no status or recovered error */
-+ if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
-+ error_value = 0;
-+
-+ if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
-+ if (error_value != 0)
-+ printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
-+ " sense key = 0x%x\n", cmd, sense_key);
-+ return error_value;
-+ }
-+
-+ /* SG_IO or similar, copy sense data back */
-+ if (cmd->rq->sense) {
-+ if (cmd->rq->sense_len > cmd->err_info->SenseLen)
-+ cmd->rq->sense_len = cmd->err_info->SenseLen;
-+ memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
-+ cmd->rq->sense_len);
-+ } else
-+ cmd->rq->sense_len = 0;
-+
-+ return error_value;
-+}
-+
- /* checks the status of the job and calls complete buffers to mark all
- * buffers for the completed job. Note that this function does not need
- * to hold the hba/queue lock.
- */
--static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
-+static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
- int timeout)
- {
-- int status = 1;
- int retry_cmd = 0;
--
-+ struct request *rq = cmd->rq;
-+ int ctlr = h->ctlr;
-+ int print_this_error = 1; /* print errors by default */
-+
-+ rq->errors = 0;
- if (timeout)
-- status = 0;
-+ rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
-
-- if(cmd->err_info->CommandStatus != 0)
-- { /* an error has occurred */
-- switch(cmd->err_info->CommandStatus)
-- {
-- unsigned char sense_key;
-- case CMD_TARGET_STATUS:
-- status = 0;
--
-- if( cmd->err_info->ScsiStatus == 0x02)
-- {
-- printk(KERN_WARNING "cciss: cmd %p "
-- "has CHECK CONDITION "
-- " byte 2 = 0x%x\n", cmd,
-- cmd->err_info->SenseInfo[2]
-- );
-- /* check the sense key */
-- sense_key = 0xf &
-- cmd->err_info->SenseInfo[2];
-- /* no status or recovered error */
-- if((sense_key == 0x0) ||
-- (sense_key == 0x1))
-- {
-- status = 1;
-- }
-- } else
-- {
-- printk(KERN_WARNING "cciss: cmd %p "
-- "has SCSI Status 0x%x\n",
-- cmd, cmd->err_info->ScsiStatus);
-- }
-- break;
-- case CMD_DATA_UNDERRUN:
-- printk(KERN_WARNING "cciss: cmd %p has"
-- " completed with data underrun "
-- "reported\n", cmd);
-- break;
-- case CMD_DATA_OVERRUN:
-- printk(KERN_WARNING "cciss: cmd %p has"
-- " completed with data overrun "
-- "reported\n", cmd);
-- break;
-- case CMD_INVALID:
-- printk(KERN_WARNING "cciss: cmd %p is "
-- "reported invalid\n", cmd);
-- status = 0;
-- break;
-- case CMD_PROTOCOL_ERR:
-- printk(KERN_WARNING "cciss: cmd %p has "
-- "protocol error \n", cmd);
-- status = 0;
-- break;
-- case CMD_HARDWARE_ERR:
-- printk(KERN_WARNING "cciss: cmd %p had "
-- " hardware error\n", cmd);
-- status = 0;
-- break;
-- case CMD_CONNECTION_LOST:
-- printk(KERN_WARNING "cciss: cmd %p had "
-- "connection lost\n", cmd);
-- status=0;
-- break;
-- case CMD_ABORTED:
-- printk(KERN_WARNING "cciss: cmd %p was "
-- "aborted\n", cmd);
-- status=0;
-- break;
-- case CMD_ABORT_FAILED:
-- printk(KERN_WARNING "cciss: cmd %p reports "
-- "abort failed\n", cmd);
-- status=0;
-- break;
-- case CMD_UNSOLICITED_ABORT:
-- printk(KERN_WARNING "cciss%d: unsolicited "
-- "abort %p\n", h->ctlr, cmd);
-- if (cmd->retry_count < MAX_CMD_RETRIES) {
-- retry_cmd=1;
-- printk(KERN_WARNING
-- "cciss%d: retrying %p\n",
-- h->ctlr, cmd);
-- cmd->retry_count++;
-- } else
-- printk(KERN_WARNING
-- "cciss%d: %p retried too "
-- "many times\n", h->ctlr, cmd);
-- status=0;
-- break;
-- case CMD_TIMEOUT:
-- printk(KERN_WARNING "cciss: cmd %p timedout\n",
-- cmd);
-- status=0;
-- break;
-- default:
-- printk(KERN_WARNING "cciss: cmd %p returned "
-- "unknown status %x\n", cmd,
-- cmd->err_info->CommandStatus);
-- status=0;
-- }
-+ if(cmd->err_info->CommandStatus == 0) /* no error has occurred */
-+ goto after_error_processing;
-+
-+ switch(cmd->err_info->CommandStatus) {
-+ case CMD_TARGET_STATUS:
-+ rq->errors = evaluate_target_status(cmd);
-+ break;
-+ case CMD_DATA_UNDERRUN:
-+ if (blk_fs_request(rq))
-+ printk(KERN_WARNING "cciss: cmd %p has"
-+ " completed with data underrun "
-+ "reported\n", cmd);
-+ break;
-+ case CMD_DATA_OVERRUN:
-+ if (blk_fs_request(rq))
-+ printk(KERN_WARNING "cciss: cmd %p has"
-+ " completed with data overrun "
-+ "reported\n", cmd);
-+ break;
-+ case CMD_INVALID:
-+ printk(KERN_WARNING "cciss%d: cmd is "
-+ "reported invalid\n", ctlr);
-+ rq->errors = make_status_bytes(SAM_STAT_GOOD,
-+ cmd->err_info->CommandStatus, DRIVER_OK,
-+ blk_pc_request(rq) ? DID_PASSTHROUGH : DID_ERROR);
-+ break;
-+ case CMD_PROTOCOL_ERR:
-+ printk(KERN_WARNING "cciss%d: cmd has "
-+ "protocol error \n", ctlr);
-+ rq->errors = make_status_bytes(SAM_STAT_GOOD,
-+ cmd->err_info->CommandStatus, DRIVER_OK,
-+ blk_pc_request(rq) ? DID_PASSTHROUGH : DID_ERROR);
-+ break;
-+ case CMD_HARDWARE_ERR:
-+ printk(KERN_WARNING "cciss%d: cmd had "
-+ " hardware error\n", ctlr);
-+ rq->errors = make_status_bytes(SAM_STAT_GOOD,
-+ cmd->err_info->CommandStatus, DRIVER_OK,
-+ blk_pc_request(rq) ? DID_PASSTHROUGH : DID_ERROR);
-+ break;
-+ case CMD_CONNECTION_LOST:
-+ printk(KERN_WARNING "cciss%d: cmd had "
-+ "connection lost\n", ctlr);
-+ rq->errors = make_status_bytes(SAM_STAT_GOOD,
-+ cmd->err_info->CommandStatus, DRIVER_OK,
-+ blk_pc_request(rq) ? DID_PASSTHROUGH : DID_ERROR);
-+ break;
-+ case CMD_ABORTED:
-+ printk(KERN_WARNING "cciss%d: cmd was "
-+ "aborted\n", ctlr);
-+ rq->errors = make_status_bytes(SAM_STAT_GOOD,
-+ cmd->err_info->CommandStatus, DRIVER_OK,
-+ blk_pc_request(rq) ? DID_PASSTHROUGH : DID_ABORT);
-+ break;
-+ case CMD_ABORT_FAILED:
-+ printk(KERN_WARNING "cciss%d: cmd reports "
-+ "abort failed\n", ctlr);
-+ rq->errors = make_status_bytes(SAM_STAT_GOOD,
-+ cmd->err_info->CommandStatus, DRIVER_OK,
-+ blk_pc_request(rq) ? DID_PASSTHROUGH : DID_ERROR);
-+ break;
-+ case CMD_UNSOLICITED_ABORT:
-+ printk(KERN_WARNING "cciss%d: unsolicited "
-+ "abort\n", ctlr);
-+ if (cmd->retry_count < MAX_CMD_RETRIES) {
-+ retry_cmd = 1;
-+ printk(KERN_WARNING
-+ "cciss%d: retrying cmd\n", ctlr);
-+ cmd->retry_count++;
-+ } else
-+ printk(KERN_WARNING
-+ "cciss%d: cmd retried too "
-+ "many times\n", ctlr);
-+ rq->errors = make_status_bytes(SAM_STAT_GOOD,
-+ cmd->err_info->CommandStatus, DRIVER_OK,
-+ blk_pc_request(rq) ? DID_PASSTHROUGH : DID_ABORT);
-+ break;
-+ case CMD_TIMEOUT:
-+ printk(KERN_WARNING "cciss%d: cmd timedout\n", ctlr);
-+ rq->errors = make_status_bytes(SAM_STAT_GOOD,
-+ cmd->err_info->CommandStatus, DRIVER_OK,
-+ blk_pc_request(rq) ? DID_PASSTHROUGH : DID_ERROR);
-+ break;
-+ case CMD_RESERVATION_CONFLICT:
-+ printk(KERN_WARNING "reservation conflict in complete_command\n");
-+ rq->errors = make_status_bytes(SAM_STAT_GOOD,
-+ cmd->err_info->CommandStatus, DRIVER_OK,
-+ blk_pc_request(rq) ? DID_PASSTHROUGH : DID_ERROR);
-+ print_this_error = 0;
-+ break;
-+ default:
-+ printk(KERN_WARNING "cciss%d: cmd returned "
-+ "unknown status %x\n", ctlr,
-+ cmd->err_info->CommandStatus);
-+ rq->errors = make_status_bytes(SAM_STAT_GOOD,
-+ cmd->err_info->CommandStatus, DRIVER_OK,
-+ blk_pc_request(rq) ? DID_PASSTHROUGH : DID_ERROR);
- }
-+
-+after_error_processing:
-+
- /* We need to return this command */
-- if(retry_cmd) {
-- resend_cciss_cmd(h,cmd);
-+ if (retry_cmd) {
-+ resend_cciss_cmd(h, cmd);
- return;
-- }
-+ }
-+
-+ if ((rq->errors != 0) && !blk_pc_request(rq) && print_this_error)
-+ print_cmd(cmd);
-
- cmd->rq->completion_data = cmd;
-- cmd->rq->errors = status;
-+#ifdef CONFIG_BLK_DEV_IO_TRACE
-+ blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
-+#endif
- blk_complete_request(cmd->rq);
- }
-
-@@ -2347,9 +3183,10 @@
- */
- static void do_cciss_request(request_queue_t *q)
- {
-- ctlr_info_t *h= q->queuedata;
-+ ctlr_info_t *h = q->queuedata;
- CommandList_struct *c;
-- int start_blk, seg;
-+ sector_t start_blk;
-+ int seg;
- struct request *creq;
- u64bit temp64;
- struct scatterlist tmp_sg[MAXSGENTRIES];
-@@ -2367,10 +3204,9 @@
- if (!creq)
- goto startio;
-
-- if (creq->nr_phys_segments > MAXSGENTRIES)
-- BUG();
-+ BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
-
-- if (( c = cmd_alloc(h, 1)) == NULL)
-+ if ((c = cmd_alloc(h, 1)) == NULL)
- goto full;
-
- blkdev_dequeue_request(creq);
-@@ -2388,7 +3224,7 @@
- /* The first 2 bits are reserved for controller error reporting. */
- c->Header.Tag.lower = (c->cmdindex << 3);
- c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
-- c->Header.LUN.LogDev.VolId= drv->LunID;
-+ c->Header.LUN.LogDev.VolId = drv->LunID;
- c->Header.LUN.LogDev.Mode = 1;
- c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
- c->Request.Type.Type = TYPE_CMD; // It is a command.
-@@ -2396,10 +3232,10 @@
- c->Request.Type.Direction =
- (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE;
- c->Request.Timeout = 0; // Don't time out
-- c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
-+ c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
- start_blk = creq->sector;
- #ifdef CCISS_DEBUG
-- printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",(int) creq->sector,
-+ printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
- (int) creq->nr_sectors);
- #endif /* CCISS_DEBUG */
-
-@@ -2411,8 +3247,7 @@
- else
- dir = PCI_DMA_TODEVICE;
-
-- for (i=0; i<seg; i++)
-- {
-+ for (i = 0; i < seg; i++) {
- c->SG[i].Len = tmp_sg[i].length;
- temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
- tmp_sg[i].offset, tmp_sg[i].length,
-@@ -2422,7 +3257,7 @@
- c->SG[i].Ext = 0; // we are not chaining
- }
- /* track how many SG entries we are using */
-- if( seg > h->maxSG)
-+ if(seg > h->maxSG)
- h->maxSG = seg;
-
- #ifdef CCISS_DEBUG
-@@ -2430,21 +3265,49 @@
- #endif /* CCISS_DEBUG */
-
- c->Header.SGList = c->Header.SGTotal = seg;
-- c->Request.CDB[1]= 0;
-- c->Request.CDB[2]= (start_blk >> 24) & 0xff; //MSB
-- c->Request.CDB[3]= (start_blk >> 16) & 0xff;
-- c->Request.CDB[4]= (start_blk >> 8) & 0xff;
-- c->Request.CDB[5]= start_blk & 0xff;
-- c->Request.CDB[6]= 0; // (sect >> 24) & 0xff; MSB
-- c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
-- c->Request.CDB[8]= creq->nr_sectors & 0xff;
-- c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
-+ if (likely(blk_fs_request(creq))) {
-+ if(h->cciss_read == CCISS_READ_10) {
-+ c->Request.CDB[1] = 0;
-+ c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
-+ c->Request.CDB[3] = (start_blk >> 16) & 0xff;
-+ c->Request.CDB[4] = (start_blk >> 8) & 0xff;
-+ c->Request.CDB[5] = start_blk & 0xff;
-+ c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
-+ c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
-+ c->Request.CDB[8] = creq->nr_sectors & 0xff;
-+ c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
-+ } else {
-+ c->Request.CDBLen = 16;
-+ c->Request.CDB[1] = 0;
-+ c->Request.CDB[2] = (start_blk >> 56) & 0xff; //MSB
-+ c->Request.CDB[3] = (start_blk >> 48) & 0xff;
-+ c->Request.CDB[4] = (start_blk >> 40) & 0xff;
-+ c->Request.CDB[5] = (start_blk >> 32) & 0xff;
-+ c->Request.CDB[6] = (start_blk >> 24) & 0xff;
-+ c->Request.CDB[7] = (start_blk >> 16) & 0xff;
-+ c->Request.CDB[8] = (start_blk >> 8) & 0xff;
-+ c->Request.CDB[9] = start_blk & 0xff;
-+ c->Request.CDB[10] = (creq->nr_sectors >> 24) & 0xff;
-+ c->Request.CDB[11] = (creq->nr_sectors >> 16) & 0xff;
-+ c->Request.CDB[12] = (creq->nr_sectors >> 8) & 0xff;
-+ c->Request.CDB[13] = creq->nr_sectors & 0xff;
-+ c->Request.CDB[14] = c->Request.CDB[15] = 0;
-+ }
-+ } else if (blk_pc_request(creq)) {
-+ c->Request.CDBLen = creq->cmd_len;
-+ memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
-+ } else {
-+ /* In later kernels, rq->flags is supplanted by rq->cmd_type */
-+ printk(KERN_WARNING "cciss%d: bad request type, rq->flags=%ld\n",
-+ h->ctlr, creq->flags);
-+ BUG();
-+ }
-
- spin_lock_irq(q->queue_lock);
-
-- addQ(&(h->reqQ),c);
-+ addQ(&(h->reqQ), c);
- h->Qdepth++;
-- if(h->Qdepth > h->maxQsinceinit)
-+ if (h->Qdepth > h->maxQsinceinit)
- h->maxQsinceinit = h->Qdepth;
-
- goto queue;
-@@ -2480,7 +3343,7 @@
- static inline int interrupt_pending(ctlr_info_t *h)
- {
- #ifdef CONFIG_CISS_SCSI_TAPE
-- return ( h->access.intr_pending(h)
-+ return (h->access.intr_pending(h)
- || (h->scsi_rejects.ncompletions > 0));
- #else
- return h->access.intr_pending(h);
-@@ -2505,8 +3368,6 @@
- CommandList_struct *c;
- unsigned long flags;
- __u32 a, a1, a2;
-- int j;
-- int start_queue = h->next_to_run;
-
- if (interrupt_not_for_us(h))
- return IRQ_NONE;
-@@ -2516,12 +3377,14 @@
- */
- spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
- while (interrupt_pending(h)) {
-- while((a = get_next_completion(h)) != FIFO_EMPTY) {
-+ while ((a = get_next_completion(h)) != FIFO_EMPTY) {
- a1 = a;
- if ((a & 0x04)) {
- a2 = (a >> 3);
-- if (a2 >= NR_CMDS) {
-+ if (a2 >= h->nr_cmds) {
- printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr);
-+ spin_unlock_irqrestore(
-+ CCISS_LOCK(h->ctlr), flags);
- fail_all_cmds(h->ctlr);
- return IRQ_HANDLED;
- }
-@@ -2535,7 +3398,7 @@
- printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1);
- continue;
- }
-- while(c->busaddr != a) {
-+ while (c->busaddr != a) {
- c = c->next;
- if (c == h->cmpQ)
- break;
-@@ -2560,66 +3423,25 @@
- }
- }
- }
--
-- /* check to see if we have maxed out the number of commands that can
-- * be placed on the queue. If so then exit. We do this check here
-- * in case the interrupt we serviced was from an ioctl and did not
-- * free any new commands.
-- */
-- if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
-- goto cleanup;
--
-- /* We have room on the queue for more commands. Now we need to queue
-- * them up. We will also keep track of the next queue to run so
-- * that every queue gets a chance to be started first.
-- */
-- for (j=0; j < h->highest_lun + 1; j++){
-- int curr_queue = (start_queue + j) % (h->highest_lun + 1);
-- /* make sure the disk has been added and the drive is real
-- * because this can be called from the middle of init_one.
-- */
-- if(!(h->drv[curr_queue].queue) ||
-- !(h->drv[curr_queue].heads))
-- continue;
-- blk_start_queue(h->gendisk[curr_queue]->queue);
--
-- /* check to see if we have maxed out the number of commands
-- * that can be placed on the queue.
-- */
-- if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
-- {
-- if (curr_queue == start_queue){
-- h->next_to_run = (start_queue + 1) % (h->highest_lun + 1);
-- goto cleanup;
-- } else {
-- h->next_to_run = curr_queue;
-- goto cleanup;
-- }
-- } else {
-- curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
-- }
-- }
--
--cleanup:
- spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
- return IRQ_HANDLED;
- }
- /*
-- * We cannot read the structure directly, for portablity we must use
-+ * We cannot read the structure directly, for portability we must use
- * the io functions.
- * This is for debug only.
- */
- #ifdef CCISS_DEBUG
--static void print_cfg_table( CfgTable_struct *tb)
-+static void print_cfg_table(CfgTable_struct *tb)
- {
- int i;
- char temp_name[17];
-
- printk("Controller Configuration information\n");
- printk("------------------------------------\n");
-- for(i=0;i<4;i++)
-+ for (i = 0; i < 4; i++)
- temp_name[i] = readb(&(tb->Signature[i]));
-- temp_name[4]='\0';
-+ temp_name[4] = '\0';
- printk(" Signature = %s\n", temp_name);
- printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
- printk(" Transport methods supported = 0x%x\n",
-@@ -2628,14 +3450,14 @@
- readl(&(tb->TransportActive)));
- printk(" Requested transport Method = 0x%x\n",
- readl(&(tb->HostWrite.TransportRequest)));
-- printk(" Coalese Interrupt Delay = 0x%x\n",
-+ printk(" Coalesce Interrupt Delay = 0x%x\n",
- readl(&(tb->HostWrite.CoalIntDelay)));
-- printk(" Coalese Interrupt Count = 0x%x\n",
-+ printk(" Coalesce Interrupt Count = 0x%x\n",
- readl(&(tb->HostWrite.CoalIntCount)));
- printk(" Max outstanding commands = 0x%d\n",
- readl(&(tb->CmdsOutMax)));
- printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
-- for(i=0;i<16;i++)
-+ for (i = 0; i < 16; i++)
- temp_name[i] = readb(&(tb->ServerName[i]));
- temp_name[16] = '\0';
- printk(" Server Name = %s\n", temp_name);
-@@ -2644,16 +3466,6 @@
- }
- #endif /* CCISS_DEBUG */
-
--static void release_io_mem(ctlr_info_t *c)
--{
-- /* if IO mem was not protected do nothing */
-- if( c->io_mem_addr == 0)
-- return;
-- release_region(c->io_mem_addr, c->io_mem_length);
-- c->io_mem_addr = 0;
-- c->io_mem_length = 0;
--}
--
- static int find_PCI_BAR_index(struct pci_dev *pdev,
- unsigned long pci_bar_addr)
- {
-@@ -2661,7 +3473,7 @@
- if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
- return 0;
- offset = 0;
-- for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
-+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- bar_type = pci_resource_flags(pdev, i) &
- PCI_BASE_ADDRESS_SPACE;
- if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
-@@ -2684,7 +3496,7 @@
- }
- }
- if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
-- return i+1;
-+ return i + 1;
- }
- return -1;
- }
-@@ -2692,8 +3504,8 @@
- /* If MSI/MSI-X is supported by the kernel we will try to enable it on
- * controllers that are capable. If not, we use IO-APIC mode.
- */
--
--static void __devinit cciss_interrupt_mode(ctlr_info_t *c, struct pci_dev *pdev, __u32 board_id)
-+static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
-+ struct pci_dev *pdev, __u32 board_id)
- {
- #ifdef CONFIG_PCI_MSI
- int err;
-@@ -2719,26 +3531,24 @@
- }
- if (err > 0) {
- printk(KERN_WARNING "cciss: only %d MSI-X vectors "
-- "available\n", err);
-+ " available\n", err);
-+ goto default_int_mode;
- } else {
-- printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
-- err);
-+ printk(KERN_WARNING "cciss: MSI-X init failed,"
-+ " failure mode unknown, error = %d\n", err);
-+ goto default_int_mode;
- }
- }
- if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
- if (!pci_enable_msi(pdev)) {
-- c->intr[SIMPLE_MODE_INT] = pdev->irq;
- c->msi_vector = 1;
-- return;
- } else {
- printk(KERN_WARNING "cciss: MSI init failed\n");
-- c->intr[SIMPLE_MODE_INT] = pdev->irq;
-- return;
- }
- }
-+default_int_mode:
- #endif /* CONFIG_PCI_MSI */
- /* if we get here we're going to use the default interrupt mode */
--default_int_mode:
- c->intr[SIMPLE_MODE_INT] = pdev->irq;
- return;
- }
-@@ -2750,53 +3560,34 @@
- __u64 cfg_offset;
- __u32 cfg_base_addr;
- __u64 cfg_base_addr_index;
-- int i;
-+ int i, err;
-
- /* check to see if controller has been disabled */
- /* BEFORE trying to enable it */
-- (void) pci_read_config_word(pdev, PCI_COMMAND,&command);
-- if(!(command & 0x02))
-- {
-- printk(KERN_WARNING "cciss: controller appears to be disabled\n");
-- return(-1);
-+ (void)pci_read_config_word(pdev, PCI_COMMAND,&command);
-+ if (!(command & 0x02)) {
-+ printk(KERN_WARNING
-+ "cciss: controller appears to be disabled\n");
-+ return -ENODEV;
- }
-
-- if (pci_enable_device(pdev))
-- {
-+ err = pci_enable_device(pdev);
-+ if (err) {
- printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
-- return( -1);
-+ return err;
- }
-
-+ err = pci_request_regions(pdev, "cciss");
-+ if (err) {
-+ printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
-+ "aborting\n");
-+ goto err_out_disable_pdev;
-+ }
- subsystem_vendor_id = pdev->subsystem_vendor;
- subsystem_device_id = pdev->subsystem_device;
- board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
- subsystem_vendor_id);
-
-- /* search for our IO range so we can protect it */
-- for(i=0; i<DEVICE_COUNT_RESOURCE; i++)
-- {
-- /* is this an IO range */
-- if( pci_resource_flags(pdev, i) & 0x01 ) {
-- c->io_mem_addr = pci_resource_start(pdev, i);
-- c->io_mem_length = pci_resource_end(pdev, i) -
-- pci_resource_start(pdev, i) +1;
--#ifdef CCISS_DEBUG
-- printk("IO value found base_addr[%d] %lx %lx\n", i,
-- c->io_mem_addr, c->io_mem_length);
--#endif /* CCISS_DEBUG */
-- /* register the IO range */
-- if(!request_region( c->io_mem_addr,
-- c->io_mem_length, "cciss"))
-- {
-- printk(KERN_WARNING "cciss I/O memory range already in use addr=%lx length=%ld\n",
-- c->io_mem_addr, c->io_mem_length);
-- c->io_mem_addr= 0;
-- c->io_mem_length = 0;
-- }
-- break;
-- }
-- }
--
- #ifdef CCISS_DEBUG
- printk("command = %x\n", command);
- printk("irq = %x\n", pdev->irq);
-@@ -2817,11 +3608,11 @@
- #ifdef CCISS_DEBUG
- printk("address 0 = %x\n", c->paddr);
- #endif /* CCISS_DEBUG */
-- c->vaddr = remap_pci_mem(c->paddr, 200);
-+ c->vaddr = remap_pci_mem(c->paddr, 0x250);
-
- /* Wait for the board to become ready. (PCI hotplug needs this.)
- * We poll for up to 120 secs, once per 100ms. */
-- for (i=0; i < 1200; i++) {
-+ for (i = 0; i < 1200; i++) {
- scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
- if (scratchpad == CCISS_FIRMWARE_READY)
- break;
-@@ -2830,7 +3621,8 @@
- }
- if (scratchpad != CCISS_FIRMWARE_READY) {
- printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
-- return -1;
-+ err = -ENODEV;
-+ goto err_out_free_res;
- }
-
- /* get the address index number */
-@@ -2839,15 +3631,14 @@
- #ifdef CCISS_DEBUG
- printk("cfg base address = %x\n", cfg_base_addr);
- #endif /* CCISS_DEBUG */
-- cfg_base_addr_index =
-- find_PCI_BAR_index(pdev, cfg_base_addr);
-+ cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
- #ifdef CCISS_DEBUG
- printk("cfg base address index = %x\n", cfg_base_addr_index);
- #endif /* CCISS_DEBUG */
- if (cfg_base_addr_index == -1) {
- printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
-- release_io_mem(c);
-- return -1;
-+ err = -ENODEV;
-+ goto err_out_free_res;
- }
-
- cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
-@@ -2863,19 +3654,14 @@
- print_cfg_table(c->cfgtable);
- #endif /* CCISS_DEBUG */
-
-- for(i=0; i<NR_PRODUCTS; i++) {
-+ for (i = 0; i < ARRAY_SIZE(products); i++) {
- if (board_id == products[i].board_id) {
- c->product_name = products[i].product_name;
- c->access = *(products[i].access);
-+ c->nr_cmds = products[i].nr_cmds;
- break;
- }
- }
-- if (i == NR_PRODUCTS) {
-- printk(KERN_WARNING "cciss: Sorry, I don't know how"
-- " to access the Smart Array controller %08lx\n",
-- (unsigned long)board_id);
-- return -1;
-- }
- if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
- (readb(&c->cfgtable->Signature[1]) != 'I') ||
- (readb(&c->cfgtable->Signature[2]) != 'S') ||
-@@ -2885,6 +3671,27 @@
- return -1;
- }
-
-+ /* We didn't find the controller in our list. We know the
-+ * signature is valid. If it's an HP device let's try to
-+ * bind to the device and fire it up. Otherwise we bail.
-+ */
-+ if (i == ARRAY_SIZE(products)) {
-+ if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
-+ c->product_name = products[ARRAY_SIZE(products)-1].product_name;
-+ c->access = *(products[ARRAY_SIZE(products)-1].access);
-+ c->nr_cmds = products[ARRAY_SIZE(products)-1].nr_cmds;
-+ printk(KERN_WARNING "cciss: This is an unknown "
-+ "Smart Array controller.\n"
-+ "cciss: Please update to the latest driver "
-+ "available from www.hp.com.\n");
-+ } else {
-+ printk(KERN_WARNING "cciss: Sorry, I don't know how"
-+ " to access the Smart Array controller %08lx\n"
-+ , (unsigned long)board_id);
-+ return -1;
-+ }
-+ }
-+
- #ifdef CONFIG_X86
- {
- /* Need to enable prefetch in the SCSI core for 6400 in x86 */
-@@ -2895,6 +3702,28 @@
- }
- #endif
-
-+ /* Disabling DMA prefetch for the P600
-+ * An ASIC bug may result in a prefetch beyond
-+ * physical memory.
-+ */
-+ if(board_id == 0x3225103C) {
-+ __u32 dma_prefetch;
-+ dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
-+ dma_prefetch |= 0x8000;
-+ writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
-+#ifdef __ia64__
-+ /* On HP Integrity platforms, turn off DMA refetch
-+ * as well.
-+ */
-+ {
-+ __u32 dma_refetch;
-+ pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
-+ dma_refetch |= 0x1;
-+ pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
-+ }
-+#endif
-+ }
-+
- #ifdef CCISS_DEBUG
- printk("Trying to put board into Simple mode\n");
- #endif /* CCISS_DEBUG */
-@@ -2907,7 +3736,7 @@
- /* under certain very rare conditions, this can take awhile.
- * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
- * as we enter this code.) */
-- for(i=0;i<MAX_CONFIG_WAIT;i++) {
-+ for (i = 0; i < MAX_CONFIG_WAIT; i++) {
- if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
- break;
- /* delay and try again */
-@@ -2922,14 +3751,20 @@
- print_cfg_table(c->cfgtable);
- #endif /* CCISS_DEBUG */
-
-- if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
-- {
-+ if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
- printk(KERN_WARNING "cciss: unable to get board into"
- " simple mode\n");
-- return -1;
-+ err = -ENODEV;
-+ goto err_out_free_res;
- }
- return 0;
-
-+err_out_free_res:
-+ pci_release_regions(pdev);
-+
-+err_out_disable_pdev:
-+ pci_disable_device(pdev);
-+ return err;
- }
-
- /*
-@@ -2938,48 +3773,34 @@
- static void cciss_getgeometry(int cntl_num)
- {
- ReportLunData_struct *ld_buff;
-- ReadCapdata_struct *size_buff;
- InquiryData_struct *inq_buff;
- int return_code;
- int i;
- int listlength = 0;
- __u32 lunid = 0;
- int block_size;
-- int total_size;
-+ sector_t total_size;
-
-- ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
-- if (ld_buff == NULL)
-- {
-+ ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
-+ if (ld_buff == NULL) {
- printk(KERN_ERR "cciss: out of memory\n");
- return;
- }
-- memset(ld_buff, 0, sizeof(ReportLunData_struct));
-- size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
-- if (size_buff == NULL)
-- {
-- printk(KERN_ERR "cciss: out of memory\n");
-- kfree(ld_buff);
-- return;
-- }
-- inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
-- if (inq_buff == NULL)
-- {
-+ inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
-+ if (inq_buff == NULL) {
- printk(KERN_ERR "cciss: out of memory\n");
- kfree(ld_buff);
-- kfree(size_buff);
- return;
- }
- /* Get the firmware version */
- return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
- sizeof(InquiryData_struct), 0, 0 ,0, NULL, TYPE_CMD);
-- if (return_code == IO_OK)
-- {
-+ if (return_code == IO_OK) {
- hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
- hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
- hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
- hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
-- } else /* send command failed */
-- {
-+ } else { /* send command failed */
- printk(KERN_WARNING "cciss: unable to determine firmware"
- " version of controller\n");
- }
-@@ -2987,8 +3808,7 @@
- return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
- sizeof(ReportLunData_struct), 0, 0, 0, NULL, TYPE_CMD);
-
-- if( return_code == IO_OK)
-- {
-+ if (return_code == IO_OK) {
- #ifdef CCISS_DEBUG
- printk("LUN Data\n--------------------------\n");
- #endif /* CCISS_DEBUG */
-@@ -2997,15 +3817,14 @@
- listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
- listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
- listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
-- } else /* reading number of logical volumes failed */
-- {
-+ } else {
-+ /* reading number of logical volumes failed */
- printk(KERN_WARNING "cciss: report logical volume"
- " command failed\n");
- listlength = 0;
- }
-- hba[cntl_num]->num_luns = listlength / 8; // 8 bytes pre entry
-- if (hba[cntl_num]->num_luns > CISS_MAX_LUN)
-- {
-+ hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
-+ if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
- printk(KERN_ERR "ciss: only %d number of logical volumes supported\n",
- CISS_MAX_LUN);
- hba[cntl_num]->num_luns = CISS_MAX_LUN;
-@@ -3016,11 +3835,9 @@
- ld_buff->LUNListLength[3], hba[cntl_num]->num_luns);
- #endif /* CCISS_DEBUG */
-
-- hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
--// for(i=0; i< hba[cntl_num]->num_luns; i++)
-- for(i=0; i < CISS_MAX_LUN; i++)
-- {
-- if (i < hba[cntl_num]->num_luns){
-+ hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
-+ for (i = 0; i < CISS_MAX_LUN; i++) {
-+ if (i < hba[cntl_num]->num_luns) {
- lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
- << 24;
- lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
-@@ -3031,15 +3848,33 @@
-
- hba[cntl_num]->drv[i].LunID = lunid;
-
--
- #ifdef CCISS_DEBUG
- printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
- ld_buff->LUN[i][0], ld_buff->LUN[i][1],
- ld_buff->LUN[i][2], ld_buff->LUN[i][3],
- hba[cntl_num]->drv[i].LunID);
- #endif /* CCISS_DEBUG */
-- cciss_read_capacity(cntl_num, i, size_buff, 0,
-- &total_size, &block_size);
-+
-+ /* testing to see if 16-byte CDBs are already being used */
-+ if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
-+ cciss_read_capacity_16(cntl_num, i, 0,
-+ &total_size, &block_size);
-+ goto geo_inq;
-+ }
-+ cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
-+
-+ /* If read_capacity returns all F's the volume is >2TB */
-+ /* so we switch to 16-byte CDBs for all read/write ops */
-+ if(total_size == 0xFFFFFFFFULL) {
-+ cciss_read_capacity_16(cntl_num, i, 0,
-+ &total_size, &block_size);
-+ hba[cntl_num]->cciss_read = CCISS_READ_16;
-+ hba[cntl_num]->cciss_write = CCISS_WRITE_16;
-+ } else {
-+ hba[cntl_num]->cciss_read = CCISS_READ_10;
-+ hba[cntl_num]->cciss_write = CCISS_WRITE_10;
-+ }
-+geo_inq:
- cciss_geometry_inquiry(cntl_num, i, 0, total_size,
- block_size, inq_buff, &hba[cntl_num]->drv[i]);
- } else {
-@@ -3048,7 +3883,6 @@
- }
- }
- kfree(ld_buff);
-- kfree(size_buff);
- kfree(inq_buff);
- }
-
-@@ -3056,35 +3890,29 @@
- /* Returns -1 if no free entries are left. */
- static int alloc_cciss_hba(void)
- {
-- struct gendisk *disk[NWD];
-- int i, n;
-- for (n = 0; n < NWD; n++) {
-- disk[n] = alloc_disk(1 << NWD_SHIFT);
-- if (!disk[n])
-- goto out;
-- }
-+ int i;
-
-- for(i=0; i< MAX_CTLR; i++) {
-+ for (i = 0; i < MAX_CTLR; i++) {
- if (!hba[i]) {
- ctlr_info_t *p;
- p = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
- if (!p)
- goto Enomem;
- memset(p, 0, sizeof(ctlr_info_t));
-- for (n = 0; n < NWD; n++)
-- p->gendisk[n] = disk[n];
-+ p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
-+ if (!p->gendisk[0]) {
-+ kfree(p);
-+ goto Enomem;
-+ }
- hba[i] = p;
- return i;
- }
- }
- printk(KERN_WARNING "cciss: This driver supports a maximum"
- " of %d controllers.\n", MAX_CTLR);
-- goto out;
-+ return -1;
- Enomem:
- printk(KERN_ERR "cciss: out of memory.\n");
--out:
-- while (n--)
-- put_disk(disk[n]);
- return -1;
- }
-
-@@ -3094,7 +3922,7 @@
- int n;
-
- hba[i] = NULL;
-- for (n = 0; n < NWD; n++)
-+ for (n = 0; n < CISS_MAX_LUN; n++)
- put_disk(p->gendisk[n]);
- kfree(p);
- }
-@@ -3107,18 +3935,19 @@
- static int __devinit cciss_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
- {
-- request_queue_t *q;
-- int i;
-- int j;
-+ int i, k;
-+ int j = 0;
- int rc;
-+ int dac;
-+ InquiryData_struct *inq_buff;
-
- printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
- " bus %d dev %d func %d\n",
- pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
- i = alloc_cciss_hba();
-- if(i < 0)
-- return (-1);
-+ if (i < 0)
-+ return -1;
-
- hba[i]->busy_initializing = 1;
-
-@@ -3131,11 +3960,11 @@
-
- /* configure PCI DMA stuff */
- if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
-- printk("cciss: using DAC cycles\n");
-+ dac = 1;
- else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
-- printk("cciss: not using DAC cycles\n");
-+ dac = 0;
- else {
-- printk("cciss: no suitable DMA available\n");
-+ printk(KERN_ERR "cciss: no suitable DMA available\n");
- goto clean1;
- }
-
-@@ -3147,45 +3976,62 @@
- if (i < MAX_CTLR_ORIG)
- hba[i]->major = COMPAQ_CISS_MAJOR + i;
- rc = register_blkdev(hba[i]->major, hba[i]->devname);
-- if(rc == -EBUSY || rc == -EINVAL) {
-+ if (rc == -EBUSY || rc == -EINVAL) {
- printk(KERN_ERR
- "cciss: Unable to get major number %d for %s "
- "on hba %d\n", hba[i]->major, hba[i]->devname, i);
- goto clean1;
-- }
-- else {
-+ } else {
- if (i >= MAX_CTLR_ORIG)
- hba[i]->major = rc;
- }
-
- /* make sure the board interrupts are off */
- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
-- if( request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
-- SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
-+ /* If we are running in IO-APIC or MSI mode we register only
-+ * one interrupt. If we're running MSI-X mode we must register
-+ * four interrupts because the hardware ORing is ignored.
-+ */
-+ if (hba[i]->msix_vector) {
-+ for (k = 0; k < 4; k++) {
-+ if (request_irq(hba[i]->intr[k], do_cciss_intr,
-+ SA_INTERRUPT | SA_SAMPLE_RANDOM,
-+ hba[i]->devname, hba[i])) {
-+ printk(KERN_ERR "cciss: Unable to get msi irq %d"
-+ " for %s\n",
-+ hba[i]->intr[k], hba[i]->devname);
-+ goto clean2;
-+ }
-+ }
-+ } else {
-+ if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
-+ SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
- hba[i]->devname, hba[i])) {
-- printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
-- hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
-- goto clean2;
-+ printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
-+ hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
-+ goto clean2;
-+ }
- }
-- hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
-+
-+ hba[i]->cmd_pool_bits = kmalloc(((hba[i]->nr_cmds+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
- hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
-- hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
-+ hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
- &(hba[i]->cmd_pool_dhandle));
- hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
-- hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
-+ hba[i]->pdev, hba[i]->nr_cmds * sizeof( ErrorInfo_struct),
- &(hba[i]->errinfo_pool_dhandle));
-- if((hba[i]->cmd_pool_bits == NULL)
-+ if ((hba[i]->cmd_pool_bits == NULL)
- || (hba[i]->cmd_pool == NULL)
- || (hba[i]->errinfo_pool == NULL)) {
-- printk( KERN_ERR "cciss: out of memory");
-+ printk(KERN_ERR "cciss: out of memory");
- goto clean4;
- }
- #ifdef CONFIG_CISS_SCSI_TAPE
- hba[i]->scsi_rejects.complete =
- kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
-- (NR_CMDS + 5), GFP_KERNEL);
-+ (hba[i]->nr_cmds + 5), GFP_KERNEL);
- if (hba[i]->scsi_rejects.complete == NULL) {
-- printk( KERN_ERR "cciss: out of memory");
-+ printk(KERN_ERR "cciss: out of memory");
- goto clean4;
- }
- #endif
-@@ -3196,10 +4042,10 @@
- pci_set_drvdata(pdev, hba[i]);
- /* command and error info recs zeroed out before
- they are used */
-- memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
-+ memset(hba[i]->cmd_pool_bits, 0, ((hba[i]->nr_cmds+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
-
- #ifdef CCISS_DEBUG
-- printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
-+ printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
- #endif /* CCISS_DEBUG */
-
- cciss_getgeometry(i);
-@@ -3210,22 +4056,35 @@
- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
-
- cciss_procinit(i);
-+ hba[i]->cciss_sector_size = 2048;
-+
- hba[i]->busy_initializing = 0;
-
-- for(j=0; j < NWD; j++) { /* mfm */
-+ do {
- drive_info_struct *drv = &(hba[i]->drv[j]);
- struct gendisk *disk = hba[i]->gendisk[j];
-+ request_queue_t *q;
-+
-+ /* Check if the disk was allocated already */
-+ if (!disk) {
-+ hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
-+ disk = hba[i]->gendisk[j];
-+ }
-+
-+ /* Check that the disk was able to be allocated */
-+ if (!disk) {
-+ printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
-+ goto clean4;
-+ }
-
- q = blk_init_queue(do_cciss_request, &hba[i]->lock);
- if (!q) {
- printk(KERN_ERR
- "cciss: unable to allocate queue for disk %d\n",
- j);
-- break;
-+ goto clean4;
- }
-- drv->queue = q;
-
-- q->backing_dev_info.ra_pages = READ_AHEAD;
- blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
-
- /* This is a hardware imposed limit. */
-@@ -3234,130 +4093,181 @@
- /* This is a limit in the driver and could be eliminated. */
- blk_queue_max_phys_segments(q, MAXSGENTRIES);
-
-- blk_queue_max_sectors(q, 512);
-+ blk_queue_max_sectors(q, hba[i]->cciss_sector_size);
-
- blk_queue_softirq_done(q, cciss_softirq_done);
-
- q->queuedata = hba[i];
-- sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
-- sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
-- disk->major = hba[i]->major;
-- disk->first_minor = j << NWD_SHIFT;
-- disk->fops = &cciss_fops;
-- disk->queue = q;
-- disk->private_data = drv;
- /* we must register the controller even if no disks exist */
- /* this is for the online array utilities */
-- if(!drv->heads && j)
-+ if (!drv->heads && j)
- continue;
- blk_queue_hardsect_size(q, drv->block_size);
-+ cciss_sysfs_stat_inquiry(i, j, 1, drv);
-+ cciss_add_blk_sysfs_dev(drv, disk, pdev, j);
- set_capacity(disk, drv->nr_blocks);
-+ sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
-+ disk->major = hba[i]->major;
-+ disk->first_minor = j << NWD_SHIFT;
-+ disk->fops = &cciss_fops;
-+ disk->private_data = drv;
-+ disk->queue = q;
-+ drv->queue = q;
- add_disk(disk);
-- }
-+ j++;
-+ } while (j <= hba[i]->highest_lun);
-
-- return(1);
-+ return 1;
-
- clean4:
- #ifdef CONFIG_CISS_SCSI_TAPE
-- if(hba[i]->scsi_rejects.complete)
-- kfree(hba[i]->scsi_rejects.complete);
-+ kfree(hba[i]->scsi_rejects.complete);
- #endif
- kfree(hba[i]->cmd_pool_bits);
-- if(hba[i]->cmd_pool)
-+ if (hba[i]->cmd_pool)
- pci_free_consistent(hba[i]->pdev,
-- NR_CMDS * sizeof(CommandList_struct),
-+ hba[i]->nr_cmds * sizeof(CommandList_struct),
- hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
-- if(hba[i]->errinfo_pool)
-+ if (hba[i]->errinfo_pool)
- pci_free_consistent(hba[i]->pdev,
-- NR_CMDS * sizeof( ErrorInfo_struct),
-+ hba[i]->nr_cmds * sizeof( ErrorInfo_struct),
- hba[i]->errinfo_pool,
- hba[i]->errinfo_pool_dhandle);
-- free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
-+ if (hba[i]->msix_vector) {
-+ for (j = 0; j < 4; j++)
-+ free_irq(hba[i]->intr[j], hba[i]);
-+ } else {
-+ free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
-+ }
-+#ifdef CONFIG_PCI_MSI
-+ if (hba[i]->msix_vector)
-+ pci_disable_msix(hba[i]->pdev);
-+ else if (hba[i]->msi_vector)
-+ pci_disable_msi(hba[i]->pdev);
-+#endif /* CONFIG_PCI_MSI */
- clean2:
- unregister_blkdev(hba[i]->major, hba[i]->devname);
- clean1:
-- release_io_mem(hba[i]);
- hba[i]->busy_initializing = 0;
-+ /* cleanup any queues that may have been initialized */
-+ for (j = 0; j <= hba[i]->highest_lun; j++){
-+ drive_info_struct *drv = &(hba[i]->drv[j]);
-+ if (drv->queue)
-+ blk_cleanup_queue(drv->queue);
-+ }
-+ pci_release_regions(pdev);
-+ /* This call to pci_disable_device causes the driver to be unable
-+ * to load/unload multiple times. No reason why yet, but we are
-+ * leaving it out for now.
-+ */
-+// pci_disable_device(pdev);
-+ pci_set_drvdata(pdev, NULL);
- free_hba(i);
-- return(-1);
-+ return -1;
- }
-
--static void __devexit cciss_remove_one (struct pci_dev *pdev)
-+static void cciss_shutdown (struct pci_dev *pdev)
- {
- ctlr_info_t *tmp_ptr;
- int i, j;
- char flush_buf[4];
- int return_code;
-
-- if (pci_get_drvdata(pdev) == NULL)
-- {
-- printk( KERN_ERR "cciss: Unable to remove device \n");
-- return;
-- }
- tmp_ptr = pci_get_drvdata(pdev);
-+ if (tmp_ptr == NULL)
-+ return;
- i = tmp_ptr->ctlr;
-- if (hba[i] == NULL)
-- {
-- printk(KERN_ERR "cciss: device appears to "
-- "already be removed \n");
-+ if (hba[i] == NULL)
- return;
-- }
-+
- /* Turn board interrupts off and send the flush cache command */
- /* sendcmd will turn off interrupt, and send the flush...
- * To write all data in the battery backed cache to disks */
- memset(flush_buf, 0, 4);
- return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
- TYPE_CMD);
-- if(return_code != IO_OK)
-- {
-+ if(return_code != IO_OK) {
- printk(KERN_WARNING "Error Flushing cache on controller %d\n",
- i);
- }
-- free_irq(hba[i]->intr[2], hba[i]);
-+ if (hba[i]->msix_vector) {
-+ for (j = 0; j < 4; j++)
-+ free_irq(hba[i]->intr[j], hba[i]);
-+ } else {
-+ free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
-+ }
-+}
-
--#ifdef CONFIG_PCI_MSI
-- if (hba[i]->msix_vector)
-- pci_disable_msix(hba[i]->pdev);
-- else if (hba[i]->msi_vector)
-- pci_disable_msi(hba[i]->pdev);
--#endif /* CONFIG_PCI_MSI */
-+static void __devexit cciss_remove_one(struct pci_dev *pdev)
-+{
-+ ctlr_info_t *tmp_ptr;
-+ int i, j;
-
-- pci_set_drvdata(pdev, NULL);
-- iounmap(hba[i]->vaddr);
-- cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
-+ if (pci_get_drvdata(pdev) == NULL) {
-+ printk(KERN_ERR "cciss: Unable to remove device \n");
-+ return;
-+ }
-+ tmp_ptr = pci_get_drvdata(pdev);
-+ i = tmp_ptr->ctlr;
-+ if (hba[i] == NULL) {
-+ printk(KERN_ERR "cciss: device appears to "
-+ "already be removed \n");
-+ return;
-+ }
-+
-+ remove_proc_entry(hba[i]->devname, proc_cciss);
- unregister_blkdev(hba[i]->major, hba[i]->devname);
-- remove_proc_entry(hba[i]->devname, proc_cciss);
--
-+
- /* remove it from the disk list */
-- for (j = 0; j < NWD; j++) {
-+ for (j = 0; j < CISS_MAX_LUN; j++) {
- struct gendisk *disk = hba[i]->gendisk[j];
- if (disk) {
- request_queue_t *q = disk->queue;
-
-- if (disk->flags & GENHD_FL_UP)
-+ if (disk->flags & GENHD_FL_UP)
-+ cciss_remove_blk_sysfs_dev(disk);
- del_gendisk(disk);
- if (q)
- blk_cleanup_queue(q);
- }
- }
-
-- pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
-+ cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
-+
-+ cciss_shutdown(pdev);
-+
-+#ifdef CONFIG_PCI_MSI
-+ if (hba[i]->msix_vector)
-+ pci_disable_msix(hba[i]->pdev);
-+ else if (hba[i]->msi_vector)
-+ pci_disable_msi(hba[i]->pdev);
-+#endif /* CONFIG_PCI_MSI */
-+
-+ iounmap(hba[i]->vaddr);
-+
-+ pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
- hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
-- pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
-- hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
-+ pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
-+ hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
- kfree(hba[i]->cmd_pool_bits);
- #ifdef CONFIG_CISS_SCSI_TAPE
- kfree(hba[i]->scsi_rejects.complete);
- #endif
-- release_io_mem(hba[i]);
-+ /*
-+ * Deliberately omit pci_disable_device(): it does something nasty to
-+ * Smart Array controllers that pci_enable_device does not undo
-+ */
-+ pci_release_regions(pdev);
-+ pci_set_drvdata(pdev, NULL);
- free_hba(i);
--}
-+}
-
- static struct pci_driver cciss_pci_driver = {
-- .name = "cciss",
-- .probe = cciss_init_one,
-- .remove = __devexit_p(cciss_remove_one),
-- .id_table = cciss_pci_device_id, /* id_table */
-+ .name = "cciss",
-+ .probe = cciss_init_one,
-+ .remove = __devexit_p(cciss_remove_one),
-+ .id_table = cciss_pci_device_id, /* id_table */
-+ .shutdown = cciss_shutdown,
- };
-
- /*
-@@ -3378,10 +4288,8 @@
-
- pci_unregister_driver(&cciss_pci_driver);
- /* double check that all controller entrys have been removed */
-- for (i=0; i< MAX_CTLR; i++)
-- {
-- if (hba[i] != NULL)
-- {
-+ for (i = 0; i < MAX_CTLR; i++) {
-+ if (hba[i] != NULL) {
- printk(KERN_WARNING "cciss: had to remove"
- " controller %d\n", i);
- cciss_remove_one(hba[i]->pdev);
-@@ -3405,14 +4313,14 @@
- pci_disable_device(h->pdev); /* Make sure it is really dead. */
-
- /* move everything off the request queue onto the completed queue */
-- while( (c = h->reqQ) != NULL ) {
-+ while ((c = h->reqQ) != NULL) {
- removeQ(&(h->reqQ), c);
- h->Qdepth--;
-- addQ (&(h->cmpQ), c);
-+ addQ(&(h->cmpQ), c);
- }
-
- /* Now, fail everything on the completed queue with a HW error */
-- while( (c = h->cmpQ) != NULL ) {
-+ while ((c = h->cmpQ) != NULL) {
- removeQ(&h->cmpQ, c);
- c->err_info->CommandStatus = CMD_HARDWARE_ERR;
- if (c->cmd_type == CMD_RWREQ) {
-diff -uNr linux-2.6.16.orig/drivers/block/cciss_cmd.h linux-2.6.16/drivers/block/cciss_cmd.h
---- linux-2.6.16.orig/drivers/block/cciss_cmd.h 2006-03-20 06:53:29.000000000 +0100
-+++ linux-2.6.16/drivers/block/cciss_cmd.h 2008-10-03 02:40:19.000000000 +0200
-@@ -8,7 +8,7 @@
- //general boundary defintions
- #define SENSEINFOBYTES 32//note that this value may vary between host implementations
- #define MAXSGENTRIES 31
--#define MAXREPLYQS 256
-+#define MAXREPLYQS 1024
-
- //Command Status value
- #define CMD_SUCCESS 0x0000
-@@ -24,6 +24,7 @@
- #define CMD_UNSOLICITED_ABORT 0x000A
- #define CMD_TIMEOUT 0x000B
- #define CMD_UNABORTABLE 0x000C
-+#define CMD_RESERVATION_CONFLICT 0x0018
-
- //transfer direction
- #define XFER_NONE 0x00
-@@ -55,6 +56,7 @@
- #define I2O_INT_MASK 0x34
- #define I2O_IBPOST_Q 0x40
- #define I2O_OBPOST_Q 0x44
-+#define I2O_DMA1_CFG 0x214
-
- //Configuration Table
- #define CFGTBL_ChangeReq 0x00000001l
-@@ -88,7 +90,7 @@
- //###########################################################################
- //STRUCTURES
- //###########################################################################
--#define CISS_MAX_LUN 16
-+#define CISS_MAX_LUN 1024
- #define CISS_MAX_PHYS_LUN 1024
- // SCSI-3 Cmmands
-
-@@ -118,11 +120,34 @@
- BYTE block_size[4]; // Size of blocks in bytes
- } ReadCapdata_struct;
-
--// 12 byte commands not implemented in firmware yet.
--// #define CCISS_READ 0xa8 // Read(12)
--// #define CCISS_WRITE 0xaa // Write(12)
-- #define CCISS_READ 0x28 // Read(10)
-- #define CCISS_WRITE 0x2a // Write(10)
-+#define CCISS_READ_CAPACITY_16 0x9e /* Read Capacity 16 */
-+
-+/* service action to differentiate a 16 byte read capacity from
-+ other commands that use the 0x9e SCSI op code */
-+
-+#define CCISS_READ_CAPACITY_16_SERVICE_ACT 0x10
-+
-+typedef struct _ReadCapdata_struct_16
-+{
-+ BYTE total_size[8]; /* Total size in blocks */
-+ BYTE block_size[4]; /* Size of blocks in bytes */
-+ BYTE prot_en:1; /* protection enable bit */
-+ BYTE rto_en:1; /* reference tag own enable bit */
-+ BYTE reserved:6; /* reserved bits */
-+ BYTE reserved2[18]; /* reserved bytes per spec */
-+} ReadCapdata_struct_16;
-+
-+/* Define the supported read/write commands for cciss based controllers */
-+
-+#define CCISS_READ_10 0x28 /* Read(10) */
-+#define CCISS_WRITE_10 0x2a /* Write(10) */
-+#define CCISS_READ_16 0x88 /* Read(16) */
-+#define CCISS_WRITE_16 0x8a /* Write(16) */
-+
-+/* Define the CDB lengths supported by cciss based controllers */
-+
-+#define CDB_LEN10 10
-+#define CDB_LEN16 16
-
- // BMIC commands
- #define BMIC_READ 0x26
-diff -uNr linux-2.6.16.orig/drivers/block/cciss.h linux-2.6.16/drivers/block/cciss.h
---- linux-2.6.16.orig/drivers/block/cciss.h 2006-03-20 06:53:29.000000000 +0100
-+++ linux-2.6.16/drivers/block/cciss.h 2008-10-03 02:40:19.000000000 +0200
-@@ -6,7 +6,6 @@
- #include "cciss_cmd.h"
-
-
--#define NWD 16
- #define NWD_SHIFT 4
- #define MAX_PART (1 << NWD_SHIFT)
-
-@@ -37,16 +36,21 @@
- * the drive is not in use/configured
- */
- int busy_configuring; /*This is set when the drive is being removed
-- *to prevent it from being opened or it's queue
-- *from being started.
-+ *to prevent it from being opened or it's
-+ *queue from being started.
- */
-+ char vendor[9];
-+ char model[17];
-+ char rev[5];
-+ BYTE uid[16];
-+ struct device *dev_info;
- } drive_info_struct;
-
- #ifdef CONFIG_CISS_SCSI_TAPE
-
- struct sendcmd_reject_list {
- int ncompletions;
-- unsigned long *complete; /* array of NR_CMDS tags */
-+ unsigned long *complete; /* array of tags */
- };
-
- #endif
-@@ -60,8 +64,7 @@
- __u32 board_id;
- void __iomem *vaddr;
- unsigned long paddr;
-- unsigned long io_mem_addr;
-- unsigned long io_mem_length;
-+ int nr_cmds; /* Number of commands allowed on this controller */
- CfgTable_struct __iomem *cfgtable;
- int interrupts_enabled;
- int major;
-@@ -78,6 +81,10 @@
- unsigned int intr[4];
- unsigned int msix_vector;
- unsigned int msi_vector;
-+ int cciss_sector_size; /* For setting blk_queue_max_sectors */
-+ BYTE cciss_read;
-+ BYTE cciss_write;
-+ BYTE cciss_read_capacity;
-
- // information about each logical volume
- drive_info_struct drv[CISS_MAX_LUN];
-@@ -109,7 +116,7 @@
- int next_to_run;
-
- // Disk structures we need to pass back
-- struct gendisk *gendisk[NWD];
-+ struct gendisk *gendisk[CISS_MAX_LUN];
- #ifdef CONFIG_CISS_SCSI_TAPE
- void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
- /* list of block side commands the scsi error handling sucked up */
-@@ -144,6 +151,17 @@
-
- #define CCISS_INTR_ON 1
- #define CCISS_INTR_OFF 0
-+
-+static inline ctlr_info_t *get_host(struct gendisk *disk)
-+{
-+ return disk->queue->queuedata;
-+}
-+
-+static inline drive_info_struct *get_drv(struct gendisk *disk)
-+{
-+ return disk->private_data;
-+}
-+
- /*
- Send the command to the hardware
- */
-@@ -281,9 +299,21 @@
- __u32 board_id;
- char *product_name;
- struct access_method *access;
-+ int nr_cmds; /* Max cmds to send to this kind of ctlr. */
-+};
-+
-+struct drv_dynamic {
-+ struct device dev; /* should be the first member */
-+ struct gendisk *disk;
- };
-
- #define CCISS_LOCK(i) (&hba[i]->lock)
-
-+#ifdef DECLARE_COMPLETION_ONSTACK
-+#define CCISS_DECLARE_COMPLETION(work) DECLARE_COMPLETION_ONSTACK(work)
-+#else
-+#define CCISS_DECLARE_COMPLETION(work) DECLARE_COMPLETION(work)
-+#endif
-+
- #endif /* CCISS_H */
-
-diff -uNr linux-2.6.16.orig/drivers/block/cciss_scsi.c linux-2.6.16/drivers/block/cciss_scsi.c
---- linux-2.6.16.orig/drivers/block/cciss_scsi.c 2006-03-20 06:53:29.000000000 +0100
-+++ linux-2.6.16/drivers/block/cciss_scsi.c 2008-10-03 02:40:19.000000000 +0200
-@@ -35,12 +35,12 @@
-
- #include <asm/atomic.h>
-
--#include <scsi/scsi.h>
- #include <scsi/scsi_cmnd.h>
- #include <scsi/scsi_device.h>
- #include <scsi/scsi_host.h>
-
- #include "cciss_scsi.h"
-+#include "cciss.h"
-
- #define CCISS_ABORT_MSG 0x00
- #define CCISS_RESET_MSG 0x01
-@@ -255,7 +255,6 @@
- #define DEVICETYPE(n) (n<0 || n>MAX_SCSI_DEVICE_CODE) ? \
- "Unknown" : scsi_device_types[n]
-
--#if 0
- static int xmargin=8;
- static int amargin=60;
-
-@@ -293,6 +292,7 @@
- }
- }
-
-+
- static void
- print_cmd(CommandList_struct *cp)
- {
-@@ -329,18 +329,21 @@
- cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
- cp->ErrDesc.Len);
- printk("sgs..........Errorinfo:\n");
-- printk("scsistatus:%d\n", cp->err_info->ScsiStatus);
-- printk("senselen:%d\n", cp->err_info->SenseLen);
-- printk("cmd status:%d\n", cp->err_info->CommandStatus);
-- printk("resid cnt:%d\n", cp->err_info->ResidualCnt);
-- printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
-- printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
-- printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
-+ printk("scsistatus: 0x%x\n", cp->err_info->ScsiStatus);
-+ printk("senselen: 0x%x\n", cp->err_info->SenseLen);
-+ printk("cmd status: 0x%x\n", cp->err_info->CommandStatus);
-+ printk("resid cnt: 0x%x\n", cp->err_info->ResidualCnt);
-+
-+ /* If this is a check condition print the SenseInfo */
-+ if ((cp->err_info->SenseInfo[2] & 0x0F) == 0x02)
-+ print_bytes(cp->err_info->SenseInfo, cp->err_info->SenseLen, 1, 0);
-+
-+ printk("offense size: 0x%x\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
-+ printk("offense byte: 0x%x\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
-+ printk("offense value: 0x%x\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
-
- }
-
--#endif
--
- static int
- find_bus_target_lun(int ctlr, int *bus, int *target, int *lun)
- {
-@@ -578,7 +581,7 @@
-
- if (cmd->use_sg) {
- pci_unmap_sg(ctlr->pdev,
-- cmd->buffer, cmd->use_sg,
-+ cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
- }
- else if (cmd->request_bufflen) {
-@@ -652,6 +655,7 @@
- }
- break;
- case CMD_PROTOCOL_ERR:
-+ cmd->result = DID_ERROR << 16;
- printk(KERN_WARNING "cciss: cp %p has "
- "protocol error \n", cp);
- break;
-@@ -770,7 +774,7 @@
- int direction)
- {
- unsigned long flags;
-- DECLARE_COMPLETION(wait);
-+ CCISS_DECLARE_COMPLETION(wait);
-
- cp->cmd_type = CMD_IOCTL_PEND; // treat this like an ioctl
- cp->scsi_cmd = NULL;
-@@ -1211,7 +1215,7 @@
- struct scsi_cmnd *cmd)
- {
- unsigned int use_sg, nsegs=0, len;
-- struct scatterlist *scatter = (struct scatterlist *) cmd->buffer;
-+ struct scatterlist *scatter = (struct scatterlist *) cmd->request_buffer;
- __u64 addr64;
-
- /* is it just one virtual address? */
-@@ -1233,7 +1237,7 @@
- } /* else, must be a list of virtual addresses.... */
- else if (cmd->use_sg <= MAXSGENTRIES) { /* not too many addrs? */
-
-- use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg,
-+ use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
-
- for (nsegs=0; nsegs < use_sg; nsegs++) {
-@@ -1441,21 +1445,18 @@
- }
-
- static void
--cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len)
-+cciss_seq_tape_report(struct seq_file *seq, int ctlr)
- {
- unsigned long flags;
-- int size;
--
-- *pos = *pos -1; *len = *len - 1; // cut off the last trailing newline
-
- CPQ_TAPE_LOCK(ctlr, flags);
-- size = sprintf(buffer + *len,
-+ seq_printf(seq,
- "Sequential access devices: %d\n\n",
- ccissscsi[ctlr].ndevices);
- CPQ_TAPE_UNLOCK(ctlr, flags);
-- *pos += size; *len += size;
- }
-
-+
- /* Need at least one of these error handlers to keep ../scsi/hosts.c from
- * complaining. Doing a host- or bus-reset can't do anything good here.
- * Despite what it might say in scsi_error.c, there may well be commands
-@@ -1535,6 +1536,5 @@
- #define cciss_scsi_setup(cntl_num)
- #define cciss_unregister_scsi(ctlr)
- #define cciss_register_scsi(ctlr)
--#define cciss_proc_tape_report(ctlr, buffer, pos, len)
-
- #endif /* CONFIG_CISS_SCSI_TAPE */
-diff -uNr linux-2.6.16.orig/include/linux/cciss_ioctl.h linux-2.6.16/include/linux/cciss_ioctl.h
---- linux-2.6.16.orig/include/linux/cciss_ioctl.h 2006-03-20 06:53:29.000000000 +0100
-+++ linux-2.6.16/include/linux/cciss_ioctl.h 2008-10-03 02:40:19.000000000 +0200
-@@ -11,7 +11,7 @@
- {
- unsigned char bus;
- unsigned char dev_fn;
-- unsigned short domain;
-+ __u16 domain;
- __u32 board_id;
- } cciss_pci_info_struct;
-
-@@ -80,7 +80,7 @@
- #define HWORD __u16
- #define DWORD __u32
-
--#define CISS_MAX_LUN 16
-+#define CISS_MAX_LUN 256
-
- #define LEVEL2LUN 1 // index into Target(x) structure, due to byte swapping
- #define LEVEL3LUN 0
+++ /dev/null
-diff -Nru linux-2.6.16.21.orig/sound/pci/hda/patch_realtek.c linux-2.6.16.21/sound/pci/hda/patch_realtek.c
---- linux-2.6.16.21.orig/sound/pci/hda/patch_realtek.c 2006-06-22 11:37:47.806532750 +0000
-+++ linux-2.6.16.21/sound/pci/hda/patch_realtek.c 2006-06-22 11:38:20.088550250 +0000
-@@ -2953,7 +2953,7 @@
- { .modelname = "hp", .config = ALC260_HP },
- { .pci_subvendor = 0x103c, .pci_subdevice = 0x3010, .config = ALC260_HP },
- { .pci_subvendor = 0x103c, .pci_subdevice = 0x3011, .config = ALC260_HP },
-- { .pci_subvendor = 0x103c, .pci_subdevice = 0x3012, .config = ALC260_HP },
-+ { .pci_subvendor = 0x103c, .pci_subdevice = 0x3012, .config = ALC260_HP_3013 },
- { .pci_subvendor = 0x103c, .pci_subdevice = 0x3013, .config = ALC260_HP_3013 },
- { .pci_subvendor = 0x103c, .pci_subdevice = 0x3014, .config = ALC260_HP },
- { .pci_subvendor = 0x103c, .pci_subdevice = 0x3015, .config = ALC260_HP },
+++ /dev/null
-From: Andi Kleen <ak@suse.de>
-Date: Fri, 21 Sep 2007 14:16:18 +0000 (+0200)
-Subject: [PATCH] x86_64: Zero extend all registers after ptrace in 32bit entry path.
-X-Git-Tag: v2.6.22.7~1
-X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fstable%2Flinux-2.6.22.y.git;a=commitdiff_plain;h=fc370f287729799250e04cb1d880140d14612bf0
-
-[PATCH] x86_64: Zero extend all registers after ptrace in 32bit entry path.
-
-Strictly it's only needed for eax.
-
-It actually does a little more than strictly needed -- the other registers
-are already zero extended.
-
-Also remove the now unnecessary and non functional compat task check
-in ptrace.
-
-This is CVE-2007-4573
-
-Found by Wojciech Purczynski
-
-Signed-off-by: Andi Kleen <ak@suse.de>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: Chris Wright <chrisw@sous-sol.org>
----
-
-diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
-index 47565c3..0bc623a 100644
---- a/arch/x86_64/ia32/ia32entry.S
-+++ b/arch/x86_64/ia32/ia32entry.S
-@@ -38,6 +38,18 @@
- movq %rax,R8(%rsp)
- .endm
-
-+ .macro LOAD_ARGS32 offset
-+ movl \offset(%rsp),%r11d
-+ movl \offset+8(%rsp),%r10d
-+ movl \offset+16(%rsp),%r9d
-+ movl \offset+24(%rsp),%r8d
-+ movl \offset+40(%rsp),%ecx
-+ movl \offset+48(%rsp),%edx
-+ movl \offset+56(%rsp),%esi
-+ movl \offset+64(%rsp),%edi
-+ movl \offset+72(%rsp),%eax
-+ .endm
-+
- .macro CFI_STARTPROC32 simple
- CFI_STARTPROC \simple
- CFI_UNDEFINED r8
-@@ -152,7 +164,7 @@ sysenter_tracesys:
- movq $-ENOSYS,RAX(%rsp) /* really needed? */
- movq %rsp,%rdi /* &pt_regs -> arg1 */
- call syscall_trace_enter
-- LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
-+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
- RESTORE_REST
- movl %ebp, %ebp
- /* no need to do an access_ok check here because rbp has been
-@@ -255,7 +267,7 @@ cstar_tracesys:
- movq $-ENOSYS,RAX(%rsp) /* really needed? */
- movq %rsp,%rdi /* &pt_regs -> arg1 */
- call syscall_trace_enter
-- LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
-+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
- RESTORE_REST
- movl RSP-ARGOFFSET(%rsp), %r8d
- /* no need to do an access_ok check here because r8 has been
-@@ -333,7 +345,7 @@ ia32_tracesys:
- movq $-ENOSYS,RAX(%rsp) /* really needed? */
- movq %rsp,%rdi /* &pt_regs -> arg1 */
- call syscall_trace_enter
-- LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
-+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
- RESTORE_REST
- jmp ia32_do_syscall
- END(ia32_syscall)
-diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
-index 9409117..8d89d8c 100644
---- a/arch/x86_64/kernel/ptrace.c
-+++ b/arch/x86_64/kernel/ptrace.c
-@@ -223,10 +223,6 @@ static int putreg(struct task_struct *child,
- {
- unsigned long tmp;
-
-- /* Some code in the 64bit emulation may not be 64bit clean.
-- Don't take any chances. */
-- if (test_tsk_thread_flag(child, TIF_IA32))
-- value &= 0xffffffff;
- switch (regno) {
- case offsetof(struct user_regs_struct,fs):
- if (value && (value & 3) != 3)
diff -uNr linux-2.6.16.orig/drivers/net/forcedeth.c linux-2.6.16/drivers/net/forcedeth.c
---- linux-2.6.16.orig/drivers/net/forcedeth.c 2006-03-20 06:53:29.000000000 +0100
-+++ linux-2.6.16/drivers/net/forcedeth.c 2008-11-02 20:40:40.000000000 +0100
-@@ -102,6 +102,19 @@
+--- linux-2.6.16.orig/drivers/net/forcedeth.c 2007-06-23 20:16:01.572248000 +0200
++++ linux-2.6.16/drivers/net/forcedeth.c 2006-10-21 14:44:00.000000000 +0200
+@@ -102,6 +102,17 @@
* 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
* 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
* 0.49: 10 Dec 2005: Fix tso for large buffers.
+ * 0.58: 20 May 2006: Optimized rx and tx data paths.
+ * 0.59: 31 May 2006: Added support for sideband management unit.
+ * 0.60: 31 May 2006: Added support for recoverable error.
-+ * 0.61: 18 Jul 2006: Added support for suspend/resume.
-+ * 0.62: 16 Jan 2007: Fixed statistics, mgmt communication, and low phy speed on S5.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
-@@ -113,8 +126,9 @@
+@@ -113,7 +124,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.49"
-+#define FORCEDETH_VERSION "0.62-Driver Package V1.23"
++#define FORCEDETH_VERSION "0.60-Driver Package V1.21"
#define DRV_NAME "forcedeth"
-+#define DRV_DATE "2007/04/06"
#include <linux/module.h>
- #include <linux/types.h>
-@@ -131,18 +145,240 @@
+@@ -131,34 +142,189 @@
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
+#include <linux/rtnetlink.h>
-+#include <linux/reboot.h>
+#include <linux/version.h>
+
+#define RHES3 0
+#define RHES4 2
+#define SUSE10 3
+#define FEDORA5 4
-+#define FEDORA6 5
+
-+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17)
-+#define NVVER FEDORA6
-+#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13)
++
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13)
+#define NVVER FEDORA5
+#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
+#define NVVER SUSE10
#define dprintk(x...) do { } while (0)
#endif
-+#define DPRINTK(nlevel,klevel,args...) (void)((debug & NETIF_MSG_##nlevel) && printk(klevel args))
-+
+/* it should add in pci_ids.h */
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_12
+#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_23
+#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453
+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_24
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_24 0x054c
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_25
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_25 0x054d
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_26
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_26 0x054e
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_27
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_27 0x054f
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_28
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_28 0x07DC
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_29
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_29 0x07DD
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_30
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_30 0x07DE
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_31
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_31 0x07DF
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_32
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_32 0x0760
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_33
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_33 0x0761
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_34
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_34 0x0762
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_35
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_35 0x0763
-+#endif
+
+/* it should add in mii.h */
+#ifndef ADVERTISE_1000HALF
+#define __iomem
+#endif
+
-+#ifndef __bitwise
-+#define __bitwise
-+#endif
-+
-+#ifndef __force
-+#define __force
-+#endif
-+
-+#ifndef PCI_D0
-+#define PCI_D0 ((int __bitwise __force) 0)
-+#endif
-+
-+#ifndef PM_EVENT_SUSPEND
-+#define PM_EVENT_SUSPEND 2
-+#endif
-+
-+#if NVVER < SUSE10
-+#define pm_message_t u32
-+#endif
-+
+/* rx/tx mac addr + type + vlan + align + slack*/
+#ifndef RX_NIC_BUFSIZE
+#define RX_NIC_BUFSIZE (ETH_DATA_LEN + 64)
+ u16 vector; /* kernel uses to write allocated vector */
+ u16 entry; /* driver uses to specify entry, OS writes */
+};
-+#endif
-+
-+#ifndef PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET
-+#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET 0x00
-+#endif
-+
-+#ifndef PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET
-+#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 0x04
-+#endif
-+
-+#ifndef PCI_MSIX_ENTRY_DATA_OFFSET
-+#define PCI_MSIX_ENTRY_DATA_OFFSET 0x08
-+#endif
-+
-+#ifndef PCI_MSIX_ENTRY_SIZE
-+#define PCI_MSIX_ENTRY_SIZE 16
-+#endif
-+
-+#ifndef PCI_MSIX_FLAGS_BIRMASK
-+#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
-+#endif
-+
-+#ifndef PCI_CAP_ID_MSIX
-+#define PCI_CAP_ID_MSIX 0x11
+#endif
/*
* Hardware access:
-@@ -153,11 +389,40 @@
+ */
+
+-#define DEV_NEED_TIMERIRQ 0x0000 /* work-around for Wake-On-Lan */
+-#define DEV_NEED_TIMERIRQ_ORIG 0x0001 /* set the timer irq flag in the irq mask */
++#define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
+ #define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
+#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
+#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
+#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
-+#define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */
-+#define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */
-+#define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */
-+#define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */
-+#define DEV_HAS_CORRECT_MACADDR 0x4000 /* device supports correct mac address */
++#define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */
++#define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */
++#define DEV_HAS_MGMT_UNIT 0x1000 /* device supports management unit */
+
+#define NVIDIA_ETHERNET_ID(deviceid,nv_driver_data) {\
+ .vendor = PCI_VENDOR_ID_NVIDIA, \
+
+#define Mv_LED_Control 16
+#define Mv_Page_Address 22
-+#define Mv_LED_FORCE_OFF 0x88
-+#define Mv_LED_DUAL_MODE3 0x40
-+
-+struct nvmsi_msg{
-+ u32 address_lo;
-+ u32 address_hi;
-+ u32 data;
-+};
enum {
NvRegIrqStatus = 0x000,
NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
-@@ -166,14 +431,18 @@
+@@ -167,14 +333,18 @@
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3
-@@ -185,25 +454,45 @@
+@@ -186,25 +356,45 @@
NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 970
#define NVREG_POLL_DEFAULT_CPU 13
NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01
-@@ -213,10 +502,12 @@
+@@ -214,10 +404,12 @@
#define NVREG_RNDSEED_FORCE2 0x2d00
#define NVREG_RNDSEED_FORCE3 0x7400
NvRegMacAddrA = 0xA8,
NvRegMacAddrB = 0xAC,
NvRegMulticastAddrA = 0xB0,
-@@ -233,7 +524,8 @@
+@@ -234,7 +426,8 @@
NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
-@@ -242,8 +534,10 @@
+@@ -243,8 +436,10 @@
#define NVREG_LINKSPEED_MASK (0xFFF)
NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
-@@ -252,15 +546,22 @@
+@@ -253,15 +448,22 @@
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
-@@ -290,6 +591,7 @@
+@@ -291,6 +493,7 @@
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
NvRegPatternCRC = 0x204,
NvRegPatternMask = 0x208,
NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
-@@ -303,6 +605,43 @@
+@@ -304,6 +507,43 @@
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
};
/* Big endian: should work, but is untested */
-@@ -314,7 +653,7 @@
+@@ -315,7 +555,7 @@
struct ring_desc_ex {
u32 PacketBufferHigh;
u32 PacketBufferLow;
u32 FlagLen;
};
-@@ -335,7 +674,7 @@
+@@ -336,7 +576,7 @@
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_VALID (1<<31)
#define NV_TX2_LASTPACKET (1<<29)
-@@ -346,7 +685,7 @@
+@@ -347,7 +587,7 @@
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
-@@ -355,6 +694,8 @@
+@@ -356,6 +596,8 @@
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)
#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBSTRACT1 (1<<18)
-@@ -365,7 +706,7 @@
+@@ -366,7 +608,7 @@
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_AVAIL (1<<31)
#define NV_RX2_CHECKSUMMASK (0x1C000000)
-@@ -382,11 +723,16 @@
+@@ -383,11 +625,15 @@
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
/* Miscelaneous hardware related defines: */
-#define NV_PCI_REGSZ 0x270
+#define NV_PCI_REGSZ_VER1 0x270
-+#define NV_PCI_REGSZ_VER2 0x2d4
-+#define NV_PCI_REGSZ_VER3 0x604
++#define NV_PCI_REGSZ_VER2 0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
-@@ -403,6 +749,7 @@
+@@ -404,6 +650,7 @@
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4
-@@ -410,16 +757,18 @@
+@@ -411,16 +658,18 @@
/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)
/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS (64)
-@@ -433,6 +782,7 @@
+@@ -434,6 +683,7 @@
#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
/*
* desc_ver values:
-@@ -448,16 +798,38 @@
+@@ -449,16 +699,37 @@
/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
-#define PHY_INIT6 0x02000
+#define PHYID2_MODEL_MASK 0x03f0
+#define PHY_MODEL_MARVELL_E3016 0x220
-+#define PHY_MODEL_MARVELL_E1011 0xb0
+#define PHY_MARVELL_E3016_INITMASK 0x0300
+#define PHY_CICADA_INIT1 0x0f000
+#define PHY_CICADA_INIT2 0x0e00
#define PHY_GIGABIT 0x0100
#define PHY_TIMEOUT 0x1
-@@ -467,14 +839,148 @@
+@@ -468,14 +739,148 @@
#define PHY_1000 0x2
#define PHY_HALF 0x100
+#define NV_MSI_X_VECTOR_TX 0x1
+#define NV_MSI_X_VECTOR_OTHER 0x2
+
++/* statistics */
++#define NV_STATS_COUNT_SW 10
++
+#define NVLAN_DISABLE_ALL_FEATURES do { \
+ msi = NV_MSI_INT_DISABLED; \
+ msix = NV_MSIX_INT_DISABLED; \
+ u64 rx_pause;
+ u64 rx_drop_frame;
+};
-+#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
-+#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 4)
-+#define NV_DEV_STATISTICS_SW_COUNT 10
+
+/* diagnostics */
+#define NV_TEST_COUNT_BASE 3
+ { "interrupt (offline) " },
+ { "loopback (offline) " }
+};
-
++
+struct register_test {
+ u32 reg;
+ u32 mask;
+ { NvRegWakeUpFlags, 0x07777 },
+ { 0,0 }
+};
-+
+
+struct nv_skb_map {
+ struct sk_buff *skb;
+ dma_addr_t dma;
/*
* SMP locking:
-@@ -489,57 +995,105 @@
+@@ -490,11 +895,48 @@
/* in dev: base, irq */
struct fe_priv {
int in_shutdown;
u32 linkspeed;
int duplex;
-+ int speed_duplex;
- int autoneg;
- int fixed_mode;
+@@ -503,44 +945,46 @@
int phyaddr;
int wolenabled;
unsigned int phy_oui;
+ /* flow control */
+ u32 pause_flags;
+ u32 led_stats[3];
-+ u32 saved_config_space[64];
-+ u32 saved_nvregphyinterface;
-+#if NVVER < SUSE10
-+ u32 pci_state[16];
-+#endif
-+ /* msix table */
-+ struct nvmsi_msg nvmsg[NV_MSI_X_MAX_VECTORS];
-+ unsigned long msix_pa_addr;
};
/*
-@@ -554,8 +1108,10 @@
+@@ -555,8 +999,10 @@
* Throughput Mode: Every tx and rx packet will generate an interrupt.
* CPU Mode: Interrupts are controlled by a timer.
*/
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
/*
-@@ -567,14 +1123,221 @@
+@@ -568,14 +1014,213 @@
*/
static int poll_interval = -1;
+};
+static int tagging_8021pq = NV_8021PQ_ENABLED;
+
-+enum {
-+ NV_LOW_POWER_DISABLED,
-+ NV_LOW_POWER_ENABLED
-+};
-+static int lowpowerspeed = NV_LOW_POWER_ENABLED;
-+
-+static int debug = 0;
-+
+#if NVVER < RHES4
+static inline unsigned long nv_msecs_to_jiffies(const unsigned int m)
+{
}
static inline void pci_push(u8 __iomem *base)
-@@ -612,22 +1375,137 @@
+@@ -613,78 +1258,247 @@
return 0;
}
- reg = readl(base + NvRegMIIControl);
- if (reg & NVREG_MIICTL_INUSE) {
- writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
+- udelay(NV_MIIBUSY_DELAY);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ if (rxtx_flags & NV_SETUP_RX_RING) {
+ writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
+ writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+ writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
+ }
-+ }
+ }
+}
-+
+
+- reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
+- if (value != MII_READ) {
+- writel(value, base + NvRegMIIData);
+- reg |= NVREG_MIICTL_WRITE;
+- }
+- writel(reg, base + NvRegMIIControl);
+static void free_rings(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
-+
+
+- if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
+- NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
+- dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
+- dev->name, miireg, addr);
+- retval = -1;
+- } else if (value != MII_READ) {
+- /* it was a write operation - fewer failures are detectable */
+- dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
+- dev->name, value, miireg, addr);
+- retval = 0;
+- } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
+- dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
+- dev->name, miireg, addr);
+- retval = -1;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ if(np->rx_ring.orig)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
+ np->rx_ring.orig, np->ring_addr);
-+ } else {
+ } else {
+- retval = readl(base + NvRegMIIData);
+- dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
+- dev->name, miireg, addr, retval);
+ if (np->rx_ring.ex)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
+ np->rx_ring.ex, np->ring_addr);
-+ }
+ }
+-
+- return retval;
+ if (np->rx_skb)
+ kfree(np->rx_skb);
+ if (np->tx_skb)
+ kfree(np->tx_skb);
-+}
-+
+ }
+
+-static int phy_reset(struct net_device *dev)
+static int using_multi_irqs(struct net_device *dev)
-+{
+ {
+- struct fe_priv *np = netdev_priv(dev);
+- u32 miicontrol;
+- unsigned int tries = 0;
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_enable_irq: begin\n",dev->name);
+ /* modify network device class id */
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_disable_irq: begin\n",dev->name);
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ reg = readl(base + NvRegMIIControl);
+ if (reg & NVREG_MIICTL_INUSE) {
+ writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
- udelay(NV_MIIBUSY_DELAY);
- }
-
-@@ -661,29 +1539,112 @@
- return retval;
- }
-
--static int phy_reset(struct net_device *dev)
++ udelay(NV_MIIBUSY_DELAY);
++ }
++
++ reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
++ if (value != MII_READ) {
++ writel(value, base + NvRegMIIData);
++ reg |= NVREG_MIICTL_WRITE;
++ }
++ writel(reg, base + NvRegMIIControl);
++
++ if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
++ NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
++ dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
++ dev->name, miireg, addr);
++ retval = -1;
++ } else if (value != MII_READ) {
++ /* it was a write operation - fewer failures are detectable */
++ dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
++ dev->name, value, miireg, addr);
++ retval = 0;
++ } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
++ dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
++ dev->name, miireg, addr);
++ retval = -1;
++ } else {
++ retval = readl(base + NvRegMIIData);
++ dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
++ dev->name, miireg, addr, retval);
++ }
++
++ return retval;
++}
++
+static void nv_save_LED_stats(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ dprintk(KERN_DEBUG "%s: save LED reg%d: value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
+ }
+
++ reg = Mv_Page_Address;
++ value = 0;
++ mii_rw(dev,np->phyaddr,reg,value);
++ udelay(5);
+}
+
+static void nv_restore_LED_stats(struct net_device *dev)
+ u32 reg=0;
+ u32 value=0;
+ int i=0;
-+
+
+- miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+- miicontrol |= BMCR_RESET;
+ reg = Mv_Page_Address;
+ value = 3;
+ mii_rw(dev,np->phyaddr,reg,value);
+ dprintk(KERN_DEBUG "%s: restore LED reg%d: value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
+ }
+
-+}
-+
-+static void nv_LED_on(struct net_device *dev)
-+{
-+ struct fe_priv *np = get_nvpriv(dev);
-+ u32 reg=0;
-+ u32 value=0;
-+
+ reg = Mv_Page_Address;
-+ value = 3;
++ value = 0;
+ mii_rw(dev,np->phyaddr,reg,value);
+ udelay(5);
-+
-+ reg = Mv_LED_Control;
-+ mii_rw(dev,np->phyaddr,reg,Mv_LED_DUAL_MODE3);
-+
+}
+
-+static void nv_LED_off(struct net_device *dev)
++static int phy_reset(struct net_device *dev, u32 bmcr_setup)
+{
+ struct fe_priv *np = get_nvpriv(dev);
-+ u32 reg=0;
-+ u32 value=0;
-+
-+ reg = Mv_Page_Address;
-+ value = 3;
-+ mii_rw(dev,np->phyaddr,reg,value);
-+ udelay(5);
-+
-+ reg = Mv_LED_Control;
-+ mii_rw(dev,np->phyaddr,reg,Mv_LED_FORCE_OFF);
-+ udelay(1);
-+
-+}
++ u32 miicontrol;
++ unsigned int tries = 0;
+
-+static int phy_reset(struct net_device *dev, u32 bmcr_setup)
- {
-- struct fe_priv *np = netdev_priv(dev);
-+ struct fe_priv *np = get_nvpriv(dev);
- u32 miicontrol;
- unsigned int tries = 0;
-
-- miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-- miicontrol |= BMCR_RESET;
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: phy_reset: begin\n",dev->name);
+ /**/
-+ if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011) {
-+ nv_save_LED_stats(dev);
-+ }
++ nv_save_LED_stats(dev);
+ miicontrol = BMCR_RESET | bmcr_setup;
if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
return -1;
if (tries++ > 100)
return -1;
}
-+ if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011) {
-+ nv_restore_LED_stats(dev);
-+ }
++ nv_restore_LED_stats(dev);
+
return 0;
}
-@@ -693,9 +1654,36 @@
+@@ -694,9 +1508,36 @@
u8 __iomem *base = get_hwbase(dev);
u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: phy_init: begin\n",dev->name);
+ /* phy errata for E3016 phy */
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
+ reg &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-+ if (np->speed_duplex == NV_SPEED_DUPLEX_AUTO)
++ if (speed_duplex == NV_SPEED_DUPLEX_AUTO)
+ reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL);
-+ if (np->speed_duplex == NV_SPEED_DUPLEX_10_HALF_DUPLEX)
++ if (speed_duplex == NV_SPEED_DUPLEX_10_HALF_DUPLEX)
+ reg |= ADVERTISE_10HALF;
-+ if (np->speed_duplex == NV_SPEED_DUPLEX_10_FULL_DUPLEX)
++ if (speed_duplex == NV_SPEED_DUPLEX_10_FULL_DUPLEX)
+ reg |= ADVERTISE_10FULL;
-+ if (np->speed_duplex == NV_SPEED_DUPLEX_100_HALF_DUPLEX)
++ if (speed_duplex == NV_SPEED_DUPLEX_100_HALF_DUPLEX)
+ reg |= ADVERTISE_100HALF;
-+ if (np->speed_duplex == NV_SPEED_DUPLEX_100_FULL_DUPLEX)
++ if (speed_duplex == NV_SPEED_DUPLEX_100_FULL_DUPLEX)
+ reg |= ADVERTISE_100FULL;
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
+ reg |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
-@@ -708,14 +1696,18 @@
+@@ -709,14 +1550,18 @@
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
if (mii_status & PHY_GIGABIT) {
np->gigabit = PHY_GIGABIT;
mii_control_1000 &= ~ADVERTISE_1000HALF;
- if (phyinterface & PHY_RGMII)
+ if (phyinterface & PHY_RGMII &&
-+ (np->speed_duplex == NV_SPEED_DUPLEX_AUTO ||
-+ (np->speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && np->autoneg == AUTONEG_ENABLE)))
++ (speed_duplex == NV_SPEED_DUPLEX_AUTO ||
++ (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && autoneg == AUTONEG_ENABLE)))
mii_control_1000 |= ADVERTISE_1000FULL;
- else
+ else {
-+ if (np->speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && np->autoneg == AUTONEG_DISABLE)
++ if (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && autoneg == AUTONEG_DISABLE)
+ printk(KERN_INFO "%s: 1000mpbs full only allowed with autoneg\n", pci_name(np->pci_dev));
mii_control_1000 &= ~ADVERTISE_1000FULL;
-
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
-@@ -723,8 +1715,25 @@
+@@ -724,8 +1569,25 @@
else
np->gigabit = 0;
- /* reset the phy */
- if (phy_reset(dev)) {
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-+ if (np->autoneg == AUTONEG_DISABLE){
++ if (autoneg == AUTONEG_DISABLE){
+ np->pause_flags &= ~(NV_PAUSEFRAME_RX_ENABLE | NV_PAUSEFRAME_TX_ENABLE);
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
-@@ -732,14 +1741,14 @@
+@@ -733,14 +1595,14 @@
/* phy vendor specific configuration */
if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
-@@ -747,18 +1756,92 @@
+@@ -748,18 +1610,92 @@
}
if (np->phy_oui == PHY_OUI_CICADA) {
phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
- mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
- if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
- return PHY_ERROR;
-+ if (np->autoneg == AUTONEG_ENABLE) {
++ if (autoneg == AUTONEG_ENABLE) {
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
}
return 0;
-@@ -766,18 +1849,24 @@
+@@ -767,18 +1703,23 @@
static void nv_start_rx(struct net_device *dev)
{
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
+ u32 rx_ctrl = readl(base + NvRegReceiverControl);
-+
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
-- dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
+ dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
/* Already running? Stop it. */
- if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
- writel(0, base + NvRegReceiverControl);
dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
dev->name, np->duplex, np->linkspeed);
pci_push(base);
-@@ -785,47 +1874,66 @@
+@@ -786,44 +1727,63 @@
static void nv_stop_rx(struct net_device *dev)
{
u8 __iomem *base = get_hwbase(dev);
+ u32 rx_ctrl = readl(base + NvRegReceiverControl);
-- dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
+ dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
- writel(0, base + NvRegReceiverControl);
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ if (!np->mac_in_use)
+ rx_ctrl &= ~NVREG_RCVCTL_START;
+ else
u8 __iomem *base = get_hwbase(dev);
+ u32 tx_ctrl = readl(base + NvRegTransmitterControl);
-- dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
+ dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
- writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ tx_ctrl |= NVREG_XMITCTL_START;
+ if (np->mac_in_use)
+ tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
u8 __iomem *base = get_hwbase(dev);
+ u32 tx_ctrl = readl(base + NvRegTransmitterControl);
-- dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
+ dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
- writel(0, base + NvRegTransmitterControl);
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ if (!np->mac_in_use)
+ tx_ctrl &= ~NVREG_XMITCTL_START;
+ else
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
-- dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
- writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
- pci_push(base);
- udelay(NV_TXRX_RESET_DELAY);
-@@ -833,140 +1941,301 @@
+ dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
+@@ -834,140 +1794,301 @@
pci_push(base);
}
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
+ writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
+ pci_push(base);
+ writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
return 1;
} else {
return 0;
-@@ -975,14 +2244,19 @@
+@@ -976,14 +2097,19 @@
static void nv_drain_tx(struct net_device *dev)
{
if (nv_release_txskb(dev, i))
np->stats.tx_dropped++;
}
-@@ -990,20 +2264,25 @@
+@@ -991,20 +2117,25 @@
static void nv_drain_rx(struct net_device *dev)
{
}
}
}
-@@ -1020,52 +2299,51 @@
+@@ -1021,52 +2152,51 @@
*/
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct ring_desc* prev_tx;
+ struct nv_skb_map* prev_tx_ctx;
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ //dprintk(KERN_DEBUG "%s: nv_start_xmit \n", dev->name);
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
} while(size);
/* setup the fragments */
-@@ -1075,68 +2353,174 @@
+@@ -1076,34 +2206,133 @@
offset = 0;
do {
+ prev_tx_ctx->skb = skb;
+
+#ifdef NETIF_F_TSO
-+#if NVVER > FEDORA5
-+ if (skb_shinfo(skb)->gso_size)
-+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
-+#else
+ if (skb_shinfo(skb)->tso_size)
+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
-+#endif
+ else
+#endif
+ tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
+
+ u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ //dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized \n", dev->name);
+ /* add fragments to entries count */
+ for (i = 0; i < fragments; i++) {
+ entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
+ prev_tx_ctx->skb = skb;
#ifdef NETIF_F_TSO
-+#if NVVER > FEDORA5
-+ if (skb_shinfo(skb)->gso_size)
-+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
-+#else
if (skb_shinfo(skb)->tso_size)
- tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
-+#endif
- else
+@@ -1112,32 +2341,29 @@
#endif
tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
}
/*
-@@ -1144,30 +2528,26 @@
+@@ -1145,30 +2371,26 @@
*
* Caller must own np->lock.
*/
- while (np->nic_tx != np->next_tx) {
- i = np->nic_tx % TX_RING;
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ //dprintk(KERN_DEBUG "%s: nv_tx_done \n", dev->name);
+ while ((np->get_tx.orig != put_tx) &&
+ !((Flags = le32_to_cpu(np->get_tx.orig->FlagLen)) & NV_TX_VALID)) {
+ dprintk(KERN_DEBUG "%s: nv_tx_done:NVLAN tx done\n", dev->name);
if (Flags & NV_TX_UNDERFLOW)
np->stats.tx_fifo_errors++;
if (Flags & NV_TX_CARRIERLOST)
-@@ -1175,14 +2555,15 @@
+@@ -1176,14 +2398,15 @@
np->stats.tx_errors++;
} else {
np->stats.tx_packets++;
if (Flags & NV_TX2_UNDERFLOW)
np->stats.tx_fifo_errors++;
if (Flags & NV_TX2_CARRIERLOST)
-@@ -1190,15 +2571,58 @@
+@@ -1191,15 +2414,59 @@
np->stats.tx_errors++;
} else {
np->stats.tx_packets++;
+ struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
+ struct ring_desc_ex* put_tx = np->put_tx.ex;
+
++ //dprintk(KERN_DEBUG "%s: nv_tx_done_optimized \n", dev->name);
+ while ((np->get_tx.ex != put_tx) &&
+ !((Flags = le32_to_cpu(np->get_tx.ex->FlagLen)) & NV_TX_VALID) &&
+ (max_work-- > 0)) {
}
/*
-@@ -1207,20 +2631,34 @@
+@@ -1208,20 +2475,34 @@
*/
static void nv_tx_timeout(struct net_device *dev)
{
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
+ u32 status;
-+
+
+- printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
+- readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
+ if (!netif_running(dev))
+ return;
+
+ status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+ else
+ status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
-
-- printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
-- readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
++
+ printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
{
printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
i,
readl(base + i + 0), readl(base + i + 4),
-@@ -1229,7 +2667,7 @@
+@@ -1230,7 +2511,7 @@
readl(base + i + 24), readl(base + i + 28));
}
printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
i,
-@@ -1260,29 +2698,36 @@
+@@ -1261,29 +2542,35 @@
}
}
nv_stop_tx(dev);
/* 2) check that the packets were not sent already: */
-- nv_tx_done(dev);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-+ nv_tx_done(dev);
+ nv_tx_done(dev);
+ else
+ nv_tx_done_optimized(dev, np->tx_ring_size);
+ np->get_tx.orig = np->put_tx.orig = np->first_tx.orig;
else
- writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
-- netif_wake_queue(dev);
+ np->get_tx.ex = np->put_tx.ex = np->first_tx.ex;
+ np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx;
+ setup_hw_rings(dev, NV_SETUP_TX_RING);
+ netif_wake_queue(dev);
}
-+ netif_wake_queue(dev);
/* 4) restart tx engine */
nv_start_tx(dev);
-+
spin_unlock_irq(&np->lock);
+ nv_enable_irq(dev);
}
/*
-@@ -1338,41 +2783,23 @@
+@@ -1339,41 +2626,23 @@
}
}
-
- if (Flags & NV_RX_AVAIL)
- break; /* still owned by hardware, */
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ //dprintk(KERN_DEBUG "%s: nv_rx_process \n", dev->name);
+ while((np->get_rx.orig != np->put_rx.orig) &&
+ !((Flags = le32_to_cpu(np->get_rx.orig->FlagLen)) & NV_RX_AVAIL)) {
+
{
int j;
-@@ -1380,112 +2807,197 @@
+@@ -1381,112 +2650,198 @@
for (j=0; j<64; j++) {
if ((j%16) == 0)
dprintk("\n%03x:", j);
+ struct sk_buff *skb;
+ int len;
+
++// dprintk(KERN_DEBUG "%s: nv_rx_process_optimized \n", dev->name);
+ while((np->get_rx.ex != np->put_rx.ex) &&
+ !((Flags = le32_to_cpu(np->get_rx.ex->FlagLen)) & NV_RX2_AVAIL) &&
+ (rx_processed_cnt++ < max_work)) {
if (dev->mtu <= ETH_DATA_LEN)
np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
-@@ -1499,7 +3011,7 @@
+@@ -1500,7 +2855,7 @@
*/
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
int old_mtu;
if (new_mtu < 64 || new_mtu > np->pkt_limit)
-@@ -1523,8 +3035,12 @@
+@@ -1524,7 +2879,7 @@
* guessed, there is probably a simpler approach.
* Changing the MTU is a rare event, it shouldn't matter.
*/
- disable_irq(dev->irq);
+ nv_disable_irq(dev);
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
spin_lock_bh(&dev->xmit_lock);
-+#endif
spin_lock(&np->lock);
/* stop engines */
- nv_stop_rx(dev);
-@@ -1534,22 +3050,15 @@
+@@ -1535,22 +2890,15 @@
nv_drain_rx(dev);
nv_drain_tx(dev);
/* reinit driver view of the rx queue */
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
-@@ -1559,8 +3068,12 @@
- nv_start_rx(dev);
+@@ -1561,7 +2909,7 @@
nv_start_tx(dev);
spin_unlock(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
spin_unlock_bh(&dev->xmit_lock);
- enable_irq(dev->irq);
-+#endif
+ nv_enable_irq(dev);
}
return 0;
}
-@@ -1571,11 +3084,11 @@
- u32 mac[2];
-
- mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
-- (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
-+ (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
- mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
--
- writel(mac[0], base + NvRegMacAddrA);
- writel(mac[1], base + NvRegMacAddrB);
-+
- }
-
- /*
-@@ -1584,17 +3097,22 @@
+@@ -1585,12 +2933,13 @@
*/
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
if(!is_valid_ether_addr(macaddr->sa_data))
return -EADDRNOTAVAIL;
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_set_mac_address \n", dev->name);
/* synchronized against open : rtnl_lock() held by caller */
memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
- if (netif_running(dev)) {
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
- spin_lock_bh(&dev->xmit_lock);
-+#endif
- spin_lock_irq(&np->lock);
-
- /* stop rx engine */
-@@ -1606,7 +3124,11 @@
- /* restart rx engine */
- nv_start_rx(dev);
- spin_unlock_irq(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
- spin_unlock_bh(&dev->xmit_lock);
-+#endif
- } else {
- nv_copy_mac_to_hw(dev);
- }
-@@ -1619,20 +3141,20 @@
+@@ -1620,20 +2969,20 @@
*/
static void nv_set_multicast(struct net_device *dev)
{
if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
u32 alwaysOff[2];
-@@ -1677,6 +3199,35 @@
+@@ -1678,6 +3027,35 @@
spin_unlock_irq(&np->lock);
}
/**
* nv_update_linkspeed: Setup the MAC according to the link partner
* @dev: Network device to be configured
-@@ -1690,14 +3241,16 @@
+@@ -1691,14 +3069,16 @@
*/
static int nv_update_linkspeed(struct net_device *dev)
{
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
-@@ -1714,7 +3267,7 @@
+@@ -1715,7 +3095,7 @@
goto set_speed;
}
dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
dev->name, np->fixed_mode);
if (np->fixed_mode & LPA_100FULL) {
-@@ -1743,10 +3296,14 @@
+@@ -1744,10 +3124,14 @@
goto set_speed;
}
if ((control_1000 & ADVERTISE_1000FULL) &&
(status_1000 & LPA_1000FULL)) {
-@@ -1758,27 +3315,22 @@
+@@ -1759,27 +3143,22 @@
}
}
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
}
-@@ -1815,12 +3367,71 @@
+@@ -1816,13 +3195,72 @@
phyreg |= PHY_1000;
writel(phyreg, base + NvRegPhyInterface);
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
+- return retval;
+ pause_flags = 0;
+ /* setup pause frame */
+ if (np->duplex != 0) {
+ }
+ nv_update_pause(dev, pause_flags);
+
- return retval;
++ return retval;
}
-@@ -1858,24 +3469,28 @@
+ static void nv_linkchange(struct net_device *dev)
+@@ -1859,7 +3297,7 @@
static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
-- u32 events;
-+ u32 events,mask;
+ u32 events;
int i;
-
-- dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
+@@ -1867,16 +3305,19 @@
+ dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
for (i=0; ; i++) {
- events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+ }
pci_push(base);
dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
-- if (!(events & np->irqmask))
-+ mask = readl(base + NvRegIrqMask);
-+ if (!(events & mask))
+ if (!(events & np->irqmask))
break;
- spin_lock(&np->lock);
nv_rx_process(dev);
if (nv_alloc_rx(dev)) {
-@@ -1907,11 +3522,16 @@
+@@ -1908,11 +3349,16 @@
if (i > max_interrupt_work) {
spin_lock(&np->lock);
/* disable interrupts on the nic */
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
spin_unlock(&np->lock);
break;
-@@ -1923,310 +3543,1950 @@
+@@ -1924,285 +3370,1749 @@
return IRQ_RETVAL(i);
}
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
-+ u32 events,mask;
++ u32 events;
+ int i = 1;
- disable_irq(dev->irq);
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+ }
++ if (events & np->irqmask) {
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void nv_poll_controller(struct net_device *dev)
- nv_do_nic_poll((unsigned long) dev);
-}
-#endif
-+ mask = readl(base + NvRegIrqMask);
-+ if (events & mask) {
++ nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
-static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, "forcedeth");
- strcpy(info->version, FORCEDETH_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
-+ nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
-+
+ if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
+ if (unlikely(nv_alloc_rx_optimized(dev))) {
+ spin_lock(&np->lock);
- if (np->wolenabled)
- wolinfo->wolopts = WAKE_MAGIC;
- spin_unlock_irq(&np->lock);
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
+
+ for (i=0; ; i++) {
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
+ u32 events;
+ int i;
+
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
+
+ for (i=0; ; i++) {
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
- switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
- case NVREG_LINKSPEED_10:
- ecmd->speed = SPEED_10;
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
+
+ for (i=0; ; i++) {
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
- SUPPORTED_MII);
- if (np->gigabit == PHY_GIGABIT)
- ecmd->supported |= SUPPORTED_1000baseT_Full;
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
- ecmd->phy_address = np->phyaddr;
- ecmd->transceiver = XCVR_EXTERNAL;
- if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
- return -EINVAL;
- if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+- return -EINVAL;
+- } else {
+- return -EINVAL;
+- }
+ if (np->msi_flags & NV_MSI_X_CAPABLE) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ np->msi_x_entry[i].entry = i;
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
-+
+
+- spin_lock_irq(&np->lock);
+- if (ecmd->autoneg == AUTONEG_ENABLE) {
+- int adv, bmcr;
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ goto out_err;
+ }
-+
+
+- np->autoneg = 1;
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIMap0);
+ writel(0, base + NvRegMSIMap1);
+ np->recover_error = 0;
+ printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
+ if (netif_running(dev)) {
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
+ spin_lock_bh(&dev->xmit_lock);
-+#endif
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ spin_unlock(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
+ spin_unlock_bh(&dev->xmit_lock);
-+#endif
+ }
+ }
+ /* FIXME: Do we need synchronize_irq(dev->irq) here? */
+ spin_lock_irq(&np->lock);
+
+ np->estats.tx_dropped = np->stats.tx_dropped;
-+ if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
++ if (np->driver_data & DEV_HAS_STATISTICS) {
++ np->estats.tx_packets += readl(base + NvRegTxFrame);
+ np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
+ np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
+ np->estats.tx_bytes += readl(base + NvRegTxCnt);
++ np->estats.rx_bytes += readl(base + NvRegRxCnt);
+ np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
+ np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
++
+ np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
+ np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
+ np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
+ np->estats.rx_unicast += readl(base + NvRegRxUnicast);
+ np->estats.rx_multicast += readl(base + NvRegRxMulticast);
+ np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
++ np->estats.tx_deferral += readl(base + NvRegTxDef);
++ np->estats.tx_pause += readl(base + NvRegTxPause);
++ np->estats.rx_pause += readl(base + NvRegRxPause);
++ np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
+ np->estats.rx_packets =
+ np->estats.rx_unicast +
+ np->estats.rx_multicast +
+ np->estats.rx_frame_too_long +
+ np->rx_len_errors;
+
-+ if (np->driver_data & DEV_HAS_STATISTICS_V2) {
-+ np->estats.tx_deferral += readl(base + NvRegTxDef);
-+ np->estats.tx_packets += readl(base + NvRegTxFrame);
-+ np->estats.rx_bytes += readl(base + NvRegRxCnt);
-+ np->estats.tx_pause += readl(base + NvRegTxPause);
-+ np->estats.rx_pause += readl(base + NvRegRxPause);
-+ np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
-+ }
-+
+ /* copy to net_device stats */
++ np->stats.tx_packets = np->estats.tx_packets;
+ np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
+ np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
+ np->stats.tx_bytes = np->estats.tx_bytes;
++ np->stats.rx_bytes = np->estats.rx_bytes;
+ np->stats.rx_crc_errors = np->estats.rx_crc_errors;
+ np->stats.rx_over_errors = np->estats.rx_over_errors;
+ np->stats.rx_packets = np->estats.rx_packets;
+ mask |= ADVERTISED_1000baseT_Full;
+
+ if ((ecmd->advertising & mask) == 0)
- return -EINVAL;
++ return -EINVAL;
+
+ } else if (ecmd->autoneg == AUTONEG_DISABLE) {
+ /* Note: autonegotiation disable, speed 1000 intentionally
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
+ spin_lock_bh(&dev->xmit_lock);
-+#endif
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ spin_unlock(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
+ spin_unlock_bh(&dev->xmit_lock);
-+#endif
+ }
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ /* advertise only what has been requested */
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-+ if (ecmd->advertising & ADVERTISED_10baseT_Half) {
++ if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
-+ np->speed_duplex = NV_SPEED_DUPLEX_10_HALF_DUPLEX;
-+ }
-+ if (ecmd->advertising & ADVERTISED_10baseT_Full) {
++ if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
-+ np->speed_duplex = NV_SPEED_DUPLEX_10_FULL_DUPLEX;
-+ }
-+ if (ecmd->advertising & ADVERTISED_100baseT_Half) {
++ if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
-+ np->speed_duplex = NV_SPEED_DUPLEX_100_HALF_DUPLEX;
-+ }
-+ if (ecmd->advertising & ADVERTISED_100baseT_Full) {
++ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
-+ np->speed_duplex = NV_SPEED_DUPLEX_100_FULL_DUPLEX;
-+ }
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ if (np->gigabit == PHY_GIGABIT) {
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+ adv &= ~ADVERTISE_1000FULL;
-+ if (ecmd->advertising & ADVERTISED_1000baseT_Full) {
++ if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+ adv |= ADVERTISE_1000FULL;
-+ np->speed_duplex = NV_SPEED_DUPLEX_1000_FULL_DUPLEX;
-+ }
+ mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
-+
-+ if (ecmd->advertising & (ADVERTISED_10baseT_Half|ADVERTISED_10baseT_Full|ADVERTISED_100baseT_Half|ADVERTISED_100baseT_Full|ADVERTISED_1000baseT_Full))
-+ np->speed_duplex = NV_SPEED_DUPLEX_AUTO;
-+ } else {
-+ if (ecmd->advertising & (ADVERTISED_10baseT_Half|ADVERTISED_10baseT_Full|ADVERTISED_100baseT_Half|ADVERTISED_100baseT_Full))
-+ np->speed_duplex = NV_SPEED_DUPLEX_AUTO;
-+ }
++ }
+
+ if (netif_running(dev))
+ printk(KERN_INFO "%s: link down.\n", dev->name);
+
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-+ if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) {
++ if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
+ adv |= ADVERTISE_10HALF;
-+ np->speed_duplex = NV_SPEED_DUPLEX_10_HALF_DUPLEX;
-+ }
-+ if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) {
++ if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
+ adv |= ADVERTISE_10FULL;
-+ np->speed_duplex = NV_SPEED_DUPLEX_10_FULL_DUPLEX;
-+ }
-+ if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) {
++ if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
+ adv |= ADVERTISE_100HALF;
-+ np->speed_duplex = NV_SPEED_DUPLEX_100_HALF_DUPLEX;
-+ }
-+ if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) {
++ if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
+ adv |= ADVERTISE_100FULL;
-+ np->speed_duplex = NV_SPEED_DUPLEX_100_FULL_DUPLEX;
-+ }
+ np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
+ spin_lock_bh(&dev->xmit_lock);
-+#endif
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ spin_unlock(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
+ spin_unlock_bh(&dev->xmit_lock);
-+#endif
+ printk(KERN_INFO "%s: link down.\n", dev->name);
+ }
+
+ u8 __iomem *base = get_hwbase(dev);
+ u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
+ dma_addr_t ring_addr;
-+
+
+- /* advertise only what has been requested */
+- adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+- if (ecmd->advertising & ADVERTISED_10baseT_Half)
+- adv |= ADVERTISE_10HALF;
+- if (ecmd->advertising & ADVERTISED_10baseT_Full)
+- adv |= ADVERTISE_10FULL;
+- if (ecmd->advertising & ADVERTISED_100baseT_Half)
+- adv |= ADVERTISE_100HALF;
+- if (ecmd->advertising & ADVERTISED_100baseT_Full)
+- adv |= ADVERTISE_100FULL;
+- mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+ if (ring->rx_pending < RX_RING_MIN ||
+ ring->tx_pending < TX_RING_MIN ||
+ ring->rx_mini_pending != 0 ||
+ ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
+ return -EINVAL;
+ }
-+
+
+- if (np->gigabit == PHY_GIGABIT) {
+- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+- adv &= ~ADVERTISE_1000FULL;
+- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+- adv |= ADVERTISE_1000FULL;
+- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+ /* allocate new rings */
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ rxtx_ring = pci_alloc_consistent(np->pci_dev,
+
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
+ spin_lock_bh(&dev->xmit_lock);
-+#endif
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ if (nv_init_ring(dev)) {
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-+ }
+ }
+
+ /* reinit nic view of the queues */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ spin_unlock(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
+ spin_unlock_bh(&dev->xmit_lock);
-+#endif
+ nv_enable_irq(dev);
+ }
+ return 0;
+exit:
+ return -ENOMEM;
+}
-+
+
+static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
+ spin_lock_bh(&dev->xmit_lock);
-+#endif
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ spin_unlock(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
+ spin_unlock_bh(&dev->xmit_lock);
-+#endif
+ }
+
+ np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
+
+ if (netif_running(dev))
+ printk(KERN_INFO "%s: link down.\n", dev->name);
-+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ } else {
+ np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
+ if (pause->rx_pause)
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int retcode = 0;
-+
+
+ if (np->driver_data & DEV_HAS_CHECKSUM) {
+
+ if (data) {
+ spin_unlock_irq(&np->lock);
+ }
} else {
- return -EINVAL;
- }
+- int adv, bmcr;
++ return -EINVAL;
++ }
+- np->autoneg = 0;
+ return retcode;
+}
-+
-+#ifdef NETIF_F_TSO
-+static int nv_set_tso(struct net_device *dev, u32 data)
-+{
-+ struct fe_priv *np = get_nvpriv(dev);
+
+- adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+- if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
+- adv |= ADVERTISE_10HALF;
+- if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
+- adv |= ADVERTISE_10FULL;
+- if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
+- adv |= ADVERTISE_100HALF;
+- if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
+- adv |= ADVERTISE_100FULL;
+- mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+- np->fixed_mode = adv;
++#ifdef NETIF_F_TSO
++static int nv_set_tso(struct net_device *dev, u32 data)
++{
++ struct fe_priv *np = get_nvpriv(dev);
+
+ if (np->driver_data & DEV_HAS_CHECKSUM){
+#if NVVER < SUSE10
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
-+ if (np->driver_data & DEV_HAS_STATISTICS_V1)
-+ return NV_DEV_STATISTICS_V1_COUNT;
-+ else if (np->driver_data & DEV_HAS_STATISTICS_V2)
-+ return NV_DEV_STATISTICS_V2_COUNT;
-+ else
-+ return NV_DEV_STATISTICS_SW_COUNT;
++ if (np->driver_data & DEV_HAS_STATISTICS)
++ return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
++ else
++ return NV_STATS_COUNT_SW;
+}
+
+static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+ /* wait for at least one interrupt */
+ nv_msleep(100);
+
- spin_lock_irq(&np->lock);
-- if (ecmd->autoneg == AUTONEG_ENABLE) {
-- int adv, bmcr;
-
-- np->autoneg = 1;
++ spin_lock_irq(&np->lock);
++
+ /* flag should be set within ISR */
+ testcnt = np->intr_test;
+ if (!testcnt)
+ ret = 2;
-
-- /* advertise only what has been requested */
-- adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
-- if (ecmd->advertising & ADVERTISED_10baseT_Half)
-- adv |= ADVERTISE_10HALF;
-- if (ecmd->advertising & ADVERTISED_10baseT_Full)
-- adv |= ADVERTISE_10FULL;
-- if (ecmd->advertising & ADVERTISED_100baseT_Half)
-- adv |= ADVERTISE_100HALF;
-- if (ecmd->advertising & ADVERTISED_100baseT_Full)
-- adv |= ADVERTISE_100FULL;
-- mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
++
+ nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED))
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ else
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
-
-- if (np->gigabit == PHY_GIGABIT) {
-- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
-- adv &= ~ADVERTISE_1000FULL;
-- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
-- adv |= ADVERTISE_1000FULL;
-- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
-- }
++
+ spin_unlock_irq(&np->lock);
-
-- bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
++
+ nv_free_irq(dev);
+
+ np->msi_flags = save_msi_flags;
+ if (nv_request_irq(dev, 0))
+ return 0;
+ }
-
++
+ return ret;
+}
+
+ u32 misc1_flags = 0;
+ int ret = 1;
+
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
-+
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ filter_flags = readl(base + NvRegPacketFilterFlags);
+ misc1_flags = readl(base + NvRegMisc1);
- } else {
-- int adv, bmcr;
++ } else {
+ nv_txrx_reset(dev);
+ }
-
-- np->autoneg = 0;
++
+ /* reinit driver view of the rx queue */
+ set_bufsize(dev);
+ nv_init_ring(dev);
-
-- adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
-- if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
-- adv |= ADVERTISE_10HALF;
-- if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
-- adv |= ADVERTISE_10FULL;
-- if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
-- adv |= ADVERTISE_100HALF;
-- if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
-- adv |= ADVERTISE_100FULL;
-- mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
-- np->fixed_mode = adv;
++
+ /* setup hardware for loopback */
+ writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
+ writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
-
-- if (np->gigabit == PHY_GIGABIT) {
-- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
-- adv &= ~ADVERTISE_1000FULL;
-- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
-- }
++
+ /* reinit nic view of the rx queue */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
-
-- bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-- bmcr |= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
-- if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
-- bmcr |= BMCR_FULLDPLX;
-- if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
-- bmcr |= BMCR_SPEED100;
-- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
++
+ /* restart rx engine */
+ nv_start_rx(dev);
+ nv_start_tx(dev);
-- if (netif_running(dev)) {
-- /* Wait a bit and then reconfigure the nic. */
-- udelay(10);
-- nv_linkchange(dev);
+- if (np->gigabit == PHY_GIGABIT) {
+- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+- adv &= ~ADVERTISE_1000FULL;
+- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+- }
+ /* setup packet for tx */
+ pkt_len = ETH_DATA_LEN;
+ tx_skb = dev_alloc_skb(pkt_len);
+ pkt_data[i] = (u8)(i & 0xff);
+ test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
+ tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
-+
+
+- bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+- bmcr |= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
+- if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
+- bmcr |= BMCR_FULLDPLX;
+- if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
+- bmcr |= BMCR_SPEED100;
+- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
+ np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ }
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(get_hwbase(dev));
-+
+
+- if (netif_running(dev)) {
+- /* Wait a bit and then reconfigure the nic. */
+- udelay(10);
+- nv_linkchange(dev);
+ nv_msleep(500);
+
+ /* check for rx of the packet */
- spin_unlock_irq(&np->lock);
- return 0;
+-}
+ if (ret) {
+ if (len != pkt_len) {
+ ret = 0;
+ } else {
+ dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
+ }
-+
+
+-#define FORCEDETH_REGS_VER 1
+-#define FORCEDETH_REGS_SIZE 0x400 /* 256 32-bit registers */
+ pci_unmap_page(np->pci_dev, test_dma_addr,
+ tx_skb->end-tx_skb->data,
+ PCI_DMA_TODEVICE);
+ /* drain rx queue */
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
-+
+
+-static int nv_get_regs_len(struct net_device *dev)
+-{
+- return FORCEDETH_REGS_SIZE;
+ if (netif_running(dev)) {
+ writel(misc1_flags, base + NvRegMisc1);
+ writel(filter_flags, base + NvRegPacketFilterFlags);
+ return ret;
}
--#define FORCEDETH_REGS_VER 1
--#define FORCEDETH_REGS_SIZE 0x400 /* 256 32-bit registers */
+-static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
-+{
+ {
+- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
-+ u8 __iomem *base = get_hwbase(dev);
+ u8 __iomem *base = get_hwbase(dev);
+- u32 *rbuf = buf;
+- int i;
+ int result;
+ memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
--static int nv_get_regs_len(struct net_device *dev)
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
-+
+- regs->version = FORCEDETH_REGS_VER;
+- spin_lock_irq(&np->lock);
+- for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
+- rbuf[i] = readl(base + i*sizeof(u32));
+- spin_unlock_irq(&np->lock);
+-}
+ if (!nv_link_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[0] = 1;
+ }
-+
+
+-static int nv_nway_reset(struct net_device *dev)
+-{
+- struct fe_priv *np = netdev_priv(dev);
+- int ret;
+ if (test->flags & ETH_TEST_FL_OFFLINE) {
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
+ spin_lock_bh(&dev->xmit_lock);
-+#endif
+ spin_lock_irq(&np->lock);
+ nv_disable_hw_interrupts(dev, np->irqmask);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
+ spin_unlock_irq(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
+ spin_unlock_bh(&dev->xmit_lock);
-+#endif
+ }
-+
+
+- spin_lock_irq(&np->lock);
+- if (np->autoneg) {
+- int bmcr;
+ if (!nv_register_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[1] = 1;
+ }
-+
+
+- bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ result = nv_interrupt_test(dev);
+ if (result != 1) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ /* bail out */
+ return;
+ }
-+
+
+- ret = 0;
+- } else {
+- ret = -EINVAL;
+ if (!nv_loopback_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[3] = 1;
+ netif_start_queue(dev);
+ nv_enable_hw_interrupts(dev, np->irqmask);
+ }
-+ }
+ }
+- spin_unlock_irq(&np->lock);
+}
-+
+
+- return ret;
+static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
- {
-- return FORCEDETH_REGS_SIZE;
++{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
+ }
}
--static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
-+static struct ethtool_ops ops = {
-+ .get_drvinfo = nv_get_drvinfo,
-+ .get_link = ethtool_op_get_link,
-+ .get_wol = nv_get_wol,
-+ .set_wol = nv_set_wol,
-+ .get_settings = nv_get_settings,
-+ .set_settings = nv_set_settings,
-+ .get_regs_len = nv_get_regs_len,
-+ .get_regs = nv_get_regs,
-+ .nway_reset = nv_nway_reset,
+ static struct ethtool_ops ops = {
+@@ -2215,68 +5125,175 @@
+ .get_regs_len = nv_get_regs_len,
+ .get_regs = nv_get_regs,
+ .nway_reset = nv_nway_reset,
+#if NVVER > SUSE10
-+ .get_perm_addr = ethtool_op_get_perm_addr,
+ .get_perm_addr = ethtool_op_get_perm_addr,
+#endif
+ .get_ringparam = nv_get_ringparam,
+ .set_ringparam = nv_set_ringparam,
+};
+
+static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
- {
-- struct fe_priv *np = netdev_priv(dev);
-- u8 __iomem *base = get_hwbase(dev);
-- u32 *rbuf = buf;
-- int i;
++{
+ struct fe_priv *np = get_nvpriv(dev);
-
-- regs->version = FORCEDETH_REGS_VER;
- spin_lock_irq(&np->lock);
-- for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
-- rbuf[i] = readl(base + i*sizeof(u32));
++
++ spin_lock_irq(&np->lock);
+
+ /* save vlan group */
+ np->vlangrp = grp;
+ if (grp) {
+ /* enable vlan on MAC */
+ np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
-+ /* vlan is dependent on rx checksum */
+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+ } else {
+ /* disable vlan on MAC */
+
+ writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+
- spin_unlock_irq(&np->lock);
--}
++ spin_unlock_irq(&np->lock);
+};
-
--static int nv_nway_reset(struct net_device *dev)
++
+static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ /* nothing to do */
-+};
-+
+ };
+
+/* The mgmt unit and driver use a semaphore to access the phy during init */
+static int nv_mgmt_acquire_sema(struct net_device *dev)
- {
-- struct fe_priv *np = netdev_priv(dev);
-- int ret;
++{
+ u8 __iomem *base = get_hwbase(dev);
+ int i;
+ u32 tx_ctrl, mgmt_sema;
-
-- spin_lock_irq(&np->lock);
-- if (np->autoneg) {
-- int bmcr;
++
+ for (i = 0; i < 10; i++) {
+ mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
+ if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) {
+ }
+ nv_msleep(500);
+ }
-
-- bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
++
+ if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) {
+ dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: sema is not free\n");
+ return 0;
+ }
-
-- ret = 0;
-- } else {
-- ret = -EINVAL;
++
+ for (i = 0; i < 2; i++) {
+ tx_ctrl = readl(base + NvRegTransmitterControl);
+ tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
+ return 1;
+ } else
+ udelay(50);
- }
-- spin_unlock_irq(&np->lock);
-
-- return ret;
++ }
++
+ dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: exit\n");
+ return 0;
- }
-
--static struct ethtool_ops ops = {
-- .get_drvinfo = nv_get_drvinfo,
-- .get_link = ethtool_op_get_link,
-- .get_wol = nv_get_wol,
-- .set_wol = nv_set_wol,
-- .get_settings = nv_get_settings,
-- .set_settings = nv_set_settings,
-- .get_regs_len = nv_get_regs_len,
-- .get_regs = nv_get_regs,
-- .nway_reset = nv_nway_reset,
-- .get_perm_addr = ethtool_op_get_perm_addr,
--};
--
++}
++
++/* Indicate to mgmt unit whether driver is loaded or not */
++static void nv_mgmt_driver_loaded(struct net_device *dev, int loaded)
++{
++ u8 __iomem *base = get_hwbase(dev);
++ u32 tx_ctrl;
++
++ tx_ctrl = readl(base + NvRegTransmitterControl);
++ if (loaded)
++ tx_ctrl |= NVREG_XMITCTL_HOST_LOADED;
++ else
++ tx_ctrl &= ~NVREG_XMITCTL_HOST_LOADED;
++ writel(tx_ctrl, base + NvRegTransmitterControl);
++}
++
static int nv_open(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
writel(0, base + NvRegMulticastMaskA);
-@@ -2238,44 +5498,44 @@
+ writel(0, base + NvRegMulticastMaskB);
+ writel(0, base + NvRegPacketFilterFlags);
+
+- writel(0, base + NvRegTransmitterControl);
+- writel(0, base + NvRegReceiverControl);
++ nv_stop_tx(dev);
++ nv_stop_rx(dev);
writel(0, base + NvRegAdapterControl);
set_bufsize(dev);
oom = nv_init_ring(dev);
- writel(0, base + NvRegLinkSpeed);
+- writel(0, base + NvRegLinkSpeed);
- writel(0, base + NvRegUnknownTransmitterReg);
-+ writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
nv_txrx_reset(dev);
writel(0, base + NvRegUnknownSetupReg6);
writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
-@@ -2284,8 +5544,8 @@
+@@ -2285,8 +5302,8 @@
writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
get_random_bytes(&i, sizeof(i));
writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
if (poll_interval == -1) {
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
-@@ -2298,8 +5558,9 @@
+@@ -2299,8 +5316,9 @@
writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
base + NvRegAdapterControl);
writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
i = readl(base + NvRegPowerState);
if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
-@@ -2309,18 +5570,18 @@
+@@ -2310,18 +5328,18 @@
udelay(10);
writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
-@@ -2347,11 +5608,15 @@
+@@ -2348,11 +5366,15 @@
if (ret) {
netif_carrier_on(dev);
} else {
spin_unlock_irq(&np->lock);
return 0;
-@@ -2362,16 +5627,23 @@
+@@ -2363,16 +5385,23 @@
static int nv_close(struct net_device *dev)
{
netif_stop_queue(dev);
spin_lock_irq(&np->lock);
-@@ -2381,25 +5653,19 @@
+@@ -2382,25 +5411,19 @@
/* disable interrupts on the nic or we will lock up */
base = get_hwbase(dev);
/* FIXME: power down nic */
return 0;
-@@ -2412,13 +5678,18 @@
+@@ -2413,13 +5436,19 @@
unsigned long addr;
u8 __iomem *base;
int err, i;
+ u32 powerstate, phystate_orig = 0, phystate, txreg;
+ int phyinitialized = 0;
++ //NVLAN_DISABLE_ALL_FEATURES ;
+ /* modify network device class id */
+ quirk_nforce_network_class(pci_dev);
dev = alloc_etherdev(sizeof(struct fe_priv));
goto out;
- np = netdev_priv(dev);
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s:nv_probe: begin\n",dev->name);
+ np = get_nvpriv(dev);
np->pci_dev = pci_dev;
spin_lock_init(&np->lock);
SET_MODULE_OWNER(dev);
-@@ -2430,6 +5701,9 @@
+@@ -2431,6 +5460,9 @@
init_timer(&np->nic_poll);
np->nic_poll.data = (unsigned long) dev;
np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
err = pci_enable_device(pci_dev);
if (err) {
-@@ -2444,15 +5718,22 @@
+@@ -2445,6 +5477,11 @@
if (err < 0)
goto out_disable;
-+ if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
-+ np->register_size = NV_PCI_REGSZ_VER3;
-+ else if (id->driver_data & DEV_HAS_STATISTICS_V1)
++ if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
+ np->register_size = NV_PCI_REGSZ_VER2;
+ else
+ np->register_size = NV_PCI_REGSZ_VER1;
err = -EINVAL;
addr = 0;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
- pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
-- pci_resource_len(pci_dev, i),
-- pci_resource_flags(pci_dev, i));
-+ (long)pci_resource_len(pci_dev, i),
-+ (long)pci_resource_flags(pci_dev, i));
+@@ -2453,7 +5490,7 @@
+ pci_resource_len(pci_dev, i),
+ pci_resource_flags(pci_dev, i));
if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
- pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) {
+ pci_resource_len(pci_dev, i) >= np->register_size) {
addr = pci_resource_start(pci_dev, i);
break;
}
-@@ -2463,17 +5744,29 @@
+@@ -2464,17 +5501,29 @@
goto out_relreg;
}
} else if (id->driver_data & DEV_HAS_LARGEDESC) {
/* packet format 2: supports jumbo frames */
np->desc_ver = DESC_VER_2;
-@@ -2487,49 +5780,153 @@
+@@ -2488,49 +5537,149 @@
np->pkt_limit = NV_PKTLIMIT_1;
if (id->driver_data & DEV_HAS_LARGEDESC)
np->pkt_limit = NV_PKTLIMIT_2;
+ printk(KERN_INFO "forcedeth: speed_duplex of 1000 full can not enabled if autoneg is disabled\n");
+ goto out_relreg;
+ }
-+
-+ /* save phy config */
-+ np->autoneg = autoneg;
-+ np->speed_duplex = speed_duplex;
+
err = -ENOMEM;
- np->base = ioremap(addr, NV_PCI_REGSZ);
SET_ETHTOOL_OPS(dev, &ops);
dev->tx_timeout = nv_tx_timeout;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
-@@ -2541,15 +5938,37 @@
+@@ -2542,15 +5691,36 @@
np->orig_mac[0] = readl(base + NvRegMacAddrA);
np->orig_mac[1] = readl(base + NvRegMacAddrB);
-- dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
-- dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
-- dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
-- dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
-- dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
-- dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ /* check the workaround bit for correct mac address order */
+ txreg = readl(base + NvRegTransmitPoll);
-+ if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) ||
-+ (id->driver_data & DEV_HAS_CORRECT_MACADDR)) {
++ if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
+ /* mac address is already in correct order */
+ dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ } else {
-+ dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
-+ dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
-+ dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
-+ dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
-+ dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
-+ dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
+ dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ /* set permanent address to be correct aswell */
+ np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+ (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
/*
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
-@@ -2568,20 +5987,41 @@
+@@ -2569,22 +5739,43 @@
dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ np->msi_flags |= 0x0001;
+ }
- if (id->driver_data & DEV_NEED_TIMERIRQ)
+- if (id->driver_data & DEV_NEED_TIMERIRQ_ORIG)
++ if (id->driver_data & DEV_NEED_TIMERIRQ)
np->irqmask |= NVREG_IRQ_TIMER;
-@@ -2594,6 +6034,41 @@
+ if (id->driver_data & DEV_NEED_LINKTIMER) {
+ dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
+@@ -2595,6 +5786,59 @@
np->need_linktimer = 0;
}
+ writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+
+ if (id->driver_data & DEV_HAS_MGMT_UNIT) {
++ writel(NV_UNKNOWN_VAL, base + NvRegPatternCRC);
++ pci_push(base);
++ nv_msleep(500);
+ /* management unit running on the mac? */
-+ if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
-+ np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
-+ dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
-+ for (i = 0; i < 5000; i++) {
-+ nv_msleep(1);
-+ if (nv_mgmt_acquire_sema(dev)) {
-+ /* management unit setup the phy already? */
-+ if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
-+ NVREG_XMITCTL_SYNC_PHY_INIT) {
-+ if(np->mac_in_use){
-+ /* phy is inited by mgmt unit */
-+ phyinitialized = 1;
-+ dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
-+ }
-+ } else {
-+ /* we need to init the phy */
++ np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
++ if (np->mac_in_use) {
++ u32 mgmt_sync;
++ dprintk(KERN_DEBUG "%s: probe: mac in use\n",dev->name);
++ /* management unit setup the phy already? */
++ mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
++ if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) {
++ dprintk(KERN_DEBUG"%s : probe: sync not ready\n",dev->name);
++ if (!nv_mgmt_acquire_sema(dev)) {
++ dprintk(KERN_DEBUG"%s: probe: could not acquire sema\n",dev->name);
++ for (i = 0; i < 5000; i++) {
++ nv_msleep(1);
++ mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
++ if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY)
++ continue;
++ if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) {
++ dprintk(KERN_DEBUG"%s: probe: phy inited by SMU 1\n",dev->name);
++ phyinitialized = 1;
+ }
+ break;
++ dprintk(KERN_DEBUG"%s: probe: breaking out of loop\n",dev->name);
+ }
++ } else {
++ /* we need to init the phy */
++ dprintk(KERN_DEBUG"%s: probe: we need to init phy 1\n",dev->name);
+ }
++ } else if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) {
++ dprintk(KERN_DEBUG"%s: probe: phy inited by SMU 2\n",dev->name);
++ /* phy is inited by SMU */
++ phyinitialized = 1;
++ } else {
++ /* we need to init the phy */
++ dprintk(KERN_DEBUG"%s: probe: we need to init phy 2\n",dev->name);
+ }
++ } else
++ dprintk(KERN_DEBUG"%s: probe: mac not in use\n",dev->name);
+ }
+
/* find a suitable phy */
for (i = 1; i <= 32; i++) {
int id1, id2;
-@@ -2610,32 +6085,45 @@
+@@ -2611,6 +5855,7 @@
if (id2 < 0 || id2 == 0xffff)
continue;
id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
-- pci_name(pci_dev), id1, id2, phyaddr);
-+ pci_name(pci_dev), id1, id2, phyaddr);
- np->phyaddr = phyaddr;
- np->phy_oui = id1 | id2;
- break;
- }
+@@ -2622,21 +5867,32 @@
if (i == 33) {
printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
-- pci_name(pci_dev));
+ pci_name(pci_dev));
- goto out_freering;
-+ pci_name(pci_dev));
+ goto out_error;
}
-- /* reset it */
-- phy_init(dev);
+ if (!phyinitialized) {
-+ /* reset it */
-+ phy_init(dev);
+ /* reset it */
+ phy_init(dev);
+ } else {
-+ /* see if it is a gigabit phy */
++ /* see if gigabit phy */
+ u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ if (mii_status & PHY_GIGABIT) {
+ np->gigabit = PHY_GIGABIT;
+ }
+ }
-+
-+ if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011 && np->pci_dev->subsystem_vendor ==0x108E && np->pci_dev->subsystem_device==0x6676 ) {
-+ nv_LED_on(dev);
++ if (id->driver_data & DEV_HAS_MGMT_UNIT) {
++ nv_mgmt_driver_loaded(dev, 1);
+ }
/* set default link speed settings */
}
printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
-@@ -2643,14 +6131,12 @@
+@@ -2644,14 +5900,14 @@
return 0;
+out_error:
+ if (phystate_orig)
+ writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
++ if (np->mac_in_use)
++ nv_mgmt_driver_loaded(dev, 0);
pci_set_drvdata(pci_dev, NULL);
+out_freering:
+ free_rings(dev);
out_unmap:
iounmap(get_hwbase(dev));
out_relreg:
-@@ -2663,18 +6149,27 @@
- return err;
- }
-
-+#ifdef CONFIG_PM
-+static void nv_set_low_speed(struct net_device *dev);
-+#endif
+@@ -2667,15 +5923,20 @@
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
-+ if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011 && np->pci_dev->subsystem_vendor ==0x108E && np->pci_dev->subsystem_device==0x6676) {
-+ nv_LED_off(dev);
-+ }
unregister_netdev(dev);
+ /* special op: write back the misordered MAC address - otherwise
+ * the next nv_probe would see a wrong address.
+ */
-+ writel(np->orig_mac[0], base + NvRegMacAddrA);
-+ writel(np->orig_mac[1], base + NvRegMacAddrB);
++ writel(np->orig_mac[0], base + NvRegMacAddrA);
++ writel(np->orig_mac[1], base + NvRegMacAddrB);
++ if (np->mac_in_use)
++ nv_mgmt_driver_loaded(dev, 0);
/* free all structures */
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
iounmap(get_hwbase(dev));
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
-@@ -2713,65 +6208,471 @@
- },
- { /* CK804 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
-- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
- },
- { /* CK804 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
-- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
- },
- { /* MCP04 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
-- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
- },
- { /* MCP04 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
-- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
+@@ -2730,19 +5991,51 @@
},
{ /* MCP51 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
},
{ /* MCP51 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP67 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP67 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP67 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP67 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP73 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP73 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP73 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP73 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP77 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
-+ },
-+ { /* MCP77 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
-+ },
-+ { /* MCP77 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
-+ },
-+ { /* MCP77 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{0,},
};
-
--static struct pci_driver driver = {
-+#ifdef CONFIG_PM
-+static void nv_set_low_speed(struct net_device *dev)
-+{
-+ struct fe_priv *np = get_nvpriv(dev);
-+ int adv = 0;
-+ int lpa = 0;
-+ int adv_lpa, bmcr, tries = 0;
-+ int mii_status;
-+ u32 control_1000;
-+
-+ if (np->autoneg == 0 || ((np->linkspeed & 0xFFF) != NVREG_LINKSPEED_1000))
-+ return;
-+
-+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-+ lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
-+ control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
-+
-+ adv_lpa = lpa & adv;
-+
-+ if ((adv_lpa & LPA_10FULL) || (adv_lpa & LPA_10HALF)) {
-+ adv &= ~(ADVERTISE_100BASE4 | ADVERTISE_100FULL | ADVERTISE_100HALF);
-+ control_1000 &= ~(ADVERTISE_1000FULL|ADVERTISE_1000HALF);
-+ printk(KERN_INFO "forcedeth %s: set low speed to 10mbs\n",dev->name);
-+ } else if ((adv_lpa & LPA_100FULL) || (adv_lpa & LPA_100HALF)) {
-+ control_1000 &= ~(ADVERTISE_1000FULL|ADVERTISE_1000HALF);
-+ } else
-+ return;
-+
-+ /* set new advertisements */
-+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
-+ mii_rw(dev, np->phyaddr, MII_CTRL1000, control_1000);
-+
-+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
-+ bmcr |= BMCR_ANENABLE;
-+ /* reset the phy in order for settings to stick,
-+ * and cause autoneg to start */
-+ if (phy_reset(dev, bmcr)) {
-+ printk(KERN_INFO "%s: phy reset failed\n", dev->name);
-+ return;
-+ }
-+ } else {
-+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
-+ }
-+ mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
-+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
-+ while (!(mii_status & BMSR_ANEGCOMPLETE)) {
-+ nv_msleep(100);
-+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
-+ if (tries++ > 50)
-+ break;
-+ }
-+
-+ nv_update_linkspeed(dev);
-+
-+ return;
-+}
-+
-+static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
-+{
-+ struct net_device *dev = pci_get_drvdata(pdev);
-+ struct fe_priv *np = get_nvpriv(dev);
-+ u8 __iomem *base = get_hwbase(dev);
-+ int i;
-+
-+ dprintk(KERN_INFO "forcedeth: nv_suspend\n");
-+
-+ /* save msix table */
-+ {
-+ unsigned long phys_addr;
-+ void __iomem *base_addr;
-+ void __iomem *base;
-+ unsigned int bir,len;
-+ unsigned int i;
-+ int pos;
-+ u32 table_offset;
-+
-+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-+ pci_read_config_dword(pdev, pos+0x04 , &table_offset);
-+ bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
-+ table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
-+ phys_addr = pci_resource_start(pdev, bir) + table_offset;
-+ np->msix_pa_addr = phys_addr;
-+ len = NV_MSI_X_MAX_VECTORS * PCI_MSIX_ENTRY_SIZE;
-+ base_addr = ioremap_nocache(phys_addr, len);
-+
-+ for(i=0;i<NV_MSI_X_MAX_VECTORS;i++){
-+ base = base_addr + i*PCI_MSIX_ENTRY_SIZE;
-+ np->nvmsg[i].address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-+ np->nvmsg[i].address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET );
-+ np->nvmsg[i].data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
-+ }
-+
-+ iounmap(base_addr);
-+ }
-+
-+ nv_update_linkspeed(dev);
-+
-+ if (netif_running(dev)) {
-+ netif_device_detach(dev);
-+ /* bring down the adapter */
-+ nv_close(dev);
-+ }
-+
-+ /* set phy to a lower speed to conserve power */
-+ if((lowpowerspeed==NV_LOW_POWER_ENABLED)&&!np->mac_in_use)
-+ nv_set_low_speed(dev);
-+
-+#if NVVER > RHES4
-+ pci_save_state(pdev);
-+#else
-+ pci_save_state(pdev,np->pci_state);
-+#endif
-+ np->saved_nvregphyinterface= readl(base+NvRegPhyInterface);
-+ for(i=0;i<64;i++){
-+ pci_read_config_dword(pdev,i*4,&np->saved_config_space[i]);
-+ }
-+#if NVVER > RHES4
-+ pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
-+#else
-+ pci_enable_wake(pdev, state, np->wolenabled);
-+#endif
-+ pci_disable_device(pdev);
-+
-+#if NVVER > RHES4
-+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
-+#else
-+ pci_set_power_state(pdev, state);
-+#endif
-+
-+ return 0;
-+}
-+
-+static int nv_resume(struct pci_dev *pdev)
-+{
-+ struct net_device *dev = pci_get_drvdata(pdev);
-+ int rc = 0;
-+ struct fe_priv *np = get_nvpriv(dev);
-+ u8 __iomem *base = get_hwbase(dev);
-+ int i;
-+ u32 txreg;
-+
-+ dprintk(KERN_INFO "forcedeth: nv_resume\n");
-+
-+ pci_set_power_state(pdev, PCI_D0);
-+#if NVVER > RHES4
-+ pci_restore_state(pdev);
-+#else
-+ pci_restore_state(pdev,np->pci_state);
-+#endif
-+ for(i=0;i<64;i++){
-+ pci_write_config_dword(pdev,i*4,np->saved_config_space[i]);
-+ }
-+ pci_enable_device(pdev);
-+ pci_set_master(pdev);
-+
-+ txreg = readl(base + NvRegTransmitPoll);
-+ txreg |= NVREG_TRANSMITPOLL_MAC_ADDR_REV;
-+ writel(txreg, base + NvRegTransmitPoll);
-+ writel(np->saved_nvregphyinterface,base+NvRegPhyInterface);
-+ writel(np->orig_mac[0], base + NvRegMacAddrA);
-+ writel(np->orig_mac[1], base + NvRegMacAddrB);
-+
-+ /* restore msix table */
-+ {
-+ unsigned long phys_addr;
-+ void __iomem *base_addr;
-+ void __iomem *base;
-+ unsigned int len;
-+ unsigned int i;
-+
-+ len = NV_MSI_X_MAX_VECTORS * PCI_MSIX_ENTRY_SIZE;
-+ phys_addr = np->msix_pa_addr;
-+ base_addr = ioremap_nocache(phys_addr, len);
-+ for(i=0;i< NV_MSI_X_MAX_VECTORS;i++){
-+ base = base_addr + i*PCI_MSIX_ENTRY_SIZE;
-+ writel(np->nvmsg[i].address_lo,base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-+ writel(np->nvmsg[i].address_hi,base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
-+ writel(np->nvmsg[i].data,base + PCI_MSIX_ENTRY_DATA_OFFSET);
-+ }
-+
-+ iounmap(base_addr);
-+ }
-+
-+ if(lowpowerspeed==NV_LOW_POWER_ENABLED){
-+ /* re-initialize the phy */
-+ phy_init(dev);
-+ udelay(10);
-+ }
-+ /* bring up the adapter */
-+ if (netif_running(dev)){
-+ rc = nv_open(dev);
-+ }
-+ netif_device_attach(dev);
-+
-+ return rc;
-+}
-+
-+#endif /* CONFIG_PM */
-+static struct pci_driver nv_eth_driver = {
- .name = "forcedeth",
- .id_table = pci_tbl,
- .probe = nv_probe,
- .remove = __devexit_p(nv_remove),
-+#ifdef CONFIG_PM
-+ .suspend = nv_suspend,
-+ .resume = nv_resume,
-+#endif
- };
-
-+#ifdef CONFIG_PM
-+static int nv_reboot_handler(struct notifier_block *nb, unsigned long event, void *p)
-+{
-+ struct pci_dev *pdev = NULL;
-+ pm_message_t state = { PM_EVENT_SUSPEND };
-+
-+ switch (event)
-+ {
-+ case SYS_POWER_OFF:
-+ case SYS_HALT:
-+ case SYS_DOWN:
-+ while ((pdev = pci_find_device(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, pdev)) != NULL) {
-+ if (pci_dev_driver(pdev) == &nv_eth_driver) {
-+ nv_suspend(pdev, state);
-+ }
-+ }
-+ }
-+
-+ return NOTIFY_DONE;
-+}
-+
-+/*
-+ * Reboot notification
-+ */
-+struct notifier_block nv_reboot_notifier =
-+{
-+ notifier_call : nv_reboot_handler,
-+ next : NULL,
-+ priority : 0
-+};
-+#endif
-
+@@ -2758,6 +6051,7 @@
static int __init init_nic(void)
{
-+ int status;
printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
-- return pci_module_init(&driver);
-+ DPRINTK(DRV,KERN_DEBUG,"forcedeth:%s\n",DRV_DATE);
-+ status = pci_module_init(&nv_eth_driver);
-+#ifdef CONFIG_PM
-+ if (status >= 0)
-+ register_reboot_notifier(&nv_reboot_notifier);
-+#endif
-+ return status;
++ dprintk(KERN_DEBUG "DEBUG VERSION\n");
+ return pci_module_init(&driver);
}
- static void __exit exit_nic(void)
- {
-- pci_unregister_driver(&driver);
-+#ifdef CONFIG_PM
-+ unregister_reboot_notifier(&nv_reboot_notifier);
-+#endif
-+ pci_unregister_driver(&nv_eth_driver);
+@@ -2766,15 +6060,90 @@
+ pci_unregister_driver(&driver);
}
+#if NVVER > SLES9
-+module_param(debug, int, 0);
-+module_param(lowpowerspeed, int, 0);
-+MODULE_PARM_DESC(lowpowerspeed, "Low Power State Link Speed enable by setting to 1 and disabled by setting to 0");
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
+module_param(tagging_8021pq, int, 0);
+MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0.");
+#else
-+MODULE_PARM(debug, "i");
-+MODULE_PARM(lowpowerspeed, "i");
-+MODULE_PARM_DESC(lowpowerspeed, "Low Power State Link Speed enable by setting to 1 and disabled by setting to 0");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
+MODULE_PARM(optimization_mode, "i");
+MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0.");
+#endif
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
- MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
+-MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver (TIMERIRQ DISABLED)");
++MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");
+
+ MODULE_DEVICE_TABLE(pci, pci_tbl);
diff -uNr linux-2.6.16.orig/drivers/scsi/sata_nv.c linux-2.6.16/drivers/scsi/sata_nv.c
---- linux-2.6.16.orig/drivers/scsi/sata_nv.c 2008-11-02 19:51:53.000000000 +0100
-+++ linux-2.6.16/drivers/scsi/sata_nv.c 2008-11-03 01:02:50.000000000 +0100
+--- linux-2.6.16.orig/drivers/scsi/sata_nv.c 2007-06-23 20:15:59.919947000 +0200
++++ linux-2.6.16/drivers/scsi/sata_nv.c 2006-10-21 14:45:00.000000000 +0200
@@ -1,630 +1,1284 @@
-/*
- * sata_nv.c - NVIDIA nForce SATA
-
-module_init(nv_init);
-module_exit(nv_exit);
-+/*
-+ * sata_nv.c - NVIDIA nForce SATA
-+ *
-+ * Copyright 2004 NVIDIA Corp. All rights reserved.
-+ * Copyright 2004 Andrew Chew
-+ *
-+ * The contents of this file are subject to the Open
-+ * Software License version 1.1 that can be found at
-+ * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
-+ * by reference.
-+ *
-+ * Alternatively, the contents of this file may be used under the terms
-+ * of the GNU General Public License version 2 (the "GPL") as distributed
-+ * in the kernel source COPYING file, in which case the provisions of
-+ * the GPL are applicable instead of the above. If you wish to allow
-+ * the use of your version of this file only under the terms of the
-+ * GPL and not to allow others to use your version of this file under
-+ * the OSL, indicate your decision by deleting the provisions above and
-+ * replace them with the notice and other provisions required by the GPL.
-+ * If you do not delete the provisions above, a recipient may use your
-+ * version of this file under either the OSL or the GPL.
-+ *
-+ * 0.11
-+ * - Added sgpio support
-+ *
-+ * 0.10
-+ * - Fixed spurious interrupts issue seen with the Maxtor 6H500F0 500GB
-+ * drive. Also made the check_hotplug() callbacks return whether there
-+ * was a hotplug interrupt or not. This was not the source of the
-+ * spurious interrupts, but is the right thing to do anyway.
-+ *
-+ * 0.09
-+ * - Fixed bug introduced by 0.08's MCP51 and MCP55 support.
-+ *
-+ * 0.08
-+ * - Added support for MCP51 and MCP55.
-+ *
-+ * 0.07
-+ * - Added support for RAID class code.
-+ *
-+ * 0.06
-+ * - Added generic SATA support by using a pci_device_id that filters on
-+ * the IDE storage class code.
-+ *
-+ * 0.03
-+ * - Fixed a bug where the hotplug handlers for non-CK804/MCP04 were using
-+ * mmio_base, which is only set for the CK804/MCP04 case.
-+ *
-+ * 0.02
-+ * - Added support for CK804 SATA controller.
-+ *
-+ * 0.01
-+ * - Initial revision.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/pci.h>
-+#include <linux/init.h>
-+#include <linux/blkdev.h>
-+#include <linux/delay.h>
-+#include <linux/interrupt.h>
-+#include "scsi.h"
-+#include <scsi/scsi_host.h>
-+#include <linux/libata.h>
-+
-+#define DRV_NAME "sata_nv"
-+#define DRV_VERSION "0.11-Driver Package V1.23"
-+
-+#define NV_PORTS 2
-+#define NV_PIO_MASK 0x1f
-+#define NV_MWDMA_MASK 0x07
-+#define NV_UDMA_MASK 0x7f
-+#define NV_PORT0_SCR_REG_OFFSET 0x00
-+#define NV_PORT1_SCR_REG_OFFSET 0x40
-+
-+#define NV_INT_STATUS 0x10
-+#define NV_INT_STATUS_CK804 0x440
-+#define NV_INT_STATUS_MCP55 0x440
-+#define NV_INT_STATUS_PDEV_INT 0x01
-+#define NV_INT_STATUS_PDEV_PM 0x02
-+#define NV_INT_STATUS_PDEV_ADDED 0x04
-+#define NV_INT_STATUS_PDEV_REMOVED 0x08
-+#define NV_INT_STATUS_SDEV_INT 0x10
-+#define NV_INT_STATUS_SDEV_PM 0x20
-+#define NV_INT_STATUS_SDEV_ADDED 0x40
-+#define NV_INT_STATUS_SDEV_REMOVED 0x80
-+#define NV_INT_STATUS_PDEV_HOTPLUG (NV_INT_STATUS_PDEV_ADDED | \
-+ NV_INT_STATUS_PDEV_REMOVED)
-+#define NV_INT_STATUS_SDEV_HOTPLUG (NV_INT_STATUS_SDEV_ADDED | \
-+ NV_INT_STATUS_SDEV_REMOVED)
-+#define NV_INT_STATUS_HOTPLUG (NV_INT_STATUS_PDEV_HOTPLUG | \
-+ NV_INT_STATUS_SDEV_HOTPLUG)
-+
-+#define NV_INT_ENABLE 0x11
-+#define NV_INT_ENABLE_CK804 0x441
-+#define NV_INT_ENABLE_MCP55 0x444
-+#define NV_INT_ENABLE_PDEV_MASK 0x01
-+#define NV_INT_ENABLE_PDEV_PM 0x02
-+#define NV_INT_ENABLE_PDEV_ADDED 0x04
-+#define NV_INT_ENABLE_PDEV_REMOVED 0x08
-+#define NV_INT_ENABLE_SDEV_MASK 0x10
-+#define NV_INT_ENABLE_SDEV_PM 0x20
-+#define NV_INT_ENABLE_SDEV_ADDED 0x40
-+#define NV_INT_ENABLE_SDEV_REMOVED 0x80
-+#define NV_INT_ENABLE_PDEV_HOTPLUG (NV_INT_ENABLE_PDEV_ADDED | \
-+ NV_INT_ENABLE_PDEV_REMOVED)
-+#define NV_INT_ENABLE_SDEV_HOTPLUG (NV_INT_ENABLE_SDEV_ADDED | \
-+ NV_INT_ENABLE_SDEV_REMOVED)
-+#define NV_INT_ENABLE_HOTPLUG (NV_INT_ENABLE_PDEV_HOTPLUG | \
-+ NV_INT_ENABLE_SDEV_HOTPLUG)
-+
-+#define NV_INT_CONFIG 0x12
-+#define NV_INT_CONFIG_METHD 0x01 // 0 = INT, 1 = SMI
-+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E
-+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F
-+
-+// For PCI config register 20
-+#define NV_MCP_SATA_CFG_20 0x50
-+#define NV_MCP_SATA_CFG_20_SATA_SPACE_EN 0x04
-+
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-+#define RHAS3U7
-+#endif
-+#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,16)
-+#define SLES10
-+#endif
-+
-+//sgpio
-+// Sgpio defines
-+// SGPIO state defines
-+#define NV_SGPIO_STATE_RESET 0
-+#define NV_SGPIO_STATE_OPERATIONAL 1
-+#define NV_SGPIO_STATE_ERROR 2
-+
-+// SGPIO command opcodes
-+#define NV_SGPIO_CMD_RESET 0
-+#define NV_SGPIO_CMD_READ_PARAMS 1
-+#define NV_SGPIO_CMD_READ_DATA 2
-+#define NV_SGPIO_CMD_WRITE_DATA 3
-+
-+// SGPIO command status defines
-+#define NV_SGPIO_CMD_OK 0
-+#define NV_SGPIO_CMD_ACTIVE 1
-+#define NV_SGPIO_CMD_ERR 2
-+
-+#define NV_SGPIO_UPDATE_TICK 90
-+#define NV_SGPIO_MIN_UPDATE_DELTA 33
-+#define NV_CNTRLR_SHARE_INIT 2
-+#define NV_SGPIO_MAX_ACTIVITY_ON 20
-+#define NV_SGPIO_MIN_FORCE_OFF 5
-+#define NV_SGPIO_PCI_CSR_OFFSET 0x58
-+#define NV_SGPIO_PCI_CB_OFFSET 0x5C
-+#define NV_SGPIO_DFLT_CB_SIZE 256
-+#define NV_ON 1
-+#define NV_OFF 0
-+#ifndef bool
-+#define bool u8
-+#endif
-+
-+static inline unsigned int jiffies_to_msecs1(const unsigned long j)
-+{
-+#if HZ <= 1000 && !(1000 % HZ)
-+ return (1000 / HZ) * j;
-+#elif HZ > 1000 && !(HZ % 1000)
-+ return (j + (HZ / 1000) - 1)/(HZ / 1000);
-+#else
-+ return (j * 1000) / HZ;
-+#endif
-+}
-+
-+#define BF_EXTRACT(v, off, bc) \
-+ ((((u8)(v)) >> (off)) & ((1 << (bc)) - 1))
-+
-+#define BF_INS(v, ins, off, bc) \
-+ (((v) & ~((((1 << (bc)) - 1)) << (off))) | \
-+ (((u8)(ins)) << (off)))
-+
-+#define BF_EXTRACT_U32(v, off, bc) \
-+ ((((u32)(v)) >> (off)) & ((1 << (bc)) - 1))
-+
-+#define BF_INS_U32(v, ins, off, bc) \
-+ (((v) & ~((((1 << (bc)) - 1)) << (off))) | \
-+ (((u32)(ins)) << (off)))
-+
-+#define GET_SGPIO_STATUS(v) BF_EXTRACT(v, 0, 2)
-+#define GET_CMD_STATUS(v) BF_EXTRACT(v, 3, 2)
-+#define GET_CMD(v) BF_EXTRACT(v, 5, 3)
-+#define SET_CMD(v, cmd) BF_INS(v, cmd, 5, 3)
-+
-+#define GET_ENABLE(v) BF_EXTRACT_U32(v, 23, 1)
-+#define SET_ENABLE(v) BF_INS_U32(v, 1, 23, 1)
-+
-+// Needs to have a u8 bit-field insert.
-+#define GET_ACTIVITY(v) BF_EXTRACT(v, 5, 3)
-+#define SET_ACTIVITY(v, on_off) BF_INS(v, on_off, 5, 3)
-+
-+union nv_sgpio_nvcr
-+{
-+ struct {
-+ u8 init_cnt;
-+ u8 cb_size;
-+ u8 cbver;
-+ u8 rsvd;
-+ } bit;
-+ u32 all;
-+};
-+
-+union nv_sgpio_tx
-+{
-+ u8 tx_port[4];
-+ u32 all;
-+};
-+
-+struct nv_sgpio_cb
-+{
-+ u64 scratch_space;
-+ union nv_sgpio_nvcr nvcr;
-+ u32 cr0;
-+ u32 rsvd[4];
-+ union nv_sgpio_tx tx[2];
-+};
-+
-+struct nv_sgpio_host_share
-+{
-+ spinlock_t *plock;
-+ unsigned long *ptstamp;
-+};
-+
-+struct nv_sgpio_host_flags
-+{
-+ u8 sgpio_enabled:1;
-+ u8 need_update:1;
-+ u8 rsvd:6;
-+};
-+
-+struct nv_host_sgpio
-+{
-+ struct nv_sgpio_host_flags flags;
-+ u8 *pcsr;
-+ struct nv_sgpio_cb *pcb;
-+ struct nv_sgpio_host_share share;
-+ struct timer_list sgpio_timer;
-+};
-+
-+struct nv_sgpio_port_flags
-+{
-+ u8 last_state:1;
-+ u8 recent_activity:1;
-+ u8 rsvd:6;
-+};
-+
-+struct nv_sgpio_led
-+{
-+ struct nv_sgpio_port_flags flags;
-+ u8 force_off;
-+ u8 last_cons_active;
-+};
-+
-+struct nv_port_sgpio
-+{
-+ struct nv_sgpio_led activity;
-+};
-+
-+static spinlock_t nv_sgpio_lock;
-+static unsigned long nv_sgpio_tstamp;
-+
-+static inline void nv_sgpio_set_csr(u8 csr, unsigned long pcsr)
-+{
-+ outb(csr, pcsr);
-+}
-+
-+static inline u8 nv_sgpio_get_csr(unsigned long pcsr)
-+{
-+ return inb(pcsr);
-+}
-+
-+static inline u8 nv_sgpio_get_func(struct ata_host_set *host_set)
-+{
-+ u8 devfn = (to_pci_dev(host_set->dev))->devfn;
-+ return (PCI_FUNC(devfn));
-+}
-+
-+static inline u8 nv_sgpio_tx_host_offset(struct ata_host_set *host_set)
-+{
-+ return (nv_sgpio_get_func(host_set)/NV_CNTRLR_SHARE_INIT);
-+}
-+
-+static inline u8 nv_sgpio_calc_tx_offset(u8 cntrlr, u8 channel)
-+{
-+ return (sizeof(union nv_sgpio_tx) - (NV_CNTRLR_SHARE_INIT *
-+ (cntrlr % NV_CNTRLR_SHARE_INIT)) - channel - 1);
-+}
-+
-+static inline u8 nv_sgpio_tx_port_offset(struct ata_port *ap)
-+{
-+ u8 cntrlr = nv_sgpio_get_func(ap->host_set);
-+ return (nv_sgpio_calc_tx_offset(cntrlr, ap->port_no));
-+}
-+
-+static inline bool nv_sgpio_capable(const struct pci_device_id *ent)
-+{
-+ if (ent->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2)
-+ return 1;
-+ else
-+ return 0;
-+}
-+
-+
-+
-+
-+
-+
-+static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-+static irqreturn_t nv_interrupt (int irq, void *dev_instance,
-+ struct pt_regs *regs);
-+static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
-+static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
-+static void nv_host_stop (struct ata_host_set *host_set);
-+static int nv_port_start(struct ata_port *ap);
-+static void nv_port_stop(struct ata_port *ap);
-+static int nv_qc_issue(struct ata_queued_cmd *qc);
-+static void nv_enable_hotplug(struct ata_probe_ent *probe_ent);
-+static void nv_disable_hotplug(struct ata_host_set *host_set);
-+static void nv_check_hotplug(struct ata_host_set *host_set);
-+static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent);
-+static void nv_disable_hotplug_ck804(struct ata_host_set *host_set);
-+static void nv_check_hotplug_ck804(struct ata_host_set *host_set);
-+static void nv_enable_hotplug_mcp55(struct ata_probe_ent *probe_ent);
-+static void nv_disable_hotplug_mcp55(struct ata_host_set *host_set);
-+static void nv_check_hotplug_mcp55(struct ata_host_set *host_set);
-+enum nv_host_type
-+{
-+ GENERIC,
-+ NFORCE2,
-+ NFORCE3,
-+ CK804,
-+ MCP55
-+};
-+
-+static struct pci_device_id nv_pci_tbl[] = {
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
-+ { 0, } /* terminate list */
-+};
-+
-+#define NV_HOST_FLAGS_SCR_MMIO 0x00000001
-+
-+struct nv_host_desc
-+{
-+ enum nv_host_type host_type;
-+ void (*enable_hotplug)(struct ata_probe_ent *probe_ent);
-+ void (*disable_hotplug)(struct ata_host_set *host_set);
-+ void (*check_hotplug)(struct ata_host_set *host_set);
-+
-+};
-+static struct nv_host_desc nv_device_tbl[] = {
-+ {
-+ .host_type = GENERIC,
-+ .enable_hotplug = NULL,
-+ .disable_hotplug= NULL,
-+ .check_hotplug = NULL,
-+ },
-+ {
-+ .host_type = NFORCE2,
-+ .enable_hotplug = nv_enable_hotplug,
-+ .disable_hotplug= nv_disable_hotplug,
-+ .check_hotplug = nv_check_hotplug,
-+ },
-+ {
-+ .host_type = NFORCE3,
-+ .enable_hotplug = nv_enable_hotplug,
-+ .disable_hotplug= nv_disable_hotplug,
-+ .check_hotplug = nv_check_hotplug,
-+ },
-+ { .host_type = CK804,
-+ .enable_hotplug = nv_enable_hotplug_ck804,
-+ .disable_hotplug= nv_disable_hotplug_ck804,
-+ .check_hotplug = nv_check_hotplug_ck804,
-+ },
-+ { .host_type = MCP55,
-+ .enable_hotplug = nv_enable_hotplug_mcp55,
-+ .disable_hotplug= nv_disable_hotplug_mcp55,
-+ .check_hotplug = nv_check_hotplug_mcp55,
-+ },
-+};
-+
-+
-+struct nv_host
-+{
-+ struct nv_host_desc *host_desc;
-+ unsigned long host_flags;
-+ struct nv_host_sgpio host_sgpio;
-+ struct pci_dev *pdev;
-+};
-+
-+struct nv_port
-+{
-+ struct nv_port_sgpio port_sgpio;
-+};
-+
-+// SGPIO function prototypes
-+static void nv_sgpio_init(struct pci_dev *pdev, struct nv_host *phost);
-+static void nv_sgpio_reset(u8 *pcsr);
-+static void nv_sgpio_set_timer(struct timer_list *ptimer,
-+ unsigned int timeout_msec);
-+static void nv_sgpio_timer_handler(unsigned long ptr);
-+static void nv_sgpio_host_cleanup(struct nv_host *host);
-+static bool nv_sgpio_update_led(struct nv_sgpio_led *led, bool *on_off);
-+static void nv_sgpio_clear_all_leds(struct ata_port *ap);
-+static bool nv_sgpio_send_cmd(struct nv_host *host, u8 cmd);
-+
-+
-+static struct pci_driver nv_pci_driver = {
-+ .name = DRV_NAME,
-+ .id_table = nv_pci_tbl,
-+ .probe = nv_init_one,
-+ .remove = ata_pci_remove_one,
-+};
-+
-+
-+#ifdef SLES10
-+static struct scsi_host_template nv_sht = {
-+#else
-+static Scsi_Host_Template nv_sht = {
-+#endif
-+ .module = THIS_MODULE,
-+ .name = DRV_NAME,
-+#ifdef RHAS3U7
-+ .detect = ata_scsi_detect,
-+ .release = ata_scsi_release,
-+#endif
-+ .ioctl = ata_scsi_ioctl,
-+ .queuecommand = ata_scsi_queuecmd,
-+ .eh_strategy_handler = ata_scsi_error,
-+ .can_queue = ATA_DEF_QUEUE,
-+ .this_id = ATA_SHT_THIS_ID,
-+ .sg_tablesize = LIBATA_MAX_PRD,
-+ .max_sectors = ATA_MAX_SECTORS,
-+ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
-+#ifdef RHAS3U7
-+ .use_new_eh_code = ATA_SHT_NEW_EH_CODE,
-+#endif
-+ .emulated = ATA_SHT_EMULATED,
-+ .use_clustering = ATA_SHT_USE_CLUSTERING,
-+ .proc_name = DRV_NAME,
-+#ifndef RHAS3U7
-+ .dma_boundary = ATA_DMA_BOUNDARY,
-+ .slave_configure = ata_scsi_slave_config,
-+#endif
-+ .bios_param = ata_std_bios_param,
-+};
-+
-+static struct ata_port_operations nv_ops = {
-+ .port_disable = ata_port_disable,
-+ .tf_load = ata_tf_load,
-+ .tf_read = ata_tf_read,
-+ .exec_command = ata_exec_command,
-+ .check_status = ata_check_status,
-+ .dev_select = ata_std_dev_select,
-+ .phy_reset = sata_phy_reset,
-+ .bmdma_setup = ata_bmdma_setup,
-+ .bmdma_start = ata_bmdma_start,
-+ .bmdma_stop = ata_bmdma_stop,
-+ .bmdma_status = ata_bmdma_status,
-+ .qc_prep = ata_qc_prep,
-+ .qc_issue = nv_qc_issue,
-+ .eng_timeout = ata_eng_timeout,
-+ .irq_handler = nv_interrupt,
-+ .irq_clear = ata_bmdma_irq_clear,
-+ .scr_read = nv_scr_read,
-+ .scr_write = nv_scr_write,
-+ .port_start = nv_port_start,
-+ .port_stop = nv_port_stop,
-+ .host_stop = nv_host_stop,
-+};
-+
-+/* FIXME: The hardware provides the necessary SATA PHY controls
-+ * to support ATA_FLAG_SATA_RESET. However, it is currently
-+ * necessary to disable that flag, to solve misdetection problems.
-+ * See http://bugme.osdl.org/show_bug.cgi?id=3352 for more info.
-+ *
-+ * This problem really needs to be investigated further. But in the
-+ * meantime, we avoid ATA_FLAG_SATA_RESET to get people working.
-+ */
-+static struct ata_port_info nv_port_info = {
-+ .sht = &nv_sht,
-+ .host_flags = ATA_FLAG_SATA |
-+ /* ATA_FLAG_SATA_RESET | */
-+ ATA_FLAG_SRST |
-+ ATA_FLAG_NO_LEGACY,
-+ .pio_mask = NV_PIO_MASK,
-+ .mwdma_mask = NV_MWDMA_MASK,
-+ .udma_mask = NV_UDMA_MASK,
-+ .port_ops = &nv_ops,
-+};
-+
-+MODULE_AUTHOR("NVIDIA");
-+MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
-+MODULE_LICENSE("GPL");
-+MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
-+MODULE_VERSION(DRV_VERSION);
-+
-+static irqreturn_t nv_interrupt (int irq, void *dev_instance,
-+ struct pt_regs *regs)
-+{
-+ struct ata_host_set *host_set = dev_instance;
-+ struct nv_host *host = host_set->private_data;
-+ unsigned int i;
-+ unsigned int handled = 0;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&host_set->lock, flags);
-+
-+ for (i = 0; i < host_set->n_ports; i++) {
-+ struct ata_port *ap;
-+
-+ ap = host_set->ports[i];
-+#ifdef ATA_FLAG_NOINTR
-+ if (ap &&
-+ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
-+#else
-+ if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
-+#endif
-+ struct ata_queued_cmd *qc;
-+
-+ qc = ata_qc_from_tag(ap, ap->active_tag);
-+ if (qc && (!(qc->tf.ctl & ATA_NIEN)))
-+ handled += ata_host_intr(ap, qc);
-+ else
-+ // No request pending? Clear interrupt status
-+ // anyway, in case there's one pending.
-+ ap->ops->check_status(ap);
-+ }
-+
-+ }
-+
-+ if (host->host_desc->check_hotplug)
-+ host->host_desc->check_hotplug(host_set);
-+
-+ spin_unlock_irqrestore(&host_set->lock, flags);
-+
-+ return IRQ_RETVAL(handled);
-+}
-+
-+static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
-+{
-+ struct ata_host_set *host_set = ap->host_set;
-+ struct nv_host *host = host_set->private_data;
-+
-+ if (sc_reg > SCR_CONTROL)
-+ return 0xffffffffU;
-+
-+ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
-+ return readl((void*)ap->ioaddr.scr_addr + (sc_reg * 4));
-+ else
-+ return inl(ap->ioaddr.scr_addr + (sc_reg * 4));
-+}
-+
-+static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
-+{
-+ struct ata_host_set *host_set = ap->host_set;
-+ struct nv_host *host = host_set->private_data;
-+
-+ if (sc_reg > SCR_CONTROL)
-+ return;
-+
-+ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
-+ writel(val, (void*)ap->ioaddr.scr_addr + (sc_reg * 4));
-+ else
-+ outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
-+}
-+
-+static void nv_host_stop (struct ata_host_set *host_set)
-+{
-+ struct nv_host *host = host_set->private_data;
-+
-+ // Disable hotplug event interrupts.
-+ if (host->host_desc->disable_hotplug)
-+ host->host_desc->disable_hotplug(host_set);
-+
-+ nv_sgpio_host_cleanup(host);
-+ kfree(host);
-+#ifdef RHAS3U7
-+
-+ ata_host_stop(host_set);
-+#endif
-+}
-+
-+static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
-+{
-+ static int printed_version = 0;
-+ struct nv_host *host;
-+ struct ata_port_info *ppi;
-+ struct ata_probe_ent *probe_ent;
-+ int pci_dev_busy = 0;
-+ int rc;
-+ u32 bar;
-+
-+ // Make sure this is a SATA controller by counting the number of bars
-+ // (NVIDIA SATA controllers will always have six bars). Otherwise,
-+ // it's an IDE controller and we ignore it.
-+ for (bar=0; bar<6; bar++)
-+ if (pci_resource_start(pdev, bar) == 0)
-+ return -ENODEV;
-+
-+ if (!printed_version++)
-+ printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
-+
-+ rc = pci_enable_device(pdev);
-+ if (rc)
-+ goto err_out;
-+
-+ rc = pci_request_regions(pdev, DRV_NAME);
-+ if (rc) {
-+ pci_dev_busy = 1;
-+ goto err_out_disable;
-+ }
-+
-+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-+ if (rc)
-+ goto err_out_regions;
-+#ifndef RHAS3U7
-+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
-+ if (rc)
-+ goto err_out_regions;
-+#endif
-+ rc = -ENOMEM;
-+
-+ ppi = &nv_port_info;
-+
-+ probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY|ATA_PORT_SECONDARY);
-+
-+ if (!probe_ent)
-+ goto err_out_regions;
-+
-+ host = kmalloc(sizeof(struct nv_host), GFP_KERNEL);
-+ if (!host)
-+ goto err_out_free_ent;
-+
-+ memset(host, 0, sizeof(struct nv_host));
-+ host->host_desc = &nv_device_tbl[ent->driver_data];
-+
-+ probe_ent->private_data = host;
-+
-+ if (pci_resource_flags(pdev, 5) & IORESOURCE_MEM)
-+ host->host_flags |= NV_HOST_FLAGS_SCR_MMIO;
-+
-+ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) {
-+ unsigned long base;
-+
-+ probe_ent->mmio_base = ioremap(pci_resource_start(pdev, 5),
-+ pci_resource_len(pdev, 5));
-+ if (probe_ent->mmio_base == NULL) {
-+ rc = -EIO;
-+ goto err_out_free_host;
-+ }
-+
-+ base = (unsigned long)probe_ent->mmio_base;
-+
-+ probe_ent->port[0].scr_addr =
-+ base + NV_PORT0_SCR_REG_OFFSET;
-+ probe_ent->port[1].scr_addr =
-+ base + NV_PORT1_SCR_REG_OFFSET;
-+ } else {
-+
-+ probe_ent->port[0].scr_addr =
-+ pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET;
-+ probe_ent->port[1].scr_addr =
-+ pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET;
-+ }
-+
-+ pci_set_master(pdev);
-+#ifdef RHAS3U7
-+ ata_add_to_probe_list(probe_ent);
-+
-+ if (nv_sgpio_capable(ent))
-+ nv_sgpio_init(pdev, host);
-+ // Enable hotplug event interrupts.
-+ if (host->host_desc->enable_hotplug)
-+ host->host_desc->enable_hotplug(probe_ent);
-+
-+ return 0;
-+#else
-+ rc = ata_device_add(probe_ent);
-+ if (rc != NV_PORTS)
-+ goto err_out_iounmap;
-+
-+ if (nv_sgpio_capable(ent))
-+ nv_sgpio_init(pdev, host);
-+ // Enable hotplug event interrupts.
-+ if (host->host_desc->enable_hotplug)
-+ host->host_desc->enable_hotplug(probe_ent);
-+
-+ kfree(probe_ent);
-+
-+ return 0;
-+
-+err_out_iounmap:
-+ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
-+ iounmap(probe_ent->mmio_base);
-+#endif
-+err_out_free_host:
-+ kfree(host);
-+err_out_free_ent:
-+ kfree(probe_ent);
-+err_out_regions:
-+ pci_release_regions(pdev);
-+err_out_disable:
-+ if (!pci_dev_busy)
-+ pci_disable_device(pdev);
-+err_out:
-+ return rc;
-+}
-+
-+
-+static int nv_port_start(struct ata_port *ap)
-+{
-+ int stat;
-+ struct nv_port *port;
-+
-+ stat = ata_port_start(ap);
-+ if (stat) {
-+ return stat;
-+ }
-+
-+ port = kmalloc(sizeof(struct nv_port), GFP_KERNEL);
-+ if (!port)
-+ goto err_out_no_free;
-+
-+ memset(port, 0, sizeof(struct nv_port));
-+
-+ ap->private_data = port;
-+ return 0;
-+
-+err_out_no_free:
-+ return 1;
-+}
-+
-+static void nv_port_stop(struct ata_port *ap)
-+{
-+ nv_sgpio_clear_all_leds(ap);
-+
-+ if (ap->private_data) {
-+ kfree(ap->private_data);
-+ ap->private_data = NULL;
-+ }
-+ ata_port_stop(ap);
-+}
-+
-+static int nv_qc_issue(struct ata_queued_cmd *qc)
-+{
-+ struct nv_port *port = qc->ap->private_data;
-+
-+ if (port)
-+ port->port_sgpio.activity.flags.recent_activity = 1;
-+ return (ata_qc_issue_prot(qc));
-+}
-+
-+
-+
-+
-+static void nv_enable_hotplug(struct ata_probe_ent *probe_ent)
-+{
-+ u8 intr_mask;
-+
-+ outb(NV_INT_STATUS_HOTPLUG,
-+ probe_ent->port[0].scr_addr + NV_INT_STATUS);
-+
-+ intr_mask = inb(probe_ent->port[0].scr_addr + NV_INT_ENABLE);
-+ intr_mask |= NV_INT_ENABLE_HOTPLUG;
-+
-+ outb(intr_mask, probe_ent->port[0].scr_addr + NV_INT_ENABLE);
-+}
-+
-+static void nv_disable_hotplug(struct ata_host_set *host_set)
-+{
-+ u8 intr_mask;
-+
-+ intr_mask = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
-+
-+ intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
-+
-+ outb(intr_mask, host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
-+}
-+
-+static void nv_check_hotplug(struct ata_host_set *host_set)
-+{
-+ u8 intr_status;
-+
-+ intr_status = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
-+
-+ // Clear interrupt status.
-+ outb(0xff, host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
-+
-+ if (intr_status & NV_INT_STATUS_HOTPLUG) {
-+ if (intr_status & NV_INT_STATUS_PDEV_ADDED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Primary device added\n");
-+
-+ if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Primary device removed\n");
-+
-+ if (intr_status & NV_INT_STATUS_SDEV_ADDED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Secondary device added\n");
-+
-+ if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Secondary device removed\n");
-+ }
-+}
-+
-+static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent)
-+{
-+ struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-+ u8 intr_mask;
-+ u8 regval;
-+
-+ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
-+ regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
-+ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
-+
-+ writeb(NV_INT_STATUS_HOTPLUG, probe_ent->mmio_base + NV_INT_STATUS_CK804);
-+
-+ intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_CK804);
-+ intr_mask |= NV_INT_ENABLE_HOTPLUG;
-+
-+ writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_CK804);
-+}
-+
-+static void nv_disable_hotplug_ck804(struct ata_host_set *host_set)
-+{
-+ struct pci_dev *pdev = to_pci_dev(host_set->dev);
-+ u8 intr_mask;
-+ u8 regval;
-+
-+ intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_CK804);
-+
-+ intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
-+
-+ writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_CK804);
-+
-+ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
-+ regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
-+ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
-+}
-+
-+static void nv_check_hotplug_ck804(struct ata_host_set *host_set)
-+{
-+ u8 intr_status;
-+
-+ intr_status = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
-+
-+ // Clear interrupt status.
-+ writeb(0xff, host_set->mmio_base + NV_INT_STATUS_CK804);
-+
-+ if (intr_status & NV_INT_STATUS_HOTPLUG) {
-+ if (intr_status & NV_INT_STATUS_PDEV_ADDED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Primary device added\n");
-+
-+ if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Primary device removed\n");
-+
-+ if (intr_status & NV_INT_STATUS_SDEV_ADDED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Secondary device added\n");
-+
-+ if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Secondary device removed\n");
-+ }
-+}
-+static void nv_enable_hotplug_mcp55(struct ata_probe_ent *probe_ent)
-+{
-+ struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-+ u8 intr_mask;
-+ u8 regval;
-+
-+ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
-+ regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
-+ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
-+
-+ writeb(0x0c, probe_ent->mmio_base + NV_INT_STATUS_MCP55);
-+ writeb(0x0c, probe_ent->mmio_base + NV_INT_STATUS_MCP55+2);
-+
-+ intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_MCP55);
-+ intr_mask |= 0x0c;
-+ writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_MCP55);
-+
-+ intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_MCP55+2);
-+ intr_mask |= 0x0c;
-+ writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_MCP55+2);
-+}
-+
-+static void nv_disable_hotplug_mcp55(struct ata_host_set *host_set)
-+{
-+ struct pci_dev *pdev = to_pci_dev(host_set->dev);
-+ u8 intr_mask;
-+ u8 regval;
-+
-+ intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_MCP55);
-+ intr_mask &= ~(0x0C);
-+ writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_MCP55);
-+
-+ intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_MCP55+2);
-+ intr_mask &= ~(0x0C);
-+ writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_MCP55+2);
-+
-+ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
-+ regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
-+ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
-+}
-+
-+static void nv_check_hotplug_mcp55(struct ata_host_set *host_set)
-+{
-+ u8 intr_status,intr_status1;
-+
-+ intr_status = readb(host_set->mmio_base + NV_INT_STATUS_MCP55);
-+ intr_status1 = readb(host_set->mmio_base + NV_INT_STATUS_MCP55+2);
-+
-+ // Clear interrupt status.
-+ writeb(0xff, host_set->mmio_base + NV_INT_STATUS_MCP55);
-+ writeb(0xff, host_set->mmio_base + NV_INT_STATUS_MCP55+2);
-+
-+ if ((intr_status & 0x0c) || (intr_status1&0x0c)) {
-+ if (intr_status & 0x04)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Primary device added\n");
-+
-+ if (intr_status & 0x08)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Primary device removed\n");
-+
-+ if (intr_status1 & 0x04)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Secondary device added\n");
-+
-+ if (intr_status1 & 0x08)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Secondary device removed\n");
-+ }
-+}
-+
-+
-+static void nv_sgpio_init(struct pci_dev *pdev, struct nv_host *phost)
-+{
-+ u16 csr_add;
-+ u32 cb_add, temp32;
-+ struct device *dev = pci_dev_to_dev(pdev);
-+ struct ata_host_set *host_set = dev_get_drvdata(dev);
-+ u8 pro=0;
-+ pci_read_config_word(pdev, NV_SGPIO_PCI_CSR_OFFSET, &csr_add);
-+ pci_read_config_dword(pdev, NV_SGPIO_PCI_CB_OFFSET, &cb_add);
-+ pci_read_config_byte(pdev, 0xA4, &pro);
-+
-+ if (csr_add == 0 || cb_add == 0)
-+ return;
-+
-+
-+ if (!(pro&0x40))
-+ return;
-+
-+
-+ temp32 = csr_add;
-+ phost->host_sgpio.pcsr = (void *)temp32;
-+ phost->host_sgpio.pcb = phys_to_virt(cb_add);
-+
-+ if (phost->host_sgpio.pcb->nvcr.bit.init_cnt!=0x2 || phost->host_sgpio.pcb->nvcr.bit.cbver!=0x0)
-+ return;
-+
-+ if (temp32 <=0x200 || temp32 >=0xFFFE )
-+ return;
-+
-+
-+ if (cb_add<=0x80000 || cb_add>=0x9FC00)
-+ return;
-+
-+
-+ if (phost->host_sgpio.pcb->scratch_space == 0) {
-+ spin_lock_init(&nv_sgpio_lock);
-+ phost->host_sgpio.share.plock = &nv_sgpio_lock;
-+ phost->host_sgpio.share.ptstamp = &nv_sgpio_tstamp;
-+ phost->host_sgpio.pcb->scratch_space =
-+ (unsigned long)&phost->host_sgpio.share;
-+ spin_lock(phost->host_sgpio.share.plock);
-+ nv_sgpio_reset(phost->host_sgpio.pcsr);
-+ phost->host_sgpio.pcb->cr0 =
-+ SET_ENABLE(phost->host_sgpio.pcb->cr0);
-+
-+ spin_unlock(phost->host_sgpio.share.plock);
-+ }
-+
-+ phost->host_sgpio.share =
-+ *(struct nv_sgpio_host_share *)(unsigned long)
-+ phost->host_sgpio.pcb->scratch_space;
-+ phost->host_sgpio.flags.sgpio_enabled = 1;
-+ phost->pdev = pdev;
-+ init_timer(&phost->host_sgpio.sgpio_timer);
-+ phost->host_sgpio.sgpio_timer.data = (unsigned long)phost;
-+ nv_sgpio_set_timer(&phost->host_sgpio.sgpio_timer,
-+ NV_SGPIO_UPDATE_TICK);
-+}
-+
-+static void __nv_sgpio_timer_handler(unsigned long context);
-+static void nv_sgpio_set_timer(struct timer_list *ptimer, unsigned int timeout_msec)
-+{
-+ if (!ptimer)
-+ return;
-+ ptimer->function = __nv_sgpio_timer_handler;
-+ ptimer->expires = msecs_to_jiffies(timeout_msec) + jiffies;
-+ add_timer(ptimer);
-+}
-+static void __nv_sgpio_timer_handler(unsigned long context)
-+{
-+ struct nv_host *phost = (struct nv_host*)context;
-+ struct device *dev = pci_dev_to_dev(phost->pdev);
-+ struct ata_host_set *host_set = dev_get_drvdata(dev);
-+
-+ if (!host_set)
-+ nv_sgpio_set_timer(&phost->host_sgpio.sgpio_timer,
-+ NV_SGPIO_UPDATE_TICK);
-+ else
-+ nv_sgpio_timer_handler(host_set);
-+
-+}
-+
-+static void nv_sgpio_timer_handler(unsigned long context)
-+{
-+
-+ struct ata_host_set *host_set = (struct ata_host_set *)context;
-+ struct nv_host *host;
-+ u8 count, host_offset, port_offset;
-+ union nv_sgpio_tx tx;
-+ bool on_off;
-+ unsigned long mask = 0xFFFF;
-+ struct nv_port *port;
-+
-+ if (!host_set)
-+ goto err_out;
-+ else
-+ host = (struct nv_host *)host_set->private_data;
-+
-+ if (!host->host_sgpio.flags.sgpio_enabled)
-+ goto err_out;
-+
-+ host_offset = nv_sgpio_tx_host_offset(host_set);
-+
-+ spin_lock(host->host_sgpio.share.plock);
-+ tx = host->host_sgpio.pcb->tx[host_offset];
-+ spin_unlock(host->host_sgpio.share.plock);
-+
-+ for (count = 0; count < host_set->n_ports; count++) {
-+ struct ata_port *ap;
-+
-+ ap = host_set->ports[count];
-+
-+ if (!(ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)))
-+ continue;
-+
-+ port = (struct nv_port *)ap->private_data;
-+ if (!port)
-+ continue;
-+ port_offset = nv_sgpio_tx_port_offset(ap);
-+ on_off = GET_ACTIVITY(tx.tx_port[port_offset]);
-+ if (nv_sgpio_update_led(&port->port_sgpio.activity, &on_off)) {
-+ tx.tx_port[port_offset] =
-+ SET_ACTIVITY(tx.tx_port[port_offset], on_off);
-+ host->host_sgpio.flags.need_update = 1;
-+ }
-+ }
-+
-+
-+ if (host->host_sgpio.flags.need_update) {
-+ spin_lock(host->host_sgpio.share.plock);
-+ if (nv_sgpio_get_func(host_set)
-+ % NV_CNTRLR_SHARE_INIT == 0) {
-+ host->host_sgpio.pcb->tx[host_offset].all &= mask;
-+ mask = mask << 16;
-+ tx.all &= mask;
-+ } else {
-+ tx.all &= mask;
-+ mask = mask << 16;
-+ host->host_sgpio.pcb->tx[host_offset].all &= mask;
-+ }
-+ host->host_sgpio.pcb->tx[host_offset].all |= tx.all;
-+ spin_unlock(host->host_sgpio.share.plock);
-+
-+ if (nv_sgpio_send_cmd(host, NV_SGPIO_CMD_WRITE_DATA)) {
-+ host->host_sgpio.flags.need_update = 0;
-+ return;
-+ }
-+ } else {
-+ nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer,
-+ NV_SGPIO_UPDATE_TICK);
-+ }
-+err_out:
-+ return;
-+}
-+
-+static bool nv_sgpio_send_cmd(struct nv_host *host, u8 cmd)
-+{
-+ u8 csr;
-+ unsigned long *ptstamp;
-+
-+ spin_lock(host->host_sgpio.share.plock);
-+ ptstamp = host->host_sgpio.share.ptstamp;
-+ if (jiffies_to_msecs1(jiffies - *ptstamp) >= NV_SGPIO_MIN_UPDATE_DELTA) {
-+ csr =
-+ nv_sgpio_get_csr((unsigned long)host->host_sgpio.pcsr);
-+ if ((GET_SGPIO_STATUS(csr) != NV_SGPIO_STATE_OPERATIONAL) ||
-+ (GET_CMD_STATUS(csr) == NV_SGPIO_CMD_ACTIVE)) {
-+ //nv_sgpio_reset(host->host_sgpio.pcsr);
-+ } else {
-+ host->host_sgpio.pcb->cr0 =
-+ SET_ENABLE(host->host_sgpio.pcb->cr0);
-+ csr = 0;
-+ csr = SET_CMD(csr, cmd);
-+ nv_sgpio_set_csr(csr,
-+ (unsigned long)host->host_sgpio.pcsr);
-+ *ptstamp = jiffies;
-+ }
-+ spin_unlock(host->host_sgpio.share.plock);
-+ nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer,
-+ NV_SGPIO_UPDATE_TICK);
-+ return 1;
-+ } else {
-+ spin_unlock(host->host_sgpio.share.plock);
-+ nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer,
-+ (NV_SGPIO_MIN_UPDATE_DELTA -
-+ jiffies_to_msecs1(jiffies - *ptstamp)));
-+ return 0;
-+ }
-+}
-+
-+static bool nv_sgpio_update_led(struct nv_sgpio_led *led, bool *on_off)
-+{
-+ bool need_update = 0;
-+
-+ if (led->force_off > 0) {
-+ led->force_off--;
-+ } else if (led->flags.recent_activity ^ led->flags.last_state) {
-+ *on_off = led->flags.recent_activity;
-+ led->flags.last_state = led->flags.recent_activity;
-+ need_update = 1;
-+ } else if ((led->flags.recent_activity & led->flags.last_state) &&
-+ (led->last_cons_active >= NV_SGPIO_MAX_ACTIVITY_ON)) {
-+ *on_off = NV_OFF;
-+ led->flags.last_state = NV_OFF;
-+ led->force_off = NV_SGPIO_MIN_FORCE_OFF;
-+ need_update = 1;
-+ }
-+
-+ if (*on_off)
-+ led->last_cons_active++;
-+ else
-+ led->last_cons_active = 0;
-+
-+ led->flags.recent_activity = 0;
-+ return need_update;
-+}
-+
-+static void nv_sgpio_reset(u8 *pcsr)
-+{
-+ u8 csr;
-+
-+ csr = nv_sgpio_get_csr((unsigned long)pcsr);
-+ if (GET_SGPIO_STATUS(csr) == NV_SGPIO_STATE_RESET) {
-+ csr = 0;
-+ csr = SET_CMD(csr, NV_SGPIO_CMD_RESET);
-+ nv_sgpio_set_csr(csr, (unsigned long)pcsr);
-+ }
-+ csr = 0;
-+ csr = SET_CMD(csr, NV_SGPIO_CMD_READ_PARAMS);
-+ nv_sgpio_set_csr(csr, (unsigned long)pcsr);
-+}
-+
-+static void nv_sgpio_host_cleanup(struct nv_host *host)
-+{
-+ u8 csr;
-+ if (!host)
-+ return;
-+
-+ if (host->host_sgpio.flags.sgpio_enabled){
-+ spin_lock(host->host_sgpio.share.plock);
-+ host->host_sgpio.pcb->cr0 =
-+ SET_ENABLE(host->host_sgpio.pcb->cr0);
-+ csr = 0;
-+ csr = SET_CMD(csr, NV_SGPIO_CMD_WRITE_DATA);
-+ nv_sgpio_set_csr(csr,
-+ (unsigned long)host->host_sgpio.pcsr);
-+ spin_unlock(host->host_sgpio.share.plock);
-+
-+ if (timer_pending(&host->host_sgpio.sgpio_timer))
-+ del_timer(&host->host_sgpio.sgpio_timer);
-+ host->host_sgpio.flags.sgpio_enabled = 0;
-+ host->host_sgpio.pcb->scratch_space = 0;
-+ }
-+
-+}
-+
-+static void nv_sgpio_clear_all_leds(struct ata_port *ap)
-+{
-+ struct nv_port *port = ap->private_data;
-+ struct nv_host *host;
-+ u8 host_offset, port_offset;
-+
-+ if (!port || !ap->host_set)
-+ return;
-+ if (!ap->host_set->private_data)
-+ return;
-+
-+ host = ap->host_set->private_data;
-+ if (!host->host_sgpio.flags.sgpio_enabled)
-+ return;
-+
-+ host_offset = nv_sgpio_tx_host_offset(ap->host_set);
-+ port_offset = nv_sgpio_tx_port_offset(ap);
-+
-+ spin_lock(host->host_sgpio.share.plock);
-+ host->host_sgpio.pcb->tx[host_offset].tx_port[port_offset] = 0;
-+ host->host_sgpio.flags.need_update = 1;
-+ spin_unlock(host->host_sgpio.share.plock);
-+}
-+
-+
-+
-+static int __init nv_init(void)
-+{
-+#ifdef RHAS3U7
-+ int rc;
-+ rc = pci_module_init(&nv_pci_driver);
-+ if (rc)
-+ return rc;
-+
-+ rc = scsi_register_module(MODULE_SCSI_HA, &nv_sht);
-+ if (rc) {
-+ pci_unregister_driver(&nv_pci_driver);
-+ /* TODO: does scsi_register_module return errno val? */
-+ return -ENODEV;
-+ }
-+
-+ return 0;
-+#else
-+ return pci_module_init(&nv_pci_driver);
-+#endif
-+}
-+
-+static void __exit nv_exit(void)
-+{
-+#ifdef RHAS3U7
-+ scsi_unregister_module(MODULE_SCSI_HA, &nv_sht);
-+#endif
-+ pci_unregister_driver(&nv_pci_driver);
-+
-+}
-+
-+module_init(nv_init);
-+module_exit(nv_exit);
++/*\r
++ * sata_nv.c - NVIDIA nForce SATA\r
++ *\r
++ * Copyright 2004 NVIDIA Corp. All rights reserved.\r
++ * Copyright 2004 Andrew Chew\r
++ *\r
++ * The contents of this file are subject to the Open\r
++ * Software License version 1.1 that can be found at\r
++ * http://www.opensource.org/licenses/osl-1.1.txt and is included herein\r
++ * by reference.\r
++ *\r
++ * Alternatively, the contents of this file may be used under the terms\r
++ * of the GNU General Public License version 2 (the "GPL") as distributed\r
++ * in the kernel source COPYING file, in which case the provisions of\r
++ * the GPL are applicable instead of the above. If you wish to allow\r
++ * the use of your version of this file only under the terms of the\r
++ * GPL and not to allow others to use your version of this file under\r
++ * the OSL, indicate your decision by deleting the provisions above and\r
++ * replace them with the notice and other provisions required by the GPL.\r
++ * If you do not delete the provisions above, a recipient may use your\r
++ * version of this file under either the OSL or the GPL.\r
++ *\r
++ * 0.11\r
++ * - Added sgpio support\r
++ *\r
++ * 0.10\r
++ * - Fixed spurious interrupts issue seen with the Maxtor 6H500F0 500GB\r
++ * drive. Also made the check_hotplug() callbacks return whether there\r
++ * was a hotplug interrupt or not. This was not the source of the\r
++ * spurious interrupts, but is the right thing to do anyway.\r
++ *\r
++ * 0.09\r
++ * - Fixed bug introduced by 0.08's MCP51 and MCP55 support.\r
++ *\r
++ * 0.08\r
++ * - Added support for MCP51 and MCP55.\r
++ *\r
++ * 0.07\r
++ * - Added support for RAID class code.\r
++ *\r
++ * 0.06\r
++ * - Added generic SATA support by using a pci_device_id that filters on\r
++ * the IDE storage class code.\r
++ *\r
++ * 0.03\r
++ * - Fixed a bug where the hotplug handlers for non-CK804/MCP04 were using\r
++ * mmio_base, which is only set for the CK804/MCP04 case.\r
++ *\r
++ * 0.02\r
++ * - Added support for CK804 SATA controller.\r
++ *\r
++ * 0.01\r
++ * - Initial revision.\r
++ */\r
++\r
++#include <linux/config.h>\r
++#include <linux/version.h>\r
++#include <linux/kernel.h>\r
++#include <linux/module.h>\r
++#include <linux/pci.h>\r
++#include <linux/init.h>\r
++#include <linux/blkdev.h>\r
++#include <linux/delay.h>\r
++#include <linux/interrupt.h>\r
++#include "scsi.h"\r
++#include <scsi/scsi_host.h>\r
++#include <linux/libata.h>\r
++\r
++#define DRV_NAME "sata_nv"\r
++#define DRV_VERSION "0.11-Driver Package V1.21"\r
++\r
++#define NV_PORTS 2\r
++#define NV_PIO_MASK 0x1f\r
++#define NV_MWDMA_MASK 0x07\r
++#define NV_UDMA_MASK 0x7f\r
++#define NV_PORT0_SCR_REG_OFFSET 0x00\r
++#define NV_PORT1_SCR_REG_OFFSET 0x40\r
++\r
++#define NV_INT_STATUS 0x10\r
++#define NV_INT_STATUS_CK804 0x440\r
++#define NV_INT_STATUS_MCP55 0x440\r
++#define NV_INT_STATUS_PDEV_INT 0x01\r
++#define NV_INT_STATUS_PDEV_PM 0x02\r
++#define NV_INT_STATUS_PDEV_ADDED 0x04\r
++#define NV_INT_STATUS_PDEV_REMOVED 0x08\r
++#define NV_INT_STATUS_SDEV_INT 0x10\r
++#define NV_INT_STATUS_SDEV_PM 0x20\r
++#define NV_INT_STATUS_SDEV_ADDED 0x40\r
++#define NV_INT_STATUS_SDEV_REMOVED 0x80\r
++#define NV_INT_STATUS_PDEV_HOTPLUG (NV_INT_STATUS_PDEV_ADDED | \\r
++ NV_INT_STATUS_PDEV_REMOVED)\r
++#define NV_INT_STATUS_SDEV_HOTPLUG (NV_INT_STATUS_SDEV_ADDED | \\r
++ NV_INT_STATUS_SDEV_REMOVED)\r
++#define NV_INT_STATUS_HOTPLUG (NV_INT_STATUS_PDEV_HOTPLUG | \\r
++ NV_INT_STATUS_SDEV_HOTPLUG)\r
++\r
++#define NV_INT_ENABLE 0x11\r
++#define NV_INT_ENABLE_CK804 0x441\r
++#define NV_INT_ENABLE_MCP55 0x444\r
++#define NV_INT_ENABLE_PDEV_MASK 0x01\r
++#define NV_INT_ENABLE_PDEV_PM 0x02\r
++#define NV_INT_ENABLE_PDEV_ADDED 0x04\r
++#define NV_INT_ENABLE_PDEV_REMOVED 0x08\r
++#define NV_INT_ENABLE_SDEV_MASK 0x10\r
++#define NV_INT_ENABLE_SDEV_PM 0x20\r
++#define NV_INT_ENABLE_SDEV_ADDED 0x40\r
++#define NV_INT_ENABLE_SDEV_REMOVED 0x80\r
++#define NV_INT_ENABLE_PDEV_HOTPLUG (NV_INT_ENABLE_PDEV_ADDED | \\r
++ NV_INT_ENABLE_PDEV_REMOVED)\r
++#define NV_INT_ENABLE_SDEV_HOTPLUG (NV_INT_ENABLE_SDEV_ADDED | \\r
++ NV_INT_ENABLE_SDEV_REMOVED)\r
++#define NV_INT_ENABLE_HOTPLUG (NV_INT_ENABLE_PDEV_HOTPLUG | \\r
++ NV_INT_ENABLE_SDEV_HOTPLUG)\r
++\r
++#define NV_INT_CONFIG 0x12\r
++#define NV_INT_CONFIG_METHD 0x01 // 0 = INT, 1 = SMI\r
++#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E\r
++#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F\r
++\r
++// For PCI config register 20\r
++#define NV_MCP_SATA_CFG_20 0x50\r
++#define NV_MCP_SATA_CFG_20_SATA_SPACE_EN 0x04\r
++\r
++\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)\r
++#define RHAS3U7\r
++#endif\r
++#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,16)\r
++#define SLES10\r
++#endif\r
++\r
++//sgpio\r
++// Sgpio defines\r
++// SGPIO state defines\r
++#define NV_SGPIO_STATE_RESET 0\r
++#define NV_SGPIO_STATE_OPERATIONAL 1\r
++#define NV_SGPIO_STATE_ERROR 2\r
++\r
++// SGPIO command opcodes\r
++#define NV_SGPIO_CMD_RESET 0\r
++#define NV_SGPIO_CMD_READ_PARAMS 1\r
++#define NV_SGPIO_CMD_READ_DATA 2\r
++#define NV_SGPIO_CMD_WRITE_DATA 3\r
++\r
++// SGPIO command status defines\r
++#define NV_SGPIO_CMD_OK 0\r
++#define NV_SGPIO_CMD_ACTIVE 1\r
++#define NV_SGPIO_CMD_ERR 2\r
++\r
++#define NV_SGPIO_UPDATE_TICK 90\r
++#define NV_SGPIO_MIN_UPDATE_DELTA 33\r
++#define NV_CNTRLR_SHARE_INIT 2\r
++#define NV_SGPIO_MAX_ACTIVITY_ON 20\r
++#define NV_SGPIO_MIN_FORCE_OFF 5\r
++#define NV_SGPIO_PCI_CSR_OFFSET 0x58\r
++#define NV_SGPIO_PCI_CB_OFFSET 0x5C\r
++#define NV_SGPIO_DFLT_CB_SIZE 256\r
++#define NV_ON 1\r
++#define NV_OFF 0\r
++#ifndef bool\r
++#define bool u8\r
++#endif\r
++\r
++static inline unsigned int jiffies_to_msecs1(const unsigned long j)\r
++{\r
++#if HZ <= 1000 && !(1000 % HZ)\r
++ return (1000 / HZ) * j;\r
++#elif HZ > 1000 && !(HZ % 1000)\r
++ return (j + (HZ / 1000) - 1)/(HZ / 1000);\r
++#else\r
++ return (j * 1000) / HZ;\r
++#endif\r
++}\r
++\r
++#define BF_EXTRACT(v, off, bc) \\r
++ ((((u8)(v)) >> (off)) & ((1 << (bc)) - 1))\r
++\r
++#define BF_INS(v, ins, off, bc) \\r
++ (((v) & ~((((1 << (bc)) - 1)) << (off))) | \\r
++ (((u8)(ins)) << (off)))\r
++\r
++#define BF_EXTRACT_U32(v, off, bc) \\r
++ ((((u32)(v)) >> (off)) & ((1 << (bc)) - 1))\r
++\r
++#define BF_INS_U32(v, ins, off, bc) \\r
++ (((v) & ~((((1 << (bc)) - 1)) << (off))) | \\r
++ (((u32)(ins)) << (off)))\r
++\r
++#define GET_SGPIO_STATUS(v) BF_EXTRACT(v, 0, 2)\r
++#define GET_CMD_STATUS(v) BF_EXTRACT(v, 3, 2)\r
++#define GET_CMD(v) BF_EXTRACT(v, 5, 3)\r
++#define SET_CMD(v, cmd) BF_INS(v, cmd, 5, 3) \r
++\r
++#define GET_ENABLE(v) BF_EXTRACT_U32(v, 23, 1)\r
++#define SET_ENABLE(v) BF_INS_U32(v, 1, 23, 1)\r
++\r
++// Needs to have a u8 bit-field insert.\r
++#define GET_ACTIVITY(v) BF_EXTRACT(v, 5, 3)\r
++#define SET_ACTIVITY(v, on_off) BF_INS(v, on_off, 5, 3)\r
++\r
++union nv_sgpio_nvcr \r
++{\r
++ struct {\r
++ u8 init_cnt;\r
++ u8 cb_size;\r
++ u8 cbver;\r
++ u8 rsvd;\r
++ } bit;\r
++ u32 all;\r
++};\r
++\r
++union nv_sgpio_tx \r
++{\r
++ u8 tx_port[4];\r
++ u32 all;\r
++};\r
++\r
++struct nv_sgpio_cb \r
++{\r
++ u64 scratch_space;\r
++ union nv_sgpio_nvcr nvcr;\r
++ u32 cr0;\r
++ u32 rsvd[4];\r
++ union nv_sgpio_tx tx[2];\r
++};\r
++\r
++struct nv_sgpio_host_share\r
++{\r
++ spinlock_t *plock;\r
++ unsigned long *ptstamp;\r
++};\r
++\r
++struct nv_sgpio_host_flags\r
++{\r
++ u8 sgpio_enabled:1;\r
++ u8 need_update:1;\r
++ u8 rsvd:6;\r
++};\r
++ \r
++struct nv_host_sgpio\r
++{\r
++ struct nv_sgpio_host_flags flags;\r
++ u8 *pcsr;\r
++ struct nv_sgpio_cb *pcb; \r
++ struct nv_sgpio_host_share share;\r
++ struct timer_list sgpio_timer;\r
++};\r
++\r
++struct nv_sgpio_port_flags\r
++{\r
++ u8 last_state:1;\r
++ u8 recent_activity:1;\r
++ u8 rsvd:6;\r
++};\r
++\r
++struct nv_sgpio_led \r
++{\r
++ struct nv_sgpio_port_flags flags;\r
++ u8 force_off;\r
++ u8 last_cons_active;\r
++};\r
++\r
++struct nv_port_sgpio\r
++{\r
++ struct nv_sgpio_led activity;\r
++};\r
++\r
++static spinlock_t nv_sgpio_lock;\r
++static unsigned long nv_sgpio_tstamp;\r
++\r
++static inline void nv_sgpio_set_csr(u8 csr, unsigned long pcsr)\r
++{\r
++ outb(csr, pcsr);\r
++}\r
++\r
++static inline u8 nv_sgpio_get_csr(unsigned long pcsr)\r
++{\r
++ return inb(pcsr);\r
++}\r
++\r
++static inline u8 nv_sgpio_get_func(struct ata_host_set *host_set)\r
++{\r
++ u8 devfn = (to_pci_dev(host_set->dev))->devfn;\r
++ return (PCI_FUNC(devfn));\r
++}\r
++\r
++static inline u8 nv_sgpio_tx_host_offset(struct ata_host_set *host_set)\r
++{\r
++ return (nv_sgpio_get_func(host_set)/NV_CNTRLR_SHARE_INIT);\r
++}\r
++\r
++static inline u8 nv_sgpio_calc_tx_offset(u8 cntrlr, u8 channel)\r
++{\r
++ return (sizeof(union nv_sgpio_tx) - (NV_CNTRLR_SHARE_INIT *\r
++ (cntrlr % NV_CNTRLR_SHARE_INIT)) - channel - 1);\r
++}\r
++\r
++static inline u8 nv_sgpio_tx_port_offset(struct ata_port *ap)\r
++{\r
++ u8 cntrlr = nv_sgpio_get_func(ap->host_set);\r
++ return (nv_sgpio_calc_tx_offset(cntrlr, ap->port_no));\r
++}\r
++\r
++static inline bool nv_sgpio_capable(const struct pci_device_id *ent)\r
++{\r
++ if (ent->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2)\r
++ return 1;\r
++ else\r
++ return 0;\r
++}\r
++\r
++\r
++\r
++\r
++\r
++\r
++static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);\r
++static irqreturn_t nv_interrupt (int irq, void *dev_instance,\r
++ struct pt_regs *regs);\r
++static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);\r
++static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);\r
++static void nv_host_stop (struct ata_host_set *host_set);\r
++static int nv_port_start(struct ata_port *ap);\r
++static void nv_port_stop(struct ata_port *ap);\r
++static int nv_qc_issue(struct ata_queued_cmd *qc);\r
++static void nv_enable_hotplug(struct ata_probe_ent *probe_ent);\r
++static void nv_disable_hotplug(struct ata_host_set *host_set);\r
++static void nv_check_hotplug(struct ata_host_set *host_set);\r
++static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent);\r
++static void nv_disable_hotplug_ck804(struct ata_host_set *host_set);\r
++static void nv_check_hotplug_ck804(struct ata_host_set *host_set);\r
++static void nv_enable_hotplug_mcp55(struct ata_probe_ent *probe_ent);\r
++static void nv_disable_hotplug_mcp55(struct ata_host_set *host_set);\r
++static void nv_check_hotplug_mcp55(struct ata_host_set *host_set);\r
++enum nv_host_type\r
++{\r
++ GENERIC,\r
++ NFORCE2,\r
++ NFORCE3,\r
++ CK804,\r
++ MCP55\r
++};\r
++\r
++static struct pci_device_id nv_pci_tbl[] = {\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,\r
++ PCI_ANY_ID, PCI_ANY_ID,\r
++ PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },\r
++ { 0, } /* terminate list */\r
++};\r
++\r
++#define NV_HOST_FLAGS_SCR_MMIO 0x00000001\r
++\r
++struct nv_host_desc\r
++{\r
++ enum nv_host_type host_type;\r
++ void (*enable_hotplug)(struct ata_probe_ent *probe_ent);\r
++ void (*disable_hotplug)(struct ata_host_set *host_set);\r
++ void (*check_hotplug)(struct ata_host_set *host_set);\r
++\r
++};\r
++static struct nv_host_desc nv_device_tbl[] = {\r
++ {\r
++ .host_type = GENERIC,\r
++ .enable_hotplug = NULL,\r
++ .disable_hotplug= NULL,\r
++ .check_hotplug = NULL,\r
++ },\r
++ {\r
++ .host_type = NFORCE2,\r
++ .enable_hotplug = nv_enable_hotplug,\r
++ .disable_hotplug= nv_disable_hotplug,\r
++ .check_hotplug = nv_check_hotplug,\r
++ },\r
++ {\r
++ .host_type = NFORCE3,\r
++ .enable_hotplug = nv_enable_hotplug,\r
++ .disable_hotplug= nv_disable_hotplug,\r
++ .check_hotplug = nv_check_hotplug,\r
++ },\r
++ { .host_type = CK804,\r
++ .enable_hotplug = nv_enable_hotplug_ck804,\r
++ .disable_hotplug= nv_disable_hotplug_ck804,\r
++ .check_hotplug = nv_check_hotplug_ck804,\r
++ },\r
++ { .host_type = MCP55,\r
++ .enable_hotplug = nv_enable_hotplug_mcp55,\r
++ .disable_hotplug= nv_disable_hotplug_mcp55,\r
++ .check_hotplug = nv_check_hotplug_mcp55,\r
++ },\r
++};\r
++\r
++\r
++struct nv_host\r
++{\r
++ struct nv_host_desc *host_desc;\r
++ unsigned long host_flags;\r
++ struct nv_host_sgpio host_sgpio;\r
++ struct pci_dev *pdev;\r
++};\r
++\r
++struct nv_port\r
++{\r
++ struct nv_port_sgpio port_sgpio;\r
++};\r
++\r
++// SGPIO function prototypes\r
++static void nv_sgpio_init(struct pci_dev *pdev, struct nv_host *phost);\r
++static void nv_sgpio_reset(u8 *pcsr);\r
++static void nv_sgpio_set_timer(struct timer_list *ptimer, \r
++ unsigned int timeout_msec);\r
++static void nv_sgpio_timer_handler(unsigned long ptr);\r
++static void nv_sgpio_host_cleanup(struct nv_host *host);\r
++static bool nv_sgpio_update_led(struct nv_sgpio_led *led, bool *on_off);\r
++static void nv_sgpio_clear_all_leds(struct ata_port *ap);\r
++static bool nv_sgpio_send_cmd(struct nv_host *host, u8 cmd);\r
++\r
++\r
++static struct pci_driver nv_pci_driver = {\r
++ .name = DRV_NAME,\r
++ .id_table = nv_pci_tbl,\r
++ .probe = nv_init_one,\r
++ .remove = ata_pci_remove_one,\r
++};\r
++\r
++\r
++#ifdef SLES10\r
++static struct scsi_host_template nv_sht = {\r
++#else\r
++static Scsi_Host_Template nv_sht = {\r
++#endif\r
++ .module = THIS_MODULE,\r
++ .name = DRV_NAME,\r
++#ifdef RHAS3U7\r
++ .detect = ata_scsi_detect,\r
++ .release = ata_scsi_release,\r
++#endif\r
++ .ioctl = ata_scsi_ioctl,\r
++ .queuecommand = ata_scsi_queuecmd,\r
++ .eh_strategy_handler = ata_scsi_error,\r
++ .can_queue = ATA_DEF_QUEUE,\r
++ .this_id = ATA_SHT_THIS_ID,\r
++ .sg_tablesize = LIBATA_MAX_PRD,\r
++ .max_sectors = ATA_MAX_SECTORS,\r
++ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,\r
++#ifdef RHAS3U7\r
++ .use_new_eh_code = ATA_SHT_NEW_EH_CODE,\r
++#endif\r
++ .emulated = ATA_SHT_EMULATED,\r
++ .use_clustering = ATA_SHT_USE_CLUSTERING,\r
++ .proc_name = DRV_NAME,\r
++#ifndef RHAS3U7\r
++ .dma_boundary = ATA_DMA_BOUNDARY,\r
++ .slave_configure = ata_scsi_slave_config,\r
++#endif\r
++ .bios_param = ata_std_bios_param,\r
++};\r
++\r
++static struct ata_port_operations nv_ops = {\r
++ .port_disable = ata_port_disable,\r
++ .tf_load = ata_tf_load,\r
++ .tf_read = ata_tf_read,\r
++ .exec_command = ata_exec_command,\r
++ .check_status = ata_check_status,\r
++ .dev_select = ata_std_dev_select,\r
++ .phy_reset = sata_phy_reset,\r
++ .bmdma_setup = ata_bmdma_setup,\r
++ .bmdma_start = ata_bmdma_start,\r
++ .bmdma_stop = ata_bmdma_stop,\r
++ .bmdma_status = ata_bmdma_status,\r
++ .qc_prep = ata_qc_prep,\r
++ .qc_issue = nv_qc_issue,\r
++ .eng_timeout = ata_eng_timeout,\r
++ .irq_handler = nv_interrupt,\r
++ .irq_clear = ata_bmdma_irq_clear,\r
++ .scr_read = nv_scr_read,\r
++ .scr_write = nv_scr_write,\r
++ .port_start = nv_port_start,\r
++ .port_stop = nv_port_stop,\r
++ .host_stop = nv_host_stop,\r
++};\r
++\r
++/* FIXME: The hardware provides the necessary SATA PHY controls\r
++ * to support ATA_FLAG_SATA_RESET. However, it is currently\r
++ * necessary to disable that flag, to solve misdetection problems.\r
++ * See http://bugme.osdl.org/show_bug.cgi?id=3352 for more info.\r
++ *\r
++ * This problem really needs to be investigated further. But in the\r
++ * meantime, we avoid ATA_FLAG_SATA_RESET to get people working.\r
++ */\r
++static struct ata_port_info nv_port_info = {\r
++ .sht = &nv_sht,\r
++ .host_flags = ATA_FLAG_SATA |\r
++ /* ATA_FLAG_SATA_RESET | */\r
++ ATA_FLAG_SRST |\r
++ ATA_FLAG_NO_LEGACY,\r
++ .pio_mask = NV_PIO_MASK,\r
++ .mwdma_mask = NV_MWDMA_MASK,\r
++ .udma_mask = NV_UDMA_MASK,\r
++ .port_ops = &nv_ops,\r
++};\r
++\r
++MODULE_AUTHOR("NVIDIA");\r
++MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");\r
++MODULE_LICENSE("GPL");\r
++MODULE_DEVICE_TABLE(pci, nv_pci_tbl);\r
++MODULE_VERSION(DRV_VERSION);\r
++\r
++static irqreturn_t nv_interrupt (int irq, void *dev_instance,\r
++ struct pt_regs *regs)\r
++{\r
++ struct ata_host_set *host_set = dev_instance;\r
++ struct nv_host *host = host_set->private_data;\r
++ unsigned int i;\r
++ unsigned int handled = 0;\r
++ unsigned long flags;\r
++\r
++ spin_lock_irqsave(&host_set->lock, flags);\r
++\r
++ for (i = 0; i < host_set->n_ports; i++) {\r
++ struct ata_port *ap;\r
++\r
++ ap = host_set->ports[i];\r
++#ifdef ATA_FLAG_NOINTR\r
++ if (ap &&\r
++ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {\r
++#else\r
++ if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {\r
++#endif \r
++ struct ata_queued_cmd *qc;\r
++\r
++ qc = ata_qc_from_tag(ap, ap->active_tag);\r
++ if (qc && (!(qc->tf.ctl & ATA_NIEN)))\r
++ handled += ata_host_intr(ap, qc);\r
++ else\r
++ // No request pending? Clear interrupt status\r
++ // anyway, in case there's one pending.\r
++ ap->ops->check_status(ap);\r
++ }\r
++\r
++ }\r
++\r
++ if (host->host_desc->check_hotplug)\r
++ host->host_desc->check_hotplug(host_set);\r
++\r
++ spin_unlock_irqrestore(&host_set->lock, flags);\r
++\r
++ return IRQ_RETVAL(handled);\r
++}\r
++\r
++static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)\r
++{\r
++ struct ata_host_set *host_set = ap->host_set;\r
++ struct nv_host *host = host_set->private_data;\r
++\r
++ if (sc_reg > SCR_CONTROL)\r
++ return 0xffffffffU;\r
++\r
++ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)\r
++ return readl((void*)ap->ioaddr.scr_addr + (sc_reg * 4));\r
++ else\r
++ return inl(ap->ioaddr.scr_addr + (sc_reg * 4));\r
++}\r
++\r
++static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)\r
++{\r
++ struct ata_host_set *host_set = ap->host_set;\r
++ struct nv_host *host = host_set->private_data;\r
++\r
++ if (sc_reg > SCR_CONTROL)\r
++ return;\r
++\r
++ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)\r
++ writel(val, (void*)ap->ioaddr.scr_addr + (sc_reg * 4));\r
++ else\r
++ outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));\r
++}\r
++\r
++static void nv_host_stop (struct ata_host_set *host_set)\r
++{\r
++ struct nv_host *host = host_set->private_data;\r
++\r
++ // Disable hotplug event interrupts.\r
++ if (host->host_desc->disable_hotplug)\r
++ host->host_desc->disable_hotplug(host_set);\r
++\r
++ nv_sgpio_host_cleanup(host);\r
++ kfree(host);\r
++#ifdef RHAS3U7\r
++\r
++ ata_host_stop(host_set);\r
++#endif\r
++}\r
++\r
++static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)\r
++{\r
++ static int printed_version = 0;\r
++ struct nv_host *host;\r
++ struct ata_port_info *ppi;\r
++ struct ata_probe_ent *probe_ent;\r
++ int pci_dev_busy = 0;\r
++ int rc;\r
++ u32 bar;\r
++\r
++ // Make sure this is a SATA controller by counting the number of bars\r
++ // (NVIDIA SATA controllers will always have six bars). Otherwise,\r
++ // it's an IDE controller and we ignore it.\r
++ for (bar=0; bar<6; bar++)\r
++ if (pci_resource_start(pdev, bar) == 0)\r
++ return -ENODEV;\r
++\r
++ if (!printed_version++)\r
++ printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");\r
++\r
++ rc = pci_enable_device(pdev);\r
++ if (rc)\r
++ goto err_out;\r
++\r
++ rc = pci_request_regions(pdev, DRV_NAME);\r
++ if (rc) {\r
++ pci_dev_busy = 1;\r
++ goto err_out_disable;\r
++ }\r
++\r
++ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);\r
++ if (rc)\r
++ goto err_out_regions;\r
++#ifndef RHAS3U7\r
++ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);\r
++ if (rc)\r
++ goto err_out_regions;\r
++#endif\r
++ rc = -ENOMEM;\r
++\r
++ ppi = &nv_port_info;\r
++\r
++ probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY|ATA_PORT_SECONDARY);\r
++\r
++ if (!probe_ent)\r
++ goto err_out_regions;\r
++\r
++ host = kmalloc(sizeof(struct nv_host), GFP_KERNEL);\r
++ if (!host)\r
++ goto err_out_free_ent;\r
++\r
++ memset(host, 0, sizeof(struct nv_host));\r
++ host->host_desc = &nv_device_tbl[ent->driver_data];\r
++\r
++ probe_ent->private_data = host;\r
++\r
++ if (pci_resource_flags(pdev, 5) & IORESOURCE_MEM)\r
++ host->host_flags |= NV_HOST_FLAGS_SCR_MMIO;\r
++\r
++ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) {\r
++ unsigned long base;\r
++\r
++ probe_ent->mmio_base = ioremap(pci_resource_start(pdev, 5),\r
++ pci_resource_len(pdev, 5));\r
++ if (probe_ent->mmio_base == NULL) {\r
++ rc = -EIO;\r
++ goto err_out_free_host;\r
++ }\r
++\r
++ base = (unsigned long)probe_ent->mmio_base;\r
++\r
++ probe_ent->port[0].scr_addr =\r
++ base + NV_PORT0_SCR_REG_OFFSET;\r
++ probe_ent->port[1].scr_addr =\r
++ base + NV_PORT1_SCR_REG_OFFSET;\r
++ } else {\r
++\r
++ probe_ent->port[0].scr_addr =\r
++ pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET;\r
++ probe_ent->port[1].scr_addr =\r
++ pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET;\r
++ }\r
++\r
++ pci_set_master(pdev);\r
++#ifdef RHAS3U7\r
++ ata_add_to_probe_list(probe_ent);\r
++ \r
++ if (nv_sgpio_capable(ent))\r
++ nv_sgpio_init(pdev, host);\r
++ // Enable hotplug event interrupts.\r
++ if (host->host_desc->enable_hotplug)\r
++ host->host_desc->enable_hotplug(probe_ent);\r
++\r
++ return 0;\r
++#else\r
++ rc = ata_device_add(probe_ent);\r
++ if (rc != NV_PORTS)\r
++ goto err_out_iounmap;\r
++ \r
++ if (nv_sgpio_capable(ent))\r
++ nv_sgpio_init(pdev, host);\r
++ // Enable hotplug event interrupts.\r
++ if (host->host_desc->enable_hotplug)\r
++ host->host_desc->enable_hotplug(probe_ent);\r
++\r
++ kfree(probe_ent);\r
++\r
++ return 0;\r
++\r
++err_out_iounmap:\r
++ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)\r
++ iounmap(probe_ent->mmio_base);\r
++#endif\r
++err_out_free_host:\r
++ kfree(host);\r
++err_out_free_ent:\r
++ kfree(probe_ent);\r
++err_out_regions:\r
++ pci_release_regions(pdev);\r
++err_out_disable:\r
++ if (!pci_dev_busy)\r
++ pci_disable_device(pdev);\r
++err_out:\r
++ return rc;\r
++}\r
++\r
++\r
++static int nv_port_start(struct ata_port *ap)\r
++{\r
++ int stat;\r
++ struct nv_port *port;\r
++\r
++ stat = ata_port_start(ap);\r
++ if (stat) {\r
++ return stat;\r
++ }\r
++\r
++ port = kmalloc(sizeof(struct nv_port), GFP_KERNEL);\r
++ if (!port) \r
++ goto err_out_no_free;\r
++\r
++ memset(port, 0, sizeof(struct nv_port));\r
++\r
++ ap->private_data = port;\r
++ return 0;\r
++\r
++err_out_no_free:\r
++ return 1;\r
++}\r
++\r
++static void nv_port_stop(struct ata_port *ap)\r
++{\r
++ nv_sgpio_clear_all_leds(ap);\r
++\r
++ if (ap->private_data) {\r
++ kfree(ap->private_data);\r
++ ap->private_data = NULL;\r
++ }\r
++ ata_port_stop(ap);\r
++}\r
++\r
++static int nv_qc_issue(struct ata_queued_cmd *qc)\r
++{\r
++ struct nv_port *port = qc->ap->private_data;\r
++\r
++ if (port) \r
++ port->port_sgpio.activity.flags.recent_activity = 1;\r
++ return (ata_qc_issue_prot(qc));\r
++}\r
++\r
++\r
++\r
++\r
++static void nv_enable_hotplug(struct ata_probe_ent *probe_ent)\r
++{\r
++ u8 intr_mask;\r
++\r
++ outb(NV_INT_STATUS_HOTPLUG,\r
++ probe_ent->port[0].scr_addr + NV_INT_STATUS);\r
++\r
++ intr_mask = inb(probe_ent->port[0].scr_addr + NV_INT_ENABLE);\r
++ intr_mask |= NV_INT_ENABLE_HOTPLUG;\r
++\r
++ outb(intr_mask, probe_ent->port[0].scr_addr + NV_INT_ENABLE);\r
++}\r
++\r
++static void nv_disable_hotplug(struct ata_host_set *host_set)\r
++{\r
++ u8 intr_mask;\r
++\r
++ intr_mask = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);\r
++\r
++ intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);\r
++\r
++ outb(intr_mask, host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);\r
++}\r
++\r
++static void nv_check_hotplug(struct ata_host_set *host_set)\r
++{\r
++ u8 intr_status;\r
++\r
++ intr_status = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);\r
++\r
++ // Clear interrupt status.\r
++ outb(0xff, host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);\r
++\r
++ if (intr_status & NV_INT_STATUS_HOTPLUG) {\r
++ if (intr_status & NV_INT_STATUS_PDEV_ADDED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Primary device added\n");\r
++\r
++ if (intr_status & NV_INT_STATUS_PDEV_REMOVED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Primary device removed\n");\r
++\r
++ if (intr_status & NV_INT_STATUS_SDEV_ADDED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Secondary device added\n");\r
++\r
++ if (intr_status & NV_INT_STATUS_SDEV_REMOVED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Secondary device removed\n");\r
++ }\r
++}\r
++\r
++static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent)\r
++{\r
++ struct pci_dev *pdev = to_pci_dev(probe_ent->dev);\r
++ u8 intr_mask;\r
++ u8 regval;\r
++\r
++ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);\r
++ regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;\r
++ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);\r
++\r
++ writeb(NV_INT_STATUS_HOTPLUG, probe_ent->mmio_base + NV_INT_STATUS_CK804);\r
++\r
++ intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_CK804);\r
++ intr_mask |= NV_INT_ENABLE_HOTPLUG;\r
++\r
++ writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_CK804);\r
++}\r
++\r
++static void nv_disable_hotplug_ck804(struct ata_host_set *host_set)\r
++{\r
++ struct pci_dev *pdev = to_pci_dev(host_set->dev);\r
++ u8 intr_mask;\r
++ u8 regval;\r
++\r
++ intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_CK804);\r
++\r
++ intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);\r
++\r
++ writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_CK804);\r
++\r
++ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);\r
++ regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;\r
++ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);\r
++}\r
++\r
++static void nv_check_hotplug_ck804(struct ata_host_set *host_set)\r
++{\r
++ u8 intr_status;\r
++\r
++ intr_status = readb(host_set->mmio_base + NV_INT_STATUS_CK804);\r
++\r
++ // Clear interrupt status.\r
++ writeb(0xff, host_set->mmio_base + NV_INT_STATUS_CK804);\r
++\r
++ if (intr_status & NV_INT_STATUS_HOTPLUG) {\r
++ if (intr_status & NV_INT_STATUS_PDEV_ADDED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Primary device added\n");\r
++\r
++ if (intr_status & NV_INT_STATUS_PDEV_REMOVED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Primary device removed\n");\r
++\r
++ if (intr_status & NV_INT_STATUS_SDEV_ADDED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Secondary device added\n");\r
++\r
++ if (intr_status & NV_INT_STATUS_SDEV_REMOVED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Secondary device removed\n");\r
++ }\r
++}\r
++static void nv_enable_hotplug_mcp55(struct ata_probe_ent *probe_ent)\r
++{\r
++ struct pci_dev *pdev = to_pci_dev(probe_ent->dev);\r
++ u8 intr_mask;\r
++ u8 regval;\r
++\r
++ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);\r
++ regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;\r
++ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);\r
++\r
++ writeb(0x0c, probe_ent->mmio_base + NV_INT_STATUS_MCP55);\r
++ writeb(0x0c, probe_ent->mmio_base + NV_INT_STATUS_MCP55+2);\r
++\r
++ intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_MCP55);\r
++ intr_mask |= 0x0c;\r
++ writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_MCP55);\r
++\r
++ intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_MCP55+2);\r
++ intr_mask |= 0x0c;\r
++ writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_MCP55+2);\r
++}\r
++\r
++static void nv_disable_hotplug_mcp55(struct ata_host_set *host_set)\r
++{\r
++ struct pci_dev *pdev = to_pci_dev(host_set->dev);\r
++ u8 intr_mask;\r
++ u8 regval;\r
++\r
++ intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_MCP55);\r
++ intr_mask &= ~(0x0C);\r
++ writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_MCP55);\r
++ \r
++ intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_MCP55+2);\r
++ intr_mask &= ~(0x0C);\r
++ writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_MCP55+2);\r
++\r
++	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
++ regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;\r
++ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);\r
++}\r
++\r
++static void nv_check_hotplug_mcp55(struct ata_host_set *host_set)\r
++{\r
++ u8 intr_status,intr_status1;\r
++\r
++ intr_status = readb(host_set->mmio_base + NV_INT_STATUS_MCP55);\r
++ intr_status1 = readb(host_set->mmio_base + NV_INT_STATUS_MCP55+2);\r
++\r
++ // Clear interrupt status.\r
++ writeb(0xff, host_set->mmio_base + NV_INT_STATUS_MCP55);\r
++ writeb(0xff, host_set->mmio_base + NV_INT_STATUS_MCP55+2); \r
++\r
++ if ((intr_status & 0x0c) || (intr_status1&0x0c)) {\r
++ if (intr_status & 0x04)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Primary device added\n");\r
++\r
++ if (intr_status & 0x08)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Primary device removed\n");\r
++\r
++ if (intr_status1 & 0x04)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Secondary device added\n");\r
++\r
++ if (intr_status1 & 0x08)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Secondary device removed\n");\r
++ }\r
++}\r
++\r
++\r
++static void nv_sgpio_init(struct pci_dev *pdev, struct nv_host *phost)\r
++{\r
++ u16 csr_add; \r
++ u32 cb_add, temp32;\r
++ struct device *dev = pci_dev_to_dev(pdev);\r
++ struct ata_host_set *host_set = dev_get_drvdata(dev);\r
++ u8 pro=0;\r
++ pci_read_config_word(pdev, NV_SGPIO_PCI_CSR_OFFSET, &csr_add);\r
++ pci_read_config_dword(pdev, NV_SGPIO_PCI_CB_OFFSET, &cb_add);\r
++ pci_read_config_byte(pdev, 0xA4, &pro);\r
++ \r
++ if (csr_add == 0 || cb_add == 0) \r
++ return;\r
++ \r
++\r
++ if (!(pro&0x40))\r
++ return; \r
++ \r
++ \r
++ temp32 = csr_add;\r
++ phost->host_sgpio.pcsr = (void *)temp32;\r
++ phost->host_sgpio.pcb = phys_to_virt(cb_add);\r
++\r
++ if (phost->host_sgpio.pcb->nvcr.bit.init_cnt!=0x2 || phost->host_sgpio.pcb->nvcr.bit.cbver!=0x0)\r
++ return;\r
++ \r
++ if (temp32 <=0x200 || temp32 >=0xFFFE )\r
++ return;\r
++ \r
++ \r
++ if (cb_add<=0x80000 || cb_add>=0x9FC00)\r
++ return;\r
++ \r
++ \r
++ if (phost->host_sgpio.pcb->scratch_space == 0) {\r
++ spin_lock_init(&nv_sgpio_lock);\r
++ phost->host_sgpio.share.plock = &nv_sgpio_lock;\r
++ phost->host_sgpio.share.ptstamp = &nv_sgpio_tstamp;\r
++ phost->host_sgpio.pcb->scratch_space = \r
++ (unsigned long)&phost->host_sgpio.share;\r
++ spin_lock(phost->host_sgpio.share.plock);\r
++ nv_sgpio_reset(phost->host_sgpio.pcsr);\r
++ phost->host_sgpio.pcb->cr0 = \r
++ SET_ENABLE(phost->host_sgpio.pcb->cr0);\r
++\r
++ spin_unlock(phost->host_sgpio.share.plock);\r
++ }\r
++\r
++ phost->host_sgpio.share = \r
++ *(struct nv_sgpio_host_share *)(unsigned long)\r
++ phost->host_sgpio.pcb->scratch_space;\r
++ phost->host_sgpio.flags.sgpio_enabled = 1;\r
++ phost->pdev = pdev;\r
++ init_timer(&phost->host_sgpio.sgpio_timer);\r
++ phost->host_sgpio.sgpio_timer.data = (unsigned long)phost;\r
++ nv_sgpio_set_timer(&phost->host_sgpio.sgpio_timer, \r
++ NV_SGPIO_UPDATE_TICK);\r
++}\r
++\r
++static void __nv_sgpio_timer_handler(unsigned long context);\r
++static void nv_sgpio_set_timer(struct timer_list *ptimer, unsigned int timeout_msec)\r
++{\r
++ if (!ptimer)\r
++ return;\r
++ ptimer->function = __nv_sgpio_timer_handler;\r
++ ptimer->expires = msecs_to_jiffies(timeout_msec) + jiffies;\r
++ add_timer(ptimer);\r
++}\r
++static void __nv_sgpio_timer_handler(unsigned long context)\r
++{\r
++ struct nv_host *phost = (struct nv_host*)context;\r
++ struct device *dev = pci_dev_to_dev(phost->pdev);\r
++ struct ata_host_set *host_set = dev_get_drvdata(dev);\r
++ \r
++ if (!host_set)\r
++ nv_sgpio_set_timer(&phost->host_sgpio.sgpio_timer, \r
++ NV_SGPIO_UPDATE_TICK);\r
++ else\r
++ nv_sgpio_timer_handler(host_set);\r
++ \r
++}\r
++\r
++static void nv_sgpio_timer_handler(unsigned long context)\r
++{\r
++\r
++ struct ata_host_set *host_set = (struct ata_host_set *)context;\r
++ struct nv_host *host;\r
++ u8 count, host_offset, port_offset;\r
++ union nv_sgpio_tx tx;\r
++ bool on_off;\r
++ unsigned long mask = 0xFFFF;\r
++ struct nv_port *port;\r
++\r
++ if (!host_set)\r
++ goto err_out;\r
++ else \r
++ host = (struct nv_host *)host_set->private_data;\r
++\r
++ if (!host->host_sgpio.flags.sgpio_enabled)\r
++ goto err_out;\r
++\r
++ host_offset = nv_sgpio_tx_host_offset(host_set);\r
++\r
++ spin_lock(host->host_sgpio.share.plock);\r
++ tx = host->host_sgpio.pcb->tx[host_offset];\r
++ spin_unlock(host->host_sgpio.share.plock);\r
++\r
++ for (count = 0; count < host_set->n_ports; count++) {\r
++ struct ata_port *ap; \r
++\r
++ ap = host_set->ports[count];\r
++ \r
++ if (!(ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)))\r
++ continue;\r
++\r
++ port = (struct nv_port *)ap->private_data;\r
++ if (!port)\r
++ continue; \r
++ port_offset = nv_sgpio_tx_port_offset(ap);\r
++ on_off = GET_ACTIVITY(tx.tx_port[port_offset]);\r
++ if (nv_sgpio_update_led(&port->port_sgpio.activity, &on_off)) {\r
++ tx.tx_port[port_offset] = \r
++ SET_ACTIVITY(tx.tx_port[port_offset], on_off);\r
++ host->host_sgpio.flags.need_update = 1;\r
++ }\r
++ }\r
++\r
++\r
++ if (host->host_sgpio.flags.need_update) {\r
++ spin_lock(host->host_sgpio.share.plock); \r
++ if (nv_sgpio_get_func(host_set) \r
++ % NV_CNTRLR_SHARE_INIT == 0) {\r
++ host->host_sgpio.pcb->tx[host_offset].all &= mask;\r
++ mask = mask << 16;\r
++ tx.all &= mask;\r
++ } else {\r
++ tx.all &= mask;\r
++ mask = mask << 16;\r
++ host->host_sgpio.pcb->tx[host_offset].all &= mask;\r
++ }\r
++ host->host_sgpio.pcb->tx[host_offset].all |= tx.all;\r
++ spin_unlock(host->host_sgpio.share.plock); \r
++ \r
++ if (nv_sgpio_send_cmd(host, NV_SGPIO_CMD_WRITE_DATA)) { \r
++ host->host_sgpio.flags.need_update = 0;\r
++ return;\r
++ }\r
++ } else {\r
++ nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer, \r
++ NV_SGPIO_UPDATE_TICK);\r
++ }\r
++err_out:\r
++ return;\r
++}\r
++\r
++static bool nv_sgpio_send_cmd(struct nv_host *host, u8 cmd)\r
++{\r
++ u8 csr;\r
++ unsigned long *ptstamp;\r
++\r
++ spin_lock(host->host_sgpio.share.plock); \r
++ ptstamp = host->host_sgpio.share.ptstamp;\r
++ if (jiffies_to_msecs1(jiffies - *ptstamp) >= NV_SGPIO_MIN_UPDATE_DELTA) {\r
++ csr = \r
++ nv_sgpio_get_csr((unsigned long)host->host_sgpio.pcsr);\r
++ if ((GET_SGPIO_STATUS(csr) != NV_SGPIO_STATE_OPERATIONAL) ||\r
++ (GET_CMD_STATUS(csr) == NV_SGPIO_CMD_ACTIVE)) {\r
++ //nv_sgpio_reset(host->host_sgpio.pcsr);\r
++ } else {\r
++ host->host_sgpio.pcb->cr0 = \r
++ SET_ENABLE(host->host_sgpio.pcb->cr0);\r
++ csr = 0;\r
++ csr = SET_CMD(csr, cmd);\r
++ nv_sgpio_set_csr(csr, \r
++ (unsigned long)host->host_sgpio.pcsr);\r
++ *ptstamp = jiffies;\r
++ }\r
++ spin_unlock(host->host_sgpio.share.plock);\r
++ nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer, \r
++ NV_SGPIO_UPDATE_TICK);\r
++ return 1;\r
++ } else {\r
++ spin_unlock(host->host_sgpio.share.plock);\r
++ nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer, \r
++ (NV_SGPIO_MIN_UPDATE_DELTA - \r
++ jiffies_to_msecs1(jiffies - *ptstamp)));\r
++ return 0;\r
++ }\r
++}\r
++\r
++static bool nv_sgpio_update_led(struct nv_sgpio_led *led, bool *on_off)\r
++{\r
++ bool need_update = 0;\r
++\r
++ if (led->force_off > 0) {\r
++ led->force_off--;\r
++ } else if (led->flags.recent_activity ^ led->flags.last_state) {\r
++ *on_off = led->flags.recent_activity;\r
++ led->flags.last_state = led->flags.recent_activity;\r
++ need_update = 1;\r
++ } else if ((led->flags.recent_activity & led->flags.last_state) &&\r
++ (led->last_cons_active >= NV_SGPIO_MAX_ACTIVITY_ON)) {\r
++ *on_off = NV_OFF;\r
++ led->flags.last_state = NV_OFF;\r
++ led->force_off = NV_SGPIO_MIN_FORCE_OFF;\r
++ need_update = 1;\r
++ }\r
++\r
++ if (*on_off) \r
++ led->last_cons_active++; \r
++ else\r
++ led->last_cons_active = 0;\r
++\r
++ led->flags.recent_activity = 0;\r
++ return need_update;\r
++}\r
++\r
++static void nv_sgpio_reset(u8 *pcsr)\r
++{\r
++ u8 csr;\r
++\r
++ csr = nv_sgpio_get_csr((unsigned long)pcsr);\r
++ if (GET_SGPIO_STATUS(csr) == NV_SGPIO_STATE_RESET) {\r
++ csr = 0;\r
++ csr = SET_CMD(csr, NV_SGPIO_CMD_RESET);\r
++ nv_sgpio_set_csr(csr, (unsigned long)pcsr);\r
++ }\r
++ csr = 0;\r
++ csr = SET_CMD(csr, NV_SGPIO_CMD_READ_PARAMS);\r
++ nv_sgpio_set_csr(csr, (unsigned long)pcsr);\r
++}\r
++\r
++static void nv_sgpio_host_cleanup(struct nv_host *host)\r
++{\r
++ u8 csr;\r
++ if (!host)\r
++ return;\r
++\r
++ if (host->host_sgpio.flags.sgpio_enabled){\r
++ spin_lock(host->host_sgpio.share.plock);\r
++ host->host_sgpio.pcb->cr0 = \r
++ SET_ENABLE(host->host_sgpio.pcb->cr0);\r
++ csr = 0;\r
++ csr = SET_CMD(csr, NV_SGPIO_CMD_WRITE_DATA);\r
++ nv_sgpio_set_csr(csr, \r
++ (unsigned long)host->host_sgpio.pcsr);\r
++ spin_unlock(host->host_sgpio.share.plock);\r
++ \r
++ if (timer_pending(&host->host_sgpio.sgpio_timer))\r
++ del_timer(&host->host_sgpio.sgpio_timer);\r
++ host->host_sgpio.flags.sgpio_enabled = 0;\r
++ host->host_sgpio.pcb->scratch_space = 0;\r
++ }\r
++ \r
++}\r
++\r
++static void nv_sgpio_clear_all_leds(struct ata_port *ap)\r
++{\r
++ struct nv_port *port = ap->private_data;\r
++ struct nv_host *host;\r
++ u8 host_offset, port_offset;\r
++\r
++ if (!port || !ap->host_set)\r
++ return;\r
++ if (!ap->host_set->private_data)\r
++ return;\r
++\r
++ host = ap->host_set->private_data;\r
++ if (!host->host_sgpio.flags.sgpio_enabled)\r
++ return;\r
++\r
++ host_offset = nv_sgpio_tx_host_offset(ap->host_set);\r
++ port_offset = nv_sgpio_tx_port_offset(ap);\r
++\r
++ spin_lock(host->host_sgpio.share.plock);\r
++ host->host_sgpio.pcb->tx[host_offset].tx_port[port_offset] = 0;\r
++ host->host_sgpio.flags.need_update = 1;\r
++ spin_unlock(host->host_sgpio.share.plock);\r
++}\r
++\r
++\r
++\r
++static int __init nv_init(void)\r
++{\r
++#ifdef RHAS3U7\r
++ int rc;\r
++ rc = pci_module_init(&nv_pci_driver);\r
++ if (rc)\r
++ return rc;\r
++ \r
++ rc = scsi_register_module(MODULE_SCSI_HA, &nv_sht);\r
++ if (rc) {\r
++ pci_unregister_driver(&nv_pci_driver);\r
++ /* TODO: does scsi_register_module return errno val? */\r
++ return -ENODEV;\r
++ }\r
++\r
++ return 0;\r
++#else\r
++ return pci_module_init(&nv_pci_driver);\r
++#endif\r
++}\r
++\r
++static void __exit nv_exit(void)\r
++{\r
++#ifdef RHAS3U7\r
++ scsi_unregister_module(MODULE_SCSI_HA, &nv_sht);\r
++#endif\r
++ pci_unregister_driver(&nv_pci_driver);\r
++\r
++}\r
++\r
++module_init(nv_init);\r
++module_exit(nv_exit);\r
+++ /dev/null
- include/linux/netfilter_ipv4/ipt_ipp2p.h | 31 +
- net/ipv4/netfilter/Kconfig | 10
- net/ipv4/netfilter/Makefile | 1
- net/ipv4/netfilter/ipt_ipp2p.c | 863 +++++++++++++++++++++++++++++++
- 4 files changed, 905 insertions(+)
-
-diff -Nur --exclude '*.orig' linux.org/include/linux/netfilter_ipv4/ipt_ipp2p.h linux/include/linux/netfilter_ipv4/ipt_ipp2p.h
---- linux.org/include/linux/netfilter_ipv4/ipt_ipp2p.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux/include/linux/netfilter_ipv4/ipt_ipp2p.h 2006-05-04 11:24:36.000000000 +0200
-@@ -0,0 +1,31 @@
-+#ifndef __IPT_IPP2P_H
-+#define __IPT_IPP2P_H
-+#define IPP2P_VERSION "0.8.1_rc1"
-+
-+struct ipt_p2p_info {
-+ int cmd;
-+ int debug;
-+};
-+
-+#endif //__IPT_IPP2P_H
-+
-+#define SHORT_HAND_IPP2P 1 /* --ipp2p switch*/
-+//#define SHORT_HAND_DATA 4 /* --ipp2p-data switch*/
-+#define SHORT_HAND_NONE 5 /* no short hand*/
-+
-+#define IPP2P_EDK (1 << 1)
-+#define IPP2P_DATA_KAZAA (1 << 2)
-+#define IPP2P_DATA_EDK (1 << 3)
-+#define IPP2P_DATA_DC (1 << 4)
-+#define IPP2P_DC (1 << 5)
-+#define IPP2P_DATA_GNU (1 << 6)
-+#define IPP2P_GNU (1 << 7)
-+#define IPP2P_KAZAA (1 << 8)
-+#define IPP2P_BIT (1 << 9)
-+#define IPP2P_APPLE (1 << 10)
-+#define IPP2P_SOUL (1 << 11)
-+#define IPP2P_WINMX (1 << 12)
-+#define IPP2P_ARES (1 << 13)
-+#define IPP2P_MUTE (1 << 14)
-+#define IPP2P_WASTE (1 << 15)
-+#define IPP2P_XDCC (1 << 16)
-diff -Nur --exclude '*.orig' linux.org/net/ipv4/netfilter/Kconfig linux/net/ipv4/netfilter/Kconfig
---- linux.org/net/ipv4/netfilter/Kconfig 2006-05-02 23:38:44.000000000 +0200
-+++ linux/net/ipv4/netfilter/Kconfig 2006-05-04 11:24:36.000000000 +0200
-@@ -606,5 +606,15 @@
- Allows altering the ARP packet payload: source and destination
- hardware and network addresses.
-
-+config IP_NF_MATCH_IPP2P
-+ tristate 'IPP2P match support'
-+ depends on IP_NF_IPTABLES
-+ help
-+ This option makes possible to match some P2P packets
-+ therefore helps controlling such traffic.
-+
-+ If you want to compile it as a module, say M here and read
-+ <file:Documentation/modules.txt>. If unsure, say `N'.
-+
- endmenu
-
-diff -Nur --exclude '*.orig' linux.org/net/ipv4/netfilter/Makefile linux/net/ipv4/netfilter/Makefile
---- linux.org/net/ipv4/netfilter/Makefile 2006-05-02 23:38:44.000000000 +0200
-+++ linux/net/ipv4/netfilter/Makefile 2006-05-04 11:24:36.000000000 +0200
-@@ -0,0 +0,1 @@
-+obj-$(CONFIG_IP_NF_MATCH_IPP2P) += ipt_ipp2p.o
-diff -Nur --exclude '*.orig' linux.org/net/ipv4/netfilter/ipt_ipp2p.c linux/net/ipv4/netfilter/ipt_ipp2p.c
---- linux.org/net/ipv4/netfilter/ipt_ipp2p.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux/net/ipv4/netfilter/ipt_ipp2p.c 2006-05-04 11:24:36.000000000 +0200
-@@ -0,0 +1,863 @@
-+#if defined(MODVERSIONS)
-+#include <linux/modversions.h>
-+#endif
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ipt_ipp2p.h>
-+#include <net/tcp.h>
-+#include <net/udp.h>
-+
-+#define get_u8(X,O) (*(__u8 *)(X + O))
-+#define get_u16(X,O) (*(__u16 *)(X + O))
-+#define get_u32(X,O) (*(__u32 *)(X + O))
-+
-+MODULE_AUTHOR("Eicke Friedrich/Klaus Degner <ipp2p@ipp2p.org>");
-+MODULE_DESCRIPTION("An extension to iptables to identify P2P traffic.");
-+MODULE_LICENSE("GPL");
-+
-+
-+/*Search for UDP eDonkey/eMule/Kad commands*/
-+int
-+udp_search_edk (unsigned char *haystack, int packet_len)
-+{
-+ unsigned char *t = haystack;
-+ t += 8;
-+
-+ switch (t[0]) {
-+ case 0xe3:
-+ { /*edonkey*/
-+ switch (t[1])
-+ {
-+ /* client -> server status request */
-+ case 0x96:
-+ if (packet_len == 14) return ((IPP2P_EDK * 100) + 50);
-+ break;
-+ /* server -> client status request */
-+ case 0x97: if (packet_len == 42) return ((IPP2P_EDK * 100) + 51);
-+ break;
-+ /* server description request */
-+ /* e3 2a ff f0 .. | size == 6 */
-+ case 0xa2: if ( (packet_len == 14) && ( get_u16(t,2) == __constant_htons(0xfff0) ) ) return ((IPP2P_EDK * 100) + 52);
-+ break;
-+ /* server description response */
-+ /* e3 a3 ff f0 .. | size > 40 && size < 200 */
-+ //case 0xa3: return ((IPP2P_EDK * 100) + 53);
-+ // break;
-+ case 0x9a: if (packet_len==26) return ((IPP2P_EDK * 100) + 54);
-+ break;
-+
-+ case 0x92: if (packet_len==18) return ((IPP2P_EDK * 100) + 55);
-+ break;
-+ }
-+ break;
-+ }
-+ case 0xe4:
-+ {
-+ switch (t[1])
-+ {
-+ /* e4 20 .. | size == 43 */
-+ case 0x20: if ((packet_len == 43) && (t[2] != 0x00) && (t[34] != 0x00)) return ((IPP2P_EDK * 100) + 60);
-+ break;
-+ /* e4 00 .. 00 | size == 35 ? */
-+ case 0x00: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 61);
-+ break;
-+ /* e4 10 .. 00 | size == 35 ? */
-+ case 0x10: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 62);
-+ break;
-+ /* e4 18 .. 00 | size == 35 ? */
-+ case 0x18: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 63);
-+ break;
-+ /* e4 52 .. | size = 44 */
-+ case 0x52: if (packet_len == 44 ) return ((IPP2P_EDK * 100) + 64);
-+ break;
-+ /* e4 58 .. | size == 6 */
-+ case 0x58: if (packet_len == 14 ) return ((IPP2P_EDK * 100) + 65);
-+ break;
-+ /* e4 59 .. | size == 2 */
-+ case 0x59: if (packet_len == 10 )return ((IPP2P_EDK * 100) + 66);
-+ break;
-+ /* e4 28 .. | packet_len == 52,77,102,127... */
-+ case 0x28: if (((packet_len-52) % 25) == 0) return ((IPP2P_EDK * 100) + 67);
-+ break;
-+ /* e4 50 xx xx | size == 4 */
-+ case 0x50: if (packet_len == 12) return ((IPP2P_EDK * 100) + 68);
-+ break;
-+ /* e4 40 xx xx | size == 48 */
-+ case 0x40: if (packet_len == 56) return ((IPP2P_EDK * 100) + 69);
-+ break;
-+ }
-+ break;
-+ }
-+ } /* end of switch (t[0]) */
-+ return 0;
-+}/*udp_search_edk*/
-+
-+
-+/*Search for UDP Gnutella commands*/
-+int
-+udp_search_gnu (unsigned char *haystack, int packet_len)
-+{
-+ unsigned char *t = haystack;
-+ t += 8;
-+
-+ if (memcmp(t, "GND", 3) == 0) return ((IPP2P_GNU * 100) + 51);
-+ if (memcmp(t, "GNUTELLA ", 9) == 0) return ((IPP2P_GNU * 100) + 52);
-+ return 0;
-+}/*udp_search_gnu*/
-+
-+
-+/*Search for UDP KaZaA commands*/
-+int
-+udp_search_kazaa (unsigned char *haystack, int packet_len)
-+{
-+ unsigned char *t = haystack;
-+
-+ if (t[packet_len-1] == 0x00){
-+ t += (packet_len - 6);
-+ if (memcmp(t, "KaZaA", 5) == 0) return (IPP2P_KAZAA * 100 +50);
-+ }
-+
-+ return 0;
-+}/*udp_search_kazaa*/
-+
-+/*Search for UDP DirectConnect commands*/
-+int
-+udp_search_directconnect (unsigned char *haystack, int packet_len)
-+{
-+ unsigned char *t = haystack;
-+ if ((*(t + 8) == 0x24) && (*(t + packet_len - 1) == 0x7c)) {
-+ t+=8;
-+ if (memcmp(t, "SR ", 3) == 0) return ((IPP2P_DC * 100) + 60);
-+ if (memcmp(t, "Ping ", 5) == 0) return ((IPP2P_DC * 100) + 61);
-+ }
-+ return 0;
-+}/*udp_search_directconnect*/
-+
-+
-+
-+/*Search for UDP BitTorrent commands*/
-+int
-+udp_search_bit (unsigned char *haystack, int packet_len)
-+{
-+ switch(packet_len)
-+ {
-+ case 24:
-+ /* ^ 00 00 04 17 27 10 19 80 */
-+ if ((ntohl(get_u32(haystack, 8)) == 0x00000417) && (ntohl(get_u32(haystack, 12)) == 0x27101980))
-+ return (IPP2P_BIT * 100 + 50);
-+ break;
-+ case 44:
-+ if (get_u32(haystack, 16) == __constant_htonl(0x00000400) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
-+ return (IPP2P_BIT * 100 + 51);
-+ if (get_u32(haystack, 16) == __constant_htonl(0x00000400))
-+ return (IPP2P_BIT * 100 + 61);
-+ break;
-+ case 65:
-+ if (get_u32(haystack, 16) == __constant_htonl(0x00000404) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
-+ return (IPP2P_BIT * 100 + 52);
-+ if (get_u32(haystack, 16) == __constant_htonl(0x00000404))
-+ return (IPP2P_BIT * 100 + 62);
-+ break;
-+ case 67:
-+ if (get_u32(haystack, 16) == __constant_htonl(0x00000406) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
-+ return (IPP2P_BIT * 100 + 53);
-+ if (get_u32(haystack, 16) == __constant_htonl(0x00000406))
-+ return (IPP2P_BIT * 100 + 63);
-+ break;
-+ case 211:
-+ if (get_u32(haystack, 8) == __constant_htonl(0x00000405))
-+ return (IPP2P_BIT * 100 + 54);
-+ break;
-+ case 29:
-+ if ((get_u32(haystack, 8) == __constant_htonl(0x00000401)))
-+ return (IPP2P_BIT * 100 + 55);
-+ break;
-+ case 52:
-+ if (get_u32(haystack,8) == __constant_htonl(0x00000827) &&
-+ get_u32(haystack,12) == __constant_htonl(0x37502950))
-+ return (IPP2P_BIT * 100 + 80);
-+ break;
-+ default:
-+ /* this packet does not have a constant size */
-+ if (packet_len >= 40 && get_u32(haystack, 16) == __constant_htonl(0x00000402) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
-+ return (IPP2P_BIT * 100 + 56);
-+ break;
-+ }
-+
-+ /* some extra-bitcomet rules:
-+ * "d1:" [a|r] "d2:id20:"
-+ */
-+ if (packet_len > 30 && get_u8(haystack, 8) == 'd' && get_u8(haystack, 9) == '1' && get_u8(haystack, 10) == ':' )
-+ {
-+ if (get_u8(haystack, 11) == 'a' || get_u8(haystack, 11) == 'r')
-+ {
-+ if (memcmp(haystack+12,"d2:id20:",8)==0)
-+ return (IPP2P_BIT * 100 + 57);
-+ }
-+ }
-+
-+#if 0
-+ /* bitlord rules */
-+ /* packetlen must be bigger than 40 */
-+ /* first 4 bytes are zero */
-+ if (packet_len > 40 && get_u32(haystack, 8) == 0x00000000)
-+ {
-+ /* first rule: 00 00 00 00 01 00 00 xx xx xx xx 00 00 00 00*/
-+ if (get_u32(haystack, 12) == 0x00000000 &&
-+ get_u32(haystack, 16) == 0x00010000 &&
-+ get_u32(haystack, 24) == 0x00000000 )
-+ return (IPP2P_BIT * 100 + 71);
-+
-+ /* 00 01 00 00 0d 00 00 xx xx xx xx 00 00 00 00*/
-+ if (get_u32(haystack, 12) == 0x00000001 &&
-+ get_u32(haystack, 16) == 0x000d0000 &&
-+ get_u32(haystack, 24) == 0x00000000 )
-+ return (IPP2P_BIT * 100 + 71);
-+
-+
-+ }
-+#endif
-+
-+ return 0;
-+}/*udp_search_bit*/
-+
-+
-+
-+/*Search for Ares commands*/
-+//#define IPP2P_DEBUG_ARES
-+int
-+search_ares (const unsigned char *payload, const u16 plen)
-+//int search_ares (unsigned char *haystack, int packet_len, int head_len)
-+{
-+// const unsigned char *t = haystack + head_len;
-+
-+ /* all ares packets start with */
-+ if (payload[1] == 0 && (plen - payload[0]) == 3)
-+ {
-+ switch (payload[2])
-+ {
-+ case 0x5a:
-+ /* ares connect */
-+ if ( plen == 6 && payload[5] == 0x05 ) return ((IPP2P_ARES * 100) + 1);
-+ break;
-+ case 0x09:
-+ /* ares search, min 3 chars --> 14 bytes
-+ * lets define a search can be up to 30 chars --> max 34 bytes
-+ */
-+ if ( plen >= 14 && plen <= 34 ) return ((IPP2P_ARES * 100) + 1);
-+ break;
-+#ifdef IPP2P_DEBUG_ARES
-+ default:
-+ printk(KERN_DEBUG "Unknown Ares command %x recognized, len: %u \n", (unsigned int) payload[2],plen);
-+#endif /* IPP2P_DEBUG_ARES */
-+ }
-+ }
-+
-+#if 0
-+ /* found connect packet: 03 00 5a 04 03 05 */
-+ /* new version ares 1.8: 03 00 5a xx xx 05 */
-+ if ((plen) == 6){ /* possible connect command*/
-+ if ((payload[0] == 0x03) && (payload[1] == 0x00) && (payload[2] == 0x5a) && (payload[5] == 0x05))
-+ return ((IPP2P_ARES * 100) + 1);
-+ }
-+ if ((plen) == 60){ /* possible download command*/
-+ if ((payload[59] == 0x0a) && (payload[58] == 0x0a)){
-+ if (memcmp(t, "PUSH SHA1:", 10) == 0) /* found download command */
-+ return ((IPP2P_ARES * 100) + 2);
-+ }
-+ }
-+#endif
-+
-+ return 0;
-+} /*search_ares*/
-+
-+/*Search for SoulSeek commands*/
-+int
-+search_soul (const unsigned char *payload, const u16 plen)
-+{
-+//#define IPP2P_DEBUG_SOUL
-+ /* match: xx xx xx xx | xx = sizeof(payload) - 4 */
-+ if (get_u32(payload, 0) == (plen - 4)){
-+ const __u32 m=get_u32(payload, 4);
-+ /* match 00 yy yy 00, yy can be everything */
-+ if ( get_u8(payload, 4) == 0x00 && get_u8(payload, 7) == 0x00 )
-+ {
-+#ifdef IPP2P_DEBUG_SOUL
-+ printk(KERN_DEBUG "0: Soulseek command 0x%x recognized\n",get_u32(payload, 4));
-+#endif /* IPP2P_DEBUG_SOUL */
-+ return ((IPP2P_SOUL * 100) + 1);
-+ }
-+
-+ /* next match: 01 yy 00 00 | yy can be everything */
-+ if ( get_u8(payload, 4) == 0x01 && get_u16(payload, 6) == 0x0000 )
-+ {
-+#ifdef IPP2P_DEBUG_SOUL
-+ printk(KERN_DEBUG "1: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
-+#endif /* IPP2P_DEBUG_SOUL */
-+ return ((IPP2P_SOUL * 100) + 2);
-+ }
-+
-+ /* other soulseek commandos are: 1-5,7,9,13-18,22,23,26,28,35-37,40-46,50,51,60,62-69,91,92,1001 */
-+ /* try to do this in an intelligent way */
-+ /* get all small commandos */
-+ switch(m)
-+ {
-+ case 7:
-+ case 9:
-+ case 22:
-+ case 23:
-+ case 26:
-+ case 28:
-+ case 50:
-+ case 51:
-+ case 60:
-+ case 91:
-+ case 92:
-+ case 1001:
-+#ifdef IPP2P_DEBUG_SOUL
-+ printk(KERN_DEBUG "2: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
-+#endif /* IPP2P_DEBUG_SOUL */
-+ return ((IPP2P_SOUL * 100) + 3);
-+ }
-+
-+ if (m > 0 && m < 6 )
-+ {
-+#ifdef IPP2P_DEBUG_SOUL
-+ printk(KERN_DEBUG "3: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
-+#endif /* IPP2P_DEBUG_SOUL */
-+ return ((IPP2P_SOUL * 100) + 4);
-+ }
-+ if (m > 12 && m < 19 )
-+ {
-+#ifdef IPP2P_DEBUG_SOUL
-+ printk(KERN_DEBUG "4: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
-+#endif /* IPP2P_DEBUG_SOUL */
-+ return ((IPP2P_SOUL * 100) + 5);
-+ }
-+
-+ if (m > 34 && m < 38 )
-+ {
-+#ifdef IPP2P_DEBUG_SOUL
-+ printk(KERN_DEBUG "5: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
-+#endif /* IPP2P_DEBUG_SOUL */
-+ return ((IPP2P_SOUL * 100) + 6);
-+ }
-+
-+ if (m > 39 && m < 47 )
-+ {
-+#ifdef IPP2P_DEBUG_SOUL
-+ printk(KERN_DEBUG "6: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
-+#endif /* IPP2P_DEBUG_SOUL */
-+ return ((IPP2P_SOUL * 100) + 7);
-+ }
-+
-+ if (m > 61 && m < 70 )
-+ {
-+#ifdef IPP2P_DEBUG_SOUL
-+ printk(KERN_DEBUG "7: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
-+#endif /* IPP2P_DEBUG_SOUL */
-+ return ((IPP2P_SOUL * 100) + 8);
-+ }
-+
-+#ifdef IPP2P_DEBUG_SOUL
-+ printk(KERN_DEBUG "unknown SOULSEEK command: 0x%x, first 16 bit: 0x%x, first 8 bit: 0x%x ,soulseek ???\n",get_u32(payload, 4),get_u16(payload, 4) >> 16,get_u8(payload, 4) >> 24);
-+#endif /* IPP2P_DEBUG_SOUL */
-+ }
-+
-+ /* match 14 00 00 00 01 yy 00 00 00 STRING(YY) 01 00 00 00 00 46|50 00 00 00 00 */
-+ /* without size at the beginning !!! */
-+ if ( get_u32(payload, 0) == 0x14 && get_u8(payload, 4) == 0x01 )
-+ {
-+ __u32 y=get_u32(payload, 5);
-+ /* we need 19 chars + string */
-+ if ( (y + 19) <= (plen) )
-+ {
-+ const unsigned char *w=payload+9+y;
-+ if (get_u32(w, 0) == 0x01 && ( get_u16(w, 4) == 0x4600 || get_u16(w, 4) == 0x5000) && get_u32(w, 6) == 0x00);
-+#ifdef IPP2P_DEBUG_SOUL
-+ printk(KERN_DEBUG "Soulssek special client command recognized\n");
-+#endif /* IPP2P_DEBUG_SOUL */
-+ return ((IPP2P_SOUL * 100) + 9);
-+ }
-+ }
-+ return 0;
-+}
-+
-+
-+/*Search for WinMX commands*/
-+int
-+search_winmx (const unsigned char *payload, const u16 plen)
-+{
-+//#define IPP2P_DEBUG_WINMX
-+ if (((plen) == 4) && (memcmp(payload, "SEND", 4) == 0)) return ((IPP2P_WINMX * 100) + 1);
-+ if (((plen) == 3) && (memcmp(payload, "GET", 3) == 0)) return ((IPP2P_WINMX * 100) + 2);
-+ //if (packet_len < (head_len + 10)) return 0;
-+ if (plen < 10) return 0;
-+
-+ if ((memcmp(payload, "SEND", 4) == 0) || (memcmp(payload, "GET", 3) == 0)){
-+ u16 c=4;
-+ const u16 end=plen-2;
-+ u8 count=0;
-+ while (c < end)
-+ {
-+ if (payload[c]== 0x20 && payload[c+1] == 0x22)
-+ {
-+ c++;
-+ count++;
-+ if (count>=2) return ((IPP2P_WINMX * 100) + 3);
-+ }
-+ c++;
-+ }
-+ }
-+
-+ if ( plen == 149 && payload[0] == '8' )
-+ {
-+#ifdef IPP2P_DEBUG_WINMX
-+ printk(KERN_INFO "maybe WinMX\n");
-+#endif
-+ if (get_u32(payload,17) == 0 && get_u32(payload,21) == 0 && get_u32(payload,25) == 0 &&
-+// get_u32(payload,33) == __constant_htonl(0x71182b1a) && get_u32(payload,37) == __constant_htonl(0x05050000) &&
-+// get_u32(payload,133) == __constant_htonl(0x31097edf) && get_u32(payload,145) == __constant_htonl(0xdcb8f792))
-+ get_u16(payload,39) == 0 && get_u16(payload,135) == __constant_htons(0x7edf) && get_u16(payload,147) == __constant_htons(0xf792))
-+
-+ {
-+#ifdef IPP2P_DEBUG_WINMX
-+ printk(KERN_INFO "got WinMX\n");
-+#endif
-+ return ((IPP2P_WINMX * 100) + 4);
-+ }
-+ }
-+ return 0;
-+} /*search_winmx*/
-+
-+
-+/*Search for appleJuice commands*/
-+int
-+search_apple (const unsigned char *payload, const u16 plen)
-+{
-+ if ( (plen > 7) && (payload[6] == 0x0d) && (payload[7] == 0x0a) && (memcmp(payload, "ajprot", 6) == 0)) return (IPP2P_APPLE * 100);
-+
-+ return 0;
-+}
-+
-+
-+/*Search for BitTorrent commands*/
-+int
-+search_bittorrent (const unsigned char *payload, const u16 plen)
-+{
-+ if (plen > 20)
-+ {
-+ /* test for match 0x13+"BitTorrent protocol" */
-+ if (payload[0] == 0x13)
-+ {
-+ if (memcmp(payload+1, "BitTorrent protocol", 19) == 0) return (IPP2P_BIT * 100);
-+ }
-+
-+ /* get tracker commandos, all starts with GET /
-+ * then it can follow: scrape| announce
-+ * and then ?hash_info=
-+ */
-+ if (memcmp(payload,"GET /",5) == 0)
-+ {
-+ /* message scrape */
-+ if ( memcmp(payload+5,"scrape?info_hash=",17)==0 ) return (IPP2P_BIT * 100 + 1);
-+ /* message announce */
-+ if ( memcmp(payload+5,"announce?info_hash=",19)==0 ) return (IPP2P_BIT * 100 + 2);
-+ }
-+ }
-+ else
-+ {
-+ /* bitcomet encryptes the first packet, so we have to detect another
-+ * one later in the flow */
-+ /* first try failed, too many missdetections */
-+ //if ( size == 5 && get_u32(t,0) == __constant_htonl(1) && t[4] < 3) return (IPP2P_BIT * 100 + 3);
-+
-+ /* second try: block request packets */
-+ if ( plen == 17 && get_u32(payload,0) == __constant_htonl(0x0d) && payload[4] == 0x06 && get_u32(payload,13) == __constant_htonl(0x4000) ) return (IPP2P_BIT * 100 + 3);
-+ }
-+
-+ return 0;
-+}
-+
-+
-+
-+/*check for Kazaa get command*/
-+int
-+search_kazaa (const unsigned char *payload, const u16 plen)
-+
-+{
-+ if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a) && memcmp(payload, "GET /.hash=", 11) == 0)
-+ return (IPP2P_DATA_KAZAA * 100);
-+
-+ return 0;
-+}
-+
-+
-+/*check for gnutella get command*/
-+int
-+search_gnu (const unsigned char *payload, const u16 plen)
-+{
-+ if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
-+ {
-+ if (memcmp(payload, "GET /get/", 9) == 0) return ((IPP2P_DATA_GNU * 100) + 1);
-+ if (memcmp(payload, "GET /uri-res/", 13) == 0) return ((IPP2P_DATA_GNU * 100) + 2);
-+ }
-+ return 0;
-+}
-+
-+
-+/*check for gnutella get commands and other typical data*/
-+int
-+search_all_gnu (const unsigned char *payload, const u16 plen)
-+{
-+
-+ if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
-+ {
-+
-+ if (memcmp(payload, "GNUTELLA CONNECT/", 17) == 0) return ((IPP2P_GNU * 100) + 1);
-+ if (memcmp(payload, "GNUTELLA/", 9) == 0) return ((IPP2P_GNU * 100) + 2);
-+
-+
-+ if ((memcmp(payload, "GET /get/", 9) == 0) || (memcmp(payload, "GET /uri-res/", 13) == 0))
-+ {
-+ u16 c=8;
-+ const u16 end=plen-22;
-+ while (c < end) {
-+ if ( payload[c] == 0x0a && payload[c+1] == 0x0d && ((memcmp(&payload[c+2], "X-Gnutella-", 11) == 0) || (memcmp(&payload[c+2], "X-Queue:", 8) == 0)))
-+ return ((IPP2P_GNU * 100) + 3);
-+ c++;
-+ }
-+ }
-+ }
-+ return 0;
-+}
-+
-+
-+/*check for KaZaA download commands and other typical data*/
-+int
-+search_all_kazaa (const unsigned char *payload, const u16 plen)
-+{
-+ if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
-+ {
-+
-+ if (memcmp(payload, "GIVE ", 5) == 0) return ((IPP2P_KAZAA * 100) + 1);
-+
-+ if (memcmp(payload, "GET /", 5) == 0) {
-+ u16 c = 8;
-+ const u16 end=plen-22;
-+ while (c < end) {
-+ if ( payload[c] == 0x0a && payload[c+1] == 0x0d && ((memcmp(&payload[c+2], "X-Kazaa-Username: ", 18) == 0) || (memcmp(&payload[c+2], "User-Agent: PeerEnabler/", 24) == 0)))
-+ return ((IPP2P_KAZAA * 100) + 2);
-+ c++;
-+ }
-+ }
-+ }
-+ return 0;
-+}
-+
-+/*fast check for edonkey file segment transfer command*/
-+int
-+search_edk (const unsigned char *payload, const u16 plen)
-+{
-+ if (payload[0] != 0xe3)
-+ return 0;
-+ else {
-+ if (payload[5] == 0x47)
-+ return (IPP2P_DATA_EDK * 100);
-+ else
-+ return 0;
-+ }
-+}
-+
-+
-+
-+/*intensive but slower search for some edonkey packets including size-check*/
-+int
-+search_all_edk (const unsigned char *payload, const u16 plen)
-+{
-+ if (payload[0] != 0xe3)
-+ return 0;
-+ else {
-+ //t += head_len;
-+ const u16 cmd = get_u16(payload, 1);
-+ if (cmd == (plen - 5)) {
-+ switch (payload[5]) {
-+ case 0x01: return ((IPP2P_EDK * 100) + 1); /*Client: hello or Server:hello*/
-+ case 0x4c: return ((IPP2P_EDK * 100) + 9); /*Client: Hello-Answer*/
-+ }
-+ }
-+ return 0;
-+ }
-+}
-+
-+
-+/*fast check for Direct Connect send command*/
-+int
-+search_dc (const unsigned char *payload, const u16 plen)
-+{
-+
-+ if (payload[0] != 0x24 )
-+ return 0;
-+ else {
-+ if (memcmp(&payload[1], "Send|", 5) == 0)
-+ return (IPP2P_DATA_DC * 100);
-+ else
-+ return 0;
-+ }
-+
-+}
-+
-+
-+/*intensive but slower check for all direct connect packets*/
-+int
-+search_all_dc (const unsigned char *payload, const u16 plen)
-+{
-+// unsigned char *t = haystack;
-+
-+ if (payload[0] == 0x24 && payload[plen-1] == 0x7c)
-+ {
-+ const unsigned char *t=&payload[1];
-+ /* Client-Hub-Protocol */
-+ if (memcmp(t, "Lock ", 5) == 0) return ((IPP2P_DC * 100) + 1);
-+ /* Client-Client-Protocol, some are already recognized by client-hub (like lock) */
-+ if (memcmp(t, "MyNick ", 7) == 0) return ((IPP2P_DC * 100) + 38);
-+ }
-+ return 0;
-+}
-+
-+/*check for mute*/
-+int
-+search_mute (const unsigned char *payload, const u16 plen)
-+{
-+ if ( plen == 209 || plen == 345 || plen == 473 || plen == 609 || plen == 1121 )
-+ {
-+ //printk(KERN_DEBUG "size hit: %u",size);
-+ if (memcmp(payload,"PublicKey: ",11) == 0 )
-+ {
-+ return ((IPP2P_MUTE * 100) + 0);
-+
-+/* if (memcmp(t+size-14,"\x0aEndPublicKey\x0a",14) == 0)
-+ {
-+ printk(KERN_DEBUG "end pubic key hit: %u",size);
-+
-+ }*/
-+ }
-+ }
-+ return 0;
-+}
-+
-+
-+/* check for xdcc */
-+int
-+search_xdcc (const unsigned char *payload, const u16 plen)
-+{
-+ /* search in small packets only */
-+ if (plen > 20 && plen < 200 && payload[plen-1] == 0x0a && payload[plen-2] == 0x0d && memcmp(payload,"PRIVMSG ",8) == 0)
-+ {
-+
-+ u16 x=10;
-+ const u16 end=plen - 13;
-+
-+ /* is seems to be a irc private massage, chedck for xdcc command */
-+ while (x < end)
-+ {
-+ if (payload[x] == ':')
-+ {
-+ if ( memcmp(&payload[x+1],"xdcc send #",11) == 0 )
-+ return ((IPP2P_XDCC * 100) + 0);
-+ }
-+ x++;
-+ }
-+ }
-+ return 0;
-+}
-+
-+/* search for waste */
-+int search_waste(const unsigned char *payload, const u16 plen)
-+{
-+ if ( plen >= 8 && memcmp(payload,"GET.sha1:",9) == 0)
-+ return ((IPP2P_WASTE * 100) + 0);
-+
-+ return 0;
-+}
-+
-+
-+static struct {
-+ int command;
-+ __u8 short_hand; /*for fucntions included in short hands*/
-+ int packet_len;
-+ int (*function_name) (const unsigned char *, const u16);
-+} matchlist[] = {
-+ {IPP2P_EDK,SHORT_HAND_IPP2P,20, &search_all_edk},
-+// {IPP2P_DATA_KAZAA,SHORT_HAND_DATA,200, &search_kazaa},
-+// {IPP2P_DATA_EDK,SHORT_HAND_DATA,60, &search_edk},
-+// {IPP2P_DATA_DC,SHORT_HAND_DATA,26, &search_dc},
-+ {IPP2P_DC,SHORT_HAND_IPP2P,5, search_all_dc},
-+// {IPP2P_DATA_GNU,SHORT_HAND_DATA,40, &search_gnu},
-+ {IPP2P_GNU,SHORT_HAND_IPP2P,5, &search_all_gnu},
-+ {IPP2P_KAZAA,SHORT_HAND_IPP2P,5, &search_all_kazaa},
-+ {IPP2P_BIT,SHORT_HAND_IPP2P,20, &search_bittorrent},
-+ {IPP2P_APPLE,SHORT_HAND_IPP2P,5, &search_apple},
-+ {IPP2P_SOUL,SHORT_HAND_IPP2P,5, &search_soul},
-+ {IPP2P_WINMX,SHORT_HAND_IPP2P,2, &search_winmx},
-+ {IPP2P_ARES,SHORT_HAND_IPP2P,5, &search_ares},
-+ {IPP2P_MUTE,SHORT_HAND_NONE,200, &search_mute},
-+ {IPP2P_WASTE,SHORT_HAND_NONE,5, &search_waste},
-+ {IPP2P_XDCC,SHORT_HAND_NONE,5, &search_xdcc},
-+ {0,0,0,NULL}
-+};
-+
-+
-+static struct {
-+ int command;
-+ __u8 short_hand; /*for fucntions included in short hands*/
-+ int packet_len;
-+ int (*function_name) (unsigned char *, int);
-+} udp_list[] = {
-+ {IPP2P_KAZAA,SHORT_HAND_IPP2P,14, &udp_search_kazaa},
-+ {IPP2P_BIT,SHORT_HAND_IPP2P,23, &udp_search_bit},
-+ {IPP2P_GNU,SHORT_HAND_IPP2P,11, &udp_search_gnu},
-+ {IPP2P_EDK,SHORT_HAND_IPP2P,9, &udp_search_edk},
-+ {IPP2P_DC,SHORT_HAND_IPP2P,12, &udp_search_directconnect},
-+ {0,0,0,NULL}
-+};
-+
-+
-+static int
-+match(const struct sk_buff *skb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ const void *matchinfo,
-+ int offset,
-+ unsigned int protoff,
-+ int *hotdrop)
-+{
-+ const struct ipt_p2p_info *info = matchinfo;
-+ unsigned char *haystack;
-+ struct iphdr *ip = skb->nh.iph;
-+ int p2p_result = 0, i = 0;
-+// int head_len;
-+ int hlen = ntohs(ip->tot_len)-(ip->ihl*4); /*hlen = packet-data length*/
-+
-+ /*must not be a fragment*/
-+ if (offset) {
-+ if (info->debug) printk("IPP2P.match: offset found %i \n",offset);
-+ return 0;
-+ }
-+
-+ /*make sure that skb is linear*/
-+ if(skb_is_nonlinear(skb)){
-+ if (info->debug) printk("IPP2P.match: nonlinear skb found\n");
-+ return 0;
-+ }
-+
-+
-+ haystack=(char *)ip+(ip->ihl*4); /*haystack = packet data*/
-+
-+ switch (ip->protocol){
-+ case IPPROTO_TCP: /*what to do with a TCP packet*/
-+ {
-+ struct tcphdr *tcph = (void *) ip + ip->ihl * 4;
-+
-+ if (tcph->fin) return 0; /*if FIN bit is set bail out*/
-+ if (tcph->syn) return 0; /*if SYN bit is set bail out*/
-+ if (tcph->rst) return 0; /*if RST bit is set bail out*/
-+
-+ haystack += tcph->doff * 4; /*get TCP-Header-Size*/
-+ hlen -= tcph->doff * 4;
-+ while (matchlist[i].command) {
-+ if ((((info->cmd & matchlist[i].command) == matchlist[i].command) ||
-+ ((info->cmd & matchlist[i].short_hand) == matchlist[i].short_hand)) &&
-+ (hlen > matchlist[i].packet_len)) {
-+ p2p_result = matchlist[i].function_name(haystack, hlen);
-+ if (p2p_result)
-+ {
-+ if (info->debug) printk("IPP2P.debug:TCP-match: %i from: %u.%u.%u.%u:%i to: %u.%u.%u.%u:%i Length: %i\n",
-+ p2p_result, NIPQUAD(ip->saddr),ntohs(tcph->source), NIPQUAD(ip->daddr),ntohs(tcph->dest),hlen);
-+ return p2p_result;
-+ }
-+ }
-+ i++;
-+ }
-+ return p2p_result;
-+ }
-+
-+ case IPPROTO_UDP: /*what to do with an UDP packet*/
-+ {
-+ struct udphdr *udph = (void *) ip + ip->ihl * 4;
-+
-+ while (udp_list[i].command){
-+ if ((((info->cmd & udp_list[i].command) == udp_list[i].command) ||
-+ ((info->cmd & udp_list[i].short_hand) == udp_list[i].short_hand)) &&
-+ (hlen > udp_list[i].packet_len)) {
-+ p2p_result = udp_list[i].function_name(haystack, hlen);
-+ if (p2p_result){
-+ if (info->debug) printk("IPP2P.debug:UDP-match: %i from: %u.%u.%u.%u:%i to: %u.%u.%u.%u:%i Length: %i\n",
-+ p2p_result, NIPQUAD(ip->saddr),ntohs(udph->source), NIPQUAD(ip->daddr),ntohs(udph->dest),hlen);
-+ return p2p_result;
-+ }
-+ }
-+ i++;
-+ }
-+ return p2p_result;
-+ }
-+
-+ default: return 0;
-+ }
-+}
-+
-+
-+
-+static int
-+checkentry(const char *tablename,
-+ const void *ip,
-+ void *matchinfo,
-+ unsigned int matchsize,
-+ unsigned int hook_mask)
-+{
-+ /* Must specify -p tcp */
-+/* if (ip->proto != IPPROTO_TCP || (ip->invflags & IPT_INV_PROTO)) {
-+ * printk("ipp2p: Only works on TCP packets, use -p tcp\n");
-+ * return 0;
-+ * }*/
-+ return 1;
-+}
-+
-+
-+
-+
-+static struct ipt_match ipp2p_match = {
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-+ { NULL, NULL },
-+ "ipp2p",
-+ &match,
-+ &checkentry,
-+ NULL,
-+ THIS_MODULE
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-+ .name = "ipp2p",
-+ .match = &match,
-+ .checkentry = &checkentry,
-+ .me = THIS_MODULE,
-+#endif
-+};
-+
-+
-+static int __init init(void)
-+{
-+ printk(KERN_INFO "IPP2P v%s loading\n", IPP2P_VERSION);
-+ return ipt_register_match(&ipp2p_match);
-+}
-+
-+static void __exit fini(void)
-+{
-+ ipt_unregister_match(&ipp2p_match);
-+ printk(KERN_INFO "IPP2P v%s unloaded\n", IPP2P_VERSION);
-+}
-+
-+module_init(init);
-+module_exit(fini);
-+
-+
+++ /dev/null
-diff --new-file -urp linux-2.6.15/fs/Kconfig linux-2.6.15-squashfs3.0/fs/Kconfig
---- linux-2.6.15/fs/Kconfig 2006-03-01 22:37:27.000000000 +0000
-+++ linux-2.6.15-squashfs3.0/fs/Kconfig 2006-03-07 21:12:37.000000000 +0000
-@@ -1151,6 +1151,71 @@ config CRAMFS
-
- If unsure, say N.
-
-+config SQUASHFS
-+ tristate "SquashFS 3.0 - Squashed file system support"
-+ select ZLIB_INFLATE
-+ help
-+ Saying Y here includes support for SquashFS 3.0 (a Compressed Read-Only File
-+ System). Squashfs is a highly compressed read-only filesystem for Linux.
-+ It uses zlib compression to compress both files, inodes and directories.
-+ Inodes in the system are very small and all blocks are packed to minimise
-+ data overhead. Block sizes greater than 4K are supported up to a maximum of 64K.
-+ SquashFS 3.0 supports 64 bit filesystems and files (larger than 4GB), full
-+ uid/gid information, hard links and timestamps.
-+
-+ Squashfs is intended for general read-only filesystem use, for archival
-+ use (i.e. in cases where a .tar.gz file may be used), and in embedded
-+ systems where low overhead is needed. Further information and filesystem tools
-+ are available from http://squashfs.sourceforge.net.
-+
-+ If you want to compile this as a module ( = code which can be
-+ inserted in and removed from the running kernel whenever you want),
-+ say M here and read <file:Documentation/modules.txt>. The module
-+ will be called squashfs. Note that the root file system (the one
-+ containing the directory /) cannot be compiled as a module.
-+
-+ If unsure, say N.
-+
-+config SQUASHFS_EMBEDDED
-+
-+ bool "Additional options for memory-constrained systems"
-+ depends on SQUASHFS
-+ default n
-+ help
-+ Saying Y here allows you to specify cache sizes and how Squashfs
-+ allocates memory. This is only intended for memory constrained
-+ systems.
-+
-+ If unsure, say N.
-+
-+config SQUASHFS_FRAGMENT_CACHE_SIZE
-+ int "Number of fragments cached" if SQUASHFS_EMBEDDED
-+ depends on SQUASHFS
-+ default "3"
-+ help
-+ By default SquashFS caches the last 3 fragments read from
-+ the filesystem. Increasing this amount may mean SquashFS
-+ has to re-read fragments less often from disk, at the expense
-+ of extra system memory. Decreasing this amount will mean
-+ SquashFS uses less memory at the expense of extra reads from disk.
-+
-+ Note there must be at least one cached fragment. Anything
-+ much more than three will probably not make much difference.
-+
-+config SQUASHFS_VMALLOC
-+ bool "Use Vmalloc rather than Kmalloc" if SQUASHFS_EMBEDDED
-+ depends on SQUASHFS
-+ default n
-+ help
-+ By default SquashFS uses kmalloc to obtain fragment cache memory.
-+ Kmalloc memory is the standard kernel allocator, but it can fail
-+ on memory constrained systems. Because of the way Vmalloc works,
-+ Vmalloc can succeed when kmalloc fails. Specifying this option
-+ will make SquashFS always use Vmalloc to allocate the
-+ fragment cache memory.
-+
-+ If unsure, say N.
-+
- config VXFS_FS
- tristate "FreeVxFS file system support (VERITAS VxFS(TM) compatible)"
- help
-diff --new-file -urp linux-2.6.15/fs/Makefile linux-2.6.15-squashfs3.0/fs/Makefile
---- linux-2.6.15/fs/Makefile 2006-03-01 22:37:27.000000000 +0000
-+++ linux-2.6.15-squashfs3.0/fs/Makefile 2006-03-07 21:12:37.000000000 +0000
-@@ -55,6 +55,7 @@ obj-$(CONFIG_EXT3_FS) += ext3/ # Before
- obj-$(CONFIG_JBD) += jbd/
- obj-$(CONFIG_EXT2_FS) += ext2/
- obj-$(CONFIG_CRAMFS) += cramfs/
-+obj-$(CONFIG_SQUASHFS) += squashfs/
- obj-$(CONFIG_RAMFS) += ramfs/
- obj-$(CONFIG_HUGETLBFS) += hugetlbfs/
- obj-$(CONFIG_CODA_FS) += coda/
-diff --new-file -urp linux-2.6.15/fs/squashfs/inode.c linux-2.6.15-squashfs3.0/fs/squashfs/inode.c
---- linux-2.6.15/fs/squashfs/inode.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.15-squashfs3.0/fs/squashfs/inode.c 2006-03-07 21:12:37.000000000 +0000
-@@ -0,0 +1,2127 @@
-+/*
-+ * Squashfs - a compressed read only filesystem for Linux
-+ *
-+ * Copyright (c) 2002, 2003, 2004, 2005, 2006
-+ * Phillip Lougher <phillip@lougher.org.uk>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2,
-+ * or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * inode.c
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/squashfs_fs.h>
-+#include <linux/module.h>
-+#include <linux/errno.h>
-+#include <linux/slab.h>
-+#include <linux/fs.h>
-+#include <linux/smp_lock.h>
-+#include <linux/slab.h>
-+#include <linux/squashfs_fs_sb.h>
-+#include <linux/squashfs_fs_i.h>
-+#include <linux/buffer_head.h>
-+#include <linux/vfs.h>
-+#include <linux/init.h>
-+#include <linux/dcache.h>
-+#include <linux/wait.h>
-+#include <linux/zlib.h>
-+#include <linux/blkdev.h>
-+#include <linux/vmalloc.h>
-+#include <asm/uaccess.h>
-+#include <asm/semaphore.h>
-+
-+#include "squashfs.h"
-+
-+static void squashfs_put_super(struct super_block *);
-+static int squashfs_statfs(struct super_block *, struct kstatfs *);
-+static int squashfs_symlink_readpage(struct file *file, struct page *page);
-+static int squashfs_readpage(struct file *file, struct page *page);
-+static int squashfs_readpage4K(struct file *file, struct page *page);
-+static int squashfs_readdir(struct file *, void *, filldir_t);
-+static struct inode *squashfs_alloc_inode(struct super_block *sb);
-+static void squashfs_destroy_inode(struct inode *inode);
-+static int init_inodecache(void);
-+static void destroy_inodecache(void);
-+static struct dentry *squashfs_lookup(struct inode *, struct dentry *,
-+ struct nameidata *);
-+static struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode);
-+static long long read_blocklist(struct inode *inode, int index,
-+ int readahead_blks, char *block_list,
-+ unsigned short **block_p, unsigned int *bsize);
-+static struct super_block *squashfs_get_sb(struct file_system_type *, int,
-+ const char *, void *);
-+
-+
-+static z_stream stream;
-+
-+static struct file_system_type squashfs_fs_type = {
-+ .owner = THIS_MODULE,
-+ .name = "squashfs",
-+ .get_sb = squashfs_get_sb,
-+ .kill_sb = kill_block_super,
-+ .fs_flags = FS_REQUIRES_DEV
-+};
-+
-+static unsigned char squashfs_filetype_table[] = {
-+ DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
-+};
-+
-+static struct super_operations squashfs_ops = {
-+ .alloc_inode = squashfs_alloc_inode,
-+ .destroy_inode = squashfs_destroy_inode,
-+ .statfs = squashfs_statfs,
-+ .put_super = squashfs_put_super,
-+};
-+
-+SQSH_EXTERN struct address_space_operations squashfs_symlink_aops = {
-+ .readpage = squashfs_symlink_readpage
-+};
-+
-+SQSH_EXTERN struct address_space_operations squashfs_aops = {
-+ .readpage = squashfs_readpage
-+};
-+
-+SQSH_EXTERN struct address_space_operations squashfs_aops_4K = {
-+ .readpage = squashfs_readpage4K
-+};
-+
-+static struct file_operations squashfs_dir_ops = {
-+ .read = generic_read_dir,
-+ .readdir = squashfs_readdir
-+};
-+
-+SQSH_EXTERN struct inode_operations squashfs_dir_inode_ops = {
-+ .lookup = squashfs_lookup
-+};
-+
-+
-+static struct buffer_head *get_block_length(struct super_block *s,
-+ int *cur_index, int *offset, int *c_byte)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ unsigned short temp;
-+ struct buffer_head *bh;
-+
-+ if (!(bh = sb_bread(s, *cur_index)))
-+ goto out;
-+
-+ if (msblk->devblksize - *offset == 1) {
-+ if (msblk->swap)
-+ ((unsigned char *) &temp)[1] = *((unsigned char *)
-+ (bh->b_data + *offset));
-+ else
-+ ((unsigned char *) &temp)[0] = *((unsigned char *)
-+ (bh->b_data + *offset));
-+ brelse(bh);
-+ if (!(bh = sb_bread(s, ++(*cur_index))))
-+ goto out;
-+ if (msblk->swap)
-+ ((unsigned char *) &temp)[0] = *((unsigned char *)
-+ bh->b_data);
-+ else
-+ ((unsigned char *) &temp)[1] = *((unsigned char *)
-+ bh->b_data);
-+ *c_byte = temp;
-+ *offset = 1;
-+ } else {
-+ if (msblk->swap) {
-+ ((unsigned char *) &temp)[1] = *((unsigned char *)
-+ (bh->b_data + *offset));
-+ ((unsigned char *) &temp)[0] = *((unsigned char *)
-+ (bh->b_data + *offset + 1));
-+ } else {
-+ ((unsigned char *) &temp)[0] = *((unsigned char *)
-+ (bh->b_data + *offset));
-+ ((unsigned char *) &temp)[1] = *((unsigned char *)
-+ (bh->b_data + *offset + 1));
-+ }
-+ *c_byte = temp;
-+ *offset += 2;
-+ }
-+
-+ if (SQUASHFS_CHECK_DATA(msblk->sblk.flags)) {
-+ if (*offset == msblk->devblksize) {
-+ brelse(bh);
-+ if (!(bh = sb_bread(s, ++(*cur_index))))
-+ goto out;
-+ *offset = 0;
-+ }
-+ if (*((unsigned char *) (bh->b_data + *offset)) !=
-+ SQUASHFS_MARKER_BYTE) {
-+ ERROR("Metadata block marker corrupt @ %x\n",
-+ *cur_index);
-+ brelse(bh);
-+ goto out;
-+ }
-+ (*offset)++;
-+ }
-+ return bh;
-+
-+out:
-+ return NULL;
-+}
-+
-+
-+SQSH_EXTERN unsigned int squashfs_read_data(struct super_block *s, char *buffer,
-+ long long index, unsigned int length,
-+ long long *next_index)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ struct buffer_head *bh[((SQUASHFS_FILE_MAX_SIZE - 1) >>
-+ msblk->devblksize_log2) + 2];
-+ unsigned int offset = index & ((1 << msblk->devblksize_log2) - 1);
-+ unsigned int cur_index = index >> msblk->devblksize_log2;
-+ int bytes, avail_bytes, b = 0, k;
-+ char *c_buffer;
-+ unsigned int compressed;
-+ unsigned int c_byte = length;
-+
-+ if (c_byte) {
-+ bytes = msblk->devblksize - offset;
-+ compressed = SQUASHFS_COMPRESSED_BLOCK(c_byte);
-+ c_buffer = compressed ? msblk->read_data : buffer;
-+ c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
-+
-+ TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
-+ ? "" : "un", (unsigned int) c_byte);
-+
-+ if (!(bh[0] = sb_getblk(s, cur_index)))
-+ goto block_release;
-+
-+ for (b = 1; bytes < c_byte; b++) {
-+ if (!(bh[b] = sb_getblk(s, ++cur_index)))
-+ goto block_release;
-+ bytes += msblk->devblksize;
-+ }
-+ ll_rw_block(READ, b, bh);
-+ } else {
-+ if (!(bh[0] = get_block_length(s, &cur_index, &offset,
-+ &c_byte)))
-+ goto read_failure;
-+
-+ bytes = msblk->devblksize - offset;
-+ compressed = SQUASHFS_COMPRESSED(c_byte);
-+ c_buffer = compressed ? msblk->read_data : buffer;
-+ c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
-+
-+ TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
-+ ? "" : "un", (unsigned int) c_byte);
-+
-+ for (b = 1; bytes < c_byte; b++) {
-+ if (!(bh[b] = sb_getblk(s, ++cur_index)))
-+ goto block_release;
-+ bytes += msblk->devblksize;
-+ }
-+ ll_rw_block(READ, b - 1, bh + 1);
-+ }
-+
-+ if (compressed)
-+ down(&msblk->read_data_mutex);
-+
-+ for (bytes = 0, k = 0; k < b; k++) {
-+ avail_bytes = (c_byte - bytes) > (msblk->devblksize - offset) ?
-+ msblk->devblksize - offset :
-+ c_byte - bytes;
-+ wait_on_buffer(bh[k]);
-+ if (!buffer_uptodate(bh[k]))
-+ goto block_release;
-+ memcpy(c_buffer + bytes, bh[k]->b_data + offset, avail_bytes);
-+ bytes += avail_bytes;
-+ offset = 0;
-+ brelse(bh[k]);
-+ }
-+
-+ /*
-+ * uncompress block
-+ */
-+ if (compressed) {
-+ int zlib_err;
-+
-+ stream.next_in = c_buffer;
-+ stream.avail_in = c_byte;
-+ stream.next_out = buffer;
-+ stream.avail_out = msblk->read_size;
-+
-+ if (((zlib_err = zlib_inflateInit(&stream)) != Z_OK) ||
-+ ((zlib_err = zlib_inflate(&stream, Z_FINISH))
-+ != Z_STREAM_END) || ((zlib_err =
-+ zlib_inflateEnd(&stream)) != Z_OK)) {
-+ ERROR("zlib_fs returned unexpected result 0x%x\n",
-+ zlib_err);
-+ bytes = 0;
-+ } else
-+ bytes = stream.total_out;
-+
-+ up(&msblk->read_data_mutex);
-+ }
-+
-+ if (next_index)
-+ *next_index = index + c_byte + (length ? 0 :
-+ (SQUASHFS_CHECK_DATA(msblk->sblk.flags)
-+ ? 3 : 2));
-+ return bytes;
-+
-+block_release:
-+ while (--b >= 0)
-+ brelse(bh[b]);
-+
-+read_failure:
-+ ERROR("sb_bread failed reading block 0x%x\n", cur_index);
-+ return 0;
-+}
-+
-+
-+SQSH_EXTERN int squashfs_get_cached_block(struct super_block *s, char *buffer,
-+ long long block, unsigned int offset,
-+ int length, long long *next_block,
-+ unsigned int *next_offset)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ int n, i, bytes, return_length = length;
-+ long long next_index;
-+
-+ TRACE("Entered squashfs_get_cached_block [%llx:%x]\n", block, offset);
-+
-+ while ( 1 ) {
-+ for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
-+ if (msblk->block_cache[i].block == block)
-+ break;
-+
-+ down(&msblk->block_cache_mutex);
-+
-+ if (i == SQUASHFS_CACHED_BLKS) {
-+ /* read inode header block */
-+ for (i = msblk->next_cache, n = SQUASHFS_CACHED_BLKS;
-+ n ; n --, i = (i + 1) %
-+ SQUASHFS_CACHED_BLKS)
-+ if (msblk->block_cache[i].block !=
-+ SQUASHFS_USED_BLK)
-+ break;
-+
-+ if (n == 0) {
-+ wait_queue_t wait;
-+
-+ init_waitqueue_entry(&wait, current);
-+ add_wait_queue(&msblk->waitq, &wait);
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ up(&msblk->block_cache_mutex);
-+ schedule();
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&msblk->waitq, &wait);
-+ continue;
-+ }
-+ msblk->next_cache = (i + 1) % SQUASHFS_CACHED_BLKS;
-+
-+ if (msblk->block_cache[i].block ==
-+ SQUASHFS_INVALID_BLK) {
-+ if (!(msblk->block_cache[i].data =
-+ kmalloc(SQUASHFS_METADATA_SIZE,
-+ GFP_KERNEL))) {
-+ ERROR("Failed to allocate cache"
-+ "block\n");
-+ up(&msblk->block_cache_mutex);
-+ goto out;
-+ }
-+ }
-+
-+ msblk->block_cache[i].block = SQUASHFS_USED_BLK;
-+ up(&msblk->block_cache_mutex);
-+
-+ if (!(msblk->block_cache[i].length =
-+ squashfs_read_data(s,
-+ msblk->block_cache[i].data,
-+ block, 0, &next_index))) {
-+ ERROR("Unable to read cache block [%llx:%x]\n",
-+ block, offset);
-+ goto out;
-+ }
-+
-+ down(&msblk->block_cache_mutex);
-+ wake_up(&msblk->waitq);
-+ msblk->block_cache[i].block = block;
-+ msblk->block_cache[i].next_index = next_index;
-+ TRACE("Read cache block [%llx:%x]\n", block, offset);
-+ }
-+
-+ if (msblk->block_cache[i].block != block) {
-+ up(&msblk->block_cache_mutex);
-+ continue;
-+ }
-+
-+ if ((bytes = msblk->block_cache[i].length - offset) >= length) {
-+ if (buffer)
-+ memcpy(buffer, msblk->block_cache[i].data +
-+ offset, length);
-+ if (msblk->block_cache[i].length - offset == length) {
-+ *next_block = msblk->block_cache[i].next_index;
-+ *next_offset = 0;
-+ } else {
-+ *next_block = block;
-+ *next_offset = offset + length;
-+ }
-+ up(&msblk->block_cache_mutex);
-+ goto finish;
-+ } else {
-+ if (buffer) {
-+ memcpy(buffer, msblk->block_cache[i].data +
-+ offset, bytes);
-+ buffer += bytes;
-+ }
-+ block = msblk->block_cache[i].next_index;
-+ up(&msblk->block_cache_mutex);
-+ length -= bytes;
-+ offset = 0;
-+ }
-+ }
-+
-+finish:
-+ return return_length;
-+out:
-+ return 0;
-+}
-+
-+
-+static int get_fragment_location(struct super_block *s, unsigned int fragment,
-+ long long *fragment_start_block,
-+ unsigned int *fragment_size)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ long long start_block =
-+ msblk->fragment_index[SQUASHFS_FRAGMENT_INDEX(fragment)];
-+ int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
-+ struct squashfs_fragment_entry fragment_entry;
-+
-+ if (msblk->swap) {
-+ struct squashfs_fragment_entry sfragment_entry;
-+
-+ if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
-+ start_block, offset,
-+ sizeof(sfragment_entry), &start_block,
-+ &offset))
-+ goto out;
-+ SQUASHFS_SWAP_FRAGMENT_ENTRY(&fragment_entry, &sfragment_entry);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
-+ start_block, offset,
-+ sizeof(fragment_entry), &start_block,
-+ &offset))
-+ goto out;
-+
-+ *fragment_start_block = fragment_entry.start_block;
-+ *fragment_size = fragment_entry.size;
-+
-+ return 1;
-+
-+out:
-+ return 0;
-+}
-+
-+
-+SQSH_EXTERN void release_cached_fragment(struct squashfs_sb_info *msblk, struct
-+ squashfs_fragment_cache *fragment)
-+{
-+ down(&msblk->fragment_mutex);
-+ fragment->locked --;
-+ wake_up(&msblk->fragment_wait_queue);
-+ up(&msblk->fragment_mutex);
-+}
-+
-+
-+SQSH_EXTERN struct squashfs_fragment_cache *get_cached_fragment(struct super_block
-+ *s, long long start_block,
-+ int length)
-+{
-+ int i, n;
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+
-+ while ( 1 ) {
-+ down(&msblk->fragment_mutex);
-+
-+ for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS &&
-+ msblk->fragment[i].block != start_block; i++);
-+
-+ if (i == SQUASHFS_CACHED_FRAGMENTS) {
-+ for (i = msblk->next_fragment, n =
-+ SQUASHFS_CACHED_FRAGMENTS; n &&
-+ msblk->fragment[i].locked; n--, i = (i + 1) %
-+ SQUASHFS_CACHED_FRAGMENTS);
-+
-+ if (n == 0) {
-+ wait_queue_t wait;
-+
-+ init_waitqueue_entry(&wait, current);
-+ add_wait_queue(&msblk->fragment_wait_queue,
-+ &wait);
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ up(&msblk->fragment_mutex);
-+ schedule();
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&msblk->fragment_wait_queue,
-+ &wait);
-+ continue;
-+ }
-+ msblk->next_fragment = (msblk->next_fragment + 1) %
-+ SQUASHFS_CACHED_FRAGMENTS;
-+
-+ if (msblk->fragment[i].data == NULL)
-+ if (!(msblk->fragment[i].data = SQUASHFS_ALLOC
-+ (SQUASHFS_FILE_MAX_SIZE))) {
-+ ERROR("Failed to allocate fragment "
-+ "cache block\n");
-+ up(&msblk->fragment_mutex);
-+ goto out;
-+ }
-+
-+ msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
-+ msblk->fragment[i].locked = 1;
-+ up(&msblk->fragment_mutex);
-+
-+ if (!(msblk->fragment[i].length = squashfs_read_data(s,
-+ msblk->fragment[i].data,
-+ start_block, length, NULL))) {
-+ ERROR("Unable to read fragment cache block "
-+ "[%llx]\n", start_block);
-+ msblk->fragment[i].locked = 0;
-+ goto out;
-+ }
-+
-+ msblk->fragment[i].block = start_block;
-+ TRACE("New fragment %d, start block %lld, locked %d\n",
-+ i, msblk->fragment[i].block,
-+ msblk->fragment[i].locked);
-+ break;
-+ }
-+
-+ msblk->fragment[i].locked++;
-+ up(&msblk->fragment_mutex);
-+ TRACE("Got fragment %d, start block %lld, locked %d\n", i,
-+ msblk->fragment[i].block,
-+ msblk->fragment[i].locked);
-+ break;
-+ }
-+
-+ return &msblk->fragment[i];
-+
-+out:
-+ return NULL;
-+}
-+
-+
-+static struct inode *squashfs_new_inode(struct super_block *s,
-+ struct squashfs_base_inode_header *inodeb)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ struct inode *i = new_inode(s);
-+
-+ if (i) {
-+ i->i_ino = inodeb->inode_number;
-+ i->i_mtime.tv_sec = inodeb->mtime;
-+ i->i_atime.tv_sec = inodeb->mtime;
-+ i->i_ctime.tv_sec = inodeb->mtime;
-+ i->i_uid = msblk->uid[inodeb->uid];
-+ i->i_mode = inodeb->mode;
-+ i->i_size = 0;
-+ if (inodeb->guid == SQUASHFS_GUIDS)
-+ i->i_gid = i->i_uid;
-+ else
-+ i->i_gid = msblk->guid[inodeb->guid];
-+ }
-+
-+ return i;
-+}
-+
-+
-+static struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode)
-+{
-+ struct inode *i;
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ long long block = SQUASHFS_INODE_BLK(inode) +
-+ sblk->inode_table_start;
-+ unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
-+ long long next_block;
-+ unsigned int next_offset;
-+ union squashfs_inode_header id, sid;
-+ struct squashfs_base_inode_header *inodeb = &id.base,
-+ *sinodeb = &sid.base;
-+
-+ TRACE("Entered squashfs_iget\n");
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
-+ offset, sizeof(*sinodeb), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_BASE_INODE_HEADER(inodeb, sinodeb,
-+ sizeof(*sinodeb));
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *) inodeb, block,
-+ offset, sizeof(*inodeb), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ switch(inodeb->inode_type) {
-+ case SQUASHFS_FILE_TYPE: {
-+ unsigned int frag_size;
-+ long long frag_blk;
-+ struct squashfs_reg_inode_header *inodep = &id.reg;
-+ struct squashfs_reg_inode_header *sinodep = &sid.reg;
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *)
-+ sinodep, block, offset,
-+ sizeof(*sinodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_REG_INODE_HEADER(inodep, sinodep);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *)
-+ inodep, block, offset,
-+ sizeof(*inodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ frag_blk = SQUASHFS_INVALID_BLK;
-+ if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
-+ !get_fragment_location(s,
-+ inodep->fragment, &frag_blk, &frag_size))
-+ goto failed_read;
-+
-+ if((i = squashfs_new_inode(s, inodeb)) == NULL)
-+ goto failed_read1;
-+
-+ i->i_nlink = 1;
-+ i->i_size = inodep->file_size;
-+ i->i_fop = &generic_ro_fops;
-+ i->i_mode |= S_IFREG;
-+ i->i_blocks = ((i->i_size - 1) >> 9) + 1;
-+ i->i_blksize = PAGE_CACHE_SIZE;
-+ SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
-+ SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
-+ SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
-+ SQUASHFS_I(i)->start_block = inodep->start_block;
-+ SQUASHFS_I(i)->u.s1.block_list_start = next_block;
-+ SQUASHFS_I(i)->offset = next_offset;
-+ if (sblk->block_size > 4096)
-+ i->i_data.a_ops = &squashfs_aops;
-+ else
-+ i->i_data.a_ops = &squashfs_aops_4K;
-+
-+ TRACE("File inode %x:%x, start_block %llx, "
-+ "block_list_start %llx, offset %x\n",
-+ SQUASHFS_INODE_BLK(inode), offset,
-+ inodep->start_block, next_block,
-+ next_offset);
-+ break;
-+ }
-+ case SQUASHFS_LREG_TYPE: {
-+ unsigned int frag_size;
-+ long long frag_blk;
-+ struct squashfs_lreg_inode_header *inodep = &id.lreg;
-+ struct squashfs_lreg_inode_header *sinodep = &sid.lreg;
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *)
-+ sinodep, block, offset,
-+ sizeof(*sinodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_LREG_INODE_HEADER(inodep, sinodep);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *)
-+ inodep, block, offset,
-+ sizeof(*inodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ frag_blk = SQUASHFS_INVALID_BLK;
-+ if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
-+ !get_fragment_location(s,
-+ inodep->fragment, &frag_blk, &frag_size))
-+ goto failed_read;
-+
-+ if((i = squashfs_new_inode(s, inodeb)) == NULL)
-+ goto failed_read1;
-+
-+ i->i_nlink = inodep->nlink;
-+ i->i_size = inodep->file_size;
-+ i->i_fop = &generic_ro_fops;
-+ i->i_mode |= S_IFREG;
-+ i->i_blocks = ((i->i_size - 1) >> 9) + 1;
-+ i->i_blksize = PAGE_CACHE_SIZE;
-+ SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
-+ SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
-+ SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
-+ SQUASHFS_I(i)->start_block = inodep->start_block;
-+ SQUASHFS_I(i)->u.s1.block_list_start = next_block;
-+ SQUASHFS_I(i)->offset = next_offset;
-+ if (sblk->block_size > 4096)
-+ i->i_data.a_ops = &squashfs_aops;
-+ else
-+ i->i_data.a_ops = &squashfs_aops_4K;
-+
-+ TRACE("File inode %x:%x, start_block %llx, "
-+ "block_list_start %llx, offset %x\n",
-+ SQUASHFS_INODE_BLK(inode), offset,
-+ inodep->start_block, next_block,
-+ next_offset);
-+ break;
-+ }
-+ case SQUASHFS_DIR_TYPE: {
-+ struct squashfs_dir_inode_header *inodep = &id.dir;
-+ struct squashfs_dir_inode_header *sinodep = &sid.dir;
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *)
-+ sinodep, block, offset,
-+ sizeof(*sinodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_DIR_INODE_HEADER(inodep, sinodep);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *)
-+ inodep, block, offset,
-+ sizeof(*inodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ if((i = squashfs_new_inode(s, inodeb)) == NULL)
-+ goto failed_read1;
-+
-+ i->i_nlink = inodep->nlink;
-+ i->i_size = inodep->file_size;
-+ i->i_op = &squashfs_dir_inode_ops;
-+ i->i_fop = &squashfs_dir_ops;
-+ i->i_mode |= S_IFDIR;
-+ SQUASHFS_I(i)->start_block = inodep->start_block;
-+ SQUASHFS_I(i)->offset = inodep->offset;
-+ SQUASHFS_I(i)->u.s2.directory_index_count = 0;
-+ SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
-+
-+ TRACE("Directory inode %x:%x, start_block %x, offset "
-+ "%x\n", SQUASHFS_INODE_BLK(inode),
-+ offset, inodep->start_block,
-+ inodep->offset);
-+ break;
-+ }
-+ case SQUASHFS_LDIR_TYPE: {
-+ struct squashfs_ldir_inode_header *inodep = &id.ldir;
-+ struct squashfs_ldir_inode_header *sinodep = &sid.ldir;
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *)
-+ sinodep, block, offset,
-+ sizeof(*sinodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_LDIR_INODE_HEADER(inodep,
-+ sinodep);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *)
-+ inodep, block, offset,
-+ sizeof(*inodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ if((i = squashfs_new_inode(s, inodeb)) == NULL)
-+ goto failed_read1;
-+
-+ i->i_nlink = inodep->nlink;
-+ i->i_size = inodep->file_size;
-+ i->i_op = &squashfs_dir_inode_ops;
-+ i->i_fop = &squashfs_dir_ops;
-+ i->i_mode |= S_IFDIR;
-+ SQUASHFS_I(i)->start_block = inodep->start_block;
-+ SQUASHFS_I(i)->offset = inodep->offset;
-+ SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
-+ SQUASHFS_I(i)->u.s2.directory_index_offset =
-+ next_offset;
-+ SQUASHFS_I(i)->u.s2.directory_index_count =
-+ inodep->i_count;
-+ SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
-+
-+ TRACE("Long directory inode %x:%x, start_block %x, "
-+ "offset %x\n",
-+ SQUASHFS_INODE_BLK(inode), offset,
-+ inodep->start_block, inodep->offset);
-+ break;
-+ }
-+ case SQUASHFS_SYMLINK_TYPE: {
-+ struct squashfs_symlink_inode_header *inodep =
-+ &id.symlink;
-+ struct squashfs_symlink_inode_header *sinodep =
-+ &sid.symlink;
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *)
-+ sinodep, block, offset,
-+ sizeof(*sinodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_SYMLINK_INODE_HEADER(inodep,
-+ sinodep);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *)
-+ inodep, block, offset,
-+ sizeof(*inodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ if((i = squashfs_new_inode(s, inodeb)) == NULL)
-+ goto failed_read1;
-+
-+ i->i_nlink = inodep->nlink;
-+ i->i_size = inodep->symlink_size;
-+ i->i_op = &page_symlink_inode_operations;
-+ i->i_data.a_ops = &squashfs_symlink_aops;
-+ i->i_mode |= S_IFLNK;
-+ SQUASHFS_I(i)->start_block = next_block;
-+ SQUASHFS_I(i)->offset = next_offset;
-+
-+ TRACE("Symbolic link inode %x:%x, start_block %llx, "
-+ "offset %x\n",
-+ SQUASHFS_INODE_BLK(inode), offset,
-+ next_block, next_offset);
-+ break;
-+ }
-+ case SQUASHFS_BLKDEV_TYPE:
-+ case SQUASHFS_CHRDEV_TYPE: {
-+ struct squashfs_dev_inode_header *inodep = &id.dev;
-+ struct squashfs_dev_inode_header *sinodep = &sid.dev;
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *)
-+ sinodep, block, offset,
-+ sizeof(*sinodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_DEV_INODE_HEADER(inodep, sinodep);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *)
-+ inodep, block, offset,
-+ sizeof(*inodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ if ((i = squashfs_new_inode(s, inodeb)) == NULL)
-+ goto failed_read1;
-+
-+ i->i_nlink = inodep->nlink;
-+ i->i_mode |= (inodeb->inode_type ==
-+ SQUASHFS_CHRDEV_TYPE) ? S_IFCHR :
-+ S_IFBLK;
-+ init_special_inode(i, i->i_mode,
-+ old_decode_dev(inodep->rdev));
-+
-+ TRACE("Device inode %x:%x, rdev %x\n",
-+ SQUASHFS_INODE_BLK(inode), offset,
-+ inodep->rdev);
-+ break;
-+ }
-+ case SQUASHFS_FIFO_TYPE:
-+ case SQUASHFS_SOCKET_TYPE: {
-+ struct squashfs_ipc_inode_header *inodep = &id.ipc;
-+ struct squashfs_ipc_inode_header *sinodep = &sid.ipc;
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *)
-+ sinodep, block, offset,
-+ sizeof(*sinodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_IPC_INODE_HEADER(inodep, sinodep);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *)
-+ inodep, block, offset,
-+ sizeof(*inodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ if ((i = squashfs_new_inode(s, inodeb)) == NULL)
-+ goto failed_read1;
-+
-+ i->i_nlink = inodep->nlink;
-+ i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
-+ ? S_IFIFO : S_IFSOCK;
-+ init_special_inode(i, i->i_mode, 0);
-+ break;
-+ }
-+ default:
-+ ERROR("Unknown inode type %d in squashfs_iget!\n",
-+ inodeb->inode_type);
-+ goto failed_read1;
-+ }
-+
-+ insert_inode_hash(i);
-+ return i;
-+
-+failed_read:
-+ ERROR("Unable to read inode [%llx:%x]\n", block, offset);
-+
-+failed_read1:
-+ return NULL;
-+}
-+
-+
-+static int read_fragment_index_table(struct super_block *s)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+
-+ /* Allocate fragment index table */
-+ if (!(msblk->fragment_index = kmalloc(SQUASHFS_FRAGMENT_INDEX_BYTES
-+ (sblk->fragments), GFP_KERNEL))) {
-+ ERROR("Failed to allocate uid/gid table\n");
-+ return 0;
-+ }
-+
-+ if (SQUASHFS_FRAGMENT_INDEX_BYTES(sblk->fragments) &&
-+ !squashfs_read_data(s, (char *)
-+ msblk->fragment_index,
-+ sblk->fragment_table_start,
-+ SQUASHFS_FRAGMENT_INDEX_BYTES
-+ (sblk->fragments) |
-+ SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
-+ ERROR("unable to read fragment index table\n");
-+ return 0;
-+ }
-+
-+ if (msblk->swap) {
-+ int i;
-+ long long fragment;
-+
-+ for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES(sblk->fragments);
-+ i++) {
-+ SQUASHFS_SWAP_FRAGMENT_INDEXES((&fragment),
-+ &msblk->fragment_index[i], 1);
-+ msblk->fragment_index[i] = fragment;
-+ }
-+ }
-+
-+ return 1;
-+}
-+
-+
-+static int supported_squashfs_filesystem(struct squashfs_sb_info *msblk, int silent)
-+{
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+
-+ msblk->iget = squashfs_iget;
-+ msblk->read_blocklist = read_blocklist;
-+ msblk->read_fragment_index_table = read_fragment_index_table;
-+
-+ if (sblk->s_major == 1) {
-+ if (!squashfs_1_0_supported(msblk)) {
-+ SERROR("Major/Minor mismatch, Squashfs 1.0 filesystems "
-+ "are unsupported\n");
-+ SERROR("Please recompile with "
-+ "Squashfs 1.0 support enabled\n");
-+ return 0;
-+ }
-+ } else if (sblk->s_major == 2) {
-+ if (!squashfs_2_0_supported(msblk)) {
-+ SERROR("Major/Minor mismatch, Squashfs 2.0 filesystems "
-+ "are unsupported\n");
-+ SERROR("Please recompile with "
-+ "Squashfs 2.0 support enabled\n");
-+ return 0;
-+ }
-+ } else if(sblk->s_major != SQUASHFS_MAJOR || sblk->s_minor >
-+ SQUASHFS_MINOR) {
-+ SERROR("Major/Minor mismatch, trying to mount newer %d.%d "
-+ "filesystem\n", sblk->s_major, sblk->s_minor);
-+ SERROR("Please update your kernel\n");
-+ return 0;
-+ }
-+
-+ return 1;
-+}
-+
-+
-+static int squashfs_fill_super(struct super_block *s, void *data, int silent)
-+{
-+ struct squashfs_sb_info *msblk;
-+ struct squashfs_super_block *sblk;
-+ int i;
-+ char b[BDEVNAME_SIZE];
-+ struct inode *root;
-+
-+ TRACE("Entered squashfs_read_superblock\n");
-+
-+ if (!(s->s_fs_info = kmalloc(sizeof(struct squashfs_sb_info),
-+ GFP_KERNEL))) {
-+ ERROR("Failed to allocate superblock\n");
-+ goto failure;
-+ }
-+ memset(s->s_fs_info, 0, sizeof(struct squashfs_sb_info));
-+ msblk = s->s_fs_info;
-+ sblk = &msblk->sblk;
-+
-+ msblk->devblksize = sb_min_blocksize(s, BLOCK_SIZE);
-+ msblk->devblksize_log2 = ffz(~msblk->devblksize);
-+
-+ init_MUTEX(&msblk->read_data_mutex);
-+ init_MUTEX(&msblk->read_page_mutex);
-+ init_MUTEX(&msblk->block_cache_mutex);
-+ init_MUTEX(&msblk->fragment_mutex);
-+ init_MUTEX(&msblk->meta_index_mutex);
-+
-+ init_waitqueue_head(&msblk->waitq);
-+ init_waitqueue_head(&msblk->fragment_wait_queue);
-+
-+ if (!squashfs_read_data(s, (char *) sblk, SQUASHFS_START,
-+ sizeof(struct squashfs_super_block) |
-+ SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
-+ SERROR("unable to read superblock\n");
-+ goto failed_mount;
-+ }
-+
-+ /* Check it is a SQUASHFS superblock */
-+ msblk->swap = 0;
-+ if ((s->s_magic = sblk->s_magic) != SQUASHFS_MAGIC) {
-+ if (sblk->s_magic == SQUASHFS_MAGIC_SWAP) {
-+ struct squashfs_super_block ssblk;
-+
-+ WARNING("Mounting a different endian SQUASHFS "
-+ "filesystem on %s\n", bdevname(s->s_bdev, b));
-+
-+ SQUASHFS_SWAP_SUPER_BLOCK(&ssblk, sblk);
-+ memcpy(sblk, &ssblk, sizeof(struct squashfs_super_block));
-+ msblk->swap = 1;
-+ } else {
-+ SERROR("Can't find a SQUASHFS superblock on %s\n",
-+ bdevname(s->s_bdev, b));
-+ goto failed_mount;
-+ }
-+ }
-+
-+ /* Check the MAJOR & MINOR versions */
-+ if(!supported_squashfs_filesystem(msblk, silent))
-+ goto failed_mount;
-+
-+ TRACE("Found valid superblock on %s\n", bdevname(s->s_bdev, b));
-+ TRACE("Inodes are %scompressed\n",
-+ SQUASHFS_UNCOMPRESSED_INODES
-+ (sblk->flags) ? "un" : "");
-+ TRACE("Data is %scompressed\n",
-+ SQUASHFS_UNCOMPRESSED_DATA(sblk->flags)
-+ ? "un" : "");
-+ TRACE("Check data is %s present in the filesystem\n",
-+ SQUASHFS_CHECK_DATA(sblk->flags) ?
-+ "" : "not");
-+ TRACE("Filesystem size %lld bytes\n", sblk->bytes_used);
-+ TRACE("Block size %d\n", sblk->block_size);
-+ TRACE("Number of inodes %d\n", sblk->inodes);
-+ if (sblk->s_major > 1)
-+ TRACE("Number of fragments %d\n", sblk->fragments);
-+ TRACE("Number of uids %d\n", sblk->no_uids);
-+ TRACE("Number of gids %d\n", sblk->no_guids);
-+ TRACE("sblk->inode_table_start %llx\n", sblk->inode_table_start);
-+ TRACE("sblk->directory_table_start %llx\n", sblk->directory_table_start);
-+ if (sblk->s_major > 1)
-+ TRACE("sblk->fragment_table_start %llx\n",
-+ sblk->fragment_table_start);
-+ TRACE("sblk->uid_start %llx\n", sblk->uid_start);
-+
-+ s->s_flags |= MS_RDONLY;
-+ s->s_op = &squashfs_ops;
-+
-+ /* Init inode_table block pointer array */
-+ if (!(msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) *
-+ SQUASHFS_CACHED_BLKS, GFP_KERNEL))) {
-+ ERROR("Failed to allocate block cache\n");
-+ goto failed_mount;
-+ }
-+
-+ for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
-+ msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
-+
-+ msblk->next_cache = 0;
-+
-+ /* Allocate read_data block */
-+ msblk->read_size = (sblk->block_size < SQUASHFS_METADATA_SIZE) ?
-+ SQUASHFS_METADATA_SIZE :
-+ sblk->block_size;
-+
-+ if (!(msblk->read_data = kmalloc(msblk->read_size, GFP_KERNEL))) {
-+ ERROR("Failed to allocate read_data block\n");
-+ goto failed_mount;
-+ }
-+
-+ /* Allocate read_page block */
-+ if (!(msblk->read_page = kmalloc(sblk->block_size, GFP_KERNEL))) {
-+ ERROR("Failed to allocate read_page block\n");
-+ goto failed_mount;
-+ }
-+
-+ /* Allocate uid and gid tables */
-+ if (!(msblk->uid = kmalloc((sblk->no_uids + sblk->no_guids) *
-+ sizeof(unsigned int), GFP_KERNEL))) {
-+ ERROR("Failed to allocate uid/gid table\n");
-+ goto failed_mount;
-+ }
-+ msblk->guid = msblk->uid + sblk->no_uids;
-+
-+ if (msblk->swap) {
-+ unsigned int suid[sblk->no_uids + sblk->no_guids];
-+
-+ if (!squashfs_read_data(s, (char *) &suid, sblk->uid_start,
-+ ((sblk->no_uids + sblk->no_guids) *
-+ sizeof(unsigned int)) |
-+ SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
-+ ERROR("unable to read uid/gid table\n");
-+ goto failed_mount;
-+ }
-+
-+ SQUASHFS_SWAP_DATA(msblk->uid, suid, (sblk->no_uids +
-+ sblk->no_guids), (sizeof(unsigned int) * 8));
-+ } else
-+ if (!squashfs_read_data(s, (char *) msblk->uid, sblk->uid_start,
-+ ((sblk->no_uids + sblk->no_guids) *
-+ sizeof(unsigned int)) |
-+ SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
-+ ERROR("unable to read uid/gid table\n");
-+ goto failed_mount;
-+ }
-+
-+
-+ if (sblk->s_major == 1 && squashfs_1_0_supported(msblk))
-+ goto allocate_root;
-+
-+ if (!(msblk->fragment = kmalloc(sizeof(struct squashfs_fragment_cache) *
-+ SQUASHFS_CACHED_FRAGMENTS, GFP_KERNEL))) {
-+ ERROR("Failed to allocate fragment block cache\n");
-+ goto failed_mount;
-+ }
-+
-+ for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) {
-+ msblk->fragment[i].locked = 0;
-+ msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
-+ msblk->fragment[i].data = NULL;
-+ }
-+
-+ msblk->next_fragment = 0;
-+
-+ /* Allocate fragment index table */
-+ if (msblk->read_fragment_index_table(s) == 0)
-+ goto failed_mount;
-+
-+allocate_root:
-+ if ((root = (msblk->iget)(s, sblk->root_inode)) == NULL)
-+ goto failed_mount;
-+
-+ if ((s->s_root = d_alloc_root(root)) == NULL) {
-+ ERROR("Root inode create failed\n");
-+ iput(root);
-+ goto failed_mount;
-+ }
-+
-+ TRACE("Leaving squashfs_read_super\n");
-+ return 0;
-+
-+failed_mount:
-+ kfree(msblk->fragment_index);
-+ kfree(msblk->fragment);
-+ kfree(msblk->uid);
-+ kfree(msblk->read_page);
-+ kfree(msblk->read_data);
-+ kfree(msblk->block_cache);
-+ kfree(msblk->fragment_index_2);
-+ kfree(s->s_fs_info);
-+ s->s_fs_info = NULL;
-+ return -EINVAL;
-+
-+failure:
-+ return -ENOMEM;
-+}
-+
-+
-+static int squashfs_statfs(struct super_block *s, struct kstatfs *buf)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+
-+ TRACE("Entered squashfs_statfs\n");
-+
-+ buf->f_type = SQUASHFS_MAGIC;
-+ buf->f_bsize = sblk->block_size;
-+ buf->f_blocks = ((sblk->bytes_used - 1) >> sblk->block_log) + 1;
-+ buf->f_bfree = buf->f_bavail = 0;
-+ buf->f_files = sblk->inodes;
-+ buf->f_ffree = 0;
-+ buf->f_namelen = SQUASHFS_NAME_LEN;
-+
-+ return 0;
-+}
-+
-+
-+static int squashfs_symlink_readpage(struct file *file, struct page *page)
-+{
-+ struct inode *inode = page->mapping->host;
-+ int index = page->index << PAGE_CACHE_SHIFT, length, bytes;
-+ long long block = SQUASHFS_I(inode)->start_block;
-+ int offset = SQUASHFS_I(inode)->offset;
-+ void *pageaddr = kmap(page);
-+
-+ TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
-+ "%llx, offset %x\n", page->index,
-+ SQUASHFS_I(inode)->start_block,
-+ SQUASHFS_I(inode)->offset);
-+
-+ for (length = 0; length < index; length += bytes) {
-+ if (!(bytes = squashfs_get_cached_block(inode->i_sb, NULL,
-+ block, offset, PAGE_CACHE_SIZE, &block,
-+ &offset))) {
-+ ERROR("Unable to read symbolic link [%llx:%x]\n", block,
-+ offset);
-+ goto skip_read;
-+ }
-+ }
-+
-+ if (length != index) {
-+ ERROR("(squashfs_symlink_readpage) length != index\n");
-+ bytes = 0;
-+ goto skip_read;
-+ }
-+
-+ bytes = (i_size_read(inode) - length) > PAGE_CACHE_SIZE ? PAGE_CACHE_SIZE :
-+ i_size_read(inode) - length;
-+
-+ if (!(bytes = squashfs_get_cached_block(inode->i_sb, pageaddr, block,
-+ offset, bytes, &block, &offset)))
-+ ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset);
-+
-+skip_read:
-+ memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
-+ kunmap(page);
-+ SetPageUptodate(page);
-+ unlock_page(page);
-+
-+ return 0;
-+}
-+
-+
-+struct meta_index *locate_meta_index(struct inode *inode, int index, int offset)
-+{
-+ struct meta_index *meta = NULL;
-+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-+ int i;
-+
-+ down(&msblk->meta_index_mutex);
-+
-+ TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
-+
-+ if(msblk->meta_index == NULL)
-+ goto not_allocated;
-+
-+ for (i = 0; i < SQUASHFS_META_NUMBER; i ++)
-+ if (msblk->meta_index[i].inode_number == inode->i_ino &&
-+ msblk->meta_index[i].offset >= offset &&
-+ msblk->meta_index[i].offset <= index &&
-+ msblk->meta_index[i].locked == 0) {
-+ TRACE("locate_meta_index: entry %d, offset %d\n", i,
-+ msblk->meta_index[i].offset);
-+ meta = &msblk->meta_index[i];
-+ offset = meta->offset;
-+ }
-+
-+ if (meta)
-+ meta->locked = 1;
-+
-+not_allocated:
-+ up(&msblk->meta_index_mutex);
-+
-+ return meta;
-+}
-+
-+
-+struct meta_index *empty_meta_index(struct inode *inode, int offset, int skip)
-+{
-+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-+ struct meta_index *meta = NULL;
-+ int i;
-+
-+ down(&msblk->meta_index_mutex);
-+
-+ TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
-+
-+ if(msblk->meta_index == NULL) {
-+ if (!(msblk->meta_index = kmalloc(sizeof(struct meta_index) *
-+ SQUASHFS_META_NUMBER, GFP_KERNEL))) {
-+ ERROR("Failed to allocate meta_index\n");
-+ goto failed;
-+ }
-+ for(i = 0; i < SQUASHFS_META_NUMBER; i++) {
-+ msblk->meta_index[i].inode_number = 0;
-+ msblk->meta_index[i].locked = 0;
-+ }
-+ msblk->next_meta_index = 0;
-+ }
-+
-+ for(i = SQUASHFS_META_NUMBER; i &&
-+ msblk->meta_index[msblk->next_meta_index].locked; i --)
-+ msblk->next_meta_index = (msblk->next_meta_index + 1) %
-+ SQUASHFS_META_NUMBER;
-+
-+ if(i == 0) {
-+ TRACE("empty_meta_index: failed!\n");
-+ goto failed;
-+ }
-+
-+ TRACE("empty_meta_index: returned meta entry %d, %p\n",
-+ msblk->next_meta_index,
-+ &msblk->meta_index[msblk->next_meta_index]);
-+
-+ meta = &msblk->meta_index[msblk->next_meta_index];
-+ msblk->next_meta_index = (msblk->next_meta_index + 1) %
-+ SQUASHFS_META_NUMBER;
-+
-+ meta->inode_number = inode->i_ino;
-+ meta->offset = offset;
-+ meta->skip = skip;
-+ meta->entries = 0;
-+ meta->locked = 1;
-+
-+failed:
-+ up(&msblk->meta_index_mutex);
-+ return meta;
-+}
-+
-+
-+void release_meta_index(struct inode *inode, struct meta_index *meta)
-+{
-+ meta->locked = 0;
-+}
-+
-+
-+static int read_block_index(struct super_block *s, int blocks, char *block_list,
-+ long long *start_block, int *offset)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ unsigned int *block_listp;
-+ int block = 0;
-+
-+ if (msblk->swap) {
-+ char sblock_list[blocks << 2];
-+
-+ if (!squashfs_get_cached_block(s, sblock_list, *start_block,
-+ *offset, blocks << 2, start_block, offset)) {
-+ ERROR("Unable to read block list [%llx:%x]\n",
-+ *start_block, *offset);
-+ goto failure;
-+ }
-+ SQUASHFS_SWAP_INTS(((unsigned int *)block_list),
-+ ((unsigned int *)sblock_list), blocks);
-+ } else
-+ if (!squashfs_get_cached_block(s, block_list, *start_block,
-+ *offset, blocks << 2, start_block, offset)) {
-+ ERROR("Unable to read block list [%llx:%x]\n",
-+ *start_block, *offset);
-+ goto failure;
-+ }
-+
-+ for (block_listp = (unsigned int *) block_list; blocks;
-+ block_listp++, blocks --)
-+ block += SQUASHFS_COMPRESSED_SIZE_BLOCK(*block_listp);
-+
-+ return block;
-+
-+failure:
-+ return -1;
-+}
-+
-+
-+#define SIZE 256
-+
-+static inline int calculate_skip(int blocks) {
-+ int skip = (blocks - 1) / ((SQUASHFS_SLOTS * SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES);
-+ return skip >= 7 ? 7 : skip + 1;
-+}
-+
-+
-+static int get_meta_index(struct inode *inode, int index,
-+ long long *index_block, int *index_offset,
-+ long long *data_block, char *block_list)
-+{
-+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ int skip = calculate_skip(i_size_read(inode) >> sblk->block_log);
-+ int offset = 0;
-+ struct meta_index *meta;
-+ struct meta_entry *meta_entry;
-+ long long cur_index_block = SQUASHFS_I(inode)->u.s1.block_list_start;
-+ int cur_offset = SQUASHFS_I(inode)->offset;
-+ long long cur_data_block = SQUASHFS_I(inode)->start_block;
-+ int i;
-+
-+ index /= SQUASHFS_META_INDEXES * skip;
-+
-+ while ( offset < index ) {
-+ meta = locate_meta_index(inode, index, offset + 1);
-+
-+ if (meta == NULL) {
-+ if ((meta = empty_meta_index(inode, offset + 1,
-+ skip)) == NULL)
-+ goto all_done;
-+ } else {
-+ offset = index < meta->offset + meta->entries ? index :
-+ meta->offset + meta->entries - 1;
-+ meta_entry = &meta->meta_entry[offset - meta->offset];
-+ cur_index_block = meta_entry->index_block + sblk->inode_table_start;
-+ cur_offset = meta_entry->offset;
-+ cur_data_block = meta_entry->data_block;
-+ TRACE("get_meta_index: offset %d, meta->offset %d, "
-+ "meta->entries %d\n", offset, meta->offset,
-+ meta->entries);
-+ TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
-+ " data_block 0x%llx\n", cur_index_block,
-+ cur_offset, cur_data_block);
-+ }
-+
-+ for (i = meta->offset + meta->entries; i <= index &&
-+ i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
-+ int blocks = skip * SQUASHFS_META_INDEXES;
-+
-+ while (blocks) {
-+ int block = blocks > (SIZE >> 2) ? (SIZE >> 2) :
-+ blocks;
-+ int res = read_block_index(inode->i_sb, block,
-+ block_list, &cur_index_block,
-+ &cur_offset);
-+
-+ if (res == -1)
-+ goto failed;
-+
-+ cur_data_block += res;
-+ blocks -= block;
-+ }
-+
-+ meta_entry = &meta->meta_entry[i - meta->offset];
-+ meta_entry->index_block = cur_index_block - sblk->inode_table_start;
-+ meta_entry->offset = cur_offset;
-+ meta_entry->data_block = cur_data_block;
-+ meta->entries ++;
-+ offset ++;
-+ }
-+
-+ TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
-+ meta->offset, meta->entries);
-+
-+ release_meta_index(inode, meta);
-+ }
-+
-+all_done:
-+ *index_block = cur_index_block;
-+ *index_offset = cur_offset;
-+ *data_block = cur_data_block;
-+
-+ return offset * SQUASHFS_META_INDEXES * skip;
-+
-+failed:
-+ release_meta_index(inode, meta);
-+ return -1;
-+}
-+
-+
-+static long long read_blocklist(struct inode *inode, int index,
-+ int readahead_blks, char *block_list,
-+ unsigned short **block_p, unsigned int *bsize)
-+{
-+ long long block_ptr;
-+ int offset;
-+ long long block;
-+ int res = get_meta_index(inode, index, &block_ptr, &offset, &block,
-+ block_list);
-+
-+ TRACE("read_blocklist: res %d, index %d, block_ptr 0x%llx, offset"
-+ " 0x%x, block 0x%llx\n", res, index, block_ptr, offset,
-+ block);
-+
-+ if(res == -1)
-+ goto failure;
-+
-+ index -= res;
-+
-+ while ( index ) {
-+ int blocks = index > (SIZE >> 2) ? (SIZE >> 2) : index;
-+ int res = read_block_index(inode->i_sb, blocks, block_list,
-+ &block_ptr, &offset);
-+ if (res == -1)
-+ goto failure;
-+ block += res;
-+ index -= blocks;
-+ }
-+
-+ if (read_block_index(inode->i_sb, 1, block_list,
-+ &block_ptr, &offset) == -1)
-+ goto failure;
-+ *bsize = *((unsigned int *) block_list);
-+
-+ return block;
-+
-+failure:
-+ return 0;
-+}
-+
-+
-+static int squashfs_readpage(struct file *file, struct page *page)
-+{
-+ struct inode *inode = page->mapping->host;
-+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ unsigned char block_list[SIZE];
-+ long long block;
-+ unsigned int bsize, i = 0, bytes = 0, byte_offset = 0;
-+ int index = page->index >> (sblk->block_log - PAGE_CACHE_SHIFT);
-+ void *pageaddr;
-+ struct squashfs_fragment_cache *fragment = NULL;
-+ char *data_ptr = msblk->read_page;
-+
-+ int mask = (1 << (sblk->block_log - PAGE_CACHE_SHIFT)) - 1;
-+ int start_index = page->index & ~mask;
-+ int end_index = start_index | mask;
-+
-+ TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
-+ page->index,
-+ SQUASHFS_I(inode)->start_block);
-+
-+ if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-+ PAGE_CACHE_SHIFT))
-+ goto skip_read;
-+
-+ if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
-+ || index < (i_size_read(inode) >>
-+ sblk->block_log)) {
-+ if ((block = (msblk->read_blocklist)(inode, index, 1,
-+ block_list, NULL, &bsize)) == 0)
-+ goto skip_read;
-+
-+ down(&msblk->read_page_mutex);
-+
-+ if (!(bytes = squashfs_read_data(inode->i_sb, msblk->read_page,
-+ block, bsize, NULL))) {
-+ ERROR("Unable to read page, block %llx, size %x\n", block,
-+ bsize);
-+ up(&msblk->read_page_mutex);
-+ goto skip_read;
-+ }
-+ } else {
-+ if ((fragment = get_cached_fragment(inode->i_sb,
-+ SQUASHFS_I(inode)->
-+ u.s1.fragment_start_block,
-+ SQUASHFS_I(inode)->u.s1.fragment_size))
-+ == NULL) {
-+ ERROR("Unable to read page, block %llx, size %x\n",
-+ SQUASHFS_I(inode)->
-+ u.s1.fragment_start_block,
-+ (int) SQUASHFS_I(inode)->
-+ u.s1.fragment_size);
-+ goto skip_read;
-+ }
-+ bytes = SQUASHFS_I(inode)->u.s1.fragment_offset +
-+ (i_size_read(inode) & (sblk->block_size
-+ - 1));
-+ byte_offset = SQUASHFS_I(inode)->u.s1.fragment_offset;
-+ data_ptr = fragment->data;
-+ }
-+
-+ for (i = start_index; i <= end_index && byte_offset < bytes;
-+ i++, byte_offset += PAGE_CACHE_SIZE) {
-+ struct page *push_page;
-+ int available_bytes = (bytes - byte_offset) > PAGE_CACHE_SIZE ?
-+ PAGE_CACHE_SIZE : bytes - byte_offset;
-+
-+ TRACE("bytes %d, i %d, byte_offset %d, available_bytes %d\n",
-+ bytes, i, byte_offset, available_bytes);
-+
-+ if (i == page->index) {
-+ pageaddr = kmap_atomic(page, KM_USER0);
-+ memcpy(pageaddr, data_ptr + byte_offset,
-+ available_bytes);
-+ memset(pageaddr + available_bytes, 0,
-+ PAGE_CACHE_SIZE - available_bytes);
-+ kunmap_atomic(pageaddr, KM_USER0);
-+ flush_dcache_page(page);
-+ SetPageUptodate(page);
-+ unlock_page(page);
-+ } else if ((push_page =
-+ grab_cache_page_nowait(page->mapping, i))) {
-+ pageaddr = kmap_atomic(push_page, KM_USER0);
-+
-+ memcpy(pageaddr, data_ptr + byte_offset,
-+ available_bytes);
-+ memset(pageaddr + available_bytes, 0,
-+ PAGE_CACHE_SIZE - available_bytes);
-+ kunmap_atomic(pageaddr, KM_USER0);
-+ flush_dcache_page(push_page);
-+ SetPageUptodate(push_page);
-+ unlock_page(push_page);
-+ page_cache_release(push_page);
-+ }
-+ }
-+
-+ if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
-+ || index < (i_size_read(inode) >>
-+ sblk->block_log))
-+ up(&msblk->read_page_mutex);
-+ else
-+ release_cached_fragment(msblk, fragment);
-+
-+ return 0;
-+
-+skip_read:
-+ pageaddr = kmap_atomic(page, KM_USER0);
-+ memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
-+ kunmap_atomic(pageaddr, KM_USER0);
-+ flush_dcache_page(page);
-+ SetPageUptodate(page);
-+ unlock_page(page);
-+
-+ return 0;
-+}
-+
-+
-+static int squashfs_readpage4K(struct file *file, struct page *page)
-+{
-+ struct inode *inode = page->mapping->host;
-+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ unsigned char block_list[SIZE];
-+ long long block;
-+ unsigned int bsize, bytes = 0;
-+ void *pageaddr;
-+
-+ TRACE("Entered squashfs_readpage4K, page index %lx, start block %llx\n",
-+ page->index,
-+ SQUASHFS_I(inode)->start_block);
-+
-+ if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-+ PAGE_CACHE_SHIFT)) {
-+ pageaddr = kmap_atomic(page, KM_USER0);
-+ goto skip_read;
-+ }
-+
-+ if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
-+ || page->index < (i_size_read(inode) >>
-+ sblk->block_log)) {
-+ block = (msblk->read_blocklist)(inode, page->index, 1,
-+ block_list, NULL, &bsize);
-+
-+ down(&msblk->read_page_mutex);
-+ bytes = squashfs_read_data(inode->i_sb, msblk->read_page, block,
-+ bsize, NULL);
-+ pageaddr = kmap_atomic(page, KM_USER0);
-+ if (bytes)
-+ memcpy(pageaddr, msblk->read_page, bytes);
-+ else
-+ ERROR("Unable to read page, block %llx, size %x\n",
-+ block, bsize);
-+ up(&msblk->read_page_mutex);
-+ } else {
-+ struct squashfs_fragment_cache *fragment =
-+ get_cached_fragment(inode->i_sb,
-+ SQUASHFS_I(inode)->
-+ u.s1.fragment_start_block,
-+ SQUASHFS_I(inode)-> u.s1.fragment_size);
-+ pageaddr = kmap_atomic(page, KM_USER0);
-+ if (fragment) {
-+ bytes = i_size_read(inode) & (sblk->block_size - 1);
-+ memcpy(pageaddr, fragment->data + SQUASHFS_I(inode)->
-+ u.s1.fragment_offset, bytes);
-+ release_cached_fragment(msblk, fragment);
-+ } else
-+ ERROR("Unable to read page, block %llx, size %x\n",
-+ SQUASHFS_I(inode)->
-+ u.s1.fragment_start_block, (int)
-+ SQUASHFS_I(inode)-> u.s1.fragment_size);
-+ }
-+
-+skip_read:
-+ memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
-+ kunmap_atomic(pageaddr, KM_USER0);
-+ flush_dcache_page(page);
-+ SetPageUptodate(page);
-+ unlock_page(page);
-+
-+ return 0;
-+}
-+
-+
-+static int get_dir_index_using_offset(struct super_block *s, long long
-+ *next_block, unsigned int *next_offset,
-+ long long index_start,
-+ unsigned int index_offset, int i_count,
-+ long long f_pos)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ int i, length = 0;
-+ struct squashfs_dir_index index;
-+
-+ TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
-+ i_count, (unsigned int) f_pos);
-+
-+ f_pos =- 3;
-+ if (f_pos == 0)
-+ goto finish;
-+
-+ for (i = 0; i < i_count; i++) {
-+ if (msblk->swap) {
-+ struct squashfs_dir_index sindex;
-+ squashfs_get_cached_block(s, (char *) &sindex,
-+ index_start, index_offset,
-+ sizeof(sindex), &index_start,
-+ &index_offset);
-+ SQUASHFS_SWAP_DIR_INDEX(&index, &sindex);
-+ } else
-+ squashfs_get_cached_block(s, (char *) &index,
-+ index_start, index_offset,
-+ sizeof(index), &index_start,
-+ &index_offset);
-+
-+ if (index.index > f_pos)
-+ break;
-+
-+ squashfs_get_cached_block(s, NULL, index_start, index_offset,
-+ index.size + 1, &index_start,
-+ &index_offset);
-+
-+ length = index.index;
-+ *next_block = index.start_block + sblk->directory_table_start;
-+ }
-+
-+ *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
-+
-+finish:
-+ return length + 3;
-+}
-+
-+
-+static int get_dir_index_using_name(struct super_block *s, long long
-+ *next_block, unsigned int *next_offset,
-+ long long index_start,
-+ unsigned int index_offset, int i_count,
-+ const char *name, int size)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ int i, length = 0;
-+ char buffer[sizeof(struct squashfs_dir_index) + SQUASHFS_NAME_LEN + 1];
-+ struct squashfs_dir_index *index = (struct squashfs_dir_index *) buffer;
-+ char str[SQUASHFS_NAME_LEN + 1];
-+
-+ TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
-+
-+ strncpy(str, name, size);
-+ str[size] = '\0';
-+
-+ for (i = 0; i < i_count; i++) {
-+ if (msblk->swap) {
-+ struct squashfs_dir_index sindex;
-+ squashfs_get_cached_block(s, (char *) &sindex,
-+ index_start, index_offset,
-+ sizeof(sindex), &index_start,
-+ &index_offset);
-+ SQUASHFS_SWAP_DIR_INDEX(index, &sindex);
-+ } else
-+ squashfs_get_cached_block(s, (char *) index,
-+ index_start, index_offset,
-+ sizeof(struct squashfs_dir_index),
-+ &index_start, &index_offset);
-+
-+ squashfs_get_cached_block(s, index->name, index_start,
-+ index_offset, index->size + 1,
-+ &index_start, &index_offset);
-+
-+ index->name[index->size + 1] = '\0';
-+
-+ if (strcmp(index->name, str) > 0)
-+ break;
-+
-+ length = index->index;
-+ *next_block = index->start_block + sblk->directory_table_start;
-+ }
-+
-+ *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
-+ return length + 3;
-+}
-+
-+
-+static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
-+{
-+ struct inode *i = file->f_dentry->d_inode;
-+ struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ long long next_block = SQUASHFS_I(i)->start_block +
-+ sblk->directory_table_start;
-+ int next_offset = SQUASHFS_I(i)->offset, length = 0, dirs_read = 0,
-+ dir_count;
-+ struct squashfs_dir_header dirh;
-+ char buffer[sizeof(struct squashfs_dir_entry) + SQUASHFS_NAME_LEN + 1];
-+ struct squashfs_dir_entry *dire = (struct squashfs_dir_entry *) buffer;
-+
-+ TRACE("Entered squashfs_readdir [%llx:%x]\n", next_block, next_offset);
-+
-+ while(file->f_pos < 3) {
-+ char *name;
-+ int size, i_ino;
-+
-+ if(file->f_pos == 0) {
-+ name = ".";
-+ size = 1;
-+ i_ino = i->i_ino;
-+ } else {
-+ name = "..";
-+ size = 2;
-+ i_ino = SQUASHFS_I(i)->u.s2.parent_inode;
-+ }
-+ TRACE("Calling filldir(%x, %s, %d, %d, %d, %d)\n",
-+ (unsigned int) dirent, name, size, (int)
-+ file->f_pos, i_ino,
-+ squashfs_filetype_table[1]);
-+
-+ if (filldir(dirent, name, size,
-+ file->f_pos, i_ino,
-+ squashfs_filetype_table[1]) < 0) {
-+ TRACE("Filldir returned less than 0\n");
-+ goto finish;
-+ }
-+ file->f_pos += size;
-+ dirs_read++;
-+ }
-+
-+ length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
-+ SQUASHFS_I(i)->u.s2.directory_index_start,
-+ SQUASHFS_I(i)->u.s2.directory_index_offset,
-+ SQUASHFS_I(i)->u.s2.directory_index_count,
-+ file->f_pos);
-+
-+ while (length < i_size_read(i)) {
-+ /* read directory header */
-+ if (msblk->swap) {
-+ struct squashfs_dir_header sdirh;
-+
-+ if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
-+ next_block, next_offset, sizeof(sdirh),
-+ &next_block, &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(sdirh);
-+ SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
-+ } else {
-+ if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
-+ next_block, next_offset, sizeof(dirh),
-+ &next_block, &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(dirh);
-+ }
-+
-+ dir_count = dirh.count + 1;
-+ while (dir_count--) {
-+ if (msblk->swap) {
-+ struct squashfs_dir_entry sdire;
-+ if (!squashfs_get_cached_block(i->i_sb, (char *)
-+ &sdire, next_block, next_offset,
-+ sizeof(sdire), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(sdire);
-+ SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
-+ } else {
-+ if (!squashfs_get_cached_block(i->i_sb, (char *)
-+ dire, next_block, next_offset,
-+ sizeof(*dire), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(*dire);
-+ }
-+
-+ if (!squashfs_get_cached_block(i->i_sb, dire->name,
-+ next_block, next_offset,
-+ dire->size + 1, &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ length += dire->size + 1;
-+
-+ if (file->f_pos >= length)
-+ continue;
-+
-+ dire->name[dire->size + 1] = '\0';
-+
-+ TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d, %d)\n",
-+ (unsigned int) dirent, dire->name,
-+ dire->size + 1, (int) file->f_pos,
-+ dirh.start_block, dire->offset,
-+ dirh.inode_number + dire->inode_number,
-+ squashfs_filetype_table[dire->type]);
-+
-+ if (filldir(dirent, dire->name, dire->size + 1,
-+ file->f_pos,
-+ dirh.inode_number + dire->inode_number,
-+ squashfs_filetype_table[dire->type])
-+ < 0) {
-+ TRACE("Filldir returned less than 0\n");
-+ goto finish;
-+ }
-+ file->f_pos = length;
-+ dirs_read++;
-+ }
-+ }
-+
-+finish:
-+ return dirs_read;
-+
-+failed_read:
-+ ERROR("Unable to read directory block [%llx:%x]\n", next_block,
-+ next_offset);
-+ return 0;
-+}
-+
-+
-+static struct dentry *squashfs_lookup(struct inode *i, struct dentry *dentry,
-+ struct nameidata *nd)
-+{
-+ const unsigned char *name = dentry->d_name.name;
-+ int len = dentry->d_name.len;
-+ struct inode *inode = NULL;
-+ struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ long long next_block = SQUASHFS_I(i)->start_block +
-+ sblk->directory_table_start;
-+ int next_offset = SQUASHFS_I(i)->offset, length = 0,
-+ dir_count;
-+ struct squashfs_dir_header dirh;
-+ char buffer[sizeof(struct squashfs_dir_entry) + SQUASHFS_NAME_LEN];
-+ struct squashfs_dir_entry *dire = (struct squashfs_dir_entry *) buffer;
-+
-+ TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);
-+
-+ if (len > SQUASHFS_NAME_LEN)
-+ goto exit_loop;
-+
-+ length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
-+ SQUASHFS_I(i)->u.s2.directory_index_start,
-+ SQUASHFS_I(i)->u.s2.directory_index_offset,
-+ SQUASHFS_I(i)->u.s2.directory_index_count, name,
-+ len);
-+
-+ while (length < i_size_read(i)) {
-+ /* read directory header */
-+ if (msblk->swap) {
-+ struct squashfs_dir_header sdirh;
-+ if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
-+ next_block, next_offset, sizeof(sdirh),
-+ &next_block, &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(sdirh);
-+ SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
-+ } else {
-+ if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
-+ next_block, next_offset, sizeof(dirh),
-+ &next_block, &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(dirh);
-+ }
-+
-+ dir_count = dirh.count + 1;
-+ while (dir_count--) {
-+ if (msblk->swap) {
-+ struct squashfs_dir_entry sdire;
-+ if (!squashfs_get_cached_block(i->i_sb, (char *)
-+ &sdire, next_block,next_offset,
-+ sizeof(sdire), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(sdire);
-+ SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
-+ } else {
-+ if (!squashfs_get_cached_block(i->i_sb, (char *)
-+ dire, next_block,next_offset,
-+ sizeof(*dire), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(*dire);
-+ }
-+
-+ if (!squashfs_get_cached_block(i->i_sb, dire->name,
-+ next_block, next_offset, dire->size + 1,
-+ &next_block, &next_offset))
-+ goto failed_read;
-+
-+ length += dire->size + 1;
-+
-+ if (name[0] < dire->name[0])
-+ goto exit_loop;
-+
-+ if ((len == dire->size + 1) && !strncmp(name,
-+ dire->name, len)) {
-+ squashfs_inode_t ino =
-+ SQUASHFS_MKINODE(dirh.start_block,
-+ dire->offset);
-+
-+ TRACE("calling squashfs_iget for directory "
-+ "entry %s, inode %x:%x, %d\n", name,
-+ dirh.start_block, dire->offset,
-+ dirh.inode_number + dire->inode_number);
-+
-+ inode = (msblk->iget)(i->i_sb, ino);
-+
-+ goto exit_loop;
-+ }
-+ }
-+ }
-+
-+exit_loop:
-+ d_add(dentry, inode);
-+ return ERR_PTR(0);
-+
-+failed_read:
-+ ERROR("Unable to read directory block [%llx:%x]\n", next_block,
-+ next_offset);
-+ goto exit_loop;
-+}
-+
-+
-+static void squashfs_put_super(struct super_block *s)
-+{
-+ int i;
-+
-+ if (s->s_fs_info) {
-+ struct squashfs_sb_info *sbi = s->s_fs_info;
-+ if (sbi->block_cache)
-+ for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
-+ if (sbi->block_cache[i].block !=
-+ SQUASHFS_INVALID_BLK)
-+ kfree(sbi->block_cache[i].data);
-+ if (sbi->fragment)
-+ for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++)
-+ SQUASHFS_FREE(sbi->fragment[i].data);
-+ kfree(sbi->fragment);
-+ kfree(sbi->block_cache);
-+ kfree(sbi->read_data);
-+ kfree(sbi->read_page);
-+ kfree(sbi->uid);
-+ kfree(sbi->fragment_index);
-+ kfree(sbi->fragment_index_2);
-+ kfree(sbi->meta_index);
-+ kfree(s->s_fs_info);
-+ s->s_fs_info = NULL;
-+ }
-+}
-+
-+
-+static struct super_block *squashfs_get_sb(struct file_system_type *fs_type,
-+ int flags, const char *dev_name, void *data)
-+{
-+ return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super);
-+}
-+
-+
-+static int __init init_squashfs_fs(void)
-+{
-+ int err = init_inodecache();
-+ if (err)
-+ goto out;
-+
-+ printk(KERN_INFO "squashfs: version 3.0 (2006/03/15) "
-+ "Phillip Lougher\n");
-+
-+ if (!(stream.workspace = vmalloc(zlib_inflate_workspacesize()))) {
-+ ERROR("Failed to allocate zlib workspace\n");
-+ destroy_inodecache();
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ if ((err = register_filesystem(&squashfs_fs_type))) {
-+ vfree(stream.workspace);
-+ destroy_inodecache();
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+
-+static void __exit exit_squashfs_fs(void)
-+{
-+ vfree(stream.workspace);
-+ unregister_filesystem(&squashfs_fs_type);
-+ destroy_inodecache();
-+}
-+
-+
-+static kmem_cache_t * squashfs_inode_cachep;
-+
-+
-+static struct inode *squashfs_alloc_inode(struct super_block *sb)
-+{
-+ struct squashfs_inode_info *ei;
-+ ei = kmem_cache_alloc(squashfs_inode_cachep, SLAB_KERNEL);
-+ if (!ei)
-+ return NULL;
-+ return &ei->vfs_inode;
-+}
-+
-+
-+static void squashfs_destroy_inode(struct inode *inode)
-+{
-+ kmem_cache_free(squashfs_inode_cachep, SQUASHFS_I(inode));
-+}
-+
-+
-+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
-+{
-+ struct squashfs_inode_info *ei = foo;
-+
-+ if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-+ SLAB_CTOR_CONSTRUCTOR)
-+ inode_init_once(&ei->vfs_inode);
-+}
-+
-+
-+static int __init init_inodecache(void)
-+{
-+ squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
-+ sizeof(struct squashfs_inode_info),
-+ 0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
-+ init_once, NULL);
-+ if (squashfs_inode_cachep == NULL)
-+ return -ENOMEM;
-+ return 0;
-+}
-+
-+
-+static void destroy_inodecache(void)
-+{
-+ if (kmem_cache_destroy(squashfs_inode_cachep))
-+ printk(KERN_INFO "squashfs_inode_cache: not all structures "
-+ "were freed\n");
-+}
-+
-+
-+module_init(init_squashfs_fs);
-+module_exit(exit_squashfs_fs);
-+MODULE_DESCRIPTION("squashfs, a compressed read-only filesystem");
-+MODULE_AUTHOR("Phillip Lougher <phillip@lougher.org.uk>");
-+MODULE_LICENSE("GPL");
-diff --new-file -urp linux-2.6.15/fs/squashfs/Makefile linux-2.6.15-squashfs3.0/fs/squashfs/Makefile
---- linux-2.6.15/fs/squashfs/Makefile 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.15-squashfs3.0/fs/squashfs/Makefile 2006-03-07 21:12:37.000000000 +0000
-@@ -0,0 +1,7 @@
-+#
-+# Makefile for the linux squashfs routines.
-+#
-+
-+obj-$(CONFIG_SQUASHFS) += squashfs.o
-+squashfs-y += inode.o
-+squashfs-y += squashfs2_0.o
-diff --new-file -urp linux-2.6.15/fs/squashfs/squashfs2_0.c linux-2.6.15-squashfs3.0/fs/squashfs/squashfs2_0.c
---- linux-2.6.15/fs/squashfs/squashfs2_0.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.15-squashfs3.0/fs/squashfs/squashfs2_0.c 2006-03-07 21:12:37.000000000 +0000
-@@ -0,0 +1,758 @@
-+/*
-+ * Squashfs - a compressed read only filesystem for Linux
-+ *
-+ * Copyright (c) 2002, 2003, 2004, 2005, 2006
-+ * Phillip Lougher <phillip@lougher.org.uk>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2,
-+ * or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * squashfs2_0.c
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/squashfs_fs.h>
-+#include <linux/module.h>
-+#include <linux/errno.h>
-+#include <linux/slab.h>
-+#include <linux/fs.h>
-+#include <linux/smp_lock.h>
-+#include <linux/slab.h>
-+#include <linux/squashfs_fs_sb.h>
-+#include <linux/squashfs_fs_i.h>
-+#include <linux/buffer_head.h>
-+#include <linux/vfs.h>
-+#include <linux/init.h>
-+#include <linux/dcache.h>
-+#include <linux/wait.h>
-+#include <linux/zlib.h>
-+#include <linux/blkdev.h>
-+#include <linux/vmalloc.h>
-+#include <asm/uaccess.h>
-+#include <asm/semaphore.h>
-+
-+#include "squashfs.h"
-+static int squashfs_readdir_2(struct file *file, void *dirent, filldir_t filldir);
-+static struct dentry *squashfs_lookup_2(struct inode *, struct dentry *,
-+ struct nameidata *);
-+
-+static struct file_operations squashfs_dir_ops_2 = {
-+ .read = generic_read_dir,
-+ .readdir = squashfs_readdir_2
-+};
-+
-+static struct inode_operations squashfs_dir_inode_ops_2 = {
-+ .lookup = squashfs_lookup_2
-+};
-+
-+static unsigned char squashfs_filetype_table[] = {
-+ DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
-+};
-+
-+static int read_fragment_index_table_2(struct super_block *s)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+
-+ if (!(msblk->fragment_index_2 = kmalloc(SQUASHFS_FRAGMENT_INDEX_BYTES_2
-+ (sblk->fragments), GFP_KERNEL))) {
-+ ERROR("Failed to allocate uid/gid table\n");
-+ return 0;
-+ }
-+
-+ if (SQUASHFS_FRAGMENT_INDEX_BYTES_2(sblk->fragments) &&
-+ !squashfs_read_data(s, (char *)
-+ msblk->fragment_index_2,
-+ sblk->fragment_table_start,
-+ SQUASHFS_FRAGMENT_INDEX_BYTES_2
-+ (sblk->fragments) |
-+ SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
-+ ERROR("unable to read fragment index table\n");
-+ return 0;
-+ }
-+
-+ if (msblk->swap) {
-+ int i;
-+ unsigned int fragment;
-+
-+ for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES_2(sblk->fragments);
-+ i++) {
-+ SQUASHFS_SWAP_FRAGMENT_INDEXES_2((&fragment),
-+ &msblk->fragment_index_2[i], 1);
-+ msblk->fragment_index_2[i] = fragment;
-+ }
-+ }
-+
-+ return 1;
-+}
-+
-+
-+static int get_fragment_location_2(struct super_block *s, unsigned int fragment,
-+ long long *fragment_start_block,
-+ unsigned int *fragment_size)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ long long start_block =
-+ msblk->fragment_index_2[SQUASHFS_FRAGMENT_INDEX_2(fragment)];
-+ int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET_2(fragment);
-+ struct squashfs_fragment_entry_2 fragment_entry;
-+
-+ if (msblk->swap) {
-+ struct squashfs_fragment_entry_2 sfragment_entry;
-+
-+ if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
-+ start_block, offset,
-+ sizeof(sfragment_entry), &start_block,
-+ &offset))
-+ goto out;
-+ SQUASHFS_SWAP_FRAGMENT_ENTRY_2(&fragment_entry, &sfragment_entry);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
-+ start_block, offset,
-+ sizeof(fragment_entry), &start_block,
-+ &offset))
-+ goto out;
-+
-+ *fragment_start_block = fragment_entry.start_block;
-+ *fragment_size = fragment_entry.size;
-+
-+ return 1;
-+
-+out:
-+ return 0;
-+}
-+
-+
-+static struct inode *squashfs_new_inode(struct super_block *s,
-+ struct squashfs_base_inode_header_2 *inodeb, unsigned int ino)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ struct inode *i = new_inode(s);
-+
-+ if (i) {
-+ i->i_ino = ino;
-+ i->i_mtime.tv_sec = sblk->mkfs_time;
-+ i->i_atime.tv_sec = sblk->mkfs_time;
-+ i->i_ctime.tv_sec = sblk->mkfs_time;
-+ i->i_uid = msblk->uid[inodeb->uid];
-+ i->i_mode = inodeb->mode;
-+ i->i_nlink = 1;
-+ i->i_size = 0;
-+ if (inodeb->guid == SQUASHFS_GUIDS)
-+ i->i_gid = i->i_uid;
-+ else
-+ i->i_gid = msblk->guid[inodeb->guid];
-+ }
-+
-+ return i;
-+}
-+
-+
-+static struct inode *squashfs_iget_2(struct super_block *s, squashfs_inode_t inode)
-+{
-+ struct inode *i;
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ unsigned int block = SQUASHFS_INODE_BLK(inode) +
-+ sblk->inode_table_start;
-+ unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
-+ unsigned int ino = SQUASHFS_MK_VFS_INODE(block
-+ - sblk->inode_table_start, offset);
-+ long long next_block;
-+ unsigned int next_offset;
-+ union squashfs_inode_header_2 id, sid;
-+ struct squashfs_base_inode_header_2 *inodeb = &id.base,
-+ *sinodeb = &sid.base;
-+
-+ TRACE("Entered squashfs_iget\n");
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
-+ offset, sizeof(*sinodeb), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_BASE_INODE_HEADER_2(inodeb, sinodeb,
-+ sizeof(*sinodeb));
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *) inodeb, block,
-+ offset, sizeof(*inodeb), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ switch(inodeb->inode_type) {
-+ case SQUASHFS_FILE_TYPE: {
-+ struct squashfs_reg_inode_header_2 *inodep = &id.reg;
-+ struct squashfs_reg_inode_header_2 *sinodep = &sid.reg;
-+ long long frag_blk;
-+ unsigned int frag_size;
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *)
-+ sinodep, block, offset,
-+ sizeof(*sinodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_REG_INODE_HEADER_2(inodep, sinodep);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *)
-+ inodep, block, offset,
-+ sizeof(*inodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ frag_blk = SQUASHFS_INVALID_BLK;
-+ if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
-+ !get_fragment_location_2(s,
-+ inodep->fragment, &frag_blk, &frag_size))
-+ goto failed_read;
-+
-+ if((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
-+ goto failed_read1;
-+
-+ i->i_size = inodep->file_size;
-+ i->i_fop = &generic_ro_fops;
-+ i->i_mode |= S_IFREG;
-+ i->i_mtime.tv_sec = inodep->mtime;
-+ i->i_atime.tv_sec = inodep->mtime;
-+ i->i_ctime.tv_sec = inodep->mtime;
-+ i->i_blocks = ((i->i_size - 1) >> 9) + 1;
-+ i->i_blksize = PAGE_CACHE_SIZE;
-+ SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
-+ SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
-+ SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
-+ SQUASHFS_I(i)->start_block = inodep->start_block;
-+ SQUASHFS_I(i)->u.s1.block_list_start = next_block;
-+ SQUASHFS_I(i)->offset = next_offset;
-+ if (sblk->block_size > 4096)
-+ i->i_data.a_ops = &squashfs_aops;
-+ else
-+ i->i_data.a_ops = &squashfs_aops_4K;
-+
-+ TRACE("File inode %x:%x, start_block %x, "
-+ "block_list_start %llx, offset %x\n",
-+ SQUASHFS_INODE_BLK(inode), offset,
-+ inodep->start_block, next_block,
-+ next_offset);
-+ break;
-+ }
-+ case SQUASHFS_DIR_TYPE: {
-+ struct squashfs_dir_inode_header_2 *inodep = &id.dir;
-+ struct squashfs_dir_inode_header_2 *sinodep = &sid.dir;
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *)
-+ sinodep, block, offset,
-+ sizeof(*sinodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_DIR_INODE_HEADER_2(inodep, sinodep);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *)
-+ inodep, block, offset,
-+ sizeof(*inodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ if((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
-+ goto failed_read1;
-+
-+ i->i_size = inodep->file_size;
-+ i->i_op = &squashfs_dir_inode_ops_2;
-+ i->i_fop = &squashfs_dir_ops_2;
-+ i->i_mode |= S_IFDIR;
-+ i->i_mtime.tv_sec = inodep->mtime;
-+ i->i_atime.tv_sec = inodep->mtime;
-+ i->i_ctime.tv_sec = inodep->mtime;
-+ SQUASHFS_I(i)->start_block = inodep->start_block;
-+ SQUASHFS_I(i)->offset = inodep->offset;
-+ SQUASHFS_I(i)->u.s2.directory_index_count = 0;
-+ SQUASHFS_I(i)->u.s2.parent_inode = 0;
-+
-+ TRACE("Directory inode %x:%x, start_block %x, offset "
-+ "%x\n", SQUASHFS_INODE_BLK(inode),
-+ offset, inodep->start_block,
-+ inodep->offset);
-+ break;
-+ }
-+ case SQUASHFS_LDIR_TYPE: {
-+ struct squashfs_ldir_inode_header_2 *inodep = &id.ldir;
-+ struct squashfs_ldir_inode_header_2 *sinodep = &sid.ldir;
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *)
-+ sinodep, block, offset,
-+ sizeof(*sinodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_LDIR_INODE_HEADER_2(inodep,
-+ sinodep);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *)
-+ inodep, block, offset,
-+ sizeof(*inodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ if((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
-+ goto failed_read1;
-+
-+ i->i_size = inodep->file_size;
-+ i->i_op = &squashfs_dir_inode_ops_2;
-+ i->i_fop = &squashfs_dir_ops_2;
-+ i->i_mode |= S_IFDIR;
-+ i->i_mtime.tv_sec = inodep->mtime;
-+ i->i_atime.tv_sec = inodep->mtime;
-+ i->i_ctime.tv_sec = inodep->mtime;
-+ SQUASHFS_I(i)->start_block = inodep->start_block;
-+ SQUASHFS_I(i)->offset = inodep->offset;
-+ SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
-+ SQUASHFS_I(i)->u.s2.directory_index_offset =
-+ next_offset;
-+ SQUASHFS_I(i)->u.s2.directory_index_count =
-+ inodep->i_count;
-+ SQUASHFS_I(i)->u.s2.parent_inode = 0;
-+
-+ TRACE("Long directory inode %x:%x, start_block %x, "
-+ "offset %x\n",
-+ SQUASHFS_INODE_BLK(inode), offset,
-+ inodep->start_block, inodep->offset);
-+ break;
-+ }
-+ case SQUASHFS_SYMLINK_TYPE: {
-+ struct squashfs_symlink_inode_header_2 *inodep =
-+ &id.symlink;
-+ struct squashfs_symlink_inode_header_2 *sinodep =
-+ &sid.symlink;
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *)
-+ sinodep, block, offset,
-+ sizeof(*sinodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_SYMLINK_INODE_HEADER_2(inodep,
-+ sinodep);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *)
-+ inodep, block, offset,
-+ sizeof(*inodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ if((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
-+ goto failed_read1;
-+
-+ i->i_size = inodep->symlink_size;
-+ i->i_op = &page_symlink_inode_operations;
-+ i->i_data.a_ops = &squashfs_symlink_aops;
-+ i->i_mode |= S_IFLNK;
-+ SQUASHFS_I(i)->start_block = next_block;
-+ SQUASHFS_I(i)->offset = next_offset;
-+
-+ TRACE("Symbolic link inode %x:%x, start_block %llx, "
-+ "offset %x\n",
-+ SQUASHFS_INODE_BLK(inode), offset,
-+ next_block, next_offset);
-+ break;
-+ }
-+ case SQUASHFS_BLKDEV_TYPE:
-+ case SQUASHFS_CHRDEV_TYPE: {
-+ struct squashfs_dev_inode_header_2 *inodep = &id.dev;
-+ struct squashfs_dev_inode_header_2 *sinodep = &sid.dev;
-+
-+ if (msblk->swap) {
-+ if (!squashfs_get_cached_block(s, (char *)
-+ sinodep, block, offset,
-+ sizeof(*sinodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+ SQUASHFS_SWAP_DEV_INODE_HEADER_2(inodep, sinodep);
-+ } else
-+ if (!squashfs_get_cached_block(s, (char *)
-+ inodep, block, offset,
-+ sizeof(*inodep), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ if ((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
-+ goto failed_read1;
-+
-+ i->i_mode |= (inodeb->inode_type ==
-+ SQUASHFS_CHRDEV_TYPE) ? S_IFCHR :
-+ S_IFBLK;
-+ init_special_inode(i, i->i_mode,
-+ old_decode_dev(inodep->rdev));
-+
-+ TRACE("Device inode %x:%x, rdev %x\n",
-+ SQUASHFS_INODE_BLK(inode), offset,
-+ inodep->rdev);
-+ break;
-+ }
-+ case SQUASHFS_FIFO_TYPE:
-+ case SQUASHFS_SOCKET_TYPE: {
-+ if ((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
-+ goto failed_read1;
-+
-+ i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
-+ ? S_IFIFO : S_IFSOCK;
-+ init_special_inode(i, i->i_mode, 0);
-+ break;
-+ }
-+ default:
-+ ERROR("Unknown inode type %d in squashfs_iget!\n",
-+ inodeb->inode_type);
-+ goto failed_read1;
-+ }
-+
-+ insert_inode_hash(i);
-+ return i;
-+
-+failed_read:
-+ ERROR("Unable to read inode [%x:%x]\n", block, offset);
-+
-+failed_read1:
-+ return NULL;
-+}
-+
-+
-+static int get_dir_index_using_offset(struct super_block *s, long long
-+ *next_block, unsigned int *next_offset,
-+ long long index_start,
-+ unsigned int index_offset, int i_count,
-+ long long f_pos)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ int i, length = 0;
-+ struct squashfs_dir_index_2 index;
-+
-+ TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
-+ i_count, (unsigned int) f_pos);
-+
-+ if (f_pos == 0)
-+ goto finish;
-+
-+ for (i = 0; i < i_count; i++) {
-+ if (msblk->swap) {
-+ struct squashfs_dir_index_2 sindex;
-+ squashfs_get_cached_block(s, (char *) &sindex,
-+ index_start, index_offset,
-+ sizeof(sindex), &index_start,
-+ &index_offset);
-+ SQUASHFS_SWAP_DIR_INDEX_2(&index, &sindex);
-+ } else
-+ squashfs_get_cached_block(s, (char *) &index,
-+ index_start, index_offset,
-+ sizeof(index), &index_start,
-+ &index_offset);
-+
-+ if (index.index > f_pos)
-+ break;
-+
-+ squashfs_get_cached_block(s, NULL, index_start, index_offset,
-+ index.size + 1, &index_start,
-+ &index_offset);
-+
-+ length = index.index;
-+ *next_block = index.start_block + sblk->directory_table_start;
-+ }
-+
-+ *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
-+
-+finish:
-+ return length;
-+}
-+
-+
-+static int get_dir_index_using_name(struct super_block *s, long long
-+ *next_block, unsigned int *next_offset,
-+ long long index_start,
-+ unsigned int index_offset, int i_count,
-+ const char *name, int size)
-+{
-+ struct squashfs_sb_info *msblk = s->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ int i, length = 0;
-+ char buffer[sizeof(struct squashfs_dir_index_2) + SQUASHFS_NAME_LEN + 1];
-+ struct squashfs_dir_index_2 *index = (struct squashfs_dir_index_2 *) buffer;
-+ char str[SQUASHFS_NAME_LEN + 1];
-+
-+ TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
-+
-+ strncpy(str, name, size);
-+ str[size] = '\0';
-+
-+ for (i = 0; i < i_count; i++) {
-+ if (msblk->swap) {
-+ struct squashfs_dir_index_2 sindex;
-+ squashfs_get_cached_block(s, (char *) &sindex,
-+ index_start, index_offset,
-+ sizeof(sindex), &index_start,
-+ &index_offset);
-+ SQUASHFS_SWAP_DIR_INDEX_2(index, &sindex);
-+ } else
-+ squashfs_get_cached_block(s, (char *) index,
-+ index_start, index_offset,
-+ sizeof(struct squashfs_dir_index_2),
-+ &index_start, &index_offset);
-+
-+ squashfs_get_cached_block(s, index->name, index_start,
-+ index_offset, index->size + 1,
-+ &index_start, &index_offset);
-+
-+ index->name[index->size + 1] = '\0';
-+
-+ if (strcmp(index->name, str) > 0)
-+ break;
-+
-+ length = index->index;
-+ *next_block = index->start_block + sblk->directory_table_start;
-+ }
-+
-+ *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
-+ return length;
-+}
-+
-+
-+static int squashfs_readdir_2(struct file *file, void *dirent, filldir_t filldir)
-+{
-+ struct inode *i = file->f_dentry->d_inode;
-+ struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ long long next_block = SQUASHFS_I(i)->start_block +
-+ sblk->directory_table_start;
-+ int next_offset = SQUASHFS_I(i)->offset, length = 0, dirs_read = 0,
-+ dir_count;
-+ struct squashfs_dir_header_2 dirh;
-+ char buffer[sizeof(struct squashfs_dir_entry_2) + SQUASHFS_NAME_LEN + 1];
-+ struct squashfs_dir_entry_2 *dire = (struct squashfs_dir_entry_2 *) buffer;
-+
-+ TRACE("Entered squashfs_readdir_2 [%llx:%x]\n", next_block, next_offset);
-+
-+ length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
-+ SQUASHFS_I(i)->u.s2.directory_index_start,
-+ SQUASHFS_I(i)->u.s2.directory_index_offset,
-+ SQUASHFS_I(i)->u.s2.directory_index_count,
-+ file->f_pos);
-+
-+ while (length < i_size_read(i)) {
-+ /* read directory header */
-+ if (msblk->swap) {
-+ struct squashfs_dir_header_2 sdirh;
-+
-+ if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
-+ next_block, next_offset, sizeof(sdirh),
-+ &next_block, &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(sdirh);
-+ SQUASHFS_SWAP_DIR_HEADER_2(&dirh, &sdirh);
-+ } else {
-+ if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
-+ next_block, next_offset, sizeof(dirh),
-+ &next_block, &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(dirh);
-+ }
-+
-+ dir_count = dirh.count + 1;
-+ while (dir_count--) {
-+ if (msblk->swap) {
-+ struct squashfs_dir_entry_2 sdire;
-+ if (!squashfs_get_cached_block(i->i_sb, (char *)
-+ &sdire, next_block, next_offset,
-+ sizeof(sdire), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(sdire);
-+ SQUASHFS_SWAP_DIR_ENTRY_2(dire, &sdire);
-+ } else {
-+ if (!squashfs_get_cached_block(i->i_sb, (char *)
-+ dire, next_block, next_offset,
-+ sizeof(*dire), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(*dire);
-+ }
-+
-+ if (!squashfs_get_cached_block(i->i_sb, dire->name,
-+ next_block, next_offset,
-+ dire->size + 1, &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ length += dire->size + 1;
-+
-+ if (file->f_pos >= length)
-+ continue;
-+
-+ dire->name[dire->size + 1] = '\0';
-+
-+ TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d)\n",
-+ (unsigned int) dirent, dire->name,
-+ dire->size + 1, (int) file->f_pos,
-+ dirh.start_block, dire->offset,
-+ squashfs_filetype_table[dire->type]);
-+
-+ if (filldir(dirent, dire->name, dire->size + 1,
-+ file->f_pos, SQUASHFS_MK_VFS_INODE(
-+ dirh.start_block, dire->offset),
-+ squashfs_filetype_table[dire->type])
-+ < 0) {
-+ TRACE("Filldir returned less than 0\n");
-+ goto finish;
-+ }
-+ file->f_pos = length;
-+ dirs_read++;
-+ }
-+ }
-+
-+finish:
-+ return dirs_read;
-+
-+failed_read:
-+ ERROR("Unable to read directory block [%llx:%x]\n", next_block,
-+ next_offset);
-+ return 0;
-+}
-+
-+
-+static struct dentry *squashfs_lookup_2(struct inode *i, struct dentry *dentry,
-+ struct nameidata *nd)
-+{
-+ const unsigned char *name = dentry->d_name.name;
-+ int len = dentry->d_name.len;
-+ struct inode *inode = NULL;
-+ struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+ long long next_block = SQUASHFS_I(i)->start_block +
-+ sblk->directory_table_start;
-+ int next_offset = SQUASHFS_I(i)->offset, length = 0,
-+ dir_count;
-+ struct squashfs_dir_header_2 dirh;
-+ char buffer[sizeof(struct squashfs_dir_entry_2) + SQUASHFS_NAME_LEN];
-+ struct squashfs_dir_entry_2 *dire = (struct squashfs_dir_entry_2 *) buffer;
-+ int sorted = sblk->s_major == 2 && sblk->s_minor >= 1;
-+
-+ TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);
-+
-+ if (len > SQUASHFS_NAME_LEN)
-+ goto exit_loop;
-+
-+ length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
-+ SQUASHFS_I(i)->u.s2.directory_index_start,
-+ SQUASHFS_I(i)->u.s2.directory_index_offset,
-+ SQUASHFS_I(i)->u.s2.directory_index_count, name,
-+ len);
-+
-+ while (length < i_size_read(i)) {
-+ /* read directory header */
-+ if (msblk->swap) {
-+ struct squashfs_dir_header_2 sdirh;
-+ if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
-+ next_block, next_offset, sizeof(sdirh),
-+ &next_block, &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(sdirh);
-+ SQUASHFS_SWAP_DIR_HEADER_2(&dirh, &sdirh);
-+ } else {
-+ if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
-+ next_block, next_offset, sizeof(dirh),
-+ &next_block, &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(dirh);
-+ }
-+
-+ dir_count = dirh.count + 1;
-+ while (dir_count--) {
-+ if (msblk->swap) {
-+ struct squashfs_dir_entry_2 sdire;
-+ if (!squashfs_get_cached_block(i->i_sb, (char *)
-+ &sdire, next_block,next_offset,
-+ sizeof(sdire), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(sdire);
-+ SQUASHFS_SWAP_DIR_ENTRY_2(dire, &sdire);
-+ } else {
-+ if (!squashfs_get_cached_block(i->i_sb, (char *)
-+ dire, next_block,next_offset,
-+ sizeof(*dire), &next_block,
-+ &next_offset))
-+ goto failed_read;
-+
-+ length += sizeof(*dire);
-+ }
-+
-+ if (!squashfs_get_cached_block(i->i_sb, dire->name,
-+ next_block, next_offset, dire->size + 1,
-+ &next_block, &next_offset))
-+ goto failed_read;
-+
-+ length += dire->size + 1;
-+
-+ if (sorted && name[0] < dire->name[0])
-+ goto exit_loop;
-+
-+ if ((len == dire->size + 1) && !strncmp(name,
-+ dire->name, len)) {
-+ squashfs_inode_t ino =
-+ SQUASHFS_MKINODE(dirh.start_block,
-+ dire->offset);
-+
-+ TRACE("calling squashfs_iget for directory "
-+ "entry %s, inode %x:%x, %lld\n", name,
-+ dirh.start_block, dire->offset, ino);
-+
-+ inode = (msblk->iget)(i->i_sb, ino);
-+
-+ goto exit_loop;
-+ }
-+ }
-+ }
-+
-+exit_loop:
-+ d_add(dentry, inode);
-+ return ERR_PTR(0);
-+
-+failed_read:
-+ ERROR("Unable to read directory block [%llx:%x]\n", next_block,
-+ next_offset);
-+ goto exit_loop;
-+}
-+
-+
-+int squashfs_2_0_supported(struct squashfs_sb_info *msblk)
-+{
-+ struct squashfs_super_block *sblk = &msblk->sblk;
-+
-+ msblk->iget = squashfs_iget_2;
-+ msblk->read_fragment_index_table = read_fragment_index_table_2;
-+
-+ sblk->bytes_used = sblk->bytes_used_2;
-+ sblk->uid_start = sblk->uid_start_2;
-+ sblk->guid_start = sblk->guid_start_2;
-+ sblk->inode_table_start = sblk->inode_table_start_2;
-+ sblk->directory_table_start = sblk->directory_table_start_2;
-+ sblk->fragment_table_start = sblk->fragment_table_start_2;
-+
-+ return 1;
-+}
-diff --new-file -urp linux-2.6.15/fs/squashfs/squashfs.h linux-2.6.15-squashfs3.0/fs/squashfs/squashfs.h
---- linux-2.6.15/fs/squashfs/squashfs.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.15-squashfs3.0/fs/squashfs/squashfs.h 2006-03-07 21:12:37.000000000 +0000
-@@ -0,0 +1,86 @@
-+/*
-+ * Squashfs - a compressed read only filesystem for Linux
-+ *
-+ * Copyright (c) 2002, 2003, 2004, 2005, 2006
-+ * Phillip Lougher <phillip@lougher.org.uk>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2,
-+ * or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * squashfs.h
-+ */
-+
-+#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
-+#undef CONFIG_SQUASHFS_1_0_COMPATIBILITY
-+#endif
-+
-+#ifdef SQUASHFS_TRACE
-+#define TRACE(s, args...) printk(KERN_NOTICE "SQUASHFS: "s, ## args)
-+#else
-+#define TRACE(s, args...) {}
-+#endif
-+
-+#define ERROR(s, args...) printk(KERN_ERR "SQUASHFS error: "s, ## args)
-+
-+#define SERROR(s, args...) do { \
-+ if (!silent) \
-+ printk(KERN_ERR "SQUASHFS error: "s, ## args);\
-+ } while(0)
-+
-+#define WARNING(s, args...) printk(KERN_WARNING "SQUASHFS: "s, ## args)
-+
-+static inline struct squashfs_inode_info *SQUASHFS_I(struct inode *inode)
-+{
-+ return list_entry(inode, struct squashfs_inode_info, vfs_inode);
-+}
-+
-+#if defined(CONFIG_SQUASHFS_1_0_COMPATIBILITY ) || defined(CONFIG_SQUASHFS_2_0_COMPATIBILITY)
-+#define SQSH_EXTERN
-+extern unsigned int squashfs_read_data(struct super_block *s, char *buffer,
-+ long long index, unsigned int length,
-+ long long *next_index);
-+extern int squashfs_get_cached_block(struct super_block *s, char *buffer,
-+ long long block, unsigned int offset,
-+ int length, long long *next_block,
-+ unsigned int *next_offset);
-+extern void release_cached_fragment(struct squashfs_sb_info *msblk, struct
-+ squashfs_fragment_cache *fragment);
-+extern struct squashfs_fragment_cache *get_cached_fragment(struct super_block
-+ *s, long long start_block,
-+ int length);
-+extern struct address_space_operations squashfs_symlink_aops;
-+extern struct address_space_operations squashfs_aops;
-+extern struct address_space_operations squashfs_aops_4K;
-+extern struct inode_operations squashfs_dir_inode_ops;
-+#else
-+#define SQSH_EXTERN static
-+#endif
-+
-+#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
-+extern int squashfs_1_0_supported(struct squashfs_sb_info *msblk);
-+#else
-+static inline int squashfs_1_0_supported(struct squashfs_sb_info *msblk)
-+{
-+ return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
-+extern int squashfs_2_0_supported(struct squashfs_sb_info *msblk);
-+#else
-+static inline int squashfs_2_0_supported(struct squashfs_sb_info *msblk)
-+{
-+ return 0;
-+}
-+#endif
-diff --new-file -urp linux-2.6.15/include/linux/squashfs_fs.h linux-2.6.15-squashfs3.0/include/linux/squashfs_fs.h
---- linux-2.6.15/include/linux/squashfs_fs.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.15-squashfs3.0/include/linux/squashfs_fs.h 2006-03-07 21:12:37.000000000 +0000
-@@ -0,0 +1,911 @@
-+#ifndef SQUASHFS_FS
-+#define SQUASHFS_FS
-+
-+/*
-+ * Squashfs
-+ *
-+ * Copyright (c) 2002, 2003, 2004, 2005, 2006
-+ * Phillip Lougher <phillip@lougher.org.uk>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2,
-+ * or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * squashfs_fs.h
-+ */
-+
-+#ifndef CONFIG_SQUASHFS_2_0_COMPATIBILITY
-+#define CONFIG_SQUASHFS_2_0_COMPATIBILITY
-+#endif
-+
-+#ifdef CONFIG_SQUASHFS_VMALLOC
-+#define SQUASHFS_ALLOC(a) vmalloc(a)
-+#define SQUASHFS_FREE(a) vfree(a)
-+#else
-+#define SQUASHFS_ALLOC(a) kmalloc(a, GFP_KERNEL)
-+#define SQUASHFS_FREE(a) kfree(a)
-+#endif
-+#define SQUASHFS_CACHED_FRAGMENTS CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE
-+#define SQUASHFS_MAJOR 3
-+#define SQUASHFS_MINOR 0
-+#define SQUASHFS_MAGIC 0x73717368
-+#define SQUASHFS_MAGIC_SWAP 0x68737173
-+#define SQUASHFS_START 0
-+
-+/* size of metadata (inode and directory) blocks */
-+#define SQUASHFS_METADATA_SIZE 8192
-+#define SQUASHFS_METADATA_LOG 13
-+
-+/* default size of data blocks */
-+#define SQUASHFS_FILE_SIZE 65536
-+#define SQUASHFS_FILE_LOG 16
-+
-+#define SQUASHFS_FILE_MAX_SIZE 65536
-+
-+/* Max number of uids and gids */
-+#define SQUASHFS_UIDS 256
-+#define SQUASHFS_GUIDS 255
-+
-+/* Max length of filename (not 255) */
-+#define SQUASHFS_NAME_LEN 256
-+
-+#define SQUASHFS_INVALID ((long long) 0xffffffffffff)
-+#define SQUASHFS_INVALID_FRAG ((unsigned int) 0xffffffff)
-+#define SQUASHFS_INVALID_BLK ((long long) -1)
-+#define SQUASHFS_USED_BLK ((long long) -2)
-+
-+/* Filesystem flags */
-+#define SQUASHFS_NOI 0
-+#define SQUASHFS_NOD 1
-+#define SQUASHFS_CHECK 2
-+#define SQUASHFS_NOF 3
-+#define SQUASHFS_NO_FRAG 4
-+#define SQUASHFS_ALWAYS_FRAG 5
-+#define SQUASHFS_DUPLICATE 6
-+
-+#define SQUASHFS_BIT(flag, bit) ((flag >> bit) & 1)
-+
-+#define SQUASHFS_UNCOMPRESSED_INODES(flags) SQUASHFS_BIT(flags, \
-+ SQUASHFS_NOI)
-+
-+#define SQUASHFS_UNCOMPRESSED_DATA(flags) SQUASHFS_BIT(flags, \
-+ SQUASHFS_NOD)
-+
-+#define SQUASHFS_UNCOMPRESSED_FRAGMENTS(flags) SQUASHFS_BIT(flags, \
-+ SQUASHFS_NOF)
-+
-+#define SQUASHFS_NO_FRAGMENTS(flags) SQUASHFS_BIT(flags, \
-+ SQUASHFS_NO_FRAG)
-+
-+#define SQUASHFS_ALWAYS_FRAGMENTS(flags) SQUASHFS_BIT(flags, \
-+ SQUASHFS_ALWAYS_FRAG)
-+
-+#define SQUASHFS_DUPLICATES(flags) SQUASHFS_BIT(flags, \
-+ SQUASHFS_DUPLICATE)
-+
-+#define SQUASHFS_CHECK_DATA(flags) SQUASHFS_BIT(flags, \
-+ SQUASHFS_CHECK)
-+
-+#define SQUASHFS_MKFLAGS(noi, nod, check_data, nof, no_frag, always_frag, \
-+ duplicate_checking) (noi | (nod << 1) | (check_data << 2) \
-+ | (nof << 3) | (no_frag << 4) | (always_frag << 5) | \
-+ (duplicate_checking << 6))
-+
-+/* Max number of types and file types */
-+#define SQUASHFS_DIR_TYPE 1
-+#define SQUASHFS_FILE_TYPE 2
-+#define SQUASHFS_SYMLINK_TYPE 3
-+#define SQUASHFS_BLKDEV_TYPE 4
-+#define SQUASHFS_CHRDEV_TYPE 5
-+#define SQUASHFS_FIFO_TYPE 6
-+#define SQUASHFS_SOCKET_TYPE 7
-+#define SQUASHFS_LDIR_TYPE 8
-+#define SQUASHFS_LREG_TYPE 9
-+
-+/* 1.0 filesystem type definitions */
-+#define SQUASHFS_TYPES 5
-+#define SQUASHFS_IPC_TYPE 0
-+
-+/* Flag whether block is compressed or uncompressed, bit is set if block is
-+ * uncompressed */
-+#define SQUASHFS_COMPRESSED_BIT (1 << 15)
-+
-+#define SQUASHFS_COMPRESSED_SIZE(B) (((B) & ~SQUASHFS_COMPRESSED_BIT) ? \
-+ (B) & ~SQUASHFS_COMPRESSED_BIT : SQUASHFS_COMPRESSED_BIT)
-+
-+#define SQUASHFS_COMPRESSED(B) (!((B) & SQUASHFS_COMPRESSED_BIT))
-+
-+#define SQUASHFS_COMPRESSED_BIT_BLOCK (1 << 24)
-+
-+#define SQUASHFS_COMPRESSED_SIZE_BLOCK(B) (((B) & \
-+ ~SQUASHFS_COMPRESSED_BIT_BLOCK) ? (B) & \
-+ ~SQUASHFS_COMPRESSED_BIT_BLOCK : SQUASHFS_COMPRESSED_BIT_BLOCK)
-+
-+#define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
-+
-+/*
-+ * Inode number ops. Inodes consist of a compressed block number, and an
-+ * uncompressed offset within that block
-+ */
-+#define SQUASHFS_INODE_BLK(a) ((unsigned int) ((a) >> 16))
-+
-+#define SQUASHFS_INODE_OFFSET(a) ((unsigned int) ((a) & 0xffff))
-+
-+#define SQUASHFS_MKINODE(A, B) ((squashfs_inode_t)(((squashfs_inode_t) (A)\
-+ << 16) + (B)))
-+
-+/* Compute 32 bit VFS inode number from squashfs inode number */
-+#define SQUASHFS_MK_VFS_INODE(a, b) ((unsigned int) (((a) << 8) + \
-+ ((b) >> 2) + 1))
-+/* XXX */
-+
-+/* Translate between VFS mode and squashfs mode */
-+#define SQUASHFS_MODE(a) ((a) & 0xfff)
-+
-+/* fragment and fragment table defines */
-+#define SQUASHFS_FRAGMENT_BYTES(A) (A * sizeof(struct squashfs_fragment_entry))
-+
-+#define SQUASHFS_FRAGMENT_INDEX(A) (SQUASHFS_FRAGMENT_BYTES(A) / \
-+ SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_FRAGMENT_INDEX_OFFSET(A) (SQUASHFS_FRAGMENT_BYTES(A) % \
-+ SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_FRAGMENT_INDEXES(A) ((SQUASHFS_FRAGMENT_BYTES(A) + \
-+ SQUASHFS_METADATA_SIZE - 1) / \
-+ SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_FRAGMENT_INDEX_BYTES(A) (SQUASHFS_FRAGMENT_INDEXES(A) *\
-+ sizeof(long long))
-+
-+/* cached data constants for filesystem */
-+#define SQUASHFS_CACHED_BLKS 8
-+
-+#define SQUASHFS_MAX_FILE_SIZE_LOG 64
-+
-+#define SQUASHFS_MAX_FILE_SIZE ((long long) 1 << \
-+ (SQUASHFS_MAX_FILE_SIZE_LOG - 2))
-+
-+#define SQUASHFS_MARKER_BYTE 0xff
-+
-+/* meta index cache */
-+#define SQUASHFS_META_INDEXES (SQUASHFS_METADATA_SIZE / sizeof(unsigned int))
-+#define SQUASHFS_META_ENTRIES 31
-+#define SQUASHFS_META_NUMBER 8
-+#define SQUASHFS_SLOTS 4
-+
-+struct meta_entry {
-+ long long data_block;
-+ unsigned int index_block;
-+ unsigned short offset;
-+ unsigned short pad;
-+};
-+
-+struct meta_index {
-+ unsigned int inode_number;
-+ unsigned int offset;
-+ unsigned short entries;
-+ unsigned short skip;
-+ unsigned short locked;
-+ unsigned short pad;
-+ struct meta_entry meta_entry[SQUASHFS_META_ENTRIES];
-+};
-+
-+
-+/*
-+ * definitions for structures on disk
-+ */
-+
-+typedef long long squashfs_block_t;
-+typedef long long squashfs_inode_t;
-+
-+struct squashfs_super_block {
-+ unsigned int s_magic;
-+ unsigned int inodes;
-+ unsigned int bytes_used_2;
-+ unsigned int uid_start_2;
-+ unsigned int guid_start_2;
-+ unsigned int inode_table_start_2;
-+ unsigned int directory_table_start_2;
-+ unsigned int s_major:16;
-+ unsigned int s_minor:16;
-+ unsigned int block_size_1:16;
-+ unsigned int block_log:16;
-+ unsigned int flags:8;
-+ unsigned int no_uids:8;
-+ unsigned int no_guids:8;
-+ unsigned int mkfs_time /* time of filesystem creation */;
-+ squashfs_inode_t root_inode;
-+ unsigned int block_size;
-+ unsigned int fragments;
-+ unsigned int fragment_table_start_2;
-+ long long bytes_used;
-+ long long uid_start;
-+ long long guid_start;
-+ long long inode_table_start;
-+ long long directory_table_start;
-+ long long fragment_table_start;
-+ long long unused;
-+} __attribute__ ((packed));
-+
-+struct squashfs_dir_index {
-+ unsigned int index;
-+ unsigned int start_block;
-+ unsigned char size;
-+ unsigned char name[0];
-+} __attribute__ ((packed));
-+
-+#define SQUASHFS_BASE_INODE_HEADER \
-+ unsigned int inode_type:4; \
-+ unsigned int mode:12; \
-+ unsigned int uid:8; \
-+ unsigned int guid:8; \
-+ unsigned int mtime; \
-+ unsigned int inode_number;
-+
-+struct squashfs_base_inode_header {
-+ SQUASHFS_BASE_INODE_HEADER;
-+} __attribute__ ((packed));
-+
-+struct squashfs_ipc_inode_header {
-+ SQUASHFS_BASE_INODE_HEADER;
-+ unsigned int nlink;
-+} __attribute__ ((packed));
-+
-+struct squashfs_dev_inode_header {
-+ SQUASHFS_BASE_INODE_HEADER;
-+ unsigned int nlink;
-+ unsigned short rdev;
-+} __attribute__ ((packed));
-+
-+struct squashfs_symlink_inode_header {
-+ SQUASHFS_BASE_INODE_HEADER;
-+ unsigned int nlink;
-+ unsigned short symlink_size;
-+ char symlink[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_reg_inode_header {
-+ SQUASHFS_BASE_INODE_HEADER;
-+ squashfs_block_t start_block;
-+ unsigned int fragment;
-+ unsigned int offset;
-+ unsigned int file_size;
-+ unsigned short block_list[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_lreg_inode_header {
-+ SQUASHFS_BASE_INODE_HEADER;
-+ unsigned int nlink;
-+ squashfs_block_t start_block;
-+ unsigned int fragment;
-+ unsigned int offset;
-+ long long file_size;
-+ unsigned short block_list[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_dir_inode_header {
-+ SQUASHFS_BASE_INODE_HEADER;
-+ unsigned int nlink;
-+ unsigned int file_size:19;
-+ unsigned int offset:13;
-+ unsigned int start_block;
-+ unsigned int parent_inode;
-+} __attribute__ ((packed));
-+
-+struct squashfs_ldir_inode_header {
-+ SQUASHFS_BASE_INODE_HEADER;
-+ unsigned int nlink;
-+ unsigned int file_size:27;
-+ unsigned int offset:13;
-+ unsigned int start_block;
-+ unsigned int i_count:16;
-+ unsigned int parent_inode;
-+ struct squashfs_dir_index index[0];
-+} __attribute__ ((packed));
-+
-+union squashfs_inode_header {
-+ struct squashfs_base_inode_header base;
-+ struct squashfs_dev_inode_header dev;
-+ struct squashfs_symlink_inode_header symlink;
-+ struct squashfs_reg_inode_header reg;
-+ struct squashfs_lreg_inode_header lreg;
-+ struct squashfs_dir_inode_header dir;
-+ struct squashfs_ldir_inode_header ldir;
-+ struct squashfs_ipc_inode_header ipc;
-+};
-+
-+struct squashfs_dir_entry {
-+ unsigned int offset:13;
-+ unsigned int type:3;
-+ unsigned int size:8;
-+ int inode_number:16;
-+ char name[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_dir_header {
-+ unsigned int count:8;
-+ unsigned int start_block;
-+ unsigned int inode_number;
-+} __attribute__ ((packed));
-+
-+struct squashfs_fragment_entry {
-+ long long start_block;
-+ unsigned int size;
-+ unsigned int unused;
-+} __attribute__ ((packed));
-+
-+extern int squashfs_uncompress_block(void *d, int dstlen, void *s, int srclen);
-+extern int squashfs_uncompress_init(void);
-+extern int squashfs_uncompress_exit(void);
-+
-+/*
-+ * macros to convert each packed bitfield structure from little endian to big
-+ * endian and vice versa. These are needed when creating or using a filesystem
-+ * on a machine with different byte ordering to the target architecture.
-+ *
-+ */
-+
-+#define SQUASHFS_SWAP_START \
-+ int bits;\
-+ int b_pos;\
-+ unsigned long long val;\
-+ unsigned char *s;\
-+ unsigned char *d;
-+
-+#define SQUASHFS_SWAP_SUPER_BLOCK(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_super_block));\
-+ SQUASHFS_SWAP((s)->s_magic, d, 0, 32);\
-+ SQUASHFS_SWAP((s)->inodes, d, 32, 32);\
-+ SQUASHFS_SWAP((s)->bytes_used_2, d, 64, 32);\
-+ SQUASHFS_SWAP((s)->uid_start_2, d, 96, 32);\
-+ SQUASHFS_SWAP((s)->guid_start_2, d, 128, 32);\
-+ SQUASHFS_SWAP((s)->inode_table_start_2, d, 160, 32);\
-+ SQUASHFS_SWAP((s)->directory_table_start_2, d, 192, 32);\
-+ SQUASHFS_SWAP((s)->s_major, d, 224, 16);\
-+ SQUASHFS_SWAP((s)->s_minor, d, 240, 16);\
-+ SQUASHFS_SWAP((s)->block_size_1, d, 256, 16);\
-+ SQUASHFS_SWAP((s)->block_log, d, 272, 16);\
-+ SQUASHFS_SWAP((s)->flags, d, 288, 8);\
-+ SQUASHFS_SWAP((s)->no_uids, d, 296, 8);\
-+ SQUASHFS_SWAP((s)->no_guids, d, 304, 8);\
-+ SQUASHFS_SWAP((s)->mkfs_time, d, 312, 32);\
-+ SQUASHFS_SWAP((s)->root_inode, d, 344, 64);\
-+ SQUASHFS_SWAP((s)->block_size, d, 408, 32);\
-+ SQUASHFS_SWAP((s)->fragments, d, 440, 32);\
-+ SQUASHFS_SWAP((s)->fragment_table_start_2, d, 472, 32);\
-+ SQUASHFS_SWAP((s)->bytes_used, d, 504, 64);\
-+ SQUASHFS_SWAP((s)->uid_start, d, 568, 64);\
-+ SQUASHFS_SWAP((s)->guid_start, d, 632, 64);\
-+ SQUASHFS_SWAP((s)->inode_table_start, d, 696, 64);\
-+ SQUASHFS_SWAP((s)->directory_table_start, d, 760, 64);\
-+ SQUASHFS_SWAP((s)->fragment_table_start, d, 824, 64);\
-+ SQUASHFS_SWAP((s)->unused, d, 888, 64);\
-+}
-+
-+#define SQUASHFS_SWAP_BASE_INODE_CORE(s, d, n)\
-+ SQUASHFS_MEMSET(s, d, n);\
-+ SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
-+ SQUASHFS_SWAP((s)->mode, d, 4, 12);\
-+ SQUASHFS_SWAP((s)->uid, d, 16, 8);\
-+ SQUASHFS_SWAP((s)->guid, d, 24, 8);\
-+ SQUASHFS_SWAP((s)->mtime, d, 32, 32);\
-+ SQUASHFS_SWAP((s)->inode_number, d, 64, 32);
-+
-+#define SQUASHFS_SWAP_BASE_INODE_HEADER(s, d, n) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE(s, d, n)\
-+}
-+
-+#define SQUASHFS_SWAP_IPC_INODE_HEADER(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+ sizeof(struct squashfs_ipc_inode_header))\
-+ SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_DEV_INODE_HEADER(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+ sizeof(struct squashfs_dev_inode_header)); \
-+ SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
-+ SQUASHFS_SWAP((s)->rdev, d, 128, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+ sizeof(struct squashfs_symlink_inode_header));\
-+ SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
-+ SQUASHFS_SWAP((s)->symlink_size, d, 128, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_REG_INODE_HEADER(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+ sizeof(struct squashfs_reg_inode_header));\
-+ SQUASHFS_SWAP((s)->start_block, d, 96, 64);\
-+ SQUASHFS_SWAP((s)->fragment, d, 160, 32);\
-+ SQUASHFS_SWAP((s)->offset, d, 192, 32);\
-+ SQUASHFS_SWAP((s)->file_size, d, 224, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_LREG_INODE_HEADER(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+ sizeof(struct squashfs_lreg_inode_header));\
-+ SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
-+ SQUASHFS_SWAP((s)->start_block, d, 128, 64);\
-+ SQUASHFS_SWAP((s)->fragment, d, 192, 32);\
-+ SQUASHFS_SWAP((s)->offset, d, 224, 32);\
-+ SQUASHFS_SWAP((s)->file_size, d, 256, 64);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_INODE_HEADER(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+ sizeof(struct squashfs_dir_inode_header));\
-+ SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
-+ SQUASHFS_SWAP((s)->file_size, d, 128, 19);\
-+ SQUASHFS_SWAP((s)->offset, d, 147, 13);\
-+ SQUASHFS_SWAP((s)->start_block, d, 160, 32);\
-+ SQUASHFS_SWAP((s)->parent_inode, d, 192, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_LDIR_INODE_HEADER(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+ sizeof(struct squashfs_ldir_inode_header));\
-+ SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
-+ SQUASHFS_SWAP((s)->file_size, d, 128, 27);\
-+ SQUASHFS_SWAP((s)->offset, d, 155, 13);\
-+ SQUASHFS_SWAP((s)->start_block, d, 168, 32);\
-+ SQUASHFS_SWAP((s)->i_count, d, 200, 16);\
-+ SQUASHFS_SWAP((s)->parent_inode, d, 216, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_INDEX(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_index));\
-+ SQUASHFS_SWAP((s)->index, d, 0, 32);\
-+ SQUASHFS_SWAP((s)->start_block, d, 32, 32);\
-+ SQUASHFS_SWAP((s)->size, d, 64, 8);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_HEADER(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_header));\
-+ SQUASHFS_SWAP((s)->count, d, 0, 8);\
-+ SQUASHFS_SWAP((s)->start_block, d, 8, 32);\
-+ SQUASHFS_SWAP((s)->inode_number, d, 40, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_ENTRY(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_entry));\
-+ SQUASHFS_SWAP((s)->offset, d, 0, 13);\
-+ SQUASHFS_SWAP((s)->type, d, 13, 3);\
-+ SQUASHFS_SWAP((s)->size, d, 16, 8);\
-+ SQUASHFS_SWAP((s)->inode_number, d, 24, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_FRAGMENT_ENTRY(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_fragment_entry));\
-+ SQUASHFS_SWAP((s)->start_block, d, 0, 64);\
-+ SQUASHFS_SWAP((s)->size, d, 64, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_SHORTS(s, d, n) {\
-+ int entry;\
-+ int bit_position;\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_MEMSET(s, d, n * 2);\
-+ for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
-+ 16)\
-+ SQUASHFS_SWAP(s[entry], d, bit_position, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_INTS(s, d, n) {\
-+ int entry;\
-+ int bit_position;\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_MEMSET(s, d, n * 4);\
-+ for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
-+ 32)\
-+ SQUASHFS_SWAP(s[entry], d, bit_position, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_LONG_LONGS(s, d, n) {\
-+ int entry;\
-+ int bit_position;\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_MEMSET(s, d, n * 8);\
-+ for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
-+ 64)\
-+ SQUASHFS_SWAP(s[entry], d, bit_position, 64);\
-+}
-+
-+#define SQUASHFS_SWAP_DATA(s, d, n, bits) {\
-+ int entry;\
-+ int bit_position;\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_MEMSET(s, d, n * bits / 8);\
-+ for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
-+ bits)\
-+ SQUASHFS_SWAP(s[entry], d, bit_position, bits);\
-+}
-+
-+#define SQUASHFS_SWAP_FRAGMENT_INDEXES(s, d, n) SQUASHFS_SWAP_LONG_LONGS(s, d, n)
-+
-+#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
-+
-+struct squashfs_base_inode_header_1 {
-+ unsigned int inode_type:4;
-+ unsigned int mode:12; /* protection */
-+ unsigned int uid:4; /* index into uid table */
-+ unsigned int guid:4; /* index into guid table */
-+} __attribute__ ((packed));
-+
-+struct squashfs_ipc_inode_header_1 {
-+ unsigned int inode_type:4;
-+ unsigned int mode:12; /* protection */
-+ unsigned int uid:4; /* index into uid table */
-+ unsigned int guid:4; /* index into guid table */
-+ unsigned int type:4;
-+ unsigned int offset:4;
-+} __attribute__ ((packed));
-+
-+struct squashfs_dev_inode_header_1 {
-+ unsigned int inode_type:4;
-+ unsigned int mode:12; /* protection */
-+ unsigned int uid:4; /* index into uid table */
-+ unsigned int guid:4; /* index into guid table */
-+ unsigned short rdev;
-+} __attribute__ ((packed));
-+
-+struct squashfs_symlink_inode_header_1 {
-+ unsigned int inode_type:4;
-+ unsigned int mode:12; /* protection */
-+ unsigned int uid:4; /* index into uid table */
-+ unsigned int guid:4; /* index into guid table */
-+ unsigned short symlink_size;
-+ char symlink[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_reg_inode_header_1 {
-+ unsigned int inode_type:4;
-+ unsigned int mode:12; /* protection */
-+ unsigned int uid:4; /* index into uid table */
-+ unsigned int guid:4; /* index into guid table */
-+ unsigned int mtime;
-+ unsigned int start_block;
-+ unsigned int file_size:32;
-+ unsigned short block_list[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_dir_inode_header_1 {
-+ unsigned int inode_type:4;
-+ unsigned int mode:12; /* protection */
-+ unsigned int uid:4; /* index into uid table */
-+ unsigned int guid:4; /* index into guid table */
-+ unsigned int file_size:19;
-+ unsigned int offset:13;
-+ unsigned int mtime;
-+ unsigned int start_block:24;
-+} __attribute__ ((packed));
-+
-+#define SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, n) \
-+ SQUASHFS_MEMSET(s, d, n);\
-+ SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
-+ SQUASHFS_SWAP((s)->mode, d, 4, 12);\
-+ SQUASHFS_SWAP((s)->uid, d, 16, 4);\
-+ SQUASHFS_SWAP((s)->guid, d, 20, 4);
-+
-+#define SQUASHFS_SWAP_BASE_INODE_HEADER_1(s, d, n) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, n)\
-+}
-+
-+#define SQUASHFS_SWAP_IPC_INODE_HEADER_1(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
-+ sizeof(struct squashfs_ipc_inode_header_1));\
-+ SQUASHFS_SWAP((s)->type, d, 24, 4);\
-+ SQUASHFS_SWAP((s)->offset, d, 28, 4);\
-+}
-+
-+#define SQUASHFS_SWAP_DEV_INODE_HEADER_1(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
-+ sizeof(struct squashfs_dev_inode_header_1));\
-+ SQUASHFS_SWAP((s)->rdev, d, 24, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER_1(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
-+ sizeof(struct squashfs_symlink_inode_header_1));\
-+ SQUASHFS_SWAP((s)->symlink_size, d, 24, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_REG_INODE_HEADER_1(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
-+ sizeof(struct squashfs_reg_inode_header_1));\
-+ SQUASHFS_SWAP((s)->mtime, d, 24, 32);\
-+ SQUASHFS_SWAP((s)->start_block, d, 56, 32);\
-+ SQUASHFS_SWAP((s)->file_size, d, 88, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_INODE_HEADER_1(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
-+ sizeof(struct squashfs_dir_inode_header_1));\
-+ SQUASHFS_SWAP((s)->file_size, d, 24, 19);\
-+ SQUASHFS_SWAP((s)->offset, d, 43, 13);\
-+ SQUASHFS_SWAP((s)->mtime, d, 56, 32);\
-+ SQUASHFS_SWAP((s)->start_block, d, 88, 24);\
-+}
-+
-+#endif
-+
-+#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
-+
-+struct squashfs_dir_index_2 {
-+ unsigned int index:27;
-+ unsigned int start_block:29;
-+ unsigned char size;
-+ unsigned char name[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_base_inode_header_2 {
-+ unsigned int inode_type:4;
-+ unsigned int mode:12; /* protection */
-+ unsigned int uid:8; /* index into uid table */
-+ unsigned int guid:8; /* index into guid table */
-+} __attribute__ ((packed));
-+
-+struct squashfs_ipc_inode_header_2 {
-+ unsigned int inode_type:4;
-+ unsigned int mode:12; /* protection */
-+ unsigned int uid:8; /* index into uid table */
-+ unsigned int guid:8; /* index into guid table */
-+} __attribute__ ((packed));
-+
-+struct squashfs_dev_inode_header_2 {
-+ unsigned int inode_type:4;
-+ unsigned int mode:12; /* protection */
-+ unsigned int uid:8; /* index into uid table */
-+ unsigned int guid:8; /* index into guid table */
-+ unsigned short rdev;
-+} __attribute__ ((packed));
-+
-+struct squashfs_symlink_inode_header_2 {
-+ unsigned int inode_type:4;
-+ unsigned int mode:12; /* protection */
-+ unsigned int uid:8; /* index into uid table */
-+ unsigned int guid:8; /* index into guid table */
-+ unsigned short symlink_size;
-+ char symlink[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_reg_inode_header_2 {
-+ unsigned int inode_type:4;
-+ unsigned int mode:12; /* protection */
-+ unsigned int uid:8; /* index into uid table */
-+ unsigned int guid:8; /* index into guid table */
-+ unsigned int mtime;
-+ unsigned int start_block;
-+ unsigned int fragment;
-+ unsigned int offset;
-+ unsigned int file_size:32;
-+ unsigned short block_list[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_dir_inode_header_2 {
-+ unsigned int inode_type:4;
-+ unsigned int mode:12; /* protection */
-+ unsigned int uid:8; /* index into uid table */
-+ unsigned int guid:8; /* index into guid table */
-+ unsigned int file_size:19;
-+ unsigned int offset:13;
-+ unsigned int mtime;
-+ unsigned int start_block:24;
-+} __attribute__ ((packed));
-+
-+struct squashfs_ldir_inode_header_2 {
-+ unsigned int inode_type:4;
-+ unsigned int mode:12; /* protection */
-+ unsigned int uid:8; /* index into uid table */
-+ unsigned int guid:8; /* index into guid table */
-+ unsigned int file_size:27;
-+ unsigned int offset:13;
-+ unsigned int mtime;
-+ unsigned int start_block:24;
-+ unsigned int i_count:16;
-+ struct squashfs_dir_index_2 index[0];
-+} __attribute__ ((packed));
-+
-+union squashfs_inode_header_2 {
-+ struct squashfs_base_inode_header_2 base;
-+ struct squashfs_dev_inode_header_2 dev;
-+ struct squashfs_symlink_inode_header_2 symlink;
-+ struct squashfs_reg_inode_header_2 reg;
-+ struct squashfs_dir_inode_header_2 dir;
-+ struct squashfs_ldir_inode_header_2 ldir;
-+ struct squashfs_ipc_inode_header_2 ipc;
-+};
-+
-+struct squashfs_dir_header_2 {
-+ unsigned int count:8;
-+ unsigned int start_block:24;
-+} __attribute__ ((packed));
-+
-+struct squashfs_dir_entry_2 {
-+ unsigned int offset:13;
-+ unsigned int type:3;
-+ unsigned int size:8;
-+ char name[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_fragment_entry_2 {
-+ unsigned int start_block;
-+ unsigned int size;
-+} __attribute__ ((packed));
-+
-+#define SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, n)\
-+ SQUASHFS_MEMSET(s, d, n);\
-+ SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
-+ SQUASHFS_SWAP((s)->mode, d, 4, 12);\
-+ SQUASHFS_SWAP((s)->uid, d, 16, 8);\
-+ SQUASHFS_SWAP((s)->guid, d, 24, 8);\
-+
-+#define SQUASHFS_SWAP_BASE_INODE_HEADER_2(s, d, n) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, n)\
-+}
-+
-+#define SQUASHFS_SWAP_IPC_INODE_HEADER_2(s, d) \
-+ SQUASHFS_SWAP_BASE_INODE_HEADER_2(s, d, sizeof(struct squashfs_ipc_inode_header_2))
-+
-+#define SQUASHFS_SWAP_DEV_INODE_HEADER_2(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
-+ sizeof(struct squashfs_dev_inode_header_2)); \
-+ SQUASHFS_SWAP((s)->rdev, d, 32, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER_2(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
-+ sizeof(struct squashfs_symlink_inode_header_2));\
-+ SQUASHFS_SWAP((s)->symlink_size, d, 32, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_REG_INODE_HEADER_2(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
-+ sizeof(struct squashfs_reg_inode_header_2));\
-+ SQUASHFS_SWAP((s)->mtime, d, 32, 32);\
-+ SQUASHFS_SWAP((s)->start_block, d, 64, 32);\
-+ SQUASHFS_SWAP((s)->fragment, d, 96, 32);\
-+ SQUASHFS_SWAP((s)->offset, d, 128, 32);\
-+ SQUASHFS_SWAP((s)->file_size, d, 160, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_INODE_HEADER_2(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
-+ sizeof(struct squashfs_dir_inode_header_2));\
-+ SQUASHFS_SWAP((s)->file_size, d, 32, 19);\
-+ SQUASHFS_SWAP((s)->offset, d, 51, 13);\
-+ SQUASHFS_SWAP((s)->mtime, d, 64, 32);\
-+ SQUASHFS_SWAP((s)->start_block, d, 96, 24);\
-+}
-+
-+#define SQUASHFS_SWAP_LDIR_INODE_HEADER_2(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
-+ sizeof(struct squashfs_ldir_inode_header_2));\
-+ SQUASHFS_SWAP((s)->file_size, d, 32, 27);\
-+ SQUASHFS_SWAP((s)->offset, d, 59, 13);\
-+ SQUASHFS_SWAP((s)->mtime, d, 72, 32);\
-+ SQUASHFS_SWAP((s)->start_block, d, 104, 24);\
-+ SQUASHFS_SWAP((s)->i_count, d, 128, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_INDEX_2(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_index_2));\
-+ SQUASHFS_SWAP((s)->index, d, 0, 27);\
-+ SQUASHFS_SWAP((s)->start_block, d, 27, 29);\
-+ SQUASHFS_SWAP((s)->size, d, 56, 8);\
-+}
-+#define SQUASHFS_SWAP_DIR_HEADER_2(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_header_2));\
-+ SQUASHFS_SWAP((s)->count, d, 0, 8);\
-+ SQUASHFS_SWAP((s)->start_block, d, 8, 24);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_ENTRY_2(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_entry_2));\
-+ SQUASHFS_SWAP((s)->offset, d, 0, 13);\
-+ SQUASHFS_SWAP((s)->type, d, 13, 3);\
-+ SQUASHFS_SWAP((s)->size, d, 16, 8);\
-+}
-+
-+#define SQUASHFS_SWAP_FRAGMENT_ENTRY_2(s, d) {\
-+ SQUASHFS_SWAP_START\
-+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_fragment_entry_2));\
-+ SQUASHFS_SWAP((s)->start_block, d, 0, 32);\
-+ SQUASHFS_SWAP((s)->size, d, 32, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_FRAGMENT_INDEXES_2(s, d, n) SQUASHFS_SWAP_INTS(s, d, n)
-+
-+/* fragment and fragment table defines */
-+#define SQUASHFS_FRAGMENT_BYTES_2(A) (A * sizeof(struct squashfs_fragment_entry_2))
-+
-+#define SQUASHFS_FRAGMENT_INDEX_2(A) (SQUASHFS_FRAGMENT_BYTES_2(A) / \
-+ SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_FRAGMENT_INDEX_OFFSET_2(A) (SQUASHFS_FRAGMENT_BYTES_2(A) % \
-+ SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_FRAGMENT_INDEXES_2(A) ((SQUASHFS_FRAGMENT_BYTES_2(A) + \
-+ SQUASHFS_METADATA_SIZE - 1) / \
-+ SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_FRAGMENT_INDEX_BYTES_2(A) (SQUASHFS_FRAGMENT_INDEXES_2(A) *\
-+ sizeof(int))
-+
-+#endif
-+
-+#ifdef __KERNEL__
-+
-+/*
-+ * macros used to swap each structure entry, taking into account
-+ * bitfields and different bitfield placing conventions on differing
-+ * architectures
-+ */
-+
-+#include <asm/byteorder.h>
-+
-+#ifdef __BIG_ENDIAN
-+ /* convert from little endian to big endian */
-+#define SQUASHFS_SWAP(value, p, pos, tbits) _SQUASHFS_SWAP(value, p, pos, \
-+ tbits, b_pos)
-+#else
-+ /* convert from big endian to little endian */
-+#define SQUASHFS_SWAP(value, p, pos, tbits) _SQUASHFS_SWAP(value, p, pos, \
-+ tbits, 64 - tbits - b_pos)
-+#endif
-+
-+#define _SQUASHFS_SWAP(value, p, pos, tbits, SHIFT) {\
-+ b_pos = pos % 8;\
-+ val = 0;\
-+ s = (unsigned char *)p + (pos / 8);\
-+ d = ((unsigned char *) &val) + 7;\
-+ for(bits = 0; bits < (tbits + b_pos); bits += 8) \
-+ *d-- = *s++;\
-+ value = (val >> (SHIFT))/* & ((1 << tbits) - 1)*/;\
-+}
-+
-+#define SQUASHFS_MEMSET(s, d, n) memset(s, 0, n);
-+
-+#endif
-+#endif
-diff --new-file -urp linux-2.6.15/include/linux/squashfs_fs_i.h linux-2.6.15-squashfs3.0/include/linux/squashfs_fs_i.h
---- linux-2.6.15/include/linux/squashfs_fs_i.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.15-squashfs3.0/include/linux/squashfs_fs_i.h 2006-03-07 21:12:37.000000000 +0000
-@@ -0,0 +1,45 @@
-+#ifndef SQUASHFS_FS_I
-+#define SQUASHFS_FS_I
-+/*
-+ * Squashfs
-+ *
-+ * Copyright (c) 2002, 2003, 2004, 2005, 2006
-+ * Phillip Lougher <phillip@lougher.org.uk>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2,
-+ * or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * squashfs_fs_i.h
-+ */
-+
-+struct squashfs_inode_info {
-+ long long start_block;
-+ unsigned int offset;
-+ union {
-+ struct {
-+ long long fragment_start_block;
-+ unsigned int fragment_size;
-+ unsigned int fragment_offset;
-+ long long block_list_start;
-+ } s1;
-+ struct {
-+ long long directory_index_start;
-+ unsigned int directory_index_offset;
-+ unsigned int directory_index_count;
-+ unsigned int parent_inode;
-+ } s2;
-+ } u;
-+ struct inode vfs_inode;
-+};
-+#endif
-diff --new-file -urp linux-2.6.15/include/linux/squashfs_fs_sb.h linux-2.6.15-squashfs3.0/include/linux/squashfs_fs_sb.h
---- linux-2.6.15/include/linux/squashfs_fs_sb.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.15-squashfs3.0/include/linux/squashfs_fs_sb.h 2006-03-07 21:12:37.000000000 +0000
-@@ -0,0 +1,74 @@
-+#ifndef SQUASHFS_FS_SB
-+#define SQUASHFS_FS_SB
-+/*
-+ * Squashfs
-+ *
-+ * Copyright (c) 2002, 2003, 2004, 2005, 2006
-+ * Phillip Lougher <phillip@lougher.org.uk>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2,
-+ * or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * squashfs_fs_sb.h
-+ */
-+
-+#include <linux/squashfs_fs.h>
-+
-+struct squashfs_cache {
-+ long long block;
-+ int length;
-+ long long next_index;
-+ char *data;
-+};
-+
-+struct squashfs_fragment_cache {
-+ long long block;
-+ int length;
-+ unsigned int locked;
-+ char *data;
-+};
-+
-+struct squashfs_sb_info {
-+ struct squashfs_super_block sblk;
-+ int devblksize;
-+ int devblksize_log2;
-+ int swap;
-+ struct squashfs_cache *block_cache;
-+ struct squashfs_fragment_cache *fragment;
-+ int next_cache;
-+ int next_fragment;
-+ int next_meta_index;
-+ unsigned int *uid;
-+ unsigned int *guid;
-+ long long *fragment_index;
-+ unsigned int *fragment_index_2;
-+ unsigned int read_size;
-+ char *read_data;
-+ char *read_page;
-+ struct semaphore read_data_mutex;
-+ struct semaphore read_page_mutex;
-+ struct semaphore block_cache_mutex;
-+ struct semaphore fragment_mutex;
-+ struct semaphore meta_index_mutex;
-+ wait_queue_head_t waitq;
-+ wait_queue_head_t fragment_wait_queue;
-+ struct meta_index *meta_index;
-+ struct inode *(*iget)(struct super_block *s, squashfs_inode_t \
-+ inode);
-+ long long (*read_blocklist)(struct inode *inode, int \
-+ index, int readahead_blks, char *block_list, \
-+ unsigned short **block_p, unsigned int *bsize);
-+ int (*read_fragment_index_table)(struct super_block *s);
-+};
-+#endif
-diff --new-file -urp linux-2.6.15/init/do_mounts_rd.c linux-2.6.15-squashfs3.0/init/do_mounts_rd.c
---- linux-2.6.15/init/do_mounts_rd.c 2006-03-01 22:37:27.000000000 +0000
-+++ linux-2.6.15-squashfs3.0/init/do_mounts_rd.c 2006-03-07 21:12:37.000000000 +0000
-@@ -5,6 +5,7 @@
- #include <linux/ext2_fs.h>
- #include <linux/romfs_fs.h>
- #include <linux/cramfs_fs.h>
-+#include <linux/squashfs_fs.h>
- #include <linux/initrd.h>
- #include <linux/string.h>
-
-@@ -39,6 +40,7 @@ static int __init crd_load(int in_fd, in
- * numbers could not be found.
- *
- * We currently check for the following magic numbers:
-+ * squashfs
- * minix
- * ext2
- * romfs
-@@ -53,6 +55,7 @@ identify_ramdisk_image(int fd, int start
- struct ext2_super_block *ext2sb;
- struct romfs_super_block *romfsb;
- struct cramfs_super *cramfsb;
-+ struct squashfs_super_block *squashfsb;
- int nblocks = -1;
- unsigned char *buf;
-
-@@ -64,6 +67,7 @@ identify_ramdisk_image(int fd, int start
- ext2sb = (struct ext2_super_block *) buf;
- romfsb = (struct romfs_super_block *) buf;
- cramfsb = (struct cramfs_super *) buf;
-+ squashfsb = (struct squashfs_super_block *) buf;
- memset(buf, 0xe5, size);
-
- /*
-@@ -101,6 +105,15 @@ identify_ramdisk_image(int fd, int start
- goto done;
- }
-
-+ /* squashfs is at block zero too */
-+ if (squashfsb->s_magic == SQUASHFS_MAGIC) {
-+ printk(KERN_NOTICE
-+ "RAMDISK: squashfs filesystem found at block %d\n",
-+ start_block);
-+ nblocks = (squashfsb->bytes_used+BLOCK_SIZE-1)>>BLOCK_SIZE_BITS;
-+ goto done;
-+ }
-+
- /*
- * Read block 1 to test for minix and ext2 superblock
- */