diff -uNr linux-2.6.16.orig/drivers/net/forcedeth.c linux-2.6.16/drivers/net/forcedeth.c
---- linux-2.6.16.orig/drivers/net/forcedeth.c 2006-03-20 06:53:29.000000000 +0100
-+++ linux-2.6.16/drivers/net/forcedeth.c 2008-11-02 20:40:40.000000000 +0100
-@@ -102,6 +102,19 @@
+--- linux-2.6.16.orig/drivers/net/forcedeth.c 2007-06-23 20:16:01.572248000 +0200
++++ linux-2.6.16/drivers/net/forcedeth.c 2006-10-21 14:44:00.000000000 +0200
+@@ -102,6 +102,17 @@
* 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
* 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
* 0.49: 10 Dec 2005: Fix tso for large buffers.
+ * 0.58: 20 May 2006: Optimized rx and tx data paths.
+ * 0.59: 31 May 2006: Added support for sideband management unit.
+ * 0.60: 31 May 2006: Added support for recoverable error.
-+ * 0.61: 18 Jul 2006: Added support for suspend/resume.
-+ * 0.62: 16 Jan 2007: Fixed statistics, mgmt communication, and low phy speed on S5.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
-@@ -113,8 +126,9 @@
+@@ -113,7 +124,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.49"
-+#define FORCEDETH_VERSION "0.62-Driver Package V1.23"
++#define FORCEDETH_VERSION "0.60-Driver Package V1.21"
#define DRV_NAME "forcedeth"
-+#define DRV_DATE "2007/04/06"
#include <linux/module.h>
- #include <linux/types.h>
-@@ -131,18 +145,240 @@
+@@ -131,34 +142,189 @@
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
+#include <linux/rtnetlink.h>
-+#include <linux/reboot.h>
+#include <linux/version.h>
+
+#define RHES3 0
+#define RHES4 2
+#define SUSE10 3
+#define FEDORA5 4
-+#define FEDORA6 5
+
-+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17)
-+#define NVVER FEDORA6
-+#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13)
++
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13)
+#define NVVER FEDORA5
+#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
+#define NVVER SUSE10
#define dprintk(x...) do { } while (0)
#endif
-+#define DPRINTK(nlevel,klevel,args...) (void)((debug & NETIF_MSG_##nlevel) && printk(klevel args))
-+
+/* it should add in pci_ids.h */
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_12
+#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268
+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_23
+#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453
+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_24
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_24 0x054c
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_25
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_25 0x054d
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_26
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_26 0x054e
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_27
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_27 0x054f
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_28
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_28 0x07DC
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_29
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_29 0x07DD
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_30
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_30 0x07DE
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_31
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_31 0x07DF
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_32
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_32 0x0760
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_33
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_33 0x0761
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_34
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_34 0x0762
-+#endif
-+#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_35
-+#define PCI_DEVICE_ID_NVIDIA_NVENET_35 0x0763
-+#endif
+
+/* it should add in mii.h */
+#ifndef ADVERTISE_1000HALF
+#define __iomem
+#endif
+
-+#ifndef __bitwise
-+#define __bitwise
-+#endif
-+
-+#ifndef __force
-+#define __force
-+#endif
-+
-+#ifndef PCI_D0
-+#define PCI_D0 ((int __bitwise __force) 0)
-+#endif
-+
-+#ifndef PM_EVENT_SUSPEND
-+#define PM_EVENT_SUSPEND 2
-+#endif
-+
-+#if NVVER < SUSE10
-+#define pm_message_t u32
-+#endif
-+
+/* rx/tx mac addr + type + vlan + align + slack*/
+#ifndef RX_NIC_BUFSIZE
+#define RX_NIC_BUFSIZE (ETH_DATA_LEN + 64)
+ u16 vector; /* kernel uses to write allocated vector */
+ u16 entry; /* driver uses to specify entry, OS writes */
+};
-+#endif
-+
-+#ifndef PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET
-+#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET 0x00
-+#endif
-+
-+#ifndef PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET
-+#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 0x04
-+#endif
-+
-+#ifndef PCI_MSIX_ENTRY_DATA_OFFSET
-+#define PCI_MSIX_ENTRY_DATA_OFFSET 0x08
-+#endif
-+
-+#ifndef PCI_MSIX_ENTRY_SIZE
-+#define PCI_MSIX_ENTRY_SIZE 16
-+#endif
-+
-+#ifndef PCI_MSIX_FLAGS_BIRMASK
-+#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
-+#endif
-+
-+#ifndef PCI_CAP_ID_MSIX
-+#define PCI_CAP_ID_MSIX 0x11
+#endif
/*
* Hardware access:
-@@ -153,11 +389,40 @@
+ */
+
+-#define DEV_NEED_TIMERIRQ 0x0000 /* work-around for Wake-On-Lan */
+-#define DEV_NEED_TIMERIRQ_ORIG 0x0001 /* set the timer irq flag in the irq mask */
++#define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
+ #define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
+#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
+#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
+#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
-+#define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */
-+#define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */
-+#define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */
-+#define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */
-+#define DEV_HAS_CORRECT_MACADDR 0x4000 /* device supports correct mac address */
++#define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */
++#define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */
++#define DEV_HAS_MGMT_UNIT 0x1000 /* device supports management unit */
+
+#define NVIDIA_ETHERNET_ID(deviceid,nv_driver_data) {\
+ .vendor = PCI_VENDOR_ID_NVIDIA, \
+
+#define Mv_LED_Control 16
+#define Mv_Page_Address 22
-+#define Mv_LED_FORCE_OFF 0x88
-+#define Mv_LED_DUAL_MODE3 0x40
-+
-+struct nvmsi_msg{
-+ u32 address_lo;
-+ u32 address_hi;
-+ u32 data;
-+};
enum {
NvRegIrqStatus = 0x000,
NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
-@@ -166,14 +431,18 @@
+@@ -167,14 +333,18 @@
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3
-@@ -185,25 +454,45 @@
+@@ -186,25 +356,45 @@
NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 970
#define NVREG_POLL_DEFAULT_CPU 13
NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01
-@@ -213,10 +502,12 @@
+@@ -214,10 +404,12 @@
#define NVREG_RNDSEED_FORCE2 0x2d00
#define NVREG_RNDSEED_FORCE3 0x7400
NvRegMacAddrA = 0xA8,
NvRegMacAddrB = 0xAC,
NvRegMulticastAddrA = 0xB0,
-@@ -233,7 +524,8 @@
+@@ -234,7 +426,8 @@
NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
-@@ -242,8 +534,10 @@
+@@ -243,8 +436,10 @@
#define NVREG_LINKSPEED_MASK (0xFFF)
NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
-@@ -252,15 +546,22 @@
+@@ -253,15 +448,22 @@
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
-@@ -290,6 +591,7 @@
+@@ -291,6 +493,7 @@
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
NvRegPatternCRC = 0x204,
NvRegPatternMask = 0x208,
NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
-@@ -303,6 +605,43 @@
+@@ -304,6 +507,43 @@
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
};
/* Big endian: should work, but is untested */
-@@ -314,7 +653,7 @@
+@@ -315,7 +555,7 @@
struct ring_desc_ex {
u32 PacketBufferHigh;
u32 PacketBufferLow;
u32 FlagLen;
};
-@@ -335,7 +674,7 @@
+@@ -336,7 +576,7 @@
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_VALID (1<<31)
#define NV_TX2_LASTPACKET (1<<29)
-@@ -346,7 +685,7 @@
+@@ -347,7 +587,7 @@
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
-@@ -355,6 +694,8 @@
+@@ -356,6 +596,8 @@
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)
#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBSTRACT1 (1<<18)
-@@ -365,7 +706,7 @@
+@@ -366,7 +608,7 @@
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_AVAIL (1<<31)
#define NV_RX2_CHECKSUMMASK (0x1C000000)
-@@ -382,11 +723,16 @@
+@@ -383,11 +625,15 @@
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
/* Miscelaneous hardware related defines: */
-#define NV_PCI_REGSZ 0x270
+#define NV_PCI_REGSZ_VER1 0x270
-+#define NV_PCI_REGSZ_VER2 0x2d4
-+#define NV_PCI_REGSZ_VER3 0x604
++#define NV_PCI_REGSZ_VER2 0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
-@@ -403,6 +749,7 @@
+@@ -404,6 +650,7 @@
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4
-@@ -410,16 +757,18 @@
+@@ -411,16 +658,18 @@
/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)
/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS (64)
-@@ -433,6 +782,7 @@
+@@ -434,6 +683,7 @@
#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
/*
* desc_ver values:
-@@ -448,16 +798,38 @@
+@@ -449,16 +699,37 @@
/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
-#define PHY_INIT6 0x02000
+#define PHYID2_MODEL_MASK 0x03f0
+#define PHY_MODEL_MARVELL_E3016 0x220
-+#define PHY_MODEL_MARVELL_E1011 0xb0
+#define PHY_MARVELL_E3016_INITMASK 0x0300
+#define PHY_CICADA_INIT1 0x0f000
+#define PHY_CICADA_INIT2 0x0e00
#define PHY_GIGABIT 0x0100
#define PHY_TIMEOUT 0x1
-@@ -467,14 +839,148 @@
+@@ -468,14 +739,148 @@
#define PHY_1000 0x2
#define PHY_HALF 0x100
+#define NV_MSI_X_VECTOR_TX 0x1
+#define NV_MSI_X_VECTOR_OTHER 0x2
+
++/* statistics */
++#define NV_STATS_COUNT_SW 10
++
+#define NVLAN_DISABLE_ALL_FEATURES do { \
+ msi = NV_MSI_INT_DISABLED; \
+ msix = NV_MSIX_INT_DISABLED; \
+ u64 rx_pause;
+ u64 rx_drop_frame;
+};
-+#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
-+#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 4)
-+#define NV_DEV_STATISTICS_SW_COUNT 10
+
+/* diagnostics */
+#define NV_TEST_COUNT_BASE 3
+ { "interrupt (offline) " },
+ { "loopback (offline) " }
+};
-
++
+struct register_test {
+ u32 reg;
+ u32 mask;
+ { NvRegWakeUpFlags, 0x07777 },
+ { 0,0 }
+};
-+
+
+struct nv_skb_map {
+ struct sk_buff *skb;
+ dma_addr_t dma;
/*
* SMP locking:
-@@ -489,57 +995,105 @@
+@@ -490,11 +895,48 @@
/* in dev: base, irq */
struct fe_priv {
int in_shutdown;
u32 linkspeed;
int duplex;
-+ int speed_duplex;
- int autoneg;
- int fixed_mode;
+@@ -503,44 +945,46 @@
int phyaddr;
int wolenabled;
unsigned int phy_oui;
+ /* flow control */
+ u32 pause_flags;
+ u32 led_stats[3];
-+ u32 saved_config_space[64];
-+ u32 saved_nvregphyinterface;
-+#if NVVER < SUSE10
-+ u32 pci_state[16];
-+#endif
-+ /* msix table */
-+ struct nvmsi_msg nvmsg[NV_MSI_X_MAX_VECTORS];
-+ unsigned long msix_pa_addr;
};
/*
-@@ -554,8 +1108,10 @@
+@@ -555,8 +999,10 @@
* Throughput Mode: Every tx and rx packet will generate an interrupt.
* CPU Mode: Interrupts are controlled by a timer.
*/
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
/*
-@@ -567,14 +1123,221 @@
+@@ -568,14 +1014,213 @@
*/
static int poll_interval = -1;
+};
+static int tagging_8021pq = NV_8021PQ_ENABLED;
+
-+enum {
-+ NV_LOW_POWER_DISABLED,
-+ NV_LOW_POWER_ENABLED
-+};
-+static int lowpowerspeed = NV_LOW_POWER_ENABLED;
-+
-+static int debug = 0;
-+
+#if NVVER < RHES4
+static inline unsigned long nv_msecs_to_jiffies(const unsigned int m)
+{
}
static inline void pci_push(u8 __iomem *base)
-@@ -612,22 +1375,137 @@
+@@ -613,78 +1258,247 @@
return 0;
}
- reg = readl(base + NvRegMIIControl);
- if (reg & NVREG_MIICTL_INUSE) {
- writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
+- udelay(NV_MIIBUSY_DELAY);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ if (rxtx_flags & NV_SETUP_RX_RING) {
+ writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
+ writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+ writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
+ }
-+ }
+ }
+}
-+
+
+- reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
+- if (value != MII_READ) {
+- writel(value, base + NvRegMIIData);
+- reg |= NVREG_MIICTL_WRITE;
+- }
+- writel(reg, base + NvRegMIIControl);
+static void free_rings(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
-+
+
+- if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
+- NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
+- dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
+- dev->name, miireg, addr);
+- retval = -1;
+- } else if (value != MII_READ) {
+- /* it was a write operation - fewer failures are detectable */
+- dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
+- dev->name, value, miireg, addr);
+- retval = 0;
+- } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
+- dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
+- dev->name, miireg, addr);
+- retval = -1;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ if(np->rx_ring.orig)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
+ np->rx_ring.orig, np->ring_addr);
-+ } else {
+ } else {
+- retval = readl(base + NvRegMIIData);
+- dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
+- dev->name, miireg, addr, retval);
+ if (np->rx_ring.ex)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
+ np->rx_ring.ex, np->ring_addr);
-+ }
+ }
+-
+- return retval;
+ if (np->rx_skb)
+ kfree(np->rx_skb);
+ if (np->tx_skb)
+ kfree(np->tx_skb);
-+}
-+
+ }
+
+-static int phy_reset(struct net_device *dev)
+static int using_multi_irqs(struct net_device *dev)
-+{
+ {
+- struct fe_priv *np = netdev_priv(dev);
+- u32 miicontrol;
+- unsigned int tries = 0;
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_enable_irq: begin\n",dev->name);
+ /* modify network device class id */
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_disable_irq: begin\n",dev->name);
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ reg = readl(base + NvRegMIIControl);
+ if (reg & NVREG_MIICTL_INUSE) {
+ writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
- udelay(NV_MIIBUSY_DELAY);
- }
-
-@@ -661,29 +1539,112 @@
- return retval;
- }
-
--static int phy_reset(struct net_device *dev)
++ udelay(NV_MIIBUSY_DELAY);
++ }
++
++ reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
++ if (value != MII_READ) {
++ writel(value, base + NvRegMIIData);
++ reg |= NVREG_MIICTL_WRITE;
++ }
++ writel(reg, base + NvRegMIIControl);
++
++ if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
++ NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
++ dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
++ dev->name, miireg, addr);
++ retval = -1;
++ } else if (value != MII_READ) {
++ /* it was a write operation - fewer failures are detectable */
++ dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
++ dev->name, value, miireg, addr);
++ retval = 0;
++ } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
++ dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
++ dev->name, miireg, addr);
++ retval = -1;
++ } else {
++ retval = readl(base + NvRegMIIData);
++ dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
++ dev->name, miireg, addr, retval);
++ }
++
++ return retval;
++}
++
+static void nv_save_LED_stats(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ dprintk(KERN_DEBUG "%s: save LED reg%d: value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
+ }
+
++ reg = Mv_Page_Address;
++ value = 0;
++ mii_rw(dev,np->phyaddr,reg,value);
++ udelay(5);
+}
+
+static void nv_restore_LED_stats(struct net_device *dev)
+ u32 reg=0;
+ u32 value=0;
+ int i=0;
-+
+
+- miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+- miicontrol |= BMCR_RESET;
+ reg = Mv_Page_Address;
+ value = 3;
+ mii_rw(dev,np->phyaddr,reg,value);
+ dprintk(KERN_DEBUG "%s: restore LED reg%d: value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
+ }
+
-+}
-+
-+static void nv_LED_on(struct net_device *dev)
-+{
-+ struct fe_priv *np = get_nvpriv(dev);
-+ u32 reg=0;
-+ u32 value=0;
-+
+ reg = Mv_Page_Address;
-+ value = 3;
++ value = 0;
+ mii_rw(dev,np->phyaddr,reg,value);
+ udelay(5);
-+
-+ reg = Mv_LED_Control;
-+ mii_rw(dev,np->phyaddr,reg,Mv_LED_DUAL_MODE3);
-+
+}
+
-+static void nv_LED_off(struct net_device *dev)
++static int phy_reset(struct net_device *dev, u32 bmcr_setup)
+{
+ struct fe_priv *np = get_nvpriv(dev);
-+ u32 reg=0;
-+ u32 value=0;
-+
-+ reg = Mv_Page_Address;
-+ value = 3;
-+ mii_rw(dev,np->phyaddr,reg,value);
-+ udelay(5);
-+
-+ reg = Mv_LED_Control;
-+ mii_rw(dev,np->phyaddr,reg,Mv_LED_FORCE_OFF);
-+ udelay(1);
-+
-+}
++ u32 miicontrol;
++ unsigned int tries = 0;
+
-+static int phy_reset(struct net_device *dev, u32 bmcr_setup)
- {
-- struct fe_priv *np = netdev_priv(dev);
-+ struct fe_priv *np = get_nvpriv(dev);
- u32 miicontrol;
- unsigned int tries = 0;
-
-- miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-- miicontrol |= BMCR_RESET;
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: phy_reset: begin\n",dev->name);
+ /**/
-+ if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011) {
-+ nv_save_LED_stats(dev);
-+ }
++ nv_save_LED_stats(dev);
+ miicontrol = BMCR_RESET | bmcr_setup;
if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
return -1;
if (tries++ > 100)
return -1;
}
-+ if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011) {
-+ nv_restore_LED_stats(dev);
-+ }
++ nv_restore_LED_stats(dev);
+
return 0;
}
-@@ -693,9 +1654,36 @@
+@@ -694,9 +1508,36 @@
u8 __iomem *base = get_hwbase(dev);
u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: phy_init: begin\n",dev->name);
+ /* phy errata for E3016 phy */
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
+ reg &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-+ if (np->speed_duplex == NV_SPEED_DUPLEX_AUTO)
++ if (speed_duplex == NV_SPEED_DUPLEX_AUTO)
+ reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL);
-+ if (np->speed_duplex == NV_SPEED_DUPLEX_10_HALF_DUPLEX)
++ if (speed_duplex == NV_SPEED_DUPLEX_10_HALF_DUPLEX)
+ reg |= ADVERTISE_10HALF;
-+ if (np->speed_duplex == NV_SPEED_DUPLEX_10_FULL_DUPLEX)
++ if (speed_duplex == NV_SPEED_DUPLEX_10_FULL_DUPLEX)
+ reg |= ADVERTISE_10FULL;
-+ if (np->speed_duplex == NV_SPEED_DUPLEX_100_HALF_DUPLEX)
++ if (speed_duplex == NV_SPEED_DUPLEX_100_HALF_DUPLEX)
+ reg |= ADVERTISE_100HALF;
-+ if (np->speed_duplex == NV_SPEED_DUPLEX_100_FULL_DUPLEX)
++ if (speed_duplex == NV_SPEED_DUPLEX_100_FULL_DUPLEX)
+ reg |= ADVERTISE_100FULL;
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
+ reg |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
-@@ -708,14 +1696,18 @@
+@@ -709,14 +1550,18 @@
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
if (mii_status & PHY_GIGABIT) {
np->gigabit = PHY_GIGABIT;
mii_control_1000 &= ~ADVERTISE_1000HALF;
- if (phyinterface & PHY_RGMII)
+ if (phyinterface & PHY_RGMII &&
-+ (np->speed_duplex == NV_SPEED_DUPLEX_AUTO ||
-+ (np->speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && np->autoneg == AUTONEG_ENABLE)))
++ (speed_duplex == NV_SPEED_DUPLEX_AUTO ||
++ (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && autoneg == AUTONEG_ENABLE)))
mii_control_1000 |= ADVERTISE_1000FULL;
- else
+ else {
-+ if (np->speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && np->autoneg == AUTONEG_DISABLE)
++ if (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && autoneg == AUTONEG_DISABLE)
+ printk(KERN_INFO "%s: 1000mpbs full only allowed with autoneg\n", pci_name(np->pci_dev));
mii_control_1000 &= ~ADVERTISE_1000FULL;
-
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
-@@ -723,8 +1715,25 @@
+@@ -724,8 +1569,25 @@
else
np->gigabit = 0;
- /* reset the phy */
- if (phy_reset(dev)) {
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-+ if (np->autoneg == AUTONEG_DISABLE){
++ if (autoneg == AUTONEG_DISABLE){
+ np->pause_flags &= ~(NV_PAUSEFRAME_RX_ENABLE | NV_PAUSEFRAME_TX_ENABLE);
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
-@@ -732,14 +1741,14 @@
+@@ -733,14 +1595,14 @@
/* phy vendor specific configuration */
if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
-@@ -747,18 +1756,92 @@
+@@ -748,18 +1610,92 @@
}
if (np->phy_oui == PHY_OUI_CICADA) {
phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
- mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
- if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
- return PHY_ERROR;
-+ if (np->autoneg == AUTONEG_ENABLE) {
++ if (autoneg == AUTONEG_ENABLE) {
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
}
return 0;
-@@ -766,18 +1849,24 @@
+@@ -767,18 +1703,23 @@
static void nv_start_rx(struct net_device *dev)
{
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
+ u32 rx_ctrl = readl(base + NvRegReceiverControl);
-+
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
-- dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
+ dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
/* Already running? Stop it. */
- if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
- writel(0, base + NvRegReceiverControl);
dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
dev->name, np->duplex, np->linkspeed);
pci_push(base);
-@@ -785,47 +1874,66 @@
+@@ -786,44 +1727,63 @@
static void nv_stop_rx(struct net_device *dev)
{
u8 __iomem *base = get_hwbase(dev);
+ u32 rx_ctrl = readl(base + NvRegReceiverControl);
-- dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
+ dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
- writel(0, base + NvRegReceiverControl);
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ if (!np->mac_in_use)
+ rx_ctrl &= ~NVREG_RCVCTL_START;
+ else
u8 __iomem *base = get_hwbase(dev);
+ u32 tx_ctrl = readl(base + NvRegTransmitterControl);
-- dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
+ dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
- writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ tx_ctrl |= NVREG_XMITCTL_START;
+ if (np->mac_in_use)
+ tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
u8 __iomem *base = get_hwbase(dev);
+ u32 tx_ctrl = readl(base + NvRegTransmitterControl);
-- dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
+ dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
- writel(0, base + NvRegTransmitterControl);
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
+ if (!np->mac_in_use)
+ tx_ctrl &= ~NVREG_XMITCTL_START;
+ else
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
-- dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
- writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
- pci_push(base);
- udelay(NV_TXRX_RESET_DELAY);
-@@ -833,140 +1941,301 @@
+ dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
+@@ -834,140 +1794,301 @@
pci_push(base);
}
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
+ writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
+ pci_push(base);
+ writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
return 1;
} else {
return 0;
-@@ -975,14 +2244,19 @@
+@@ -976,14 +2097,19 @@
static void nv_drain_tx(struct net_device *dev)
{
if (nv_release_txskb(dev, i))
np->stats.tx_dropped++;
}
-@@ -990,20 +2264,25 @@
+@@ -991,20 +2117,25 @@
static void nv_drain_rx(struct net_device *dev)
{
}
}
}
-@@ -1020,52 +2299,51 @@
+@@ -1021,52 +2152,51 @@
*/
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct ring_desc* prev_tx;
+ struct nv_skb_map* prev_tx_ctx;
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ //dprintk(KERN_DEBUG "%s: nv_start_xmit \n", dev->name);
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
} while(size);
/* setup the fragments */
-@@ -1075,68 +2353,174 @@
+@@ -1076,34 +2206,133 @@
offset = 0;
do {
+ prev_tx_ctx->skb = skb;
+
+#ifdef NETIF_F_TSO
-+#if NVVER > FEDORA5
-+ if (skb_shinfo(skb)->gso_size)
-+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
-+#else
+ if (skb_shinfo(skb)->tso_size)
+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
-+#endif
+ else
+#endif
+ tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
+
+ u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ //dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized \n", dev->name);
+ /* add fragments to entries count */
+ for (i = 0; i < fragments; i++) {
+ entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
+ prev_tx_ctx->skb = skb;
#ifdef NETIF_F_TSO
-+#if NVVER > FEDORA5
-+ if (skb_shinfo(skb)->gso_size)
-+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
-+#else
if (skb_shinfo(skb)->tso_size)
- tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
-+#endif
- else
+@@ -1112,32 +2341,29 @@
#endif
tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
}
/*
-@@ -1144,30 +2528,26 @@
+@@ -1145,30 +2371,26 @@
*
* Caller must own np->lock.
*/
- while (np->nic_tx != np->next_tx) {
- i = np->nic_tx % TX_RING;
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ //dprintk(KERN_DEBUG "%s: nv_tx_done \n", dev->name);
+ while ((np->get_tx.orig != put_tx) &&
+ !((Flags = le32_to_cpu(np->get_tx.orig->FlagLen)) & NV_TX_VALID)) {
+ dprintk(KERN_DEBUG "%s: nv_tx_done:NVLAN tx done\n", dev->name);
if (Flags & NV_TX_UNDERFLOW)
np->stats.tx_fifo_errors++;
if (Flags & NV_TX_CARRIERLOST)
-@@ -1175,14 +2555,15 @@
+@@ -1176,14 +2398,15 @@
np->stats.tx_errors++;
} else {
np->stats.tx_packets++;
if (Flags & NV_TX2_UNDERFLOW)
np->stats.tx_fifo_errors++;
if (Flags & NV_TX2_CARRIERLOST)
-@@ -1190,15 +2571,58 @@
+@@ -1191,15 +2414,59 @@
np->stats.tx_errors++;
} else {
np->stats.tx_packets++;
+ struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
+ struct ring_desc_ex* put_tx = np->put_tx.ex;
+
++ //dprintk(KERN_DEBUG "%s: nv_tx_done_optimized \n", dev->name);
+ while ((np->get_tx.ex != put_tx) &&
+ !((Flags = le32_to_cpu(np->get_tx.ex->FlagLen)) & NV_TX_VALID) &&
+ (max_work-- > 0)) {
}
/*
-@@ -1207,20 +2631,34 @@
+@@ -1208,20 +2475,34 @@
*/
static void nv_tx_timeout(struct net_device *dev)
{
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
+ u32 status;
-+
+
+- printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
+- readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
+ if (!netif_running(dev))
+ return;
+
+ status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+ else
+ status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
-
-- printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
-- readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
++
+ printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
{
printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
i,
readl(base + i + 0), readl(base + i + 4),
-@@ -1229,7 +2667,7 @@
+@@ -1230,7 +2511,7 @@
readl(base + i + 24), readl(base + i + 28));
}
printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
i,
-@@ -1260,29 +2698,36 @@
+@@ -1261,29 +2542,35 @@
}
}
nv_stop_tx(dev);
/* 2) check that the packets were not sent already: */
-- nv_tx_done(dev);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-+ nv_tx_done(dev);
+ nv_tx_done(dev);
+ else
+ nv_tx_done_optimized(dev, np->tx_ring_size);
+ np->get_tx.orig = np->put_tx.orig = np->first_tx.orig;
else
- writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
-- netif_wake_queue(dev);
+ np->get_tx.ex = np->put_tx.ex = np->first_tx.ex;
+ np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx;
+ setup_hw_rings(dev, NV_SETUP_TX_RING);
+ netif_wake_queue(dev);
}
-+ netif_wake_queue(dev);
/* 4) restart tx engine */
nv_start_tx(dev);
-+
spin_unlock_irq(&np->lock);
+ nv_enable_irq(dev);
}
/*
-@@ -1338,41 +2783,23 @@
+@@ -1339,41 +2626,23 @@
}
}
-
- if (Flags & NV_RX_AVAIL)
- break; /* still owned by hardware, */
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ //dprintk(KERN_DEBUG "%s: nv_rx_process \n", dev->name);
+ while((np->get_rx.orig != np->put_rx.orig) &&
+ !((Flags = le32_to_cpu(np->get_rx.orig->FlagLen)) & NV_RX_AVAIL)) {
+
{
int j;
-@@ -1380,112 +2807,197 @@
+@@ -1381,112 +2650,198 @@
for (j=0; j<64; j++) {
if ((j%16) == 0)
dprintk("\n%03x:", j);
+ struct sk_buff *skb;
+ int len;
+
++// dprintk(KERN_DEBUG "%s: nv_rx_process_optimized \n", dev->name);
+ while((np->get_rx.ex != np->put_rx.ex) &&
+ !((Flags = le32_to_cpu(np->get_rx.ex->FlagLen)) & NV_RX2_AVAIL) &&
+ (rx_processed_cnt++ < max_work)) {
if (dev->mtu <= ETH_DATA_LEN)
np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
-@@ -1499,7 +3011,7 @@
+@@ -1500,7 +2855,7 @@
*/
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
int old_mtu;
if (new_mtu < 64 || new_mtu > np->pkt_limit)
-@@ -1523,8 +3035,12 @@
+@@ -1524,7 +2879,7 @@
* guessed, there is probably a simpler approach.
* Changing the MTU is a rare event, it shouldn't matter.
*/
- disable_irq(dev->irq);
+ nv_disable_irq(dev);
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
spin_lock_bh(&dev->xmit_lock);
-+#endif
spin_lock(&np->lock);
/* stop engines */
- nv_stop_rx(dev);
-@@ -1534,22 +3050,15 @@
+@@ -1535,22 +2890,15 @@
nv_drain_rx(dev);
nv_drain_tx(dev);
/* reinit driver view of the rx queue */
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
-@@ -1559,8 +3068,12 @@
- nv_start_rx(dev);
+@@ -1561,7 +2909,7 @@
nv_start_tx(dev);
spin_unlock(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
spin_unlock_bh(&dev->xmit_lock);
- enable_irq(dev->irq);
-+#endif
+ nv_enable_irq(dev);
}
return 0;
}
-@@ -1571,11 +3084,11 @@
- u32 mac[2];
-
- mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
-- (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
-+ (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
- mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
--
- writel(mac[0], base + NvRegMacAddrA);
- writel(mac[1], base + NvRegMacAddrB);
-+
- }
-
- /*
-@@ -1584,17 +3097,22 @@
+@@ -1585,12 +2933,13 @@
*/
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
if(!is_valid_ether_addr(macaddr->sa_data))
return -EADDRNOTAVAIL;
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_set_mac_address \n", dev->name);
/* synchronized against open : rtnl_lock() held by caller */
memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
- if (netif_running(dev)) {
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
- spin_lock_bh(&dev->xmit_lock);
-+#endif
- spin_lock_irq(&np->lock);
-
- /* stop rx engine */
-@@ -1606,7 +3124,11 @@
- /* restart rx engine */
- nv_start_rx(dev);
- spin_unlock_irq(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
- spin_unlock_bh(&dev->xmit_lock);
-+#endif
- } else {
- nv_copy_mac_to_hw(dev);
- }
-@@ -1619,20 +3141,20 @@
+@@ -1620,20 +2969,20 @@
*/
static void nv_set_multicast(struct net_device *dev)
{
if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
u32 alwaysOff[2];
-@@ -1677,6 +3199,35 @@
+@@ -1678,6 +3027,35 @@
spin_unlock_irq(&np->lock);
}
/**
* nv_update_linkspeed: Setup the MAC according to the link partner
* @dev: Network device to be configured
-@@ -1690,14 +3241,16 @@
+@@ -1691,14 +3069,16 @@
*/
static int nv_update_linkspeed(struct net_device *dev)
{
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
-@@ -1714,7 +3267,7 @@
+@@ -1715,7 +3095,7 @@
goto set_speed;
}
dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
dev->name, np->fixed_mode);
if (np->fixed_mode & LPA_100FULL) {
-@@ -1743,10 +3296,14 @@
+@@ -1744,10 +3124,14 @@
goto set_speed;
}
if ((control_1000 & ADVERTISE_1000FULL) &&
(status_1000 & LPA_1000FULL)) {
-@@ -1758,27 +3315,22 @@
+@@ -1759,27 +3143,22 @@
}
}
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
}
-@@ -1815,12 +3367,71 @@
+@@ -1816,13 +3195,72 @@
phyreg |= PHY_1000;
writel(phyreg, base + NvRegPhyInterface);
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
+- return retval;
+ pause_flags = 0;
+ /* setup pause frame */
+ if (np->duplex != 0) {
+ }
+ nv_update_pause(dev, pause_flags);
+
- return retval;
++ return retval;
}
-@@ -1858,24 +3469,28 @@
+ static void nv_linkchange(struct net_device *dev)
+@@ -1859,7 +3297,7 @@
static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
-- u32 events;
-+ u32 events,mask;
+ u32 events;
int i;
-
-- dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
+@@ -1867,16 +3305,19 @@
+ dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
for (i=0; ; i++) {
- events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+ }
pci_push(base);
dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
-- if (!(events & np->irqmask))
-+ mask = readl(base + NvRegIrqMask);
-+ if (!(events & mask))
+ if (!(events & np->irqmask))
break;
- spin_lock(&np->lock);
nv_rx_process(dev);
if (nv_alloc_rx(dev)) {
-@@ -1907,11 +3522,16 @@
+@@ -1908,11 +3349,16 @@
if (i > max_interrupt_work) {
spin_lock(&np->lock);
/* disable interrupts on the nic */
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
spin_unlock(&np->lock);
break;
-@@ -1923,310 +3543,1950 @@
+@@ -1924,285 +3370,1749 @@
return IRQ_RETVAL(i);
}
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
-+ u32 events,mask;
++ u32 events;
+ int i = 1;
- disable_irq(dev->irq);
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+ }
++ if (events & np->irqmask) {
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void nv_poll_controller(struct net_device *dev)
- nv_do_nic_poll((unsigned long) dev);
-}
-#endif
-+ mask = readl(base + NvRegIrqMask);
-+ if (events & mask) {
++ nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
-static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, "forcedeth");
- strcpy(info->version, FORCEDETH_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
-+ nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
-+
+ if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
+ if (unlikely(nv_alloc_rx_optimized(dev))) {
+ spin_lock(&np->lock);
- if (np->wolenabled)
- wolinfo->wolopts = WAKE_MAGIC;
- spin_unlock_irq(&np->lock);
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
+
+ for (i=0; ; i++) {
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
+ u32 events;
+ int i;
+
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
+
+ for (i=0; ; i++) {
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
- switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
- case NVREG_LINKSPEED_10:
- ecmd->speed = SPEED_10;
-+ dprintk("%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
+
+ for (i=0; ; i++) {
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
- SUPPORTED_MII);
- if (np->gigabit == PHY_GIGABIT)
- ecmd->supported |= SUPPORTED_1000baseT_Full;
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
- ecmd->phy_address = np->phyaddr;
- ecmd->transceiver = XCVR_EXTERNAL;
- if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
- return -EINVAL;
- if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+- return -EINVAL;
+- } else {
+- return -EINVAL;
+- }
+ if (np->msi_flags & NV_MSI_X_CAPABLE) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ np->msi_x_entry[i].entry = i;
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
-+
+
+- spin_lock_irq(&np->lock);
+- if (ecmd->autoneg == AUTONEG_ENABLE) {
+- int adv, bmcr;
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ goto out_err;
+ }
-+
+
+- np->autoneg = 1;
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIMap0);
+ writel(0, base + NvRegMSIMap1);
+ np->recover_error = 0;
+ printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
+ if (netif_running(dev)) {
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
+ spin_lock_bh(&dev->xmit_lock);
-+#endif
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ spin_unlock(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
+ spin_unlock_bh(&dev->xmit_lock);
-+#endif
+ }
+ }
+ /* FIXME: Do we need synchronize_irq(dev->irq) here? */
+ spin_lock_irq(&np->lock);
+
+ np->estats.tx_dropped = np->stats.tx_dropped;
-+ if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
++ if (np->driver_data & DEV_HAS_STATISTICS) {
++ np->estats.tx_packets += readl(base + NvRegTxFrame);
+ np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
+ np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
+ np->estats.tx_bytes += readl(base + NvRegTxCnt);
++ np->estats.rx_bytes += readl(base + NvRegRxCnt);
+ np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
+ np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
++
+ np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
+ np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
+ np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
+ np->estats.rx_unicast += readl(base + NvRegRxUnicast);
+ np->estats.rx_multicast += readl(base + NvRegRxMulticast);
+ np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
++ np->estats.tx_deferral += readl(base + NvRegTxDef);
++ np->estats.tx_pause += readl(base + NvRegTxPause);
++ np->estats.rx_pause += readl(base + NvRegRxPause);
++ np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
+ np->estats.rx_packets =
+ np->estats.rx_unicast +
+ np->estats.rx_multicast +
+ np->estats.rx_frame_too_long +
+ np->rx_len_errors;
+
-+ if (np->driver_data & DEV_HAS_STATISTICS_V2) {
-+ np->estats.tx_deferral += readl(base + NvRegTxDef);
-+ np->estats.tx_packets += readl(base + NvRegTxFrame);
-+ np->estats.rx_bytes += readl(base + NvRegRxCnt);
-+ np->estats.tx_pause += readl(base + NvRegTxPause);
-+ np->estats.rx_pause += readl(base + NvRegRxPause);
-+ np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
-+ }
-+
+ /* copy to net_device stats */
++ np->stats.tx_packets = np->estats.tx_packets;
+ np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
+ np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
+ np->stats.tx_bytes = np->estats.tx_bytes;
++ np->stats.rx_bytes = np->estats.rx_bytes;
+ np->stats.rx_crc_errors = np->estats.rx_crc_errors;
+ np->stats.rx_over_errors = np->estats.rx_over_errors;
+ np->stats.rx_packets = np->estats.rx_packets;
+ mask |= ADVERTISED_1000baseT_Full;
+
+ if ((ecmd->advertising & mask) == 0)
- return -EINVAL;
++ return -EINVAL;
+
+ } else if (ecmd->autoneg == AUTONEG_DISABLE) {
+ /* Note: autonegotiation disable, speed 1000 intentionally
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
+ spin_lock_bh(&dev->xmit_lock);
-+#endif
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ spin_unlock(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
+ spin_unlock_bh(&dev->xmit_lock);
-+#endif
+ }
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ /* advertise only what has been requested */
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-+ if (ecmd->advertising & ADVERTISED_10baseT_Half) {
++ if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
-+ np->speed_duplex = NV_SPEED_DUPLEX_10_HALF_DUPLEX;
-+ }
-+ if (ecmd->advertising & ADVERTISED_10baseT_Full) {
++ if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
-+ np->speed_duplex = NV_SPEED_DUPLEX_10_FULL_DUPLEX;
-+ }
-+ if (ecmd->advertising & ADVERTISED_100baseT_Half) {
++ if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
-+ np->speed_duplex = NV_SPEED_DUPLEX_100_HALF_DUPLEX;
-+ }
-+ if (ecmd->advertising & ADVERTISED_100baseT_Full) {
++ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
-+ np->speed_duplex = NV_SPEED_DUPLEX_100_FULL_DUPLEX;
-+ }
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ if (np->gigabit == PHY_GIGABIT) {
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+ adv &= ~ADVERTISE_1000FULL;
-+ if (ecmd->advertising & ADVERTISED_1000baseT_Full) {
++ if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+ adv |= ADVERTISE_1000FULL;
-+ np->speed_duplex = NV_SPEED_DUPLEX_1000_FULL_DUPLEX;
-+ }
+ mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
-+
-+ if (ecmd->advertising & (ADVERTISED_10baseT_Half|ADVERTISED_10baseT_Full|ADVERTISED_100baseT_Half|ADVERTISED_100baseT_Full|ADVERTISED_1000baseT_Full))
-+ np->speed_duplex = NV_SPEED_DUPLEX_AUTO;
-+ } else {
-+ if (ecmd->advertising & (ADVERTISED_10baseT_Half|ADVERTISED_10baseT_Full|ADVERTISED_100baseT_Half|ADVERTISED_100baseT_Full))
-+ np->speed_duplex = NV_SPEED_DUPLEX_AUTO;
-+ }
++ }
+
+ if (netif_running(dev))
+ printk(KERN_INFO "%s: link down.\n", dev->name);
+
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-+ if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) {
++ if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
+ adv |= ADVERTISE_10HALF;
-+ np->speed_duplex = NV_SPEED_DUPLEX_10_HALF_DUPLEX;
-+ }
-+ if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) {
++ if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
+ adv |= ADVERTISE_10FULL;
-+ np->speed_duplex = NV_SPEED_DUPLEX_10_FULL_DUPLEX;
-+ }
-+ if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) {
++ if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
+ adv |= ADVERTISE_100HALF;
-+ np->speed_duplex = NV_SPEED_DUPLEX_100_HALF_DUPLEX;
-+ }
-+ if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) {
++ if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
+ adv |= ADVERTISE_100FULL;
-+ np->speed_duplex = NV_SPEED_DUPLEX_100_FULL_DUPLEX;
-+ }
+ np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
+ spin_lock_bh(&dev->xmit_lock);
-+#endif
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ spin_unlock(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
+ spin_unlock_bh(&dev->xmit_lock);
-+#endif
+ printk(KERN_INFO "%s: link down.\n", dev->name);
+ }
+
+ u8 __iomem *base = get_hwbase(dev);
+ u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
+ dma_addr_t ring_addr;
-+
+
+- /* advertise only what has been requested */
+- adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+- if (ecmd->advertising & ADVERTISED_10baseT_Half)
+- adv |= ADVERTISE_10HALF;
+- if (ecmd->advertising & ADVERTISED_10baseT_Full)
+- adv |= ADVERTISE_10FULL;
+- if (ecmd->advertising & ADVERTISED_100baseT_Half)
+- adv |= ADVERTISE_100HALF;
+- if (ecmd->advertising & ADVERTISED_100baseT_Full)
+- adv |= ADVERTISE_100FULL;
+- mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+ if (ring->rx_pending < RX_RING_MIN ||
+ ring->tx_pending < TX_RING_MIN ||
+ ring->rx_mini_pending != 0 ||
+ ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
+ return -EINVAL;
+ }
-+
+
+- if (np->gigabit == PHY_GIGABIT) {
+- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+- adv &= ~ADVERTISE_1000FULL;
+- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+- adv |= ADVERTISE_1000FULL;
+- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+ /* allocate new rings */
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ rxtx_ring = pci_alloc_consistent(np->pci_dev,
+
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
+ spin_lock_bh(&dev->xmit_lock);
-+#endif
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ if (nv_init_ring(dev)) {
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-+ }
+ }
+
+ /* reinit nic view of the queues */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ spin_unlock(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
+ spin_unlock_bh(&dev->xmit_lock);
-+#endif
+ nv_enable_irq(dev);
+ }
+ return 0;
+exit:
+ return -ENOMEM;
+}
-+
+
+static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
+ spin_lock_bh(&dev->xmit_lock);
-+#endif
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ spin_unlock(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
+ spin_unlock_bh(&dev->xmit_lock);
-+#endif
+ }
+
+ np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
+
+ if (netif_running(dev))
+ printk(KERN_INFO "%s: link down.\n", dev->name);
-+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ } else {
+ np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
+ if (pause->rx_pause)
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int retcode = 0;
-+
+
+ if (np->driver_data & DEV_HAS_CHECKSUM) {
+
+ if (data) {
+ spin_unlock_irq(&np->lock);
+ }
} else {
- return -EINVAL;
- }
+- int adv, bmcr;
++ return -EINVAL;
++ }
+- np->autoneg = 0;
+ return retcode;
+}
-+
-+#ifdef NETIF_F_TSO
-+static int nv_set_tso(struct net_device *dev, u32 data)
-+{
-+ struct fe_priv *np = get_nvpriv(dev);
+
+- adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+- if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
+- adv |= ADVERTISE_10HALF;
+- if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
+- adv |= ADVERTISE_10FULL;
+- if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
+- adv |= ADVERTISE_100HALF;
+- if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
+- adv |= ADVERTISE_100FULL;
+- mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+- np->fixed_mode = adv;
++#ifdef NETIF_F_TSO
++static int nv_set_tso(struct net_device *dev, u32 data)
++{
++ struct fe_priv *np = get_nvpriv(dev);
+
+ if (np->driver_data & DEV_HAS_CHECKSUM){
+#if NVVER < SUSE10
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
-+ if (np->driver_data & DEV_HAS_STATISTICS_V1)
-+ return NV_DEV_STATISTICS_V1_COUNT;
-+ else if (np->driver_data & DEV_HAS_STATISTICS_V2)
-+ return NV_DEV_STATISTICS_V2_COUNT;
-+ else
-+ return NV_DEV_STATISTICS_SW_COUNT;
++ if (np->driver_data & DEV_HAS_STATISTICS)
++ return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
++ else
++ return NV_STATS_COUNT_SW;
+}
+
+static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+ /* wait for at least one interrupt */
+ nv_msleep(100);
+
- spin_lock_irq(&np->lock);
-- if (ecmd->autoneg == AUTONEG_ENABLE) {
-- int adv, bmcr;
-
-- np->autoneg = 1;
++ spin_lock_irq(&np->lock);
++
+ /* flag should be set within ISR */
+ testcnt = np->intr_test;
+ if (!testcnt)
+ ret = 2;
-
-- /* advertise only what has been requested */
-- adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
-- if (ecmd->advertising & ADVERTISED_10baseT_Half)
-- adv |= ADVERTISE_10HALF;
-- if (ecmd->advertising & ADVERTISED_10baseT_Full)
-- adv |= ADVERTISE_10FULL;
-- if (ecmd->advertising & ADVERTISED_100baseT_Half)
-- adv |= ADVERTISE_100HALF;
-- if (ecmd->advertising & ADVERTISED_100baseT_Full)
-- adv |= ADVERTISE_100FULL;
-- mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
++
+ nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED))
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ else
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
-
-- if (np->gigabit == PHY_GIGABIT) {
-- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
-- adv &= ~ADVERTISE_1000FULL;
-- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
-- adv |= ADVERTISE_1000FULL;
-- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
-- }
++
+ spin_unlock_irq(&np->lock);
-
-- bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
++
+ nv_free_irq(dev);
+
+ np->msi_flags = save_msi_flags;
+ if (nv_request_irq(dev, 0))
+ return 0;
+ }
-
++
+ return ret;
+}
+
+ u32 misc1_flags = 0;
+ int ret = 1;
+
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
-+
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ filter_flags = readl(base + NvRegPacketFilterFlags);
+ misc1_flags = readl(base + NvRegMisc1);
- } else {
-- int adv, bmcr;
++ } else {
+ nv_txrx_reset(dev);
+ }
-
-- np->autoneg = 0;
++
+ /* reinit driver view of the rx queue */
+ set_bufsize(dev);
+ nv_init_ring(dev);
-
-- adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
-- if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
-- adv |= ADVERTISE_10HALF;
-- if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
-- adv |= ADVERTISE_10FULL;
-- if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
-- adv |= ADVERTISE_100HALF;
-- if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
-- adv |= ADVERTISE_100FULL;
-- mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
-- np->fixed_mode = adv;
++
+ /* setup hardware for loopback */
+ writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
+ writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
-
-- if (np->gigabit == PHY_GIGABIT) {
-- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
-- adv &= ~ADVERTISE_1000FULL;
-- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
-- }
++
+ /* reinit nic view of the rx queue */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
-
-- bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-- bmcr |= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
-- if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
-- bmcr |= BMCR_FULLDPLX;
-- if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
-- bmcr |= BMCR_SPEED100;
-- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
++
+ /* restart rx engine */
+ nv_start_rx(dev);
+ nv_start_tx(dev);
-- if (netif_running(dev)) {
-- /* Wait a bit and then reconfigure the nic. */
-- udelay(10);
-- nv_linkchange(dev);
+- if (np->gigabit == PHY_GIGABIT) {
+- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+- adv &= ~ADVERTISE_1000FULL;
+- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+- }
+ /* setup packet for tx */
+ pkt_len = ETH_DATA_LEN;
+ tx_skb = dev_alloc_skb(pkt_len);
+ pkt_data[i] = (u8)(i & 0xff);
+ test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
+ tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
-+
+
+- bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+- bmcr |= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
+- if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
+- bmcr |= BMCR_FULLDPLX;
+- if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
+- bmcr |= BMCR_SPEED100;
+- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
+ np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ }
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(get_hwbase(dev));
-+
+
+- if (netif_running(dev)) {
+- /* Wait a bit and then reconfigure the nic. */
+- udelay(10);
+- nv_linkchange(dev);
+ nv_msleep(500);
+
+ /* check for rx of the packet */
- spin_unlock_irq(&np->lock);
- return 0;
+-}
+ if (ret) {
+ if (len != pkt_len) {
+ ret = 0;
+ } else {
+ dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
+ }
-+
+
+-#define FORCEDETH_REGS_VER 1
+-#define FORCEDETH_REGS_SIZE 0x400 /* 256 32-bit registers */
+ pci_unmap_page(np->pci_dev, test_dma_addr,
+ tx_skb->end-tx_skb->data,
+ PCI_DMA_TODEVICE);
+ /* drain rx queue */
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
-+
+
+-static int nv_get_regs_len(struct net_device *dev)
+-{
+- return FORCEDETH_REGS_SIZE;
+ if (netif_running(dev)) {
+ writel(misc1_flags, base + NvRegMisc1);
+ writel(filter_flags, base + NvRegPacketFilterFlags);
+ return ret;
}
--#define FORCEDETH_REGS_VER 1
--#define FORCEDETH_REGS_SIZE 0x400 /* 256 32-bit registers */
+-static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
-+{
+ {
+- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
-+ u8 __iomem *base = get_hwbase(dev);
+ u8 __iomem *base = get_hwbase(dev);
+- u32 *rbuf = buf;
+- int i;
+ int result;
+ memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
--static int nv_get_regs_len(struct net_device *dev)
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
-+
+- regs->version = FORCEDETH_REGS_VER;
+- spin_lock_irq(&np->lock);
+- for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
+- rbuf[i] = readl(base + i*sizeof(u32));
+- spin_unlock_irq(&np->lock);
+-}
+ if (!nv_link_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[0] = 1;
+ }
-+
+
+-static int nv_nway_reset(struct net_device *dev)
+-{
+- struct fe_priv *np = netdev_priv(dev);
+- int ret;
+ if (test->flags & ETH_TEST_FL_OFFLINE) {
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
-+#if NVVER > FEDORA5
-+ netif_tx_lock_bh(dev);
-+#else
+ spin_lock_bh(&dev->xmit_lock);
-+#endif
+ spin_lock_irq(&np->lock);
+ nv_disable_hw_interrupts(dev, np->irqmask);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
+ spin_unlock_irq(&np->lock);
-+#if NVVER > FEDORA5
-+ netif_tx_unlock_bh(dev);
-+#else
+ spin_unlock_bh(&dev->xmit_lock);
-+#endif
+ }
-+
+
+- spin_lock_irq(&np->lock);
+- if (np->autoneg) {
+- int bmcr;
+ if (!nv_register_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[1] = 1;
+ }
-+
+
+- bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ result = nv_interrupt_test(dev);
+ if (result != 1) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ /* bail out */
+ return;
+ }
-+
+
+- ret = 0;
+- } else {
+- ret = -EINVAL;
+ if (!nv_loopback_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[3] = 1;
+ netif_start_queue(dev);
+ nv_enable_hw_interrupts(dev, np->irqmask);
+ }
-+ }
+ }
+- spin_unlock_irq(&np->lock);
+}
-+
+
+- return ret;
+static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
- {
-- return FORCEDETH_REGS_SIZE;
++{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
+ }
}
--static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
-+static struct ethtool_ops ops = {
-+ .get_drvinfo = nv_get_drvinfo,
-+ .get_link = ethtool_op_get_link,
-+ .get_wol = nv_get_wol,
-+ .set_wol = nv_set_wol,
-+ .get_settings = nv_get_settings,
-+ .set_settings = nv_set_settings,
-+ .get_regs_len = nv_get_regs_len,
-+ .get_regs = nv_get_regs,
-+ .nway_reset = nv_nway_reset,
+ static struct ethtool_ops ops = {
+@@ -2215,68 +5125,175 @@
+ .get_regs_len = nv_get_regs_len,
+ .get_regs = nv_get_regs,
+ .nway_reset = nv_nway_reset,
+#if NVVER > SUSE10
-+ .get_perm_addr = ethtool_op_get_perm_addr,
+ .get_perm_addr = ethtool_op_get_perm_addr,
+#endif
+ .get_ringparam = nv_get_ringparam,
+ .set_ringparam = nv_set_ringparam,
+};
+
+static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
- {
-- struct fe_priv *np = netdev_priv(dev);
-- u8 __iomem *base = get_hwbase(dev);
-- u32 *rbuf = buf;
-- int i;
++{
+ struct fe_priv *np = get_nvpriv(dev);
-
-- regs->version = FORCEDETH_REGS_VER;
- spin_lock_irq(&np->lock);
-- for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
-- rbuf[i] = readl(base + i*sizeof(u32));
++
++ spin_lock_irq(&np->lock);
+
+ /* save vlan group */
+ np->vlangrp = grp;
+ if (grp) {
+ /* enable vlan on MAC */
+ np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
-+ /* vlan is dependent on rx checksum */
+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+ } else {
+ /* disable vlan on MAC */
+
+ writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+
- spin_unlock_irq(&np->lock);
--}
++ spin_unlock_irq(&np->lock);
+};
-
--static int nv_nway_reset(struct net_device *dev)
++
+static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ /* nothing to do */
-+};
-+
+ };
+
+/* The mgmt unit and driver use a semaphore to access the phy during init */
+static int nv_mgmt_acquire_sema(struct net_device *dev)
- {
-- struct fe_priv *np = netdev_priv(dev);
-- int ret;
++{
+ u8 __iomem *base = get_hwbase(dev);
+ int i;
+ u32 tx_ctrl, mgmt_sema;
-
-- spin_lock_irq(&np->lock);
-- if (np->autoneg) {
-- int bmcr;
++
+ for (i = 0; i < 10; i++) {
+ mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
+ if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) {
+ }
+ nv_msleep(500);
+ }
-
-- bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
++
+ if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) {
+ dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: sema is not free\n");
+ return 0;
+ }
-
-- ret = 0;
-- } else {
-- ret = -EINVAL;
++
+ for (i = 0; i < 2; i++) {
+ tx_ctrl = readl(base + NvRegTransmitterControl);
+ tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
+ return 1;
+ } else
+ udelay(50);
- }
-- spin_unlock_irq(&np->lock);
-
-- return ret;
++ }
++
+ dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: exit\n");
+ return 0;
- }
-
--static struct ethtool_ops ops = {
-- .get_drvinfo = nv_get_drvinfo,
-- .get_link = ethtool_op_get_link,
-- .get_wol = nv_get_wol,
-- .set_wol = nv_set_wol,
-- .get_settings = nv_get_settings,
-- .set_settings = nv_set_settings,
-- .get_regs_len = nv_get_regs_len,
-- .get_regs = nv_get_regs,
-- .nway_reset = nv_nway_reset,
-- .get_perm_addr = ethtool_op_get_perm_addr,
--};
--
++}
++
++/* Indicate to mgmt unit whether driver is loaded or not */
++static void nv_mgmt_driver_loaded(struct net_device *dev, int loaded)
++{
++ u8 __iomem *base = get_hwbase(dev);
++ u32 tx_ctrl;
++
++ tx_ctrl = readl(base + NvRegTransmitterControl);
++ if (loaded)
++ tx_ctrl |= NVREG_XMITCTL_HOST_LOADED;
++ else
++ tx_ctrl &= ~NVREG_XMITCTL_HOST_LOADED;
++ writel(tx_ctrl, base + NvRegTransmitterControl);
++}
++
static int nv_open(struct net_device *dev)
{
- struct fe_priv *np = netdev_priv(dev);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
writel(0, base + NvRegMulticastMaskA);
-@@ -2238,44 +5498,44 @@
+ writel(0, base + NvRegMulticastMaskB);
+ writel(0, base + NvRegPacketFilterFlags);
+
+- writel(0, base + NvRegTransmitterControl);
+- writel(0, base + NvRegReceiverControl);
++ nv_stop_tx(dev);
++ nv_stop_rx(dev);
writel(0, base + NvRegAdapterControl);
set_bufsize(dev);
oom = nv_init_ring(dev);
- writel(0, base + NvRegLinkSpeed);
+- writel(0, base + NvRegLinkSpeed);
- writel(0, base + NvRegUnknownTransmitterReg);
-+ writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
nv_txrx_reset(dev);
writel(0, base + NvRegUnknownSetupReg6);
writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
-@@ -2284,8 +5544,8 @@
+@@ -2285,8 +5302,8 @@
writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
get_random_bytes(&i, sizeof(i));
writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
if (poll_interval == -1) {
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
-@@ -2298,8 +5558,9 @@
+@@ -2299,8 +5316,9 @@
writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
base + NvRegAdapterControl);
writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
i = readl(base + NvRegPowerState);
if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
-@@ -2309,18 +5570,18 @@
+@@ -2310,18 +5328,18 @@
udelay(10);
writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
-@@ -2347,11 +5608,15 @@
+@@ -2348,11 +5366,15 @@
if (ret) {
netif_carrier_on(dev);
} else {
spin_unlock_irq(&np->lock);
return 0;
-@@ -2362,16 +5627,23 @@
+@@ -2363,16 +5385,23 @@
static int nv_close(struct net_device *dev)
{
netif_stop_queue(dev);
spin_lock_irq(&np->lock);
-@@ -2381,25 +5653,19 @@
+@@ -2382,25 +5411,19 @@
/* disable interrupts on the nic or we will lock up */
base = get_hwbase(dev);
/* FIXME: power down nic */
return 0;
-@@ -2412,13 +5678,18 @@
+@@ -2413,13 +5436,19 @@
unsigned long addr;
u8 __iomem *base;
int err, i;
+ u32 powerstate, phystate_orig = 0, phystate, txreg;
+ int phyinitialized = 0;
++ //NVLAN_DISABLE_ALL_FEATURES ;
+ /* modify network device class id */
+ quirk_nforce_network_class(pci_dev);
dev = alloc_etherdev(sizeof(struct fe_priv));
goto out;
- np = netdev_priv(dev);
-+ dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
++ dprintk(KERN_DEBUG "%s:nv_probe: begin\n",dev->name);
+ np = get_nvpriv(dev);
np->pci_dev = pci_dev;
spin_lock_init(&np->lock);
SET_MODULE_OWNER(dev);
-@@ -2430,6 +5701,9 @@
+@@ -2431,6 +5460,9 @@
init_timer(&np->nic_poll);
np->nic_poll.data = (unsigned long) dev;
np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
err = pci_enable_device(pci_dev);
if (err) {
-@@ -2444,15 +5718,22 @@
+@@ -2445,6 +5477,11 @@
if (err < 0)
goto out_disable;
-+ if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
-+ np->register_size = NV_PCI_REGSZ_VER3;
-+ else if (id->driver_data & DEV_HAS_STATISTICS_V1)
++ if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
+ np->register_size = NV_PCI_REGSZ_VER2;
+ else
+ np->register_size = NV_PCI_REGSZ_VER1;
err = -EINVAL;
addr = 0;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
- pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
-- pci_resource_len(pci_dev, i),
-- pci_resource_flags(pci_dev, i));
-+ (long)pci_resource_len(pci_dev, i),
-+ (long)pci_resource_flags(pci_dev, i));
+@@ -2453,7 +5490,7 @@
+ pci_resource_len(pci_dev, i),
+ pci_resource_flags(pci_dev, i));
if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
- pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) {
+ pci_resource_len(pci_dev, i) >= np->register_size) {
addr = pci_resource_start(pci_dev, i);
break;
}
-@@ -2463,17 +5744,29 @@
+@@ -2464,17 +5501,29 @@
goto out_relreg;
}
} else if (id->driver_data & DEV_HAS_LARGEDESC) {
/* packet format 2: supports jumbo frames */
np->desc_ver = DESC_VER_2;
-@@ -2487,49 +5780,153 @@
+@@ -2488,49 +5537,149 @@
np->pkt_limit = NV_PKTLIMIT_1;
if (id->driver_data & DEV_HAS_LARGEDESC)
np->pkt_limit = NV_PKTLIMIT_2;
+ printk(KERN_INFO "forcedeth: speed_duplex of 1000 full can not enabled if autoneg is disabled\n");
+ goto out_relreg;
+ }
-+
-+ /* save phy config */
-+ np->autoneg = autoneg;
-+ np->speed_duplex = speed_duplex;
+
err = -ENOMEM;
- np->base = ioremap(addr, NV_PCI_REGSZ);
SET_ETHTOOL_OPS(dev, &ops);
dev->tx_timeout = nv_tx_timeout;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
-@@ -2541,15 +5938,37 @@
+@@ -2542,15 +5691,36 @@
np->orig_mac[0] = readl(base + NvRegMacAddrA);
np->orig_mac[1] = readl(base + NvRegMacAddrB);
-- dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
-- dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
-- dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
-- dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
-- dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
-- dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ /* check the workaround bit for correct mac address order */
+ txreg = readl(base + NvRegTransmitPoll);
-+ if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) ||
-+ (id->driver_data & DEV_HAS_CORRECT_MACADDR)) {
++ if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
+ /* mac address is already in correct order */
+ dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ } else {
-+ dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
-+ dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
-+ dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
-+ dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
-+ dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
-+ dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
+ dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ /* set permanent address to be correct aswell */
+ np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+ (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
/*
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
-@@ -2568,20 +5987,41 @@
+@@ -2569,22 +5739,43 @@
dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ np->msi_flags |= 0x0001;
+ }
- if (id->driver_data & DEV_NEED_TIMERIRQ)
+- if (id->driver_data & DEV_NEED_TIMERIRQ_ORIG)
++ if (id->driver_data & DEV_NEED_TIMERIRQ)
np->irqmask |= NVREG_IRQ_TIMER;
-@@ -2594,6 +6034,41 @@
+ if (id->driver_data & DEV_NEED_LINKTIMER) {
+ dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
+@@ -2595,6 +5786,59 @@
np->need_linktimer = 0;
}
+ writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+
+ if (id->driver_data & DEV_HAS_MGMT_UNIT) {
++ writel(NV_UNKNOWN_VAL, base + NvRegPatternCRC);
++ pci_push(base);
++ nv_msleep(500);
+ /* management unit running on the mac? */
-+ if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
-+ np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
-+ dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
-+ for (i = 0; i < 5000; i++) {
-+ nv_msleep(1);
-+ if (nv_mgmt_acquire_sema(dev)) {
-+ /* management unit setup the phy already? */
-+ if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
-+ NVREG_XMITCTL_SYNC_PHY_INIT) {
-+ if(np->mac_in_use){
-+ /* phy is inited by mgmt unit */
-+ phyinitialized = 1;
-+ dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
-+ }
-+ } else {
-+ /* we need to init the phy */
++ np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
++ if (np->mac_in_use) {
++ u32 mgmt_sync;
++ dprintk(KERN_DEBUG "%s: probe: mac in use\n",dev->name);
++ /* management unit setup the phy already? */
++ mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
++ if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) {
++ dprintk(KERN_DEBUG"%s : probe: sync not ready\n",dev->name);
++ if (!nv_mgmt_acquire_sema(dev)) {
++ dprintk(KERN_DEBUG"%s: probe: could not acquire sema\n",dev->name);
++ for (i = 0; i < 5000; i++) {
++ nv_msleep(1);
++ mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
++ if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY)
++ continue;
++ if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) {
++ dprintk(KERN_DEBUG"%s: probe: phy inited by SMU 1\n",dev->name);
++ phyinitialized = 1;
+ }
+ break;
++ dprintk(KERN_DEBUG"%s: probe: breaking out of loop\n",dev->name);
+ }
++ } else {
++ /* we need to init the phy */
++ dprintk(KERN_DEBUG"%s: probe: we need to init phy 1\n",dev->name);
+ }
++ } else if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) {
++ dprintk(KERN_DEBUG"%s: probe: phy inited by SMU 2\n",dev->name);
++ /* phy is inited by SMU */
++ phyinitialized = 1;
++ } else {
++ /* we need to init the phy */
++ dprintk(KERN_DEBUG"%s: probe: we need to init phy 2\n",dev->name);
+ }
++ } else
++ dprintk(KERN_DEBUG"%s: probe: mac not in use\n",dev->name);
+ }
+
/* find a suitable phy */
for (i = 1; i <= 32; i++) {
int id1, id2;
-@@ -2610,32 +6085,45 @@
+@@ -2611,6 +5855,7 @@
if (id2 < 0 || id2 == 0xffff)
continue;
id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
-- pci_name(pci_dev), id1, id2, phyaddr);
-+ pci_name(pci_dev), id1, id2, phyaddr);
- np->phyaddr = phyaddr;
- np->phy_oui = id1 | id2;
- break;
- }
+@@ -2622,21 +5867,32 @@
if (i == 33) {
printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
-- pci_name(pci_dev));
+ pci_name(pci_dev));
- goto out_freering;
-+ pci_name(pci_dev));
+ goto out_error;
}
-- /* reset it */
-- phy_init(dev);
+ if (!phyinitialized) {
-+ /* reset it */
-+ phy_init(dev);
+ /* reset it */
+ phy_init(dev);
+ } else {
-+ /* see if it is a gigabit phy */
++ /* see if gigabit phy */
+ u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ if (mii_status & PHY_GIGABIT) {
+ np->gigabit = PHY_GIGABIT;
+ }
+ }
-+
-+ if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011 && np->pci_dev->subsystem_vendor ==0x108E && np->pci_dev->subsystem_device==0x6676 ) {
-+ nv_LED_on(dev);
++ if (id->driver_data & DEV_HAS_MGMT_UNIT) {
++ nv_mgmt_driver_loaded(dev, 1);
+ }
/* set default link speed settings */
}
printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
-@@ -2643,14 +6131,12 @@
+@@ -2644,14 +5900,14 @@
return 0;
+out_error:
+ if (phystate_orig)
+ writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
++ if (np->mac_in_use)
++ nv_mgmt_driver_loaded(dev, 0);
pci_set_drvdata(pci_dev, NULL);
+out_freering:
+ free_rings(dev);
out_unmap:
iounmap(get_hwbase(dev));
out_relreg:
-@@ -2663,18 +6149,27 @@
- return err;
- }
-
-+#ifdef CONFIG_PM
-+static void nv_set_low_speed(struct net_device *dev);
-+#endif
+@@ -2667,15 +5923,20 @@
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
-+ if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011 && np->pci_dev->subsystem_vendor ==0x108E && np->pci_dev->subsystem_device==0x6676) {
-+ nv_LED_off(dev);
-+ }
unregister_netdev(dev);
+ /* special op: write back the misordered MAC address - otherwise
+ * the next nv_probe would see a wrong address.
+ */
-+ writel(np->orig_mac[0], base + NvRegMacAddrA);
-+ writel(np->orig_mac[1], base + NvRegMacAddrB);
++ writel(np->orig_mac[0], base + NvRegMacAddrA);
++ writel(np->orig_mac[1], base + NvRegMacAddrB);
++ if (np->mac_in_use)
++ nv_mgmt_driver_loaded(dev, 0);
/* free all structures */
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
iounmap(get_hwbase(dev));
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
-@@ -2713,65 +6208,471 @@
- },
- { /* CK804 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
-- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
- },
- { /* CK804 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
-- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
- },
- { /* MCP04 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
-- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
- },
- { /* MCP04 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
-- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
+@@ -2730,19 +5991,51 @@
},
{ /* MCP51 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
},
{ /* MCP51 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP67 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP67 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP67 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP67 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP73 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP73 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP73 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP73 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
-+ },
-+ { /* MCP77 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
-+ },
-+ { /* MCP77 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
-+ },
-+ { /* MCP77 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
-+ },
-+ { /* MCP77 Ethernet Controller */
-+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
-+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{0,},
};
-
--static struct pci_driver driver = {
-+#ifdef CONFIG_PM
-+static void nv_set_low_speed(struct net_device *dev)
-+{
-+ struct fe_priv *np = get_nvpriv(dev);
-+ int adv = 0;
-+ int lpa = 0;
-+ int adv_lpa, bmcr, tries = 0;
-+ int mii_status;
-+ u32 control_1000;
-+
-+ if (np->autoneg == 0 || ((np->linkspeed & 0xFFF) != NVREG_LINKSPEED_1000))
-+ return;
-+
-+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-+ lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
-+ control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
-+
-+ adv_lpa = lpa & adv;
-+
-+ if ((adv_lpa & LPA_10FULL) || (adv_lpa & LPA_10HALF)) {
-+ adv &= ~(ADVERTISE_100BASE4 | ADVERTISE_100FULL | ADVERTISE_100HALF);
-+ control_1000 &= ~(ADVERTISE_1000FULL|ADVERTISE_1000HALF);
-+ printk(KERN_INFO "forcedeth %s: set low speed to 10mbs\n",dev->name);
-+ } else if ((adv_lpa & LPA_100FULL) || (adv_lpa & LPA_100HALF)) {
-+ control_1000 &= ~(ADVERTISE_1000FULL|ADVERTISE_1000HALF);
-+ } else
-+ return;
-+
-+ /* set new advertisements */
-+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
-+ mii_rw(dev, np->phyaddr, MII_CTRL1000, control_1000);
-+
-+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
-+ bmcr |= BMCR_ANENABLE;
-+ /* reset the phy in order for settings to stick,
-+ * and cause autoneg to start */
-+ if (phy_reset(dev, bmcr)) {
-+ printk(KERN_INFO "%s: phy reset failed\n", dev->name);
-+ return;
-+ }
-+ } else {
-+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
-+ }
-+ mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
-+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
-+ while (!(mii_status & BMSR_ANEGCOMPLETE)) {
-+ nv_msleep(100);
-+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
-+ if (tries++ > 50)
-+ break;
-+ }
-+
-+ nv_update_linkspeed(dev);
-+
-+ return;
-+}
-+
-+static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
-+{
-+ struct net_device *dev = pci_get_drvdata(pdev);
-+ struct fe_priv *np = get_nvpriv(dev);
-+ u8 __iomem *base = get_hwbase(dev);
-+ int i;
-+
-+ dprintk(KERN_INFO "forcedeth: nv_suspend\n");
-+
-+ /* save msix table */
-+ {
-+ unsigned long phys_addr;
-+ void __iomem *base_addr;
-+ void __iomem *base;
-+ unsigned int bir,len;
-+ unsigned int i;
-+ int pos;
-+ u32 table_offset;
-+
-+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-+ pci_read_config_dword(pdev, pos+0x04 , &table_offset);
-+ bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
-+ table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
-+ phys_addr = pci_resource_start(pdev, bir) + table_offset;
-+ np->msix_pa_addr = phys_addr;
-+ len = NV_MSI_X_MAX_VECTORS * PCI_MSIX_ENTRY_SIZE;
-+ base_addr = ioremap_nocache(phys_addr, len);
-+
-+ for(i=0;i<NV_MSI_X_MAX_VECTORS;i++){
-+ base = base_addr + i*PCI_MSIX_ENTRY_SIZE;
-+ np->nvmsg[i].address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-+ np->nvmsg[i].address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET );
-+ np->nvmsg[i].data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
-+ }
-+
-+ iounmap(base_addr);
-+ }
-+
-+ nv_update_linkspeed(dev);
-+
-+ if (netif_running(dev)) {
-+ netif_device_detach(dev);
-+ /* bring down the adapter */
-+ nv_close(dev);
-+ }
-+
-+ /* set phy to a lower speed to conserve power */
-+ if((lowpowerspeed==NV_LOW_POWER_ENABLED)&&!np->mac_in_use)
-+ nv_set_low_speed(dev);
-+
-+#if NVVER > RHES4
-+ pci_save_state(pdev);
-+#else
-+ pci_save_state(pdev,np->pci_state);
-+#endif
-+ np->saved_nvregphyinterface= readl(base+NvRegPhyInterface);
-+ for(i=0;i<64;i++){
-+ pci_read_config_dword(pdev,i*4,&np->saved_config_space[i]);
-+ }
-+#if NVVER > RHES4
-+ pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
-+#else
-+ pci_enable_wake(pdev, state, np->wolenabled);
-+#endif
-+ pci_disable_device(pdev);
-+
-+#if NVVER > RHES4
-+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
-+#else
-+ pci_set_power_state(pdev, state);
-+#endif
-+
-+ return 0;
-+}
-+
-+static int nv_resume(struct pci_dev *pdev)
-+{
-+ struct net_device *dev = pci_get_drvdata(pdev);
-+ int rc = 0;
-+ struct fe_priv *np = get_nvpriv(dev);
-+ u8 __iomem *base = get_hwbase(dev);
-+ int i;
-+ u32 txreg;
-+
-+ dprintk(KERN_INFO "forcedeth: nv_resume\n");
-+
-+ pci_set_power_state(pdev, PCI_D0);
-+#if NVVER > RHES4
-+ pci_restore_state(pdev);
-+#else
-+ pci_restore_state(pdev,np->pci_state);
-+#endif
-+ for(i=0;i<64;i++){
-+ pci_write_config_dword(pdev,i*4,np->saved_config_space[i]);
-+ }
-+ pci_enable_device(pdev);
-+ pci_set_master(pdev);
-+
-+ txreg = readl(base + NvRegTransmitPoll);
-+ txreg |= NVREG_TRANSMITPOLL_MAC_ADDR_REV;
-+ writel(txreg, base + NvRegTransmitPoll);
-+ writel(np->saved_nvregphyinterface,base+NvRegPhyInterface);
-+ writel(np->orig_mac[0], base + NvRegMacAddrA);
-+ writel(np->orig_mac[1], base + NvRegMacAddrB);
-+
-+ /* restore msix table */
-+ {
-+ unsigned long phys_addr;
-+ void __iomem *base_addr;
-+ void __iomem *base;
-+ unsigned int len;
-+ unsigned int i;
-+
-+ len = NV_MSI_X_MAX_VECTORS * PCI_MSIX_ENTRY_SIZE;
-+ phys_addr = np->msix_pa_addr;
-+ base_addr = ioremap_nocache(phys_addr, len);
-+ for(i=0;i< NV_MSI_X_MAX_VECTORS;i++){
-+ base = base_addr + i*PCI_MSIX_ENTRY_SIZE;
-+ writel(np->nvmsg[i].address_lo,base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-+ writel(np->nvmsg[i].address_hi,base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
-+ writel(np->nvmsg[i].data,base + PCI_MSIX_ENTRY_DATA_OFFSET);
-+ }
-+
-+ iounmap(base_addr);
-+ }
-+
-+ if(lowpowerspeed==NV_LOW_POWER_ENABLED){
-+ /* re-initialize the phy */
-+ phy_init(dev);
-+ udelay(10);
-+ }
-+ /* bring up the adapter */
-+ if (netif_running(dev)){
-+ rc = nv_open(dev);
-+ }
-+ netif_device_attach(dev);
-+
-+ return rc;
-+}
-+
-+#endif /* CONFIG_PM */
-+static struct pci_driver nv_eth_driver = {
- .name = "forcedeth",
- .id_table = pci_tbl,
- .probe = nv_probe,
- .remove = __devexit_p(nv_remove),
-+#ifdef CONFIG_PM
-+ .suspend = nv_suspend,
-+ .resume = nv_resume,
-+#endif
- };
-
-+#ifdef CONFIG_PM
-+static int nv_reboot_handler(struct notifier_block *nb, unsigned long event, void *p)
-+{
-+ struct pci_dev *pdev = NULL;
-+ pm_message_t state = { PM_EVENT_SUSPEND };
-+
-+ switch (event)
-+ {
-+ case SYS_POWER_OFF:
-+ case SYS_HALT:
-+ case SYS_DOWN:
-+ while ((pdev = pci_find_device(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, pdev)) != NULL) {
-+ if (pci_dev_driver(pdev) == &nv_eth_driver) {
-+ nv_suspend(pdev, state);
-+ }
-+ }
-+ }
-+
-+ return NOTIFY_DONE;
-+}
-+
-+/*
-+ * Reboot notification
-+ */
-+struct notifier_block nv_reboot_notifier =
-+{
-+ notifier_call : nv_reboot_handler,
-+ next : NULL,
-+ priority : 0
-+};
-+#endif
-
+@@ -2758,6 +6051,7 @@
static int __init init_nic(void)
{
-+ int status;
printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
-- return pci_module_init(&driver);
-+ DPRINTK(DRV,KERN_DEBUG,"forcedeth:%s\n",DRV_DATE);
-+ status = pci_module_init(&nv_eth_driver);
-+#ifdef CONFIG_PM
-+ if (status >= 0)
-+ register_reboot_notifier(&nv_reboot_notifier);
-+#endif
-+ return status;
++ dprintk(KERN_DEBUG "DEBUG VERSION\n");
+ return pci_module_init(&driver);
}
- static void __exit exit_nic(void)
- {
-- pci_unregister_driver(&driver);
-+#ifdef CONFIG_PM
-+ unregister_reboot_notifier(&nv_reboot_notifier);
-+#endif
-+ pci_unregister_driver(&nv_eth_driver);
+@@ -2766,15 +6060,90 @@
+ pci_unregister_driver(&driver);
}
+#if NVVER > SLES9
-+module_param(debug, int, 0);
-+module_param(lowpowerspeed, int, 0);
-+MODULE_PARM_DESC(lowpowerspeed, "Low Power State Link Speed enable by setting to 1 and disabled by setting to 0");
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
+module_param(tagging_8021pq, int, 0);
+MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0.");
+#else
-+MODULE_PARM(debug, "i");
-+MODULE_PARM(lowpowerspeed, "i");
-+MODULE_PARM_DESC(lowpowerspeed, "Low Power State Link Speed enable by setting to 1 and disabled by setting to 0");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
+MODULE_PARM(optimization_mode, "i");
+MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0.");
+#endif
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
- MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
+-MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver (TIMERIRQ DISABLED)");
++MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");
+
+ MODULE_DEVICE_TABLE(pci, pci_tbl);
diff -uNr linux-2.6.16.orig/drivers/scsi/sata_nv.c linux-2.6.16/drivers/scsi/sata_nv.c
---- linux-2.6.16.orig/drivers/scsi/sata_nv.c 2008-11-02 19:51:53.000000000 +0100
-+++ linux-2.6.16/drivers/scsi/sata_nv.c 2008-11-03 01:02:50.000000000 +0100
+--- linux-2.6.16.orig/drivers/scsi/sata_nv.c 2007-06-23 20:15:59.919947000 +0200
++++ linux-2.6.16/drivers/scsi/sata_nv.c 2006-10-21 14:45:00.000000000 +0200
@@ -1,630 +1,1284 @@
-/*
- * sata_nv.c - NVIDIA nForce SATA
-
-module_init(nv_init);
-module_exit(nv_exit);
-+/*
-+ * sata_nv.c - NVIDIA nForce SATA
-+ *
-+ * Copyright 2004 NVIDIA Corp. All rights reserved.
-+ * Copyright 2004 Andrew Chew
-+ *
-+ * The contents of this file are subject to the Open
-+ * Software License version 1.1 that can be found at
-+ * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
-+ * by reference.
-+ *
-+ * Alternatively, the contents of this file may be used under the terms
-+ * of the GNU General Public License version 2 (the "GPL") as distributed
-+ * in the kernel source COPYING file, in which case the provisions of
-+ * the GPL are applicable instead of the above. If you wish to allow
-+ * the use of your version of this file only under the terms of the
-+ * GPL and not to allow others to use your version of this file under
-+ * the OSL, indicate your decision by deleting the provisions above and
-+ * replace them with the notice and other provisions required by the GPL.
-+ * If you do not delete the provisions above, a recipient may use your
-+ * version of this file under either the OSL or the GPL.
-+ *
-+ * 0.11
-+ * - Added sgpio support
-+ *
-+ * 0.10
-+ * - Fixed spurious interrupts issue seen with the Maxtor 6H500F0 500GB
-+ * drive. Also made the check_hotplug() callbacks return whether there
-+ * was a hotplug interrupt or not. This was not the source of the
-+ * spurious interrupts, but is the right thing to do anyway.
-+ *
-+ * 0.09
-+ * - Fixed bug introduced by 0.08's MCP51 and MCP55 support.
-+ *
-+ * 0.08
-+ * - Added support for MCP51 and MCP55.
-+ *
-+ * 0.07
-+ * - Added support for RAID class code.
-+ *
-+ * 0.06
-+ * - Added generic SATA support by using a pci_device_id that filters on
-+ * the IDE storage class code.
-+ *
-+ * 0.03
-+ * - Fixed a bug where the hotplug handlers for non-CK804/MCP04 were using
-+ * mmio_base, which is only set for the CK804/MCP04 case.
-+ *
-+ * 0.02
-+ * - Added support for CK804 SATA controller.
-+ *
-+ * 0.01
-+ * - Initial revision.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/pci.h>
-+#include <linux/init.h>
-+#include <linux/blkdev.h>
-+#include <linux/delay.h>
-+#include <linux/interrupt.h>
-+#include "scsi.h"
-+#include <scsi/scsi_host.h>
-+#include <linux/libata.h>
-+
-+#define DRV_NAME "sata_nv"
-+#define DRV_VERSION "0.11-Driver Package V1.23"
-+
-+#define NV_PORTS 2
-+#define NV_PIO_MASK 0x1f
-+#define NV_MWDMA_MASK 0x07
-+#define NV_UDMA_MASK 0x7f
-+#define NV_PORT0_SCR_REG_OFFSET 0x00
-+#define NV_PORT1_SCR_REG_OFFSET 0x40
-+
-+#define NV_INT_STATUS 0x10
-+#define NV_INT_STATUS_CK804 0x440
-+#define NV_INT_STATUS_MCP55 0x440
-+#define NV_INT_STATUS_PDEV_INT 0x01
-+#define NV_INT_STATUS_PDEV_PM 0x02
-+#define NV_INT_STATUS_PDEV_ADDED 0x04
-+#define NV_INT_STATUS_PDEV_REMOVED 0x08
-+#define NV_INT_STATUS_SDEV_INT 0x10
-+#define NV_INT_STATUS_SDEV_PM 0x20
-+#define NV_INT_STATUS_SDEV_ADDED 0x40
-+#define NV_INT_STATUS_SDEV_REMOVED 0x80
-+#define NV_INT_STATUS_PDEV_HOTPLUG (NV_INT_STATUS_PDEV_ADDED | \
-+ NV_INT_STATUS_PDEV_REMOVED)
-+#define NV_INT_STATUS_SDEV_HOTPLUG (NV_INT_STATUS_SDEV_ADDED | \
-+ NV_INT_STATUS_SDEV_REMOVED)
-+#define NV_INT_STATUS_HOTPLUG (NV_INT_STATUS_PDEV_HOTPLUG | \
-+ NV_INT_STATUS_SDEV_HOTPLUG)
-+
-+#define NV_INT_ENABLE 0x11
-+#define NV_INT_ENABLE_CK804 0x441
-+#define NV_INT_ENABLE_MCP55 0x444
-+#define NV_INT_ENABLE_PDEV_MASK 0x01
-+#define NV_INT_ENABLE_PDEV_PM 0x02
-+#define NV_INT_ENABLE_PDEV_ADDED 0x04
-+#define NV_INT_ENABLE_PDEV_REMOVED 0x08
-+#define NV_INT_ENABLE_SDEV_MASK 0x10
-+#define NV_INT_ENABLE_SDEV_PM 0x20
-+#define NV_INT_ENABLE_SDEV_ADDED 0x40
-+#define NV_INT_ENABLE_SDEV_REMOVED 0x80
-+#define NV_INT_ENABLE_PDEV_HOTPLUG (NV_INT_ENABLE_PDEV_ADDED | \
-+ NV_INT_ENABLE_PDEV_REMOVED)
-+#define NV_INT_ENABLE_SDEV_HOTPLUG (NV_INT_ENABLE_SDEV_ADDED | \
-+ NV_INT_ENABLE_SDEV_REMOVED)
-+#define NV_INT_ENABLE_HOTPLUG (NV_INT_ENABLE_PDEV_HOTPLUG | \
-+ NV_INT_ENABLE_SDEV_HOTPLUG)
-+
-+#define NV_INT_CONFIG 0x12
-+#define NV_INT_CONFIG_METHD 0x01 // 0 = INT, 1 = SMI
-+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E
-+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F
-+
-+// For PCI config register 20
-+#define NV_MCP_SATA_CFG_20 0x50
-+#define NV_MCP_SATA_CFG_20_SATA_SPACE_EN 0x04
-+
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-+#define RHAS3U7
-+#endif
-+#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,16)
-+#define SLES10
-+#endif
-+
-+//sgpio
-+// Sgpio defines
-+// SGPIO state defines
-+#define NV_SGPIO_STATE_RESET 0
-+#define NV_SGPIO_STATE_OPERATIONAL 1
-+#define NV_SGPIO_STATE_ERROR 2
-+
-+// SGPIO command opcodes
-+#define NV_SGPIO_CMD_RESET 0
-+#define NV_SGPIO_CMD_READ_PARAMS 1
-+#define NV_SGPIO_CMD_READ_DATA 2
-+#define NV_SGPIO_CMD_WRITE_DATA 3
-+
-+// SGPIO command status defines
-+#define NV_SGPIO_CMD_OK 0
-+#define NV_SGPIO_CMD_ACTIVE 1
-+#define NV_SGPIO_CMD_ERR 2
-+
-+#define NV_SGPIO_UPDATE_TICK 90
-+#define NV_SGPIO_MIN_UPDATE_DELTA 33
-+#define NV_CNTRLR_SHARE_INIT 2
-+#define NV_SGPIO_MAX_ACTIVITY_ON 20
-+#define NV_SGPIO_MIN_FORCE_OFF 5
-+#define NV_SGPIO_PCI_CSR_OFFSET 0x58
-+#define NV_SGPIO_PCI_CB_OFFSET 0x5C
-+#define NV_SGPIO_DFLT_CB_SIZE 256
-+#define NV_ON 1
-+#define NV_OFF 0
-+#ifndef bool
-+#define bool u8
-+#endif
-+
-+static inline unsigned int jiffies_to_msecs1(const unsigned long j)
-+{
-+#if HZ <= 1000 && !(1000 % HZ)
-+ return (1000 / HZ) * j;
-+#elif HZ > 1000 && !(HZ % 1000)
-+ return (j + (HZ / 1000) - 1)/(HZ / 1000);
-+#else
-+ return (j * 1000) / HZ;
-+#endif
-+}
-+
-+#define BF_EXTRACT(v, off, bc) \
-+ ((((u8)(v)) >> (off)) & ((1 << (bc)) - 1))
-+
-+#define BF_INS(v, ins, off, bc) \
-+ (((v) & ~((((1 << (bc)) - 1)) << (off))) | \
-+ (((u8)(ins)) << (off)))
-+
-+#define BF_EXTRACT_U32(v, off, bc) \
-+ ((((u32)(v)) >> (off)) & ((1 << (bc)) - 1))
-+
-+#define BF_INS_U32(v, ins, off, bc) \
-+ (((v) & ~((((1 << (bc)) - 1)) << (off))) | \
-+ (((u32)(ins)) << (off)))
-+
-+#define GET_SGPIO_STATUS(v) BF_EXTRACT(v, 0, 2)
-+#define GET_CMD_STATUS(v) BF_EXTRACT(v, 3, 2)
-+#define GET_CMD(v) BF_EXTRACT(v, 5, 3)
-+#define SET_CMD(v, cmd) BF_INS(v, cmd, 5, 3)
-+
-+#define GET_ENABLE(v) BF_EXTRACT_U32(v, 23, 1)
-+#define SET_ENABLE(v) BF_INS_U32(v, 1, 23, 1)
-+
-+// Needs to have a u8 bit-field insert.
-+#define GET_ACTIVITY(v) BF_EXTRACT(v, 5, 3)
-+#define SET_ACTIVITY(v, on_off) BF_INS(v, on_off, 5, 3)
-+
-+union nv_sgpio_nvcr
-+{
-+ struct {
-+ u8 init_cnt;
-+ u8 cb_size;
-+ u8 cbver;
-+ u8 rsvd;
-+ } bit;
-+ u32 all;
-+};
-+
-+union nv_sgpio_tx
-+{
-+ u8 tx_port[4];
-+ u32 all;
-+};
-+
-+struct nv_sgpio_cb
-+{
-+ u64 scratch_space;
-+ union nv_sgpio_nvcr nvcr;
-+ u32 cr0;
-+ u32 rsvd[4];
-+ union nv_sgpio_tx tx[2];
-+};
-+
-+struct nv_sgpio_host_share
-+{
-+ spinlock_t *plock;
-+ unsigned long *ptstamp;
-+};
-+
-+struct nv_sgpio_host_flags
-+{
-+ u8 sgpio_enabled:1;
-+ u8 need_update:1;
-+ u8 rsvd:6;
-+};
-+
-+struct nv_host_sgpio
-+{
-+ struct nv_sgpio_host_flags flags;
-+ u8 *pcsr;
-+ struct nv_sgpio_cb *pcb;
-+ struct nv_sgpio_host_share share;
-+ struct timer_list sgpio_timer;
-+};
-+
-+struct nv_sgpio_port_flags
-+{
-+ u8 last_state:1;
-+ u8 recent_activity:1;
-+ u8 rsvd:6;
-+};
-+
-+struct nv_sgpio_led
-+{
-+ struct nv_sgpio_port_flags flags;
-+ u8 force_off;
-+ u8 last_cons_active;
-+};
-+
-+struct nv_port_sgpio
-+{
-+ struct nv_sgpio_led activity;
-+};
-+
-+static spinlock_t nv_sgpio_lock;
-+static unsigned long nv_sgpio_tstamp;
-+
-+static inline void nv_sgpio_set_csr(u8 csr, unsigned long pcsr)
-+{
-+ outb(csr, pcsr);
-+}
-+
-+static inline u8 nv_sgpio_get_csr(unsigned long pcsr)
-+{
-+ return inb(pcsr);
-+}
-+
-+static inline u8 nv_sgpio_get_func(struct ata_host_set *host_set)
-+{
-+ u8 devfn = (to_pci_dev(host_set->dev))->devfn;
-+ return (PCI_FUNC(devfn));
-+}
-+
-+static inline u8 nv_sgpio_tx_host_offset(struct ata_host_set *host_set)
-+{
-+ return (nv_sgpio_get_func(host_set)/NV_CNTRLR_SHARE_INIT);
-+}
-+
-+static inline u8 nv_sgpio_calc_tx_offset(u8 cntrlr, u8 channel)
-+{
-+ return (sizeof(union nv_sgpio_tx) - (NV_CNTRLR_SHARE_INIT *
-+ (cntrlr % NV_CNTRLR_SHARE_INIT)) - channel - 1);
-+}
-+
-+static inline u8 nv_sgpio_tx_port_offset(struct ata_port *ap)
-+{
-+ u8 cntrlr = nv_sgpio_get_func(ap->host_set);
-+ return (nv_sgpio_calc_tx_offset(cntrlr, ap->port_no));
-+}
-+
-+static inline bool nv_sgpio_capable(const struct pci_device_id *ent)
-+{
-+ if (ent->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2)
-+ return 1;
-+ else
-+ return 0;
-+}
-+
-+
-+
-+
-+
-+
-+static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-+static irqreturn_t nv_interrupt (int irq, void *dev_instance,
-+ struct pt_regs *regs);
-+static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
-+static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
-+static void nv_host_stop (struct ata_host_set *host_set);
-+static int nv_port_start(struct ata_port *ap);
-+static void nv_port_stop(struct ata_port *ap);
-+static int nv_qc_issue(struct ata_queued_cmd *qc);
-+static void nv_enable_hotplug(struct ata_probe_ent *probe_ent);
-+static void nv_disable_hotplug(struct ata_host_set *host_set);
-+static void nv_check_hotplug(struct ata_host_set *host_set);
-+static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent);
-+static void nv_disable_hotplug_ck804(struct ata_host_set *host_set);
-+static void nv_check_hotplug_ck804(struct ata_host_set *host_set);
-+static void nv_enable_hotplug_mcp55(struct ata_probe_ent *probe_ent);
-+static void nv_disable_hotplug_mcp55(struct ata_host_set *host_set);
-+static void nv_check_hotplug_mcp55(struct ata_host_set *host_set);
-+enum nv_host_type
-+{
-+ GENERIC,
-+ NFORCE2,
-+ NFORCE3,
-+ CK804,
-+ MCP55
-+};
-+
-+static struct pci_device_id nv_pci_tbl[] = {
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
-+ { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
-+ { 0, } /* terminate list */
-+};
-+
-+#define NV_HOST_FLAGS_SCR_MMIO 0x00000001
-+
-+struct nv_host_desc
-+{
-+ enum nv_host_type host_type;
-+ void (*enable_hotplug)(struct ata_probe_ent *probe_ent);
-+ void (*disable_hotplug)(struct ata_host_set *host_set);
-+ void (*check_hotplug)(struct ata_host_set *host_set);
-+
-+};
-+static struct nv_host_desc nv_device_tbl[] = {
-+ {
-+ .host_type = GENERIC,
-+ .enable_hotplug = NULL,
-+ .disable_hotplug= NULL,
-+ .check_hotplug = NULL,
-+ },
-+ {
-+ .host_type = NFORCE2,
-+ .enable_hotplug = nv_enable_hotplug,
-+ .disable_hotplug= nv_disable_hotplug,
-+ .check_hotplug = nv_check_hotplug,
-+ },
-+ {
-+ .host_type = NFORCE3,
-+ .enable_hotplug = nv_enable_hotplug,
-+ .disable_hotplug= nv_disable_hotplug,
-+ .check_hotplug = nv_check_hotplug,
-+ },
-+ { .host_type = CK804,
-+ .enable_hotplug = nv_enable_hotplug_ck804,
-+ .disable_hotplug= nv_disable_hotplug_ck804,
-+ .check_hotplug = nv_check_hotplug_ck804,
-+ },
-+ { .host_type = MCP55,
-+ .enable_hotplug = nv_enable_hotplug_mcp55,
-+ .disable_hotplug= nv_disable_hotplug_mcp55,
-+ .check_hotplug = nv_check_hotplug_mcp55,
-+ },
-+};
-+
-+
-+struct nv_host
-+{
-+ struct nv_host_desc *host_desc;
-+ unsigned long host_flags;
-+ struct nv_host_sgpio host_sgpio;
-+ struct pci_dev *pdev;
-+};
-+
-+struct nv_port
-+{
-+ struct nv_port_sgpio port_sgpio;
-+};
-+
-+// SGPIO function prototypes
-+static void nv_sgpio_init(struct pci_dev *pdev, struct nv_host *phost);
-+static void nv_sgpio_reset(u8 *pcsr);
-+static void nv_sgpio_set_timer(struct timer_list *ptimer,
-+ unsigned int timeout_msec);
-+static void nv_sgpio_timer_handler(unsigned long ptr);
-+static void nv_sgpio_host_cleanup(struct nv_host *host);
-+static bool nv_sgpio_update_led(struct nv_sgpio_led *led, bool *on_off);
-+static void nv_sgpio_clear_all_leds(struct ata_port *ap);
-+static bool nv_sgpio_send_cmd(struct nv_host *host, u8 cmd);
-+
-+
-+static struct pci_driver nv_pci_driver = {
-+ .name = DRV_NAME,
-+ .id_table = nv_pci_tbl,
-+ .probe = nv_init_one,
-+ .remove = ata_pci_remove_one,
-+};
-+
-+
-+#ifdef SLES10
-+static struct scsi_host_template nv_sht = {
-+#else
-+static Scsi_Host_Template nv_sht = {
-+#endif
-+ .module = THIS_MODULE,
-+ .name = DRV_NAME,
-+#ifdef RHAS3U7
-+ .detect = ata_scsi_detect,
-+ .release = ata_scsi_release,
-+#endif
-+ .ioctl = ata_scsi_ioctl,
-+ .queuecommand = ata_scsi_queuecmd,
-+ .eh_strategy_handler = ata_scsi_error,
-+ .can_queue = ATA_DEF_QUEUE,
-+ .this_id = ATA_SHT_THIS_ID,
-+ .sg_tablesize = LIBATA_MAX_PRD,
-+ .max_sectors = ATA_MAX_SECTORS,
-+ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
-+#ifdef RHAS3U7
-+ .use_new_eh_code = ATA_SHT_NEW_EH_CODE,
-+#endif
-+ .emulated = ATA_SHT_EMULATED,
-+ .use_clustering = ATA_SHT_USE_CLUSTERING,
-+ .proc_name = DRV_NAME,
-+#ifndef RHAS3U7
-+ .dma_boundary = ATA_DMA_BOUNDARY,
-+ .slave_configure = ata_scsi_slave_config,
-+#endif
-+ .bios_param = ata_std_bios_param,
-+};
-+
-+static struct ata_port_operations nv_ops = {
-+ .port_disable = ata_port_disable,
-+ .tf_load = ata_tf_load,
-+ .tf_read = ata_tf_read,
-+ .exec_command = ata_exec_command,
-+ .check_status = ata_check_status,
-+ .dev_select = ata_std_dev_select,
-+ .phy_reset = sata_phy_reset,
-+ .bmdma_setup = ata_bmdma_setup,
-+ .bmdma_start = ata_bmdma_start,
-+ .bmdma_stop = ata_bmdma_stop,
-+ .bmdma_status = ata_bmdma_status,
-+ .qc_prep = ata_qc_prep,
-+ .qc_issue = nv_qc_issue,
-+ .eng_timeout = ata_eng_timeout,
-+ .irq_handler = nv_interrupt,
-+ .irq_clear = ata_bmdma_irq_clear,
-+ .scr_read = nv_scr_read,
-+ .scr_write = nv_scr_write,
-+ .port_start = nv_port_start,
-+ .port_stop = nv_port_stop,
-+ .host_stop = nv_host_stop,
-+};
-+
-+/* FIXME: The hardware provides the necessary SATA PHY controls
-+ * to support ATA_FLAG_SATA_RESET. However, it is currently
-+ * necessary to disable that flag, to solve misdetection problems.
-+ * See http://bugme.osdl.org/show_bug.cgi?id=3352 for more info.
-+ *
-+ * This problem really needs to be investigated further. But in the
-+ * meantime, we avoid ATA_FLAG_SATA_RESET to get people working.
-+ */
-+static struct ata_port_info nv_port_info = {
-+ .sht = &nv_sht,
-+ .host_flags = ATA_FLAG_SATA |
-+ /* ATA_FLAG_SATA_RESET | */
-+ ATA_FLAG_SRST |
-+ ATA_FLAG_NO_LEGACY,
-+ .pio_mask = NV_PIO_MASK,
-+ .mwdma_mask = NV_MWDMA_MASK,
-+ .udma_mask = NV_UDMA_MASK,
-+ .port_ops = &nv_ops,
-+};
-+
-+MODULE_AUTHOR("NVIDIA");
-+MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
-+MODULE_LICENSE("GPL");
-+MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
-+MODULE_VERSION(DRV_VERSION);
-+
-+static irqreturn_t nv_interrupt (int irq, void *dev_instance,
-+ struct pt_regs *regs)
-+{
-+ struct ata_host_set *host_set = dev_instance;
-+ struct nv_host *host = host_set->private_data;
-+ unsigned int i;
-+ unsigned int handled = 0;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&host_set->lock, flags);
-+
-+ for (i = 0; i < host_set->n_ports; i++) {
-+ struct ata_port *ap;
-+
-+ ap = host_set->ports[i];
-+#ifdef ATA_FLAG_NOINTR
-+ if (ap &&
-+ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
-+#else
-+ if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
-+#endif
-+ struct ata_queued_cmd *qc;
-+
-+ qc = ata_qc_from_tag(ap, ap->active_tag);
-+ if (qc && (!(qc->tf.ctl & ATA_NIEN)))
-+ handled += ata_host_intr(ap, qc);
-+ else
-+ // No request pending? Clear interrupt status
-+ // anyway, in case there's one pending.
-+ ap->ops->check_status(ap);
-+ }
-+
-+ }
-+
-+ if (host->host_desc->check_hotplug)
-+ host->host_desc->check_hotplug(host_set);
-+
-+ spin_unlock_irqrestore(&host_set->lock, flags);
-+
-+ return IRQ_RETVAL(handled);
-+}
-+
-+static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
-+{
-+ struct ata_host_set *host_set = ap->host_set;
-+ struct nv_host *host = host_set->private_data;
-+
-+ if (sc_reg > SCR_CONTROL)
-+ return 0xffffffffU;
-+
-+ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
-+ return readl((void*)ap->ioaddr.scr_addr + (sc_reg * 4));
-+ else
-+ return inl(ap->ioaddr.scr_addr + (sc_reg * 4));
-+}
-+
-+static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
-+{
-+ struct ata_host_set *host_set = ap->host_set;
-+ struct nv_host *host = host_set->private_data;
-+
-+ if (sc_reg > SCR_CONTROL)
-+ return;
-+
-+ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
-+ writel(val, (void*)ap->ioaddr.scr_addr + (sc_reg * 4));
-+ else
-+ outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
-+}
-+
-+static void nv_host_stop (struct ata_host_set *host_set)
-+{
-+ struct nv_host *host = host_set->private_data;
-+
-+ // Disable hotplug event interrupts.
-+ if (host->host_desc->disable_hotplug)
-+ host->host_desc->disable_hotplug(host_set);
-+
-+ nv_sgpio_host_cleanup(host);
-+ kfree(host);
-+#ifdef RHAS3U7
-+
-+ ata_host_stop(host_set);
-+#endif
-+}
-+
-+static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
-+{
-+ static int printed_version = 0;
-+ struct nv_host *host;
-+ struct ata_port_info *ppi;
-+ struct ata_probe_ent *probe_ent;
-+ int pci_dev_busy = 0;
-+ int rc;
-+ u32 bar;
-+
-+ // Make sure this is a SATA controller by counting the number of bars
-+ // (NVIDIA SATA controllers will always have six bars). Otherwise,
-+ // it's an IDE controller and we ignore it.
-+ for (bar=0; bar<6; bar++)
-+ if (pci_resource_start(pdev, bar) == 0)
-+ return -ENODEV;
-+
-+ if (!printed_version++)
-+ printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
-+
-+ rc = pci_enable_device(pdev);
-+ if (rc)
-+ goto err_out;
-+
-+ rc = pci_request_regions(pdev, DRV_NAME);
-+ if (rc) {
-+ pci_dev_busy = 1;
-+ goto err_out_disable;
-+ }
-+
-+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-+ if (rc)
-+ goto err_out_regions;
-+#ifndef RHAS3U7
-+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
-+ if (rc)
-+ goto err_out_regions;
-+#endif
-+ rc = -ENOMEM;
-+
-+ ppi = &nv_port_info;
-+
-+ probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY|ATA_PORT_SECONDARY);
-+
-+ if (!probe_ent)
-+ goto err_out_regions;
-+
-+ host = kmalloc(sizeof(struct nv_host), GFP_KERNEL);
-+ if (!host)
-+ goto err_out_free_ent;
-+
-+ memset(host, 0, sizeof(struct nv_host));
-+ host->host_desc = &nv_device_tbl[ent->driver_data];
-+
-+ probe_ent->private_data = host;
-+
-+ if (pci_resource_flags(pdev, 5) & IORESOURCE_MEM)
-+ host->host_flags |= NV_HOST_FLAGS_SCR_MMIO;
-+
-+ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) {
-+ unsigned long base;
-+
-+ probe_ent->mmio_base = ioremap(pci_resource_start(pdev, 5),
-+ pci_resource_len(pdev, 5));
-+ if (probe_ent->mmio_base == NULL) {
-+ rc = -EIO;
-+ goto err_out_free_host;
-+ }
-+
-+ base = (unsigned long)probe_ent->mmio_base;
-+
-+ probe_ent->port[0].scr_addr =
-+ base + NV_PORT0_SCR_REG_OFFSET;
-+ probe_ent->port[1].scr_addr =
-+ base + NV_PORT1_SCR_REG_OFFSET;
-+ } else {
-+
-+ probe_ent->port[0].scr_addr =
-+ pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET;
-+ probe_ent->port[1].scr_addr =
-+ pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET;
-+ }
-+
-+ pci_set_master(pdev);
-+#ifdef RHAS3U7
-+ ata_add_to_probe_list(probe_ent);
-+
-+ if (nv_sgpio_capable(ent))
-+ nv_sgpio_init(pdev, host);
-+ // Enable hotplug event interrupts.
-+ if (host->host_desc->enable_hotplug)
-+ host->host_desc->enable_hotplug(probe_ent);
-+
-+ return 0;
-+#else
-+ rc = ata_device_add(probe_ent);
-+ if (rc != NV_PORTS)
-+ goto err_out_iounmap;
-+
-+ if (nv_sgpio_capable(ent))
-+ nv_sgpio_init(pdev, host);
-+ // Enable hotplug event interrupts.
-+ if (host->host_desc->enable_hotplug)
-+ host->host_desc->enable_hotplug(probe_ent);
-+
-+ kfree(probe_ent);
-+
-+ return 0;
-+
-+err_out_iounmap:
-+ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
-+ iounmap(probe_ent->mmio_base);
-+#endif
-+err_out_free_host:
-+ kfree(host);
-+err_out_free_ent:
-+ kfree(probe_ent);
-+err_out_regions:
-+ pci_release_regions(pdev);
-+err_out_disable:
-+ if (!pci_dev_busy)
-+ pci_disable_device(pdev);
-+err_out:
-+ return rc;
-+}
-+
-+
-+static int nv_port_start(struct ata_port *ap)
-+{
-+ int stat;
-+ struct nv_port *port;
-+
-+ stat = ata_port_start(ap);
-+ if (stat) {
-+ return stat;
-+ }
-+
-+ port = kmalloc(sizeof(struct nv_port), GFP_KERNEL);
-+ if (!port)
-+ goto err_out_no_free;
-+
-+ memset(port, 0, sizeof(struct nv_port));
-+
-+ ap->private_data = port;
-+ return 0;
-+
-+err_out_no_free:
-+ return 1;
-+}
-+
-+static void nv_port_stop(struct ata_port *ap)
-+{
-+ nv_sgpio_clear_all_leds(ap);
-+
-+ if (ap->private_data) {
-+ kfree(ap->private_data);
-+ ap->private_data = NULL;
-+ }
-+ ata_port_stop(ap);
-+}
-+
-+static int nv_qc_issue(struct ata_queued_cmd *qc)
-+{
-+ struct nv_port *port = qc->ap->private_data;
-+
-+ if (port)
-+ port->port_sgpio.activity.flags.recent_activity = 1;
-+ return (ata_qc_issue_prot(qc));
-+}
-+
-+
-+
-+
-+static void nv_enable_hotplug(struct ata_probe_ent *probe_ent)
-+{
-+ u8 intr_mask;
-+
-+ outb(NV_INT_STATUS_HOTPLUG,
-+ probe_ent->port[0].scr_addr + NV_INT_STATUS);
-+
-+ intr_mask = inb(probe_ent->port[0].scr_addr + NV_INT_ENABLE);
-+ intr_mask |= NV_INT_ENABLE_HOTPLUG;
-+
-+ outb(intr_mask, probe_ent->port[0].scr_addr + NV_INT_ENABLE);
-+}
-+
-+static void nv_disable_hotplug(struct ata_host_set *host_set)
-+{
-+ u8 intr_mask;
-+
-+ intr_mask = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
-+
-+ intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
-+
-+ outb(intr_mask, host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
-+}
-+
-+static void nv_check_hotplug(struct ata_host_set *host_set)
-+{
-+ u8 intr_status;
-+
-+ intr_status = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
-+
-+ // Clear interrupt status.
-+ outb(0xff, host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
-+
-+ if (intr_status & NV_INT_STATUS_HOTPLUG) {
-+ if (intr_status & NV_INT_STATUS_PDEV_ADDED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Primary device added\n");
-+
-+ if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Primary device removed\n");
-+
-+ if (intr_status & NV_INT_STATUS_SDEV_ADDED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Secondary device added\n");
-+
-+ if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Secondary device removed\n");
-+ }
-+}
-+
-+static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent)
-+{
-+ struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-+ u8 intr_mask;
-+ u8 regval;
-+
-+ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
-+ regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
-+ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
-+
-+ writeb(NV_INT_STATUS_HOTPLUG, probe_ent->mmio_base + NV_INT_STATUS_CK804);
-+
-+ intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_CK804);
-+ intr_mask |= NV_INT_ENABLE_HOTPLUG;
-+
-+ writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_CK804);
-+}
-+
-+static void nv_disable_hotplug_ck804(struct ata_host_set *host_set)
-+{
-+ struct pci_dev *pdev = to_pci_dev(host_set->dev);
-+ u8 intr_mask;
-+ u8 regval;
-+
-+ intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_CK804);
-+
-+ intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
-+
-+ writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_CK804);
-+
-+ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
-+ regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
-+ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
-+}
-+
-+static void nv_check_hotplug_ck804(struct ata_host_set *host_set)
-+{
-+ u8 intr_status;
-+
-+ intr_status = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
-+
-+ // Clear interrupt status.
-+ writeb(0xff, host_set->mmio_base + NV_INT_STATUS_CK804);
-+
-+ if (intr_status & NV_INT_STATUS_HOTPLUG) {
-+ if (intr_status & NV_INT_STATUS_PDEV_ADDED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Primary device added\n");
-+
-+ if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Primary device removed\n");
-+
-+ if (intr_status & NV_INT_STATUS_SDEV_ADDED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Secondary device added\n");
-+
-+ if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Secondary device removed\n");
-+ }
-+}
-+static void nv_enable_hotplug_mcp55(struct ata_probe_ent *probe_ent)
-+{
-+ struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-+ u8 intr_mask;
-+ u8 regval;
-+
-+ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
-+ regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
-+ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
-+
-+ writeb(0x0c, probe_ent->mmio_base + NV_INT_STATUS_MCP55);
-+ writeb(0x0c, probe_ent->mmio_base + NV_INT_STATUS_MCP55+2);
-+
-+ intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_MCP55);
-+ intr_mask |= 0x0c;
-+ writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_MCP55);
-+
-+ intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_MCP55+2);
-+ intr_mask |= 0x0c;
-+ writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_MCP55+2);
-+}
-+
-+static void nv_disable_hotplug_mcp55(struct ata_host_set *host_set)
-+{
-+ struct pci_dev *pdev = to_pci_dev(host_set->dev);
-+ u8 intr_mask;
-+ u8 regval;
-+
-+ intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_MCP55);
-+ intr_mask &= ~(0x0C);
-+ writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_MCP55);
-+
-+ intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_MCP55+2);
-+ intr_mask &= ~(0x0C);
-+ writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_MCP55+2);
-+
-+ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
-+ regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
-+ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
-+}
-+
-+static void nv_check_hotplug_mcp55(struct ata_host_set *host_set)
-+{
-+ u8 intr_status,intr_status1;
-+
-+ intr_status = readb(host_set->mmio_base + NV_INT_STATUS_MCP55);
-+ intr_status1 = readb(host_set->mmio_base + NV_INT_STATUS_MCP55+2);
-+
-+ // Clear interrupt status.
-+ writeb(0xff, host_set->mmio_base + NV_INT_STATUS_MCP55);
-+ writeb(0xff, host_set->mmio_base + NV_INT_STATUS_MCP55+2);
-+
-+ if ((intr_status & 0x0c) || (intr_status1&0x0c)) {
-+ if (intr_status & 0x04)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Primary device added\n");
-+
-+ if (intr_status & 0x08)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Primary device removed\n");
-+
-+ if (intr_status1 & 0x04)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Secondary device added\n");
-+
-+ if (intr_status1 & 0x08)
-+ printk(KERN_WARNING "nv_sata: "
-+ "Secondary device removed\n");
-+ }
-+}
-+
-+
-+static void nv_sgpio_init(struct pci_dev *pdev, struct nv_host *phost)
-+{
-+ u16 csr_add;
-+ u32 cb_add, temp32;
-+ struct device *dev = pci_dev_to_dev(pdev);
-+ struct ata_host_set *host_set = dev_get_drvdata(dev);
-+ u8 pro=0;
-+ pci_read_config_word(pdev, NV_SGPIO_PCI_CSR_OFFSET, &csr_add);
-+ pci_read_config_dword(pdev, NV_SGPIO_PCI_CB_OFFSET, &cb_add);
-+ pci_read_config_byte(pdev, 0xA4, &pro);
-+
-+ if (csr_add == 0 || cb_add == 0)
-+ return;
-+
-+
-+ if (!(pro&0x40))
-+ return;
-+
-+
-+ temp32 = csr_add;
-+ phost->host_sgpio.pcsr = (void *)temp32;
-+ phost->host_sgpio.pcb = phys_to_virt(cb_add);
-+
-+ if (phost->host_sgpio.pcb->nvcr.bit.init_cnt!=0x2 || phost->host_sgpio.pcb->nvcr.bit.cbver!=0x0)
-+ return;
-+
-+ if (temp32 <=0x200 || temp32 >=0xFFFE )
-+ return;
-+
-+
-+ if (cb_add<=0x80000 || cb_add>=0x9FC00)
-+ return;
-+
-+
-+ if (phost->host_sgpio.pcb->scratch_space == 0) {
-+ spin_lock_init(&nv_sgpio_lock);
-+ phost->host_sgpio.share.plock = &nv_sgpio_lock;
-+ phost->host_sgpio.share.ptstamp = &nv_sgpio_tstamp;
-+ phost->host_sgpio.pcb->scratch_space =
-+ (unsigned long)&phost->host_sgpio.share;
-+ spin_lock(phost->host_sgpio.share.plock);
-+ nv_sgpio_reset(phost->host_sgpio.pcsr);
-+ phost->host_sgpio.pcb->cr0 =
-+ SET_ENABLE(phost->host_sgpio.pcb->cr0);
-+
-+ spin_unlock(phost->host_sgpio.share.plock);
-+ }
-+
-+ phost->host_sgpio.share =
-+ *(struct nv_sgpio_host_share *)(unsigned long)
-+ phost->host_sgpio.pcb->scratch_space;
-+ phost->host_sgpio.flags.sgpio_enabled = 1;
-+ phost->pdev = pdev;
-+ init_timer(&phost->host_sgpio.sgpio_timer);
-+ phost->host_sgpio.sgpio_timer.data = (unsigned long)phost;
-+ nv_sgpio_set_timer(&phost->host_sgpio.sgpio_timer,
-+ NV_SGPIO_UPDATE_TICK);
-+}
-+
-+static void __nv_sgpio_timer_handler(unsigned long context);
-+static void nv_sgpio_set_timer(struct timer_list *ptimer, unsigned int timeout_msec)
-+{
-+ if (!ptimer)
-+ return;
-+ ptimer->function = __nv_sgpio_timer_handler;
-+ ptimer->expires = msecs_to_jiffies(timeout_msec) + jiffies;
-+ add_timer(ptimer);
-+}
-+static void __nv_sgpio_timer_handler(unsigned long context)
-+{
-+ struct nv_host *phost = (struct nv_host*)context;
-+ struct device *dev = pci_dev_to_dev(phost->pdev);
-+ struct ata_host_set *host_set = dev_get_drvdata(dev);
-+
-+ if (!host_set)
-+ nv_sgpio_set_timer(&phost->host_sgpio.sgpio_timer,
-+ NV_SGPIO_UPDATE_TICK);
-+ else
-+ nv_sgpio_timer_handler(host_set);
-+
-+}
-+
-+static void nv_sgpio_timer_handler(unsigned long context)
-+{
-+
-+ struct ata_host_set *host_set = (struct ata_host_set *)context;
-+ struct nv_host *host;
-+ u8 count, host_offset, port_offset;
-+ union nv_sgpio_tx tx;
-+ bool on_off;
-+ unsigned long mask = 0xFFFF;
-+ struct nv_port *port;
-+
-+ if (!host_set)
-+ goto err_out;
-+ else
-+ host = (struct nv_host *)host_set->private_data;
-+
-+ if (!host->host_sgpio.flags.sgpio_enabled)
-+ goto err_out;
-+
-+ host_offset = nv_sgpio_tx_host_offset(host_set);
-+
-+ spin_lock(host->host_sgpio.share.plock);
-+ tx = host->host_sgpio.pcb->tx[host_offset];
-+ spin_unlock(host->host_sgpio.share.plock);
-+
-+ for (count = 0; count < host_set->n_ports; count++) {
-+ struct ata_port *ap;
-+
-+ ap = host_set->ports[count];
-+
-+ if (!(ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)))
-+ continue;
-+
-+ port = (struct nv_port *)ap->private_data;
-+ if (!port)
-+ continue;
-+ port_offset = nv_sgpio_tx_port_offset(ap);
-+ on_off = GET_ACTIVITY(tx.tx_port[port_offset]);
-+ if (nv_sgpio_update_led(&port->port_sgpio.activity, &on_off)) {
-+ tx.tx_port[port_offset] =
-+ SET_ACTIVITY(tx.tx_port[port_offset], on_off);
-+ host->host_sgpio.flags.need_update = 1;
-+ }
-+ }
-+
-+
-+ if (host->host_sgpio.flags.need_update) {
-+ spin_lock(host->host_sgpio.share.plock);
-+ if (nv_sgpio_get_func(host_set)
-+ % NV_CNTRLR_SHARE_INIT == 0) {
-+ host->host_sgpio.pcb->tx[host_offset].all &= mask;
-+ mask = mask << 16;
-+ tx.all &= mask;
-+ } else {
-+ tx.all &= mask;
-+ mask = mask << 16;
-+ host->host_sgpio.pcb->tx[host_offset].all &= mask;
-+ }
-+ host->host_sgpio.pcb->tx[host_offset].all |= tx.all;
-+ spin_unlock(host->host_sgpio.share.plock);
-+
-+ if (nv_sgpio_send_cmd(host, NV_SGPIO_CMD_WRITE_DATA)) {
-+ host->host_sgpio.flags.need_update = 0;
-+ return;
-+ }
-+ } else {
-+ nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer,
-+ NV_SGPIO_UPDATE_TICK);
-+ }
-+err_out:
-+ return;
-+}
-+
-+static bool nv_sgpio_send_cmd(struct nv_host *host, u8 cmd)
-+{
-+ u8 csr;
-+ unsigned long *ptstamp;
-+
-+ spin_lock(host->host_sgpio.share.plock);
-+ ptstamp = host->host_sgpio.share.ptstamp;
-+ if (jiffies_to_msecs1(jiffies - *ptstamp) >= NV_SGPIO_MIN_UPDATE_DELTA) {
-+ csr =
-+ nv_sgpio_get_csr((unsigned long)host->host_sgpio.pcsr);
-+ if ((GET_SGPIO_STATUS(csr) != NV_SGPIO_STATE_OPERATIONAL) ||
-+ (GET_CMD_STATUS(csr) == NV_SGPIO_CMD_ACTIVE)) {
-+ //nv_sgpio_reset(host->host_sgpio.pcsr);
-+ } else {
-+ host->host_sgpio.pcb->cr0 =
-+ SET_ENABLE(host->host_sgpio.pcb->cr0);
-+ csr = 0;
-+ csr = SET_CMD(csr, cmd);
-+ nv_sgpio_set_csr(csr,
-+ (unsigned long)host->host_sgpio.pcsr);
-+ *ptstamp = jiffies;
-+ }
-+ spin_unlock(host->host_sgpio.share.plock);
-+ nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer,
-+ NV_SGPIO_UPDATE_TICK);
-+ return 1;
-+ } else {
-+ spin_unlock(host->host_sgpio.share.plock);
-+ nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer,
-+ (NV_SGPIO_MIN_UPDATE_DELTA -
-+ jiffies_to_msecs1(jiffies - *ptstamp)));
-+ return 0;
-+ }
-+}
-+
-+static bool nv_sgpio_update_led(struct nv_sgpio_led *led, bool *on_off)
-+{
-+ bool need_update = 0;
-+
-+ if (led->force_off > 0) {
-+ led->force_off--;
-+ } else if (led->flags.recent_activity ^ led->flags.last_state) {
-+ *on_off = led->flags.recent_activity;
-+ led->flags.last_state = led->flags.recent_activity;
-+ need_update = 1;
-+ } else if ((led->flags.recent_activity & led->flags.last_state) &&
-+ (led->last_cons_active >= NV_SGPIO_MAX_ACTIVITY_ON)) {
-+ *on_off = NV_OFF;
-+ led->flags.last_state = NV_OFF;
-+ led->force_off = NV_SGPIO_MIN_FORCE_OFF;
-+ need_update = 1;
-+ }
-+
-+ if (*on_off)
-+ led->last_cons_active++;
-+ else
-+ led->last_cons_active = 0;
-+
-+ led->flags.recent_activity = 0;
-+ return need_update;
-+}
-+
-+static void nv_sgpio_reset(u8 *pcsr)
-+{
-+ u8 csr;
-+
-+ csr = nv_sgpio_get_csr((unsigned long)pcsr);
-+ if (GET_SGPIO_STATUS(csr) == NV_SGPIO_STATE_RESET) {
-+ csr = 0;
-+ csr = SET_CMD(csr, NV_SGPIO_CMD_RESET);
-+ nv_sgpio_set_csr(csr, (unsigned long)pcsr);
-+ }
-+ csr = 0;
-+ csr = SET_CMD(csr, NV_SGPIO_CMD_READ_PARAMS);
-+ nv_sgpio_set_csr(csr, (unsigned long)pcsr);
-+}
-+
-+static void nv_sgpio_host_cleanup(struct nv_host *host)
-+{
-+ u8 csr;
-+ if (!host)
-+ return;
-+
-+ if (host->host_sgpio.flags.sgpio_enabled){
-+ spin_lock(host->host_sgpio.share.plock);
-+ host->host_sgpio.pcb->cr0 =
-+ SET_ENABLE(host->host_sgpio.pcb->cr0);
-+ csr = 0;
-+ csr = SET_CMD(csr, NV_SGPIO_CMD_WRITE_DATA);
-+ nv_sgpio_set_csr(csr,
-+ (unsigned long)host->host_sgpio.pcsr);
-+ spin_unlock(host->host_sgpio.share.plock);
-+
-+ if (timer_pending(&host->host_sgpio.sgpio_timer))
-+ del_timer(&host->host_sgpio.sgpio_timer);
-+ host->host_sgpio.flags.sgpio_enabled = 0;
-+ host->host_sgpio.pcb->scratch_space = 0;
-+ }
-+
-+}
-+
-+static void nv_sgpio_clear_all_leds(struct ata_port *ap)
-+{
-+ struct nv_port *port = ap->private_data;
-+ struct nv_host *host;
-+ u8 host_offset, port_offset;
-+
-+ if (!port || !ap->host_set)
-+ return;
-+ if (!ap->host_set->private_data)
-+ return;
-+
-+ host = ap->host_set->private_data;
-+ if (!host->host_sgpio.flags.sgpio_enabled)
-+ return;
-+
-+ host_offset = nv_sgpio_tx_host_offset(ap->host_set);
-+ port_offset = nv_sgpio_tx_port_offset(ap);
-+
-+ spin_lock(host->host_sgpio.share.plock);
-+ host->host_sgpio.pcb->tx[host_offset].tx_port[port_offset] = 0;
-+ host->host_sgpio.flags.need_update = 1;
-+ spin_unlock(host->host_sgpio.share.plock);
-+}
-+
-+
-+
-+static int __init nv_init(void)
-+{
-+#ifdef RHAS3U7
-+ int rc;
-+ rc = pci_module_init(&nv_pci_driver);
-+ if (rc)
-+ return rc;
-+
-+ rc = scsi_register_module(MODULE_SCSI_HA, &nv_sht);
-+ if (rc) {
-+ pci_unregister_driver(&nv_pci_driver);
-+ /* TODO: does scsi_register_module return errno val? */
-+ return -ENODEV;
-+ }
-+
-+ return 0;
-+#else
-+ return pci_module_init(&nv_pci_driver);
-+#endif
-+}
-+
-+static void __exit nv_exit(void)
-+{
-+#ifdef RHAS3U7
-+ scsi_unregister_module(MODULE_SCSI_HA, &nv_sht);
-+#endif
-+ pci_unregister_driver(&nv_pci_driver);
-+
-+}
-+
-+module_init(nv_init);
-+module_exit(nv_exit);
++/*\r
++ * sata_nv.c - NVIDIA nForce SATA\r
++ *\r
++ * Copyright 2004 NVIDIA Corp. All rights reserved.\r
++ * Copyright 2004 Andrew Chew\r
++ *\r
++ * The contents of this file are subject to the Open\r
++ * Software License version 1.1 that can be found at\r
++ * http://www.opensource.org/licenses/osl-1.1.txt and is included herein\r
++ * by reference.\r
++ *\r
++ * Alternatively, the contents of this file may be used under the terms\r
++ * of the GNU General Public License version 2 (the "GPL") as distributed\r
++ * in the kernel source COPYING file, in which case the provisions of\r
++ * the GPL are applicable instead of the above. If you wish to allow\r
++ * the use of your version of this file only under the terms of the\r
++ * GPL and not to allow others to use your version of this file under\r
++ * the OSL, indicate your decision by deleting the provisions above and\r
++ * replace them with the notice and other provisions required by the GPL.\r
++ * If you do not delete the provisions above, a recipient may use your\r
++ * version of this file under either the OSL or the GPL.\r
++ *\r
++ * 0.11\r
++ * - Added sgpio support\r
++ *\r
++ * 0.10\r
++ * - Fixed spurious interrupts issue seen with the Maxtor 6H500F0 500GB\r
++ * drive. Also made the check_hotplug() callbacks return whether there\r
++ * was a hotplug interrupt or not. This was not the source of the\r
++ * spurious interrupts, but is the right thing to do anyway.\r
++ *\r
++ * 0.09\r
++ * - Fixed bug introduced by 0.08's MCP51 and MCP55 support.\r
++ *\r
++ * 0.08\r
++ * - Added support for MCP51 and MCP55.\r
++ *\r
++ * 0.07\r
++ * - Added support for RAID class code.\r
++ *\r
++ * 0.06\r
++ * - Added generic SATA support by using a pci_device_id that filters on\r
++ * the IDE storage class code.\r
++ *\r
++ * 0.03\r
++ * - Fixed a bug where the hotplug handlers for non-CK804/MCP04 were using\r
++ * mmio_base, which is only set for the CK804/MCP04 case.\r
++ *\r
++ * 0.02\r
++ * - Added support for CK804 SATA controller.\r
++ *\r
++ * 0.01\r
++ * - Initial revision.\r
++ */\r
++\r
++#include <linux/config.h>\r
++#include <linux/version.h>\r
++#include <linux/kernel.h>\r
++#include <linux/module.h>\r
++#include <linux/pci.h>\r
++#include <linux/init.h>\r
++#include <linux/blkdev.h>\r
++#include <linux/delay.h>\r
++#include <linux/interrupt.h>\r
++#include "scsi.h"\r
++#include <scsi/scsi_host.h>\r
++#include <linux/libata.h>\r
++\r
++#define DRV_NAME "sata_nv"\r
++#define DRV_VERSION "0.11-Driver Package V1.21"\r
++\r
++#define NV_PORTS 2\r
++#define NV_PIO_MASK 0x1f\r
++#define NV_MWDMA_MASK 0x07\r
++#define NV_UDMA_MASK 0x7f\r
++#define NV_PORT0_SCR_REG_OFFSET 0x00\r
++#define NV_PORT1_SCR_REG_OFFSET 0x40\r
++\r
++#define NV_INT_STATUS 0x10\r
++#define NV_INT_STATUS_CK804 0x440\r
++#define NV_INT_STATUS_MCP55 0x440\r
++#define NV_INT_STATUS_PDEV_INT 0x01\r
++#define NV_INT_STATUS_PDEV_PM 0x02\r
++#define NV_INT_STATUS_PDEV_ADDED 0x04\r
++#define NV_INT_STATUS_PDEV_REMOVED 0x08\r
++#define NV_INT_STATUS_SDEV_INT 0x10\r
++#define NV_INT_STATUS_SDEV_PM 0x20\r
++#define NV_INT_STATUS_SDEV_ADDED 0x40\r
++#define NV_INT_STATUS_SDEV_REMOVED 0x80\r
++#define NV_INT_STATUS_PDEV_HOTPLUG (NV_INT_STATUS_PDEV_ADDED | \\r
++ NV_INT_STATUS_PDEV_REMOVED)\r
++#define NV_INT_STATUS_SDEV_HOTPLUG (NV_INT_STATUS_SDEV_ADDED | \\r
++ NV_INT_STATUS_SDEV_REMOVED)\r
++#define NV_INT_STATUS_HOTPLUG (NV_INT_STATUS_PDEV_HOTPLUG | \\r
++ NV_INT_STATUS_SDEV_HOTPLUG)\r
++\r
++#define NV_INT_ENABLE 0x11\r
++#define NV_INT_ENABLE_CK804 0x441\r
++#define NV_INT_ENABLE_MCP55 0x444\r
++#define NV_INT_ENABLE_PDEV_MASK 0x01\r
++#define NV_INT_ENABLE_PDEV_PM 0x02\r
++#define NV_INT_ENABLE_PDEV_ADDED 0x04\r
++#define NV_INT_ENABLE_PDEV_REMOVED 0x08\r
++#define NV_INT_ENABLE_SDEV_MASK 0x10\r
++#define NV_INT_ENABLE_SDEV_PM 0x20\r
++#define NV_INT_ENABLE_SDEV_ADDED 0x40\r
++#define NV_INT_ENABLE_SDEV_REMOVED 0x80\r
++#define NV_INT_ENABLE_PDEV_HOTPLUG (NV_INT_ENABLE_PDEV_ADDED | \\r
++ NV_INT_ENABLE_PDEV_REMOVED)\r
++#define NV_INT_ENABLE_SDEV_HOTPLUG (NV_INT_ENABLE_SDEV_ADDED | \\r
++ NV_INT_ENABLE_SDEV_REMOVED)\r
++#define NV_INT_ENABLE_HOTPLUG (NV_INT_ENABLE_PDEV_HOTPLUG | \\r
++ NV_INT_ENABLE_SDEV_HOTPLUG)\r
++\r
++#define NV_INT_CONFIG 0x12\r
++#define NV_INT_CONFIG_METHD 0x01 // 0 = INT, 1 = SMI\r
++#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E\r
++#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F\r
++\r
++// For PCI config register 20\r
++#define NV_MCP_SATA_CFG_20 0x50\r
++#define NV_MCP_SATA_CFG_20_SATA_SPACE_EN 0x04\r
++\r
++\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)\r
++#define RHAS3U7\r
++#endif\r
++#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,16)\r
++#define SLES10\r
++#endif\r
++\r
++//sgpio\r
++// Sgpio defines\r
++// SGPIO state defines\r
++#define NV_SGPIO_STATE_RESET 0\r
++#define NV_SGPIO_STATE_OPERATIONAL 1\r
++#define NV_SGPIO_STATE_ERROR 2\r
++\r
++// SGPIO command opcodes\r
++#define NV_SGPIO_CMD_RESET 0\r
++#define NV_SGPIO_CMD_READ_PARAMS 1\r
++#define NV_SGPIO_CMD_READ_DATA 2\r
++#define NV_SGPIO_CMD_WRITE_DATA 3\r
++\r
++// SGPIO command status defines\r
++#define NV_SGPIO_CMD_OK 0\r
++#define NV_SGPIO_CMD_ACTIVE 1\r
++#define NV_SGPIO_CMD_ERR 2\r
++\r
++#define NV_SGPIO_UPDATE_TICK 90\r
++#define NV_SGPIO_MIN_UPDATE_DELTA 33\r
++#define NV_CNTRLR_SHARE_INIT 2\r
++#define NV_SGPIO_MAX_ACTIVITY_ON 20\r
++#define NV_SGPIO_MIN_FORCE_OFF 5\r
++#define NV_SGPIO_PCI_CSR_OFFSET 0x58\r
++#define NV_SGPIO_PCI_CB_OFFSET 0x5C\r
++#define NV_SGPIO_DFLT_CB_SIZE 256\r
++#define NV_ON 1\r
++#define NV_OFF 0\r
++#ifndef bool\r
++#define bool u8\r
++#endif\r
++\r
++static inline unsigned int jiffies_to_msecs1(const unsigned long j)\r
++{\r
++#if HZ <= 1000 && !(1000 % HZ)\r
++ return (1000 / HZ) * j;\r
++#elif HZ > 1000 && !(HZ % 1000)\r
++ return (j + (HZ / 1000) - 1)/(HZ / 1000);\r
++#else\r
++ return (j * 1000) / HZ;\r
++#endif\r
++}\r
++\r
++#define BF_EXTRACT(v, off, bc) \\r
++ ((((u8)(v)) >> (off)) & ((1 << (bc)) - 1))\r
++\r
++#define BF_INS(v, ins, off, bc) \\r
++ (((v) & ~((((1 << (bc)) - 1)) << (off))) | \\r
++ (((u8)(ins)) << (off)))\r
++\r
++#define BF_EXTRACT_U32(v, off, bc) \\r
++ ((((u32)(v)) >> (off)) & ((1 << (bc)) - 1))\r
++\r
++#define BF_INS_U32(v, ins, off, bc) \\r
++ (((v) & ~((((1 << (bc)) - 1)) << (off))) | \\r
++ (((u32)(ins)) << (off)))\r
++\r
++#define GET_SGPIO_STATUS(v) BF_EXTRACT(v, 0, 2)\r
++#define GET_CMD_STATUS(v) BF_EXTRACT(v, 3, 2)\r
++#define GET_CMD(v) BF_EXTRACT(v, 5, 3)\r
++#define SET_CMD(v, cmd) BF_INS(v, cmd, 5, 3) \r
++\r
++#define GET_ENABLE(v) BF_EXTRACT_U32(v, 23, 1)\r
++#define SET_ENABLE(v) BF_INS_U32(v, 1, 23, 1)\r
++\r
++// Needs to have a u8 bit-field insert.\r
++#define GET_ACTIVITY(v) BF_EXTRACT(v, 5, 3)\r
++#define SET_ACTIVITY(v, on_off) BF_INS(v, on_off, 5, 3)\r
++\r
++union nv_sgpio_nvcr \r
++{\r
++ struct {\r
++ u8 init_cnt;\r
++ u8 cb_size;\r
++ u8 cbver;\r
++ u8 rsvd;\r
++ } bit;\r
++ u32 all;\r
++};\r
++\r
++union nv_sgpio_tx \r
++{\r
++ u8 tx_port[4];\r
++ u32 all;\r
++};\r
++\r
++struct nv_sgpio_cb \r
++{\r
++ u64 scratch_space;\r
++ union nv_sgpio_nvcr nvcr;\r
++ u32 cr0;\r
++ u32 rsvd[4];\r
++ union nv_sgpio_tx tx[2];\r
++};\r
++\r
++struct nv_sgpio_host_share\r
++{\r
++ spinlock_t *plock;\r
++ unsigned long *ptstamp;\r
++};\r
++\r
++struct nv_sgpio_host_flags\r
++{\r
++ u8 sgpio_enabled:1;\r
++ u8 need_update:1;\r
++ u8 rsvd:6;\r
++};\r
++ \r
++struct nv_host_sgpio\r
++{\r
++ struct nv_sgpio_host_flags flags;\r
++ u8 *pcsr;\r
++ struct nv_sgpio_cb *pcb; \r
++ struct nv_sgpio_host_share share;\r
++ struct timer_list sgpio_timer;\r
++};\r
++\r
++struct nv_sgpio_port_flags\r
++{\r
++ u8 last_state:1;\r
++ u8 recent_activity:1;\r
++ u8 rsvd:6;\r
++};\r
++\r
++struct nv_sgpio_led \r
++{\r
++ struct nv_sgpio_port_flags flags;\r
++ u8 force_off;\r
++ u8 last_cons_active;\r
++};\r
++\r
++struct nv_port_sgpio\r
++{\r
++ struct nv_sgpio_led activity;\r
++};\r
++\r
++static spinlock_t nv_sgpio_lock;\r
++static unsigned long nv_sgpio_tstamp;\r
++\r
++static inline void nv_sgpio_set_csr(u8 csr, unsigned long pcsr)\r
++{\r
++ outb(csr, pcsr);\r
++}\r
++\r
++static inline u8 nv_sgpio_get_csr(unsigned long pcsr)\r
++{\r
++ return inb(pcsr);\r
++}\r
++\r
++static inline u8 nv_sgpio_get_func(struct ata_host_set *host_set)\r
++{\r
++ u8 devfn = (to_pci_dev(host_set->dev))->devfn;\r
++ return (PCI_FUNC(devfn));\r
++}\r
++\r
++static inline u8 nv_sgpio_tx_host_offset(struct ata_host_set *host_set)\r
++{\r
++ return (nv_sgpio_get_func(host_set)/NV_CNTRLR_SHARE_INIT);\r
++}\r
++\r
++static inline u8 nv_sgpio_calc_tx_offset(u8 cntrlr, u8 channel)\r
++{\r
++ return (sizeof(union nv_sgpio_tx) - (NV_CNTRLR_SHARE_INIT *\r
++ (cntrlr % NV_CNTRLR_SHARE_INIT)) - channel - 1);\r
++}\r
++\r
++static inline u8 nv_sgpio_tx_port_offset(struct ata_port *ap)\r
++{\r
++ u8 cntrlr = nv_sgpio_get_func(ap->host_set);\r
++ return (nv_sgpio_calc_tx_offset(cntrlr, ap->port_no));\r
++}\r
++\r
++static inline bool nv_sgpio_capable(const struct pci_device_id *ent)\r
++{\r
++ if (ent->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2)\r
++ return 1;\r
++ else\r
++ return 0;\r
++}\r
++\r
++\r
++\r
++\r
++\r
++\r
++static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);\r
++static irqreturn_t nv_interrupt (int irq, void *dev_instance,\r
++ struct pt_regs *regs);\r
++static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);\r
++static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);\r
++static void nv_host_stop (struct ata_host_set *host_set);\r
++static int nv_port_start(struct ata_port *ap);\r
++static void nv_port_stop(struct ata_port *ap);\r
++static int nv_qc_issue(struct ata_queued_cmd *qc);\r
++static void nv_enable_hotplug(struct ata_probe_ent *probe_ent);\r
++static void nv_disable_hotplug(struct ata_host_set *host_set);\r
++static void nv_check_hotplug(struct ata_host_set *host_set);\r
++static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent);\r
++static void nv_disable_hotplug_ck804(struct ata_host_set *host_set);\r
++static void nv_check_hotplug_ck804(struct ata_host_set *host_set);\r
++static void nv_enable_hotplug_mcp55(struct ata_probe_ent *probe_ent);\r
++static void nv_disable_hotplug_mcp55(struct ata_host_set *host_set);\r
++static void nv_check_hotplug_mcp55(struct ata_host_set *host_set);\r
++enum nv_host_type\r
++{\r
++ GENERIC,\r
++ NFORCE2,\r
++ NFORCE3,\r
++ CK804,\r
++ MCP55\r
++};\r
++\r
++static struct pci_device_id nv_pci_tbl[] = {\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,\r
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },\r
++ { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,\r
++ PCI_ANY_ID, PCI_ANY_ID,\r
++ PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },\r
++ { 0, } /* terminate list */\r
++};\r
++\r
++#define NV_HOST_FLAGS_SCR_MMIO 0x00000001\r
++\r
++struct nv_host_desc\r
++{\r
++ enum nv_host_type host_type;\r
++ void (*enable_hotplug)(struct ata_probe_ent *probe_ent);\r
++ void (*disable_hotplug)(struct ata_host_set *host_set);\r
++ void (*check_hotplug)(struct ata_host_set *host_set);\r
++\r
++};\r
++static struct nv_host_desc nv_device_tbl[] = {\r
++ {\r
++ .host_type = GENERIC,\r
++ .enable_hotplug = NULL,\r
++ .disable_hotplug= NULL,\r
++ .check_hotplug = NULL,\r
++ },\r
++ {\r
++ .host_type = NFORCE2,\r
++ .enable_hotplug = nv_enable_hotplug,\r
++ .disable_hotplug= nv_disable_hotplug,\r
++ .check_hotplug = nv_check_hotplug,\r
++ },\r
++ {\r
++ .host_type = NFORCE3,\r
++ .enable_hotplug = nv_enable_hotplug,\r
++ .disable_hotplug= nv_disable_hotplug,\r
++ .check_hotplug = nv_check_hotplug,\r
++ },\r
++ { .host_type = CK804,\r
++ .enable_hotplug = nv_enable_hotplug_ck804,\r
++ .disable_hotplug= nv_disable_hotplug_ck804,\r
++ .check_hotplug = nv_check_hotplug_ck804,\r
++ },\r
++ { .host_type = MCP55,\r
++ .enable_hotplug = nv_enable_hotplug_mcp55,\r
++ .disable_hotplug= nv_disable_hotplug_mcp55,\r
++ .check_hotplug = nv_check_hotplug_mcp55,\r
++ },\r
++};\r
++\r
++\r
++struct nv_host\r
++{\r
++ struct nv_host_desc *host_desc;\r
++ unsigned long host_flags;\r
++ struct nv_host_sgpio host_sgpio;\r
++ struct pci_dev *pdev;\r
++};\r
++\r
++struct nv_port\r
++{\r
++ struct nv_port_sgpio port_sgpio;\r
++};\r
++\r
++// SGPIO function prototypes\r
++static void nv_sgpio_init(struct pci_dev *pdev, struct nv_host *phost);\r
++static void nv_sgpio_reset(u8 *pcsr);\r
++static void nv_sgpio_set_timer(struct timer_list *ptimer, \r
++ unsigned int timeout_msec);\r
++static void nv_sgpio_timer_handler(unsigned long ptr);\r
++static void nv_sgpio_host_cleanup(struct nv_host *host);\r
++static bool nv_sgpio_update_led(struct nv_sgpio_led *led, bool *on_off);\r
++static void nv_sgpio_clear_all_leds(struct ata_port *ap);\r
++static bool nv_sgpio_send_cmd(struct nv_host *host, u8 cmd);\r
++\r
++\r
++static struct pci_driver nv_pci_driver = {\r
++ .name = DRV_NAME,\r
++ .id_table = nv_pci_tbl,\r
++ .probe = nv_init_one,\r
++ .remove = ata_pci_remove_one,\r
++};\r
++\r
++\r
++#ifdef SLES10\r
++static struct scsi_host_template nv_sht = {\r
++#else\r
++static Scsi_Host_Template nv_sht = {\r
++#endif\r
++ .module = THIS_MODULE,\r
++ .name = DRV_NAME,\r
++#ifdef RHAS3U7\r
++ .detect = ata_scsi_detect,\r
++ .release = ata_scsi_release,\r
++#endif\r
++ .ioctl = ata_scsi_ioctl,\r
++ .queuecommand = ata_scsi_queuecmd,\r
++ .eh_strategy_handler = ata_scsi_error,\r
++ .can_queue = ATA_DEF_QUEUE,\r
++ .this_id = ATA_SHT_THIS_ID,\r
++ .sg_tablesize = LIBATA_MAX_PRD,\r
++ .max_sectors = ATA_MAX_SECTORS,\r
++ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,\r
++#ifdef RHAS3U7\r
++ .use_new_eh_code = ATA_SHT_NEW_EH_CODE,\r
++#endif\r
++ .emulated = ATA_SHT_EMULATED,\r
++ .use_clustering = ATA_SHT_USE_CLUSTERING,\r
++ .proc_name = DRV_NAME,\r
++#ifndef RHAS3U7\r
++ .dma_boundary = ATA_DMA_BOUNDARY,\r
++ .slave_configure = ata_scsi_slave_config,\r
++#endif\r
++ .bios_param = ata_std_bios_param,\r
++};\r
++\r
++static struct ata_port_operations nv_ops = {\r
++ .port_disable = ata_port_disable,\r
++ .tf_load = ata_tf_load,\r
++ .tf_read = ata_tf_read,\r
++ .exec_command = ata_exec_command,\r
++ .check_status = ata_check_status,\r
++ .dev_select = ata_std_dev_select,\r
++ .phy_reset = sata_phy_reset,\r
++ .bmdma_setup = ata_bmdma_setup,\r
++ .bmdma_start = ata_bmdma_start,\r
++ .bmdma_stop = ata_bmdma_stop,\r
++ .bmdma_status = ata_bmdma_status,\r
++ .qc_prep = ata_qc_prep,\r
++ .qc_issue = nv_qc_issue,\r
++ .eng_timeout = ata_eng_timeout,\r
++ .irq_handler = nv_interrupt,\r
++ .irq_clear = ata_bmdma_irq_clear,\r
++ .scr_read = nv_scr_read,\r
++ .scr_write = nv_scr_write,\r
++ .port_start = nv_port_start,\r
++ .port_stop = nv_port_stop,\r
++ .host_stop = nv_host_stop,\r
++};\r
++\r
++/* FIXME: The hardware provides the necessary SATA PHY controls\r
++ * to support ATA_FLAG_SATA_RESET. However, it is currently\r
++ * necessary to disable that flag, to solve misdetection problems.\r
++ * See http://bugme.osdl.org/show_bug.cgi?id=3352 for more info.\r
++ *\r
++ * This problem really needs to be investigated further. But in the\r
++ * meantime, we avoid ATA_FLAG_SATA_RESET to get people working.\r
++ */\r
++static struct ata_port_info nv_port_info = {\r
++ .sht = &nv_sht,\r
++ .host_flags = ATA_FLAG_SATA |\r
++ /* ATA_FLAG_SATA_RESET | */\r
++ ATA_FLAG_SRST |\r
++ ATA_FLAG_NO_LEGACY,\r
++ .pio_mask = NV_PIO_MASK,\r
++ .mwdma_mask = NV_MWDMA_MASK,\r
++ .udma_mask = NV_UDMA_MASK,\r
++ .port_ops = &nv_ops,\r
++};\r
++\r
++MODULE_AUTHOR("NVIDIA");\r
++MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");\r
++MODULE_LICENSE("GPL");\r
++MODULE_DEVICE_TABLE(pci, nv_pci_tbl);\r
++MODULE_VERSION(DRV_VERSION);\r
++\r
++static irqreturn_t nv_interrupt (int irq, void *dev_instance,\r
++ struct pt_regs *regs)\r
++{\r
++ struct ata_host_set *host_set = dev_instance;\r
++ struct nv_host *host = host_set->private_data;\r
++ unsigned int i;\r
++ unsigned int handled = 0;\r
++ unsigned long flags;\r
++\r
++ spin_lock_irqsave(&host_set->lock, flags);\r
++\r
++ for (i = 0; i < host_set->n_ports; i++) {\r
++ struct ata_port *ap;\r
++\r
++ ap = host_set->ports[i];\r
++#ifdef ATA_FLAG_NOINTR\r
++ if (ap &&\r
++ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {\r
++#else\r
++ if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {\r
++#endif \r
++ struct ata_queued_cmd *qc;\r
++\r
++ qc = ata_qc_from_tag(ap, ap->active_tag);\r
++ if (qc && (!(qc->tf.ctl & ATA_NIEN)))\r
++ handled += ata_host_intr(ap, qc);\r
++ else\r
++ // No request pending? Clear interrupt status\r
++ // anyway, in case there's one pending.\r
++ ap->ops->check_status(ap);\r
++ }\r
++\r
++ }\r
++\r
++ if (host->host_desc->check_hotplug)\r
++ host->host_desc->check_hotplug(host_set);\r
++\r
++ spin_unlock_irqrestore(&host_set->lock, flags);\r
++\r
++ return IRQ_RETVAL(handled);\r
++}\r
++\r
++static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)\r
++{\r
++ struct ata_host_set *host_set = ap->host_set;\r
++ struct nv_host *host = host_set->private_data;\r
++\r
++ if (sc_reg > SCR_CONTROL)\r
++ return 0xffffffffU;\r
++\r
++ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)\r
++ return readl((void*)ap->ioaddr.scr_addr + (sc_reg * 4));\r
++ else\r
++ return inl(ap->ioaddr.scr_addr + (sc_reg * 4));\r
++}\r
++\r
++static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)\r
++{\r
++ struct ata_host_set *host_set = ap->host_set;\r
++ struct nv_host *host = host_set->private_data;\r
++\r
++ if (sc_reg > SCR_CONTROL)\r
++ return;\r
++\r
++ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)\r
++ writel(val, (void*)ap->ioaddr.scr_addr + (sc_reg * 4));\r
++ else\r
++ outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));\r
++}\r
++\r
++static void nv_host_stop (struct ata_host_set *host_set)\r
++{\r
++ struct nv_host *host = host_set->private_data;\r
++\r
++ // Disable hotplug event interrupts.\r
++ if (host->host_desc->disable_hotplug)\r
++ host->host_desc->disable_hotplug(host_set);\r
++\r
++ nv_sgpio_host_cleanup(host);\r
++ kfree(host);\r
++#ifdef RHAS3U7\r
++\r
++ ata_host_stop(host_set);\r
++#endif\r
++}\r
++\r
++static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)\r
++{\r
++ static int printed_version = 0;\r
++ struct nv_host *host;\r
++ struct ata_port_info *ppi;\r
++ struct ata_probe_ent *probe_ent;\r
++ int pci_dev_busy = 0;\r
++ int rc;\r
++ u32 bar;\r
++\r
++ // Make sure this is a SATA controller by counting the number of bars\r
++ // (NVIDIA SATA controllers will always have six bars). Otherwise,\r
++ // it's an IDE controller and we ignore it.\r
++ for (bar=0; bar<6; bar++)\r
++ if (pci_resource_start(pdev, bar) == 0)\r
++ return -ENODEV;\r
++\r
++ if (!printed_version++)\r
++ printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");\r
++\r
++ rc = pci_enable_device(pdev);\r
++ if (rc)\r
++ goto err_out;\r
++\r
++ rc = pci_request_regions(pdev, DRV_NAME);\r
++ if (rc) {\r
++ pci_dev_busy = 1;\r
++ goto err_out_disable;\r
++ }\r
++\r
++ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);\r
++ if (rc)\r
++ goto err_out_regions;\r
++#ifndef RHAS3U7\r
++ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);\r
++ if (rc)\r
++ goto err_out_regions;\r
++#endif\r
++ rc = -ENOMEM;\r
++\r
++ ppi = &nv_port_info;\r
++\r
++ probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY|ATA_PORT_SECONDARY);\r
++\r
++ if (!probe_ent)\r
++ goto err_out_regions;\r
++\r
++ host = kmalloc(sizeof(struct nv_host), GFP_KERNEL);\r
++ if (!host)\r
++ goto err_out_free_ent;\r
++\r
++ memset(host, 0, sizeof(struct nv_host));\r
++ host->host_desc = &nv_device_tbl[ent->driver_data];\r
++\r
++ probe_ent->private_data = host;\r
++\r
++ if (pci_resource_flags(pdev, 5) & IORESOURCE_MEM)\r
++ host->host_flags |= NV_HOST_FLAGS_SCR_MMIO;\r
++\r
++ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) {\r
++ unsigned long base;\r
++\r
++ probe_ent->mmio_base = ioremap(pci_resource_start(pdev, 5),\r
++ pci_resource_len(pdev, 5));\r
++ if (probe_ent->mmio_base == NULL) {\r
++ rc = -EIO;\r
++ goto err_out_free_host;\r
++ }\r
++\r
++ base = (unsigned long)probe_ent->mmio_base;\r
++\r
++ probe_ent->port[0].scr_addr =\r
++ base + NV_PORT0_SCR_REG_OFFSET;\r
++ probe_ent->port[1].scr_addr =\r
++ base + NV_PORT1_SCR_REG_OFFSET;\r
++ } else {\r
++\r
++ probe_ent->port[0].scr_addr =\r
++ pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET;\r
++ probe_ent->port[1].scr_addr =\r
++ pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET;\r
++ }\r
++\r
++ pci_set_master(pdev);\r
++#ifdef RHAS3U7\r
++ ata_add_to_probe_list(probe_ent);\r
++ \r
++ if (nv_sgpio_capable(ent))\r
++ nv_sgpio_init(pdev, host);\r
++ // Enable hotplug event interrupts.\r
++ if (host->host_desc->enable_hotplug)\r
++ host->host_desc->enable_hotplug(probe_ent);\r
++\r
++ return 0;\r
++#else\r
++ rc = ata_device_add(probe_ent);\r
++ if (rc != NV_PORTS)\r
++ goto err_out_iounmap;\r
++ \r
++ if (nv_sgpio_capable(ent))\r
++ nv_sgpio_init(pdev, host);\r
++ // Enable hotplug event interrupts.\r
++ if (host->host_desc->enable_hotplug)\r
++ host->host_desc->enable_hotplug(probe_ent);\r
++\r
++ kfree(probe_ent);\r
++\r
++ return 0;\r
++\r
++err_out_iounmap:\r
++ if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)\r
++ iounmap(probe_ent->mmio_base);\r
++#endif\r
++err_out_free_host:\r
++ kfree(host);\r
++err_out_free_ent:\r
++ kfree(probe_ent);\r
++err_out_regions:\r
++ pci_release_regions(pdev);\r
++err_out_disable:\r
++ if (!pci_dev_busy)\r
++ pci_disable_device(pdev);\r
++err_out:\r
++ return rc;\r
++}\r
++\r
++\r
++static int nv_port_start(struct ata_port *ap)\r
++{\r
++ int stat;\r
++ struct nv_port *port;\r
++\r
++ stat = ata_port_start(ap);\r
++ if (stat) {\r
++ return stat;\r
++ }\r
++\r
++ port = kmalloc(sizeof(struct nv_port), GFP_KERNEL);\r
++ if (!port) \r
++ goto err_out_no_free;\r
++\r
++ memset(port, 0, sizeof(struct nv_port));\r
++\r
++ ap->private_data = port;\r
++ return 0;\r
++\r
++err_out_no_free:\r
++ return 1;\r
++}\r
++\r
++static void nv_port_stop(struct ata_port *ap)\r
++{\r
++ nv_sgpio_clear_all_leds(ap);\r
++\r
++ if (ap->private_data) {\r
++ kfree(ap->private_data);\r
++ ap->private_data = NULL;\r
++ }\r
++ ata_port_stop(ap);\r
++}\r
++\r
++static int nv_qc_issue(struct ata_queued_cmd *qc)\r
++{\r
++ struct nv_port *port = qc->ap->private_data;\r
++\r
++ if (port) \r
++ port->port_sgpio.activity.flags.recent_activity = 1;\r
++ return (ata_qc_issue_prot(qc));\r
++}\r
++\r
++\r
++\r
++\r
++static void nv_enable_hotplug(struct ata_probe_ent *probe_ent)\r
++{\r
++ u8 intr_mask;\r
++\r
++ outb(NV_INT_STATUS_HOTPLUG,\r
++ probe_ent->port[0].scr_addr + NV_INT_STATUS);\r
++\r
++ intr_mask = inb(probe_ent->port[0].scr_addr + NV_INT_ENABLE);\r
++ intr_mask |= NV_INT_ENABLE_HOTPLUG;\r
++\r
++ outb(intr_mask, probe_ent->port[0].scr_addr + NV_INT_ENABLE);\r
++}\r
++\r
++static void nv_disable_hotplug(struct ata_host_set *host_set)\r
++{\r
++ u8 intr_mask;\r
++\r
++ intr_mask = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);\r
++\r
++ intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);\r
++\r
++ outb(intr_mask, host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);\r
++}\r
++\r
++static void nv_check_hotplug(struct ata_host_set *host_set)\r
++{\r
++ u8 intr_status;\r
++\r
++ intr_status = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);\r
++\r
++ // Clear interrupt status.\r
++ outb(0xff, host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);\r
++\r
++ if (intr_status & NV_INT_STATUS_HOTPLUG) {\r
++ if (intr_status & NV_INT_STATUS_PDEV_ADDED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Primary device added\n");\r
++\r
++ if (intr_status & NV_INT_STATUS_PDEV_REMOVED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Primary device removed\n");\r
++\r
++ if (intr_status & NV_INT_STATUS_SDEV_ADDED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Secondary device added\n");\r
++\r
++ if (intr_status & NV_INT_STATUS_SDEV_REMOVED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Secondary device removed\n");\r
++ }\r
++}\r
++\r
++static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent)\r
++{\r
++ struct pci_dev *pdev = to_pci_dev(probe_ent->dev);\r
++ u8 intr_mask;\r
++ u8 regval;\r
++\r
++	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);\r
++ regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;\r
++ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);\r
++\r
++ writeb(NV_INT_STATUS_HOTPLUG, probe_ent->mmio_base + NV_INT_STATUS_CK804);\r
++\r
++ intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_CK804);\r
++ intr_mask |= NV_INT_ENABLE_HOTPLUG;\r
++\r
++ writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_CK804);\r
++}\r
++\r
++static void nv_disable_hotplug_ck804(struct ata_host_set *host_set)\r
++{\r
++ struct pci_dev *pdev = to_pci_dev(host_set->dev);\r
++ u8 intr_mask;\r
++ u8 regval;\r
++\r
++ intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_CK804);\r
++\r
++ intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);\r
++\r
++ writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_CK804);\r
++\r
++ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);\r
++ regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;\r
++ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);\r
++}\r
++\r
++static void nv_check_hotplug_ck804(struct ata_host_set *host_set)\r
++{\r
++ u8 intr_status;\r
++\r
++ intr_status = readb(host_set->mmio_base + NV_INT_STATUS_CK804);\r
++\r
++ // Clear interrupt status.\r
++ writeb(0xff, host_set->mmio_base + NV_INT_STATUS_CK804);\r
++\r
++ if (intr_status & NV_INT_STATUS_HOTPLUG) {\r
++ if (intr_status & NV_INT_STATUS_PDEV_ADDED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Primary device added\n");\r
++\r
++ if (intr_status & NV_INT_STATUS_PDEV_REMOVED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Primary device removed\n");\r
++\r
++ if (intr_status & NV_INT_STATUS_SDEV_ADDED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Secondary device added\n");\r
++\r
++ if (intr_status & NV_INT_STATUS_SDEV_REMOVED)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Secondary device removed\n");\r
++ }\r
++}\r
++static void nv_enable_hotplug_mcp55(struct ata_probe_ent *probe_ent)\r
++{\r
++ struct pci_dev *pdev = to_pci_dev(probe_ent->dev);\r
++ u8 intr_mask;\r
++ u8 regval;\r
++\r
++ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);\r
++ regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;\r
++ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);\r
++\r
++ writeb(0x0c, probe_ent->mmio_base + NV_INT_STATUS_MCP55);\r
++ writeb(0x0c, probe_ent->mmio_base + NV_INT_STATUS_MCP55+2);\r
++\r
++ intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_MCP55);\r
++ intr_mask |= 0x0c;\r
++ writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_MCP55);\r
++\r
++ intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_MCP55+2);\r
++ intr_mask |= 0x0c;\r
++ writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_MCP55+2);\r
++}\r
++\r
++static void nv_disable_hotplug_mcp55(struct ata_host_set *host_set)\r
++{\r
++ struct pci_dev *pdev = to_pci_dev(host_set->dev);\r
++ u8 intr_mask;\r
++ u8 regval;\r
++\r
++ intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_MCP55);\r
++ intr_mask &= ~(0x0C);\r
++ writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_MCP55);\r
++ \r
++ intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_MCP55+2);\r
++ intr_mask &= ~(0x0C);\r
++ writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_MCP55+2);\r
++\r
++ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);\r
++ regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;\r
++ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);\r
++}\r
++\r
++static void nv_check_hotplug_mcp55(struct ata_host_set *host_set)\r
++{\r
++ u8 intr_status,intr_status1;\r
++\r
++ intr_status = readb(host_set->mmio_base + NV_INT_STATUS_MCP55);\r
++ intr_status1 = readb(host_set->mmio_base + NV_INT_STATUS_MCP55+2);\r
++\r
++ // Clear interrupt status.\r
++ writeb(0xff, host_set->mmio_base + NV_INT_STATUS_MCP55);\r
++ writeb(0xff, host_set->mmio_base + NV_INT_STATUS_MCP55+2); \r
++\r
++ if ((intr_status & 0x0c) || (intr_status1&0x0c)) {\r
++ if (intr_status & 0x04)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Primary device added\n");\r
++\r
++ if (intr_status & 0x08)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Primary device removed\n");\r
++\r
++ if (intr_status1 & 0x04)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Secondary device added\n");\r
++\r
++ if (intr_status1 & 0x08)\r
++ printk(KERN_WARNING "nv_sata: "\r
++ "Secondary device removed\n");\r
++ }\r
++}\r
++\r
++\r
++static void nv_sgpio_init(struct pci_dev *pdev, struct nv_host *phost)\r
++{\r
++ u16 csr_add; \r
++ u32 cb_add, temp32;\r
++ struct device *dev = pci_dev_to_dev(pdev);\r
++ struct ata_host_set *host_set = dev_get_drvdata(dev);\r
++ u8 pro=0;\r
++ pci_read_config_word(pdev, NV_SGPIO_PCI_CSR_OFFSET, &csr_add);\r
++ pci_read_config_dword(pdev, NV_SGPIO_PCI_CB_OFFSET, &cb_add);\r
++ pci_read_config_byte(pdev, 0xA4, &pro);\r
++ \r
++ if (csr_add == 0 || cb_add == 0) \r
++ return;\r
++ \r
++\r
++ if (!(pro&0x40))\r
++ return; \r
++ \r
++ \r
++ temp32 = csr_add;\r
++ phost->host_sgpio.pcsr = (void *)temp32;\r
++ phost->host_sgpio.pcb = phys_to_virt(cb_add);\r
++\r
++ if (phost->host_sgpio.pcb->nvcr.bit.init_cnt!=0x2 || phost->host_sgpio.pcb->nvcr.bit.cbver!=0x0)\r
++ return;\r
++ \r
++ if (temp32 <=0x200 || temp32 >=0xFFFE )\r
++ return;\r
++ \r
++ \r
++ if (cb_add<=0x80000 || cb_add>=0x9FC00)\r
++ return;\r
++ \r
++ \r
++ if (phost->host_sgpio.pcb->scratch_space == 0) {\r
++ spin_lock_init(&nv_sgpio_lock);\r
++ phost->host_sgpio.share.plock = &nv_sgpio_lock;\r
++ phost->host_sgpio.share.ptstamp = &nv_sgpio_tstamp;\r
++ phost->host_sgpio.pcb->scratch_space = \r
++ (unsigned long)&phost->host_sgpio.share;\r
++ spin_lock(phost->host_sgpio.share.plock);\r
++ nv_sgpio_reset(phost->host_sgpio.pcsr);\r
++ phost->host_sgpio.pcb->cr0 = \r
++ SET_ENABLE(phost->host_sgpio.pcb->cr0);\r
++\r
++ spin_unlock(phost->host_sgpio.share.plock);\r
++ }\r
++\r
++ phost->host_sgpio.share = \r
++ *(struct nv_sgpio_host_share *)(unsigned long)\r
++ phost->host_sgpio.pcb->scratch_space;\r
++ phost->host_sgpio.flags.sgpio_enabled = 1;\r
++ phost->pdev = pdev;\r
++ init_timer(&phost->host_sgpio.sgpio_timer);\r
++ phost->host_sgpio.sgpio_timer.data = (unsigned long)phost;\r
++ nv_sgpio_set_timer(&phost->host_sgpio.sgpio_timer, \r
++ NV_SGPIO_UPDATE_TICK);\r
++}\r
++\r
++static void __nv_sgpio_timer_handler(unsigned long context);\r
++static void nv_sgpio_set_timer(struct timer_list *ptimer, unsigned int timeout_msec)\r
++{\r
++ if (!ptimer)\r
++ return;\r
++ ptimer->function = __nv_sgpio_timer_handler;\r
++ ptimer->expires = msecs_to_jiffies(timeout_msec) + jiffies;\r
++ add_timer(ptimer);\r
++}\r
++static void __nv_sgpio_timer_handler(unsigned long context)\r
++{\r
++ struct nv_host *phost = (struct nv_host*)context;\r
++ struct device *dev = pci_dev_to_dev(phost->pdev);\r
++ struct ata_host_set *host_set = dev_get_drvdata(dev);\r
++ \r
++ if (!host_set)\r
++ nv_sgpio_set_timer(&phost->host_sgpio.sgpio_timer, \r
++ NV_SGPIO_UPDATE_TICK);\r
++ else\r
++ nv_sgpio_timer_handler(host_set);\r
++ \r
++}\r
++\r
++static void nv_sgpio_timer_handler(unsigned long context)\r
++{\r
++\r
++ struct ata_host_set *host_set = (struct ata_host_set *)context;\r
++ struct nv_host *host;\r
++ u8 count, host_offset, port_offset;\r
++ union nv_sgpio_tx tx;\r
++ bool on_off;\r
++ unsigned long mask = 0xFFFF;\r
++ struct nv_port *port;\r
++\r
++ if (!host_set)\r
++ goto err_out;\r
++ else \r
++ host = (struct nv_host *)host_set->private_data;\r
++\r
++ if (!host->host_sgpio.flags.sgpio_enabled)\r
++ goto err_out;\r
++\r
++ host_offset = nv_sgpio_tx_host_offset(host_set);\r
++\r
++ spin_lock(host->host_sgpio.share.plock);\r
++ tx = host->host_sgpio.pcb->tx[host_offset];\r
++ spin_unlock(host->host_sgpio.share.plock);\r
++\r
++ for (count = 0; count < host_set->n_ports; count++) {\r
++ struct ata_port *ap; \r
++\r
++ ap = host_set->ports[count];\r
++ \r
++ if (!(ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)))\r
++ continue;\r
++\r
++ port = (struct nv_port *)ap->private_data;\r
++ if (!port)\r
++ continue; \r
++ port_offset = nv_sgpio_tx_port_offset(ap);\r
++ on_off = GET_ACTIVITY(tx.tx_port[port_offset]);\r
++ if (nv_sgpio_update_led(&port->port_sgpio.activity, &on_off)) {\r
++ tx.tx_port[port_offset] = \r
++ SET_ACTIVITY(tx.tx_port[port_offset], on_off);\r
++ host->host_sgpio.flags.need_update = 1;\r
++ }\r
++ }\r
++\r
++\r
++ if (host->host_sgpio.flags.need_update) {\r
++ spin_lock(host->host_sgpio.share.plock); \r
++ if (nv_sgpio_get_func(host_set) \r
++ % NV_CNTRLR_SHARE_INIT == 0) {\r
++ host->host_sgpio.pcb->tx[host_offset].all &= mask;\r
++ mask = mask << 16;\r
++ tx.all &= mask;\r
++ } else {\r
++ tx.all &= mask;\r
++ mask = mask << 16;\r
++ host->host_sgpio.pcb->tx[host_offset].all &= mask;\r
++ }\r
++ host->host_sgpio.pcb->tx[host_offset].all |= tx.all;\r
++ spin_unlock(host->host_sgpio.share.plock); \r
++ \r
++ if (nv_sgpio_send_cmd(host, NV_SGPIO_CMD_WRITE_DATA)) { \r
++ host->host_sgpio.flags.need_update = 0;\r
++ return;\r
++ }\r
++ } else {\r
++ nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer, \r
++ NV_SGPIO_UPDATE_TICK);\r
++ }\r
++err_out:\r
++ return;\r
++}\r
++\r
++static bool nv_sgpio_send_cmd(struct nv_host *host, u8 cmd)\r
++{\r
++ u8 csr;\r
++ unsigned long *ptstamp;\r
++\r
++ spin_lock(host->host_sgpio.share.plock); \r
++ ptstamp = host->host_sgpio.share.ptstamp;\r
++ if (jiffies_to_msecs1(jiffies - *ptstamp) >= NV_SGPIO_MIN_UPDATE_DELTA) {\r
++ csr = \r
++ nv_sgpio_get_csr((unsigned long)host->host_sgpio.pcsr);\r
++ if ((GET_SGPIO_STATUS(csr) != NV_SGPIO_STATE_OPERATIONAL) ||\r
++ (GET_CMD_STATUS(csr) == NV_SGPIO_CMD_ACTIVE)) {\r
++ //nv_sgpio_reset(host->host_sgpio.pcsr);\r
++ } else {\r
++ host->host_sgpio.pcb->cr0 = \r
++ SET_ENABLE(host->host_sgpio.pcb->cr0);\r
++ csr = 0;\r
++ csr = SET_CMD(csr, cmd);\r
++ nv_sgpio_set_csr(csr, \r
++ (unsigned long)host->host_sgpio.pcsr);\r
++ *ptstamp = jiffies;\r
++ }\r
++ spin_unlock(host->host_sgpio.share.plock);\r
++ nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer, \r
++ NV_SGPIO_UPDATE_TICK);\r
++ return 1;\r
++ } else {\r
++ spin_unlock(host->host_sgpio.share.plock);\r
++ nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer, \r
++ (NV_SGPIO_MIN_UPDATE_DELTA - \r
++ jiffies_to_msecs1(jiffies - *ptstamp)));\r
++ return 0;\r
++ }\r
++}\r
++\r
++static bool nv_sgpio_update_led(struct nv_sgpio_led *led, bool *on_off)\r
++{\r
++ bool need_update = 0;\r
++\r
++ if (led->force_off > 0) {\r
++ led->force_off--;\r
++ } else if (led->flags.recent_activity ^ led->flags.last_state) {\r
++ *on_off = led->flags.recent_activity;\r
++ led->flags.last_state = led->flags.recent_activity;\r
++ need_update = 1;\r
++ } else if ((led->flags.recent_activity & led->flags.last_state) &&\r
++ (led->last_cons_active >= NV_SGPIO_MAX_ACTIVITY_ON)) {\r
++ *on_off = NV_OFF;\r
++ led->flags.last_state = NV_OFF;\r
++ led->force_off = NV_SGPIO_MIN_FORCE_OFF;\r
++ need_update = 1;\r
++ }\r
++\r
++ if (*on_off) \r
++ led->last_cons_active++; \r
++ else\r
++ led->last_cons_active = 0;\r
++\r
++ led->flags.recent_activity = 0;\r
++ return need_update;\r
++}\r
++\r
++static void nv_sgpio_reset(u8 *pcsr)\r
++{\r
++ u8 csr;\r
++\r
++ csr = nv_sgpio_get_csr((unsigned long)pcsr);\r
++ if (GET_SGPIO_STATUS(csr) == NV_SGPIO_STATE_RESET) {\r
++ csr = 0;\r
++ csr = SET_CMD(csr, NV_SGPIO_CMD_RESET);\r
++ nv_sgpio_set_csr(csr, (unsigned long)pcsr);\r
++ }\r
++ csr = 0;\r
++ csr = SET_CMD(csr, NV_SGPIO_CMD_READ_PARAMS);\r
++ nv_sgpio_set_csr(csr, (unsigned long)pcsr);\r
++}\r
++\r
++static void nv_sgpio_host_cleanup(struct nv_host *host)\r
++{\r
++ u8 csr;\r
++ if (!host)\r
++ return;\r
++\r
++ if (host->host_sgpio.flags.sgpio_enabled){\r
++ spin_lock(host->host_sgpio.share.plock);\r
++ host->host_sgpio.pcb->cr0 = \r
++ SET_ENABLE(host->host_sgpio.pcb->cr0);\r
++ csr = 0;\r
++ csr = SET_CMD(csr, NV_SGPIO_CMD_WRITE_DATA);\r
++ nv_sgpio_set_csr(csr, \r
++ (unsigned long)host->host_sgpio.pcsr);\r
++ spin_unlock(host->host_sgpio.share.plock);\r
++ \r
++ if (timer_pending(&host->host_sgpio.sgpio_timer))\r
++ del_timer(&host->host_sgpio.sgpio_timer);\r
++ host->host_sgpio.flags.sgpio_enabled = 0;\r
++ host->host_sgpio.pcb->scratch_space = 0;\r
++ }\r
++ \r
++}\r
++\r
++static void nv_sgpio_clear_all_leds(struct ata_port *ap)\r
++{\r
++ struct nv_port *port = ap->private_data;\r
++ struct nv_host *host;\r
++ u8 host_offset, port_offset;\r
++\r
++ if (!port || !ap->host_set)\r
++ return;\r
++ if (!ap->host_set->private_data)\r
++ return;\r
++\r
++ host = ap->host_set->private_data;\r
++ if (!host->host_sgpio.flags.sgpio_enabled)\r
++ return;\r
++\r
++ host_offset = nv_sgpio_tx_host_offset(ap->host_set);\r
++ port_offset = nv_sgpio_tx_port_offset(ap);\r
++\r
++ spin_lock(host->host_sgpio.share.plock);\r
++ host->host_sgpio.pcb->tx[host_offset].tx_port[port_offset] = 0;\r
++ host->host_sgpio.flags.need_update = 1;\r
++ spin_unlock(host->host_sgpio.share.plock);\r
++}\r
++\r
++\r
++\r
++static int __init nv_init(void)\r
++{\r
++#ifdef RHAS3U7\r
++ int rc;\r
++ rc = pci_module_init(&nv_pci_driver);\r
++ if (rc)\r
++ return rc;\r
++ \r
++ rc = scsi_register_module(MODULE_SCSI_HA, &nv_sht);\r
++ if (rc) {\r
++ pci_unregister_driver(&nv_pci_driver);\r
++ /* TODO: does scsi_register_module return errno val? */\r
++ return -ENODEV;\r
++ }\r
++\r
++ return 0;\r
++#else\r
++ return pci_module_init(&nv_pci_driver);\r
++#endif\r
++}\r
++\r
++static void __exit nv_exit(void)\r
++{\r
++#ifdef RHAS3U7\r
++ scsi_unregister_module(MODULE_SCSI_HA, &nv_sht);\r
++#endif\r
++ pci_unregister_driver(&nv_pci_driver);\r
++\r
++}\r
++\r
++module_init(nv_init);\r
++module_exit(nv_exit);\r