git.pld-linux.org Git - packages/kernel.git/commitdiff
- from Broadcom http://www.broadcom.com/support/ethernet_nic/driver-sla.php?driver...
author marcus <marcus@pld-linux.org>
Fri, 8 Feb 2008 09:28:30 +0000 (09:28 +0000)
committer cvs2git <feedback@pld-linux.org>
Sun, 24 Jun 2012 12:13:13 +0000 (12:13 +0000)
Changed files:
    linux-tg3-3.81c.patch -> 1.1

linux-tg3-3.81c.patch [new file with mode: 0644]
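
The patch below carries Broadcom's out-of-tree tg3 3.81c driver source, which builds against a range of kernel releases by selecting API variants at compile time with LINUX_VERSION_CODE guards (see the #if blocks around the includes and the interrupt handlers). A minimal sketch of that pattern follows, for illustration only and not part of the patch; the handler name is made up.

/* Illustrative sketch only -- not taken from linux-tg3-3.81c.patch.
 * Compile-time selection of the interrupt handler prototype, as done
 * throughout the patch: 0x20613 == KERNEL_VERSION(2,6,19), the release
 * that dropped the pt_regs argument from IRQ handlers.
 */
#include <linux/version.h>
#include <linux/interrupt.h>

#if (LINUX_VERSION_CODE < 0x20613)
static irqreturn_t tg3_compat_isr(int irq, void *dev_id, struct pt_regs *regs)
#else
static irqreturn_t tg3_compat_isr(int irq, void *dev_id)
#endif
{
	/* the real tg3 handlers schedule NAPI here; nothing to do in a sketch */
	return IRQ_HANDLED;
}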

diff --git a/linux-tg3-3.81c.patch b/linux-tg3-3.81c.patch
new file mode 100644 (file)
index 0000000..de722dc
--- /dev/null
@@ -0,0 +1,6918 @@
+diff -uNr linux-2.6.16.old/drivers/net/tg3.c linux-2.6.16/drivers/net/tg3.c
+--- linux-2.6.16.old/drivers/net/tg3.c 2008-02-08 09:52:27.000000000 +0100
++++ linux-2.6.16/drivers/net/tg3.c     2007-09-19 00:38:18.000000000 +0200
+@@ -4,7 +4,7 @@
+  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
+  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
+  * Copyright (C) 2004 Sun Microsystems Inc.
+- * Copyright (C) 2005 Broadcom Corporation.
++ * Copyright (C) 2005-2007 Broadcom Corporation.
+  *
+  * Firmware is:
+  *    Derived from proprietary unpublished source code,
+@@ -15,10 +15,22 @@
+  *    notice is accompanying it.
+  */
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE < 0x020612)
+ #include <linux/config.h>
++#endif
++#if (LINUX_VERSION_CODE < 0x020500)
++#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#include <linux/modversions.h>
++#endif
++#endif
+ #include <linux/module.h>
++#if (LINUX_VERSION_CODE >= 0x20600)
+ #include <linux/moduleparam.h>
++#endif
+ #include <linux/kernel.h>
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+@@ -36,21 +48,26 @@
+ #include <linux/if_vlan.h>
+ #include <linux/ip.h>
+ #include <linux/tcp.h>
++#if (LINUX_VERSION_CODE >= 0x20600)
+ #include <linux/workqueue.h>
++#endif
+ #include <linux/prefetch.h>
++#if (LINUX_VERSION_CODE >= 0x020600)
+ #include <linux/dma-mapping.h>
++#endif
++#include <linux/bitops.h>
+ #include <net/checksum.h>
++#include <net/ip.h>
+ #include <asm/system.h>
+ #include <asm/io.h>
+ #include <asm/byteorder.h>
+ #include <asm/uaccess.h>
+-#ifdef CONFIG_SPARC64
++#ifdef CONFIG_SPARC
+ #include <asm/idprom.h>
+-#include <asm/oplib.h>
+-#include <asm/pbm.h>
++#include <asm/prom.h>
+ #endif
+ #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+@@ -69,8 +86,8 @@
+ #define DRV_MODULE_NAME               "tg3"
+ #define PFX DRV_MODULE_NAME   ": "
+-#define DRV_MODULE_VERSION    "3.49"
+-#define DRV_MODULE_RELDATE    "Feb 2, 2006"
++#define DRV_MODULE_VERSION    "3.81c"
++#define DRV_MODULE_RELDATE    "September 18, 2007"
+ #define TG3_DEF_MAC_MODE      0
+ #define TG3_DEF_RX_MODE               0
+@@ -124,16 +141,13 @@
+                                  TG3_RX_RCB_RING_SIZE(tp))
+ #define TG3_TX_RING_BYTES     (sizeof(struct tg3_tx_buffer_desc) * \
+                                TG3_TX_RING_SIZE)
+-#define TX_BUFFS_AVAIL(TP)                                            \
+-      ((TP)->tx_pending -                                             \
+-       (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
+ #define NEXT_TX(N)            (((N) + 1) & (TG3_TX_RING_SIZE - 1))
+ #define RX_PKT_BUF_SZ         (1536 + tp->rx_offset + 64)
+ #define RX_JUMBO_PKT_BUF_SZ   (9046 + tp->rx_offset + 64)
+ /* minimum number of free TX descriptors required to wake up TX process */
+-#define TG3_TX_WAKEUP_THRESH          (TG3_TX_RING_SIZE / 4)
++#define TG3_TX_WAKEUP_THRESH(tp)              ((tp)->tx_pending / 4)
+ /* number of ETHTOOL_GSTATS u64's */
+ #define TG3_NUM_STATS         (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
+@@ -149,108 +163,78 @@
+ MODULE_VERSION(DRV_MODULE_VERSION);
+ static int tg3_debug = -1;    /* -1 == use TG3_DEF_MSG_ENABLE as value */
++#if (LINUX_VERSION_CODE >= 0x20600)
+ module_param(tg3_debug, int, 0);
+ MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
++#endif
+ static struct pci_device_id tg3_pci_tbl[] = {
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
+-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+-      { 0, }
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
++      {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
++      {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
++      {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
++      {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
++      {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
++      {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
++      {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
++      {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
++      {}
+ };
+ MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
+-static struct {
++static const struct {
+       const char string[ETH_GSTRING_LEN];
+ } ethtool_stats_keys[TG3_NUM_STATS] = {
+       { "rx_octets" },
+@@ -331,7 +315,7 @@
+       { "nic_tx_threshold_hit" }
+ };
+-static struct {
++static const struct {
+       const char string[ETH_GSTRING_LEN];
+ } ethtool_test_keys[TG3_NUM_TEST] = {
+       { "nvram test     (online) " },
+@@ -349,7 +333,7 @@
+ static u32 tg3_read32(struct tg3 *tp, u32 off)
+ {
+-      return (readl(tp->regs + off)); 
++      return (readl(tp->regs + off));
+ }
+ static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
+@@ -465,6 +449,16 @@
+               readl(mbox);
+ }
++static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
++{
++      return (readl(tp->regs + off + GRCMBOX_BASE));
++}
++
++static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
++{
++      writel(val, tp->regs + off + GRCMBOX_BASE);
++}
++
+ #define tw32_mailbox(reg, val)        tp->write32_mbox(tp, reg, val)
+ #define tw32_mailbox_f(reg, val)      tw32_mailbox_flush(tp, (reg), (val))
+ #define tw32_rx_mbox(reg, val)        tp->write32_rx_mbox(tp, reg, val)
+@@ -480,34 +474,51 @@
+ {
+       unsigned long flags;
++      if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
++          (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
++              return;
++
+       spin_lock_irqsave(&tp->indirect_lock, flags);
+-      pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+-      pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
++      if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
++              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
++              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+-      /* Always leave this as zero. */
+-      pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+-      spin_unlock_irqrestore(&tp->indirect_lock, flags);
+-}
++              /* Always leave this as zero. */
++              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
++      } else {
++              tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
++              tw32_f(TG3PCI_MEM_WIN_DATA, val);
+-static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
+-{
+-      /* If no workaround is needed, write to mem space directly */
+-      if (tp->write32 != tg3_write_indirect_reg32)
+-              tw32(NIC_SRAM_WIN_BASE + off, val);
+-      else
+-              tg3_write_mem(tp, off, val);
++              /* Always leave this as zero. */
++              tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
++      }
++      spin_unlock_irqrestore(&tp->indirect_lock, flags);
+ }
+ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
+ {
+       unsigned long flags;
++      if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
++          (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
++              *val = 0;
++              return;
++      }
++
+       spin_lock_irqsave(&tp->indirect_lock, flags);
+-      pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+-      pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
++      if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
++              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
++              pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+-      /* Always leave this as zero. */
+-      pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
++              /* Always leave this as zero. */
++              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
++      } else {
++              tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
++              *val = tr32(TG3PCI_MEM_WIN_DATA);
++
++              /* Always leave this as zero. */
++              tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
++      }
+       spin_unlock_irqrestore(&tp->indirect_lock, flags);
+ }
+@@ -523,6 +534,9 @@
+       if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+           (tp->hw_status->status & SD_STATUS_UPDATED))
+               tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
++      else
++              tw32(HOSTCC_MODE, tp->coalesce_mode |
++                   (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+ }
+ static void tg3_enable_ints(struct tg3 *tp)
+@@ -534,6 +548,9 @@
+            (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
+       tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                      (tp->last_tag << 24));
++      if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
++              tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
++                             (tp->last_tag << 24));
+       tg3_cond_int(tp);
+ }
+@@ -560,7 +577,7 @@
+ /* tg3_restart_ints
+  *  similar to tg3_enable_ints, but it accurately determines whether there
+  *  is new work pending and can return without flushing the PIO write
+- *  which reenables interrupts 
++ *  which reenables interrupts
+  */
+ static void tg3_restart_ints(struct tg3 *tp)
+ {
+@@ -649,7 +666,7 @@
+       frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
+                     MI_COM_REG_ADDR_MASK);
+       frame_val |= (MI_COM_CMD_READ | MI_COM_START);
+-      
++
+       tw32_f(MAC_MI_COM, frame_val);
+       loops = PHY_BUSY_LOOPS;
+@@ -685,6 +702,10 @@
+       unsigned int loops;
+       int ret;
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
++          (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
++              return 0;
++
+       if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+               tw32_f(MAC_MI_MODE,
+                    (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+@@ -697,7 +718,7 @@
+                     MI_COM_REG_ADDR_MASK);
+       frame_val |= (val & MI_COM_DATA_MASK);
+       frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
+-      
++
+       tw32_f(MAC_MI_COM, frame_val);
+       loops = PHY_BUSY_LOOPS;
+@@ -724,6 +745,44 @@
+       return ret;
+ }
++static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
++{
++      u32 phy;
++
++      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
++          (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
++              return;
++
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++              u32 ephy;
++
++              if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
++                      tg3_writephy(tp, MII_TG3_EPHY_TEST,
++                                   ephy | MII_TG3_EPHY_SHADOW_EN);
++                      if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
++                              if (enable)
++                                      phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
++                              else
++                                      phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
++                              tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
++                      }
++                      tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
++              }
++      } else {
++              phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
++                    MII_TG3_AUXCTL_SHDWSEL_MISC;
++              if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
++                  !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
++                      if (enable)
++                              phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
++                      else
++                              phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
++                      phy |= MII_TG3_AUXCTL_MISC_WREN;
++                      tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
++              }
++      }
++}
++
+ static void tg3_phy_set_wirespeed(struct tg3 *tp)
+ {
+       u32 val;
+@@ -948,6 +1007,8 @@
+       return err;
+ }
++static void tg3_link_report(struct tg3 *);
++
+ /* This will reset the tigon3 PHY if there is no valid
+  * link unless the FORCE argument is non-zero.
+  */
+@@ -956,11 +1017,23 @@
+       u32 phy_status;
+       int err;
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++              u32 val;
++
++              val = tr32(GRC_MISC_CFG);
++              tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
++              udelay(40);
++      }
+       err  = tg3_readphy(tp, MII_BMSR, &phy_status);
+       err |= tg3_readphy(tp, MII_BMSR, &phy_status);
+       if (err != 0)
+               return -EBUSY;
++      if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
++              netif_carrier_off(tp->dev);
++              tg3_link_report(tp);
++      }
++
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+@@ -997,6 +1070,17 @@
+               tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
+               tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+       }
++      else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
++              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
++              tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
++              if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
++                      tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
++                      tg3_writephy(tp, MII_TG3_TEST1,
++                                   MII_TG3_TEST1_TRIM_EN | 0x4);
++              } else
++                      tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
++              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
++      }
+       /* Set Extended packet length bit (bit 14) on all chips that */
+       /* support jumbo frames */
+       if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
+@@ -1022,6 +1106,12 @@
+                                phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+       }
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++              /* adjust output voltage */
++              tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
++      }
++
++      tg3_phy_toggle_automdix(tp, 1);
+       tg3_phy_set_wirespeed(tp);
+       return 0;
+ }
+@@ -1030,7 +1120,7 @@
+ {
+       struct tg3 *tp_peer = tp;
+-      if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
++      if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
+               return;
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
+@@ -1038,9 +1128,11 @@
+               struct net_device *dev_peer;
+               dev_peer = pci_get_drvdata(tp->pdev_peer);
++              /* remove_one() may have been run on the peer. */
+               if (!dev_peer)
+-                      BUG();
+-              tp_peer = netdev_priv(dev_peer);
++                      tp_peer = tp;
++              else
++                      tp_peer = netdev_priv(dev_peer);
+       }
+       if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
+@@ -1120,6 +1212,19 @@
+       }
+ }
++static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
++{
++      if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
++              return 1;
++      else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
++              if (speed != SPEED_10)
++                      return 1;
++      } else if (speed == SPEED_10)
++              return 1;
++
++      return 0;
++}
++
+ static int tg3_setup_phy(struct tg3 *, int);
+ #define RESET_KIND_SHUTDOWN   0
+@@ -1131,7 +1236,47 @@
+ static int tg3_nvram_lock(struct tg3 *);
+ static void tg3_nvram_unlock(struct tg3 *);
+-static int tg3_set_power_state(struct tg3 *tp, int state)
++static void tg3_power_down_phy(struct tg3 *tp)
++{
++      if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
++              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
++                      u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
++                      u32 serdes_cfg = tr32(MAC_SERDES_CFG);
++
++                      sg_dig_ctrl |=
++                              SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
++                      tw32(SG_DIG_CTRL, sg_dig_ctrl);
++                      tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
++              }
++              return;
++      }
++
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++              u32 val;
++
++              tg3_bmcr_reset(tp);
++              val = tr32(GRC_MISC_CFG);
++              tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
++              udelay(40);
++              return;
++      } else {
++              tg3_writephy(tp, MII_TG3_EXT_CTRL,
++                           MII_TG3_EXT_CTRL_FORCE_LED_OFF);
++              tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
++      }
++
++      /* The PHY should not be powered down on some chips because
++       * of bugs.
++       */
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
++          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
++           (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
++              return;
++      tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
++}
++
++static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
+ {
+       u32 misc_host_ctrl;
+       u16 power_control, power_caps;
+@@ -1150,28 +1295,28 @@
+       power_control |= PCI_PM_CTRL_PME_STATUS;
+       power_control &= ~(PCI_PM_CTRL_STATE_MASK);
+       switch (state) {
+-      case 0:
++      case PCI_D0:
+               power_control |= 0;
+               pci_write_config_word(tp->pdev,
+                                     pm + PCI_PM_CTRL,
+                                     power_control);
+               udelay(100);    /* Delay after power state change */
+-              /* Switch out of Vaux if it is not a LOM */
+-              if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
++              /* Switch out of Vaux if it is a NIC */
++              if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
+                       tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
+               return 0;
+-      case 1:
++      case PCI_D1:
+               power_control |= 1;
+               break;
+-      case 2:
++      case PCI_D2:
+               power_control |= 2;
+               break;
+-      case 3:
++      case PCI_D3hot:
+               power_control |= 3;
+               break;
+@@ -1202,7 +1347,12 @@
+               tg3_setup_phy(tp, 0);
+       }
+-      if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++              u32 val;
++
++              val = tr32(GRC_VCPU_EXT_CTRL);
++              tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
++      } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
+               int i;
+               u32 val;
+@@ -1210,12 +1360,19 @@
+                       tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
+                       if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+                               break;
++#if (LINUX_VERSION_CODE < 0x20607)
++                      set_current_state(TASK_UNINTERRUPTIBLE);
++                      schedule_timeout(HZ / 1000);
++#else
+                       msleep(1);
++#endif
+               }
+       }
+-      tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
+-                                           WOL_DRV_STATE_SHUTDOWN |
+-                                           WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
++      if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
++              tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
++                                                   WOL_DRV_STATE_SHUTDOWN |
++                                                   WOL_DRV_WOL |
++                                                   WOL_SET_MAGIC_PKT);
+       pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
+@@ -1226,11 +1383,22 @@
+                       tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
+                       udelay(40);
+-                      mac_mode = MAC_MODE_PORT_MODE_MII;
++                      if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
++                              mac_mode = MAC_MODE_PORT_MODE_GMII;
++                      else
++                              mac_mode = MAC_MODE_PORT_MODE_MII;
+-                      if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
+-                          !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
+-                              mac_mode |= MAC_MODE_LINK_POLARITY;
++                      mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
++                      if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
++                          ASIC_REV_5700) {
++                              u32 speed = (tp->tg3_flags &
++                                           TG3_FLAG_WOL_SPEED_100MB) ?
++                                           SPEED_100 : SPEED_10;
++                              if (tg3_5700_link_polarity(tp, speed))
++                                      mac_mode |= MAC_MODE_LINK_POLARITY;
++                              else
++                                      mac_mode &= ~MAC_MODE_LINK_POLARITY;
++                      }
+               } else {
+                       mac_mode = MAC_MODE_PORT_MODE_TBI;
+               }
+@@ -1260,7 +1428,8 @@
+               tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
+                           CLOCK_CTRL_PWRDOWN_PLL133, 40);
+-      } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
++      } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
++                 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
+               /* do nothing */
+       } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
+                    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
+@@ -1304,16 +1473,8 @@
+       }
+       if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
+-          !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
+-              /* Turn off the PHY */
+-              if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
+-                      tg3_writephy(tp, MII_TG3_EXT_CTRL,
+-                                   MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+-                      tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
+-                      if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+-                              tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
+-              }
+-      }
++          !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
++              tg3_power_down_phy(tp);
+       tg3_frob_aux_power(tp);
+@@ -1334,20 +1495,22 @@
+               }
+       }
++      tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
++
+       /* Finally, set the new power state. */
+       pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
+       udelay(100);    /* Delay after power state change */
+-      tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+-
+       return 0;
+ }
+ static void tg3_link_report(struct tg3 *tp)
+ {
+       if (!netif_carrier_ok(tp->dev)) {
+-              printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
+-      } else {
++              if (netif_msg_link(tp))
++                      printk(KERN_INFO PFX "%s: Link is down.\n",
++                             tp->dev->name);
++      } else if (netif_msg_link(tp)) {
+               printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
+                      tp->dev->name,
+                      (tp->link_config.active_speed == SPEED_1000 ?
+@@ -1426,7 +1589,7 @@
+       if (old_rx_mode != tp->rx_mode) {
+               tw32_f(MAC_RX_MODE, tp->rx_mode);
+       }
+-      
++
+       if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
+               tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
+       else
+@@ -1471,6 +1634,13 @@
+               break;
+       default:
++              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++                      *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
++                               SPEED_10;
++                      *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
++                                DUPLEX_HALF;
++                      break;
++              }
+               *speed = SPEED_INVALID;
+               *duplex = DUPLEX_INVALID;
+               break;
+@@ -1495,12 +1665,6 @@
+               tg3_writephy(tp, MII_ADVERTISE, new_adv);
+       } else if (tp->link_config.speed == SPEED_INVALID) {
+-              tp->link_config.advertising =
+-                      (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+-                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+-                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
+-                       ADVERTISED_Autoneg | ADVERTISED_MII);
+-
+               if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+                       tp->link_config.advertising &=
+                               ~(ADVERTISED_1000baseT_Half |
+@@ -1644,25 +1808,36 @@
+       return err;
+ }
+-static int tg3_copper_is_advertising_all(struct tg3 *tp)
++static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
+ {
+-      u32 adv_reg, all_mask;
++      u32 adv_reg, all_mask = 0;
++
++      if (mask & ADVERTISED_10baseT_Half)
++              all_mask |= ADVERTISE_10HALF;
++      if (mask & ADVERTISED_10baseT_Full)
++              all_mask |= ADVERTISE_10FULL;
++      if (mask & ADVERTISED_100baseT_Half)
++              all_mask |= ADVERTISE_100HALF;
++      if (mask & ADVERTISED_100baseT_Full)
++              all_mask |= ADVERTISE_100FULL;
+       if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
+               return 0;
+-      all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
+-                  ADVERTISE_100HALF | ADVERTISE_100FULL);
+       if ((adv_reg & all_mask) != all_mask)
+               return 0;
+       if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
+               u32 tg3_ctrl;
++              all_mask = 0;
++              if (mask & ADVERTISED_1000baseT_Half)
++                      all_mask |= ADVERTISE_1000HALF;
++              if (mask & ADVERTISED_1000baseT_Full)
++                      all_mask |= ADVERTISE_1000FULL;
++
+               if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
+                       return 0;
+-              all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
+-                          MII_TG3_CTRL_ADV_1000_FULL);
+               if ((tg3_ctrl & all_mask) != all_mask)
+                       return 0;
+       }
+@@ -1753,7 +1928,7 @@
+       if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
+               tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
+-      else
++      else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
+               tg3_writephy(tp, MII_TG3_IMASK, ~0);
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+@@ -1822,7 +1997,8 @@
+                               /* Force autoneg restart if we are exiting
+                                * low power mode.
+                                */
+-                              if (!tg3_copper_is_advertising_all(tp))
++                              if (!tg3_copper_is_advertising_all(tp,
++                                              tp->link_config.advertising))
+                                       current_link_up = 0;
+                       } else {
+                               current_link_up = 0;
+@@ -1890,15 +2066,12 @@
+       if (tp->link_config.active_duplex == DUPLEX_HALF)
+               tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
+-      tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+-              if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
+-                  (current_link_up == 1 &&
+-                   tp->link_config.active_speed == SPEED_10))
+-                      tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+-      } else {
+-              if (current_link_up == 1)
++              if (current_link_up == 1 &&
++                  tg3_5700_link_polarity(tp, tp->link_config.active_speed))
+                       tp->mac_mode |= MAC_MODE_LINK_POLARITY;
++              else
++                      tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
+       }
+       /* ??? Without this setting Netgear GA302T PHY does not
+@@ -2410,24 +2583,27 @@
+       expected_sg_dig_ctrl |= (1 << 12);
+       if (sg_dig_ctrl != expected_sg_dig_ctrl) {
++              if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
++                  tp->serdes_counter &&
++                  ((mac_status & (MAC_STATUS_PCS_SYNCED |
++                                  MAC_STATUS_RCVD_CFG)) ==
++                   MAC_STATUS_PCS_SYNCED)) {
++                      tp->serdes_counter--;
++                      current_link_up = 1;
++                      goto out;
++              }
++restart_autoneg:
+               if (workaround)
+                       tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
+               tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
+               udelay(5);
+               tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
+-              tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
++              tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
++              tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+       } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
+                                MAC_STATUS_SIGNAL_DET)) {
+-              int i;
+-
+-              /* Giver time to negotiate (~200ms) */
+-              for (i = 0; i < 40000; i++) {
+-                      sg_dig_status = tr32(SG_DIG_STATUS);
+-                      if (sg_dig_status & (0x3))
+-                              break;
+-                      udelay(5);
+-              }
++              sg_dig_status = tr32(SG_DIG_STATUS);
+               mac_status = tr32(MAC_STATUS);
+               if ((sg_dig_status & (1 << 1)) &&
+@@ -2443,10 +2619,11 @@
+                       tg3_setup_flow_control(tp, local_adv, remote_adv);
+                       current_link_up = 1;
+-                      tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
++                      tp->serdes_counter = 0;
++                      tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+               } else if (!(sg_dig_status & (1 << 1))) {
+-                      if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
+-                              tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
++                      if (tp->serdes_counter)
++                              tp->serdes_counter--;
+                       else {
+                               if (workaround) {
+                                       u32 val = serdes_cfg;
+@@ -2470,9 +2647,17 @@
+                                   !(mac_status & MAC_STATUS_RCVD_CFG)) {
+                                       tg3_setup_flow_control(tp, 0, 0);
+                                       current_link_up = 1;
+-                              }
++                                      tp->tg3_flags2 |=
++                                              TG3_FLG2_PARALLEL_DETECT;
++                                      tp->serdes_counter =
++                                              SERDES_PARALLEL_DET_TIMEOUT;
++                              } else
++                                      goto restart_autoneg;
+                       }
+               }
++      } else {
++              tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
++              tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+       }
+ out:
+@@ -2483,15 +2668,13 @@
+ {
+       int current_link_up = 0;
+-      if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
+-              tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
++      if (!(mac_status & MAC_STATUS_PCS_SYNCED))
+               goto out;
+-      }
+       if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+               u32 flags;
+               int i;
+-  
++
+               if (fiber_autoneg(tp, &flags)) {
+                       u32 local_adv, remote_adv;
+@@ -2504,7 +2687,6 @@
+                       tg3_setup_flow_control(tp, local_adv, remote_adv);
+-                      tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
+                       current_link_up = 1;
+               }
+               for (i = 0; i < 30; i++) {
+@@ -2527,10 +2709,12 @@
+       } else {
+               /* Forcing 1000FD link up. */
+               current_link_up = 1;
+-              tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
+               tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
+               udelay(40);
++
++              tw32_f(MAC_MODE, tp->mac_mode);
++              udelay(40);
+       }
+ out:
+@@ -2590,10 +2774,6 @@
+       else
+               current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
+-      tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
+-      tw32_f(MAC_MODE, tp->mac_mode);
+-      udelay(40);
+-
+       tp->hw_status->status =
+               (SD_STATUS_UPDATED |
+                (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
+@@ -2603,14 +2783,16 @@
+                                   MAC_STATUS_CFG_CHANGED));
+               udelay(5);
+               if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
+-                                       MAC_STATUS_CFG_CHANGED)) == 0)
++                                       MAC_STATUS_CFG_CHANGED |
++                                       MAC_STATUS_LNKSTATE_CHANGED)) == 0)
+                       break;
+       }
+       mac_status = tr32(MAC_STATUS);
+       if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
+               current_link_up = 0;
+-              if (tp->link_config.autoneg == AUTONEG_ENABLE) {
++              if (tp->link_config.autoneg == AUTONEG_ENABLE &&
++                  tp->serdes_counter == 0) {
+                       tw32_f(MAC_MODE, (tp->mac_mode |
+                                         MAC_MODE_SEND_CONFIGS));
+                       udelay(1);
+@@ -2680,6 +2862,12 @@
+       err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+       err |= tg3_readphy(tp, MII_BMSR, &bmsr);
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
++              if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
++                      bmsr |= BMSR_LSTATUS;
++              else
++                      bmsr &= ~BMSR_LSTATUS;
++      }
+       err |= tg3_readphy(tp, MII_BMCR, &bmcr);
+@@ -2709,7 +2897,7 @@
+                       tg3_writephy(tp, MII_BMCR, bmcr);
+                       tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+-                      tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
++                      tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
+                       tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+                       return err;
+@@ -2748,6 +2936,13 @@
+                       bmcr = new_bmcr;
+                       err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+                       err |= tg3_readphy(tp, MII_BMSR, &bmsr);
++                      if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
++                          ASIC_REV_5714) {
++                              if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
++                                      bmsr |= BMSR_LSTATUS;
++                              else
++                                      bmsr &= ~BMSR_LSTATUS;
++                      }
+                       tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+               }
+       }
+@@ -2807,9 +3002,9 @@
+ static void tg3_serdes_parallel_detect(struct tg3 *tp)
+ {
+-      if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
++      if (tp->serdes_counter) {
+               /* Give autoneg time to complete. */
+-              tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
++              tp->serdes_counter--;
+               return;
+       }
+       if (!netif_carrier_ok(tp->dev) &&
+@@ -2896,9 +3091,47 @@
+               }
+       }
++      if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
++              u32 val = tr32(PCIE_PWR_MGMT_THRESH);
++              if (!netif_carrier_ok(tp->dev))
++                      val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
++                            tp->pwrmgmt_thresh;
++              else
++                      val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
++              tw32(PCIE_PWR_MGMT_THRESH, val);
++      }
++
+       return err;
+ }
++/* This is called whenever we suspect that the system chipset is re-
++ * ordering the sequence of MMIO to the tx send mailbox. The symptom
++ * is bogus tx completions. We try to recover by setting the
++ * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
++ * in the workqueue.
++ */
++static void tg3_tx_recover(struct tg3 *tp)
++{
++      BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
++             tp->write32_tx_mbox == tg3_write_indirect_mbox);
++
++      printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
++             "mapped I/O cycles to the network device, attempting to "
++             "recover. Please report the problem to the driver maintainer "
++             "and include system chipset information.\n", tp->dev->name);
++
++      spin_lock(&tp->lock);
++      tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
++      spin_unlock(&tp->lock);
++}
++
++static inline u32 tg3_tx_avail(struct tg3 *tp)
++{
++      smp_mb();
++      return (tp->tx_pending -
++              ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
++}
++
+ /* Tigon3 never reports partial packet sends.  So we do not
+  * need special logic to handle SKBs that have not had all
+  * of their frags sent yet, like SunGEM does.
+@@ -2911,10 +3144,12 @@
+       while (sw_idx != hw_idx) {
+               struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
+               struct sk_buff *skb = ri->skb;
+-              int i;
++              int i, tx_bug = 0;
+-              if (unlikely(skb == NULL))
+-                      BUG();
++              if (unlikely(skb == NULL)) {
++                      tg3_tx_recover(tp);
++                      return;
++              }
+               pci_unmap_single(tp->pdev,
+                                pci_unmap_addr(ri, mapping),
+@@ -2926,12 +3161,9 @@
+               sw_idx = NEXT_TX(sw_idx);
+               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+-                      if (unlikely(sw_idx == hw_idx))
+-                              BUG();
+-
+                       ri = &tp->tx_buffers[sw_idx];
+-                      if (unlikely(ri->skb != NULL))
+-                              BUG();
++                      if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
++                              tx_bug = 1;
+                       pci_unmap_page(tp->pdev,
+                                      pci_unmap_addr(ri, mapping),
+@@ -2942,16 +3174,29 @@
+               }
+               dev_kfree_skb(skb);
++
++              if (unlikely(tx_bug)) {
++                      tg3_tx_recover(tp);
++                      return;
++              }
+       }
+       tp->tx_cons = sw_idx;
+-      if (unlikely(netif_queue_stopped(tp->dev))) {
+-              spin_lock(&tp->tx_lock);
++      /* Need to make the tx_cons update visible to tg3_start_xmit()
++       * before checking for netif_queue_stopped().  Without the
++       * memory barrier, there is a small possibility that tg3_start_xmit()
++       * will miss it and cause the queue to be stopped forever.
++       */
++      smp_mb();
++
++      if (unlikely(netif_queue_stopped(tp->dev) &&
++                   (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
++              netif_tx_lock(tp->dev);
+               if (netif_queue_stopped(tp->dev) &&
+-                  (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
++                  (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
+                       netif_wake_queue(tp->dev);
+-              spin_unlock(&tp->tx_lock);
++              netif_tx_unlock(tp->dev);
+       }
+ }
+@@ -3005,11 +3250,10 @@
+        * Callers depend upon this behavior and assume that
+        * we leave everything unchanged if we fail.
+        */
+-      skb = dev_alloc_skb(skb_size);
++      skb = netdev_alloc_skb(tp->dev, skb_size);
+       if (skb == NULL)
+               return -ENOMEM;
+-      skb->dev = tp->dev;
+       skb_reserve(skb, tp->rx_offset);
+       mapping = pci_map_single(tp->pdev, skb->data,
+@@ -3102,7 +3346,7 @@
+  */
+ static int tg3_rx(struct tg3 *tp, int budget)
+ {
+-      u32 work_mask;
++      u32 work_mask, rx_std_posted = 0;
+       u32 sw_idx = tp->rx_rcb_ptr;
+       u16 hw_idx;
+       int received;
+@@ -3129,6 +3373,7 @@
+                                                 mapping);
+                       skb = tp->rx_std_buffers[desc_idx].skb;
+                       post_ptr = &tp->rx_std_ptr;
++                      rx_std_posted++;
+               } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
+                       dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
+                                                 mapping);
+@@ -3154,7 +3399,7 @@
+               len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
+-              if (len > RX_COPY_THRESHOLD 
++              if (len > RX_COPY_THRESHOLD
+                       && tp->rx_offset == 2
+                       /* rx_offset != 2 iff this is a 5701 card running
+                        * in PCI-X mode [see tg3_get_invariants()] */
+@@ -3177,15 +3422,18 @@
+                       tg3_recycle_rx(tp, opaque_key,
+                                      desc_idx, *post_ptr);
+-                      copy_skb = dev_alloc_skb(len + 2);
++                      copy_skb = netdev_alloc_skb(tp->dev, len + 2);
+                       if (copy_skb == NULL)
+                               goto drop_it_no_recycle;
+-                      copy_skb->dev = tp->dev;
+                       skb_reserve(copy_skb, 2);
+                       skb_put(copy_skb, len);
+                       pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
++#if (LINUX_VERSION_CODE >= 0x20616)
++                      skb_copy_from_linear_data(skb, copy_skb->data, len);
++#else
+                       memcpy(copy_skb->data, skb->data, len);
++#endif
+                       pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+                       /* We'll reuse the original ring buffer. */
+@@ -3216,9 +3464,18 @@
+ next_pkt:
+               (*post_ptr)++;
++
++              if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
++                      u32 idx = *post_ptr % TG3_RX_RING_SIZE;
++
++                      tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
++                                   TG3_64BIT_REG_LOW, idx);
++                      work_mask &= ~RXD_OPAQUE_RING_STD;
++                      rx_std_posted = 0;
++              }
+ next_pkt_nopost:
+               sw_idx++;
+-              sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
++              sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
+               /* Refresh hw_idx to see if there is new work */
+               if (sw_idx == hw_idx) {
+@@ -3231,6 +3488,9 @@
+       tp->rx_rcb_ptr = sw_idx;
+       tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
++      /* Some platforms need to sync memory here */
++      wmb();
++
+       /* Refill RX ring(s). */
+       if (work_mask & RXD_OPAQUE_RING_STD) {
+               sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
+@@ -3269,6 +3529,11 @@
+       /* run TX completion thread */
+       if (sblk->idx[0].tx_consumer != tp->tx_cons) {
+               tg3_tx(tp);
++              if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
++                      netif_rx_complete(netdev);
++                      schedule_work(&tp->reset_task);
++                      return 0;
++              }
+       }
+       /* run RX thread, within the bounds set by NAPI.
+@@ -3311,7 +3576,11 @@
+       tp->irq_sync = 1;
+       smp_mb();
++#if (LINUX_VERSION_CODE >= 0x2051c)
+       synchronize_irq(tp->pdev->irq);
++#else
++      synchronize_irq();
++#endif
+ }
+ static inline int tg3_irq_sync(struct tg3 *tp)
+@@ -3326,23 +3595,46 @@
+  */
+ static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
+ {
++      spin_lock_bh(&tp->lock);
+       if (irq_sync)
+               tg3_irq_quiesce(tp);
+-      spin_lock_bh(&tp->lock);
+-      spin_lock(&tp->tx_lock);
+ }
+ static inline void tg3_full_unlock(struct tg3 *tp)
+ {
+-      spin_unlock(&tp->tx_lock);
+       spin_unlock_bh(&tp->lock);
+ }
++/* One-shot MSI handler - Chip automatically disables interrupt
++ * after sending MSI so driver doesn't have to do it.
++ */
++#if (LINUX_VERSION_CODE < 0x20613)
++static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
++#else
++static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
++#endif
++{
++      struct net_device *dev = dev_id;
++      struct tg3 *tp = netdev_priv(dev);
++
++      prefetch(tp->hw_status);
++      prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
++
++      if (likely(!tg3_irq_sync(tp)))
++              netif_rx_schedule(dev);         /* schedule NAPI poll */
++
++      return IRQ_HANDLED;
++}
++
+ /* MSI ISR - No need to check for interrupt sharing and no need to
+  * flush status block and interrupt mailbox. PCI ordering rules
+  * guarantee that MSI will arrive after the status block.
+  */
++#if (LINUX_VERSION_CODE < 0x20613)
+ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
++#else
++static irqreturn_t tg3_msi(int irq, void *dev_id)
++#endif
+ {
+       struct net_device *dev = dev_id;
+       struct tg3 *tp = netdev_priv(dev);
+@@ -3363,7 +3655,11 @@
+       return IRQ_RETVAL(1);
+ }
++#if (LINUX_VERSION_CODE < 0x20613)
+ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
++#else
++static irqreturn_t tg3_interrupt(int irq, void *dev_id)
++#endif
+ {
+       struct net_device *dev = dev_id;
+       struct tg3 *tp = netdev_priv(dev);
+@@ -3375,38 +3671,48 @@
+        * Reading the PCI State register will confirm whether the
+        * interrupt is ours and will flush the status block.
+        */
+-      if ((sblk->status & SD_STATUS_UPDATED) ||
+-          !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+-              /*
+-               * Writing any value to intr-mbox-0 clears PCI INTA# and
+-               * chip-internal interrupt pending events.
+-               * Writing non-zero to intr-mbox-0 additional tells the
+-               * NIC to stop sending us irqs, engaging "in-intr-handler"
+-               * event coalescing.
+-               */
+-              tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+-                           0x00000001);
+-              if (tg3_irq_sync(tp))
++      if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
++              if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
++                  (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
++                      handled = 0;
+                       goto out;
+-              sblk->status &= ~SD_STATUS_UPDATED;
+-              if (likely(tg3_has_work(tp))) {
+-                      prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+-                      netif_rx_schedule(dev);         /* schedule NAPI poll */
+-              } else {
+-                      /* No work, shared interrupt perhaps?  re-enable
+-                       * interrupts, and flush that PCI write
+-                       */
+-                      tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+-                              0x00000000);
+               }
+-      } else {        /* shared interrupt */
+-              handled = 0;
++      }
++
++      /*
++       * Writing any value to intr-mbox-0 clears PCI INTA# and
++       * chip-internal interrupt pending events.
++       * Writing non-zero to intr-mbox-0 additionally tells the
++       * NIC to stop sending us irqs, engaging "in-intr-handler"
++       * event coalescing.
++       *
++       * Flush the mailbox to de-assert the IRQ immediately to prevent
++       * spurious interrupts.  The flush impacts performance but
++       * excessive spurious interrupts can be worse in some cases.
++       */
++      tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
++      if (tg3_irq_sync(tp))
++              goto out;
++      sblk->status &= ~SD_STATUS_UPDATED;
++      if (likely(tg3_has_work(tp))) {
++              prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
++              netif_rx_schedule(dev);         /* schedule NAPI poll */
++      } else {
++              /* No work, shared interrupt perhaps?  re-enable
++               * interrupts, and flush that PCI write
++               */
++              tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
++                             0x00000000);
+       }
+ out:
+       return IRQ_RETVAL(handled);
+ }
++#if (LINUX_VERSION_CODE < 0x20613)
+ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
++#else
++static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
++#endif
+ {
+       struct net_device *dev = dev_id;
+       struct tg3 *tp = netdev_priv(dev);
+@@ -3418,75 +3724,126 @@
+        * Reading the PCI State register will confirm whether the
+        * interrupt is ours and will flush the status block.
+        */
+-      if ((sblk->status_tag != tp->last_tag) ||
+-          !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+-              /*
+-               * writing any value to intr-mbox-0 clears PCI INTA# and
+-               * chip-internal interrupt pending events.
+-               * writing non-zero to intr-mbox-0 additional tells the
+-               * NIC to stop sending us irqs, engaging "in-intr-handler"
+-               * event coalescing.
+-               */
+-              tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+-                           0x00000001);
+-              if (tg3_irq_sync(tp))
++      if (unlikely(sblk->status_tag == tp->last_tag)) {
++              if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
++                  (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
++                      handled = 0;
+                       goto out;
+-              if (netif_rx_schedule_prep(dev)) {
+-                      prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+-                      /* Update last_tag to mark that this status has been
+-                       * seen. Because interrupt may be shared, we may be
+-                       * racing with tg3_poll(), so only update last_tag
+-                       * if tg3_poll() is not scheduled.
+-                       */
+-                      tp->last_tag = sblk->status_tag;
+-                      __netif_rx_schedule(dev);
+               }
+-      } else {        /* shared interrupt */
+-              handled = 0;
+       }
+-out:
+-      return IRQ_RETVAL(handled);
+-}
+-/* ISR for interrupt test */
+-static irqreturn_t tg3_test_isr(int irq, void *dev_id,
+-              struct pt_regs *regs)
+-{
+-      struct net_device *dev = dev_id;
+-      struct tg3 *tp = netdev_priv(dev);
+-      struct tg3_hw_status *sblk = tp->hw_status;
+-
+-      if ((sblk->status & SD_STATUS_UPDATED) ||
+-          !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+-              tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+-                           0x00000001);
++      /*
++       * writing any value to intr-mbox-0 clears PCI INTA# and
++       * chip-internal interrupt pending events.
++       * writing non-zero to intr-mbox-0 additionally tells the
++       * NIC to stop sending us irqs, engaging "in-intr-handler"
++       * event coalescing.
++       *
++       * Flush the mailbox to de-assert the IRQ immediately to prevent
++       * spurious interrupts.  The flush impacts performance but
++       * excessive spurious interrupts can be worse in some cases.
++       */
++      tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
++      if (tg3_irq_sync(tp))
++              goto out;
++      if (netif_rx_schedule_prep(dev)) {
++              prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
++              /* Update last_tag to mark that this status has been
++               * seen. Because interrupt may be shared, we may be
++               * racing with tg3_poll(), so only update last_tag
++               * if tg3_poll() is not scheduled.
++               */
++              tp->last_tag = sblk->status_tag;
++              __netif_rx_schedule(dev);
++      }
++out:
++      return IRQ_RETVAL(handled);
++}
++
++/* ISR for interrupt test */
++#if (LINUX_VERSION_CODE < 0x020613)
++static irqreturn_t tg3_test_isr(int irq, void *dev_id, struct pt_regs *regs)
++#else
++static irqreturn_t tg3_test_isr(int irq, void *dev_id)
++#endif
++{
++      struct net_device *dev = dev_id;
++      struct tg3 *tp = netdev_priv(dev);
++      struct tg3_hw_status *sblk = tp->hw_status;
++
++      if ((sblk->status & SD_STATUS_UPDATED) ||
++          !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
++              tg3_disable_ints(tp);
+               return IRQ_RETVAL(1);
+       }
+       return IRQ_RETVAL(0);
+ }
+-static int tg3_init_hw(struct tg3 *);
++static int tg3_init_hw(struct tg3 *, int);
+ static int tg3_halt(struct tg3 *, int, int);
+-#ifdef CONFIG_NET_POLL_CONTROLLER
++/* Restart hardware after configuration changes, self-test, etc.
++ * Invoked with tp->lock held.
++ */
++static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
++{
++      int err;
++
++      err = tg3_init_hw(tp, reset_phy);
++      if (err) {
++              printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
++                     "aborting.\n", tp->dev->name);
++              tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++              tg3_full_unlock(tp);
++              del_timer_sync(&tp->timer);
++              tp->irq_sync = 0;
++              netif_poll_enable(tp->dev);
++              dev_close(tp->dev);
++              tg3_full_lock(tp, 0);
++      }
++      return err;
++}
++
++#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+ static void tg3_poll_controller(struct net_device *dev)
+ {
+       struct tg3 *tp = netdev_priv(dev);
++#if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x20600)
++      if (netdump_mode) {
++              tg3_interrupt(tp->pdev->irq, dev, NULL);
++              if (dev->poll_list.prev) {
++                      int budget = 64;
++
++                      tg3_poll(dev, &budget);
++              }
++      }
++      else
++#endif
++#if (LINUX_VERSION_CODE < 0x020613)
+       tg3_interrupt(tp->pdev->irq, dev, NULL);
++#else
++      tg3_interrupt(tp->pdev->irq, dev);
++#endif
+ }
+ #endif
++#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
++static void tg3_reset_task(struct work_struct *work)
++#else
+ static void tg3_reset_task(void *_data)
++#endif
+ {
++#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
++      struct tg3 *tp = container_of(work, struct tg3, reset_task);
++#else
+       struct tg3 *tp = _data;
++#endif
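++      /* The INIT_DELAYED_WORK_DEFERRABLE / INIT_WORK_NAR tests above
++       * detect the newer workqueue API (2.6.20 onward), where the work
++       * callback receives a struct work_struct * and recovers its
++       * context with container_of(), instead of the old void *data
++       * cookie.  Sketch of the two initialization forms (hypothetical
++       * call sites, not part of this hunk):
++       *
++       *   new API:  INIT_WORK(&tp->reset_task, tg3_reset_task);
++       *   old API:  INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
++       */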
+       unsigned int restart_timer;
+       tg3_full_lock(tp, 0);
+-      tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
+       if (!netif_running(tp->dev)) {
+-              tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
+               tg3_full_unlock(tp);
+               return;
+       }
+@@ -3500,25 +3857,43 @@
+       restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
+       tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
++      if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
++              tp->write32_tx_mbox = tg3_write32_tx_mbox;
++              tp->write32_rx_mbox = tg3_write_flush_reg32;
++              tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
++              tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
++      }
++
+       tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
+-      tg3_init_hw(tp);
++      if (tg3_init_hw(tp, 1))
++              goto out;
+       tg3_netif_start(tp);
+       if (restart_timer)
+               mod_timer(&tp->timer, jiffies + 1);
+-      tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
+-
++out:
+       tg3_full_unlock(tp);
+ }
++static void tg3_dump_short_state(struct tg3 *tp)
++{
++      printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
++             tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
++      printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
++             tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
++}
++
+ static void tg3_tx_timeout(struct net_device *dev)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+-      printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
+-             dev->name);
++      if (netif_msg_tx_err(tp)) {
++              printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
++                     dev->name);
++              tg3_dump_short_state(tp);
++      }
+       schedule_work(&tp->reset_task);
+ }
+@@ -3537,7 +3912,7 @@
+                                         int len)
+ {
+ #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
+-      if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
++      if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
+               return (((u64) mapping + len) > DMA_40BIT_MASK);
+       return 0;
+ #else
+@@ -3628,24 +4003,416 @@
+       txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
+ }
++/* hard_start_xmit for devices that don't have any bugs and
++ * support TG3_FLG2_HW_TSO_2 only.
++ */
+ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+       dma_addr_t mapping;
+       u32 len, entry, base_flags, mss;
+-      int would_hit_hwbug;
+       len = skb_headlen(skb);
+-      /* No BH disabling for tx_lock here.  We are running in BH disabled
+-       * context and TX reclaim runs via tp->poll inside of a software
++      /* We are running in BH disabled context with netif_tx_lock
++       * and TX reclaim runs via tp->poll inside of a software
+        * interrupt.  Furthermore, IRQ processing runs lockless so we have
+        * no IRQ context deadlocks to worry about either.  Rejoice!
+        */
+-      if (!spin_trylock(&tp->tx_lock))
+-              return NETDEV_TX_LOCKED; 
++      if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
++              if (!netif_queue_stopped(dev)) {
++                      netif_stop_queue(dev);
+-      if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
++                      /* This is a hard error, log it. */
++                      printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
++                             "queue awake!\n", dev->name);
++              }
++              return NETDEV_TX_BUSY;
++      }
++
++      entry = tp->tx_prod;
++      base_flags = 0;
++#if TG3_TSO_SUPPORT != 0
++      mss = 0;
++      if ((mss = skb_shinfo(skb)->gso_size) != 0) {
++              int tcp_opt_len, ip_tcp_len;
++
++              if (skb_header_cloned(skb) &&
++                  pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
++                      dev_kfree_skb(skb);
++                      goto out_unlock;
++              }
++
++#ifndef BCM_NO_TSO6
++              if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
++                      mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
++              else
++#endif
++              {
++                      struct iphdr *iph = ip_hdr(skb);
++
++                      tcp_opt_len = tcp_optlen(skb);
++                      ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
++
++                      iph->check = 0;
++                      iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
++                      mss |= (ip_tcp_len + tcp_opt_len) << 9;
++              }
++
++              base_flags |= (TXD_FLAG_CPU_PRE_DMA |
++                             TXD_FLAG_CPU_POST_DMA);
++
++              tcp_hdr(skb)->check = 0;
++
++      }
++      else if (skb->ip_summed == CHECKSUM_PARTIAL)
++              base_flags |= TXD_FLAG_TCPUDP_CSUM;
++#else
++      mss = 0;
++      if (skb->ip_summed == CHECKSUM_PARTIAL)
++              base_flags |= TXD_FLAG_TCPUDP_CSUM;
++#endif
++#if TG3_VLAN_TAG_USED
++      if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
++              base_flags |= (TXD_FLAG_VLAN |
++                             (vlan_tx_tag_get(skb) << 16));
++#endif
++
++      /* Queue skb data, a.k.a. the main skb fragment. */
++      mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
++
++      tp->tx_buffers[entry].skb = skb;
++      pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
++
++      tg3_set_txd(tp, entry, mapping, len, base_flags,
++                  (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
++
++      entry = NEXT_TX(entry);
++
++      /* Now loop through additional data fragments, and queue them. */
++      if (skb_shinfo(skb)->nr_frags > 0) {
++              unsigned int i, last;
++
++              last = skb_shinfo(skb)->nr_frags - 1;
++              for (i = 0; i <= last; i++) {
++                      skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++
++                      len = frag->size;
++                      mapping = pci_map_page(tp->pdev,
++                                             frag->page,
++                                             frag->page_offset,
++                                             len, PCI_DMA_TODEVICE);
++
++                      tp->tx_buffers[entry].skb = NULL;
++                      pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
++
++                      tg3_set_txd(tp, entry, mapping, len,
++                                  base_flags, (i == last) | (mss << 1));
++
++                      entry = NEXT_TX(entry);
++              }
++      }
++
++      /* Some platforms need to sync memory here */
++      wmb();
++
++      /* Packets are ready, update Tx producer idx local and on card. */
++      tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
++
++      tp->tx_prod = entry;
++      if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
++              netif_stop_queue(dev);
++              if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
++                      netif_wake_queue(tp->dev);
++      }
++
++#if TG3_TSO_SUPPORT != 0
++out_unlock:
++#endif
++      mmiowb();
++
++      dev->trans_start = jiffies;
++
++      return NETDEV_TX_OK;
++}
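++
++/* Worked example for the TSO setup in tg3_start_xmit() above (illustrative
++ * numbers only): a 1460-byte gso_size with a 20-byte IPv4 header, a
++ * 20-byte TCP header and no TCP options gives
++ *
++ *   ip_tcp_len   = 20 + 20 = 40
++ *   tcp_opt_len  = 0
++ *   iph->tot_len = htons(1460 + 40 + 0) = htons(1500)
++ *   mss          = 1460 | (40 << 9)
++ *
++ * i.e. the IP+TCP header length is folded into the upper bits of the mss
++ * value that is later handed to tg3_set_txd().
++ */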
++
++#if TG3_TSO_SUPPORT != 0
++#ifndef NETIF_F_GSO
++
++struct sk_buff *skb_segment(struct sk_buff *skb, int features)
++{
++      struct sk_buff *segs = NULL;
++      struct sk_buff *tail = NULL;
++      unsigned int mss = skb_shinfo(skb)->gso_size;
++      unsigned int doffset = skb->data - skb->mac.raw;
++      unsigned int offset = doffset;
++      unsigned int headroom;
++      unsigned int len;
++      int nfrags = skb_shinfo(skb)->nr_frags;
++      int err = -ENOMEM;
++      int i = 0;
++      int pos;
++
++      __skb_push(skb, doffset);
++      headroom = skb_headroom(skb);
++      pos = skb_headlen(skb);
++
++      do {
++              struct sk_buff *nskb;
++              skb_frag_t *frag;
++              int hsize;
++              int k;
++              int size;
++
++              len = skb->len - offset;
++              if (len > mss)
++                      len = mss;
++
++              hsize = skb_headlen(skb) - offset;
++              if (hsize < 0)
++                      hsize = 0;
++              if (hsize > len)
++                      hsize = len;
++
++              nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
++              if (unlikely(!nskb))
++                      goto err;
++
++              if (segs)
++                      tail->next = nskb;
++              else
++                      segs = nskb;
++              tail = nskb;
++
++              nskb->dev = skb->dev;
++              nskb->priority = skb->priority;
++              nskb->protocol = skb->protocol;
++              nskb->dst = dst_clone(skb->dst);
++              memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
++              nskb->pkt_type = skb->pkt_type;
++              nskb->mac_len = skb->mac_len;
++
++              skb_reserve(nskb, headroom);
++              nskb->mac.raw = nskb->data;
++              nskb->nh.raw = nskb->data + skb->mac_len;
++              nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
++              memcpy(skb_put(nskb, doffset), skb->data, doffset);
++
++              frag = skb_shinfo(nskb)->frags;
++              k = 0;
++
++              nskb->ip_summed = CHECKSUM_PARTIAL;
++              nskb->csum = skb->csum;
++              memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
++
++              while (pos < offset + len) {
++                      BUG_ON(i >= nfrags);
++
++                      *frag = skb_shinfo(skb)->frags[i];
++                      get_page(frag->page);
++                      size = frag->size;
++
++                      if (pos < offset) {
++                              frag->page_offset += offset - pos;
++                              frag->size -= offset - pos;
++                      }
++
++                      k++;
++
++                      if (pos + size <= offset + len) {
++                              i++;
++                              pos += size;
++                      } else {
++                              frag->size -= pos + size - (offset + len);
++                              break;
++                      }
++
++                      frag++;
++              }
++
++              skb_shinfo(nskb)->nr_frags = k;
++              nskb->data_len = len - hsize;
++              nskb->len += nskb->data_len;
++              nskb->truesize += nskb->data_len;
++      } while ((offset += len) < skb->len);
++
++      return segs;
++
++err:
++      while ((skb = segs)) {
++              segs = skb->next;
++              kfree(skb);
++      }
++      return ERR_PTR(err);
++}
++
++static struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
++{
++      struct sk_buff *segs = ERR_PTR(-EINVAL);
++      struct tcphdr *th;
++      unsigned thlen;
++      unsigned int seq;
++      u32 delta;
++      unsigned int oldlen;
++      unsigned int len;
++
++      if (!pskb_may_pull(skb, sizeof(*th)))
++              goto out;
++
++      th = skb->h.th;
++      thlen = th->doff * 4;
++      if (thlen < sizeof(*th))
++              goto out;
++
++      if (!pskb_may_pull(skb, thlen))
++              goto out;
++
++      oldlen = (u16)~skb->len;
++      __skb_pull(skb, thlen);
++
++      segs = skb_segment(skb, features);
++      if (IS_ERR(segs))
++              goto out;
++
++      len = skb_shinfo(skb)->gso_size;
++      delta = htonl(oldlen + (thlen + len));
++
++      skb = segs;
++      th = skb->h.th;
++      seq = ntohl(th->seq);
++
++      do {
++              th->fin = th->psh = 0;
++
++              th->check = ~csum_fold((u32)((u32)th->check +
++                                     (u32)delta));
++              seq += len;
++              skb = skb->next;
++              th = skb->h.th;
++
++              th->seq = htonl(seq);
++              th->cwr = 0;
++      } while (skb->next);
++
++      delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
++      th->check = ~csum_fold((u32)((u32)th->check +
++                              (u32)delta));
++out:
++      return segs;
++}
++
++static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
++{
++      struct sk_buff *segs = ERR_PTR(-EINVAL);
++      struct iphdr *iph;
++      int ihl;
++      int id;
++
++      if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
++              goto out;
++
++      iph = skb->nh.iph;
++      ihl = iph->ihl * 4;
++      if (ihl < sizeof(*iph))
++              goto out;
++
++      if (unlikely(!pskb_may_pull(skb, ihl)))
++              goto out;
++
++      skb->h.raw = __skb_pull(skb, ihl);
++      iph = skb->nh.iph;
++      id = ntohs(iph->id);
++      segs = ERR_PTR(-EPROTONOSUPPORT);
++
++      segs = tcp_tso_segment(skb, features);
++
++      if (!segs || unlikely(IS_ERR(segs)))
++              goto out;
++
++      skb = segs;
++      do {
++              iph = skb->nh.iph;
++              iph->id = htons(id++);
++              iph->tot_len = htons(skb->len - skb->mac_len);
++              iph->check = 0;
++              iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
++      } while ((skb = skb->next));
++
++out:
++      return segs;
++}
++
++static struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
++{
++      struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
++
++      skb->mac.raw = skb->data;
++      skb->mac_len = skb->nh.raw - skb->data;
++      __skb_pull(skb, skb->mac_len);
++
++      segs = inet_gso_segment(skb, features);
++
++      __skb_push(skb, skb->data - skb->mac.raw);
++      return segs;
++}
++
++#endif
++
++static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
++
++/* Use GSO to work around a rare TSO bug that may be triggered when the
++ * TSO header is greater than 80 bytes.
++ */
++static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
++{
++      struct sk_buff *segs, *nskb;
++
++      /* Estimate the number of fragments in the worst case */
++      if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
++              netif_stop_queue(tp->dev);
++              if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
++                      return NETDEV_TX_BUSY;
++
++              netif_wake_queue(tp->dev);
++      }
++
++      segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
++      if (unlikely(IS_ERR(segs)))
++              goto tg3_tso_bug_end;
++
++      do {
++              nskb = segs;
++              segs = segs->next;
++              nskb->next = NULL;
++              tg3_start_xmit_dma_bug(nskb, tp->dev);
++      } while (segs);
++
++tg3_tso_bug_end:
++      dev_kfree_skb(skb);
++
++      return NETDEV_TX_OK;
++}
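++
++/* Worked example for the 80-byte header check in tg3_start_xmit_dma_bug()
++ * below (illustrative numbers only): with a 14-byte Ethernet header, a
++ * 20-byte IPv4 header, a 20-byte TCP header and the maximum 40 bytes of
++ * TCP options,
++ *
++ *   hdr_len            = (20 + 20) + 40 = 80
++ *   ETH_HLEN + hdr_len = 14 + 80 = 94 > 80
++ *
++ * so a TG3_FLG2_TSO_BUG chip hands the skb to tg3_tso_bug() above, which
++ * segments it in software via skb_gso_segment() and queues the resulting
++ * packets individually.
++ */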
++
++#endif
++
++/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
++ * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
++ */
++static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
++{
++      struct tg3 *tp = netdev_priv(dev);
++      dma_addr_t mapping;
++      u32 len, entry, base_flags, mss;
++      int would_hit_hwbug;
++
++      len = skb_headlen(skb);
++
++      /* We are running in BH disabled context with netif_tx_lock
++       * and TX reclaim runs via tp->poll inside of a software
++       * interrupt.  Furthermore, IRQ processing runs lockless so we have
++       * no IRQ context deadlocks to worry about either.  Rejoice!
++       */
++      if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+               if (!netif_queue_stopped(dev)) {
+                       netif_stop_queue(dev);
+@@ -3653,19 +4420,19 @@
+                       printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
+                              "queue awake!\n", dev->name);
+               }
+-              spin_unlock(&tp->tx_lock);
+               return NETDEV_TX_BUSY;
+       }
+       entry = tp->tx_prod;
+       base_flags = 0;
+-      if (skb->ip_summed == CHECKSUM_HW)
++      if (skb->ip_summed == CHECKSUM_PARTIAL)
+               base_flags |= TXD_FLAG_TCPUDP_CSUM;
+ #if TG3_TSO_SUPPORT != 0
+       mss = 0;
+-      if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
+-          (mss = skb_shinfo(skb)->tso_size) != 0) {
+-              int tcp_opt_len, ip_tcp_len;
++      if (((mss = skb_shinfo(skb)->gso_size) != 0) &&
++          (skb_shinfo(skb)->gso_segs > 1)) {
++              struct iphdr *iph;
++              int tcp_opt_len, ip_tcp_len, hdr_len;
+               if (skb_header_cloned(skb) &&
+                   pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
+@@ -3673,40 +4440,42 @@
+                       goto out_unlock;
+               }
+-              tcp_opt_len = ((skb->h.th->doff - 5) * 4);
+-              ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
++              tcp_opt_len = tcp_optlen(skb);
++              ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
++
++              hdr_len = ip_tcp_len + tcp_opt_len;
++              if (unlikely((ETH_HLEN + hdr_len) > 80) &&
++                           (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
++                      return (tg3_tso_bug(tp, skb));
+               base_flags |= (TXD_FLAG_CPU_PRE_DMA |
+                              TXD_FLAG_CPU_POST_DMA);
+-              skb->nh.iph->check = 0;
+-              skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
++              iph = ip_hdr(skb);
++              iph->check = 0;
++              iph->tot_len = htons(mss + hdr_len);
+               if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
+-                      skb->h.th->check = 0;
++                      tcp_hdr(skb)->check = 0;
+                       base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
+-              }
+-              else {
+-                      skb->h.th->check =
+-                              ~csum_tcpudp_magic(skb->nh.iph->saddr,
+-                                                 skb->nh.iph->daddr,
+-                                                 0, IPPROTO_TCP, 0);
+-              }
++              } else
++                      tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
++                                                               iph->daddr, 0,
++                                                               IPPROTO_TCP,
++                                                               0);
+               if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
+                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
+-                      if (tcp_opt_len || skb->nh.iph->ihl > 5) {
++                      if (tcp_opt_len || iph->ihl > 5) {
+                               int tsflags;
+-                              tsflags = ((skb->nh.iph->ihl - 5) +
+-                                         (tcp_opt_len >> 2));
++                              tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
+                               mss |= (tsflags << 11);
+                       }
+               } else {
+-                      if (tcp_opt_len || skb->nh.iph->ihl > 5) {
++                      if (tcp_opt_len || iph->ihl > 5) {
+                               int tsflags;
+-                              tsflags = ((skb->nh.iph->ihl - 5) +
+-                                         (tcp_opt_len >> 2));
++                              tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
+                               base_flags |= tsflags << 12;
+                       }
+               }
+@@ -3787,19 +4556,21 @@
+               entry = start;
+       }
++      /* Some platforms need to sync memory here */
++      wmb();
++
+       /* Packets are ready, update Tx producer idx local and on card. */
+       tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
+       tp->tx_prod = entry;
+-      if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
++      if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
+               netif_stop_queue(dev);
+-              if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
++              if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
+                       netif_wake_queue(tp->dev);
+       }
+ out_unlock:
+       mmiowb();
+-      spin_unlock(&tp->tx_lock);
+       dev->trans_start = jiffies;
+@@ -3814,7 +4585,9 @@
+       if (new_mtu > ETH_DATA_LEN) {
+               if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+                       tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
++#if TG3_TSO_SUPPORT != 0
+                       ethtool_op_set_tso(dev, 0);
++#endif
+               }
+               else
+                       tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
+@@ -3828,6 +4601,7 @@
+ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
+ {
+       struct tg3 *tp = netdev_priv(dev);
++      int err;
+       if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
+               return -EINVAL;
+@@ -3848,13 +4622,14 @@
+       tg3_set_mtu(dev, tp, new_mtu);
+-      tg3_init_hw(tp);
++      err = tg3_restart_hw(tp, 0);
+-      tg3_netif_start(tp);
++      if (!err)
++              tg3_netif_start(tp);
+       tg3_full_unlock(tp);
+-      return 0;
++      return err;
+ }
+ /* Free up pending packets in all rx/tx rings.
+@@ -3936,7 +4711,7 @@
+  * end up in the driver.  tp->{tx,}lock are held and thus
+  * we may not sleep.
+  */
+-static void tg3_init_rings(struct tg3 *tp)
++static int tg3_init_rings(struct tg3 *tp)
+ {
+       u32 i;
+@@ -3985,18 +4760,38 @@
+       /* Now allocate fresh SKBs for each rx ring. */
+       for (i = 0; i < tp->rx_pending; i++) {
+-              if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
+-                                   -1, i) < 0)
++              if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
++                      printk(KERN_WARNING PFX
++                             "%s: Using a smaller RX standard ring, "
++                             "only %d out of %d buffers were allocated "
++                             "successfully.\n",
++                             tp->dev->name, i, tp->rx_pending);
++                      if (i == 0)
++                              return -ENOMEM;
++                      tp->rx_pending = i;
+                       break;
++              }
+       }
+       if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
+               for (i = 0; i < tp->rx_jumbo_pending; i++) {
+                       if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
+-                                           -1, i) < 0)
++                                           -1, i) < 0) {
++                              printk(KERN_WARNING PFX
++                                     "%s: Using a smaller RX jumbo ring, "
++                                     "only %d out of %d buffers were "
++                                     "allocated successfully.\n",
++                                     tp->dev->name, i, tp->rx_jumbo_pending);
++                              if (i == 0) {
++                                      tg3_free_rings(tp);
++                                      return -ENOMEM;
++                              }
++                              tp->rx_jumbo_pending = i;
+                               break;
++                      }
+               }
+       }
++      return 0;
+ }
+ /*
+@@ -4278,9 +5073,8 @@
+ /* tp->lock is held. */
+ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
+ {
+-      if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
+-              tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
+-                            NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
++      tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
++                    NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
+       if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
+               switch (kind) {
+@@ -4352,6 +5146,104 @@
+       }
+ }
++static int tg3_poll_fw(struct tg3 *tp)
++{
++      int i;
++      u32 val;
++
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++              /* Wait up to 20ms for init done. */
++              for (i = 0; i < 200; i++) {
++                      if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
++                              return 0;
++                      udelay(100);
++              }
++              return -ENODEV;
++      }
++
++      /* Wait for firmware initialization to complete. */
++      for (i = 0; i < 100000; i++) {
++              tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
++              if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
++                      break;
++              udelay(10);
++      }
++
++      /* Chip might not be fitted with firmware.  Some Sun onboard
++       * parts are configured like that.  So don't signal the timeout
++       * of the above loop as an error, but do report the lack of
++       * running firmware once.
++       */
++      if (i >= 100000 &&
++          !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
++              tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
++
++              printk(KERN_INFO PFX "%s: No firmware running.\n",
++                     tp->dev->name);
++      }
++
++      return 0;
++}
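++
++/* Timing of the polling loops in tg3_poll_fw() above: the 5906 VCPU path
++ * polls 200 times with udelay(100), i.e. up to 20 ms, while the firmware
++ * mailbox path polls 100000 times with udelay(10), i.e. up to roughly one
++ * second, before reporting that no firmware is running.
++ */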
++
++/* Save PCI command register before chip reset */
++static void tg3_save_pci_state(struct tg3 *tp)
++{
++      u32 val;
++
++      pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
++      tp->pci_cmd = val;
++}
++
++/* Restore PCI state after chip reset */
++static void tg3_restore_pci_state(struct tg3 *tp)
++{
++      u32 val;
++
++      /* Re-enable indirect register accesses. */
++      pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
++                             tp->misc_host_ctrl);
++
++      /* Set MAX PCI retry to zero. */
++      val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
++      if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
++          (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
++              val |= PCISTATE_RETRY_SAME_DMA;
++      pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
++
++      pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);
++
++      /* Make sure PCI-X relaxed ordering bit is clear. */
++      if (tp->pcix_cap) {
++              u16 pcix_cmd;
++
++              pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
++                                   &pcix_cmd);
++              pcix_cmd &= ~PCI_X_CMD_ERO;
++              pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
++                                    pcix_cmd);
++      }
++
++      if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
++              u32 val;
++
++              /* Chip reset on 5780 will reset MSI enable bit,
++               * so need to restore it.
++               */
++              if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
++                      u16 ctrl;
++
++                      pci_read_config_word(tp->pdev,
++                                           tp->msi_cap + PCI_MSI_FLAGS,
++                                           &ctrl);
++                      pci_write_config_word(tp->pdev,
++                                            tp->msi_cap + PCI_MSI_FLAGS,
++                                            ctrl | PCI_MSI_FLAGS_ENABLE);
++                      val = tr32(MSGINT_MODE);
++                      tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
++              }
++      }
++}
++
+ static void tg3_stop_fw(struct tg3 *);
+ /* tp->lock is held. */
+@@ -4359,15 +5251,25 @@
+ {
+       u32 val;
+       void (*write_op)(struct tg3 *, u32, u32);
+-      int i;
++      int err;
+-      if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
+-              tg3_nvram_lock(tp);
+-              /* No matching tg3_nvram_unlock() after this because
+-               * chip reset below will undo the nvram lock.
+-               */
+-              tp->nvram_lock_cnt = 0;
+-      }
++      tg3_nvram_lock(tp);
++
++      /* No matching tg3_nvram_unlock() after this because
++       * chip reset below will undo the nvram lock.
++       */
++      tp->nvram_lock_cnt = 0;
++
++      /* GRC_MISC_CFG core clock reset will clear the memory
++       * enable bit in PCI register 4 and the MSI enable bit
++       * on some chips, so we save relevant registers here.
++       */
++      tg3_save_pci_state(tp);
++
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
++              tw32(GRC_FASTBOOT_PC, 0);
+       /*
+        * We must avoid the readl() that normally takes place.
+@@ -4379,6 +5281,25 @@
+       if (write_op == tg3_write_flush_reg32)
+               tp->write32 = tg3_write32;
++      /* Prevent the irq handler from reading or writing PCI registers
++       * during chip reset when the memory enable bit in the PCI command
++       * register may be cleared.  The chip does not generate interrupts
++       * at this time, but the irq handler may still be called due to irq
++       * sharing or irqpoll.
++       */
++      tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
++      if (tp->hw_status) {
++              tp->hw_status->status = 0;
++              tp->hw_status->status_tag = 0;
++      }
++      tp->last_tag = 0;
++      smp_mb();
++#if (LINUX_VERSION_CODE >= 0x2051c)
++      synchronize_irq(tp->pdev->irq);
++#else
++      synchronize_irq();
++#endif
++
+       /* do the reset */
+       val = GRC_MISC_CFG_CORECLK_RESET;
+@@ -4392,6 +5313,12 @@
+               }
+       }
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++              tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
++              tw32(GRC_VCPU_EXT_CTRL,
++                   tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
++      }
++
+       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+               val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
+       tw32(GRC_MISC_CFG, val);
+@@ -4441,48 +5368,14 @@
+               pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
+       }
+-      /* Re-enable indirect register accesses. */
+-      pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+-                             tp->misc_host_ctrl);
+-
+-      /* Set MAX PCI retry to zero. */
+-      val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
+-      if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+-          (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
+-              val |= PCISTATE_RETRY_SAME_DMA;
+-      pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
+-
+-      pci_restore_state(tp->pdev);
+-
+-      /* Make sure PCI-X relaxed ordering bit is clear. */
+-      pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
+-      val &= ~PCIX_CAPS_RELAXED_ORDERING;
+-      pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
+-
+-      if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+-              u32 val;
++      tg3_restore_pci_state(tp);
+-              /* Chip reset on 5780 will reset MSI enable bit,
+-               * so need to restore it.
+-               */
+-              if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+-                      u16 ctrl;
+-
+-                      pci_read_config_word(tp->pdev,
+-                                           tp->msi_cap + PCI_MSI_FLAGS,
+-                                           &ctrl);
+-                      pci_write_config_word(tp->pdev,
+-                                            tp->msi_cap + PCI_MSI_FLAGS,
+-                                            ctrl | PCI_MSI_FLAGS_ENABLE);
+-                      val = tr32(MSGINT_MODE);
+-                      tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
+-              }
++      tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
++      val = 0;
++      if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+               val = tr32(MEMARB_MODE);
+-              tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+-
+-      } else
+-              tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
++      tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+       if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
+               tg3_stop_fw(tp);
+@@ -4515,21 +5408,9 @@
+               tw32_f(MAC_MODE, 0);
+       udelay(40);
+-      if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
+-              /* Wait for firmware initialization to complete. */
+-              for (i = 0; i < 100000; i++) {
+-                      tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+-                      if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+-                              break;
+-                      udelay(10);
+-              }
+-              if (i >= 100000) {
+-                      printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
+-                             "firmware will not restart magic=%08x\n",
+-                             tp->dev->name, val);
+-                      return -ENODEV;
+-              }
+-      }
++      err = tg3_poll_fw(tp);
++      if (err)
++              return err;
+       if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
+           tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
+@@ -4613,7 +5494,7 @@
+ #define TG3_FW_BSS_ADDR               0x08000a70
+ #define TG3_FW_BSS_LEN                0x10
+-static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
++static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
+       0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
+       0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
+       0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
+@@ -4707,7 +5588,7 @@
+       0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
+ };
+-static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
++static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
+       0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
+       0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
+@@ -4732,10 +5613,15 @@
+ {
+       int i;
+-      if (offset == TX_CPU_BASE &&
+-          (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+-              BUG();
++      BUG_ON(offset == TX_CPU_BASE &&
++          (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++              u32 val = tr32(GRC_VCPU_EXT_CTRL);
++
++              tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
++              return 0;
++      }
+       if (offset == RX_CPU_BASE) {
+               for (i = 0; i < 10000; i++) {
+                       tw32(offset + CPU_STATE, 0xffffffff);
+@@ -4773,13 +5659,13 @@
+ struct fw_info {
+       unsigned int text_base;
+       unsigned int text_len;
+-      u32 *text_data;
++      const u32 *text_data;
+       unsigned int rodata_base;
+       unsigned int rodata_len;
+-      u32 *rodata_data;
++      const u32 *rodata_data;
+       unsigned int data_base;
+       unsigned int data_len;
+-      u32 *data_data;
++      const u32 *data_data;
+ };
+ /* tp->lock is held. */
+@@ -4911,7 +5797,7 @@
+ #define TG3_TSO_FW_BSS_ADDR           0x08001b80
+ #define TG3_TSO_FW_BSS_LEN            0x894
+-static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
++static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
+       0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
+       0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
+       0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
+@@ -5198,7 +6084,7 @@
+       0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
+ };
+-static u32 tg3TsoFwRodata[] = {
++static const u32 tg3TsoFwRodata[] = {
+       0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
+       0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
+       0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
+@@ -5206,7 +6092,7 @@
+       0x00000000,
+ };
+-static u32 tg3TsoFwData[] = {
++static const u32 tg3TsoFwData[] = {
+       0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000,
+@@ -5228,7 +6114,7 @@
+ #define TG3_TSO5_FW_BSS_ADDR          0x00010f50
+ #define TG3_TSO5_FW_BSS_LEN           0x88
+-static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
++static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
+       0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
+       0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
+       0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
+@@ -5387,14 +6273,14 @@
+       0x00000000, 0x00000000, 0x00000000,
+ };
+-static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
++static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
+       0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
+       0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
+       0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
+       0x00000000, 0x00000000, 0x00000000,
+ };
+-static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
++static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
+       0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000,
+ };
+@@ -5474,7 +6360,7 @@
+ #endif /* TG3_TSO_SUPPORT != 0 */
+ /* tp->lock is held. */
+-static void __tg3_set_mac_addr(struct tg3 *tp)
++static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
+ {
+       u32 addr_high, addr_low;
+       int i;
+@@ -5486,6 +6372,8 @@
+                   (tp->dev->dev_addr[4] <<  8) |
+                   (tp->dev->dev_addr[5] <<  0));
+       for (i = 0; i < 4; i++) {
++              if (i == 1 && skip_mac_1)
++                      continue;
+               tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
+               tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
+       }
+@@ -5512,17 +6400,34 @@
+ {
+       struct tg3 *tp = netdev_priv(dev);
+       struct sockaddr *addr = p;
++      int err = 0, skip_mac_1 = 0;
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EINVAL;
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
++      if (!netif_running(dev))
++              return 0;
++
++      if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
++              u32 addr0_high, addr0_low, addr1_high, addr1_low;
++
++              addr0_high = tr32(MAC_ADDR_0_HIGH);
++              addr0_low = tr32(MAC_ADDR_0_LOW);
++              addr1_high = tr32(MAC_ADDR_1_HIGH);
++              addr1_low = tr32(MAC_ADDR_1_LOW);
++
++              /* Skip MAC addr 1 if ASF is using it. */
++              if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
++                  !(addr1_high == 0 && addr1_low == 0))
++                      skip_mac_1 = 1;
++      }
+       spin_lock_bh(&tp->lock);
+-      __tg3_set_mac_addr(tp);
++      __tg3_set_mac_addr(tp, skip_mac_1);
+       spin_unlock_bh(&tp->lock);
+-      return 0;
++      return err;
+ }
+ /* tp->lock is held. */
+@@ -5570,7 +6475,7 @@
+ }
+ /* tp->lock is held. */
+-static int tg3_reset_hw(struct tg3 *tp)
++static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+ {
+       u32 val, rdmac_mode;
+       int i, err, limit;
+@@ -5585,6 +6490,9 @@
+               tg3_abort_hw(tp, 1);
+       }
++      if (reset_phy)
++              tg3_phy_reset(tp);
++
+       err = tg3_chip_reset(tp);
+       if (err)
+               return err;
+@@ -5619,7 +6527,9 @@
+        * can only do this after the hardware has been
+        * successfully reset.
+        */
+-      tg3_init_rings(tp);
++      err = tg3_init_rings(tp);
++      if (err)
++              return err;
+       /* This value is determined during the probe time DMA
+        * engine test, tg3_test_dma.
+@@ -5631,10 +6541,14 @@
+                         GRC_MODE_NO_TX_PHDR_CSUM |
+                         GRC_MODE_NO_RX_PHDR_CSUM);
+       tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
+-      if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
+-              tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
+-      if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
+-              tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
++
++      /* Pseudo-header checksum is done by hardware logic and not
++       * the offload processors, so make the chip do the pseudo-
++       * header checksums on receive.  For transmit it is more
++       * convenient to do the pseudo-header checksum in software
++       * as Linux does that on transmit for us in all cases.
++       */
++      tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
+       tw32(GRC_MODE,
+            tp->grc_mode |
+@@ -5708,7 +6622,20 @@
+       }
+       /* Setup replenish threshold. */
+-      tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
++      val = tp->rx_pending / 8;
++      if (val == 0)
++              val = 1;
++      else if (val > tp->rx_std_max_post)
++              val = tp->rx_std_max_post;
++      else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++              if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
++                      tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
++
++              if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
++                      val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
++      }
++
++      tw32(RCVBDI_STD_THRESH, val);
+       /* Initialize TG3_BDINFO's at:
+        *  RCVDBDI_STD_BD:     standard eth size rx ring
+@@ -5817,7 +6744,7 @@
+                    tp->rx_jumbo_ptr);
+       /* Initialize MAC address and backoff seed. */
+-      __tg3_set_mac_addr(tp);
++      __tg3_set_mac_addr(tp, 0);
+       /* MTU + ethernet header + FCS + optional VLAN tag */
+       tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
+@@ -5842,16 +6769,13 @@
+                     RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
+                     RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
+                     RDMAC_MODE_LNGREAD_ENAB);
+-      if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
+-              rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
+       /* If statement applies to 5705 and 5750 PCI devices only */
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+            tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
+           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
+               if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
+-                  (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
+-                   tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
++                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+                       rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
+               } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
+                          !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
+@@ -5868,8 +6792,12 @@
+ #endif
+       /* Receive/send statistics. */
+-      if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
+-          (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
++      if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
++              val = tr32(RCVLPC_STATS_ENABLE);
++              val &= ~RCVLPC_STATSENAB_DACK_FIX;
++              tw32(RCVLPC_STATS_ENABLE, val);
++      } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
++                 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
+               val = tr32(RCVLPC_STATS_ENABLE);
+               val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
+               tw32(RCVLPC_STATS_ENABLE, val);
+@@ -5936,30 +6864,40 @@
+       tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
+               MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
++      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
++          !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
++          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
++              tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+       tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
+       udelay(40);
+       /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
+-       * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
++       * If TG3_FLG2_IS_NIC is zero, we should read the
+        * register to preserve the GPIO settings for LOMs. The GPIOs,
+        * whether used as inputs or outputs, are set by boot code after
+        * reset.
+        */
+-      if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
++      if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
+               u32 gpio_mask;
+-              gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
+-                          GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
++              gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
++                          GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
++                          GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+                       gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
+                                    GRC_LCLCTRL_GPIO_OUTPUT3;
++              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
++                      gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
++
++              tp->grc_local_ctrl &= ~gpio_mask;
+               tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
+               /* GPIO1 must be driven high for eeprom write protect */
+-              tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+-                                     GRC_LCLCTRL_GPIO_OUTPUT1);
++              if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
++                      tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
++                                             GRC_LCLCTRL_GPIO_OUTPUT1);
+       }
+       tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+       udelay(100);
+@@ -5993,22 +6931,28 @@
+               }
+       }
++      /* Enable host coalescing bug fix */
++      if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
++          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
++              val |= (1 << 29);
++
+       tw32_f(WDMAC_MODE, val);
+       udelay(40);
+-      if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
+-              val = tr32(TG3PCI_X_CAPS);
++      if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
++              u16 pcix_cmd;
++
++              pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
++                                   &pcix_cmd);
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
+-                      val &= ~PCIX_CAPS_BURST_MASK;
+-                      val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
++                      pcix_cmd &= ~PCI_X_CMD_MAX_READ;
++                      pcix_cmd |= PCI_X_CMD_READ_2K;
+               } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+-                      val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
+-                      val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
+-                      if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
+-                              val |= (tp->split_mode_max_reqs <<
+-                                      PCIX_CAPS_SPLIT_SHIFT);
++                      pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
++                      pcix_cmd |= PCI_X_CMD_READ_2K;
+               }
+-              tw32(TG3PCI_X_CAPS, val);
++              pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
++                                    pcix_cmd);
+       }
+       tw32_f(RDMAC_MODE, rdmac_mode);
+@@ -6048,6 +6992,9 @@
+       udelay(100);
+       tp->rx_mode = RX_MODE_ENABLE;
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
++              tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
++
+       tw32_f(MAC_RX_MODE, tp->rx_mode);
+       udelay(10);
+@@ -6097,16 +7044,29 @@
+               tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
+       }
+-      err = tg3_setup_phy(tp, 1);
++      if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
++          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
++              u32 tmp;
++
++              tmp = tr32(SERDES_RX_CTRL);
++              tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
++              tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
++              tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
++              tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
++      }
++
++      err = tg3_setup_phy(tp, 0);
+       if (err)
+               return err;
+-      if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
++      if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
++          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
+               u32 tmp;
+               /* Clear CRC stats. */
+-              if (!tg3_readphy(tp, 0x1e, &tmp)) {
+-                      tg3_writephy(tp, 0x1e, tmp | 0x8000);
++              if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
++                      tg3_writephy(tp, MII_TG3_TEST1,
++                                   tmp | MII_TG3_TEST1_CRC_EN);
+                       tg3_readphy(tp, 0x14, &tmp);
+               }
+       }
+@@ -6170,12 +7130,12 @@
+ /* Called at device open time to get the chip ready for
+  * packet processing.  Invoked with tp->lock held.
+  */
+-static int tg3_init_hw(struct tg3 *tp)
++static int tg3_init_hw(struct tg3 *tp, int reset_phy)
+ {
+       int err;
+       /* Force the chip into D0. */
+-      err = tg3_set_power_state(tp, 0);
++      err = tg3_set_power_state(tp, PCI_D0);
+       if (err)
+               goto out;
+@@ -6183,7 +7143,7 @@
+       tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+-      err = tg3_reset_hw(tp);
++      err = tg3_reset_hw(tp, reset_phy);
+ out:
+       return err;
+@@ -6231,12 +7191,19 @@
+       TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
+       TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
+       TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
++
++      TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
++      TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
++      TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
+ }
+ static void tg3_timer(unsigned long __opaque)
+ {
+       struct tg3 *tp = (struct tg3 *) __opaque;
++      if (tp->irq_sync)
++              goto restart_timer;
++
+       spin_lock(&tp->lock);
+       if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+@@ -6294,12 +7261,14 @@
+                               need_setup = 1;
+                       }
+                       if (need_setup) {
+-                              tw32_f(MAC_MODE,
+-                                   (tp->mac_mode &
+-                                    ~MAC_MODE_PORT_MODE_MASK));
+-                              udelay(40);
+-                              tw32_f(MAC_MODE, tp->mac_mode);
+-                              udelay(40);
++                              if (!tp->serdes_counter) {
++                                      tw32_f(MAC_MODE,
++                                           (tp->mac_mode &
++                                            ~MAC_MODE_PORT_MODE_MASK));
++                                      udelay(40);
++                                      tw32_f(MAC_MODE, tp->mac_mode);
++                                      udelay(40);
++                              }
+                               tg3_setup_phy(tp, 0);
+                       }
+               } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+@@ -6308,16 +7277,32 @@
+               tp->timer_counter = tp->timer_multiplier;
+       }
+-      /* Heartbeat is only sent once every 2 seconds.  */
++      /* Heartbeat is only sent once every 2 seconds.
++       *
++       * The heartbeat is to tell the ASF firmware that the host
++       * driver is still alive.  In the event that the OS crashes,
++       * ASF needs to reset the hardware to free up the FIFO space
++       * that may be filled with rx packets destined for the host.
++       * If the FIFO is full, ASF will no longer function properly.
++       *
++       * Unintended resets have been reported on real time kernels
++       * where the timer doesn't run on time.  Netpoll will also have
++       * same problem.
++       *
++       * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
++       * to check the ring condition when the heartbeat is expiring
++       * before doing the reset.  This will prevent most unintended
++       * resets.
++       */
+       if (!--tp->asf_counter) {
+               if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
+                       u32 val;
+-                      tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
+-                                         FWCMD_NICDRV_ALIVE2);
+-                      tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
++                      tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
++                                    FWCMD_NICDRV_ALIVE3);
++                      tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
+                       /* 5 seconds timeout */
+-                      tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
++                      tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
+                       val = tr32(GRC_RX_CPU_EVENT);
+                       val |= (1 << 14);
+                       tw32(GRC_RX_CPU_EVENT, val);
+@@ -6327,15 +7312,39 @@
+       spin_unlock(&tp->lock);
++restart_timer:
+       tp->timer.expires = jiffies + tp->timer_offset;
+       add_timer(&tp->timer);
+ }
++static int tg3_request_irq(struct tg3 *tp)
++{
++#if (LINUX_VERSION_CODE < 0x020613)
++      irqreturn_t (*fn)(int, void *, struct pt_regs *);
++#else
++      irq_handler_t fn;
++#endif
++      unsigned long flags;
++      struct net_device *dev = tp->dev;
++
++      if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
++              fn = tg3_msi;
++              if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
++                      fn = tg3_msi_1shot;
++              flags = IRQF_SAMPLE_RANDOM;
++      } else {
++              fn = tg3_interrupt;
++              if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
++                      fn = tg3_interrupt_tagged;
++              flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
++      }
++      return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
++}
++
+ static int tg3_test_interrupt(struct tg3 *tp)
+ {
+       struct net_device *dev = tp->dev;
+-      int err, i;
+-      u32 int_mbox = 0;
++      int err, i, intr_ok = 0;
+       if (!netif_running(dev))
+               return -ENODEV;
+@@ -6345,7 +7354,7 @@
+       free_irq(tp->pdev->irq, dev);
+       err = request_irq(tp->pdev->irq, tg3_test_isr,
+-                        SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
++                        IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
+       if (err)
+               return err;
+@@ -6356,32 +7365,36 @@
+              HOSTCC_MODE_NOW);
+       for (i = 0; i < 5; i++) {
++              u32 int_mbox, misc_host_ctrl;
++
+               int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
+                                       TG3_64BIT_REG_LOW);
+-              if (int_mbox != 0)
++              misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
++
++              if ((int_mbox != 0) ||
++                  (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
++                      intr_ok = 1;
+                       break;
++              }
++
++#if (LINUX_VERSION_CODE < 0x20607)
++              set_current_state(TASK_UNINTERRUPTIBLE);
++              schedule_timeout(10);
++#else
+               msleep(10);
++#endif
+       }
+       tg3_disable_ints(tp);
+       free_irq(tp->pdev->irq, dev);
+-      
+-      if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
+-              err = request_irq(tp->pdev->irq, tg3_msi,
+-                                SA_SAMPLE_RANDOM, dev->name, dev);
+-      else {
+-              irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+-              if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+-                      fn = tg3_interrupt_tagged;
+-              err = request_irq(tp->pdev->irq, fn,
+-                                SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+-      }
++
++      err = tg3_request_irq(tp);
+       if (err)
+               return err;
+-      if (int_mbox != 0)
++      if (intr_ok)
+               return 0;
+       return -EIO;
+@@ -6424,18 +7437,13 @@
+                      tp->dev->name);
+       free_irq(tp->pdev->irq, dev);
++#ifdef CONFIG_PCI_MSI
+       pci_disable_msi(tp->pdev);
++#endif
+       tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+-      {
+-              irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+-              if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+-                      fn = tg3_interrupt_tagged;
+-
+-              err = request_irq(tp->pdev->irq, fn,
+-                                SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+-      }
++      err = tg3_request_irq(tp);
+       if (err)
+               return err;
+@@ -6445,7 +7453,7 @@
+       tg3_full_lock(tp, 1);
+       tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+-      err = tg3_init_hw(tp);
++      err = tg3_init_hw(tp, 1);
+       tg3_full_unlock(tp);
+@@ -6460,8 +7468,16 @@
+       struct tg3 *tp = netdev_priv(dev);
+       int err;
++      netif_carrier_off(tp->dev);
++
+       tg3_full_lock(tp, 0);
++      err = tg3_set_power_state(tp, PCI_D0);
++      if (err) {
++              tg3_full_unlock(tp);
++              return err;
++      }
++
+       tg3_disable_ints(tp);
+       tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+@@ -6474,9 +7490,8 @@
+       if (err)
+               return err;
+-      if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
+-          (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
+-          (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
++#ifdef CONFIG_PCI_MSI
++      if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
+               /* All MSI supporting chips should support tagged
+                * status.  Assert that this is the case.
+                */
+@@ -6486,26 +7501,27 @@
+               } else if (pci_enable_msi(tp->pdev) == 0) {
+                       u32 msi_mode;
++                      /* Hardware bug - MSI won't work if INTX disabled. */
++                      if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
++#if (LINUX_VERSION_CODE < 0x2060e)
++                              tg3_enable_intx(tp->pdev);
++#else
++                              pci_intx(tp->pdev, 1);
++#endif
++
+                       msi_mode = tr32(MSGINT_MODE);
+                       tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
+                       tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
+               }
+       }
+-      if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
+-              err = request_irq(tp->pdev->irq, tg3_msi,
+-                                SA_SAMPLE_RANDOM, dev->name, dev);
+-      else {
+-              irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+-              if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+-                      fn = tg3_interrupt_tagged;
+-
+-              err = request_irq(tp->pdev->irq, fn,
+-                                SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+-      }
++#endif
++      err = tg3_request_irq(tp);
+       if (err) {
+               if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
++#ifdef CONFIG_PCI_MSI
+                       pci_disable_msi(tp->pdev);
++#endif
+                       tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+               }
+               tg3_free_consistent(tp);
+@@ -6514,7 +7530,7 @@
+       tg3_full_lock(tp, 0);
+-      err = tg3_init_hw(tp);
++      err = tg3_init_hw(tp, 1);
+       if (err) {
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+               tg3_free_rings(tp);
+@@ -6541,7 +7557,9 @@
+       if (err) {
+               free_irq(tp->pdev->irq, dev);
+               if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
++#ifdef CONFIG_PCI_MSI
+                       pci_disable_msi(tp->pdev);
++#endif
+                       tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+               }
+               tg3_free_consistent(tp);
+@@ -6555,7 +7573,9 @@
+                       tg3_full_lock(tp, 0);
+                       if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
++#ifdef CONFIG_PCI_MSI
+                               pci_disable_msi(tp->pdev);
++#endif
+                               tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+                       }
+                       tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+@@ -6566,6 +7586,15 @@
+                       return err;
+               }
++
++              if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
++                      if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
++                              u32 val = tr32(PCIE_TRANSACTION_CFG);
++
++                              tw32(PCIE_TRANSACTION_CFG,
++                                   val | PCIE_TRANS_CFG_1SHOT_MSI);
++                      }
++              }
+       }
+       tg3_full_lock(tp, 0);
+@@ -6816,12 +7845,12 @@
+ {
+       struct tg3 *tp = netdev_priv(dev);
+-      /* Calling flush_scheduled_work() may deadlock because
+-       * linkwatch_event() may be on the workqueue and it will try to get
+-       * the rtnl_lock which we are holding.
+-       */
+-      while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
+-              msleep(1);
++#if (LINUX_VERSION_CODE >= 0x20616)
++      cancel_work_sync(&tp->reset_task);
++#else
++      set_current_state(TASK_UNINTERRUPTIBLE);
++      schedule_timeout(1);
++#endif
+       netif_stop_queue(dev);
+@@ -6836,16 +7865,15 @@
+       tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+       tg3_free_rings(tp);
+-      tp->tg3_flags &=
+-              ~(TG3_FLAG_INIT_COMPLETE |
+-                TG3_FLAG_GOT_SERDES_FLOWCTL);
+-      netif_carrier_off(tp->dev);
++      tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+       tg3_full_unlock(tp);
+       free_irq(tp->pdev->irq, dev);
+       if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
++#ifdef CONFIG_PCI_MSI
+               pci_disable_msi(tp->pdev);
++#endif
+               tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+       }
+@@ -6856,6 +7884,10 @@
+       tg3_free_consistent(tp);
++      tg3_set_power_state(tp, PCI_D3hot);
++
++      netif_carrier_off(tp->dev);
++
+       return 0;
+ }
+@@ -6881,8 +7913,9 @@
+               u32 val;
+               spin_lock_bh(&tp->lock);
+-              if (!tg3_readphy(tp, 0x1e, &val)) {
+-                      tg3_writephy(tp, 0x1e, val | 0x8000);
++              if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
++                      tg3_writephy(tp, MII_TG3_TEST1,
++                                   val | MII_TG3_TEST1_CRC_EN);
+                       tg3_readphy(tp, 0x14, &val);
+               } else
+                       val = 0;
+@@ -7002,7 +8035,7 @@
+               get_stat64(&hw_stats->rx_ucast_packets) +
+               get_stat64(&hw_stats->rx_mcast_packets) +
+               get_stat64(&hw_stats->rx_bcast_packets);
+-              
++
+       stats->tx_packets = old_stats->tx_packets +
+               get_stat64(&hw_stats->tx_ucast_packets) +
+               get_stat64(&hw_stats->tx_mcast_packets) +
+@@ -7150,6 +8183,9 @@
+ {
+       struct tg3 *tp = netdev_priv(dev);
++      if (!netif_running(dev))
++              return;
++
+       tg3_full_lock(tp, 0);
+       __tg3_set_rx_mode(dev);
+       tg3_full_unlock(tp);
+@@ -7174,6 +8210,9 @@
+       memset(p, 0, TG3_REGDUMP_LEN);
++      if (tp->link_config.phy_is_low_power)
++              return;
++
+       tg3_full_lock(tp, 0);
+ #define __GET_REG32(reg)      (*(p)++ = tr32(reg))
+@@ -7232,15 +8271,19 @@
+       tg3_full_unlock(tp);
+ }
++#if (LINUX_VERSION_CODE >= 0x20418)
+ static int tg3_get_eeprom_len(struct net_device *dev)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+       return tp->nvram_size;
+ }
++#endif
+ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
++static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
++#ifdef ETHTOOL_GEEPROM
+ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+@@ -7248,6 +8291,9 @@
+       u8  *pd;
+       u32 i, offset, len, val, b_offset, b_count;
++      if (tp->link_config.phy_is_low_power)
++              return -EAGAIN;
++
+       offset = eeprom->offset;
+       len = eeprom->len;
+       eeprom->len = 0;
+@@ -7299,9 +8345,11 @@
+       }
+       return 0;
+ }
++#endif
+-static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
++static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
++#ifdef ETHTOOL_SEEPROM
+ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+@@ -7309,6 +8357,9 @@
+       u32 offset, len, b_offset, odd_len, start, end;
+       u8 *buf;
++      if (tp->link_config.phy_is_low_power)
++              return -EAGAIN;
++
+       if (eeprom->magic != TG3_EEPROM_MAGIC)
+               return -EINVAL;
+@@ -7357,11 +8408,12 @@
+       return ret;
+ }
++#endif
+ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+-  
++
+       cmd->supported = (SUPPORTED_Autoneg);
+       if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+@@ -7373,13 +8425,13 @@
+                                 SUPPORTED_100baseT_Full |
+                                 SUPPORTED_10baseT_Half |
+                                 SUPPORTED_10baseT_Full |
+-                                SUPPORTED_MII);
++                                SUPPORTED_TP);
+               cmd->port = PORT_TP;
+       } else {
+               cmd->supported |= SUPPORTED_FIBRE;
+               cmd->port = PORT_FIBRE;
+       }
+-  
++
+       cmd->advertising = tp->link_config.advertising;
+       if (netif_running(dev)) {
+               cmd->speed = tp->link_config.active_speed;
+@@ -7392,12 +8444,12 @@
+       cmd->maxrxpkt = 0;
+       return 0;
+ }
+-  
++
+ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+-  
+-      if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
++
++      if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
+               /* These are the only valid advertisement bits allowed.  */
+               if (cmd->autoneg == AUTONEG_ENABLE &&
+                   (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
+@@ -7429,68 +8481,75 @@
+               tp->link_config.speed = cmd->speed;
+               tp->link_config.duplex = cmd->duplex;
+       }
+-  
++
++      tp->link_config.orig_speed = tp->link_config.speed;
++      tp->link_config.orig_duplex = tp->link_config.duplex;
++      tp->link_config.orig_autoneg = tp->link_config.autoneg;
++
+       if (netif_running(dev))
+               tg3_setup_phy(tp, 1);
+       tg3_full_unlock(tp);
+-  
++
+       return 0;
+ }
+-  
++
+ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+-  
++
+       strcpy(info->driver, DRV_MODULE_NAME);
+       strcpy(info->version, DRV_MODULE_VERSION);
++      strcpy(info->fw_version, tp->fw_ver);
+       strcpy(info->bus_info, pci_name(tp->pdev));
+ }
+-  
++
+ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+-  
+-      wol->supported = WAKE_MAGIC;
++
++      if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
++              wol->supported = WAKE_MAGIC;
++      else
++              wol->supported = 0;
+       wol->wolopts = 0;
+       if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
+               wol->wolopts = WAKE_MAGIC;
+       memset(&wol->sopass, 0, sizeof(wol->sopass));
+ }
+-  
++
+ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+-  
++
+       if (wol->wolopts & ~WAKE_MAGIC)
+               return -EINVAL;
+       if ((wol->wolopts & WAKE_MAGIC) &&
+-          tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
+-          !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
++          !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
+               return -EINVAL;
+-  
++
+       spin_lock_bh(&tp->lock);
+       if (wol->wolopts & WAKE_MAGIC)
+               tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
+       else
+               tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
+       spin_unlock_bh(&tp->lock);
+-  
++
+       return 0;
+ }
+-  
++
+ static u32 tg3_get_msglevel(struct net_device *dev)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+       return tp->msg_enable;
+ }
+-  
++
+ static void tg3_set_msglevel(struct net_device *dev, u32 value)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+       tp->msg_enable = value;
+ }
+-  
++
+ #if TG3_TSO_SUPPORT != 0
+ static int tg3_set_tso(struct net_device *dev, u32 value)
+ {
+@@ -7501,16 +8560,23 @@
+                       return -EINVAL;
+               return 0;
+       }
++      if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
++          (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
++              if (value)
++                      dev->features |= NETIF_F_TSO6;
++              else
++                      dev->features &= ~NETIF_F_TSO6;
++      }
+       return ethtool_op_set_tso(dev, value);
+ }
+ #endif
+-  
++
+ static int tg3_nway_reset(struct net_device *dev)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+       u32 bmcr;
+       int r;
+-  
++
+       if (!netif_running(dev))
+               return -EAGAIN;
+@@ -7528,41 +8594,53 @@
+               r = 0;
+       }
+       spin_unlock_bh(&tp->lock);
+-  
++
+       return r;
+ }
+-  
++
+ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+-  
++
+       ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
+       ering->rx_mini_max_pending = 0;
+-      ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
++      if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
++              ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
++      else
++              ering->rx_jumbo_max_pending = 0;
++
++      ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
+       ering->rx_pending = tp->rx_pending;
+       ering->rx_mini_pending = 0;
+-      ering->rx_jumbo_pending = tp->rx_jumbo_pending;
++      if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
++              ering->rx_jumbo_pending = tp->rx_jumbo_pending;
++      else
++              ering->rx_jumbo_pending = 0;
++
+       ering->tx_pending = tp->tx_pending;
+ }
+-  
++
+ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+-      int irq_sync = 0;
+-  
++      int irq_sync = 0, err = 0;
++
+       if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
+           (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
+-          (ering->tx_pending > TG3_TX_RING_SIZE - 1))
++          (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
++          (ering->tx_pending <= MAX_SKB_FRAGS) ||
++          ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
++           (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
+               return -EINVAL;
+-  
++
+       if (netif_running(dev)) {
+               tg3_netif_stop(tp);
+               irq_sync = 1;
+       }
+       tg3_full_lock(tp, irq_sync);
+-  
++
+       tp->rx_pending = ering->rx_pending;
+       if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
+@@ -7573,29 +8651,30 @@
+       if (netif_running(dev)) {
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+-              tg3_init_hw(tp);
+-              tg3_netif_start(tp);
++              err = tg3_restart_hw(tp, 1);
++              if (!err)
++                      tg3_netif_start(tp);
+       }
+       tg3_full_unlock(tp);
+-  
+-      return 0;
++
++      return err;
+ }
+-  
++
+ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+-  
++
+       epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
+       epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
+       epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
+ }
+-  
++
+ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+-      int irq_sync = 0;
+-  
++      int irq_sync = 0, err = 0;
++
+       if (netif_running(dev)) {
+               tg3_netif_stop(tp);
+               irq_sync = 1;
+@@ -7618,58 +8697,68 @@
+       if (netif_running(dev)) {
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+-              tg3_init_hw(tp);
+-              tg3_netif_start(tp);
++              err = tg3_restart_hw(tp, 1);
++              if (!err)
++                      tg3_netif_start(tp);
+       }
+       tg3_full_unlock(tp);
+-  
+-      return 0;
++
++      return err;
+ }
+-  
++
+ static u32 tg3_get_rx_csum(struct net_device *dev)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+       return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
+ }
+-  
++
+ static int tg3_set_rx_csum(struct net_device *dev, u32 data)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+-  
++
+       if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
+               if (data != 0)
+                       return -EINVAL;
+               return 0;
+       }
+-  
++
+       spin_lock_bh(&tp->lock);
+       if (data)
+               tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
+       else
+               tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
+       spin_unlock_bh(&tp->lock);
+-  
++
+       return 0;
+ }
+-  
++
++#if (LINUX_VERSION_CODE >= 0x20418)
+ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
+ {
+       struct tg3 *tp = netdev_priv(dev);
+-  
++
+       if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
+               if (data != 0)
+                       return -EINVAL;
+               return 0;
+       }
+-  
+-      if (data)
+-              dev->features |= NETIF_F_IP_CSUM;
++
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
++#if (LINUX_VERSION_CODE >= 0x20418) && (LINUX_VERSION_CODE < 0x2060c)
++              tg3_set_tx_hw_csum(dev, data);
++#elif (LINUX_VERSION_CODE >= 0x20617)
++              ethtool_op_set_tx_ipv6_csum(dev, data);
++#else
++              ethtool_op_set_tx_hw_csum(dev, data);
++#endif
+       else
+-              dev->features &= ~NETIF_F_IP_CSUM;
++              ethtool_op_set_tx_csum(dev, data);
+       return 0;
+ }
++#endif
+ static int tg3_get_stats_count (struct net_device *dev)
+ {
+@@ -7716,12 +8805,16 @@
+                                          LED_CTRL_TRAFFIC_OVERRIDE |
+                                          LED_CTRL_TRAFFIC_BLINK |
+                                          LED_CTRL_TRAFFIC_LED);
+-      
++
+               else
+                       tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+                                          LED_CTRL_TRAFFIC_OVERRIDE);
+-
++#if (LINUX_VERSION_CODE < 0x20607)
++              set_current_state(TASK_INTERRUPTIBLE);
++              if (schedule_timeout(HZ / 2))
++#else
+               if (msleep_interruptible(500))
++#endif
+                       break;
+       }
+       tw32(MAC_LED_CTRL, tp->led_ctrl);
+@@ -7736,29 +8829,106 @@
+ }
+ #define NVRAM_TEST_SIZE 0x100
++#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
++#define NVRAM_SELFBOOT_HW_SIZE 0x20
++#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
+ static int tg3_test_nvram(struct tg3 *tp)
+ {
+-      u32 *buf, csum;
+-      int i, j, err = 0;
++      u32 *buf, csum, magic;
++      int i, j, err = 0, size;
++
++      if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
++              return -EIO;
++
++      if (magic == TG3_EEPROM_MAGIC)
++              size = NVRAM_TEST_SIZE;
++      else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
++              if ((magic & 0xe00000) == 0x200000)
++                      size = NVRAM_SELFBOOT_FORMAT1_SIZE;
++              else
++                      return 0;
++      } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
++              size = NVRAM_SELFBOOT_HW_SIZE;
++      else
++              return -EIO;
+-      buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
++      buf = kmalloc(size, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+-      for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
++      err = -EIO;
++      for (i = 0, j = 0; i < size; i += 4, j++) {
+               u32 val;
+               if ((err = tg3_nvram_read(tp, i, &val)) != 0)
+                       break;
+               buf[j] = cpu_to_le32(val);
+       }
+-      if (i < NVRAM_TEST_SIZE)
++      if (i < size)
+               goto out;
+-      err = -EIO;
+-      if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
++      /* Selfboot format */
++      if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
++          TG3_EEPROM_MAGIC_FW) {
++              u8 *buf8 = (u8 *) buf, csum8 = 0;
++
++              for (i = 0; i < size; i++)
++                      csum8 += buf8[i];
++
++              if (csum8 == 0) {
++                      err = 0;
++                      goto out;
++              }
++
++              err = -EIO;
++              goto out;
++      }
++
++      if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
++          TG3_EEPROM_MAGIC_HW) {
++              u8 data[NVRAM_SELFBOOT_DATA_SIZE];
++              u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
++              u8 *buf8 = (u8 *) buf;
++              int j, k;
++
++              /* Separate the parity bits and the data bytes.  */
++              for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
++                      if ((i == 0) || (i == 8)) {
++                              int l;
++                              u8 msk;
++
++                              for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
++                                      parity[k++] = buf8[i] & msk;
++                              i++;
++                      }
++                      else if (i == 16) {
++                              int l;
++                              u8 msk;
++
++                              for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
++                                      parity[k++] = buf8[i] & msk;
++                              i++;
++
++                              for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
++                                      parity[k++] = buf8[i] & msk;
++                              i++;
++                      }
++                      data[j++] = buf8[i];
++              }
++
++              err = -EIO;
++              for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
++                      u8 hw8 = hweight8(data[i]);
++
++                      if ((hw8 & 0x1) && parity[i])
++                              goto out;
++                      else if (!(hw8 & 0x1) && !parity[i])
++                              goto out;
++              }
++              err = 0;
+               goto out;
++      }
+       /* Bootstrap checksum at offset 0x10 */
+       csum = calc_crc((unsigned char *) buf, 0x10);
+@@ -7778,7 +8948,7 @@
+ }
+ #define TG3_SERDES_TIMEOUT_SEC        2
+-#define TG3_COPPER_TIMEOUT_SEC        6
++#define TG3_COPPER_TIMEOUT_SEC        7
+ static int tg3_test_link(struct tg3 *tp)
+ {
+@@ -7796,7 +8966,12 @@
+               if (netif_carrier_ok(tp->dev))
+                       return 0;
++#if (LINUX_VERSION_CODE < 0x20607)
++              set_current_state(TASK_INTERRUPTIBLE);
++              if (schedule_timeout(HZ))
++#else
+               if (msleep_interruptible(1000))
++#endif
+                       break;
+       }
+@@ -7806,7 +8981,7 @@
+ /* Only test the commonly used registers */
+ static int tg3_test_registers(struct tg3 *tp)
+ {
+-      int i, is_5705;
++      int i, is_5705, is_5750;
+       u32 offset, read_mask, write_mask, val, save_val, read_val;
+       static struct {
+               u16 offset;
+@@ -7814,6 +8989,7 @@
+ #define TG3_FL_5705   0x1
+ #define TG3_FL_NOT_5705       0x2
+ #define TG3_FL_NOT_5788       0x4
++#define TG3_FL_NOT_5750       0x8
+               u32 read_mask;
+               u32 write_mask;
+       } reg_tbl[] = {
+@@ -7866,7 +9042,7 @@
+                       0x00000000, 0xffff0002 },
+               { RCVDBDI_STD_BD+0xc, 0x0000,
+                       0x00000000, 0xffffffff },
+-      
++
+               /* Receive BD Initiator Control Registers. */
+               { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+@@ -7874,7 +9050,7 @@
+                       0x00000000, 0x000003ff },
+               { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
+                       0x00000000, 0xffffffff },
+-      
++
+               /* Host Coalescing Control Registers. */
+               { HOSTCC_MODE, TG3_FL_NOT_5705,
+                       0x00000000, 0x00000004 },
+@@ -7924,9 +9100,9 @@
+                       0xffffffff, 0x00000000 },
+               /* Buffer Manager Control Registers. */
+-              { BUFMGR_MB_POOL_ADDR, 0x0000,
++              { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
+                       0x00000000, 0x007fff80 },
+-              { BUFMGR_MB_POOL_SIZE, 0x0000,
++              { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
+                       0x00000000, 0x007fffff },
+               { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
+                       0x00000000, 0x0000003f },
+@@ -7938,7 +9114,7 @@
+                       0xffffffff, 0x00000000 },
+               { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
+                       0xffffffff, 0x00000000 },
+-      
++
+               /* Mailbox Registers */
+               { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
+                       0x00000000, 0x000001ff },
+@@ -7952,10 +9128,12 @@
+               { 0xffff, 0x0000, 0x00000000, 0x00000000 },
+       };
+-      if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
++      is_5705 = is_5750 = 0;
++      if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+               is_5705 = 1;
+-      else
+-              is_5705 = 0;
++              if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
++                      is_5750 = 1;
++      }
+       for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+               if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
+@@ -7968,6 +9146,9 @@
+                   (reg_tbl[i].flags & TG3_FL_NOT_5788))
+                       continue;
++              if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
++                      continue;
++
+               offset = (u32) reg_tbl[i].offset;
+               read_mask = reg_tbl[i].read_mask;
+               write_mask = reg_tbl[i].write_mask;
+@@ -8011,14 +9192,16 @@
+       return 0;
+ out:
+-      printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
++      if (netif_msg_hw(tp))
++              printk(KERN_ERR PFX "Register test failed at offset %x\n",
++                     offset);
+       tw32(offset, save_val);
+       return -EIO;
+ }
+ static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
+ {
+-      static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
++      static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
+       int i;
+       u32 j;
+@@ -8052,14 +9235,34 @@
+               { 0x00008000, 0x02000},
+               { 0x00010000, 0x0e000},
+               { 0xffffffff, 0x00000}
++      }, mem_tbl_5755[] = {
++              { 0x00000200, 0x00008},
++              { 0x00004000, 0x00800},
++              { 0x00006000, 0x00800},
++              { 0x00008000, 0x02000},
++              { 0x00010000, 0x0c000},
++              { 0xffffffff, 0x00000}
++      }, mem_tbl_5906[] = {
++              { 0x00000200, 0x00008},
++              { 0x00004000, 0x00400},
++              { 0x00006000, 0x00400},
++              { 0x00008000, 0x01000},
++              { 0x00010000, 0x01000},
++              { 0xffffffff, 0x00000}
+       };
+       struct mem_entry *mem_tbl;
+       int err = 0;
+       int i;
+-      if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+-              mem_tbl = mem_tbl_5705;
+-      else
++      if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
++              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
++                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
++                      mem_tbl = mem_tbl_5755;
++              else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
++                      mem_tbl = mem_tbl_5906;
++              else
++                      mem_tbl = mem_tbl_5705;
++      } else
+               mem_tbl = mem_tbl_570x;
+       for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+@@ -8067,7 +9270,7 @@
+                   mem_tbl[i].len)) != 0)
+                       break;
+       }
+-      
++
+       return err;
+ }
+@@ -8093,23 +9296,59 @@
+                       return 0;
+               mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
+-                         MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
+-                         MAC_MODE_PORT_MODE_GMII;
++                         MAC_MODE_PORT_INT_LPBACK;
++              if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
++                      mac_mode |= MAC_MODE_LINK_POLARITY;
++              if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
++                      mac_mode |= MAC_MODE_PORT_MODE_MII;
++              else
++                      mac_mode |= MAC_MODE_PORT_MODE_GMII;
+               tw32(MAC_MODE, mac_mode);
+       } else if (loopback_mode == TG3_PHY_LOOPBACK) {
+-              tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
+-                                         BMCR_SPEED1000);
++              u32 val;
++
++              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++                      u32 phytest;
++
++                      if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
++                              u32 phy;
++
++                              tg3_writephy(tp, MII_TG3_EPHY_TEST,
++                                           phytest | MII_TG3_EPHY_SHADOW_EN);
++                              if (!tg3_readphy(tp, 0x1b, &phy))
++                                      tg3_writephy(tp, 0x1b, phy & ~0x20);
++                              tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
++                      }
++                      val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
++              } else
++                      val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
++
++              tg3_phy_toggle_automdix(tp, 0);
++
++              tg3_writephy(tp, MII_BMCR, val);
+               udelay(40);
++
++              mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
++              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++                      tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
++                      mac_mode |= MAC_MODE_PORT_MODE_MII;
++              } else
++                      mac_mode |= MAC_MODE_PORT_MODE_GMII;
++
+               /* reset to prevent losing 1st rx packet intermittently */
+               if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+                       tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+                       udelay(10);
+                       tw32_f(MAC_RX_MODE, tp->rx_mode);
+               }
+-              mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
+-                         MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
+-              if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
+-                      mac_mode &= ~MAC_MODE_LINK_POLARITY;
++              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
++                      if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
++                              mac_mode &= ~MAC_MODE_LINK_POLARITY;
++                      else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
++                              mac_mode |= MAC_MODE_LINK_POLARITY;
++                      tg3_writephy(tp, MII_TG3_EXT_CTRL,
++                                   MII_TG3_EXT_CTRL_LNK3_LED_MODE);
++              }
+               tw32(MAC_MODE, mac_mode);
+       }
+       else
+@@ -8118,7 +9357,10 @@
+       err = -EIO;
+       tx_len = 1514;
+-      skb = dev_alloc_skb(tx_len);
++      skb = netdev_alloc_skb(tp->dev, tx_len);
++      if (!skb)
++              return -ENOMEM;
++
+       tx_data = skb_put(skb, tx_len);
+       memcpy(tx_data, tp->dev->dev_addr, 6);
+       memset(tx_data + 6, 0x0, 8);
+@@ -8144,13 +9386,17 @@
+       tp->tx_prod++;
+       num_pkts++;
++      /* Some platforms need to sync memory here */
++      wmb();
++
+       tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
+                    tp->tx_prod);
+       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
+       udelay(10);
+-      for (i = 0; i < 10; i++) {
++      /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
++      for (i = 0; i < 25; i++) {
+               tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+                      HOSTCC_MODE_NOW);
+@@ -8196,7 +9442,7 @@
+                       goto out;
+       }
+       err = 0;
+-      
++
+       /* tg3_free_rings will unmap and free the rx_skb */
+ out:
+       return err;
+@@ -8214,7 +9460,9 @@
+       if (!netif_running(tp->dev))
+               return TG3_LOOPBACK_FAILED;
+-      tg3_reset_hw(tp);
++      err = tg3_reset_hw(tp, 1);
++      if (err)
++              return TG3_LOOPBACK_FAILED;
+       if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
+               err |= TG3_MAC_LOOPBACK_FAILED;
+@@ -8231,6 +9479,9 @@
+ {
+       struct tg3 *tp = netdev_priv(dev);
++      if (tp->link_config.phy_is_low_power)
++              tg3_set_power_state(tp, PCI_D0);
++
+       memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
+       if (tg3_test_nvram(tp) != 0) {
+@@ -8259,6 +9510,9 @@
+               if (!err)
+                       tg3_nvram_unlock(tp);
++              if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
++                      tg3_phy_reset(tp);
++
+               if (tg3_test_registers(tp) != 0) {
+                       etest->flags |= ETH_TEST_FL_FAILED;
+                       data[2] = 1;
+@@ -8282,17 +9536,24 @@
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+               if (netif_running(dev)) {
+                       tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+-                      tg3_init_hw(tp);
+-                      tg3_netif_start(tp);
++                      if (!tg3_restart_hw(tp, 1))
++                              tg3_netif_start(tp);
+               }
+               tg3_full_unlock(tp);
+       }
++      if (tp->link_config.phy_is_low_power)
++              tg3_set_power_state(tp, PCI_D3hot);
++
+ }
+ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ {
++#if (LINUX_VERSION_CODE >= 0x020607)
+       struct mii_ioctl_data *data = if_mii(ifr);
++#else
++      struct mii_ioctl_data *data = (struct mii_ioctl_data *) &ifr->ifr_ifru;
++#endif
+       struct tg3 *tp = netdev_priv(dev);
+       int err;
+@@ -8307,6 +9568,9 @@
+               if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+                       break;                  /* We have no PHY */
++              if (tp->link_config.phy_is_low_power)
++                      return -EAGAIN;
++
+               spin_lock_bh(&tp->lock);
+               err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
+               spin_unlock_bh(&tp->lock);
+@@ -8323,6 +9587,9 @@
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
++              if (tp->link_config.phy_is_low_power)
++                      return -EAGAIN;
++
+               spin_lock_bh(&tp->lock);
+               err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
+               spin_unlock_bh(&tp->lock);
+@@ -8341,6 +9608,9 @@
+ {
+       struct tg3 *tp = netdev_priv(dev);
++      if (netif_running(dev))
++              tg3_netif_stop(tp);
++
+       tg3_full_lock(tp, 0);
+       tp->vlgrp = grp;
+@@ -8348,6 +9618,9 @@
+       /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
+       __tg3_set_rx_mode(dev);
++      if (netif_running(dev))
++              tg3_netif_start(tp);
++
+       tg3_full_unlock(tp);
+ }
+@@ -8355,10 +9628,15 @@
+ {
+       struct tg3 *tp = netdev_priv(dev);
++      if (netif_running(dev))
++              tg3_netif_stop(tp);
++
+       tg3_full_lock(tp, 0);
+-      if (tp->vlgrp)
+-              tp->vlgrp->vlan_devices[vid] = NULL;
++      vlan_group_set_device(tp->vlgrp, vid, NULL);
+       tg3_full_unlock(tp);
++
++      if (netif_running(dev))
++              tg3_netif_start(tp);
+ }
+ #endif
+@@ -8436,9 +9714,15 @@
+       .set_msglevel           = tg3_set_msglevel,
+       .nway_reset             = tg3_nway_reset,
+       .get_link               = ethtool_op_get_link,
++#if (LINUX_VERSION_CODE >= 0x20418)
+       .get_eeprom_len         = tg3_get_eeprom_len,
++#endif
++#ifdef ETHTOOL_GEEPROM
+       .get_eeprom             = tg3_get_eeprom,
++#endif
++#ifdef ETHTOOL_SEEPROM
+       .set_eeprom             = tg3_set_eeprom,
++#endif
+       .get_ringparam          = tg3_get_ringparam,
+       .set_ringparam          = tg3_set_ringparam,
+       .get_pauseparam         = tg3_get_pauseparam,
+@@ -8446,7 +9730,9 @@
+       .get_rx_csum            = tg3_get_rx_csum,
+       .set_rx_csum            = tg3_set_rx_csum,
+       .get_tx_csum            = ethtool_op_get_tx_csum,
++#if (LINUX_VERSION_CODE >= 0x20418)
+       .set_tx_csum            = tg3_set_tx_csum,
++#endif
+       .get_sg                 = ethtool_op_get_sg,
+       .set_sg                 = ethtool_op_set_sg,
+ #if TG3_TSO_SUPPORT != 0
+@@ -8461,19 +9747,23 @@
+       .get_ethtool_stats      = tg3_get_ethtool_stats,
+       .get_coalesce           = tg3_get_coalesce,
+       .set_coalesce           = tg3_set_coalesce,
++#ifdef ETHTOOL_GPERMADDR
+       .get_perm_addr          = ethtool_op_get_perm_addr,
++#endif
+ };
+ static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
+ {
+-      u32 cursize, val;
++      u32 cursize, val, magic;
+       tp->nvram_size = EEPROM_CHIP_SIZE;
+-      if (tg3_nvram_read(tp, 0, &val) != 0)
++      if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
+               return;
+-      if (swab32(val) != TG3_EEPROM_MAGIC)
++      if ((magic != TG3_EEPROM_MAGIC) &&
++          ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
++          ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
+               return;
+       /*
+@@ -8481,13 +9771,13 @@
+        * When we encounter our validation signature, we know the addressing
+        * has wrapped around, and thus have our chip size.
+        */
+-      cursize = 0x800;
++      cursize = 0x10;
+       while (cursize < tp->nvram_size) {
+-              if (tg3_nvram_read(tp, cursize, &val) != 0)
++              if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
+                       return;
+-              if (swab32(val) == TG3_EEPROM_MAGIC)
++              if (val == magic)
+                       break;
+               cursize <<= 1;
+@@ -8495,18 +9785,27 @@
+       tp->nvram_size = cursize;
+ }
+-              
++
+ static void __devinit tg3_get_nvram_size(struct tg3 *tp)
+ {
+       u32 val;
++      if (tg3_nvram_read_swab(tp, 0, &val) != 0)
++              return;
++
++      /* Selfboot format */
++      if (val != TG3_EEPROM_MAGIC) {
++              tg3_get_eeprom_size(tp);
++              return;
++      }
++
+       if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
+               if (val != 0) {
+                       tp->nvram_size = (val >> 16) * 1024;
+                       return;
+               }
+       }
+-      tp->nvram_size = 0x20000;
++      tp->nvram_size = 0x80000;
+ }
+ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
+@@ -8623,22 +9922,112 @@
+       }
+ }
+-/* Chips other than 5700/5701 use the NVRAM for fetching info. */
+-static void __devinit tg3_nvram_init(struct tg3 *tp)
++static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
+ {
+-      int j;
++      u32 nvcfg1, protect = 0;
+-      if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
+-              return;
++      nvcfg1 = tr32(NVRAM_CFG1);
++
++      /* NVRAM protection for TPM */
++      if (nvcfg1 & (1 << 27)) {
++              tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
++              protect = 1;
++      }
++
++      nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
++      switch (nvcfg1) {
++              case FLASH_5755VENDOR_ATMEL_FLASH_1:
++              case FLASH_5755VENDOR_ATMEL_FLASH_2:
++              case FLASH_5755VENDOR_ATMEL_FLASH_3:
++              case FLASH_5755VENDOR_ATMEL_FLASH_5:
++                      tp->nvram_jedecnum = JEDEC_ATMEL;
++                      tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
++                      tp->tg3_flags2 |= TG3_FLG2_FLASH;
++                      tp->nvram_pagesize = 264;
++                      if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
++                          nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
++                              tp->nvram_size = (protect ? 0x3e200 : 0x80000);
++                      else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
++                              tp->nvram_size = (protect ? 0x1f200 : 0x40000);
++                      else
++                              tp->nvram_size = (protect ? 0x1f200 : 0x20000);
++                      break;
++              case FLASH_5752VENDOR_ST_M45PE10:
++              case FLASH_5752VENDOR_ST_M45PE20:
++              case FLASH_5752VENDOR_ST_M45PE40:
++                      tp->nvram_jedecnum = JEDEC_ST;
++                      tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
++                      tp->tg3_flags2 |= TG3_FLG2_FLASH;
++                      tp->nvram_pagesize = 256;
++                      if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
++                              tp->nvram_size = (protect ? 0x10000 : 0x20000);
++                      else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
++                              tp->nvram_size = (protect ? 0x10000 : 0x40000);
++                      else
++                              tp->nvram_size = (protect ? 0x20000 : 0x80000);
++                      break;
++      }
++}
++
++static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
++{
++      u32 nvcfg1;
++
++      nvcfg1 = tr32(NVRAM_CFG1);
++
++      switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
++              case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
++              case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
++              case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
++              case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
++                      tp->nvram_jedecnum = JEDEC_ATMEL;
++                      tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
++                      tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
++
++                      nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
++                      tw32(NVRAM_CFG1, nvcfg1);
++                      break;
++              case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
++              case FLASH_5755VENDOR_ATMEL_FLASH_1:
++              case FLASH_5755VENDOR_ATMEL_FLASH_2:
++              case FLASH_5755VENDOR_ATMEL_FLASH_3:
++                      tp->nvram_jedecnum = JEDEC_ATMEL;
++                      tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
++                      tp->tg3_flags2 |= TG3_FLG2_FLASH;
++                      tp->nvram_pagesize = 264;
++                      break;
++              case FLASH_5752VENDOR_ST_M45PE10:
++              case FLASH_5752VENDOR_ST_M45PE20:
++              case FLASH_5752VENDOR_ST_M45PE40:
++                      tp->nvram_jedecnum = JEDEC_ST;
++                      tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
++                      tp->tg3_flags2 |= TG3_FLG2_FLASH;
++                      tp->nvram_pagesize = 256;
++                      break;
++      }
++}
++
++static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
++{
++      tp->nvram_jedecnum = JEDEC_ATMEL;
++      tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
++      tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
++}
++/* Chips other than 5700/5701 use the NVRAM for fetching info. */
++static void __devinit tg3_nvram_init(struct tg3 *tp)
++{
+       tw32_f(GRC_EEPROM_ADDR,
+            (EEPROM_ADDR_FSM_RESET |
+             (EEPROM_DEFAULT_CLOCK_PERIOD <<
+              EEPROM_ADDR_CLKPERD_SHIFT)));
+-      /* XXX schedule_timeout() ... */
+-      for (j = 0; j < 100; j++)
+-              udelay(10);
++#if (LINUX_VERSION_CODE < 0x20607)
++      set_current_state(TASK_UNINTERRUPTIBLE);
++      schedule_timeout(HZ / 1000);
++#else
++      msleep(1);
++#endif
+       /* Enable seeprom accesses. */
+       tw32_f(GRC_LOCAL_CTRL,
+@@ -8656,12 +10045,21 @@
+               }
+               tg3_enable_nvram_access(tp);
++              tp->nvram_size = 0;
++
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+                       tg3_get_5752_nvram_info(tp);
++              else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
++                      tg3_get_5755_nvram_info(tp);
++              else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
++                      tg3_get_5787_nvram_info(tp);
++              else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
++                      tg3_get_5906_nvram_info(tp);
+               else
+                       tg3_get_nvram_info(tp);
+-              tg3_get_nvram_size(tp);
++              if (tp->nvram_size == 0)
++                      tg3_get_nvram_size(tp);
+               tg3_disable_nvram_access(tp);
+               tg3_nvram_unlock(tp);
+@@ -8693,12 +10091,17 @@
+             EEPROM_ADDR_ADDR_MASK) |
+            EEPROM_ADDR_READ | EEPROM_ADDR_START);
+-      for (i = 0; i < 10000; i++) {
++      for (i = 0; i < 1000; i++) {
+               tmp = tr32(GRC_EEPROM_ADDR);
+               if (tmp & EEPROM_ADDR_COMPLETE)
+                       break;
+-              udelay(100);
++#if (LINUX_VERSION_CODE < 0x20607)
++              set_current_state(TASK_UNINTERRUPTIBLE);
++              schedule_timeout(HZ / 1000);
++#else
++              msleep(1);
++#endif
+       }
+       if (!(tmp & EEPROM_ADDR_COMPLETE))
+               return -EBUSY;
+@@ -8727,26 +10130,42 @@
+       return 0;
+ }
++static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
++{
++      if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
++          (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
++          (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
++          (tp->nvram_jedecnum == JEDEC_ATMEL))
++
++              addr = ((addr / tp->nvram_pagesize) <<
++                      ATMEL_AT45DB0X1B_PAGE_POS) +
++                     (addr % tp->nvram_pagesize);
++
++      return addr;
++}
++
++static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
++{
++      if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
++          (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
++          (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
++          (tp->nvram_jedecnum == JEDEC_ATMEL))
++
++              addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
++                      tp->nvram_pagesize) +
++                     (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
++
++      return addr;
++}
++
+ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
+ {
+       int ret;
+-      if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
+-              printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
+-              return -EINVAL;
+-      }
+-
+       if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
+               return tg3_nvram_read_using_eeprom(tp, offset, val);
+-      if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
+-              (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
+-              (tp->nvram_jedecnum == JEDEC_ATMEL)) {
+-
+-              offset = ((offset / tp->nvram_pagesize) <<
+-                        ATMEL_AT45DB0X1B_PAGE_POS) +
+-                      (offset % tp->nvram_pagesize);
+-      }
++      offset = tg3_nvram_phys_addr(tp, offset);
+       if (offset > NVRAM_ADDR_MSK)
+               return -EINVAL;
+@@ -8771,6 +10190,16 @@
+       return ret;
+ }
++static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
++{
++      int err;
++      u32 tmp;
++
++      err = tg3_nvram_read(tp, offset, &tmp);
++      *val = swab32(tmp);
++      return err;
++}
++
+ static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
+                                   u32 offset, u32 len, u8 *buf)
+ {
+@@ -8796,13 +10225,18 @@
+                       (addr & EEPROM_ADDR_ADDR_MASK) |
+                       EEPROM_ADDR_START |
+                       EEPROM_ADDR_WRITE);
+-              
+-              for (j = 0; j < 10000; j++) {
++
++              for (j = 0; j < 1000; j++) {
+                       val = tr32(GRC_EEPROM_ADDR);
+                       if (val & EEPROM_ADDR_COMPLETE)
+                               break;
+-                      udelay(100);
++#if (LINUX_VERSION_CODE < 0x20607)
++                      set_current_state(TASK_UNINTERRUPTIBLE);
++                      schedule_timeout(HZ / 1000);
++#else
++                      msleep(1);
++#endif
+               }
+               if (!(val & EEPROM_ADDR_COMPLETE)) {
+                       rc = -EBUSY;
+@@ -8832,7 +10266,7 @@
+               u32 phy_addr, page_off, size;
+               phy_addr = offset & ~pagemask;
+-      
++
+               for (j = 0; j < pagesize; j += 4) {
+                       if ((ret = tg3_nvram_read(tp, phy_addr + j,
+                                               (u32 *) (tmp + j))))
+@@ -8923,15 +10357,7 @@
+               page_off = offset % tp->nvram_pagesize;
+-              if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
+-                      (tp->nvram_jedecnum == JEDEC_ATMEL)) {
+-
+-                      phy_addr = ((offset / tp->nvram_pagesize) <<
+-                                  ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
+-              }
+-              else {
+-                      phy_addr = offset;
+-              }
++              phy_addr = tg3_nvram_phys_addr(tp, offset);
+               tw32(NVRAM_ADDR, phy_addr);
+@@ -8939,13 +10365,15 @@
+               if ((page_off == 0) || (i == 0))
+                       nvram_cmd |= NVRAM_CMD_FIRST;
+-              else if (page_off == (tp->nvram_pagesize - 4))
++              if (page_off == (tp->nvram_pagesize - 4))
+                       nvram_cmd |= NVRAM_CMD_LAST;
+               if (i == (len - 4))
+                       nvram_cmd |= NVRAM_CMD_LAST;
+               if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
++                  (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
++                  (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
+                   (tp->nvram_jedecnum == JEDEC_ST) &&
+                   (nvram_cmd & NVRAM_CMD_FIRST)) {
+@@ -8971,11 +10399,6 @@
+ {
+       int ret;
+-      if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
+-              printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
+-              return -EINVAL;
+-      }
+-
+       if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
+               tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
+                      ~GRC_LCLCTRL_GPIO_OUTPUT1);
+@@ -9083,12 +10506,23 @@
+       return NULL;
+ }
+-/* Since this function may be called in D3-hot power state during
+- * tg3_init_one(), only config cycles are allowed.
+- */
+ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
+ {
+       u32 val;
++      u16 pmcsr;
++
++      /* On some early chips the SRAM cannot be accessed in D3hot state,
++       * so we need to make sure we're in D0.
++       */
++      pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
++      pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
++      pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
++#if (LINUX_VERSION_CODE < 0x20607)
++      set_current_state(TASK_UNINTERRUPTIBLE);
++      schedule_timeout(HZ / 1000);
++#else
++      msleep(1);
++#endif
+       /* Make sure register accesses (indirect or otherwise)
+        * will function correctly.
+@@ -9096,9 +10530,34 @@
+       pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+                              tp->misc_host_ctrl);
++      /* The memory arbiter has to be enabled in order for SRAM accesses
++       * to succeed.  Normally on powerup the tg3 chip firmware will make
++       * sure it is enabled, but other entities such as system netboot
++       * code might disable it.
++       */
++      val = tr32(MEMARB_MODE);
++      tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
++
+       tp->phy_id = PHY_ID_INVALID;
+       tp->led_ctrl = LED_CTRL_MODE_PHY_1;
++      /* Assume an onboard device and WOL capable by default.  */
++      tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
++
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++              if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
++                      tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
++                      tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
++              }
++              val = tr32(VCPU_CFGSHDW);
++              if (val & VCPU_CFGSHDW_ASPM_DBNC)
++                      tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
++              if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
++                  (val & VCPU_CFGSHDW_WOL_MAGPKT))
++                      tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
++              return;
++      }
++
+       tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
+       if (val == NIC_SRAM_DATA_SIG_MAGIC) {
+               u32 nic_cfg, led_cfg;
+@@ -9195,18 +10654,30 @@
+                   tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+                       tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+-              if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
+-                  (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
+-                  (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
++              if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
+                       tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
++                      if ((tp->pdev->subsystem_vendor ==
++                           PCI_VENDOR_ID_ARIMA) &&
++                          (tp->pdev->subsystem_device == 0x205a ||
++                           tp->pdev->subsystem_device == 0x2063))
++                              tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
++              } else {
++                      tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
++                      tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
++              }
+               if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
+                       tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
+                       if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
+                               tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
+               }
+-              if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
+-                      tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
++              if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
++                  !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
++                      tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
++
++              if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
++                  nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
++                      tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
+               if (cfg2 & (1 << 17))
+                       tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
+@@ -9215,6 +10686,14 @@
+               /* bootcode if bit 18 is set */
+               if (cfg2 & (1 << 18))
+                       tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
++
++              if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
++                      u32 cfg3;
++
++                      tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
++                      if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
++                              tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
++              }
+       }
+ }
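The D3hot-to-D0 switch added at the top of tg3_get_eeprom_hw_cfg() is nothing more than clearing the power-state field of the PCI PMCSR and waiting briefly. The same step in isolation might look like the sketch below (the function name is invented; pm_cap is assumed to hold the offset of the power-management capability, as tp->pm_cap does in the driver):

#include <linux/pci.h>
#include <linux/delay.h>

/* Illustrative only: put a PCI function into D0 by clearing the PMCSR state bits. */
static void force_d0(struct pci_dev *pdev, int pm_cap)
{
        u16 pmcsr;

        pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pmcsr);
        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;       /* 0 == D0 */
        pci_write_config_word(pdev, pm_cap + PCI_PM_CTRL, pmcsr);
        msleep(1);                              /* short settle delay */
}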
+@@ -9276,13 +10755,13 @@
+       if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
+           !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
+-              u32 bmsr, adv_reg, tg3_ctrl;
++              u32 bmsr, adv_reg, tg3_ctrl, mask;
+               tg3_readphy(tp, MII_BMSR, &bmsr);
+               if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+                   (bmsr & BMSR_LSTATUS))
+                       goto skip_phy_reset;
+-                  
++
+               err = tg3_phy_reset(tp);
+               if (err)
+                       return err;
+@@ -9300,7 +10779,10 @@
+                                            MII_TG3_CTRL_ENABLE_AS_MASTER);
+               }
+-              if (!tg3_copper_is_advertising_all(tp)) {
++              mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
++                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
++                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
++              if (!tg3_copper_is_advertising_all(tp, mask)) {
+                       tg3_writephy(tp, MII_ADVERTISE, adv_reg);
+                       if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+@@ -9345,25 +10827,53 @@
+ {
+       unsigned char vpd_data[256];
+       unsigned int i;
++      u32 magic;
+-      if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
+-              /* Sun decided not to put the necessary bits in the
+-               * NVRAM of their onboard tg3 parts :(
+-               */
+-              strcpy(tp->board_part_number, "Sun 570X");
+-              return;
+-      }
++      if (tg3_nvram_read_swab(tp, 0x0, &magic))
++              goto out_not_found;
+-      for (i = 0; i < 256; i += 4) {
+-              u32 tmp;
++      if (magic == TG3_EEPROM_MAGIC) {
++              for (i = 0; i < 256; i += 4) {
++                      u32 tmp;
++
++                      if (tg3_nvram_read(tp, 0x100 + i, &tmp))
++                              goto out_not_found;
++
++                      vpd_data[i + 0] = ((tmp >>  0) & 0xff);
++                      vpd_data[i + 1] = ((tmp >>  8) & 0xff);
++                      vpd_data[i + 2] = ((tmp >> 16) & 0xff);
++                      vpd_data[i + 3] = ((tmp >> 24) & 0xff);
++              }
++      } else {
++              int vpd_cap;
+-              if (tg3_nvram_read(tp, 0x100 + i, &tmp))
+-                      goto out_not_found;
++              vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
++              for (i = 0; i < 256; i += 4) {
++                      u32 tmp, j = 0;
++                      u16 tmp16;
++
++                      pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
++                                            i);
++                      while (j++ < 100) {
++                              pci_read_config_word(tp->pdev, vpd_cap +
++                                                   PCI_VPD_ADDR, &tmp16);
++                              if (tmp16 & 0x8000)
++                                      break;
++#if (LINUX_VERSION_CODE < 0x20607)
++                              set_current_state(TASK_UNINTERRUPTIBLE);
++                              schedule_timeout(1);
++#else
++                              msleep(1);
++#endif
++                      }
++                      if (!(tmp16 & 0x8000))
++                              goto out_not_found;
+-              vpd_data[i + 0] = ((tmp >>  0) & 0xff);
+-              vpd_data[i + 1] = ((tmp >>  8) & 0xff);
+-              vpd_data[i + 2] = ((tmp >> 16) & 0xff);
+-              vpd_data[i + 3] = ((tmp >> 24) & 0xff);
++                      pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
++                                            &tmp);
++                      tmp = cpu_to_le32(tmp);
++                      memcpy(&vpd_data[i], &tmp, 4);
++              }
+       }
+       /* Now parse and find the part number. */
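When the NVRAM magic is absent, the else-branch above falls back to reading VPD through the PCI VPD capability: write the byte offset to PCI_VPD_ADDR, poll bit 15 until the hardware flags completion, then read 32 bits from PCI_VPD_DATA. Folded into a helper, that loop might look like this sketch (the function name is invented; the 100-iteration budget and completion bit mirror the code above):

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Illustrative only: read one 32-bit VPD word through the PCI VPD capability. */
static int vpd_read_dword(struct pci_dev *pdev, int vpd_cap, u16 off, u32 *val)
{
        u16 status = 0;
        int i;

        pci_write_config_word(pdev, vpd_cap + PCI_VPD_ADDR, off);
        for (i = 0; i < 100; i++) {
                pci_read_config_word(pdev, vpd_cap + PCI_VPD_ADDR, &status);
                if (status & 0x8000)            /* completion flag set by hardware */
                        break;
                msleep(1);
        }
        if (!(status & 0x8000))
                return -EBUSY;

        pci_read_config_dword(pdev, vpd_cap + PCI_VPD_DATA, val);
        return 0;
}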
+@@ -9412,60 +10922,129 @@
+       }
+ out_not_found:
+-      strcpy(tp->board_part_number, "none");
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
++              strcpy(tp->board_part_number, "BCM95906");
++      else
++              strcpy(tp->board_part_number, "none");
+ }
+-#ifdef CONFIG_SPARC64
+-static int __devinit tg3_is_sun_570X(struct tg3 *tp)
++static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
+ {
+-      struct pci_dev *pdev = tp->pdev;
+-      struct pcidev_cookie *pcp = pdev->sysdata;
++      u32 val;
+-      if (pcp != NULL) {
+-              int node = pcp->prom_node;
+-              u32 venid;
+-              int err;
+-
+-              err = prom_getproperty(node, "subsystem-vendor-id",
+-                                     (char *) &venid, sizeof(venid));
+-              if (err == 0 || err == -1)
+-                      return 0;
+-              if (venid == PCI_VENDOR_ID_SUN)
+-                      return 1;
++      if (tg3_nvram_read_swab(tp, offset, &val) ||
++          (val & 0xfc000000) != 0x0c000000 ||
++          tg3_nvram_read_swab(tp, offset + 4, &val) ||
++          val != 0)
++              return 0;
+-              /* TG3 chips onboard the SunBlade-2500 don't have the
+-               * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
+-               * are distinguishable from non-Sun variants by being
+-               * named "network" by the firmware.  Non-Sun cards will
+-               * show up as being named "ethernet".
+-               */
+-              if (!strcmp(pcp->prom_name, "network"))
+-                      return 1;
++      return 1;
++}
++
++static void __devinit tg3_read_fw_ver(struct tg3 *tp)
++{
++      u32 val, offset, start;
++      u32 ver_offset;
++      int i, bcnt;
++
++      if (tg3_nvram_read_swab(tp, 0, &val))
++              return;
++
++      if (val != TG3_EEPROM_MAGIC)
++              return;
++
++      if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
++          tg3_nvram_read_swab(tp, 0x4, &start))
++              return;
++
++      offset = tg3_nvram_logical_addr(tp, offset);
++
++      if (!tg3_fw_img_is_valid(tp, offset) ||
++          tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
++              return;
++
++      offset = offset + ver_offset - start;
++      for (i = 0; i < 16; i += 4) {
++              if (tg3_nvram_read(tp, offset + i, &val))
++                      return;
++
++              val = le32_to_cpu(val);
++              memcpy(tp->fw_ver + i, &val, 4);
+       }
+-      return 0;
++
++      if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
++              return;
++
++      for (offset = TG3_NVM_DIR_START;
++           offset < TG3_NVM_DIR_END;
++           offset += TG3_NVM_DIRENT_SIZE) {
++              if (tg3_nvram_read_swab(tp, offset, &val))
++                      return;
++
++              if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
++                      break;
++      }
++
++      if (offset == TG3_NVM_DIR_END)
++              return;
++
++      if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
++              start = 0x08000000;
++      else if (tg3_nvram_read_swab(tp, offset - 4, &start))
++              return;
++
++      if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
++          !tg3_fw_img_is_valid(tp, offset) ||
++          tg3_nvram_read_swab(tp, offset + 8, &val))
++              return;
++
++      offset += val - start;
++
++      bcnt = strlen(tp->fw_ver);
++
++      tp->fw_ver[bcnt++] = ',';
++      tp->fw_ver[bcnt++] = ' ';
++
++      for (i = 0; i < 4; i++) {
++              if (tg3_nvram_read(tp, offset, &val))
++                      return;
++
++              val = le32_to_cpu(val);
++              offset += sizeof(val);
++
++              if (bcnt > TG3_VER_SIZE - sizeof(val)) {
++                      memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
++                      break;
++              }
++
++              memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
++              bcnt += sizeof(val);
++      }
++
++      tp->fw_ver[TG3_VER_SIZE - 1] = 0;
+ }
+-#endif
++
++static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
+ static int __devinit tg3_get_invariants(struct tg3 *tp)
+ {
++#if (LINUX_VERSION_CODE >= 0x2060a)
+       static struct pci_device_id write_reorder_chipsets[] = {
+               { PCI_DEVICE(PCI_VENDOR_ID_AMD,
+                            PCI_DEVICE_ID_AMD_FE_GATE_700C) },
++              { PCI_DEVICE(PCI_VENDOR_ID_AMD,
++                           PCI_DEVICE_ID_AMD_8131_BRIDGE) },
+               { PCI_DEVICE(PCI_VENDOR_ID_VIA,
+                            PCI_DEVICE_ID_VIA_8385_0) },
+               { },
+       };
++#endif
+       u32 misc_ctrl_reg;
+       u32 cacheline_sz_reg;
+       u32 pci_state_reg, grc_misc_cfg;
+       u32 val;
+       u16 pci_cmd;
+-      int err;
+-
+-#ifdef CONFIG_SPARC64
+-      if (tg3_is_sun_570X(tp))
+-              tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
+-#endif
++      int err, pcie_cap;
+       /* Force memory write invalidate off.  If we leave it on,
+        * then on 5700_BX chips we have to enable a workaround.
+@@ -9604,8 +11183,15 @@
+       tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
+       tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
++      if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
++          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
++              tp->pdev_peer = tg3_find_peer(tp);
++
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+           (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
+               tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
+@@ -9613,16 +11199,50 @@
+           (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
+               tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
+-      if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
+-              tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
++      if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
++              tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
++              if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
++                  GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
++                  (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
++                   tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
++                   tp->pdev_peer == tp->pdev))
++                      tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
++
++              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
++                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
++                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++                      tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
++                      tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
++              } else {
++                      tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
++                      if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
++                              ASIC_REV_5750 &&
++                          tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
++                              tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
++              }
++      }
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
+-          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
++          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
++          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
++          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
++          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
+               tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
+-      if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
++      pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
++      if (pcie_cap != 0) {
+               tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
++              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++                      u16 lnkctl;
++
++                      pci_read_config_word(tp->pdev,
++                                           pcie_cap + PCI_EXP_LNKCTL,
++                                           &lnkctl);
++                      if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
++                              tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
++              }
++      }
+       /* If we have an AMD 762 or VIA K8T800 chipset, write
+        * reordering to the mailbox registers done by the host
+@@ -9630,7 +11250,16 @@
+        * every mailbox register write to force the writes to be
+        * posted to the chip in order.
+        */
++#if (LINUX_VERSION_CODE < 0x2060a)
++      if ((pci_find_device(PCI_VENDOR_ID_AMD,
++                           PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL) ||
++           pci_find_device(PCI_VENDOR_ID_AMD,
++                           PCI_DEVICE_ID_AMD_8131_BRIDGE, NULL) ||
++           pci_find_device(PCI_VENDOR_ID_VIA,
++                           PCI_DEVICE_ID_VIA_8385_0, NULL)) &&
++#else
+       if (pci_dev_present(write_reorder_chipsets) &&
++#endif
+           !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+               tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
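The version split above exists because pci_dev_present() is missing on kernels older than 2.6.10, so the same chipset test is open-coded with pci_find_device(). One way to hide the split behind a single predicate is sketched here (the wrapper name is invented; the zero-filled terminator is the same convention the write_reorder_chipsets[] table already uses):

#include <linux/version.h>
#include <linux/pci.h>

/* Illustrative only: "is any device from this table present?" on old and new kernels. */
static int tg3_compat_dev_present(const struct pci_device_id *ids)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10))
        for (; ids->vendor || ids->device; ids++)
                if (pci_find_device(ids->vendor, ids->device, NULL))
                        return 1;
        return 0;
#else
        return pci_dev_present(ids);
#endif
}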
+@@ -9647,10 +11276,24 @@
+                                      cacheline_sz_reg);
+       }
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
++              tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
++              if (!tp->pcix_cap) {
++                      printk(KERN_ERR PFX "Cannot find PCI-X "
++                                          "capability, aborting.\n");
++                      return -EIO;
++              }
++      }
++
+       pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+                             &pci_state_reg);
+-      if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
++      if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
+               tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
+               /* If this is a 5700 BX chipset, and we are in PCI-X
+@@ -9669,11 +11312,13 @@
+                        * space registers clobbered due to this bug.
+                        * So explicitly force the chip into D0 here.
+                        */
+-                      pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
++                      pci_read_config_dword(tp->pdev,
++                                            tp->pm_cap + PCI_PM_CTRL,
+                                             &pm_reg);
+                       pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
+                       pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
+-                      pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
++                      pci_write_config_dword(tp->pdev,
++                                             tp->pm_cap + PCI_PM_CTRL,
+                                              pm_reg);
+                       /* Also, force SERR#/PERR# in PCI command. */
+@@ -9689,17 +11334,6 @@
+       if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
+               tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
+-      /* Back to back register writes can cause problems on this chip,
+-       * the workaround is to read back all reg writes except those to
+-       * mailbox regs.  See tg3_write_indirect_reg32().
+-       *
+-       * PCI Express 5750_A0 rev chips need this workaround too.
+-       */
+-      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+-          ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
+-           tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
+-              tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
+-
+       if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
+               tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
+       if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
+@@ -9723,8 +11357,19 @@
+       /* Various workaround register access methods */
+       if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
+               tp->write32 = tg3_write_indirect_reg32;
+-      else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
++      else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
++               ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
++                tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
++              /*
++               * Back-to-back register writes can cause problems on these
++               * chips; the workaround is to read back all reg writes
++               * except those to mailbox regs.
++               *
++               * See tg3_write_indirect_reg32().
++               */
+               tp->write32 = tg3_write_flush_reg32;
++      }
++
+       if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
+           (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
+@@ -9748,15 +11393,27 @@
+               pci_cmd &= ~PCI_COMMAND_MEMORY;
+               pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+       }
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++              tp->read32_mbox = tg3_read32_mbox_5906;
++              tp->write32_mbox = tg3_write32_mbox_5906;
++              tp->write32_tx_mbox = tg3_write32_mbox_5906;
++              tp->write32_rx_mbox = tg3_write32_mbox_5906;
++      }
++
++      if (tp->write32 == tg3_write_indirect_reg32 ||
++          ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
++           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
++            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
++              tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
+       /* Get eeprom hw config before calling tg3_set_power_state().
+-       * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
++       * In particular, the TG3_FLG2_IS_NIC flag must be
+        * determined before calling tg3_set_power_state() so that
+        * we know whether or not to switch out of Vaux power.
+        * When the flag is set, it means that GPIO1 is used for eeprom
+        * write protect and also implies that it is a LOM where GPIOs
+        * are not used to switch power.
+-       */ 
++       */
+       tg3_get_eeprom_hw_cfg(tp);
+       /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
+@@ -9774,8 +11431,11 @@
+       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+               tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
++              tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
++
+       /* Force the chip into D0. */
+-      err = tg3_set_power_state(tp, 0);
++      err = tg3_set_power_state(tp, PCI_D0);
+       if (err) {
+               printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
+                      pci_name(tp->pdev));
+@@ -9788,15 +11448,6 @@
+       if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
+               tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
+-      /* Pseudo-header checksum is done by hardware logic and not
+-       * the offload processers, so make the chip do the pseudo-
+-       * header checksums on receive.  For transmit it is more
+-       * convenient to do the pseudo-header checksum in software
+-       * as Linux does that on transmit for us in all cases.
+-       */
+-      tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
+-      tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
+-
+       /* Derive initial jumbo mode from MTU assigned in
+        * ether_setup() via the alloc_etherdev() call
+        */
+@@ -9819,6 +11470,7 @@
+           ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
+            (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
+            (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
++          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
+           (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
+               tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
+@@ -9828,8 +11480,17 @@
+       if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
+               tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
+-      if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+-              tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
++      if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
++              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
++                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
++                      if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
++                          tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
++                              tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
++                      if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
++                              tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
++              } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
++                      tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
++      }
+       tp->coalesce_mode = 0;
+       if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
+@@ -9882,14 +11543,6 @@
+       grc_misc_cfg = tr32(GRC_MISC_CFG);
+       grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
+-      /* Broadcom's driver says that CIOBE multisplit has a bug */
+-#if 0
+-      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
+-          grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
+-              tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
+-              tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
+-      }
+-#endif
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+           (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
+            grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
+@@ -9917,7 +11570,9 @@
+             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
+           (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
+            (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
+-            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
++            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
++            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+               tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
+       err = tg3_phy_probe(tp);
+@@ -9928,6 +11583,7 @@
+       }
+       tg3_read_partno(tp);
++      tg3_read_fw_ver(tp);
+       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+               tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
+@@ -9952,6 +11608,7 @@
+        * upon subsystem IDs.
+        */
+       if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+           !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
+               tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
+                                 TG3_FLAG_USE_LINKCHG_REG);
+@@ -9963,40 +11620,52 @@
+       else
+               tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
+-      /* It seems all chips can get confused if TX buffers
++      /* All chips before 5787 can get confused if TX buffers
+        * straddle the 4GB address boundary in some cases.
+        */
+-      tp->dev->hard_start_xmit = tg3_start_xmit;
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
++              tp->dev->hard_start_xmit = tg3_start_xmit;
++      else
++              tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
+       tp->rx_offset = 2;
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+           (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
+               tp->rx_offset = 0;
+-      /* By default, disable wake-on-lan.  User can change this
+-       * using ETHTOOL_SWOL.
++      tp->rx_std_max_post = TG3_RX_RING_SIZE;
++
++      /* Increment the rx prod index on the rx std ring by at most
++       * 8 for these chips to work around hw errata.
+        */
+-      tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
++              tp->rx_std_max_post = 8;
++
++      if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
++              tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
++                                   PCIE_PWR_MGMT_L1_THRESH_MSK;
+       return err;
+ }
+-#ifdef CONFIG_SPARC64
++#ifdef CONFIG_SPARC
+ static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
+ {
+       struct net_device *dev = tp->dev;
+       struct pci_dev *pdev = tp->pdev;
+-      struct pcidev_cookie *pcp = pdev->sysdata;
+-
+-      if (pcp != NULL) {
+-              int node = pcp->prom_node;
+-
+-              if (prom_getproplen(node, "local-mac-address") == 6) {
+-                      prom_getproperty(node, "local-mac-address",
+-                                       dev->dev_addr, 6);
+-                      memcpy(dev->perm_addr, dev->dev_addr, 6);
+-                      return 0;
+-              }
++      struct device_node *dp = pci_device_to_OF_node(pdev);
++      const unsigned char *addr;
++      int len;
++
++      addr = of_get_property(dp, "local-mac-address", &len);
++      if (addr && len == 6) {
++              memcpy(dev->dev_addr, addr, 6);
++              memcpy(dev->perm_addr, dev->dev_addr, 6);
++              return 0;
+       }
+       return -ENODEV;
+ }
+@@ -10015,15 +11684,15 @@
+ {
+       struct net_device *dev = tp->dev;
+       u32 hi, lo, mac_offset;
++      int addr_ok = 0;
+-#ifdef CONFIG_SPARC64
++#ifdef CONFIG_SPARC
+       if (!tg3_get_macaddr_sparc(tp))
+               return 0;
+ #endif
+       mac_offset = 0x7c;
+-      if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
+-           !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
++      if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
+           (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+               if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
+                       mac_offset = 0xcc;
+@@ -10032,6 +11701,8 @@
+               else
+                       tg3_nvram_unlock(tp);
+       }
++      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
++              mac_offset = 0x10;
+       /* First try to get it from MAC address mailbox. */
+       tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
+@@ -10044,29 +11715,33 @@
+               dev->dev_addr[3] = (lo >> 16) & 0xff;
+               dev->dev_addr[4] = (lo >>  8) & 0xff;
+               dev->dev_addr[5] = (lo >>  0) & 0xff;
++
++              /* Some old bootcode may report a 0 MAC address in SRAM */
++              addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
+       }
+-      /* Next, try NVRAM. */
+-      else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
+-               !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
+-               !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
+-              dev->dev_addr[0] = ((hi >> 16) & 0xff);
+-              dev->dev_addr[1] = ((hi >> 24) & 0xff);
+-              dev->dev_addr[2] = ((lo >>  0) & 0xff);
+-              dev->dev_addr[3] = ((lo >>  8) & 0xff);
+-              dev->dev_addr[4] = ((lo >> 16) & 0xff);
+-              dev->dev_addr[5] = ((lo >> 24) & 0xff);
+-      }
+-      /* Finally just fetch it out of the MAC control regs. */
+-      else {
+-              hi = tr32(MAC_ADDR_0_HIGH);
+-              lo = tr32(MAC_ADDR_0_LOW);
++      if (!addr_ok) {
++              /* Next, try NVRAM. */
++              if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
++                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
++                      dev->dev_addr[0] = ((hi >> 16) & 0xff);
++                      dev->dev_addr[1] = ((hi >> 24) & 0xff);
++                      dev->dev_addr[2] = ((lo >>  0) & 0xff);
++                      dev->dev_addr[3] = ((lo >>  8) & 0xff);
++                      dev->dev_addr[4] = ((lo >> 16) & 0xff);
++                      dev->dev_addr[5] = ((lo >> 24) & 0xff);
++              }
++              /* Finally just fetch it out of the MAC control regs. */
++              else {
++                      hi = tr32(MAC_ADDR_0_HIGH);
++                      lo = tr32(MAC_ADDR_0_LOW);
+-              dev->dev_addr[5] = lo & 0xff;
+-              dev->dev_addr[4] = (lo >> 8) & 0xff;
+-              dev->dev_addr[3] = (lo >> 16) & 0xff;
+-              dev->dev_addr[2] = (lo >> 24) & 0xff;
+-              dev->dev_addr[1] = hi & 0xff;
+-              dev->dev_addr[0] = (hi >> 8) & 0xff;
++                      dev->dev_addr[5] = lo & 0xff;
++                      dev->dev_addr[4] = (lo >> 8) & 0xff;
++                      dev->dev_addr[3] = (lo >> 16) & 0xff;
++                      dev->dev_addr[2] = (lo >> 24) & 0xff;
++                      dev->dev_addr[1] = hi & 0xff;
++                      dev->dev_addr[0] = (hi >> 8) & 0xff;
++              }
+       }
+       if (!is_valid_ether_addr(&dev->dev_addr[0])) {
+@@ -10076,7 +11751,9 @@
+ #endif
+               return -EINVAL;
+       }
++#ifdef ETHTOOL_GPERMADDR
+       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
++#endif
+       return 0;
+ }
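tg3_get_device_address() assembles the same six bytes three different ways because the SRAM mailbox, the NVRAM words and the MAC_ADDR_0_HIGH/LOW registers each lay the address out differently. A worked example of the NVRAM layout with a made-up address (the shift pattern mirrors the NVRAM branch above; the address itself is invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Example words laid out the way the NVRAM branch expects. */
        uint32_t hi = 0x34120000;   /* addr[0]=0x12 in bits 23:16, addr[1]=0x34 in bits 31:24 */
        uint32_t lo = 0xbc9a7856;   /* addr[2..5] = 0x56 0x78 0x9a 0xbc, low byte first */
        uint8_t addr[6];

        addr[0] = (hi >> 16) & 0xff;
        addr[1] = (hi >> 24) & 0xff;
        addr[2] = (lo >>  0) & 0xff;
        addr[3] = (lo >>  8) & 0xff;
        addr[4] = (lo >> 16) & 0xff;
        addr[5] = (lo >> 24) & 0xff;

        /* prints 12:34:56:78:9a:bc */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
        return 0;
}

The SRAM-mailbox and MAC-register branches walk the bytes of each word in a different order, which is why the three extraction sequences cannot share code.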
+@@ -10333,6 +12010,7 @@
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+                       u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
++                      u32 read_water = 0x7;
+                       /* If the 5704 is behind the EPB bridge, we can
+                        * do the less restrictive ONE_DMA workaround for
+@@ -10344,8 +12022,13 @@
+                       else if (ccval == 0x6 || ccval == 0x7)
+                               tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
++                      if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
++                              read_water = 4;
+                       /* Set bit 23 to enable PCIX hw bug fix */
+-                      tp->dma_rwctrl |= 0x009f0000;
++                      tp->dma_rwctrl |=
++                              (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
++                              (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
++                              (1 << 23);
+               } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
+                       /* 5780 always in PCIX mode */
+                       tp->dma_rwctrl |= 0x00144000;
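The 5703/5704 branch above replaces the opaque 0x009f0000 constant with an explicit composition of a read watermark, a write watermark of 3 and the bit-23 PCI-X fix. A quick check of that equivalence, assuming read/write watermark shifts of 16 and 19 (the shift values are assumptions; they are what makes read_water = 7 reproduce the old constant):

#include <stdio.h>
#include <stdint.h>

/* Assumed shift positions for DMA_RWCTRL_READ_WATER / DMA_RWCTRL_WRITE_WATER. */
#define READ_WATER_SHIFT        16
#define WRITE_WATER_SHIFT       19

int main(void)
{
        uint32_t read_water = 7;        /* lowered to 4 on the 5703 in the hunk above */
        uint32_t val = (read_water << READ_WATER_SHIFT) |
                       (0x3 << WRITE_WATER_SHIFT) |
                       (1u << 23);      /* PCIX hw bug fix bit */

        printf("0x%08x\n", val);        /* prints 0x009f0000, the old hard-coded value */
        return 0;
}

With read_water lowered to 4, the composed value becomes 0x009c0000 instead.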
+@@ -10457,17 +12140,25 @@
+       }
+       if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+           DMA_RWCTRL_WRITE_BNDRY_16) {
++#if (LINUX_VERSION_CODE >= 0x2060a)
+               static struct pci_device_id dma_wait_state_chipsets[] = {
+                       { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
+                                    PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
+                       { },
+               };
++#endif
+               /* DMA test passed without adjusting DMA boundary,
+                * now look for chipsets that are known to expose the
+                * DMA bug without failing the test.
+                */
+-              if (pci_dev_present(dma_wait_state_chipsets)) {
++#if (LINUX_VERSION_CODE < 0x2060a)
++              if (pci_find_device(PCI_VENDOR_ID_APPLE,
++                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15, NULL))
++#else
++              if (pci_dev_present(dma_wait_state_chipsets))
++#endif
++              {
+                       tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+                       tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
+               }
+@@ -10494,7 +12185,6 @@
+       tp->link_config.speed = SPEED_INVALID;
+       tp->link_config.duplex = DUPLEX_INVALID;
+       tp->link_config.autoneg = AUTONEG_ENABLE;
+-      netif_carrier_off(tp->dev);
+       tp->link_config.active_speed = SPEED_INVALID;
+       tp->link_config.active_duplex = DUPLEX_INVALID;
+       tp->link_config.phy_is_low_power = 0;
+@@ -10512,6 +12202,12 @@
+                       DEFAULT_MB_MACRX_LOW_WATER_5705;
+               tp->bufmgr_config.mbuf_high_water =
+                       DEFAULT_MB_HIGH_WATER_5705;
++              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
++                      tp->bufmgr_config.mbuf_mac_rx_low_water =
++                              DEFAULT_MB_MACRX_LOW_WATER_5906;
++                      tp->bufmgr_config.mbuf_high_water =
++                              DEFAULT_MB_HIGH_WATER_5906;
++              }
+               tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+                       DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
+@@ -10553,6 +12249,10 @@
+       case PHY_ID_BCM5752:    return "5752";
+       case PHY_ID_BCM5714:    return "5714";
+       case PHY_ID_BCM5780:    return "5780";
++      case PHY_ID_BCM5755:    return "5755";
++      case PHY_ID_BCM5787:    return "5787";
++      case PHY_ID_BCM5756:    return "5722/5756";
++      case PHY_ID_BCM5906:    return "5906";
+       case PHY_ID_BCM8002:    return "8002/serdes";
+       case 0:                 return "serdes";
+       default:                return "unknown";
+@@ -10711,9 +12411,10 @@
+       }
+       SET_MODULE_OWNER(dev);
++#if (LINUX_VERSION_CODE >= 0x20419)
+       SET_NETDEV_DEV(dev, &pdev->dev);
++#endif
+-      dev->features |= NETIF_F_LLTX;
+ #if TG3_VLAN_TAG_USED
+       dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+       dev->vlan_rx_register = tg3_vlan_rx_register;
+@@ -10755,9 +12456,12 @@
+       tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
+ #endif
+       spin_lock_init(&tp->lock);
+-      spin_lock_init(&tp->tx_lock);
+       spin_lock_init(&tp->indirect_lock);
++#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
++      INIT_WORK(&tp->reset_task, tg3_reset_task);
++#else
+       INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
++#endif
+       tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
+       if (tp->regs == 0UL) {
+@@ -10786,7 +12490,7 @@
+       dev->watchdog_timeo = TG3_TX_TIMEOUT;
+       dev->change_mtu = tg3_change_mtu;
+       dev->irq = pdev->irq;
+-#ifdef CONFIG_NET_POLL_CONTROLLER
++#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+       dev->poll_controller = tg3_poll_controller;
+ #endif
+@@ -10845,17 +12549,23 @@
+       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+           tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
++          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+           (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
+               tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
+       } else {
+-              tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
++              tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
+       }
+-      /* TSO is off by default, user can enable using ethtool.  */
+-#if 0
+-      if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
++      /* TSO is on by default on chips that support hardware TSO.
++       * Firmware TSO on older chips gives lower performance, so it
++       * is off by default, but can be enabled using ethtool.
++       */
++      if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
+               dev->features |= NETIF_F_TSO;
+-#endif
++              if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
++                  (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
++                      dev->features |= NETIF_F_TSO6;
++      }
+ #endif
+@@ -10866,10 +12576,6 @@
+               tp->rx_pending = 63;
+       }
+-      if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
+-          (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
+-              tp->pdev_peer = tg3_find_peer(tp);
+-
+       err = tg3_get_device_address(tp);
+       if (err) {
+               printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
+@@ -10884,7 +12590,6 @@
+        */
+       if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+           (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+-              pci_save_state(tp->pdev);
+               tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+       }
+@@ -10899,7 +12604,19 @@
+        * checksumming.
+        */
+       if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
+-              dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
++#ifdef NETIF_F_IPV6_CSUM
++              dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
++              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
++                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
++                      dev->features |= NETIF_F_IPV6_CSUM;
++#else
++              if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
++                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
++                      dev->features |= NETIF_F_HW_CSUM;
++              else
++                      dev->features |= NETIF_F_IP_CSUM;
++              dev->features |= NETIF_F_SG;
++#endif
+               tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
+       } else
+               tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
+@@ -10909,11 +12626,7 @@
+       tg3_init_coal(tp);
+-      /* Now that we have fully setup the chip, save away a snapshot
+-       * of the PCI config space.  We need to restore this after
+-       * GRC_MISC_CFG core clock resets and some resume events.
+-       */
+-      pci_save_state(tp->pdev);
++      pci_set_drvdata(pdev, dev);
+       err = register_netdev(dev);
+       if (err) {
+@@ -10922,29 +12635,27 @@
+               goto err_out_iounmap;
+       }
+-      pci_set_drvdata(pdev, dev);
+-
+-      printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
++      printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
+              dev->name,
+              tp->board_part_number,
+              tp->pci_chip_rev_id,
+              tg3_phy_string(tp),
+              tg3_bus_string(tp, str),
+-             (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
++             ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
++              ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
++               "10/100/1000Base-T")));
+       for (i = 0; i < 6; i++)
+               printk("%2.2x%c", dev->dev_addr[i],
+                      i == 5 ? '\n' : ':');
+       printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
+-             "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
+-             "TSOcap[%d] \n",
++             "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
+              dev->name,
+              (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
+              (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
+              (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
+              (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
+-             (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
+              (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
+              (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+       printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
+@@ -10961,7 +12672,11 @@
+       }
+ err_out_free_dev:
++#if (LINUX_VERSION_CODE >= 0x20418)
+       free_netdev(dev);
++#else
++      kfree(dev);
++#endif
+ err_out_free_res:
+       pci_release_regions(pdev);
+@@ -10979,20 +12694,30 @@
+       if (dev) {
+               struct tg3 *tp = netdev_priv(dev);
++#if (LINUX_VERSION_CODE >= 0x20600)
+               flush_scheduled_work();
++#endif
+               unregister_netdev(dev);
+               if (tp->regs) {
+                       iounmap(tp->regs);
+                       tp->regs = NULL;
+               }
++#if (LINUX_VERSION_CODE >= 0x20418)
+               free_netdev(dev);
++#else
++              kfree(dev);
++#endif
+               pci_release_regions(pdev);
+               pci_disable_device(pdev);
+               pci_set_drvdata(pdev, NULL);
+       }
+ }
++#if (LINUX_VERSION_CODE < 0x2060b)
++static int tg3_suspend(struct pci_dev *pdev, u32 state)
++#else
+ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
++#endif
+ {
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct tg3 *tp = netdev_priv(dev);
+@@ -11001,7 +12726,9 @@
+       if (!netif_running(dev))
+               return 0;
++#if (LINUX_VERSION_CODE >= 0x20600)
+       flush_scheduled_work();
++#endif
+       tg3_netif_stop(tp);
+       del_timer_sync(&tp->timer);
+@@ -11017,12 +12744,24 @@
+       tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+       tg3_full_unlock(tp);
++      /* Save MSI address and data for resume.  */
++#if (LINUX_VERSION_CODE < 0x2060a)
++      pci_save_state(pdev, tp->pci_cfg_state);
++#else
++      pci_save_state(pdev);
++#endif
++
++#if (LINUX_VERSION_CODE < 0x2060b)
++      err = tg3_set_power_state(tp, state);
++#else
+       err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
++#endif
+       if (err) {
+               tg3_full_lock(tp, 0);
+               tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+-              tg3_init_hw(tp);
++              if (tg3_restart_hw(tp, 1))
++                      goto out;
+               tp->timer.expires = jiffies + tp->timer_offset;
+               add_timer(&tp->timer);
+@@ -11030,6 +12769,7 @@
+               netif_device_attach(dev);
+               tg3_netif_start(tp);
++out:
+               tg3_full_unlock(tp);
+       }
+@@ -11045,27 +12785,43 @@
+       if (!netif_running(dev))
+               return 0;
++#if (LINUX_VERSION_CODE < 0x2060a)
++      pci_restore_state(tp->pdev, tp->pci_cfg_state);
++#else
+       pci_restore_state(tp->pdev);
++#endif
+-      err = tg3_set_power_state(tp, 0);
++      err = tg3_set_power_state(tp, PCI_D0);
+       if (err)
+               return err;
++      /* Hardware bug - MSI won't work if INTX disabled. */
++      if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
++          (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
++#if (LINUX_VERSION_CODE < 0x2060e)
++              tg3_enable_intx(tp->pdev);
++#else
++              pci_intx(tp->pdev, 1);
++#endif
++
+       netif_device_attach(dev);
+       tg3_full_lock(tp, 0);
+       tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+-      tg3_init_hw(tp);
++      err = tg3_restart_hw(tp, 1);
++      if (err)
++              goto out;
+       tp->timer.expires = jiffies + tp->timer_offset;
+       add_timer(&tp->timer);
+       tg3_netif_start(tp);
++out:
+       tg3_full_unlock(tp);
+-      return 0;
++      return err;
+ }
+ static struct pci_driver tg3_driver = {
+@@ -11079,7 +12835,11 @@
+ static int __init tg3_init(void)
+ {
++#if (LINUX_VERSION_CODE < 0x020613)
+       return pci_module_init(&tg3_driver);
++#else
++      return pci_register_driver(&tg3_driver);
++#endif
+ }
+ static void __exit tg3_cleanup(void)
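All the bare LINUX_VERSION_CODE constants in this patch (0x20607, 0x2060a, 0x020613, ...) are KERNEL_VERSION(major, minor, patch) values written in hex: the kernel packs a version as (major << 16) | (minor << 8) | patch. A small decoder for the thresholds used here:

#include <stdio.h>
#include <stdint.h>

static void decode(uint32_t code)
{
        /* Same packing as the kernel's KERNEL_VERSION(a, b, c) macro. */
        printf("0x%06x -> %u.%u.%u\n",
               code, (code >> 16) & 0xff, (code >> 8) & 0xff, code & 0xff);
}

int main(void)
{
        decode(0x20607);        /* 2.6.7  - the msleep() branches require at least this */
        decode(0x2060a);        /* 2.6.10 - the pci_dev_present() branch is used from here */
        decode(0x020613);       /* 2.6.19 - tg3_init() switches to pci_register_driver() */
        return 0;
}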
+diff -uNr linux-2.6.16.old/drivers/net/tg3.h linux-2.6.16/drivers/net/tg3.h
+--- linux-2.6.16.old/drivers/net/tg3.h 2006-03-20 06:53:29.000000000 +0100
++++ linux-2.6.16/drivers/net/tg3.h     2007-09-15 01:53:08.000000000 +0200
+@@ -9,6 +9,442 @@
+ #ifndef _T3_H
+ #define _T3_H
++#if !defined(__iomem)
++#define __iomem
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5704S_2)
++#define PCI_DEVICE_ID_TIGON3_5704S_2  0x1649
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5705F)
++#define PCI_DEVICE_ID_TIGON3_5705F    0x166e
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5720)
++#define PCI_DEVICE_ID_TIGON3_5720     0x1658
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5721)
++#define PCI_DEVICE_ID_TIGON3_5721     0x1659
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5750)
++#define PCI_DEVICE_ID_TIGON3_5750     0x1676
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5751)
++#define PCI_DEVICE_ID_TIGON3_5751     0x1677
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5750M)
++#define PCI_DEVICE_ID_TIGON3_5750M    0x167c
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5751M)
++#define PCI_DEVICE_ID_TIGON3_5751M    0x167d
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5751F)
++#define PCI_DEVICE_ID_TIGON3_5751F    0x167e
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5789)
++#define PCI_DEVICE_ID_TIGON3_5789     0x169d
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5753)
++#define PCI_DEVICE_ID_TIGON3_5753     0x16f7
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5753M)
++#define PCI_DEVICE_ID_TIGON3_5753M    0x16fd
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5753F)
++#define PCI_DEVICE_ID_TIGON3_5753F    0x16fe
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5781)
++#define PCI_DEVICE_ID_TIGON3_5781     0x16dd
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5752)
++#define PCI_DEVICE_ID_TIGON3_5752     0x1600
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5752M)
++#define PCI_DEVICE_ID_TIGON3_5752M    0x1601
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5714)
++#define PCI_DEVICE_ID_TIGON3_5714     0x1668
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5714S)
++#define PCI_DEVICE_ID_TIGON3_5714S    0x1669
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5780)
++#define PCI_DEVICE_ID_TIGON3_5780     0x166a
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5780S)
++#define PCI_DEVICE_ID_TIGON3_5780S    0x166b
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5715)
++#define PCI_DEVICE_ID_TIGON3_5715     0x1678
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5715S)
++#define PCI_DEVICE_ID_TIGON3_5715S    0x1679
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5756)
++#define PCI_DEVICE_ID_TIGON3_5756     0x1674
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5754)
++#define PCI_DEVICE_ID_TIGON3_5754     0x167a
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5754M)
++#define PCI_DEVICE_ID_TIGON3_5754M    0x1672
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5755)
++#define PCI_DEVICE_ID_TIGON3_5755     0x167b
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5755M)
++#define PCI_DEVICE_ID_TIGON3_5755M    0x1673
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5722)
++#define PCI_DEVICE_ID_TIGON3_5722     0x165a
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5786)
++#define PCI_DEVICE_ID_TIGON3_5786     0x169a
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5787M)
++#define PCI_DEVICE_ID_TIGON3_5787M    0x1693
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5787)
++#define PCI_DEVICE_ID_TIGON3_5787     0x169b
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5787F)
++#define PCI_DEVICE_ID_TIGON3_5787F    0x167f
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5906)
++#define PCI_DEVICE_ID_TIGON3_5906     0x1712
++#endif
++
++#if !defined(PCI_DEVICE_ID_TIGON3_5906M)
++#define PCI_DEVICE_ID_TIGON3_5906M    0x1713
++#endif
++
++#if !defined(PCI_DEVICE_ID_APPLE_TIGON3)
++#define PCI_DEVICE_ID_APPLE_TIGON3    0x1645
++#endif
++
++#if !defined(PCI_DEVICE_ID_APPLE_UNI_N_PCI15)
++#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15       0x002e
++#endif
++
++#if !defined(PCI_DEVICE_ID_VIA_8385_0)
++#define PCI_DEVICE_ID_VIA_8385_0      0x3188
++#endif
++
++#if !defined(PCI_DEVICE_ID_AMD_8131_BRIDGE)
++#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
++#endif
++
++#if !defined(PCI_DEVICE_ID_SERVERWORKS_EPB)
++#define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103
++#endif
++
++#if !defined(PCI_VENDOR_ID_ARIMA)
++#define PCI_VENDOR_ID_ARIMA           0x161f
++#endif
++
++#ifndef PCI_DEVICE
++#define PCI_DEVICE(vend,dev) \
++      .vendor = (vend), .device = (dev), \
++      .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
++#endif
++
++#if !defined(PCI_VPD_ADDR)
++#define PCI_VPD_ADDR  2
++#define PCI_VPD_DATA  4
++#endif
++
++#ifndef NETDEV_TX_OK
++#define NETDEV_TX_OK 0
++#endif
++
++#ifndef NETDEV_TX_BUSY
++#define NETDEV_TX_BUSY 1
++#endif
++
++#ifndef NETDEV_TX_LOCKED
++#define NETDEV_TX_LOCKED -1
++#endif
++
++#ifdef NETIF_F_TSO
++#ifndef NETIF_F_GSO
++#define gso_size tso_size
++#define gso_segs tso_segs
++#endif
++#ifndef NETIF_F_TSO6
++#define NETIF_F_TSO6  0
++#define BCM_NO_TSO6     1
++#endif
++
++#if (LINUX_VERSION_CODE < 0x020616)
++static inline int skb_transport_offset(const struct sk_buff *skb)
++{
++      return (int) (skb->h.raw - skb->data);
++}
++
++static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
++{
++      return skb->nh.iph;
++}
++
++static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
++{
++      return ip_hdr(skb)->ihl * 4;
++}
++
++static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
++{
++      return skb->h.th;
++}
++
++static inline unsigned int tcp_optlen(const struct sk_buff *skb)
++{
++      return (tcp_hdr(skb)->doff - 5) * 4;
++}
++#endif
++#endif
++
++#ifndef CHECKSUM_PARTIAL
++#define CHECKSUM_PARTIAL CHECKSUM_HW
++#endif
++
++#ifndef DMA_64BIT_MASK
++#define DMA_64BIT_MASK ((u64) 0xffffffffffffffffULL)
++#define DMA_32BIT_MASK ((u64) 0x00000000ffffffffULL)
++#endif
++
++#ifndef DMA_40BIT_MASK
++#define DMA_40BIT_MASK ((u64) 0x000000ffffffffffULL)
++#endif
++
++#ifndef mmiowb
++#define mmiowb()
++#endif
++
++#ifndef PCI_D0
++typedef u32 pm_message_t;
++typedef u32 pci_power_t;
++#define PCI_D0                0
++#define PCI_D1                1
++#define PCI_D2                2
++#define PCI_D3hot     3
++#endif
++
++#ifndef WARN_ON
++#define WARN_ON(x)
++#endif
++
++#ifndef IRQ_RETVAL
++typedef void irqreturn_t;
++#define IRQ_RETVAL(x)
++#define IRQ_HANDLED
++#define IRQ_NONE
++#endif
++
++#ifndef IRQF_SHARED
++#define IRQF_SHARED SA_SHIRQ
++#endif
++
++#ifndef IRQF_SAMPLE_RANDOM
++#define IRQF_SAMPLE_RANDOM SA_SAMPLE_RANDOM
++#endif
++
++#if (LINUX_VERSION_CODE < 0x020604)
++#define MODULE_VERSION(version)
++#endif
++
++#if (LINUX_VERSION_CODE <= 0x020600)
++#define schedule_work(x)      schedule_task(x)
++#define work_struct           tq_struct
++#define INIT_WORK(x, y, z)    INIT_TQUEUE(x, y, z)
++#endif
++
++#ifndef ADVERTISE_PAUSE
++#define ADVERTISE_PAUSE_CAP           0x0400
++#endif
++#ifndef ADVERTISE_PAUSE_ASYM
++#define ADVERTISE_PAUSE_ASYM          0x0800
++#endif
++#ifndef LPA_PAUSE
++#define LPA_PAUSE_CAP                 0x0400
++#endif
++#ifndef LPA_PAUSE_ASYM
++#define LPA_PAUSE_ASYM                        0x0800
++#endif
++#ifndef MII_CTRL1000
++#define MII_CTRL1000                  0x9
++#endif
++#ifndef BMCR_SPEED1000
++#define BMCR_SPEED1000                        0x40
++#endif
++#ifndef ADVERTISE_1000FULL
++#define ADVERTISE_1000FULL            0x0200
++#define ADVERTISE_1000HALF            0x0100
++#endif
++#ifndef ADVERTISE_1000XFULL
++#define ADVERTISE_1000XFULL           0x20
++#define ADVERTISE_1000XHALF           0x40
++#define ADVERTISE_1000XPAUSE          0x80
++#define ADVERTISE_1000XPSE_ASYM               0x100
++#define LPA_1000XFULL                 0x20
++#define LPA_1000XHALF                 0x40
++#define LPA_1000XPAUSE                        0x80
++#define LPA_1000XPAUSE_ASYM           0x100
++#endif
++
++#if (LINUX_VERSION_CODE < 0x020605)
++#define pci_dma_sync_single_for_cpu(pdev, map, len, dir)      \
++      pci_dma_sync_single(pdev, map, len, dir)
++
++#define pci_dma_sync_single_for_device(pdev, map, len, dir)
++#endif
++
++#if (LINUX_VERSION_CODE < 0x020600)
++#define pci_get_device(x, y, z)       pci_find_device(x, y, z)
++#define pci_get_slot(x, y)    pci_find_slot((x)->number, y)
++#define pci_dev_put(x)
++#endif
++
++#if (LINUX_VERSION_CODE < 0x020547)
++#define pci_set_consistent_dma_mask(pdev, mask) (0)
++#endif
++
++#ifndef PCI_CAP_ID_EXP
++#define PCI_CAP_ID_EXP 0x10
++#endif
++#ifndef PCI_EXP_LNKCTL
++#define PCI_EXP_LNKCTL 16
++#endif
++#ifndef PCI_EXP_LNKCTL_CLKREQ_EN
++#define PCI_EXP_LNKCTL_CLKREQ_EN 0x100
++#endif
++
++#if (LINUX_VERSION_CODE < 0x020612)
++static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
++              unsigned int length)
++{
++      struct sk_buff *skb = dev_alloc_skb(length);
++      if (skb)
++              skb->dev = dev;
++      return skb;
++}
++#endif
++
++#ifndef NETIF_F_GSO
++static inline void netif_tx_lock(struct net_device *dev)
++{
++      spin_lock(&dev->xmit_lock);
++      dev->xmit_lock_owner = smp_processor_id();
++}
++
++static inline void netif_tx_unlock(struct net_device *dev)
++{
++      dev->xmit_lock_owner = -1;
++      spin_unlock(&dev->xmit_lock);
++}
++#endif
++
++#if !defined(HAVE_NETDEV_PRIV) && (LINUX_VERSION_CODE != 0x020603) && (LINUX_VERSION_CODE != 0x020604) && (LINUX_VERSION_CODE != 0x20605)
++static inline void *netdev_priv(struct net_device *dev)
++{
++      return dev->priv;
++}
++#endif
++
++#ifdef OLD_NETIF
++static inline void netif_poll_disable(struct net_device *dev)
++{
++      while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
++              /* No hurry. */
++              current->state = TASK_INTERRUPTIBLE;
++              schedule_timeout(1);
++      }
++}
++
++static inline void netif_poll_enable(struct net_device *dev)
++{
++      clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
++}
++
++static inline void netif_tx_disable(struct net_device *dev)
++{
++      spin_lock_bh(&dev->xmit_lock);
++      netif_stop_queue(dev);
++      spin_unlock_bh(&dev->xmit_lock);
++}
++
++#endif
++
++#if (LINUX_VERSION_CODE < 0x2060c)
++static inline int skb_header_cloned(struct sk_buff *skb) { return 0; }
++#endif
++
++#if (LINUX_VERSION_CODE >= 0x20418) && (LINUX_VERSION_CODE < 0x2060c)
++static int tg3_set_tx_hw_csum(struct net_device *dev, u32 data)
++{
++      if (data)
++              dev->features |= NETIF_F_HW_CSUM;
++      else
++              dev->features &= ~NETIF_F_HW_CSUM;
++
++      return 0;
++}
++#endif
++
++#ifndef VLAN_GROUP_ARRAY_SPLIT_PARTS
++static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
++                                       struct net_device *dev)
++{
++      if (vg)
++              vg->vlan_devices[vlan_id] = dev;
++}
++#endif
++#if (LINUX_VERSION_CODE < 0x2060e)
++static inline void tg3_enable_intx(struct pci_dev *pdev)
++{
++      u16 pci_command;
++
++      pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
++      if (pci_command & PCI_COMMAND_INTX_DISABLE)
++              pci_write_config_word(pdev, PCI_COMMAND,
++                                    pci_command & ~PCI_COMMAND_INTX_DISABLE);
++}
++#endif
++
++#ifndef NETIF_F_LLTX
++#define NETIF_F_LLTX  0
++#endif
++
+ #define TG3_64BIT_REG_HIGH            0x00UL
+ #define TG3_64BIT_REG_LOW             0x04UL
+@@ -24,6 +460,8 @@
+ #define RX_COPY_THRESHOLD             256
++#define TG3_RX_INTERNAL_RING_SZ_5906  32
++
+ #define RX_STD_MAX_SIZE                       1536
+ #define RX_STD_MAX_SIZE_5705          512
+ #define RX_JUMBO_MAX_SIZE             0xdeadbeef /* XXX */
+@@ -55,32 +493,10 @@
+ #define TG3PCI_IRQ_PIN                        0x0000003d
+ #define TG3PCI_MIN_GNT                        0x0000003e
+ #define TG3PCI_MAX_LAT                        0x0000003f
+-#define TG3PCI_X_CAPS                 0x00000040
+-#define  PCIX_CAPS_RELAXED_ORDERING    0x00020000
+-#define  PCIX_CAPS_SPLIT_MASK          0x00700000
+-#define  PCIX_CAPS_SPLIT_SHIFT                 20
+-#define  PCIX_CAPS_BURST_MASK          0x000c0000
+-#define  PCIX_CAPS_BURST_SHIFT                 18
+-#define  PCIX_CAPS_MAX_BURST_CPIOB     2
+-#define TG3PCI_PM_CAP_PTR             0x00000041
+-#define TG3PCI_X_COMMAND              0x00000042
+-#define TG3PCI_X_STATUS                       0x00000044
+-#define TG3PCI_PM_CAP_ID              0x00000048
+-#define TG3PCI_VPD_CAP_PTR            0x00000049
+-#define TG3PCI_PM_CAPS                        0x0000004a
+-#define TG3PCI_PM_CTRL_STAT           0x0000004c
+-#define TG3PCI_BR_SUPP_EXT            0x0000004e
+-#define TG3PCI_PM_DATA                        0x0000004f
+-#define TG3PCI_VPD_CAP_ID             0x00000050
+-#define TG3PCI_MSI_CAP_PTR            0x00000051
+-#define TG3PCI_VPD_ADDR_FLAG          0x00000052
+-#define  VPD_ADDR_FLAG_WRITE          0x00008000
+-#define TG3PCI_VPD_DATA                       0x00000054
+-#define TG3PCI_MSI_CAP_ID             0x00000058
+-#define TG3PCI_NXT_CAP_PTR            0x00000059
+-#define TG3PCI_MSI_CTRL                       0x0000005a
+-#define TG3PCI_MSI_ADDR_LOW           0x0000005c
+-#define TG3PCI_MSI_ADDR_HIGH          0x00000060
++#ifndef PCI_X_CMD_READ_2K
++#define  PCI_X_CMD_READ_2K            0x0008
++#endif
++/* 0x40 --> 0x64 unused */
+ #define TG3PCI_MSI_DATA                       0x00000064
+ /* 0x66 --> 0x68 unused */
+ #define TG3PCI_MISC_HOST_CTRL         0x00000068
+@@ -125,9 +541,12 @@
+ #define  CHIPREV_ID_5750_A0            0x4000
+ #define  CHIPREV_ID_5750_A1            0x4001
+ #define  CHIPREV_ID_5750_A3            0x4003
++#define  CHIPREV_ID_5750_C2            0x4202
+ #define  CHIPREV_ID_5752_A0_HW                 0x5000
+ #define  CHIPREV_ID_5752_A0            0x6000
+ #define  CHIPREV_ID_5752_A1            0x6001
++#define  CHIPREV_ID_5714_A2            0x9002
++#define  CHIPREV_ID_5906_A1            0xc001
+ #define  GET_ASIC_REV(CHIP_REV_ID)    ((CHIP_REV_ID) >> 12)
+ #define   ASIC_REV_5700                        0x07
+ #define   ASIC_REV_5701                        0x00
+@@ -138,6 +557,9 @@
+ #define   ASIC_REV_5752                        0x06
+ #define   ASIC_REV_5780                        0x08
+ #define   ASIC_REV_5714                        0x09
++#define   ASIC_REV_5755                        0x0a
++#define   ASIC_REV_5787                        0x0b
++#define   ASIC_REV_5906                        0x0c
+ #define  GET_CHIP_REV(CHIP_REV_ID)    ((CHIP_REV_ID) >> 8)
+ #define   CHIPREV_5700_AX              0x70
+ #define   CHIPREV_5700_BX              0x71
+@@ -455,6 +877,7 @@
+ #define  RX_MODE_PROMISC               0x00000100
+ #define  RX_MODE_NO_CRC_CHECK          0x00000200
+ #define  RX_MODE_KEEP_VLAN_TAG                 0x00000400
++#define  RX_MODE_IPV6_CSUM_ENABLE      0x01000000
+ #define MAC_RX_STATUS                 0x0000046c
+ #define  RX_STATUS_REMOTE_TX_XOFFED    0x00000001
+ #define  RX_STATUS_XOFF_RCVD           0x00000002
+@@ -642,7 +1065,8 @@
+ #define  SNDDATAI_SCTRL_FORCE_ZERO     0x00000010
+ #define SNDDATAI_STATSENAB            0x00000c0c
+ #define SNDDATAI_STATSINCMASK         0x00000c10
+-/* 0xc14 --> 0xc80 unused */
++#define ISO_PKT_TX                    0x00000c20
++/* 0xc24 --> 0xc80 unused */
+ #define SNDDATAI_COS_CNT_0            0x00000c80
+ #define SNDDATAI_COS_CNT_1            0x00000c84
+ #define SNDDATAI_COS_CNT_2            0x00000c88
+@@ -757,6 +1181,7 @@
+ #define  RCVLPC_STATSCTRL_ENABLE       0x00000001
+ #define  RCVLPC_STATSCTRL_FASTUPD      0x00000002
+ #define RCVLPC_STATS_ENABLE           0x00002018
++#define  RCVLPC_STATSENAB_DACK_FIX     0x00040000
+ #define  RCVLPC_STATSENAB_LNGBRST_RFIX         0x00400000
+ #define RCVLPC_STATS_INCMASK          0x0000201c
+ /* 0x2020 --> 0x2100 unused */
+@@ -992,11 +1417,13 @@
+ #define BUFMGR_MB_MACRX_LOW_WATER     0x00004414
+ #define  DEFAULT_MB_MACRX_LOW_WATER     0x00000020
+ #define  DEFAULT_MB_MACRX_LOW_WATER_5705  0x00000010
++#define  DEFAULT_MB_MACRX_LOW_WATER_5906  0x00000004
+ #define  DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
+ #define  DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b
+ #define BUFMGR_MB_HIGH_WATER          0x00004418
+ #define  DEFAULT_MB_HIGH_WATER                 0x00000060
+ #define  DEFAULT_MB_HIGH_WATER_5705    0x00000060
++#define  DEFAULT_MB_HIGH_WATER_5906    0x00000010
+ #define  DEFAULT_MB_HIGH_WATER_JUMBO   0x0000017c
+ #define  DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096
+ #define BUFMGR_RX_MB_ALLOC_REQ                0x0000441c
+@@ -1133,7 +1560,17 @@
+ #define TX_CPU_STATE                  0x00005404
+ #define TX_CPU_PGMCTR                 0x0000541c
++#define VCPU_STATUS                   0x00005100
++#define  VCPU_STATUS_INIT_DONE                 0x04000000
++#define  VCPU_STATUS_DRV_RESET                 0x08000000
++
++#define VCPU_CFGSHDW                  0x00005104
++#define  VCPU_CFGSHDW_WOL_ENABLE       0x00000001
++#define  VCPU_CFGSHDW_WOL_MAGPKT       0x00000004
++#define  VCPU_CFGSHDW_ASPM_DBNC                0x00001000
++
+ /* Mailboxes */
++#define GRCMBOX_BASE                  0x00005600
+ #define GRCMBOX_INTERRUPT_0           0x00005800 /* 64-bit */
+ #define GRCMBOX_INTERRUPT_1           0x00005808 /* 64-bit */
+ #define GRCMBOX_INTERRUPT_2           0x00005810 /* 64-bit */
+@@ -1333,12 +1770,16 @@
+ #define  GRC_MISC_CFG_BOARD_ID_5788   0x00010000
+ #define  GRC_MISC_CFG_BOARD_ID_5788M  0x00018000
+ #define  GRC_MISC_CFG_BOARD_ID_AC91002A1 0x00018000
++#define  GRC_MISC_CFG_BOARD_ID_5754   0x00008000
++#define  GRC_MISC_CFG_BOARD_ID_5754M  0x0000c000
++#define  GRC_MISC_CFG_EPHY_IDDQ               0x00200000
+ #define  GRC_MISC_CFG_KEEP_GPHY_POWER 0x04000000
+ #define GRC_LOCAL_CTRL                        0x00006808
+ #define  GRC_LCLCTRL_INT_ACTIVE               0x00000001
+ #define  GRC_LCLCTRL_CLEARINT         0x00000002
+ #define  GRC_LCLCTRL_SETINT           0x00000004
+ #define  GRC_LCLCTRL_INT_ON_ATTN      0x00000008
++#define  GRC_LCLCTRL_GPIO_UART_SEL    0x00000010      /* 5755 only */
+ #define  GRC_LCLCTRL_USE_SIG_DETECT   0x00000010      /* 5714/5780 only */
+ #define  GRC_LCLCTRL_USE_EXT_SIG_DETECT       0x00000020      /* 5714/5780 only */
+ #define  GRC_LCLCTRL_GPIO_INPUT3      0x00000020
+@@ -1392,7 +1833,11 @@
+ #define GRC_EEPROM_CTRL                       0x00006840
+ #define GRC_MDI_CTRL                  0x00006844
+ #define GRC_SEEPROM_DELAY             0x00006848
+-/* 0x684c --> 0x6c00 unused */
++/* 0x684c --> 0x6890 unused */
++#define GRC_VCPU_EXT_CTRL             0x00006890
++#define GRC_VCPU_EXT_CTRL_HALT_CPU     0x00400000
++#define GRC_VCPU_EXT_CTRL_DISABLE_WOL  0x20000000
++#define GRC_FASTBOOT_PC                       0x00006894      /* 5752, 5755, 5787 */
+ /* 0x6c00 --> 0x7000 unused */
+@@ -1436,6 +1881,17 @@
+ #define  FLASH_5752VENDOR_ST_M45PE10   0x02400000
+ #define  FLASH_5752VENDOR_ST_M45PE20   0x02400002
+ #define  FLASH_5752VENDOR_ST_M45PE40   0x02400001
++#define  FLASH_5755VENDOR_ATMEL_FLASH_1        0x03400001
++#define  FLASH_5755VENDOR_ATMEL_FLASH_2        0x03400002
++#define  FLASH_5755VENDOR_ATMEL_FLASH_3        0x03400000
++#define  FLASH_5755VENDOR_ATMEL_FLASH_4        0x00000003
++#define  FLASH_5755VENDOR_ATMEL_FLASH_5        0x02000003
++#define  FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ   0x03c00003
++#define  FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ  0x03c00002
++#define  FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ   0x03000003
++#define  FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ  0x03000002
++#define  FLASH_5787VENDOR_MICRO_EEPROM_64KHZ   0x03000000
++#define  FLASH_5787VENDOR_MICRO_EEPROM_376KHZ  0x02000000
+ #define  NVRAM_CFG1_5752PAGE_SIZE_MASK         0x70000000
+ #define  FLASH_5752PAGE_SIZE_256       0x00000000
+ #define  FLASH_5752PAGE_SIZE_512       0x10000000
+@@ -1468,9 +1924,26 @@
+ #define NVRAM_WRITE1                  0x00007028
+ /* 0x702c --> 0x7400 unused */
+-/* 0x7400 --> 0x8000 unused */
++/* 0x7400 --> 0x7c00 unused */
++#define PCIE_TRANSACTION_CFG          0x00007c04
++#define PCIE_TRANS_CFG_1SHOT_MSI       0x20000000
++#define PCIE_TRANS_CFG_LOM             0x00000020
++
++#define PCIE_PWR_MGMT_THRESH          0x00007d28
++#define PCIE_PWR_MGMT_L1_THRESH_MSK    0x0000ff00
+ #define TG3_EEPROM_MAGIC              0x669955aa
++#define TG3_EEPROM_MAGIC_FW           0xa5000000
++#define TG3_EEPROM_MAGIC_FW_MSK               0xff000000
++#define TG3_EEPROM_MAGIC_HW           0xabcd
++#define TG3_EEPROM_MAGIC_HW_MSK               0xffff
++
++#define TG3_NVM_DIR_START      0x18
++#define TG3_NVM_DIR_END        0x78
++#define TG3_NVM_DIRENT_SIZE    0xc
++#define TG3_NVM_DIRTYPE_SHIFT   24
++#define TG3_NVM_DIRTYPE_ASFINI   1
++
+ /* 32K Window into NIC internal memory */
+ #define NIC_SRAM_WIN_BASE             0x00008000
+@@ -1520,6 +1993,7 @@
+ #define  FWCMD_NICDRV_FIX_DMAR                 0x00000005
+ #define  FWCMD_NICDRV_FIX_DMAW                 0x00000006
+ #define  FWCMD_NICDRV_ALIVE2           0x0000000d
++#define  FWCMD_NICDRV_ALIVE3           0x0000000e
+ #define NIC_SRAM_FW_CMD_LEN_MBOX      0x00000b7c
+ #define NIC_SRAM_FW_CMD_DATA_MBOX     0x00000b80
+ #define NIC_SRAM_FW_ASF_STATUS_MBOX   0x00000c00
+@@ -1550,6 +2024,9 @@
+ #define  SHASTA_EXT_LED_MAC            0x00010000
+ #define  SHASTA_EXT_LED_COMBO          0x00018000
++#define NIC_SRAM_DATA_CFG_3           0x00000d3c
++#define  NIC_SRAM_ASPM_DEBOUNCE                0x00000002
++
+ #define NIC_SRAM_RX_MINI_BUFFER_DESC  0x00001000
+ #define NIC_SRAM_DMA_DESC_POOL_BASE   0x00002000
+@@ -1587,9 +2064,15 @@
+ #define MII_TG3_DSP_RW_PORT           0x15 /* DSP coefficient read/write port */
+ #define MII_TG3_DSP_ADDRESS           0x17 /* DSP address register */
++#define MII_TG3_EPHY_PTEST            0x17 /* 5906 PHY register */
+ #define MII_TG3_AUX_CTRL              0x18 /* auxilliary control register */
++#define MII_TG3_AUXCTL_MISC_WREN      0x8000
++#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX       0x0200
++#define MII_TG3_AUXCTL_MISC_RDSEL_MISC        0x7000
++#define MII_TG3_AUXCTL_SHDWSEL_MISC           0x0007
++
+ #define MII_TG3_AUX_STAT              0x19 /* auxilliary status register */
+ #define MII_TG3_AUX_STAT_LPASS                0x0004
+ #define MII_TG3_AUX_STAT_SPDMASK      0x0700
+@@ -1600,6 +2083,8 @@
+ #define MII_TG3_AUX_STAT_100FULL      0x0500
+ #define MII_TG3_AUX_STAT_1000HALF     0x0600
+ #define MII_TG3_AUX_STAT_1000FULL     0x0700
++#define MII_TG3_AUX_STAT_100          0x0008
++#define MII_TG3_AUX_STAT_FULL         0x0001
+ #define MII_TG3_ISTAT                 0x1a /* IRQ status register */
+ #define MII_TG3_IMASK                 0x1b /* IRQ mask register */
+@@ -1610,6 +2095,16 @@
+ #define MII_TG3_INT_DUPLEXCHG         0x0008
+ #define MII_TG3_INT_ANEG_PAGE_RX      0x0400
++#define MII_TG3_EPHY_TEST             0x1f /* 5906 PHY register */
++#define MII_TG3_EPHY_SHADOW_EN                0x80
++
++#define MII_TG3_EPHYTST_MISCCTRL      0x10 /* 5906 EPHY misc ctrl shadow register */
++#define MII_TG3_EPHYTST_MISCCTRL_MDIX 0x4000
++
++#define MII_TG3_TEST1                 0x1e
++#define MII_TG3_TEST1_TRIM_EN         0x0010
++#define MII_TG3_TEST1_CRC_EN          0x8000
++
+ /* There are two ways to manage the TX descriptors on the tigon3.
+  * Either the descriptors are in host DMA'able memory, or they
+  * exist only in the cards on-chip SRAM.  All 16 send bds are under
+@@ -1763,35 +2258,35 @@
+ #define TG3_HW_STATUS_SIZE            0x50
+ struct tg3_hw_status {
+-      u32                             status;
++      volatile u32                    status;
+ #define SD_STATUS_UPDATED             0x00000001
+ #define SD_STATUS_LINK_CHG            0x00000002
+ #define SD_STATUS_ERROR                       0x00000004
+-      u32                             status_tag;
++      volatile u32                    status_tag;
+ #ifdef __BIG_ENDIAN
+-      u16                             rx_consumer;
+-      u16                             rx_jumbo_consumer;
++      volatile u16                    rx_consumer;
++      volatile u16                    rx_jumbo_consumer;
+ #else
+-      u16                             rx_jumbo_consumer;
+-      u16                             rx_consumer;
++      volatile u16                    rx_jumbo_consumer;
++      volatile u16                    rx_consumer;
+ #endif
+ #ifdef __BIG_ENDIAN
+-      u16                             reserved;
+-      u16                             rx_mini_consumer;
++      volatile u16                    reserved;
++      volatile u16                    rx_mini_consumer;
+ #else
+-      u16                             rx_mini_consumer;
+-      u16                             reserved;
++      volatile u16                    rx_mini_consumer;
++      volatile u16                    reserved;
+ #endif
+       struct {
+ #ifdef __BIG_ENDIAN
+-              u16                     tx_consumer;
+-              u16                     rx_producer;
++              volatile u16            tx_consumer;
++              volatile u16            rx_producer;
+ #else
+-              u16                     rx_producer;
+-              u16                     tx_consumer;
++              volatile u16            rx_producer;
++              volatile u16            tx_consumer;
+ #endif
+       }                               idx[16];
+ };
+@@ -2059,12 +2554,22 @@
+       /* SMP locking strategy:
+        *
+-       * lock: Held during all operations except TX packet
+-       *       processing.
++       * lock: Held during reset, PHY access, timer, and when
++       *       updating tg3_flags and tg3_flags2.
+        *
+-       * tx_lock: Held during tg3_start_xmit and tg3_tx
++       * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
++       *                netif_tx_lock when it needs to call
++       *                netif_wake_queue.
+        *
+        * Both of these locks are to be held with BH safety.
++       *
++       * Because the IRQ handler, tg3_poll, and tg3_start_xmit
++       * are running lockless, it is necessary to completely
++       * quiesce the chip with tg3_netif_stop and tg3_full_lock
++       * before reconfiguring the device.
++       *
++       * indirect_lock: Held when accessing registers indirectly
++       *                with IRQ disabling.
+        */
+       spinlock_t                      lock;
+       spinlock_t                      indirect_lock;
+@@ -2091,8 +2596,6 @@
+       u32                             tx_cons;
+       u32                             tx_pending;
+-      spinlock_t                      tx_lock;
+-
+       struct tg3_tx_buffer_desc       *tx_ring;
+       struct tx_ring_info             *tx_buffers;
+       dma_addr_t                      tx_desc_mapping;
+@@ -2112,6 +2615,7 @@
+       struct tg3_rx_buffer_desc       *rx_std;
+       struct ring_info                *rx_std_buffers;
+       dma_addr_t                      rx_std_mapping;
++      u32                             rx_std_max_post;
+       struct tg3_rx_buffer_desc       *rx_jumbo;
+       struct ring_info                *rx_jumbo_buffers;
+@@ -2138,13 +2642,9 @@
+ #define TG3_FLAG_USE_LINKCHG_REG      0x00000008
+ #define TG3_FLAG_USE_MI_INTERRUPT     0x00000010
+ #define TG3_FLAG_ENABLE_ASF           0x00000020
+-#define TG3_FLAG_5701_REG_WRITE_BUG   0x00000040
++#define TG3_FLAG_ASPM_WORKAROUND      0x00000040
+ #define TG3_FLAG_POLL_SERDES          0x00000080
+-#if defined(CONFIG_X86)
+ #define TG3_FLAG_MBOX_WRITE_REORDER   0x00000100
+-#else
+-#define TG3_FLAG_MBOX_WRITE_REORDER   0       /* disables code too */
+-#endif
+ #define TG3_FLAG_PCIX_TARGET_HWBUG    0x00000200
+ #define TG3_FLAG_WOL_SPEED_100MB      0x00000400
+ #define TG3_FLAG_WOL_ENABLE           0x00000800
+@@ -2156,21 +2656,20 @@
+ #define TG3_FLAG_PCIX_MODE            0x00020000
+ #define TG3_FLAG_PCI_HIGH_SPEED               0x00040000
+ #define TG3_FLAG_PCI_32BIT            0x00080000
+-#define TG3_FLAG_NO_TX_PSEUDO_CSUM    0x00100000
+-#define TG3_FLAG_NO_RX_PSEUDO_CSUM    0x00200000
+-#define TG3_FLAG_SERDES_WOL_CAP               0x00400000
++#define TG3_FLAG_SRAM_USE_CONFIG      0x00100000
++#define TG3_FLAG_TX_RECOVERY_PENDING  0x00200000
++#define TG3_FLAG_WOL_CAP              0x00400000
+ #define TG3_FLAG_JUMBO_RING_ENABLE    0x00800000
+ #define TG3_FLAG_10_100_ONLY          0x01000000
+ #define TG3_FLAG_PAUSE_AUTONEG                0x02000000
+-#define TG3_FLAG_IN_RESET_TASK                0x04000000
+ #define TG3_FLAG_40BIT_DMA_BUG                0x08000000
+ #define TG3_FLAG_BROKEN_CHECKSUMS     0x10000000
+-#define TG3_FLAG_GOT_SERDES_FLOWCTL   0x20000000
+-#define TG3_FLAG_SPLIT_MODE           0x40000000
++#define TG3_FLAG_SUPPORT_MSI          0x20000000
++#define TG3_FLAG_CHIP_RESETTING               0x40000000
+ #define TG3_FLAG_INIT_COMPLETE                0x80000000
+       u32                             tg3_flags2;
+ #define TG3_FLG2_RESTART_TIMER                0x00000001
+-#define TG3_FLG2_SUN_570X             0x00000002
++#define TG3_FLG2_TSO_BUG              0x00000002
+ #define TG3_FLG2_NO_ETH_WIRE_SPEED    0x00000004
+ #define TG3_FLG2_IS_5788              0x00000008
+ #define TG3_FLG2_MAX_RXPEND_64                0x00000010
+@@ -2181,11 +2680,11 @@
+ #define TG3_FLG2_PCI_EXPRESS          0x00000200
+ #define TG3_FLG2_ASF_NEW_HANDSHAKE    0x00000400
+ #define TG3_FLG2_HW_AUTONEG           0x00000800
+-#define TG3_FLG2_PHY_JUST_INITTED     0x00001000
++#define TG3_FLG2_IS_NIC                       0x00001000
+ #define TG3_FLG2_PHY_SERDES           0x00002000
+ #define TG3_FLG2_CAPACITIVE_COUPLING  0x00004000
+ #define TG3_FLG2_FLASH                        0x00008000
+-#define TG3_FLG2_HW_TSO                       0x00010000
++#define TG3_FLG2_HW_TSO_1             0x00010000
+ #define TG3_FLG2_SERDES_PREEMPHASIS   0x00020000
+ #define TG3_FLG2_5705_PLUS            0x00040000
+ #define TG3_FLG2_5750_PLUS            0x00080000
+@@ -2198,9 +2697,12 @@
+ #define TG3_FLG2_PARALLEL_DETECT      0x01000000
+ #define TG3_FLG2_ICH_WORKAROUND               0x02000000
+ #define TG3_FLG2_5780_CLASS           0x04000000
+-
+-      u32                             split_mode_max_reqs;
+-#define SPLIT_MODE_5704_MAX_REQ               3
++#define TG3_FLG2_HW_TSO_2             0x08000000
++#define TG3_FLG2_HW_TSO                       (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
++#define TG3_FLG2_1SHOT_MSI            0x10000000
++#define TG3_FLG2_PHY_JITTER_BUG               0x20000000
++#define TG3_FLG2_NO_FWARE_REPORTED    0x40000000
++#define TG3_FLG2_PHY_ADJUST_TRIM      0x80000000
+       struct timer_list               timer;
+       u16                             timer_counter;
+@@ -2209,6 +2711,12 @@
+       u16                             asf_counter;
+       u16                             asf_multiplier;
++      /* 1 second counter for transient serdes link events */
++      u32                             serdes_counter;
++#define SERDES_AN_TIMEOUT_5704S               2
++#define SERDES_PARALLEL_DET_TIMEOUT   1
++#define SERDES_AN_TIMEOUT_5714S               1
++
+       struct tg3_link_config          link_config;
+       struct tg3_bufmgr_config        bufmgr_config;
+@@ -2222,6 +2730,7 @@
+       u32                             grc_local_ctrl;
+       u32                             dma_rwctrl;
+       u32                             coalesce_mode;
++      u32                             pwrmgmt_thresh;
+       /* PCI block */
+       u16                             pci_chip_rev_id;
+@@ -2229,9 +2738,11 @@
+       u8                              pci_lat_timer;
+       u8                              pci_hdr_type;
+       u8                              pci_bist;
++      u32                             pci_cfg_state[64 / sizeof(u32)];
+       int                             pm_cap;
+       int                             msi_cap;
++      int                             pcix_cap;
+       /* PHY info */
+       u32                             phy_id;
+@@ -2247,6 +2758,10 @@
+ #define PHY_ID_BCM5752                        0x60008100
+ #define PHY_ID_BCM5714                        0x60008340
+ #define PHY_ID_BCM5780                        0x60008350
++#define PHY_ID_BCM5755                        0xbc050cc0
++#define PHY_ID_BCM5787                        0xbc050ce0
++#define PHY_ID_BCM5756                        0xbc050ed0
++#define PHY_ID_BCM5906                        0xdc00ac40
+ #define PHY_ID_BCM8002                        0x60010140
+ #define PHY_ID_INVALID                        0xffffffff
+ #define PHY_ID_REV_MASK                       0x0000000f
+@@ -2256,8 +2771,11 @@
+ #define PHY_REV_BCM5411_X0            0x1 /* Found on Netgear GA302T */
+       u32                             led_ctrl;
++      u32                             pci_cmd;
+       char                            board_part_number[24];
++#define TG3_VER_SIZE 32
++      char                            fw_ver[TG3_VER_SIZE];
+       u32                             nic_sram_data_cfg;
+       u32                             pci_clock_ctrl;
+       struct pci_dev                  *pdev_peer;
+@@ -2271,7 +2789,9 @@
+        (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
+        (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
+        (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \
+-       (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM8002)
++       (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
++       (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
++       (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM8002)
+       struct tg3_hw_stats             *hw_stats;
+       dma_addr_t                      stats_mapping;