]> git.pld-linux.org Git - packages/kernel.git/blob - linux-tg3-3.81c.patch
- updated to 2.6.29.2 (builds and works --without apparmor --without grsecurity)
[packages/kernel.git] / linux-tg3-3.81c.patch
1 diff -uNr linux-2.6.16.old/drivers/net/tg3.c linux-2.6.16/drivers/net/tg3.c
2 --- linux-2.6.16.old/drivers/net/tg3.c  2008-02-08 09:52:27.000000000 +0100
3 +++ linux-2.6.16/drivers/net/tg3.c      2007-09-19 00:38:18.000000000 +0200
4 @@ -4,7 +4,7 @@
5   * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
6   * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
7   * Copyright (C) 2004 Sun Microsystems Inc.
8 - * Copyright (C) 2005 Broadcom Corporation.
9 + * Copyright (C) 2005-2007 Broadcom Corporation.
10   *
11   * Firmware is:
12   *     Derived from proprietary unpublished source code,
13 @@ -15,10 +15,22 @@
14   *     notice is accompanying it.
15   */
16  
17 +#include <linux/version.h>
18 +
19 +#if (LINUX_VERSION_CODE < 0x020612)
20  #include <linux/config.h>
21 +#endif
22  
23 +#if (LINUX_VERSION_CODE < 0x020500)
24 +#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
25 +#define MODVERSIONS
26 +#include <linux/modversions.h>
27 +#endif
28 +#endif
29  #include <linux/module.h>
30 +#if (LINUX_VERSION_CODE >= 0x20600)
31  #include <linux/moduleparam.h>
32 +#endif
33  #include <linux/kernel.h>
34  #include <linux/types.h>
35  #include <linux/compiler.h>
36 @@ -36,21 +48,26 @@
37  #include <linux/if_vlan.h>
38  #include <linux/ip.h>
39  #include <linux/tcp.h>
40 +#if (LINUX_VERSION_CODE >= 0x20600)
41  #include <linux/workqueue.h>
42 +#endif
43  #include <linux/prefetch.h>
44 +#if (LINUX_VERSION_CODE >= 0x020600)
45  #include <linux/dma-mapping.h>
46 +#endif
47 +#include <linux/bitops.h>
48  
49  #include <net/checksum.h>
50 +#include <net/ip.h>
51  
52  #include <asm/system.h>
53  #include <asm/io.h>
54  #include <asm/byteorder.h>
55  #include <asm/uaccess.h>
56  
57 -#ifdef CONFIG_SPARC64
58 +#ifdef CONFIG_SPARC
59  #include <asm/idprom.h>
60 -#include <asm/oplib.h>
61 -#include <asm/pbm.h>
62 +#include <asm/prom.h>
63  #endif
64  
65  #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
66 @@ -69,8 +86,8 @@
67  
68  #define DRV_MODULE_NAME                "tg3"
69  #define PFX DRV_MODULE_NAME    ": "
70 -#define DRV_MODULE_VERSION     "3.49"
71 -#define DRV_MODULE_RELDATE     "Feb 2, 2006"
72 +#define DRV_MODULE_VERSION     "3.81c"
73 +#define DRV_MODULE_RELDATE     "September 18, 2007"
74  
75  #define TG3_DEF_MAC_MODE       0
76  #define TG3_DEF_RX_MODE                0
77 @@ -124,16 +141,13 @@
78                                    TG3_RX_RCB_RING_SIZE(tp))
79  #define TG3_TX_RING_BYTES      (sizeof(struct tg3_tx_buffer_desc) * \
80                                  TG3_TX_RING_SIZE)
81 -#define TX_BUFFS_AVAIL(TP)                                             \
82 -       ((TP)->tx_pending -                                             \
83 -        (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
84  #define NEXT_TX(N)             (((N) + 1) & (TG3_TX_RING_SIZE - 1))
85  
86  #define RX_PKT_BUF_SZ          (1536 + tp->rx_offset + 64)
87  #define RX_JUMBO_PKT_BUF_SZ    (9046 + tp->rx_offset + 64)
88  
89  /* minimum number of free TX descriptors required to wake up TX process */
90 -#define TG3_TX_WAKEUP_THRESH           (TG3_TX_RING_SIZE / 4)
91 +#define TG3_TX_WAKEUP_THRESH(tp)               ((tp)->tx_pending / 4)
92  
93  /* number of ETHTOOL_GSTATS u64's */
94  #define TG3_NUM_STATS          (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
95 @@ -149,108 +163,78 @@
96  MODULE_VERSION(DRV_MODULE_VERSION);
97  
98  static int tg3_debug = -1;     /* -1 == use TG3_DEF_MSG_ENABLE as value */
99 +#if (LINUX_VERSION_CODE >= 0x20600)
100  module_param(tg3_debug, int, 0);
101  MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
102 +#endif
103  
104  static struct pci_device_id tg3_pci_tbl[] = {
105 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
106 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
107 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
108 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
109 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
110 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
111 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
112 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
113 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
114 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
115 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
116 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
117 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
118 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
119 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
120 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
121 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
122 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
123 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
124 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
125 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
126 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
127 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
128 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
129 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
130 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
131 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
132 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
133 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
134 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
135 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
136 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
137 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
138 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
139 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
140 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
141 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
142 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
143 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
144 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
145 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
146 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
147 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
148 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
149 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
150 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
152 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
154 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
156 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
158 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
160 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
162 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
164 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
166 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
168 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
170 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
172 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
174 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
176 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
178 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
180 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181 -       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
182 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183 -       { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
184 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185 -       { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
186 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187 -       { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
188 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189 -       { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
190 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191 -       { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
192 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193 -       { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
194 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195 -       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
196 -         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197 -       { 0, }
198 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
199 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
200 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
201 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
202 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
203 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
204 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
205 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
206 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
207 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
208 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
209 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
210 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
211 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
212 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
213 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
214 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
215 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
216 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
217 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
218 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
219 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
220 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
221 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
222 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
223 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
224 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
225 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
226 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
227 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
228 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
229 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
230 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
231 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
232 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
233 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
234 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
235 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
236 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
237 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
238 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
239 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
240 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
241 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
242 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
243 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
244 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
245 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
246 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
247 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
248 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
249 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
250 +       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
251 +       {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
252 +       {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
253 +       {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
254 +       {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
255 +       {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
256 +       {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
257 +       {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
258 +       {}
259  };
260  
261  MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
262  
263 -static struct {
264 +static const struct {
265         const char string[ETH_GSTRING_LEN];
266  } ethtool_stats_keys[TG3_NUM_STATS] = {
267         { "rx_octets" },
268 @@ -331,7 +315,7 @@
269         { "nic_tx_threshold_hit" }
270  };
271  
272 -static struct {
273 +static const struct {
274         const char string[ETH_GSTRING_LEN];
275  } ethtool_test_keys[TG3_NUM_TEST] = {
276         { "nvram test     (online) " },
277 @@ -349,7 +333,7 @@
278  
279  static u32 tg3_read32(struct tg3 *tp, u32 off)
280  {
281 -       return (readl(tp->regs + off)); 
282 +       return (readl(tp->regs + off));
283  }
284  
285  static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
286 @@ -465,6 +449,16 @@
287                 readl(mbox);
288  }
289  
290 +static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
291 +{
292 +       return (readl(tp->regs + off + GRCMBOX_BASE));
293 +}
294 +
295 +static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
296 +{
297 +       writel(val, tp->regs + off + GRCMBOX_BASE);
298 +}
299 +
300  #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
301  #define tw32_mailbox_f(reg, val)       tw32_mailbox_flush(tp, (reg), (val))
302  #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
303 @@ -480,34 +474,51 @@
304  {
305         unsigned long flags;
306  
307 +       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
308 +           (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
309 +               return;
310 +
311         spin_lock_irqsave(&tp->indirect_lock, flags);
312 -       pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
313 -       pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
314 +       if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
315 +               pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
316 +               pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
317  
318 -       /* Always leave this as zero. */
319 -       pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
320 -       spin_unlock_irqrestore(&tp->indirect_lock, flags);
321 -}
322 +               /* Always leave this as zero. */
323 +               pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
324 +       } else {
325 +               tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
326 +               tw32_f(TG3PCI_MEM_WIN_DATA, val);
327  
328 -static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
329 -{
330 -       /* If no workaround is needed, write to mem space directly */
331 -       if (tp->write32 != tg3_write_indirect_reg32)
332 -               tw32(NIC_SRAM_WIN_BASE + off, val);
333 -       else
334 -               tg3_write_mem(tp, off, val);
335 +               /* Always leave this as zero. */
336 +               tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
337 +       }
338 +       spin_unlock_irqrestore(&tp->indirect_lock, flags);
339  }
340  
341  static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
342  {
343         unsigned long flags;
344  
345 +       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
346 +           (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
347 +               *val = 0;
348 +               return;
349 +       }
350 +
351         spin_lock_irqsave(&tp->indirect_lock, flags);
352 -       pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
353 -       pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
354 +       if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
355 +               pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
356 +               pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
357  
358 -       /* Always leave this as zero. */
359 -       pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
360 +               /* Always leave this as zero. */
361 +               pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
362 +       } else {
363 +               tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
364 +               *val = tr32(TG3PCI_MEM_WIN_DATA);
365 +
366 +               /* Always leave this as zero. */
367 +               tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
368 +       }
369         spin_unlock_irqrestore(&tp->indirect_lock, flags);
370  }
371  
372 @@ -523,6 +534,9 @@
373         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
374             (tp->hw_status->status & SD_STATUS_UPDATED))
375                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
376 +       else
377 +               tw32(HOSTCC_MODE, tp->coalesce_mode |
378 +                    (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
379  }
380  
381  static void tg3_enable_ints(struct tg3 *tp)
382 @@ -534,6 +548,9 @@
383              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
384         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
385                        (tp->last_tag << 24));
386 +       if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
387 +               tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
388 +                              (tp->last_tag << 24));
389         tg3_cond_int(tp);
390  }
391  
392 @@ -560,7 +577,7 @@
393  /* tg3_restart_ints
394   *  similar to tg3_enable_ints, but it accurately determines whether there
395   *  is new work pending and can return without flushing the PIO write
396 - *  which reenables interrupts 
397 + *  which reenables interrupts
398   */
399  static void tg3_restart_ints(struct tg3 *tp)
400  {
401 @@ -649,7 +666,7 @@
402         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
403                       MI_COM_REG_ADDR_MASK);
404         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
405 -       
406 +
407         tw32_f(MAC_MI_COM, frame_val);
408  
409         loops = PHY_BUSY_LOOPS;
410 @@ -685,6 +702,10 @@
411         unsigned int loops;
412         int ret;
413  
414 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
415 +           (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
416 +               return 0;
417 +
418         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
419                 tw32_f(MAC_MI_MODE,
420                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
421 @@ -697,7 +718,7 @@
422                       MI_COM_REG_ADDR_MASK);
423         frame_val |= (val & MI_COM_DATA_MASK);
424         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
425 -       
426 +
427         tw32_f(MAC_MI_COM, frame_val);
428  
429         loops = PHY_BUSY_LOOPS;
430 @@ -724,6 +745,44 @@
431         return ret;
432  }
433  
434 +static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
435 +{
436 +       u32 phy;
437 +
438 +       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
439 +           (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
440 +               return;
441 +
442 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
443 +               u32 ephy;
444 +
445 +               if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
446 +                       tg3_writephy(tp, MII_TG3_EPHY_TEST,
447 +                                    ephy | MII_TG3_EPHY_SHADOW_EN);
448 +                       if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
449 +                               if (enable)
450 +                                       phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
451 +                               else
452 +                                       phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
453 +                               tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
454 +                       }
455 +                       tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
456 +               }
457 +       } else {
458 +               phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
459 +                     MII_TG3_AUXCTL_SHDWSEL_MISC;
460 +               if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
461 +                   !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
462 +                       if (enable)
463 +                               phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
464 +                       else
465 +                               phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
466 +                       phy |= MII_TG3_AUXCTL_MISC_WREN;
467 +                       tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
468 +               }
469 +       }
470 +}
471 +
472  static void tg3_phy_set_wirespeed(struct tg3 *tp)
473  {
474         u32 val;
475 @@ -948,6 +1007,8 @@
476         return err;
477  }
478  
479 +static void tg3_link_report(struct tg3 *);
480 +
481  /* This will reset the tigon3 PHY if there is no valid
482   * link unless the FORCE argument is non-zero.
483   */
484 @@ -956,11 +1017,23 @@
485         u32 phy_status;
486         int err;
487  
488 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
489 +               u32 val;
490 +
491 +               val = tr32(GRC_MISC_CFG);
492 +               tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
493 +               udelay(40);
494 +       }
495         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
496         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
497         if (err != 0)
498                 return -EBUSY;
499  
500 +       if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
501 +               netif_carrier_off(tp->dev);
502 +               tg3_link_report(tp);
503 +       }
504 +
505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
506             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
507             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
508 @@ -997,6 +1070,17 @@
509                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
510                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
511         }
512 +       else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
513 +               tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
514 +               tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
515 +               if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
516 +                       tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
517 +                       tg3_writephy(tp, MII_TG3_TEST1,
518 +                                    MII_TG3_TEST1_TRIM_EN | 0x4);
519 +               } else
520 +                       tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
521 +               tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
522 +       }
523         /* Set Extended packet length bit (bit 14) on all chips that */
524         /* support jumbo frames */
525         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
526 @@ -1022,6 +1106,12 @@
527                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
528         }
529  
530 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
531 +               /* adjust output voltage */
532 +               tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
533 +       }
534 +
535 +       tg3_phy_toggle_automdix(tp, 1);
536         tg3_phy_set_wirespeed(tp);
537         return 0;
538  }
539 @@ -1030,7 +1120,7 @@
540  {
541         struct tg3 *tp_peer = tp;
542  
543 -       if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
544 +       if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
545                 return;
546  
547         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
548 @@ -1038,9 +1128,11 @@
549                 struct net_device *dev_peer;
550  
551                 dev_peer = pci_get_drvdata(tp->pdev_peer);
552 +               /* remove_one() may have been run on the peer. */
553                 if (!dev_peer)
554 -                       BUG();
555 -               tp_peer = netdev_priv(dev_peer);
556 +                       tp_peer = tp;
557 +               else
558 +                       tp_peer = netdev_priv(dev_peer);
559         }
560  
561         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
562 @@ -1120,6 +1212,19 @@
563         }
564  }
565  
566 +static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
567 +{
568 +       if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
569 +               return 1;
570 +       else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
571 +               if (speed != SPEED_10)
572 +                       return 1;
573 +       } else if (speed == SPEED_10)
574 +               return 1;
575 +
576 +       return 0;
577 +}
578 +
579  static int tg3_setup_phy(struct tg3 *, int);
580  
581  #define RESET_KIND_SHUTDOWN    0
582 @@ -1131,7 +1236,47 @@
583  static int tg3_nvram_lock(struct tg3 *);
584  static void tg3_nvram_unlock(struct tg3 *);
585  
586 -static int tg3_set_power_state(struct tg3 *tp, int state)
587 +static void tg3_power_down_phy(struct tg3 *tp)
588 +{
589 +       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
590 +               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
591 +                       u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
592 +                       u32 serdes_cfg = tr32(MAC_SERDES_CFG);
593 +
594 +                       sg_dig_ctrl |=
595 +                               SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
596 +                       tw32(SG_DIG_CTRL, sg_dig_ctrl);
597 +                       tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
598 +               }
599 +               return;
600 +       }
601 +
602 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
603 +               u32 val;
604 +
605 +               tg3_bmcr_reset(tp);
606 +               val = tr32(GRC_MISC_CFG);
607 +               tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
608 +               udelay(40);
609 +               return;
610 +       } else {
611 +               tg3_writephy(tp, MII_TG3_EXT_CTRL,
612 +                            MII_TG3_EXT_CTRL_FORCE_LED_OFF);
613 +               tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
614 +       }
615 +
616 +       /* The PHY should not be powered down on some chips because
617 +        * of bugs.
618 +        */
619 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
620 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
621 +           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
622 +            (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
623 +               return;
624 +       tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
625 +}
626 +
627 +static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
628  {
629         u32 misc_host_ctrl;
630         u16 power_control, power_caps;
631 @@ -1150,28 +1295,28 @@
632         power_control |= PCI_PM_CTRL_PME_STATUS;
633         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
634         switch (state) {
635 -       case 0:
636 +       case PCI_D0:
637                 power_control |= 0;
638                 pci_write_config_word(tp->pdev,
639                                       pm + PCI_PM_CTRL,
640                                       power_control);
641                 udelay(100);    /* Delay after power state change */
642  
643 -               /* Switch out of Vaux if it is not a LOM */
644 -               if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
645 +               /* Switch out of Vaux if it is a NIC */
646 +               if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
647                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
648  
649                 return 0;
650  
651 -       case 1:
652 +       case PCI_D1:
653                 power_control |= 1;
654                 break;
655  
656 -       case 2:
657 +       case PCI_D2:
658                 power_control |= 2;
659                 break;
660  
661 -       case 3:
662 +       case PCI_D3hot:
663                 power_control |= 3;
664                 break;
665  
666 @@ -1202,7 +1347,12 @@
667                 tg3_setup_phy(tp, 0);
668         }
669  
670 -       if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
671 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
672 +               u32 val;
673 +
674 +               val = tr32(GRC_VCPU_EXT_CTRL);
675 +               tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
676 +       } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
677                 int i;
678                 u32 val;
679  
680 @@ -1210,12 +1360,19 @@
681                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
682                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
683                                 break;
684 +#if (LINUX_VERSION_CODE < 0x20607)
685 +                       set_current_state(TASK_UNINTERRUPTIBLE);
686 +                       schedule_timeout(HZ / 1000);
687 +#else
688                         msleep(1);
689 +#endif
690                 }
691         }
692 -       tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
693 -                                            WOL_DRV_STATE_SHUTDOWN |
694 -                                            WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
695 +       if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
696 +               tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
697 +                                                    WOL_DRV_STATE_SHUTDOWN |
698 +                                                    WOL_DRV_WOL |
699 +                                                    WOL_SET_MAGIC_PKT);
700  
701         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
702  
703 @@ -1226,11 +1383,22 @@
704                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
705                         udelay(40);
706  
707 -                       mac_mode = MAC_MODE_PORT_MODE_MII;
708 +                       if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
709 +                               mac_mode = MAC_MODE_PORT_MODE_GMII;
710 +                       else
711 +                               mac_mode = MAC_MODE_PORT_MODE_MII;
712  
713 -                       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
714 -                           !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
715 -                               mac_mode |= MAC_MODE_LINK_POLARITY;
716 +                       mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
717 +                       if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
718 +                           ASIC_REV_5700) {
719 +                               u32 speed = (tp->tg3_flags &
720 +                                            TG3_FLAG_WOL_SPEED_100MB) ?
721 +                                            SPEED_100 : SPEED_10;
722 +                               if (tg3_5700_link_polarity(tp, speed))
723 +                                       mac_mode |= MAC_MODE_LINK_POLARITY;
724 +                               else
725 +                                       mac_mode &= ~MAC_MODE_LINK_POLARITY;
726 +                       }
727                 } else {
728                         mac_mode = MAC_MODE_PORT_MODE_TBI;
729                 }
730 @@ -1260,7 +1428,8 @@
731  
732                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
733                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
734 -       } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
735 +       } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
736 +                  (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
737                 /* do nothing */
738         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
739                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
740 @@ -1304,16 +1473,8 @@
741         }
742  
743         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
744 -           !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
745 -               /* Turn off the PHY */
746 -               if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
747 -                       tg3_writephy(tp, MII_TG3_EXT_CTRL,
748 -                                    MII_TG3_EXT_CTRL_FORCE_LED_OFF);
749 -                       tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
750 -                       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
751 -                               tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
752 -               }
753 -       }
754 +           !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
755 +               tg3_power_down_phy(tp);
756  
757         tg3_frob_aux_power(tp);
758  
759 @@ -1334,20 +1495,22 @@
760                 }
761         }
762  
763 +       tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
764 +
765         /* Finally, set the new power state. */
766         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
767         udelay(100);    /* Delay after power state change */
768  
769 -       tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
770 -
771         return 0;
772  }
773  
774  static void tg3_link_report(struct tg3 *tp)
775  {
776         if (!netif_carrier_ok(tp->dev)) {
777 -               printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
778 -       } else {
779 +               if (netif_msg_link(tp))
780 +                       printk(KERN_INFO PFX "%s: Link is down.\n",
781 +                              tp->dev->name);
782 +       } else if (netif_msg_link(tp)) {
783                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
784                        tp->dev->name,
785                        (tp->link_config.active_speed == SPEED_1000 ?
786 @@ -1426,7 +1589,7 @@
787         if (old_rx_mode != tp->rx_mode) {
788                 tw32_f(MAC_RX_MODE, tp->rx_mode);
789         }
790 -       
791 +
792         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
793                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
794         else
795 @@ -1471,6 +1634,13 @@
796                 break;
797  
798         default:
799 +               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
800 +                       *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
801 +                                SPEED_10;
802 +                       *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
803 +                                 DUPLEX_HALF;
804 +                       break;
805 +               }
806                 *speed = SPEED_INVALID;
807                 *duplex = DUPLEX_INVALID;
808                 break;
809 @@ -1495,12 +1665,6 @@
810  
811                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
812         } else if (tp->link_config.speed == SPEED_INVALID) {
813 -               tp->link_config.advertising =
814 -                       (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
815 -                        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
816 -                        ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
817 -                        ADVERTISED_Autoneg | ADVERTISED_MII);
818 -
819                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
820                         tp->link_config.advertising &=
821                                 ~(ADVERTISED_1000baseT_Half |
822 @@ -1644,25 +1808,36 @@
823         return err;
824  }
825  
826 -static int tg3_copper_is_advertising_all(struct tg3 *tp)
827 +static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
828  {
829 -       u32 adv_reg, all_mask;
830 +       u32 adv_reg, all_mask = 0;
831 +
832 +       if (mask & ADVERTISED_10baseT_Half)
833 +               all_mask |= ADVERTISE_10HALF;
834 +       if (mask & ADVERTISED_10baseT_Full)
835 +               all_mask |= ADVERTISE_10FULL;
836 +       if (mask & ADVERTISED_100baseT_Half)
837 +               all_mask |= ADVERTISE_100HALF;
838 +       if (mask & ADVERTISED_100baseT_Full)
839 +               all_mask |= ADVERTISE_100FULL;
840  
841         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
842                 return 0;
843  
844 -       all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
845 -                   ADVERTISE_100HALF | ADVERTISE_100FULL);
846         if ((adv_reg & all_mask) != all_mask)
847                 return 0;
848         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
849                 u32 tg3_ctrl;
850  
851 +               all_mask = 0;
852 +               if (mask & ADVERTISED_1000baseT_Half)
853 +                       all_mask |= ADVERTISE_1000HALF;
854 +               if (mask & ADVERTISED_1000baseT_Full)
855 +                       all_mask |= ADVERTISE_1000FULL;
856 +
857                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
858                         return 0;
859  
860 -               all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
861 -                           MII_TG3_CTRL_ADV_1000_FULL);
862                 if ((tg3_ctrl & all_mask) != all_mask)
863                         return 0;
864         }
865 @@ -1753,7 +1928,7 @@
866  
867         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
868                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
869 -       else
870 +       else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
871                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
872  
873         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
874 @@ -1822,7 +1997,8 @@
875                                 /* Force autoneg restart if we are exiting
876                                  * low power mode.
877                                  */
878 -                               if (!tg3_copper_is_advertising_all(tp))
879 +                               if (!tg3_copper_is_advertising_all(tp,
880 +                                               tp->link_config.advertising))
881                                         current_link_up = 0;
882                         } else {
883                                 current_link_up = 0;
884 @@ -1890,15 +2066,12 @@
885         if (tp->link_config.active_duplex == DUPLEX_HALF)
886                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
887  
888 -       tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
889         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
890 -               if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
891 -                   (current_link_up == 1 &&
892 -                    tp->link_config.active_speed == SPEED_10))
893 -                       tp->mac_mode |= MAC_MODE_LINK_POLARITY;
894 -       } else {
895 -               if (current_link_up == 1)
896 +               if (current_link_up == 1 &&
897 +                   tg3_5700_link_polarity(tp, tp->link_config.active_speed))
898                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
899 +               else
900 +                       tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
901         }
902  
903         /* ??? Without this setting Netgear GA302T PHY does not
904 @@ -2410,24 +2583,27 @@
905         expected_sg_dig_ctrl |= (1 << 12);
906  
907         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
908 +               if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
909 +                   tp->serdes_counter &&
910 +                   ((mac_status & (MAC_STATUS_PCS_SYNCED |
911 +                                   MAC_STATUS_RCVD_CFG)) ==
912 +                    MAC_STATUS_PCS_SYNCED)) {
913 +                       tp->serdes_counter--;
914 +                       current_link_up = 1;
915 +                       goto out;
916 +               }
917 +restart_autoneg:
918                 if (workaround)
919                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
920                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
921                 udelay(5);
922                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
923  
924 -               tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
925 +               tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
926 +               tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
927         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
928                                  MAC_STATUS_SIGNAL_DET)) {
929 -               int i;
930 -
931 -               /* Giver time to negotiate (~200ms) */
932 -               for (i = 0; i < 40000; i++) {
933 -                       sg_dig_status = tr32(SG_DIG_STATUS);
934 -                       if (sg_dig_status & (0x3))
935 -                               break;
936 -                       udelay(5);
937 -               }
938 +               sg_dig_status = tr32(SG_DIG_STATUS);
939                 mac_status = tr32(MAC_STATUS);
940  
941                 if ((sg_dig_status & (1 << 1)) &&
942 @@ -2443,10 +2619,11 @@
943  
944                         tg3_setup_flow_control(tp, local_adv, remote_adv);
945                         current_link_up = 1;
946 -                       tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
947 +                       tp->serdes_counter = 0;
948 +                       tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
949                 } else if (!(sg_dig_status & (1 << 1))) {
950 -                       if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
951 -                               tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
952 +                       if (tp->serdes_counter)
953 +                               tp->serdes_counter--;
954                         else {
955                                 if (workaround) {
956                                         u32 val = serdes_cfg;
957 @@ -2470,9 +2647,17 @@
958                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
959                                         tg3_setup_flow_control(tp, 0, 0);
960                                         current_link_up = 1;
961 -                               }
962 +                                       tp->tg3_flags2 |=
963 +                                               TG3_FLG2_PARALLEL_DETECT;
964 +                                       tp->serdes_counter =
965 +                                               SERDES_PARALLEL_DET_TIMEOUT;
966 +                               } else
967 +                                       goto restart_autoneg;
968                         }
969                 }
970 +       } else {
971 +               tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
972 +               tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
973         }
974  
975  out:
976 @@ -2483,15 +2668,13 @@
977  {
978         int current_link_up = 0;
979  
980 -       if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
981 -               tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
982 +       if (!(mac_status & MAC_STATUS_PCS_SYNCED))
983                 goto out;
984 -       }
985  
986         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
987                 u32 flags;
988                 int i;
989 -  
990 +
991                 if (fiber_autoneg(tp, &flags)) {
992                         u32 local_adv, remote_adv;
993  
994 @@ -2504,7 +2687,6 @@
995  
996                         tg3_setup_flow_control(tp, local_adv, remote_adv);
997  
998 -                       tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
999                         current_link_up = 1;
1000                 }
1001                 for (i = 0; i < 30; i++) {
1002 @@ -2527,10 +2709,12 @@
1003         } else {
1004                 /* Forcing 1000FD link up. */
1005                 current_link_up = 1;
1006 -               tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
1007  
1008                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
1009                 udelay(40);
1010 +
1011 +               tw32_f(MAC_MODE, tp->mac_mode);
1012 +               udelay(40);
1013         }
1014  
1015  out:
1016 @@ -2590,10 +2774,6 @@
1017         else
1018                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
1019  
1020 -       tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1021 -       tw32_f(MAC_MODE, tp->mac_mode);
1022 -       udelay(40);
1023 -
1024         tp->hw_status->status =
1025                 (SD_STATUS_UPDATED |
1026                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
1027 @@ -2603,14 +2783,16 @@
1028                                     MAC_STATUS_CFG_CHANGED));
1029                 udelay(5);
1030                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
1031 -                                        MAC_STATUS_CFG_CHANGED)) == 0)
1032 +                                        MAC_STATUS_CFG_CHANGED |
1033 +                                        MAC_STATUS_LNKSTATE_CHANGED)) == 0)
1034                         break;
1035         }
1036  
1037         mac_status = tr32(MAC_STATUS);
1038         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
1039                 current_link_up = 0;
1040 -               if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1041 +               if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1042 +                   tp->serdes_counter == 0) {
1043                         tw32_f(MAC_MODE, (tp->mac_mode |
1044                                           MAC_MODE_SEND_CONFIGS));
1045                         udelay(1);
1046 @@ -2680,6 +2862,12 @@
1047  
1048         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
1049         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
1050 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
1051 +               if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
1052 +                       bmsr |= BMSR_LSTATUS;
1053 +               else
1054 +                       bmsr &= ~BMSR_LSTATUS;
1055 +       }
1056  
1057         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
1058  
1059 @@ -2709,7 +2897,7 @@
1060                         tg3_writephy(tp, MII_BMCR, bmcr);
1061  
1062                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1063 -                       tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
1064 +                       tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
1065                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
1066  
1067                         return err;
1068 @@ -2748,6 +2936,13 @@
1069                         bmcr = new_bmcr;
1070                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
1071                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
1072 +                       if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1073 +                           ASIC_REV_5714) {
1074 +                               if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
1075 +                                       bmsr |= BMSR_LSTATUS;
1076 +                               else
1077 +                                       bmsr &= ~BMSR_LSTATUS;
1078 +                       }
1079                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
1080                 }
1081         }
1082 @@ -2807,9 +3002,9 @@
1083  
1084  static void tg3_serdes_parallel_detect(struct tg3 *tp)
1085  {
1086 -       if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
1087 +       if (tp->serdes_counter) {
1088                 /* Give autoneg time to complete. */
1089 -               tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
1090 +               tp->serdes_counter--;
1091                 return;
1092         }
1093         if (!netif_carrier_ok(tp->dev) &&
1094 @@ -2896,9 +3091,47 @@
1095                 }
1096         }
1097  
1098 +       if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
1099 +               u32 val = tr32(PCIE_PWR_MGMT_THRESH);
1100 +               if (!netif_carrier_ok(tp->dev))
1101 +                       val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
1102 +                             tp->pwrmgmt_thresh;
1103 +               else
1104 +                       val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
1105 +               tw32(PCIE_PWR_MGMT_THRESH, val);
1106 +       }
1107 +
1108         return err;
1109  }
1110  
1111 +/* This is called whenever we suspect that the system chipset is re-
1112 + * ordering the sequence of MMIO to the tx send mailbox. The symptom
1113 + * is bogus tx completions. We try to recover by setting the
1114 + * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
1115 + * in the workqueue.
1116 + */
1117 +static void tg3_tx_recover(struct tg3 *tp)
1118 +{
1119 +       BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
1120 +              tp->write32_tx_mbox == tg3_write_indirect_mbox);
1121 +
1122 +       printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
1123 +              "mapped I/O cycles to the network device, attempting to "
1124 +              "recover. Please report the problem to the driver maintainer "
1125 +              "and include system chipset information.\n", tp->dev->name);
1126 +
1127 +       spin_lock(&tp->lock);
1128 +       tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
1129 +       spin_unlock(&tp->lock);
1130 +}
1131 +
1132 +static inline u32 tg3_tx_avail(struct tg3 *tp)
1133 +{
1134 +       smp_mb();
1135 +       return (tp->tx_pending -
1136 +               ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
1137 +}
1138 +
1139  /* Tigon3 never reports partial packet sends.  So we do not
1140   * need special logic to handle SKBs that have not had all
1141   * of their frags sent yet, like SunGEM does.
1142 @@ -2911,10 +3144,12 @@
1143         while (sw_idx != hw_idx) {
1144                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
1145                 struct sk_buff *skb = ri->skb;
1146 -               int i;
1147 +               int i, tx_bug = 0;
1148  
1149 -               if (unlikely(skb == NULL))
1150 -                       BUG();
1151 +               if (unlikely(skb == NULL)) {
1152 +                       tg3_tx_recover(tp);
1153 +                       return;
1154 +               }
1155  
1156                 pci_unmap_single(tp->pdev,
1157                                  pci_unmap_addr(ri, mapping),
1158 @@ -2926,12 +3161,9 @@
1159                 sw_idx = NEXT_TX(sw_idx);
1160  
1161                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1162 -                       if (unlikely(sw_idx == hw_idx))
1163 -                               BUG();
1164 -
1165                         ri = &tp->tx_buffers[sw_idx];
1166 -                       if (unlikely(ri->skb != NULL))
1167 -                               BUG();
1168 +                       if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
1169 +                               tx_bug = 1;
1170  
1171                         pci_unmap_page(tp->pdev,
1172                                        pci_unmap_addr(ri, mapping),
1173 @@ -2942,16 +3174,29 @@
1174                 }
1175  
1176                 dev_kfree_skb(skb);
1177 +
1178 +               if (unlikely(tx_bug)) {
1179 +                       tg3_tx_recover(tp);
1180 +                       return;
1181 +               }
1182         }
1183  
1184         tp->tx_cons = sw_idx;
1185  
1186 -       if (unlikely(netif_queue_stopped(tp->dev))) {
1187 -               spin_lock(&tp->tx_lock);
1188 +       /* Need to make the tx_cons update visible to tg3_start_xmit()
1189 +        * before checking for netif_queue_stopped().  Without the
1190 +        * memory barrier, there is a small possibility that tg3_start_xmit()
1191 +        * will miss it and cause the queue to be stopped forever.
1192 +        */
1193 +       smp_mb();
1194 +
1195 +       if (unlikely(netif_queue_stopped(tp->dev) &&
1196 +                    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
1197 +               netif_tx_lock(tp->dev);
1198                 if (netif_queue_stopped(tp->dev) &&
1199 -                   (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
1200 +                   (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
1201                         netif_wake_queue(tp->dev);
1202 -               spin_unlock(&tp->tx_lock);
1203 +               netif_tx_unlock(tp->dev);
1204         }
1205  }
1206  
1207 @@ -3005,11 +3250,10 @@
1208          * Callers depend upon this behavior and assume that
1209          * we leave everything unchanged if we fail.
1210          */
1211 -       skb = dev_alloc_skb(skb_size);
1212 +       skb = netdev_alloc_skb(tp->dev, skb_size);
1213         if (skb == NULL)
1214                 return -ENOMEM;
1215  
1216 -       skb->dev = tp->dev;
1217         skb_reserve(skb, tp->rx_offset);
1218  
1219         mapping = pci_map_single(tp->pdev, skb->data,
1220 @@ -3102,7 +3346,7 @@
1221   */
1222  static int tg3_rx(struct tg3 *tp, int budget)
1223  {
1224 -       u32 work_mask;
1225 +       u32 work_mask, rx_std_posted = 0;
1226         u32 sw_idx = tp->rx_rcb_ptr;
1227         u16 hw_idx;
1228         int received;
1229 @@ -3129,6 +3373,7 @@
1230                                                   mapping);
1231                         skb = tp->rx_std_buffers[desc_idx].skb;
1232                         post_ptr = &tp->rx_std_ptr;
1233 +                       rx_std_posted++;
1234                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
1235                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
1236                                                   mapping);
1237 @@ -3154,7 +3399,7 @@
1238  
1239                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
1240  
1241 -               if (len > RX_COPY_THRESHOLD 
1242 +               if (len > RX_COPY_THRESHOLD
1243                         && tp->rx_offset == 2
1244                         /* rx_offset != 2 iff this is a 5701 card running
1245                          * in PCI-X mode [see tg3_get_invariants()] */
1246 @@ -3177,15 +3422,18 @@
1247                         tg3_recycle_rx(tp, opaque_key,
1248                                        desc_idx, *post_ptr);
1249  
1250 -                       copy_skb = dev_alloc_skb(len + 2);
1251 +                       copy_skb = netdev_alloc_skb(tp->dev, len + 2);
1252                         if (copy_skb == NULL)
1253                                 goto drop_it_no_recycle;
1254  
1255 -                       copy_skb->dev = tp->dev;
1256                         skb_reserve(copy_skb, 2);
1257                         skb_put(copy_skb, len);
1258                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
1259 +#if (LINUX_VERSION_CODE >= 0x20616)
1260 +                       skb_copy_from_linear_data(skb, copy_skb->data, len);
1261 +#else
1262                         memcpy(copy_skb->data, skb->data, len);
1263 +#endif
1264                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
1265  
1266                         /* We'll reuse the original ring buffer. */
1267 @@ -3216,9 +3464,18 @@
1268  
1269  next_pkt:
1270                 (*post_ptr)++;
1271 +
1272 +               if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
1273 +                       u32 idx = *post_ptr % TG3_RX_RING_SIZE;
1274 +
1275 +                       tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
1276 +                                    TG3_64BIT_REG_LOW, idx);
1277 +                       work_mask &= ~RXD_OPAQUE_RING_STD;
1278 +                       rx_std_posted = 0;
1279 +               }
1280  next_pkt_nopost:
1281                 sw_idx++;
1282 -               sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
1283 +               sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
1284  
1285                 /* Refresh hw_idx to see if there is new work */
1286                 if (sw_idx == hw_idx) {
1287 @@ -3231,6 +3488,9 @@
1288         tp->rx_rcb_ptr = sw_idx;
1289         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
1290  
1291 +       /* Some platforms need to sync memory here */
1292 +       wmb();
1293 +
1294         /* Refill RX ring(s). */
1295         if (work_mask & RXD_OPAQUE_RING_STD) {
1296                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
1297 @@ -3269,6 +3529,11 @@
1298         /* run TX completion thread */
1299         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
1300                 tg3_tx(tp);
1301 +               if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
1302 +                       netif_rx_complete(netdev);
1303 +                       schedule_work(&tp->reset_task);
1304 +                       return 0;
1305 +               }
1306         }
1307  
1308         /* run RX thread, within the bounds set by NAPI.
1309 @@ -3311,7 +3576,11 @@
1310         tp->irq_sync = 1;
1311         smp_mb();
1312  
1313 +#if (LINUX_VERSION_CODE >= 0x2051c)
1314         synchronize_irq(tp->pdev->irq);
1315 +#else
1316 +       synchronize_irq();
1317 +#endif
1318  }
1319  
1320  static inline int tg3_irq_sync(struct tg3 *tp)
1321 @@ -3326,23 +3595,46 @@
1322   */
1323  static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
1324  {
1325 +       spin_lock_bh(&tp->lock);
1326         if (irq_sync)
1327                 tg3_irq_quiesce(tp);
1328 -       spin_lock_bh(&tp->lock);
1329 -       spin_lock(&tp->tx_lock);
1330  }
1331  
1332  static inline void tg3_full_unlock(struct tg3 *tp)
1333  {
1334 -       spin_unlock(&tp->tx_lock);
1335         spin_unlock_bh(&tp->lock);
1336  }
1337  
1338 +/* One-shot MSI handler - Chip automatically disables interrupt
1339 + * after sending MSI so driver doesn't have to do it.
1340 + */
1341 +#if (LINUX_VERSION_CODE < 0x20613)
1342 +static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
1343 +#else
1344 +static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
1345 +#endif
1346 +{
1347 +       struct net_device *dev = dev_id;
1348 +       struct tg3 *tp = netdev_priv(dev);
1349 +
1350 +       prefetch(tp->hw_status);
1351 +       prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
1352 +
1353 +       if (likely(!tg3_irq_sync(tp)))
1354 +               netif_rx_schedule(dev);         /* schedule NAPI poll */
1355 +
1356 +       return IRQ_HANDLED;
1357 +}
1358 +
1359  /* MSI ISR - No need to check for interrupt sharing and no need to
1360   * flush status block and interrupt mailbox. PCI ordering rules
1361   * guarantee that MSI will arrive after the status block.
1362   */
1363 +#if (LINUX_VERSION_CODE < 0x20613)
1364  static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
1365 +#else
1366 +static irqreturn_t tg3_msi(int irq, void *dev_id)
1367 +#endif
1368  {
1369         struct net_device *dev = dev_id;
1370         struct tg3 *tp = netdev_priv(dev);
1371 @@ -3363,7 +3655,11 @@
1372         return IRQ_RETVAL(1);
1373  }
1374  
1375 +#if (LINUX_VERSION_CODE < 0x20613)
1376  static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1377 +#else
1378 +static irqreturn_t tg3_interrupt(int irq, void *dev_id)
1379 +#endif
1380  {
1381         struct net_device *dev = dev_id;
1382         struct tg3 *tp = netdev_priv(dev);
1383 @@ -3375,38 +3671,48 @@
1384          * Reading the PCI State register will confirm whether the
1385          * interrupt is ours and will flush the status block.
1386          */
1387 -       if ((sblk->status & SD_STATUS_UPDATED) ||
1388 -           !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
1389 -               /*
1390 -                * Writing any value to intr-mbox-0 clears PCI INTA# and
1391 -                * chip-internal interrupt pending events.
1392 -                * Writing non-zero to intr-mbox-0 additional tells the
1393 -                * NIC to stop sending us irqs, engaging "in-intr-handler"
1394 -                * event coalescing.
1395 -                */
1396 -               tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
1397 -                            0x00000001);
1398 -               if (tg3_irq_sync(tp))
1399 +       if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
1400 +               if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
1401 +                   (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
1402 +                       handled = 0;
1403                         goto out;
1404 -               sblk->status &= ~SD_STATUS_UPDATED;
1405 -               if (likely(tg3_has_work(tp))) {
1406 -                       prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
1407 -                       netif_rx_schedule(dev);         /* schedule NAPI poll */
1408 -               } else {
1409 -                       /* No work, shared interrupt perhaps?  re-enable
1410 -                        * interrupts, and flush that PCI write
1411 -                        */
1412 -                       tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
1413 -                               0x00000000);
1414                 }
1415 -       } else {        /* shared interrupt */
1416 -               handled = 0;
1417 +       }
1418 +
1419 +       /*
1420 +        * Writing any value to intr-mbox-0 clears PCI INTA# and
1421 +        * chip-internal interrupt pending events.
1422 +        * Writing non-zero to intr-mbox-0 additional tells the
1423 +        * NIC to stop sending us irqs, engaging "in-intr-handler"
1424 +        * event coalescing.
1425 +        *
1426 +        * Flush the mailbox to de-assert the IRQ immediately to prevent
1427 +        * spurious interrupts.  The flush impacts performance but
1428 +        * excessive spurious interrupts can be worse in some cases.
1429 +        */
1430 +       tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
1431 +       if (tg3_irq_sync(tp))
1432 +               goto out;
1433 +       sblk->status &= ~SD_STATUS_UPDATED;
1434 +       if (likely(tg3_has_work(tp))) {
1435 +               prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
1436 +               netif_rx_schedule(dev);         /* schedule NAPI poll */
1437 +       } else {
1438 +               /* No work, shared interrupt perhaps?  re-enable
1439 +                * interrupts, and flush that PCI write
1440 +                */
1441 +               tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
1442 +                              0x00000000);
1443         }
1444  out:
1445         return IRQ_RETVAL(handled);
1446  }
1447  
1448 +#if (LINUX_VERSION_CODE < 0x20613)
1449  static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
1450 +#else
1451 +static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
1452 +#endif
1453  {
1454         struct net_device *dev = dev_id;
1455         struct tg3 *tp = netdev_priv(dev);
1456 @@ -3418,75 +3724,126 @@
1457          * Reading the PCI State register will confirm whether the
1458          * interrupt is ours and will flush the status block.
1459          */
1460 -       if ((sblk->status_tag != tp->last_tag) ||
1461 -           !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
1462 -               /*
1463 -                * writing any value to intr-mbox-0 clears PCI INTA# and
1464 -                * chip-internal interrupt pending events.
1465 -                * writing non-zero to intr-mbox-0 additional tells the
1466 -                * NIC to stop sending us irqs, engaging "in-intr-handler"
1467 -                * event coalescing.
1468 -                */
1469 -               tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
1470 -                            0x00000001);
1471 -               if (tg3_irq_sync(tp))
1472 +       if (unlikely(sblk->status_tag == tp->last_tag)) {
1473 +               if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
1474 +                   (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
1475 +                       handled = 0;
1476                         goto out;
1477 -               if (netif_rx_schedule_prep(dev)) {
1478 -                       prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
1479 -                       /* Update last_tag to mark that this status has been
1480 -                        * seen. Because interrupt may be shared, we may be
1481 -                        * racing with tg3_poll(), so only update last_tag
1482 -                        * if tg3_poll() is not scheduled.
1483 -                        */
1484 -                       tp->last_tag = sblk->status_tag;
1485 -                       __netif_rx_schedule(dev);
1486                 }
1487 -       } else {        /* shared interrupt */
1488 -               handled = 0;
1489         }
1490 -out:
1491 -       return IRQ_RETVAL(handled);
1492 -}
1493  
1494 -/* ISR for interrupt test */
1495 -static irqreturn_t tg3_test_isr(int irq, void *dev_id,
1496 -               struct pt_regs *regs)
1497 -{
1498 -       struct net_device *dev = dev_id;
1499 -       struct tg3 *tp = netdev_priv(dev);
1500 -       struct tg3_hw_status *sblk = tp->hw_status;
1501 -
1502 -       if ((sblk->status & SD_STATUS_UPDATED) ||
1503 -           !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
1504 -               tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
1505 -                            0x00000001);
1506 +       /*
1507 +        * writing any value to intr-mbox-0 clears PCI INTA# and
1508 +        * chip-internal interrupt pending events.
1509 +        * writing non-zero to intr-mbox-0 additional tells the
1510 +        * NIC to stop sending us irqs, engaging "in-intr-handler"
1511 +        * event coalescing.
1512 +        *
1513 +        * Flush the mailbox to de-assert the IRQ immediately to prevent
1514 +        * spurious interrupts.  The flush impacts performance but
1515 +        * excessive spurious interrupts can be worse in some cases.
1516 +        */
1517 +       tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
1518 +       if (tg3_irq_sync(tp))
1519 +               goto out;
1520 +       if (netif_rx_schedule_prep(dev)) {
1521 +               prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
1522 +               /* Update last_tag to mark that this status has been
1523 +                * seen. Because interrupt may be shared, we may be
1524 +                * racing with tg3_poll(), so only update last_tag
1525 +                * if tg3_poll() is not scheduled.
1526 +                */
1527 +               tp->last_tag = sblk->status_tag;
1528 +               __netif_rx_schedule(dev);
1529 +       }
1530 +out:
1531 +       return IRQ_RETVAL(handled);
1532 +}
1533 +
1534 +/* ISR for interrupt test */
1535 +#if (LINUX_VERSION_CODE < 0x020613)
1536 +static irqreturn_t tg3_test_isr(int irq, void *dev_id, struct pt_regs *regs)
1537 +#else
1538 +static irqreturn_t tg3_test_isr(int irq, void *dev_id)
1539 +#endif
1540 +{
1541 +       struct net_device *dev = dev_id;
1542 +       struct tg3 *tp = netdev_priv(dev);
1543 +       struct tg3_hw_status *sblk = tp->hw_status;
1544 +
1545 +       if ((sblk->status & SD_STATUS_UPDATED) ||
1546 +           !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
1547 +               tg3_disable_ints(tp);
1548                 return IRQ_RETVAL(1);
1549         }
1550         return IRQ_RETVAL(0);
1551  }
1552  
1553 -static int tg3_init_hw(struct tg3 *);
1554 +static int tg3_init_hw(struct tg3 *, int);
1555  static int tg3_halt(struct tg3 *, int, int);
1556  
1557 -#ifdef CONFIG_NET_POLL_CONTROLLER
1558 +/* Restart hardware after configuration changes, self-test, etc.
1559 + * Invoked with tp->lock held.
1560 + */
1561 +static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
1562 +{
1563 +       int err;
1564 +
1565 +       err = tg3_init_hw(tp, reset_phy);
1566 +       if (err) {
1567 +               printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
1568 +                      "aborting.\n", tp->dev->name);
1569 +               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1570 +               tg3_full_unlock(tp);
1571 +               del_timer_sync(&tp->timer);
1572 +               tp->irq_sync = 0;
1573 +               netif_poll_enable(tp->dev);
1574 +               dev_close(tp->dev);
1575 +               tg3_full_lock(tp, 0);
1576 +       }
1577 +       return err;
1578 +}
1579 +
1580 +#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
1581  static void tg3_poll_controller(struct net_device *dev)
1582  {
1583         struct tg3 *tp = netdev_priv(dev);
1584  
1585 +#if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x20600)
1586 +       if (netdump_mode) {
1587 +               tg3_interrupt(tp->pdev->irq, dev, NULL);
1588 +               if (dev->poll_list.prev) {
1589 +                       int budget = 64;
1590 +
1591 +                       tg3_poll(dev, &budget);
1592 +               }
1593 +       }
1594 +       else
1595 +#endif
1596 +#if (LINUX_VERSION_CODE < 0x020613)
1597         tg3_interrupt(tp->pdev->irq, dev, NULL);
1598 +#else
1599 +       tg3_interrupt(tp->pdev->irq, dev);
1600 +#endif
1601  }
1602  #endif
1603  
1604 +#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
1605 +static void tg3_reset_task(struct work_struct *work)
1606 +#else
1607  static void tg3_reset_task(void *_data)
1608 +#endif
1609  {
1610 +#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
1611 +       struct tg3 *tp = container_of(work, struct tg3, reset_task);
1612 +#else
1613         struct tg3 *tp = _data;
1614 +#endif
1615         unsigned int restart_timer;
1616  
1617         tg3_full_lock(tp, 0);
1618 -       tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
1619  
1620         if (!netif_running(tp->dev)) {
1621 -               tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
1622                 tg3_full_unlock(tp);
1623                 return;
1624         }
1625 @@ -3500,25 +3857,43 @@
1626         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
1627         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
1628  
1629 +       if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
1630 +               tp->write32_tx_mbox = tg3_write32_tx_mbox;
1631 +               tp->write32_rx_mbox = tg3_write_flush_reg32;
1632 +               tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
1633 +               tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
1634 +       }
1635 +
1636         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
1637 -       tg3_init_hw(tp);
1638 +       if (tg3_init_hw(tp, 1))
1639 +               goto out;
1640  
1641         tg3_netif_start(tp);
1642  
1643         if (restart_timer)
1644                 mod_timer(&tp->timer, jiffies + 1);
1645  
1646 -       tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
1647 -
1648 +out:
1649         tg3_full_unlock(tp);
1650  }
1651  
1652 +static void tg3_dump_short_state(struct tg3 *tp)
1653 +{
1654 +       printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
1655 +              tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
1656 +       printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
1657 +              tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
1658 +}
1659 +
1660  static void tg3_tx_timeout(struct net_device *dev)
1661  {
1662         struct tg3 *tp = netdev_priv(dev);
1663  
1664 -       printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
1665 -              dev->name);
1666 +       if (netif_msg_tx_err(tp)) {
1667 +               printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
1668 +                      dev->name);
1669 +               tg3_dump_short_state(tp);
1670 +       }
1671  
1672         schedule_work(&tp->reset_task);
1673  }
1674 @@ -3537,7 +3912,7 @@
1675                                           int len)
1676  {
1677  #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
1678 -       if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
1679 +       if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
1680                 return (((u64) mapping + len) > DMA_40BIT_MASK);
1681         return 0;
1682  #else
1683 @@ -3628,24 +4003,416 @@
1684         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
1685  }
1686  
1687 +/* hard_start_xmit for devices that don't have any bugs and
1688 + * support TG3_FLG2_HW_TSO_2 only.
1689 + */
1690  static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1691  {
1692         struct tg3 *tp = netdev_priv(dev);
1693         dma_addr_t mapping;
1694         u32 len, entry, base_flags, mss;
1695 -       int would_hit_hwbug;
1696  
1697         len = skb_headlen(skb);
1698  
1699 -       /* No BH disabling for tx_lock here.  We are running in BH disabled
1700 -        * context and TX reclaim runs via tp->poll inside of a software
1701 +       /* We are running in BH disabled context with netif_tx_lock
1702 +        * and TX reclaim runs via tp->poll inside of a software
1703          * interrupt.  Furthermore, IRQ processing runs lockless so we have
1704          * no IRQ context deadlocks to worry about either.  Rejoice!
1705          */
1706 -       if (!spin_trylock(&tp->tx_lock))
1707 -               return NETDEV_TX_LOCKED; 
1708 +       if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
1709 +               if (!netif_queue_stopped(dev)) {
1710 +                       netif_stop_queue(dev);
1711  
1712 -       if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
1713 +                       /* This is a hard error, log it. */
1714 +                       printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
1715 +                              "queue awake!\n", dev->name);
1716 +               }
1717 +               return NETDEV_TX_BUSY;
1718 +       }
1719 +
1720 +       entry = tp->tx_prod;
1721 +       base_flags = 0;
1722 +#if TG3_TSO_SUPPORT != 0
1723 +       mss = 0;
1724 +       if ((mss = skb_shinfo(skb)->gso_size) != 0) {
1725 +               int tcp_opt_len, ip_tcp_len;
1726 +
1727 +               if (skb_header_cloned(skb) &&
1728 +                   pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
1729 +                       dev_kfree_skb(skb);
1730 +                       goto out_unlock;
1731 +               }
1732 +
1733 +#ifndef BCM_NO_TSO6
1734 +               if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1735 +                       mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
1736 +               else
1737 +#endif
1738 +               {
1739 +                       struct iphdr *iph = ip_hdr(skb);
1740 +
1741 +                       tcp_opt_len = tcp_optlen(skb);
1742 +                       ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
1743 +
1744 +                       iph->check = 0;
1745 +                       iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
1746 +                       mss |= (ip_tcp_len + tcp_opt_len) << 9;
1747 +               }
1748 +
1749 +               base_flags |= (TXD_FLAG_CPU_PRE_DMA |
1750 +                              TXD_FLAG_CPU_POST_DMA);
1751 +
1752 +               tcp_hdr(skb)->check = 0;
1753 +
1754 +       }
1755 +       else if (skb->ip_summed == CHECKSUM_PARTIAL)
1756 +               base_flags |= TXD_FLAG_TCPUDP_CSUM;
1757 +#else
1758 +       mss = 0;
1759 +       if (skb->ip_summed == CHECKSUM_PARTIAL)
1760 +               base_flags |= TXD_FLAG_TCPUDP_CSUM;
1761 +#endif
1762 +#if TG3_VLAN_TAG_USED
1763 +       if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
1764 +               base_flags |= (TXD_FLAG_VLAN |
1765 +                              (vlan_tx_tag_get(skb) << 16));
1766 +#endif
1767 +
1768 +       /* Queue skb data, a.k.a. the main skb fragment. */
1769 +       mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
1770 +
1771 +       tp->tx_buffers[entry].skb = skb;
1772 +       pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
1773 +
1774 +       tg3_set_txd(tp, entry, mapping, len, base_flags,
1775 +                   (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
1776 +
1777 +       entry = NEXT_TX(entry);
1778 +
1779 +       /* Now loop through additional data fragments, and queue them. */
1780 +       if (skb_shinfo(skb)->nr_frags > 0) {
1781 +               unsigned int i, last;
1782 +
1783 +               last = skb_shinfo(skb)->nr_frags - 1;
1784 +               for (i = 0; i <= last; i++) {
1785 +                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1786 +
1787 +                       len = frag->size;
1788 +                       mapping = pci_map_page(tp->pdev,
1789 +                                              frag->page,
1790 +                                              frag->page_offset,
1791 +                                              len, PCI_DMA_TODEVICE);
1792 +
1793 +                       tp->tx_buffers[entry].skb = NULL;
1794 +                       pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
1795 +
1796 +                       tg3_set_txd(tp, entry, mapping, len,
1797 +                                   base_flags, (i == last) | (mss << 1));
1798 +
1799 +                       entry = NEXT_TX(entry);
1800 +               }
1801 +       }
1802 +
1803 +       /* Some platforms need to sync memory here */
1804 +       wmb();
1805 +
1806 +       /* Packets are ready, update Tx producer idx local and on card. */
1807 +       tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
1808 +
1809 +       tp->tx_prod = entry;
1810 +       if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
1811 +               netif_stop_queue(dev);
1812 +               if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
1813 +                       netif_wake_queue(tp->dev);
1814 +       }
1815 +
1816 +#if TG3_TSO_SUPPORT != 0
1817 +out_unlock:
1818 +#endif
1819 +       mmiowb();
1820 +
1821 +       dev->trans_start = jiffies;
1822 +
1823 +       return NETDEV_TX_OK;
1824 +}
1825 +
1826 +#if TG3_TSO_SUPPORT != 0
1827 +#ifndef NETIF_F_GSO
1828 +
1829 +struct sk_buff *skb_segment(struct sk_buff *skb, int features)
1830 +{
1831 +       struct sk_buff *segs = NULL;
1832 +       struct sk_buff *tail = NULL;
1833 +       unsigned int mss = skb_shinfo(skb)->gso_size;
1834 +       unsigned int doffset = skb->data - skb->mac.raw;
1835 +       unsigned int offset = doffset;
1836 +       unsigned int headroom;
1837 +       unsigned int len;
1838 +       int nfrags = skb_shinfo(skb)->nr_frags;
1839 +       int err = -ENOMEM;
1840 +       int i = 0;
1841 +       int pos;
1842 +
1843 +       __skb_push(skb, doffset);
1844 +       headroom = skb_headroom(skb);
1845 +       pos = skb_headlen(skb);
1846 +
1847 +       do {
1848 +               struct sk_buff *nskb;
1849 +               skb_frag_t *frag;
1850 +               int hsize;
1851 +               int k;
1852 +               int size;
1853 +
1854 +               len = skb->len - offset;
1855 +               if (len > mss)
1856 +                       len = mss;
1857 +
1858 +               hsize = skb_headlen(skb) - offset;
1859 +               if (hsize < 0)
1860 +                       hsize = 0;
1861 +               if (hsize > len)
1862 +                       hsize = len;
1863 +
1864 +               nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
1865 +               if (unlikely(!nskb))
1866 +                       goto err;
1867 +
1868 +               if (segs)
1869 +                       tail->next = nskb;
1870 +               else
1871 +                       segs = nskb;
1872 +               tail = nskb;
1873 +
1874 +               nskb->dev = skb->dev;
1875 +               nskb->priority = skb->priority;
1876 +               nskb->protocol = skb->protocol;
1877 +               nskb->dst = dst_clone(skb->dst);
1878 +               memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
1879 +               nskb->pkt_type = skb->pkt_type;
1880 +               nskb->mac_len = skb->mac_len;
1881 +
1882 +               skb_reserve(nskb, headroom);
1883 +               nskb->mac.raw = nskb->data;
1884 +               nskb->nh.raw = nskb->data + skb->mac_len;
1885 +               nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
1886 +               memcpy(skb_put(nskb, doffset), skb->data, doffset);
1887 +
1888 +               frag = skb_shinfo(nskb)->frags;
1889 +               k = 0;
1890 +
1891 +               nskb->ip_summed = CHECKSUM_PARTIAL;
1892 +               nskb->csum = skb->csum;
1893 +               memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
1894 +
1895 +               while (pos < offset + len) {
1896 +                       BUG_ON(i >= nfrags);
1897 +
1898 +                       *frag = skb_shinfo(skb)->frags[i];
1899 +                       get_page(frag->page);
1900 +                       size = frag->size;
1901 +
1902 +                       if (pos < offset) {
1903 +                               frag->page_offset += offset - pos;
1904 +                               frag->size -= offset - pos;
1905 +                       }
1906 +
1907 +                       k++;
1908 +
1909 +                       if (pos + size <= offset + len) {
1910 +                               i++;
1911 +                               pos += size;
1912 +                       } else {
1913 +                               frag->size -= pos + size - (offset + len);
1914 +                               break;
1915 +                       }
1916 +
1917 +                       frag++;
1918 +               }
1919 +
1920 +               skb_shinfo(nskb)->nr_frags = k;
1921 +               nskb->data_len = len - hsize;
1922 +               nskb->len += nskb->data_len;
1923 +               nskb->truesize += nskb->data_len;
1924 +       } while ((offset += len) < skb->len);
1925 +
1926 +       return segs;
1927 +
1928 +err:
1929 +       while ((skb = segs)) {
1930 +               segs = skb->next;
1931 +               kfree(skb);
1932 +       }
1933 +       return ERR_PTR(err);
1934 +}
1935 +
1936 +static struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
1937 +{
1938 +       struct sk_buff *segs = ERR_PTR(-EINVAL);
1939 +       struct tcphdr *th;
1940 +       unsigned thlen;
1941 +       unsigned int seq;
1942 +       u32 delta;
1943 +       unsigned int oldlen;
1944 +       unsigned int len;
1945 +
1946 +       if (!pskb_may_pull(skb, sizeof(*th)))
1947 +               goto out;
1948 +
1949 +       th = skb->h.th;
1950 +       thlen = th->doff * 4;
1951 +       if (thlen < sizeof(*th))
1952 +               goto out;
1953 +
1954 +       if (!pskb_may_pull(skb, thlen))
1955 +               goto out;
1956 +
1957 +       oldlen = (u16)~skb->len;
1958 +       __skb_pull(skb, thlen);
1959 +
1960 +       segs = skb_segment(skb, features);
1961 +       if (IS_ERR(segs))
1962 +               goto out;
1963 +
1964 +       len = skb_shinfo(skb)->gso_size;
1965 +       delta = htonl(oldlen + (thlen + len));
1966 +
1967 +       skb = segs;
1968 +       th = skb->h.th;
1969 +       seq = ntohl(th->seq);
1970 +
1971 +       do {
1972 +               th->fin = th->psh = 0;
1973 +
1974 +               th->check = ~csum_fold((u32)((u32)th->check +
1975 +                                      (u32)delta));
1976 +               seq += len;
1977 +               skb = skb->next;
1978 +               th = skb->h.th;
1979 +
1980 +               th->seq = htonl(seq);
1981 +               th->cwr = 0;
1982 +       } while (skb->next);
1983 +
1984 +       delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
1985 +       th->check = ~csum_fold((u32)((u32)th->check +
1986 +                               (u32)delta));
1987 +out:
1988 +       return segs;
1989 +}
1990 +
1991 +static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
1992 +{
1993 +       struct sk_buff *segs = ERR_PTR(-EINVAL);
1994 +       struct iphdr *iph;
1995 +       int ihl;
1996 +       int id;
1997 +
1998 +       if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
1999 +               goto out;
2000 +
2001 +       iph = skb->nh.iph;
2002 +       ihl = iph->ihl * 4;
2003 +       if (ihl < sizeof(*iph))
2004 +               goto out;
2005 +
2006 +       if (unlikely(!pskb_may_pull(skb, ihl)))
2007 +               goto out;
2008 +
2009 +       skb->h.raw = __skb_pull(skb, ihl);
2010 +       iph = skb->nh.iph;
2011 +       id = ntohs(iph->id);
2012 +       segs = ERR_PTR(-EPROTONOSUPPORT);
2013 +
2014 +       segs = tcp_tso_segment(skb, features);
2015 +
2016 +       if (!segs || unlikely(IS_ERR(segs)))
2017 +               goto out;
2018 +
2019 +       skb = segs;
2020 +       do {
2021 +               iph = skb->nh.iph;
2022 +               iph->id = htons(id++);
2023 +               iph->tot_len = htons(skb->len - skb->mac_len);
2024 +               iph->check = 0;
2025 +               iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
2026 +       } while ((skb = skb->next));
2027 +
2028 +out:
2029 +       return segs;
2030 +}
2031 +
2032 +static struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
2033 +{
2034 +       struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2035 +
2036 +       skb->mac.raw = skb->data;
2037 +       skb->mac_len = skb->nh.raw - skb->data;
2038 +       __skb_pull(skb, skb->mac_len);
2039 +
2040 +       segs = inet_gso_segment(skb, features);
2041 +
2042 +       __skb_push(skb, skb->data - skb->mac.raw);
2043 +       return segs;
2044 +}
2045 +
2046 +#endif
2047 +
2048 +static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
2049 +
2050 +/* Use GSO to workaround a rare TSO bug that may be triggered when the
2051 + * TSO header is greater than 80 bytes.
2052 + */
2053 +static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
2054 +{
2055 +       struct sk_buff *segs, *nskb;
2056 +
2057 +       /* Estimate the number of fragments in the worst case */
2058 +       if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
2059 +               netif_stop_queue(tp->dev);
2060 +               if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
2061 +                       return NETDEV_TX_BUSY;
2062 +
2063 +               netif_wake_queue(tp->dev);
2064 +       }
2065 +
2066 +       segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
2067 +       if (unlikely(IS_ERR(segs)))
2068 +               goto tg3_tso_bug_end;
2069 +
2070 +       do {
2071 +               nskb = segs;
2072 +               segs = segs->next;
2073 +               nskb->next = NULL;
2074 +               tg3_start_xmit_dma_bug(nskb, tp->dev);
2075 +       } while (segs);
2076 +
2077 +tg3_tso_bug_end:
2078 +       dev_kfree_skb(skb);
2079 +
2080 +       return NETDEV_TX_OK;
2081 +}
2082 +
2083 +#endif
2084 +
2085 +/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
2086 + * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
2087 + */
2088 +static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
2089 +{
2090 +       struct tg3 *tp = netdev_priv(dev);
2091 +       dma_addr_t mapping;
2092 +       u32 len, entry, base_flags, mss;
2093 +       int would_hit_hwbug;
2094 +
2095 +       len = skb_headlen(skb);
2096 +
2097 +       /* We are running in BH disabled context with netif_tx_lock
2098 +        * and TX reclaim runs via tp->poll inside of a software
2099 +        * interrupt.  Furthermore, IRQ processing runs lockless so we have
2100 +        * no IRQ context deadlocks to worry about either.  Rejoice!
2101 +        */
2102 +       if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
2103                 if (!netif_queue_stopped(dev)) {
2104                         netif_stop_queue(dev);
2105  
2106 @@ -3653,19 +4420,19 @@
2107                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
2108                                "queue awake!\n", dev->name);
2109                 }
2110 -               spin_unlock(&tp->tx_lock);
2111                 return NETDEV_TX_BUSY;
2112         }
2113  
2114         entry = tp->tx_prod;
2115         base_flags = 0;
2116 -       if (skb->ip_summed == CHECKSUM_HW)
2117 +       if (skb->ip_summed == CHECKSUM_PARTIAL)
2118                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
2119  #if TG3_TSO_SUPPORT != 0
2120         mss = 0;
2121 -       if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
2122 -           (mss = skb_shinfo(skb)->tso_size) != 0) {
2123 -               int tcp_opt_len, ip_tcp_len;
2124 +       if (((mss = skb_shinfo(skb)->gso_size) != 0) &&
2125 +           (skb_shinfo(skb)->gso_segs > 1)) {
2126 +               struct iphdr *iph;
2127 +               int tcp_opt_len, ip_tcp_len, hdr_len;
2128  
2129                 if (skb_header_cloned(skb) &&
2130                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
2131 @@ -3673,40 +4440,42 @@
2132                         goto out_unlock;
2133                 }
2134  
2135 -               tcp_opt_len = ((skb->h.th->doff - 5) * 4);
2136 -               ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
2137 +               tcp_opt_len = tcp_optlen(skb);
2138 +               ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
2139 +
2140 +               hdr_len = ip_tcp_len + tcp_opt_len;
2141 +               if (unlikely((ETH_HLEN + hdr_len) > 80) &&
2142 +                            (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
2143 +                       return (tg3_tso_bug(tp, skb));
2144  
2145                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
2146                                TXD_FLAG_CPU_POST_DMA);
2147  
2148 -               skb->nh.iph->check = 0;
2149 -               skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
2150 +               iph = ip_hdr(skb);
2151 +               iph->check = 0;
2152 +               iph->tot_len = htons(mss + hdr_len);
2153                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
2154 -                       skb->h.th->check = 0;
2155 +                       tcp_hdr(skb)->check = 0;
2156                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
2157 -               }
2158 -               else {
2159 -                       skb->h.th->check =
2160 -                               ~csum_tcpudp_magic(skb->nh.iph->saddr,
2161 -                                                  skb->nh.iph->daddr,
2162 -                                                  0, IPPROTO_TCP, 0);
2163 -               }
2164 +               } else
2165 +                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2166 +                                                                iph->daddr, 0,
2167 +                                                                IPPROTO_TCP,
2168 +                                                                0);
2169  
2170                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
2171                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
2172 -                       if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2173 +                       if (tcp_opt_len || iph->ihl > 5) {
2174                                 int tsflags;
2175  
2176 -                               tsflags = ((skb->nh.iph->ihl - 5) +
2177 -                                          (tcp_opt_len >> 2));
2178 +                               tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
2179                                 mss |= (tsflags << 11);
2180                         }
2181                 } else {
2182 -                       if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2183 +                       if (tcp_opt_len || iph->ihl > 5) {
2184                                 int tsflags;
2185  
2186 -                               tsflags = ((skb->nh.iph->ihl - 5) +
2187 -                                          (tcp_opt_len >> 2));
2188 +                               tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
2189                                 base_flags |= tsflags << 12;
2190                         }
2191                 }
2192 @@ -3787,19 +4556,21 @@
2193                 entry = start;
2194         }
2195  
2196 +       /* Some platforms need to sync memory here */
2197 +       wmb();
2198 +
2199         /* Packets are ready, update Tx producer idx local and on card. */
2200         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
2201  
2202         tp->tx_prod = entry;
2203 -       if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
2204 +       if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
2205                 netif_stop_queue(dev);
2206 -               if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
2207 +               if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
2208                         netif_wake_queue(tp->dev);
2209         }
2210  
2211  out_unlock:
2212         mmiowb();
2213 -       spin_unlock(&tp->tx_lock);
2214  
2215         dev->trans_start = jiffies;
2216  
2217 @@ -3814,7 +4585,9 @@
2218         if (new_mtu > ETH_DATA_LEN) {
2219                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
2220                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
2221 +#if TG3_TSO_SUPPORT != 0
2222                         ethtool_op_set_tso(dev, 0);
2223 +#endif
2224                 }
2225                 else
2226                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
2227 @@ -3828,6 +4601,7 @@
2228  static int tg3_change_mtu(struct net_device *dev, int new_mtu)
2229  {
2230         struct tg3 *tp = netdev_priv(dev);
2231 +       int err;
2232  
2233         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
2234                 return -EINVAL;
2235 @@ -3848,13 +4622,14 @@
2236  
2237         tg3_set_mtu(dev, tp, new_mtu);
2238  
2239 -       tg3_init_hw(tp);
2240 +       err = tg3_restart_hw(tp, 0);
2241  
2242 -       tg3_netif_start(tp);
2243 +       if (!err)
2244 +               tg3_netif_start(tp);
2245  
2246         tg3_full_unlock(tp);
2247  
2248 -       return 0;
2249 +       return err;
2250  }
2251  
2252  /* Free up pending packets in all rx/tx rings.
2253 @@ -3936,7 +4711,7 @@
2254   * end up in the driver.  tp->{tx,}lock are held and thus
2255   * we may not sleep.
2256   */
2257 -static void tg3_init_rings(struct tg3 *tp)
2258 +static int tg3_init_rings(struct tg3 *tp)
2259  {
2260         u32 i;
2261  
2262 @@ -3985,18 +4760,38 @@
2263  
2264         /* Now allocate fresh SKBs for each rx ring. */
2265         for (i = 0; i < tp->rx_pending; i++) {
2266 -               if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
2267 -                                    -1, i) < 0)
2268 +               if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
2269 +                       printk(KERN_WARNING PFX
2270 +                              "%s: Using a smaller RX standard ring, "
2271 +                              "only %d out of %d buffers were allocated "
2272 +                              "successfully.\n",
2273 +                              tp->dev->name, i, tp->rx_pending);
2274 +                       if (i == 0)
2275 +                               return -ENOMEM;
2276 +                       tp->rx_pending = i;
2277                         break;
2278 +               }
2279         }
2280  
2281         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
2282                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
2283                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
2284 -                                            -1, i) < 0)
2285 +                                            -1, i) < 0) {
2286 +                               printk(KERN_WARNING PFX
2287 +                                      "%s: Using a smaller RX jumbo ring, "
2288 +                                      "only %d out of %d buffers were "
2289 +                                      "allocated successfully.\n",
2290 +                                      tp->dev->name, i, tp->rx_jumbo_pending);
2291 +                               if (i == 0) {
2292 +                                       tg3_free_rings(tp);
2293 +                                       return -ENOMEM;
2294 +                               }
2295 +                               tp->rx_jumbo_pending = i;
2296                                 break;
2297 +                       }
2298                 }
2299         }
2300 +       return 0;
2301  }
2302  
2303  /*
2304 @@ -4278,9 +5073,8 @@
2305  /* tp->lock is held. */
2306  static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
2307  {
2308 -       if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
2309 -               tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
2310 -                             NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
2311 +       tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
2312 +                     NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
2313  
2314         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
2315                 switch (kind) {
2316 @@ -4352,6 +5146,104 @@
2317         }
2318  }
2319  
2320 +static int tg3_poll_fw(struct tg3 *tp)
2321 +{
2322 +       int i;
2323 +       u32 val;
2324 +
2325 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2326 +               /* Wait up to 20ms for init done. */
2327 +               for (i = 0; i < 200; i++) {
2328 +                       if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
2329 +                               return 0;
2330 +                       udelay(100);
2331 +               }
2332 +               return -ENODEV;
2333 +       }
2334 +
2335 +       /* Wait for firmware initialization to complete. */
2336 +       for (i = 0; i < 100000; i++) {
2337 +               tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
2338 +               if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2339 +                       break;
2340 +               udelay(10);
2341 +       }
2342 +
2343 +       /* Chip might not be fitted with firmware.  Some Sun onboard
2344 +        * parts are configured like that.  So don't signal the timeout
2345 +        * of the above loop as an error, but do report the lack of
2346 +        * running firmware once.
2347 +        */
2348 +       if (i >= 100000 &&
2349 +           !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
2350 +               tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
2351 +
2352 +               printk(KERN_INFO PFX "%s: No firmware running.\n",
2353 +                      tp->dev->name);
2354 +       }
2355 +
2356 +       return 0;
2357 +}
2358 +
2359 +/* Save PCI command register before chip reset */
2360 +static void tg3_save_pci_state(struct tg3 *tp)
2361 +{
2362 +       u32 val;
2363 +
2364 +       pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
2365 +       tp->pci_cmd = val;
2366 +}
2367 +
2368 +/* Restore PCI state after chip reset */
2369 +static void tg3_restore_pci_state(struct tg3 *tp)
2370 +{
2371 +       u32 val;
2372 +
2373 +       /* Re-enable indirect register accesses. */
2374 +       pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
2375 +                              tp->misc_host_ctrl);
2376 +
2377 +       /* Set MAX PCI retry to zero. */
2378 +       val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
2379 +       if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
2380 +           (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
2381 +               val |= PCISTATE_RETRY_SAME_DMA;
2382 +       pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
2383 +
2384 +       pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);
2385 +
2386 +       /* Make sure PCI-X relaxed ordering bit is clear. */
2387 +       if (tp->pcix_cap) {
2388 +               u16 pcix_cmd;
2389 +
2390 +               pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
2391 +                                    &pcix_cmd);
2392 +               pcix_cmd &= ~PCI_X_CMD_ERO;
2393 +               pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
2394 +                                     pcix_cmd);
2395 +       }
2396 +
2397 +       if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
2398 +               u32 val;
2399 +
2400 +               /* Chip reset on 5780 will reset MSI enable bit,
2401 +                * so need to restore it.
2402 +                */
2403 +               if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
2404 +                       u16 ctrl;
2405 +
2406 +                       pci_read_config_word(tp->pdev,
2407 +                                            tp->msi_cap + PCI_MSI_FLAGS,
2408 +                                            &ctrl);
2409 +                       pci_write_config_word(tp->pdev,
2410 +                                             tp->msi_cap + PCI_MSI_FLAGS,
2411 +                                             ctrl | PCI_MSI_FLAGS_ENABLE);
2412 +                       val = tr32(MSGINT_MODE);
2413 +                       tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
2414 +               }
2415 +       }
2416 +}
2417 +
2418  static void tg3_stop_fw(struct tg3 *);
2419  
2420  /* tp->lock is held. */
2421 @@ -4359,15 +5251,25 @@
2422  {
2423         u32 val;
2424         void (*write_op)(struct tg3 *, u32, u32);
2425 -       int i;
2426 +       int err;
2427  
2428 -       if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
2429 -               tg3_nvram_lock(tp);
2430 -               /* No matching tg3_nvram_unlock() after this because
2431 -                * chip reset below will undo the nvram lock.
2432 -                */
2433 -               tp->nvram_lock_cnt = 0;
2434 -       }
2435 +       tg3_nvram_lock(tp);
2436 +
2437 +       /* No matching tg3_nvram_unlock() after this because
2438 +        * chip reset below will undo the nvram lock.
2439 +        */
2440 +       tp->nvram_lock_cnt = 0;
2441 +
2442 +       /* GRC_MISC_CFG core clock reset will clear the memory
2443 +        * enable bit in PCI register 4 and the MSI enable bit
2444 +        * on some chips, so we save relevant registers here.
2445 +        */
2446 +       tg3_save_pci_state(tp);
2447 +
2448 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
2449 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
2450 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
2451 +               tw32(GRC_FASTBOOT_PC, 0);
2452  
2453         /*
2454          * We must avoid the readl() that normally takes place.
2455 @@ -4379,6 +5281,25 @@
2456         if (write_op == tg3_write_flush_reg32)
2457                 tp->write32 = tg3_write32;
2458  
2459 +       /* Prevent the irq handler from reading or writing PCI registers
2460 +        * during chip reset when the memory enable bit in the PCI command
2461 +        * register may be cleared.  The chip does not generate interrupt
2462 +        * at this time, but the irq handler may still be called due to irq
2463 +        * sharing or irqpoll.
2464 +        */
2465 +       tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
2466 +       if (tp->hw_status) {
2467 +               tp->hw_status->status = 0;
2468 +               tp->hw_status->status_tag = 0;
2469 +       }
2470 +       tp->last_tag = 0;
2471 +       smp_mb();
2472 +#if (LINUX_VERSION_CODE >= 0x2051c)
2473 +       synchronize_irq(tp->pdev->irq);
2474 +#else
2475 +       synchronize_irq();
2476 +#endif
2477 +
2478         /* do the reset */
2479         val = GRC_MISC_CFG_CORECLK_RESET;
2480  
2481 @@ -4392,6 +5313,12 @@
2482                 }
2483         }
2484  
2485 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2486 +               tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
2487 +               tw32(GRC_VCPU_EXT_CTRL,
2488 +                    tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
2489 +       }
2490 +
2491         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
2492                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
2493         tw32(GRC_MISC_CFG, val);
2494 @@ -4441,48 +5368,14 @@
2495                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
2496         }
2497  
2498 -       /* Re-enable indirect register accesses. */
2499 -       pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
2500 -                              tp->misc_host_ctrl);
2501 -
2502 -       /* Set MAX PCI retry to zero. */
2503 -       val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
2504 -       if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
2505 -           (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
2506 -               val |= PCISTATE_RETRY_SAME_DMA;
2507 -       pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
2508 -
2509 -       pci_restore_state(tp->pdev);
2510 -
2511 -       /* Make sure PCI-X relaxed ordering bit is clear. */
2512 -       pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
2513 -       val &= ~PCIX_CAPS_RELAXED_ORDERING;
2514 -       pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
2515 -
2516 -       if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
2517 -               u32 val;
2518 +       tg3_restore_pci_state(tp);
2519  
2520 -               /* Chip reset on 5780 will reset MSI enable bit,
2521 -                * so need to restore it.
2522 -                */
2523 -               if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
2524 -                       u16 ctrl;
2525 -
2526 -                       pci_read_config_word(tp->pdev,
2527 -                                            tp->msi_cap + PCI_MSI_FLAGS,
2528 -                                            &ctrl);
2529 -                       pci_write_config_word(tp->pdev,
2530 -                                             tp->msi_cap + PCI_MSI_FLAGS,
2531 -                                             ctrl | PCI_MSI_FLAGS_ENABLE);
2532 -                       val = tr32(MSGINT_MODE);
2533 -                       tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
2534 -               }
2535 +       tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
2536  
2537 +       val = 0;
2538 +       if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
2539                 val = tr32(MEMARB_MODE);
2540 -               tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
2541 -
2542 -       } else
2543 -               tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
2544 +       tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
2545  
2546         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
2547                 tg3_stop_fw(tp);
2548 @@ -4515,21 +5408,9 @@
2549                 tw32_f(MAC_MODE, 0);
2550         udelay(40);
2551  
2552 -       if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
2553 -               /* Wait for firmware initialization to complete. */
2554 -               for (i = 0; i < 100000; i++) {
2555 -                       tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
2556 -                       if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2557 -                               break;
2558 -                       udelay(10);
2559 -               }
2560 -               if (i >= 100000) {
2561 -                       printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
2562 -                              "firmware will not restart magic=%08x\n",
2563 -                              tp->dev->name, val);
2564 -                       return -ENODEV;
2565 -               }
2566 -       }
2567 +       err = tg3_poll_fw(tp);
2568 +       if (err)
2569 +               return err;
2570  
2571         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
2572             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
2573 @@ -4613,7 +5494,7 @@
2574  #define TG3_FW_BSS_ADDR                0x08000a70
2575  #define TG3_FW_BSS_LEN         0x10
2576  
2577 -static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
2578 +static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
2579         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
2580         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
2581         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
2582 @@ -4707,7 +5588,7 @@
2583         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
2584  };
2585  
2586 -static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
2587 +static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
2588         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
2589         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
2590         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
2591 @@ -4732,10 +5613,15 @@
2592  {
2593         int i;
2594  
2595 -       if (offset == TX_CPU_BASE &&
2596 -           (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
2597 -               BUG();
2598 +       BUG_ON(offset == TX_CPU_BASE &&
2599 +           (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
2600  
2601 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2602 +               u32 val = tr32(GRC_VCPU_EXT_CTRL);
2603 +
2604 +               tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
2605 +               return 0;
2606 +       }
2607         if (offset == RX_CPU_BASE) {
2608                 for (i = 0; i < 10000; i++) {
2609                         tw32(offset + CPU_STATE, 0xffffffff);
2610 @@ -4773,13 +5659,13 @@
2611  struct fw_info {
2612         unsigned int text_base;
2613         unsigned int text_len;
2614 -       u32 *text_data;
2615 +       const u32 *text_data;
2616         unsigned int rodata_base;
2617         unsigned int rodata_len;
2618 -       u32 *rodata_data;
2619 +       const u32 *rodata_data;
2620         unsigned int data_base;
2621         unsigned int data_len;
2622 -       u32 *data_data;
2623 +       const u32 *data_data;
2624  };
2625  
2626  /* tp->lock is held. */
2627 @@ -4911,7 +5797,7 @@
2628  #define TG3_TSO_FW_BSS_ADDR            0x08001b80
2629  #define TG3_TSO_FW_BSS_LEN             0x894
2630  
2631 -static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
2632 +static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
2633         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
2634         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
2635         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
2636 @@ -5198,7 +6084,7 @@
2637         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
2638  };
2639  
2640 -static u32 tg3TsoFwRodata[] = {
2641 +static const u32 tg3TsoFwRodata[] = {
2642         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
2643         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
2644         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
2645 @@ -5206,7 +6092,7 @@
2646         0x00000000,
2647  };
2648  
2649 -static u32 tg3TsoFwData[] = {
2650 +static const u32 tg3TsoFwData[] = {
2651         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
2652         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2653         0x00000000,
2654 @@ -5228,7 +6114,7 @@
2655  #define TG3_TSO5_FW_BSS_ADDR           0x00010f50
2656  #define TG3_TSO5_FW_BSS_LEN            0x88
2657  
2658 -static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
2659 +static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
2660         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
2661         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
2662         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
2663 @@ -5387,14 +6273,14 @@
2664         0x00000000, 0x00000000, 0x00000000,
2665  };
2666  
2667 -static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
2668 +static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
2669         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
2670         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
2671         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
2672         0x00000000, 0x00000000, 0x00000000,
2673  };
2674  
2675 -static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
2676 +static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
2677         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
2678         0x00000000, 0x00000000, 0x00000000,
2679  };
2680 @@ -5474,7 +6360,7 @@
2681  #endif /* TG3_TSO_SUPPORT != 0 */
2682  
2683  /* tp->lock is held. */
2684 -static void __tg3_set_mac_addr(struct tg3 *tp)
2685 +static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2686  {
2687         u32 addr_high, addr_low;
2688         int i;
2689 @@ -5486,6 +6372,8 @@
2690                     (tp->dev->dev_addr[4] <<  8) |
2691                     (tp->dev->dev_addr[5] <<  0));
2692         for (i = 0; i < 4; i++) {
2693 +               if (i == 1 && skip_mac_1)
2694 +                       continue;
2695                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2696                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2697         }
2698 @@ -5512,17 +6400,34 @@
2699  {
2700         struct tg3 *tp = netdev_priv(dev);
2701         struct sockaddr *addr = p;
2702 +       int err = 0, skip_mac_1 = 0;
2703  
2704         if (!is_valid_ether_addr(addr->sa_data))
2705                 return -EINVAL;
2706  
2707         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2708  
2709 +       if (!netif_running(dev))
2710 +               return 0;
2711 +
2712 +       if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
2713 +               u32 addr0_high, addr0_low, addr1_high, addr1_low;
2714 +
2715 +               addr0_high = tr32(MAC_ADDR_0_HIGH);
2716 +               addr0_low = tr32(MAC_ADDR_0_LOW);
2717 +               addr1_high = tr32(MAC_ADDR_1_HIGH);
2718 +               addr1_low = tr32(MAC_ADDR_1_LOW);
2719 +
2720 +               /* Skip MAC addr 1 if ASF is using it. */
2721 +               if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
2722 +                   !(addr1_high == 0 && addr1_low == 0))
2723 +                       skip_mac_1 = 1;
2724 +       }
2725         spin_lock_bh(&tp->lock);
2726 -       __tg3_set_mac_addr(tp);
2727 +       __tg3_set_mac_addr(tp, skip_mac_1);
2728         spin_unlock_bh(&tp->lock);
2729  
2730 -       return 0;
2731 +       return err;
2732  }
2733  
2734  /* tp->lock is held. */
2735 @@ -5570,7 +6475,7 @@
2736  }
2737  
2738  /* tp->lock is held. */
2739 -static int tg3_reset_hw(struct tg3 *tp)
2740 +static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
2741  {
2742         u32 val, rdmac_mode;
2743         int i, err, limit;
2744 @@ -5585,6 +6490,9 @@
2745                 tg3_abort_hw(tp, 1);
2746         }
2747  
2748 +       if (reset_phy)
2749 +               tg3_phy_reset(tp);
2750 +
2751         err = tg3_chip_reset(tp);
2752         if (err)
2753                 return err;
2754 @@ -5619,7 +6527,9 @@
2755          * can only do this after the hardware has been
2756          * successfully reset.
2757          */
2758 -       tg3_init_rings(tp);
2759 +       err = tg3_init_rings(tp);
2760 +       if (err)
2761 +               return err;
2762  
2763         /* This value is determined during the probe time DMA
2764          * engine test, tg3_test_dma.
2765 @@ -5631,10 +6541,14 @@
2766                           GRC_MODE_NO_TX_PHDR_CSUM |
2767                           GRC_MODE_NO_RX_PHDR_CSUM);
2768         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
2769 -       if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
2770 -               tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
2771 -       if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
2772 -               tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
2773 +
2774 +       /* Pseudo-header checksum is done by hardware logic and not
2775 +        * the offload processers, so make the chip do the pseudo-
2776 +        * header checksums on receive.  For transmit it is more
2777 +        * convenient to do the pseudo-header checksum in software
2778 +        * as Linux does that on transmit for us in all cases.
2779 +        */
2780 +       tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
2781  
2782         tw32(GRC_MODE,
2783              tp->grc_mode |
2784 @@ -5708,7 +6622,20 @@
2785         }
2786  
2787         /* Setup replenish threshold. */
2788 -       tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
2789 +       val = tp->rx_pending / 8;
2790 +       if (val == 0)
2791 +               val = 1;
2792 +       else if (val > tp->rx_std_max_post)
2793 +               val = tp->rx_std_max_post;
2794 +       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2795 +               if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
2796 +                       tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
2797 +
2798 +               if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
2799 +                       val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
2800 +       }
2801 +
2802 +       tw32(RCVBDI_STD_THRESH, val);
2803  
2804         /* Initialize TG3_BDINFO's at:
2805          *  RCVDBDI_STD_BD:     standard eth size rx ring
2806 @@ -5817,7 +6744,7 @@
2807                      tp->rx_jumbo_ptr);
2808  
2809         /* Initialize MAC address and backoff seed. */
2810 -       __tg3_set_mac_addr(tp);
2811 +       __tg3_set_mac_addr(tp, 0);
2812  
2813         /* MTU + ethernet header + FCS + optional VLAN tag */
2814         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
2815 @@ -5842,16 +6769,13 @@
2816                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
2817                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
2818                       RDMAC_MODE_LNGREAD_ENAB);
2819 -       if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
2820 -               rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
2821  
2822         /* If statement applies to 5705 and 5750 PCI devices only */
2823         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
2824              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
2825             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
2826                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
2827 -                   (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
2828 -                    tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
2829 +                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2830                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
2831                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
2832                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
2833 @@ -5868,8 +6792,12 @@
2834  #endif
2835  
2836         /* Receive/send statistics. */
2837 -       if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
2838 -           (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
2839 +       if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
2840 +               val = tr32(RCVLPC_STATS_ENABLE);
2841 +               val &= ~RCVLPC_STATSENAB_DACK_FIX;
2842 +               tw32(RCVLPC_STATS_ENABLE, val);
2843 +       } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
2844 +                  (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
2845                 val = tr32(RCVLPC_STATS_ENABLE);
2846                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
2847                 tw32(RCVLPC_STATS_ENABLE, val);
2848 @@ -5936,30 +6864,40 @@
2849  
2850         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
2851                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
2852 +       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2853 +           !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
2854 +           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
2855 +               tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2856         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
2857         udelay(40);
2858  
2859         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
2860 -        * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
2861 +        * If TG3_FLG2_IS_NIC is zero, we should read the
2862          * register to preserve the GPIO settings for LOMs. The GPIOs,
2863          * whether used as inputs or outputs, are set by boot code after
2864          * reset.
2865          */
2866 -       if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
2867 +       if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
2868                 u32 gpio_mask;
2869  
2870 -               gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
2871 -                           GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
2872 +               gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
2873 +                           GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
2874 +                           GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
2875  
2876                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
2877                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
2878                                      GRC_LCLCTRL_GPIO_OUTPUT3;
2879  
2880 +               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
2881 +                       gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
2882 +
2883 +               tp->grc_local_ctrl &= ~gpio_mask;
2884                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
2885  
2886                 /* GPIO1 must be driven high for eeprom write protect */
2887 -               tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
2888 -                                      GRC_LCLCTRL_GPIO_OUTPUT1);
2889 +               if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
2890 +                       tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
2891 +                                              GRC_LCLCTRL_GPIO_OUTPUT1);
2892         }
2893         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
2894         udelay(100);
2895 @@ -5993,22 +6931,28 @@
2896                 }
2897         }
2898  
2899 +       /* Enable host coalescing bug fix */
2900 +       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
2901 +           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
2902 +               val |= (1 << 29);
2903 +
2904         tw32_f(WDMAC_MODE, val);
2905         udelay(40);
2906  
2907 -       if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
2908 -               val = tr32(TG3PCI_X_CAPS);
2909 +       if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
2910 +               u16 pcix_cmd;
2911 +
2912 +               pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
2913 +                                    &pcix_cmd);
2914                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
2915 -                       val &= ~PCIX_CAPS_BURST_MASK;
2916 -                       val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
2917 +                       pcix_cmd &= ~PCI_X_CMD_MAX_READ;
2918 +                       pcix_cmd |= PCI_X_CMD_READ_2K;
2919                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2920 -                       val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
2921 -                       val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
2922 -                       if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
2923 -                               val |= (tp->split_mode_max_reqs <<
2924 -                                       PCIX_CAPS_SPLIT_SHIFT);
2925 +                       pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
2926 +                       pcix_cmd |= PCI_X_CMD_READ_2K;
2927                 }
2928 -               tw32(TG3PCI_X_CAPS, val);
2929 +               pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
2930 +                                     pcix_cmd);
2931         }
2932  
2933         tw32_f(RDMAC_MODE, rdmac_mode);
2934 @@ -6048,6 +6992,9 @@
2935         udelay(100);
2936  
2937         tp->rx_mode = RX_MODE_ENABLE;
2938 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
2939 +               tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
2940 +
2941         tw32_f(MAC_RX_MODE, tp->rx_mode);
2942         udelay(10);
2943  
2944 @@ -6097,16 +7044,29 @@
2945                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
2946         }
2947  
2948 -       err = tg3_setup_phy(tp, 1);
2949 +       if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
2950 +           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
2951 +               u32 tmp;
2952 +
2953 +               tmp = tr32(SERDES_RX_CTRL);
2954 +               tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
2955 +               tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
2956 +               tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
2957 +               tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
2958 +       }
2959 +
2960 +       err = tg3_setup_phy(tp, 0);
2961         if (err)
2962                 return err;
2963  
2964 -       if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2965 +       if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
2966 +           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
2967                 u32 tmp;
2968  
2969                 /* Clear CRC stats. */
2970 -               if (!tg3_readphy(tp, 0x1e, &tmp)) {
2971 -                       tg3_writephy(tp, 0x1e, tmp | 0x8000);
2972 +               if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
2973 +                       tg3_writephy(tp, MII_TG3_TEST1,
2974 +                                    tmp | MII_TG3_TEST1_CRC_EN);
2975                         tg3_readphy(tp, 0x14, &tmp);
2976                 }
2977         }
2978 @@ -6170,12 +7130,12 @@
2979  /* Called at device open time to get the chip ready for
2980   * packet processing.  Invoked with tp->lock held.
2981   */
2982 -static int tg3_init_hw(struct tg3 *tp)
2983 +static int tg3_init_hw(struct tg3 *tp, int reset_phy)
2984  {
2985         int err;
2986  
2987         /* Force the chip into D0. */
2988 -       err = tg3_set_power_state(tp, 0);
2989 +       err = tg3_set_power_state(tp, PCI_D0);
2990         if (err)
2991                 goto out;
2992  
2993 @@ -6183,7 +7143,7 @@
2994  
2995         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
2996  
2997 -       err = tg3_reset_hw(tp);
2998 +       err = tg3_reset_hw(tp, reset_phy);
2999  
3000  out:
3001         return err;
3002 @@ -6231,12 +7191,19 @@
3003         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
3004         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
3005         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
3006 +
3007 +       TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
3008 +       TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
3009 +       TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
3010  }
3011  
3012  static void tg3_timer(unsigned long __opaque)
3013  {
3014         struct tg3 *tp = (struct tg3 *) __opaque;
3015  
3016 +       if (tp->irq_sync)
3017 +               goto restart_timer;
3018 +
3019         spin_lock(&tp->lock);
3020  
3021         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
3022 @@ -6294,12 +7261,14 @@
3023                                 need_setup = 1;
3024                         }
3025                         if (need_setup) {
3026 -                               tw32_f(MAC_MODE,
3027 -                                    (tp->mac_mode &
3028 -                                     ~MAC_MODE_PORT_MODE_MASK));
3029 -                               udelay(40);
3030 -                               tw32_f(MAC_MODE, tp->mac_mode);
3031 -                               udelay(40);
3032 +                               if (!tp->serdes_counter) {
3033 +                                       tw32_f(MAC_MODE,
3034 +                                            (tp->mac_mode &
3035 +                                             ~MAC_MODE_PORT_MODE_MASK));
3036 +                                       udelay(40);
3037 +                                       tw32_f(MAC_MODE, tp->mac_mode);
3038 +                                       udelay(40);
3039 +                               }
3040                                 tg3_setup_phy(tp, 0);
3041                         }
3042                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
3043 @@ -6308,16 +7277,32 @@
3044                 tp->timer_counter = tp->timer_multiplier;
3045         }
3046  
3047 -       /* Heartbeat is only sent once every 2 seconds.  */
3048 +       /* Heartbeat is only sent once every 2 seconds.
3049 +        *
3050 +        * The heartbeat is to tell the ASF firmware that the host
3051 +        * driver is still alive.  In the event that the OS crashes,
3052 +        * ASF needs to reset the hardware to free up the FIFO space
3053 +        * that may be filled with rx packets destined for the host.
3054 +        * If the FIFO is full, ASF will no longer function properly.
3055 +        *
3056 +        * Unintended resets have been reported on real time kernels
3057 +        * where the timer doesn't run on time.  Netpoll will also have
3058 +        * same problem.
3059 +        *
3060 +        * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
3061 +        * to check the ring condition when the heartbeat is expiring
3062 +        * before doing the reset.  This will prevent most unintended
3063 +        * resets.
3064 +        */
3065         if (!--tp->asf_counter) {
3066                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3067                         u32 val;
3068  
3069 -                       tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
3070 -                                          FWCMD_NICDRV_ALIVE2);
3071 -                       tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
3072 +                       tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
3073 +                                     FWCMD_NICDRV_ALIVE3);
3074 +                       tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
3075                         /* 5 seconds timeout */
3076 -                       tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
3077 +                       tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
3078                         val = tr32(GRC_RX_CPU_EVENT);
3079                         val |= (1 << 14);
3080                         tw32(GRC_RX_CPU_EVENT, val);
3081 @@ -6327,15 +7312,39 @@
3082  
3083         spin_unlock(&tp->lock);
3084  
3085 +restart_timer:
3086         tp->timer.expires = jiffies + tp->timer_offset;
3087         add_timer(&tp->timer);
3088  }
3089  
3090 +static int tg3_request_irq(struct tg3 *tp)
3091 +{
3092 +#if (LINUX_VERSION_CODE < 0x020613)
3093 +       irqreturn_t (*fn)(int, void *, struct pt_regs *);
3094 +#else
3095 +       irq_handler_t fn;
3096 +#endif
3097 +       unsigned long flags;
3098 +       struct net_device *dev = tp->dev;
3099 +
3100 +       if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
3101 +               fn = tg3_msi;
3102 +               if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
3103 +                       fn = tg3_msi_1shot;
3104 +               flags = IRQF_SAMPLE_RANDOM;
3105 +       } else {
3106 +               fn = tg3_interrupt;
3107 +               if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
3108 +                       fn = tg3_interrupt_tagged;
3109 +               flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
3110 +       }
3111 +       return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
3112 +}
3113 +
3114  static int tg3_test_interrupt(struct tg3 *tp)
3115  {
3116         struct net_device *dev = tp->dev;
3117 -       int err, i;
3118 -       u32 int_mbox = 0;
3119 +       int err, i, intr_ok = 0;
3120  
3121         if (!netif_running(dev))
3122                 return -ENODEV;
3123 @@ -6345,7 +7354,7 @@
3124         free_irq(tp->pdev->irq, dev);
3125  
3126         err = request_irq(tp->pdev->irq, tg3_test_isr,
3127 -                         SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
3128 +                         IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
3129         if (err)
3130                 return err;
3131  
3132 @@ -6356,32 +7365,36 @@
3133                HOSTCC_MODE_NOW);
3134  
3135         for (i = 0; i < 5; i++) {
3136 +               u32 int_mbox, misc_host_ctrl;
3137 +
3138                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
3139                                         TG3_64BIT_REG_LOW);
3140 -               if (int_mbox != 0)
3141 +               misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3142 +
3143 +               if ((int_mbox != 0) ||
3144 +                   (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
3145 +                       intr_ok = 1;
3146                         break;
3147 +               }
3148 +
3149 +#if (LINUX_VERSION_CODE < 0x20607)
3150 +               set_current_state(TASK_UNINTERRUPTIBLE);
3151 +               schedule_timeout(10);
3152 +#else
3153                 msleep(10);
3154 +#endif
3155         }
3156  
3157         tg3_disable_ints(tp);
3158  
3159         free_irq(tp->pdev->irq, dev);
3160 -       
3161 -       if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
3162 -               err = request_irq(tp->pdev->irq, tg3_msi,
3163 -                                 SA_SAMPLE_RANDOM, dev->name, dev);
3164 -       else {
3165 -               irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
3166 -               if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
3167 -                       fn = tg3_interrupt_tagged;
3168 -               err = request_irq(tp->pdev->irq, fn,
3169 -                                 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
3170 -       }
3171 +
3172 +       err = tg3_request_irq(tp);
3173  
3174         if (err)
3175                 return err;
3176  
3177 -       if (int_mbox != 0)
3178 +       if (intr_ok)
3179                 return 0;
3180  
3181         return -EIO;
3182 @@ -6424,18 +7437,13 @@
3183                        tp->dev->name);
3184  
3185         free_irq(tp->pdev->irq, dev);
3186 +#ifdef CONFIG_PCI_MSI
3187         pci_disable_msi(tp->pdev);
3188 +#endif
3189  
3190         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
3191  
3192 -       {
3193 -               irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
3194 -               if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
3195 -                       fn = tg3_interrupt_tagged;
3196 -
3197 -               err = request_irq(tp->pdev->irq, fn,
3198 -                                 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
3199 -       }
3200 +       err = tg3_request_irq(tp);
3201         if (err)
3202                 return err;
3203  
3204 @@ -6445,7 +7453,7 @@
3205         tg3_full_lock(tp, 1);
3206  
3207         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3208 -       err = tg3_init_hw(tp);
3209 +       err = tg3_init_hw(tp, 1);
3210  
3211         tg3_full_unlock(tp);
3212  
3213 @@ -6460,8 +7468,16 @@
3214         struct tg3 *tp = netdev_priv(dev);
3215         int err;
3216  
3217 +       netif_carrier_off(tp->dev);
3218 +
3219         tg3_full_lock(tp, 0);
3220  
3221 +       err = tg3_set_power_state(tp, PCI_D0);
3222 +       if (err) {
3223 +               tg3_full_unlock(tp);
3224 +               return err;
3225 +       }
3226 +
3227         tg3_disable_ints(tp);
3228         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
3229  
3230 @@ -6474,9 +7490,8 @@
3231         if (err)
3232                 return err;
3233  
3234 -       if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3235 -           (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
3236 -           (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
3237 +#ifdef CONFIG_PCI_MSI
3238 +       if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
3239                 /* All MSI supporting chips should support tagged
3240                  * status.  Assert that this is the case.
3241                  */
3242 @@ -6486,26 +7501,27 @@
3243                 } else if (pci_enable_msi(tp->pdev) == 0) {
3244                         u32 msi_mode;
3245  
3246 +                       /* Hardware bug - MSI won't work if INTX disabled. */
3247 +                       if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3248 +#if (LINUX_VERSION_CODE < 0x2060e)
3249 +                               tg3_enable_intx(tp->pdev);
3250 +#else
3251 +                               pci_intx(tp->pdev, 1);
3252 +#endif
3253 +
3254                         msi_mode = tr32(MSGINT_MODE);
3255                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
3256                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
3257                 }
3258         }
3259 -       if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
3260 -               err = request_irq(tp->pdev->irq, tg3_msi,
3261 -                                 SA_SAMPLE_RANDOM, dev->name, dev);
3262 -       else {
3263 -               irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
3264 -               if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
3265 -                       fn = tg3_interrupt_tagged;
3266 -
3267 -               err = request_irq(tp->pdev->irq, fn,
3268 -                                 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
3269 -       }
3270 +#endif
3271 +       err = tg3_request_irq(tp);
3272  
3273         if (err) {
3274                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
3275 +#ifdef CONFIG_PCI_MSI
3276                         pci_disable_msi(tp->pdev);
3277 +#endif
3278                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
3279                 }
3280                 tg3_free_consistent(tp);
3281 @@ -6514,7 +7530,7 @@
3282  
3283         tg3_full_lock(tp, 0);
3284  
3285 -       err = tg3_init_hw(tp);
3286 +       err = tg3_init_hw(tp, 1);
3287         if (err) {
3288                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3289                 tg3_free_rings(tp);
3290 @@ -6541,7 +7557,9 @@
3291         if (err) {
3292                 free_irq(tp->pdev->irq, dev);
3293                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
3294 +#ifdef CONFIG_PCI_MSI
3295                         pci_disable_msi(tp->pdev);
3296 +#endif
3297                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
3298                 }
3299                 tg3_free_consistent(tp);
3300 @@ -6555,7 +7573,9 @@
3301                         tg3_full_lock(tp, 0);
3302  
3303                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
3304 +#ifdef CONFIG_PCI_MSI
3305                                 pci_disable_msi(tp->pdev);
3306 +#endif
3307                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
3308                         }
3309                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3310 @@ -6566,6 +7586,15 @@
3311  
3312                         return err;
3313                 }
3314 +
3315 +               if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
3316 +                       if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
3317 +                               u32 val = tr32(PCIE_TRANSACTION_CFG);
3318 +
3319 +                               tw32(PCIE_TRANSACTION_CFG,
3320 +                                    val | PCIE_TRANS_CFG_1SHOT_MSI);
3321 +                       }
3322 +               }
3323         }
3324  
3325         tg3_full_lock(tp, 0);
3326 @@ -6816,12 +7845,12 @@
3327  {
3328         struct tg3 *tp = netdev_priv(dev);
3329  
3330 -       /* Calling flush_scheduled_work() may deadlock because
3331 -        * linkwatch_event() may be on the workqueue and it will try to get
3332 -        * the rtnl_lock which we are holding.
3333 -        */
3334 -       while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
3335 -               msleep(1);
3336 +#if (LINUX_VERSION_CODE >= 0x20616)
3337 +       cancel_work_sync(&tp->reset_task);
3338 +#else
3339 +       set_current_state(TASK_UNINTERRUPTIBLE);
3340 +       schedule_timeout(1);
3341 +#endif
3342  
3343         netif_stop_queue(dev);
3344  
3345 @@ -6836,16 +7865,15 @@
3346  
3347         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3348         tg3_free_rings(tp);
3349 -       tp->tg3_flags &=
3350 -               ~(TG3_FLAG_INIT_COMPLETE |
3351 -                 TG3_FLAG_GOT_SERDES_FLOWCTL);
3352 -       netif_carrier_off(tp->dev);
3353 +       tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
3354  
3355         tg3_full_unlock(tp);
3356  
3357         free_irq(tp->pdev->irq, dev);
3358         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
3359 +#ifdef CONFIG_PCI_MSI
3360                 pci_disable_msi(tp->pdev);
3361 +#endif
3362                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
3363         }
3364  
3365 @@ -6856,6 +7884,10 @@
3366  
3367         tg3_free_consistent(tp);
3368  
3369 +       tg3_set_power_state(tp, PCI_D3hot);
3370 +
3371 +       netif_carrier_off(tp->dev);
3372 +
3373         return 0;
3374  }
3375  
3376 @@ -6881,8 +7913,9 @@
3377                 u32 val;
3378  
3379                 spin_lock_bh(&tp->lock);
3380 -               if (!tg3_readphy(tp, 0x1e, &val)) {
3381 -                       tg3_writephy(tp, 0x1e, val | 0x8000);
3382 +               if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
3383 +                       tg3_writephy(tp, MII_TG3_TEST1,
3384 +                                    val | MII_TG3_TEST1_CRC_EN);
3385                         tg3_readphy(tp, 0x14, &val);
3386                 } else
3387                         val = 0;
3388 @@ -7002,7 +8035,7 @@
3389                 get_stat64(&hw_stats->rx_ucast_packets) +
3390                 get_stat64(&hw_stats->rx_mcast_packets) +
3391                 get_stat64(&hw_stats->rx_bcast_packets);
3392 -               
3393 +
3394         stats->tx_packets = old_stats->tx_packets +
3395                 get_stat64(&hw_stats->tx_ucast_packets) +
3396                 get_stat64(&hw_stats->tx_mcast_packets) +
3397 @@ -7150,6 +8183,9 @@
3398  {
3399         struct tg3 *tp = netdev_priv(dev);
3400  
3401 +       if (!netif_running(dev))
3402 +               return;
3403 +
3404         tg3_full_lock(tp, 0);
3405         __tg3_set_rx_mode(dev);
3406         tg3_full_unlock(tp);
3407 @@ -7174,6 +8210,9 @@
3408  
3409         memset(p, 0, TG3_REGDUMP_LEN);
3410  
3411 +       if (tp->link_config.phy_is_low_power)
3412 +               return;
3413 +
3414         tg3_full_lock(tp, 0);
3415  
3416  #define __GET_REG32(reg)       (*(p)++ = tr32(reg))
3417 @@ -7232,15 +8271,19 @@
3418         tg3_full_unlock(tp);
3419  }
3420  
3421 +#if (LINUX_VERSION_CODE >= 0x20418)
3422  static int tg3_get_eeprom_len(struct net_device *dev)
3423  {
3424         struct tg3 *tp = netdev_priv(dev);
3425  
3426         return tp->nvram_size;
3427  }
3428 +#endif
3429  
3430  static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
3431 +static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
3432  
3433 +#ifdef ETHTOOL_GEEPROM
3434  static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
3435  {
3436         struct tg3 *tp = netdev_priv(dev);
3437 @@ -7248,6 +8291,9 @@
3438         u8  *pd;
3439         u32 i, offset, len, val, b_offset, b_count;
3440  
3441 +       if (tp->link_config.phy_is_low_power)
3442 +               return -EAGAIN;
3443 +
3444         offset = eeprom->offset;
3445         len = eeprom->len;
3446         eeprom->len = 0;
3447 @@ -7299,9 +8345,11 @@
3448         }
3449         return 0;
3450  }
3451 +#endif
3452  
3453 -static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
3454 +static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
3455  
3456 +#ifdef ETHTOOL_SEEPROM
3457  static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
3458  {
3459         struct tg3 *tp = netdev_priv(dev);
3460 @@ -7309,6 +8357,9 @@
3461         u32 offset, len, b_offset, odd_len, start, end;
3462         u8 *buf;
3463  
3464 +       if (tp->link_config.phy_is_low_power)
3465 +               return -EAGAIN;
3466 +
3467         if (eeprom->magic != TG3_EEPROM_MAGIC)
3468                 return -EINVAL;
3469  
3470 @@ -7357,11 +8408,12 @@
3471  
3472         return ret;
3473  }
3474 +#endif
3475  
3476  static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3477  {
3478         struct tg3 *tp = netdev_priv(dev);
3479 -  
3480 +
3481         cmd->supported = (SUPPORTED_Autoneg);
3482  
3483         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
3484 @@ -7373,13 +8425,13 @@
3485                                   SUPPORTED_100baseT_Full |
3486                                   SUPPORTED_10baseT_Half |
3487                                   SUPPORTED_10baseT_Full |
3488 -                                 SUPPORTED_MII);
3489 +                                 SUPPORTED_TP);
3490                 cmd->port = PORT_TP;
3491         } else {
3492                 cmd->supported |= SUPPORTED_FIBRE;
3493                 cmd->port = PORT_FIBRE;
3494         }
3495 -  
3496 +
3497         cmd->advertising = tp->link_config.advertising;
3498         if (netif_running(dev)) {
3499                 cmd->speed = tp->link_config.active_speed;
3500 @@ -7392,12 +8444,12 @@
3501         cmd->maxrxpkt = 0;
3502         return 0;
3503  }
3504 -  
3505 +
3506  static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3507  {
3508         struct tg3 *tp = netdev_priv(dev);
3509 -  
3510 -       if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
3511 +
3512 +       if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
3513                 /* These are the only valid advertisement bits allowed.  */
3514                 if (cmd->autoneg == AUTONEG_ENABLE &&
3515                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
3516 @@ -7429,68 +8481,75 @@
3517                 tp->link_config.speed = cmd->speed;
3518                 tp->link_config.duplex = cmd->duplex;
3519         }
3520 -  
3521 +
3522 +       tp->link_config.orig_speed = tp->link_config.speed;
3523 +       tp->link_config.orig_duplex = tp->link_config.duplex;
3524 +       tp->link_config.orig_autoneg = tp->link_config.autoneg;
3525 +
3526         if (netif_running(dev))
3527                 tg3_setup_phy(tp, 1);
3528  
3529         tg3_full_unlock(tp);
3530 -  
3531 +
3532         return 0;
3533  }
3534 -  
3535 +
3536  static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3537  {
3538         struct tg3 *tp = netdev_priv(dev);
3539 -  
3540 +
3541         strcpy(info->driver, DRV_MODULE_NAME);
3542         strcpy(info->version, DRV_MODULE_VERSION);
3543 +       strcpy(info->fw_version, tp->fw_ver);
3544         strcpy(info->bus_info, pci_name(tp->pdev));
3545  }
3546 -  
3547 +
3548  static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3549  {
3550         struct tg3 *tp = netdev_priv(dev);
3551 -  
3552 -       wol->supported = WAKE_MAGIC;
3553 +
3554 +       if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
3555 +               wol->supported = WAKE_MAGIC;
3556 +       else
3557 +               wol->supported = 0;
3558         wol->wolopts = 0;
3559         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
3560                 wol->wolopts = WAKE_MAGIC;
3561         memset(&wol->sopass, 0, sizeof(wol->sopass));
3562  }
3563 -  
3564 +
3565  static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3566  {
3567         struct tg3 *tp = netdev_priv(dev);
3568 -  
3569 +
3570         if (wol->wolopts & ~WAKE_MAGIC)
3571                 return -EINVAL;
3572         if ((wol->wolopts & WAKE_MAGIC) &&
3573 -           tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
3574 -           !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
3575 +           !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
3576                 return -EINVAL;
3577 -  
3578 +
3579         spin_lock_bh(&tp->lock);
3580         if (wol->wolopts & WAKE_MAGIC)
3581                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
3582         else
3583                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
3584         spin_unlock_bh(&tp->lock);
3585 -  
3586 +
3587         return 0;
3588  }
3589 -  
3590 +
3591  static u32 tg3_get_msglevel(struct net_device *dev)
3592  {
3593         struct tg3 *tp = netdev_priv(dev);
3594         return tp->msg_enable;
3595  }
3596 -  
3597 +
3598  static void tg3_set_msglevel(struct net_device *dev, u32 value)
3599  {
3600         struct tg3 *tp = netdev_priv(dev);
3601         tp->msg_enable = value;
3602  }
3603 -  
3604 +
3605  #if TG3_TSO_SUPPORT != 0
3606  static int tg3_set_tso(struct net_device *dev, u32 value)
3607  {
3608 @@ -7501,16 +8560,23 @@
3609                         return -EINVAL;
3610                 return 0;
3611         }
3612 +       if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
3613 +           (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
3614 +               if (value)
3615 +                       dev->features |= NETIF_F_TSO6;
3616 +               else
3617 +                       dev->features &= ~NETIF_F_TSO6;
3618 +       }
3619         return ethtool_op_set_tso(dev, value);
3620  }
3621  #endif
3622 -  
3623 +
3624  static int tg3_nway_reset(struct net_device *dev)
3625  {
3626         struct tg3 *tp = netdev_priv(dev);
3627         u32 bmcr;
3628         int r;
3629 -  
3630 +
3631         if (!netif_running(dev))
3632                 return -EAGAIN;
3633  
3634 @@ -7528,41 +8594,53 @@
3635                 r = 0;
3636         }
3637         spin_unlock_bh(&tp->lock);
3638 -  
3639 +
3640         return r;
3641  }
3642 -  
3643 +
3644  static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
3645  {
3646         struct tg3 *tp = netdev_priv(dev);
3647 -  
3648 +
3649         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
3650         ering->rx_mini_max_pending = 0;
3651 -       ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
3652 +       if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
3653 +               ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
3654 +       else
3655 +               ering->rx_jumbo_max_pending = 0;
3656 +
3657 +       ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
3658  
3659         ering->rx_pending = tp->rx_pending;
3660         ering->rx_mini_pending = 0;
3661 -       ering->rx_jumbo_pending = tp->rx_jumbo_pending;
3662 +       if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
3663 +               ering->rx_jumbo_pending = tp->rx_jumbo_pending;
3664 +       else
3665 +               ering->rx_jumbo_pending = 0;
3666 +
3667         ering->tx_pending = tp->tx_pending;
3668  }
3669 -  
3670 +
3671  static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
3672  {
3673         struct tg3 *tp = netdev_priv(dev);
3674 -       int irq_sync = 0;
3675 -  
3676 +       int irq_sync = 0, err = 0;
3677 +
3678         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
3679             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
3680 -           (ering->tx_pending > TG3_TX_RING_SIZE - 1))
3681 +           (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
3682 +           (ering->tx_pending <= MAX_SKB_FRAGS) ||
3683 +           ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
3684 +            (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
3685                 return -EINVAL;
3686 -  
3687 +
3688         if (netif_running(dev)) {
3689                 tg3_netif_stop(tp);
3690                 irq_sync = 1;
3691         }
3692  
3693         tg3_full_lock(tp, irq_sync);
3694 -  
3695 +
3696         tp->rx_pending = ering->rx_pending;
3697  
3698         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
3699 @@ -7573,29 +8651,30 @@
3700  
3701         if (netif_running(dev)) {
3702                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3703 -               tg3_init_hw(tp);
3704 -               tg3_netif_start(tp);
3705 +               err = tg3_restart_hw(tp, 1);
3706 +               if (!err)
3707 +                       tg3_netif_start(tp);
3708         }
3709  
3710         tg3_full_unlock(tp);
3711 -  
3712 -       return 0;
3713 +
3714 +       return err;
3715  }
3716 -  
3717 +
3718  static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
3719  {
3720         struct tg3 *tp = netdev_priv(dev);
3721 -  
3722 +
3723         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
3724         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
3725         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
3726  }
3727 -  
3728 +
3729  static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
3730  {
3731         struct tg3 *tp = netdev_priv(dev);
3732 -       int irq_sync = 0;
3733 -  
3734 +       int irq_sync = 0, err = 0;
3735 +
3736         if (netif_running(dev)) {
3737                 tg3_netif_stop(tp);
3738                 irq_sync = 1;
3739 @@ -7618,58 +8697,68 @@
3740  
3741         if (netif_running(dev)) {
3742                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3743 -               tg3_init_hw(tp);
3744 -               tg3_netif_start(tp);
3745 +               err = tg3_restart_hw(tp, 1);
3746 +               if (!err)
3747 +                       tg3_netif_start(tp);
3748         }
3749  
3750         tg3_full_unlock(tp);
3751 -  
3752 -       return 0;
3753 +
3754 +       return err;
3755  }
3756 -  
3757 +
3758  static u32 tg3_get_rx_csum(struct net_device *dev)
3759  {
3760         struct tg3 *tp = netdev_priv(dev);
3761         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
3762  }
3763 -  
3764 +
3765  static int tg3_set_rx_csum(struct net_device *dev, u32 data)
3766  {
3767         struct tg3 *tp = netdev_priv(dev);
3768 -  
3769 +
3770         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
3771                 if (data != 0)
3772                         return -EINVAL;
3773                 return 0;
3774         }
3775 -  
3776 +
3777         spin_lock_bh(&tp->lock);
3778         if (data)
3779                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
3780         else
3781                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
3782         spin_unlock_bh(&tp->lock);
3783 -  
3784 +
3785         return 0;
3786  }
3787 -  
3788 +
3789 +#if (LINUX_VERSION_CODE >= 0x20418)
3790  static int tg3_set_tx_csum(struct net_device *dev, u32 data)
3791  {
3792         struct tg3 *tp = netdev_priv(dev);
3793 -  
3794 +
3795         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
3796                 if (data != 0)
3797                         return -EINVAL;
3798                 return 0;
3799         }
3800 -  
3801 -       if (data)
3802 -               dev->features |= NETIF_F_IP_CSUM;
3803 +
3804 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
3805 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
3806 +#if (LINUX_VERSION_CODE >= 0x20418) && (LINUX_VERSION_CODE < 0x2060c)
3807 +               tg3_set_tx_hw_csum(dev, data);
3808 +#elif (LINUX_VERSION_CODE >= 0x20617)
3809 +               ethtool_op_set_tx_ipv6_csum(dev, data);
3810 +#else
3811 +               ethtool_op_set_tx_hw_csum(dev, data);
3812 +#endif
3813         else
3814 -               dev->features &= ~NETIF_F_IP_CSUM;
3815 +               ethtool_op_set_tx_csum(dev, data);
3816  
3817         return 0;
3818  }
3819 +#endif
3820  
3821  static int tg3_get_stats_count (struct net_device *dev)
3822  {
3823 @@ -7716,12 +8805,16 @@
3824                                            LED_CTRL_TRAFFIC_OVERRIDE |
3825                                            LED_CTRL_TRAFFIC_BLINK |
3826                                            LED_CTRL_TRAFFIC_LED);
3827 -       
3828 +
3829                 else
3830                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
3831                                            LED_CTRL_TRAFFIC_OVERRIDE);
3832 -
3833 +#if (LINUX_VERSION_CODE < 0x20607)
3834 +               set_current_state(TASK_INTERRUPTIBLE);
3835 +               if (schedule_timeout(HZ / 2))
3836 +#else
3837                 if (msleep_interruptible(500))
3838 +#endif
3839                         break;
3840         }
3841         tw32(MAC_LED_CTRL, tp->led_ctrl);
3842 @@ -7736,29 +8829,106 @@
3843  }
3844  
3845  #define NVRAM_TEST_SIZE 0x100
3846 +#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
3847 +#define NVRAM_SELFBOOT_HW_SIZE 0x20
3848 +#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
3849  
3850  static int tg3_test_nvram(struct tg3 *tp)
3851  {
3852 -       u32 *buf, csum;
3853 -       int i, j, err = 0;
3854 +       u32 *buf, csum, magic;
3855 +       int i, j, err = 0, size;
3856 +
3857 +       if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
3858 +               return -EIO;
3859 +
3860 +       if (magic == TG3_EEPROM_MAGIC)
3861 +               size = NVRAM_TEST_SIZE;
3862 +       else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
3863 +               if ((magic & 0xe00000) == 0x200000)
3864 +                       size = NVRAM_SELFBOOT_FORMAT1_SIZE;
3865 +               else
3866 +                       return 0;
3867 +       } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
3868 +               size = NVRAM_SELFBOOT_HW_SIZE;
3869 +       else
3870 +               return -EIO;
3871  
3872 -       buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
3873 +       buf = kmalloc(size, GFP_KERNEL);
3874         if (buf == NULL)
3875                 return -ENOMEM;
3876  
3877 -       for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
3878 +       err = -EIO;
3879 +       for (i = 0, j = 0; i < size; i += 4, j++) {
3880                 u32 val;
3881  
3882                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
3883                         break;
3884                 buf[j] = cpu_to_le32(val);
3885         }
3886 -       if (i < NVRAM_TEST_SIZE)
3887 +       if (i < size)
3888                 goto out;
3889  
3890 -       err = -EIO;
3891 -       if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
3892 +       /* Selfboot format */
3893 +       if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
3894 +           TG3_EEPROM_MAGIC_FW) {
3895 +               u8 *buf8 = (u8 *) buf, csum8 = 0;
3896 +
3897 +               for (i = 0; i < size; i++)
3898 +                       csum8 += buf8[i];
3899 +
3900 +               if (csum8 == 0) {
3901 +                       err = 0;
3902 +                       goto out;
3903 +               }
3904 +
3905 +               err = -EIO;
3906 +               goto out;
3907 +       }
3908 +
3909 +       if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
3910 +           TG3_EEPROM_MAGIC_HW) {
3911 +               u8 data[NVRAM_SELFBOOT_DATA_SIZE];
3912 +               u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
3913 +               u8 *buf8 = (u8 *) buf;
3914 +               int j, k;
3915 +
3916 +               /* Separate the parity bits and the data bytes.  */
3917 +               for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
3918 +                       if ((i == 0) || (i == 8)) {
3919 +                               int l;
3920 +                               u8 msk;
3921 +
3922 +                               for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
3923 +                                       parity[k++] = buf8[i] & msk;
3924 +                               i++;
3925 +                       }
3926 +                       else if (i == 16) {
3927 +                               int l;
3928 +                               u8 msk;
3929 +
3930 +                               for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
3931 +                                       parity[k++] = buf8[i] & msk;
3932 +                               i++;
3933 +
3934 +                               for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
3935 +                                       parity[k++] = buf8[i] & msk;
3936 +                               i++;
3937 +                       }
3938 +                       data[j++] = buf8[i];
3939 +               }
3940 +
3941 +               err = -EIO;
3942 +               for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
3943 +                       u8 hw8 = hweight8(data[i]);
3944 +
3945 +                       if ((hw8 & 0x1) && parity[i])
3946 +                               goto out;
3947 +                       else if (!(hw8 & 0x1) && !parity[i])
3948 +                               goto out;
3949 +               }
3950 +               err = 0;
3951                 goto out;
3952 +       }
3953  
3954         /* Bootstrap checksum at offset 0x10 */
3955         csum = calc_crc((unsigned char *) buf, 0x10);
3956 @@ -7778,7 +8948,7 @@
3957  }
3958  
3959  #define TG3_SERDES_TIMEOUT_SEC 2
3960 -#define TG3_COPPER_TIMEOUT_SEC 6
3961 +#define TG3_COPPER_TIMEOUT_SEC 7
3962  
3963  static int tg3_test_link(struct tg3 *tp)
3964  {
3965 @@ -7796,7 +8966,12 @@
3966                 if (netif_carrier_ok(tp->dev))
3967                         return 0;
3968  
3969 +#if (LINUX_VERSION_CODE < 0x20607)
3970 +               set_current_state(TASK_INTERRUPTIBLE);
3971 +               if (schedule_timeout(HZ))
3972 +#else
3973                 if (msleep_interruptible(1000))
3974 +#endif
3975                         break;
3976         }
3977  
3978 @@ -7806,7 +8981,7 @@
3979  /* Only test the commonly used registers */
3980  static int tg3_test_registers(struct tg3 *tp)
3981  {
3982 -       int i, is_5705;
3983 +       int i, is_5705, is_5750;
3984         u32 offset, read_mask, write_mask, val, save_val, read_val;
3985         static struct {
3986                 u16 offset;
3987 @@ -7814,6 +8989,7 @@
3988  #define TG3_FL_5705    0x1
3989  #define TG3_FL_NOT_5705        0x2
3990  #define TG3_FL_NOT_5788        0x4
3991 +#define TG3_FL_NOT_5750        0x8
3992                 u32 read_mask;
3993                 u32 write_mask;
3994         } reg_tbl[] = {
3995 @@ -7866,7 +9042,7 @@
3996                         0x00000000, 0xffff0002 },
3997                 { RCVDBDI_STD_BD+0xc, 0x0000,
3998                         0x00000000, 0xffffffff },
3999 -       
4000 +
4001                 /* Receive BD Initiator Control Registers. */
4002                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
4003                         0x00000000, 0xffffffff },
4004 @@ -7874,7 +9050,7 @@
4005                         0x00000000, 0x000003ff },
4006                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
4007                         0x00000000, 0xffffffff },
4008 -       
4009 +
4010                 /* Host Coalescing Control Registers. */
4011                 { HOSTCC_MODE, TG3_FL_NOT_5705,
4012                         0x00000000, 0x00000004 },
4013 @@ -7924,9 +9100,9 @@
4014                         0xffffffff, 0x00000000 },
4015  
4016                 /* Buffer Manager Control Registers. */
4017 -               { BUFMGR_MB_POOL_ADDR, 0x0000,
4018 +               { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
4019                         0x00000000, 0x007fff80 },
4020 -               { BUFMGR_MB_POOL_SIZE, 0x0000,
4021 +               { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
4022                         0x00000000, 0x007fffff },
4023                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
4024                         0x00000000, 0x0000003f },
4025 @@ -7938,7 +9114,7 @@
4026                         0xffffffff, 0x00000000 },
4027                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
4028                         0xffffffff, 0x00000000 },
4029 -       
4030 +
4031                 /* Mailbox Registers */
4032                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
4033                         0x00000000, 0x000001ff },
4034 @@ -7952,10 +9128,12 @@
4035                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
4036         };
4037  
4038 -       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4039 +       is_5705 = is_5750 = 0;
4040 +       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4041                 is_5705 = 1;
4042 -       else
4043 -               is_5705 = 0;
4044 +               if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4045 +                       is_5750 = 1;
4046 +       }
4047  
4048         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4049                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
4050 @@ -7968,6 +9146,9 @@
4051                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
4052                         continue;
4053  
4054 +               if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
4055 +                       continue;
4056 +
4057                 offset = (u32) reg_tbl[i].offset;
4058                 read_mask = reg_tbl[i].read_mask;
4059                 write_mask = reg_tbl[i].write_mask;
4060 @@ -8011,14 +9192,16 @@
4061         return 0;
4062  
4063  out:
4064 -       printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
4065 +       if (netif_msg_hw(tp))
4066 +               printk(KERN_ERR PFX "Register test failed at offset %x\n",
4067 +                      offset);
4068         tw32(offset, save_val);
4069         return -EIO;
4070  }
4071  
4072  static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
4073  {
4074 -       static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
4075 +       static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
4076         int i;
4077         u32 j;
4078  
4079 @@ -8052,14 +9235,34 @@
4080                 { 0x00008000, 0x02000},
4081                 { 0x00010000, 0x0e000},
4082                 { 0xffffffff, 0x00000}
4083 +       }, mem_tbl_5755[] = {
4084 +               { 0x00000200, 0x00008},
4085 +               { 0x00004000, 0x00800},
4086 +               { 0x00006000, 0x00800},
4087 +               { 0x00008000, 0x02000},
4088 +               { 0x00010000, 0x0c000},
4089 +               { 0xffffffff, 0x00000}
4090 +       }, mem_tbl_5906[] = {
4091 +               { 0x00000200, 0x00008},
4092 +               { 0x00004000, 0x00400},
4093 +               { 0x00006000, 0x00400},
4094 +               { 0x00008000, 0x01000},
4095 +               { 0x00010000, 0x01000},
4096 +               { 0xffffffff, 0x00000}
4097         };
4098         struct mem_entry *mem_tbl;
4099         int err = 0;
4100         int i;
4101  
4102 -       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4103 -               mem_tbl = mem_tbl_5705;
4104 -       else
4105 +       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4106 +               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4107 +                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4108 +                       mem_tbl = mem_tbl_5755;
4109 +               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
4110 +                       mem_tbl = mem_tbl_5906;
4111 +               else
4112 +                       mem_tbl = mem_tbl_5705;
4113 +       } else
4114                 mem_tbl = mem_tbl_570x;
4115  
4116         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4117 @@ -8067,7 +9270,7 @@
4118                     mem_tbl[i].len)) != 0)
4119                         break;
4120         }
4121 -       
4122 +
4123         return err;
4124  }
4125  
4126 @@ -8093,23 +9296,59 @@
4127                         return 0;
4128  
4129                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
4130 -                          MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
4131 -                          MAC_MODE_PORT_MODE_GMII;
4132 +                          MAC_MODE_PORT_INT_LPBACK;
4133 +               if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4134 +                       mac_mode |= MAC_MODE_LINK_POLARITY;
4135 +               if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
4136 +                       mac_mode |= MAC_MODE_PORT_MODE_MII;
4137 +               else
4138 +                       mac_mode |= MAC_MODE_PORT_MODE_GMII;
4139                 tw32(MAC_MODE, mac_mode);
4140         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
4141 -               tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
4142 -                                          BMCR_SPEED1000);
4143 +               u32 val;
4144 +
4145 +               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4146 +                       u32 phytest;
4147 +
4148 +                       if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
4149 +                               u32 phy;
4150 +
4151 +                               tg3_writephy(tp, MII_TG3_EPHY_TEST,
4152 +                                            phytest | MII_TG3_EPHY_SHADOW_EN);
4153 +                               if (!tg3_readphy(tp, 0x1b, &phy))
4154 +                                       tg3_writephy(tp, 0x1b, phy & ~0x20);
4155 +                               tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
4156 +                       }
4157 +                       val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
4158 +               } else
4159 +                       val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
4160 +
4161 +               tg3_phy_toggle_automdix(tp, 0);
4162 +
4163 +               tg3_writephy(tp, MII_BMCR, val);
4164                 udelay(40);
4165 +
4166 +               mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4167 +               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4168 +                       tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
4169 +                       mac_mode |= MAC_MODE_PORT_MODE_MII;
4170 +               } else
4171 +                       mac_mode |= MAC_MODE_PORT_MODE_GMII;
4172 +
4173                 /* reset to prevent losing 1st rx packet intermittently */
4174                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4175                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
4176                         udelay(10);
4177                         tw32_f(MAC_RX_MODE, tp->rx_mode);
4178                 }
4179 -               mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
4180 -                          MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
4181 -               if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
4182 -                       mac_mode &= ~MAC_MODE_LINK_POLARITY;
4183 +               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4184 +                       if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
4185 +                               mac_mode &= ~MAC_MODE_LINK_POLARITY;
4186 +                       else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
4187 +                               mac_mode |= MAC_MODE_LINK_POLARITY;
4188 +                       tg3_writephy(tp, MII_TG3_EXT_CTRL,
4189 +                                    MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4190 +               }
4191                 tw32(MAC_MODE, mac_mode);
4192         }
4193         else
4194 @@ -8118,7 +9357,10 @@
4195         err = -EIO;
4196  
4197         tx_len = 1514;
4198 -       skb = dev_alloc_skb(tx_len);
4199 +       skb = netdev_alloc_skb(tp->dev, tx_len);
4200 +       if (!skb)
4201 +               return -ENOMEM;
4202 +
4203         tx_data = skb_put(skb, tx_len);
4204         memcpy(tx_data, tp->dev->dev_addr, 6);
4205         memset(tx_data + 6, 0x0, 8);
4206 @@ -8144,13 +9386,17 @@
4207         tp->tx_prod++;
4208         num_pkts++;
4209  
4210 +       /* Some platforms need to sync memory here */
4211 +       wmb();
4212 +
4213         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
4214                      tp->tx_prod);
4215         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
4216  
4217         udelay(10);
4218  
4219 -       for (i = 0; i < 10; i++) {
4220 +       /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
4221 +       for (i = 0; i < 25; i++) {
4222                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
4223                        HOSTCC_MODE_NOW);
4224  
4225 @@ -8196,7 +9442,7 @@
4226                         goto out;
4227         }
4228         err = 0;
4229 -       
4230 +
4231         /* tg3_free_rings will unmap and free the rx_skb */
4232  out:
4233         return err;
4234 @@ -8214,7 +9460,9 @@
4235         if (!netif_running(tp->dev))
4236                 return TG3_LOOPBACK_FAILED;
4237  
4238 -       tg3_reset_hw(tp);
4239 +       err = tg3_reset_hw(tp, 1);
4240 +       if (err)
4241 +               return TG3_LOOPBACK_FAILED;
4242  
4243         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
4244                 err |= TG3_MAC_LOOPBACK_FAILED;
4245 @@ -8231,6 +9479,9 @@
4246  {
4247         struct tg3 *tp = netdev_priv(dev);
4248  
4249 +       if (tp->link_config.phy_is_low_power)
4250 +               tg3_set_power_state(tp, PCI_D0);
4251 +
4252         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
4253  
4254         if (tg3_test_nvram(tp) != 0) {
4255 @@ -8259,6 +9510,9 @@
4256                 if (!err)
4257                         tg3_nvram_unlock(tp);
4258  
4259 +               if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
4260 +                       tg3_phy_reset(tp);
4261 +
4262                 if (tg3_test_registers(tp) != 0) {
4263                         etest->flags |= ETH_TEST_FL_FAILED;
4264                         data[2] = 1;
4265 @@ -8282,17 +9536,24 @@
4266                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4267                 if (netif_running(dev)) {
4268                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
4269 -                       tg3_init_hw(tp);
4270 -                       tg3_netif_start(tp);
4271 +                       if (!tg3_restart_hw(tp, 1))
4272 +                               tg3_netif_start(tp);
4273                 }
4274  
4275                 tg3_full_unlock(tp);
4276         }
4277 +       if (tp->link_config.phy_is_low_power)
4278 +               tg3_set_power_state(tp, PCI_D3hot);
4279 +
4280  }
4281  
4282  static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4283  {
4284 +#if (LINUX_VERSION_CODE >= 0x020607)
4285         struct mii_ioctl_data *data = if_mii(ifr);
4286 +#else
4287 +       struct mii_ioctl_data *data = (struct mii_ioctl_data *) &ifr->ifr_ifru;
4288 +#endif
4289         struct tg3 *tp = netdev_priv(dev);
4290         int err;
4291  
4292 @@ -8307,6 +9568,9 @@
4293                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
4294                         break;                  /* We have no PHY */
4295  
4296 +               if (tp->link_config.phy_is_low_power)
4297 +                       return -EAGAIN;
4298 +
4299                 spin_lock_bh(&tp->lock);
4300                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
4301                 spin_unlock_bh(&tp->lock);
4302 @@ -8323,6 +9587,9 @@
4303                 if (!capable(CAP_NET_ADMIN))
4304                         return -EPERM;
4305  
4306 +               if (tp->link_config.phy_is_low_power)
4307 +                       return -EAGAIN;
4308 +
4309                 spin_lock_bh(&tp->lock);
4310                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
4311                 spin_unlock_bh(&tp->lock);
4312 @@ -8341,6 +9608,9 @@
4313  {
4314         struct tg3 *tp = netdev_priv(dev);
4315  
4316 +       if (netif_running(dev))
4317 +               tg3_netif_stop(tp);
4318 +
4319         tg3_full_lock(tp, 0);
4320  
4321         tp->vlgrp = grp;
4322 @@ -8348,6 +9618,9 @@
4323         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
4324         __tg3_set_rx_mode(dev);
4325  
4326 +       if (netif_running(dev))
4327 +               tg3_netif_start(tp);
4328 +
4329         tg3_full_unlock(tp);
4330  }
4331  
4332 @@ -8355,10 +9628,15 @@
4333  {
4334         struct tg3 *tp = netdev_priv(dev);
4335  
4336 +       if (netif_running(dev))
4337 +               tg3_netif_stop(tp);
4338 +
4339         tg3_full_lock(tp, 0);
4340 -       if (tp->vlgrp)
4341 -               tp->vlgrp->vlan_devices[vid] = NULL;
4342 +       vlan_group_set_device(tp->vlgrp, vid, NULL);
4343         tg3_full_unlock(tp);
4344 +
4345 +       if (netif_running(dev))
4346 +               tg3_netif_start(tp);
4347  }
4348  #endif
4349  
4350 @@ -8436,9 +9714,15 @@
4351         .set_msglevel           = tg3_set_msglevel,
4352         .nway_reset             = tg3_nway_reset,
4353         .get_link               = ethtool_op_get_link,
4354 +#if (LINUX_VERSION_CODE >= 0x20418)
4355         .get_eeprom_len         = tg3_get_eeprom_len,
4356 +#endif
4357 +#ifdef ETHTOOL_GEEPROM
4358         .get_eeprom             = tg3_get_eeprom,
4359 +#endif
4360 +#ifdef ETHTOOL_SEEPROM
4361         .set_eeprom             = tg3_set_eeprom,
4362 +#endif
4363         .get_ringparam          = tg3_get_ringparam,
4364         .set_ringparam          = tg3_set_ringparam,
4365         .get_pauseparam         = tg3_get_pauseparam,
4366 @@ -8446,7 +9730,9 @@
4367         .get_rx_csum            = tg3_get_rx_csum,
4368         .set_rx_csum            = tg3_set_rx_csum,
4369         .get_tx_csum            = ethtool_op_get_tx_csum,
4370 +#if (LINUX_VERSION_CODE >= 0x20418)
4371         .set_tx_csum            = tg3_set_tx_csum,
4372 +#endif
4373         .get_sg                 = ethtool_op_get_sg,
4374         .set_sg                 = ethtool_op_set_sg,
4375  #if TG3_TSO_SUPPORT != 0
4376 @@ -8461,19 +9747,23 @@
4377         .get_ethtool_stats      = tg3_get_ethtool_stats,
4378         .get_coalesce           = tg3_get_coalesce,
4379         .set_coalesce           = tg3_set_coalesce,
4380 +#ifdef ETHTOOL_GPERMADDR
4381         .get_perm_addr          = ethtool_op_get_perm_addr,
4382 +#endif
4383  };
4384  
4385  static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
4386  {
4387 -       u32 cursize, val;
4388 +       u32 cursize, val, magic;
4389  
4390         tp->nvram_size = EEPROM_CHIP_SIZE;
4391  
4392 -       if (tg3_nvram_read(tp, 0, &val) != 0)
4393 +       if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
4394                 return;
4395  
4396 -       if (swab32(val) != TG3_EEPROM_MAGIC)
4397 +       if ((magic != TG3_EEPROM_MAGIC) &&
4398 +           ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
4399 +           ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
4400                 return;
4401  
4402         /*
4403 @@ -8481,13 +9771,13 @@
4404          * When we encounter our validation signature, we know the addressing
4405          * has wrapped around, and thus have our chip size.
4406          */
4407 -       cursize = 0x800;
4408 +       cursize = 0x10;
4409  
4410         while (cursize < tp->nvram_size) {
4411 -               if (tg3_nvram_read(tp, cursize, &val) != 0)
4412 +               if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
4413                         return;
4414  
4415 -               if (swab32(val) == TG3_EEPROM_MAGIC)
4416 +               if (val == magic)
4417                         break;
4418  
4419                 cursize <<= 1;
4420 @@ -8495,18 +9785,27 @@
4421  
4422         tp->nvram_size = cursize;
4423  }
4424 -               
4425 +
4426  static void __devinit tg3_get_nvram_size(struct tg3 *tp)
4427  {
4428         u32 val;
4429  
4430 +       if (tg3_nvram_read_swab(tp, 0, &val) != 0)
4431 +               return;
4432 +
4433 +       /* Selfboot format */
4434 +       if (val != TG3_EEPROM_MAGIC) {
4435 +               tg3_get_eeprom_size(tp);
4436 +               return;
4437 +       }
4438 +
4439         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
4440                 if (val != 0) {
4441                         tp->nvram_size = (val >> 16) * 1024;
4442                         return;
4443                 }
4444         }
4445 -       tp->nvram_size = 0x20000;
4446 +       tp->nvram_size = 0x80000;
4447  }
4448  
4449  static void __devinit tg3_get_nvram_info(struct tg3 *tp)
4450 @@ -8623,22 +9922,112 @@
4451         }
4452  }
4453  
4454 -/* Chips other than 5700/5701 use the NVRAM for fetching info. */
4455 -static void __devinit tg3_nvram_init(struct tg3 *tp)
4456 +static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
4457  {
4458 -       int j;
4459 +       u32 nvcfg1, protect = 0;
4460  
4461 -       if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
4462 -               return;
4463 +       nvcfg1 = tr32(NVRAM_CFG1);
4464 +
4465 +       /* NVRAM protection for TPM */
4466 +       if (nvcfg1 & (1 << 27)) {
4467 +               tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
4468 +               protect = 1;
4469 +       }
4470 +
4471 +       nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
4472 +       switch (nvcfg1) {
4473 +               case FLASH_5755VENDOR_ATMEL_FLASH_1:
4474 +               case FLASH_5755VENDOR_ATMEL_FLASH_2:
4475 +               case FLASH_5755VENDOR_ATMEL_FLASH_3:
4476 +               case FLASH_5755VENDOR_ATMEL_FLASH_5:
4477 +                       tp->nvram_jedecnum = JEDEC_ATMEL;
4478 +                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
4479 +                       tp->tg3_flags2 |= TG3_FLG2_FLASH;
4480 +                       tp->nvram_pagesize = 264;
4481 +                       if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
4482 +                           nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
4483 +                               tp->nvram_size = (protect ? 0x3e200 : 0x80000);
4484 +                       else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
4485 +                               tp->nvram_size = (protect ? 0x1f200 : 0x40000);
4486 +                       else
4487 +                               tp->nvram_size = (protect ? 0x1f200 : 0x20000);
4488 +                       break;
4489 +               case FLASH_5752VENDOR_ST_M45PE10:
4490 +               case FLASH_5752VENDOR_ST_M45PE20:
4491 +               case FLASH_5752VENDOR_ST_M45PE40:
4492 +                       tp->nvram_jedecnum = JEDEC_ST;
4493 +                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
4494 +                       tp->tg3_flags2 |= TG3_FLG2_FLASH;
4495 +                       tp->nvram_pagesize = 256;
4496 +                       if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
4497 +                               tp->nvram_size = (protect ? 0x10000 : 0x20000);
4498 +                       else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
4499 +                               tp->nvram_size = (protect ? 0x10000 : 0x40000);
4500 +                       else
4501 +                               tp->nvram_size = (protect ? 0x20000 : 0x80000);
4502 +                       break;
4503 +       }
4504 +}
4505 +
4506 +static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
4507 +{
4508 +       u32 nvcfg1;
4509 +
4510 +       nvcfg1 = tr32(NVRAM_CFG1);
4511 +
4512 +       switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
4513 +               case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
4514 +               case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
4515 +               case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
4516 +               case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
4517 +                       tp->nvram_jedecnum = JEDEC_ATMEL;
4518 +                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
4519 +                       tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
4520 +
4521 +                       nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
4522 +                       tw32(NVRAM_CFG1, nvcfg1);
4523 +                       break;
4524 +               case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
4525 +               case FLASH_5755VENDOR_ATMEL_FLASH_1:
4526 +               case FLASH_5755VENDOR_ATMEL_FLASH_2:
4527 +               case FLASH_5755VENDOR_ATMEL_FLASH_3:
4528 +                       tp->nvram_jedecnum = JEDEC_ATMEL;
4529 +                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
4530 +                       tp->tg3_flags2 |= TG3_FLG2_FLASH;
4531 +                       tp->nvram_pagesize = 264;
4532 +                       break;
4533 +               case FLASH_5752VENDOR_ST_M45PE10:
4534 +               case FLASH_5752VENDOR_ST_M45PE20:
4535 +               case FLASH_5752VENDOR_ST_M45PE40:
4536 +                       tp->nvram_jedecnum = JEDEC_ST;
4537 +                       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
4538 +                       tp->tg3_flags2 |= TG3_FLG2_FLASH;
4539 +                       tp->nvram_pagesize = 256;
4540 +                       break;
4541 +       }
4542 +}
4543 +
4544 +static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
4545 +{
4546 +       tp->nvram_jedecnum = JEDEC_ATMEL;
4547 +       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
4548 +       tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
4549 +}
4550  
4551 +/* Chips other than 5700/5701 use the NVRAM for fetching info. */
4552 +static void __devinit tg3_nvram_init(struct tg3 *tp)
4553 +{
4554         tw32_f(GRC_EEPROM_ADDR,
4555              (EEPROM_ADDR_FSM_RESET |
4556               (EEPROM_DEFAULT_CLOCK_PERIOD <<
4557                EEPROM_ADDR_CLKPERD_SHIFT)));
4558  
4559 -       /* XXX schedule_timeout() ... */
4560 -       for (j = 0; j < 100; j++)
4561 -               udelay(10);
4562 +#if (LINUX_VERSION_CODE < 0x20607)
4563 +       set_current_state(TASK_UNINTERRUPTIBLE);
4564 +       schedule_timeout(HZ / 1000);
4565 +#else
4566 +       msleep(1);
4567 +#endif
4568  
4569         /* Enable seeprom accesses. */
4570         tw32_f(GRC_LOCAL_CTRL,
4571 @@ -8656,12 +10045,21 @@
4572                 }
4573                 tg3_enable_nvram_access(tp);
4574  
4575 +               tp->nvram_size = 0;
4576 +
4577                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
4578                         tg3_get_5752_nvram_info(tp);
4579 +               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
4580 +                       tg3_get_5755_nvram_info(tp);
4581 +               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4582 +                       tg3_get_5787_nvram_info(tp);
4583 +               else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
4584 +                       tg3_get_5906_nvram_info(tp);
4585                 else
4586                         tg3_get_nvram_info(tp);
4587  
4588 -               tg3_get_nvram_size(tp);
4589 +               if (tp->nvram_size == 0)
4590 +                       tg3_get_nvram_size(tp);
4591  
4592                 tg3_disable_nvram_access(tp);
4593                 tg3_nvram_unlock(tp);
4594 @@ -8693,12 +10091,17 @@
4595               EEPROM_ADDR_ADDR_MASK) |
4596              EEPROM_ADDR_READ | EEPROM_ADDR_START);
4597  
4598 -       for (i = 0; i < 10000; i++) {
4599 +       for (i = 0; i < 1000; i++) {
4600                 tmp = tr32(GRC_EEPROM_ADDR);
4601  
4602                 if (tmp & EEPROM_ADDR_COMPLETE)
4603                         break;
4604 -               udelay(100);
4605 +#if (LINUX_VERSION_CODE < 0x20607)
4606 +               set_current_state(TASK_UNINTERRUPTIBLE);
4607 +               schedule_timeout(HZ / 1000);
4608 +#else
4609 +               msleep(1);
4610 +#endif
4611         }
4612         if (!(tmp & EEPROM_ADDR_COMPLETE))
4613                 return -EBUSY;
4614 @@ -8727,26 +10130,42 @@
4615         return 0;
4616  }
4617  
4618 +static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
4619 +{
4620 +       if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
4621 +           (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
4622 +           (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
4623 +           (tp->nvram_jedecnum == JEDEC_ATMEL))
4624 +
4625 +               addr = ((addr / tp->nvram_pagesize) <<
4626 +                       ATMEL_AT45DB0X1B_PAGE_POS) +
4627 +                      (addr % tp->nvram_pagesize);
4628 +
4629 +       return addr;
4630 +}
4631 +
4632 +static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
4633 +{
4634 +       if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
4635 +           (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
4636 +           (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
4637 +           (tp->nvram_jedecnum == JEDEC_ATMEL))
4638 +
4639 +               addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
4640 +                       tp->nvram_pagesize) +
4641 +                      (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
4642 +
4643 +       return addr;
4644 +}
4645 +
4646  static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
4647  {
4648         int ret;
4649  
4650 -       if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
4651 -               printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
4652 -               return -EINVAL;
4653 -       }
4654 -
4655         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
4656                 return tg3_nvram_read_using_eeprom(tp, offset, val);
4657  
4658 -       if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
4659 -               (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
4660 -               (tp->nvram_jedecnum == JEDEC_ATMEL)) {
4661 -
4662 -               offset = ((offset / tp->nvram_pagesize) <<
4663 -                         ATMEL_AT45DB0X1B_PAGE_POS) +
4664 -                       (offset % tp->nvram_pagesize);
4665 -       }
4666 +       offset = tg3_nvram_phys_addr(tp, offset);
4667  
4668         if (offset > NVRAM_ADDR_MSK)
4669                 return -EINVAL;
4670 @@ -8771,6 +10190,16 @@
4671         return ret;
4672  }
4673  
4674 +static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
4675 +{
4676 +       int err;
4677 +       u32 tmp;
4678 +
4679 +       err = tg3_nvram_read(tp, offset, &tmp);
4680 +       *val = swab32(tmp);
4681 +       return err;
4682 +}
4683 +
4684  static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
4685                                     u32 offset, u32 len, u8 *buf)
4686  {
4687 @@ -8796,13 +10225,18 @@
4688                         (addr & EEPROM_ADDR_ADDR_MASK) |
4689                         EEPROM_ADDR_START |
4690                         EEPROM_ADDR_WRITE);
4691 -               
4692 -               for (j = 0; j < 10000; j++) {
4693 +
4694 +               for (j = 0; j < 1000; j++) {
4695                         val = tr32(GRC_EEPROM_ADDR);
4696  
4697                         if (val & EEPROM_ADDR_COMPLETE)
4698                                 break;
4699 -                       udelay(100);
4700 +#if (LINUX_VERSION_CODE < 0x20607)
4701 +                       set_current_state(TASK_UNINTERRUPTIBLE);
4702 +                       schedule_timeout(HZ / 1000);
4703 +#else
4704 +                       msleep(1);
4705 +#endif
4706                 }
4707                 if (!(val & EEPROM_ADDR_COMPLETE)) {
4708                         rc = -EBUSY;
4709 @@ -8832,7 +10266,7 @@
4710                 u32 phy_addr, page_off, size;
4711  
4712                 phy_addr = offset & ~pagemask;
4713 -       
4714 +
4715                 for (j = 0; j < pagesize; j += 4) {
4716                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
4717                                                 (u32 *) (tmp + j))))
4718 @@ -8923,15 +10357,7 @@
4719  
4720                 page_off = offset % tp->nvram_pagesize;
4721  
4722 -               if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
4723 -                       (tp->nvram_jedecnum == JEDEC_ATMEL)) {
4724 -
4725 -                       phy_addr = ((offset / tp->nvram_pagesize) <<
4726 -                                   ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
4727 -               }
4728 -               else {
4729 -                       phy_addr = offset;
4730 -               }
4731 +               phy_addr = tg3_nvram_phys_addr(tp, offset);
4732  
4733                 tw32(NVRAM_ADDR, phy_addr);
4734  
4735 @@ -8939,13 +10365,15 @@
4736  
4737                 if ((page_off == 0) || (i == 0))
4738                         nvram_cmd |= NVRAM_CMD_FIRST;
4739 -               else if (page_off == (tp->nvram_pagesize - 4))
4740 +               if (page_off == (tp->nvram_pagesize - 4))
4741                         nvram_cmd |= NVRAM_CMD_LAST;
4742  
4743                 if (i == (len - 4))
4744                         nvram_cmd |= NVRAM_CMD_LAST;
4745  
4746                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
4747 +                   (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
4748 +                   (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
4749                     (tp->nvram_jedecnum == JEDEC_ST) &&
4750                     (nvram_cmd & NVRAM_CMD_FIRST)) {
4751  
4752 @@ -8971,11 +10399,6 @@
4753  {
4754         int ret;
4755  
4756 -       if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
4757 -               printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
4758 -               return -EINVAL;
4759 -       }
4760 -
4761         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
4762                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
4763                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
4764 @@ -9083,12 +10506,23 @@
4765         return NULL;
4766  }
4767  
4768 -/* Since this function may be called in D3-hot power state during
4769 - * tg3_init_one(), only config cycles are allowed.
4770 - */
4771  static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
4772  {
4773         u32 val;
4774 +       u16 pmcsr;
4775 +
4776 +       /* On some early chips the SRAM cannot be accessed in D3hot state,
4777 +        * so need make sure we're in D0.
4778 +        */
4779 +       pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
4780 +       pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4781 +       pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
4782 +#if (LINUX_VERSION_CODE < 0x20607)
4783 +       set_current_state(TASK_UNINTERRUPTIBLE);
4784 +       schedule_timeout(HZ / 1000);
4785 +#else
4786 +       msleep(1);
4787 +#endif
4788  
4789         /* Make sure register accesses (indirect or otherwise)
4790          * will function correctly.
4791 @@ -9096,9 +10530,34 @@
4792         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4793                                tp->misc_host_ctrl);
4794  
4795 +       /* The memory arbiter has to be enabled in order for SRAM accesses
4796 +        * to succeed.  Normally on powerup the tg3 chip firmware will make
4797 +        * sure it is enabled, but other entities such as system netboot
4798 +        * code might disable it.
4799 +        */
4800 +       val = tr32(MEMARB_MODE);
4801 +       tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4802 +
4803         tp->phy_id = PHY_ID_INVALID;
4804         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
4805  
4806 +       /* Assume an onboard device and WOL capable by default.  */
4807 +       tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
4808 +
4809 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4810 +               if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
4811 +                       tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
4812 +                       tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
4813 +               }
4814 +               val = tr32(VCPU_CFGSHDW);
4815 +               if (val & VCPU_CFGSHDW_ASPM_DBNC)
4816 +                       tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
4817 +               if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
4818 +                   (val & VCPU_CFGSHDW_WOL_MAGPKT))
4819 +                       tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
4820 +               return;
4821 +       }
4822 +
4823         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4824         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4825                 u32 nic_cfg, led_cfg;
4826 @@ -9195,18 +10654,30 @@
4827                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
4828                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
4829  
4830 -               if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
4831 -                   (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
4832 -                   (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
4833 +               if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
4834                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
4835 +                       if ((tp->pdev->subsystem_vendor ==
4836 +                            PCI_VENDOR_ID_ARIMA) &&
4837 +                           (tp->pdev->subsystem_device == 0x205a ||
4838 +                            tp->pdev->subsystem_device == 0x2063))
4839 +                               tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
4840 +               } else {
4841 +                       tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
4842 +                       tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
4843 +               }
4844  
4845                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4846                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4847                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4848                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4849                 }
4850 -               if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
4851 -                       tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
4852 +               if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
4853 +                   !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
4854 +                       tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
4855 +
4856 +               if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
4857 +                   nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
4858 +                       tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
4859  
4860                 if (cfg2 & (1 << 17))
4861                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
4862 @@ -9215,6 +10686,14 @@
4863                 /* bootcode if bit 18 is set */
4864                 if (cfg2 & (1 << 18))
4865                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
4866 +
4867 +               if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4868 +                       u32 cfg3;
4869 +
4870 +                       tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
4871 +                       if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
4872 +                               tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
4873 +               }
4874         }
4875  }
4876  
4877 @@ -9276,13 +10755,13 @@
4878  
4879         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
4880             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
4881 -               u32 bmsr, adv_reg, tg3_ctrl;
4882 +               u32 bmsr, adv_reg, tg3_ctrl, mask;
4883  
4884                 tg3_readphy(tp, MII_BMSR, &bmsr);
4885                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4886                     (bmsr & BMSR_LSTATUS))
4887                         goto skip_phy_reset;
4888 -                   
4889 +
4890                 err = tg3_phy_reset(tp);
4891                 if (err)
4892                         return err;
4893 @@ -9300,7 +10779,10 @@
4894                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
4895                 }
4896  
4897 -               if (!tg3_copper_is_advertising_all(tp)) {
4898 +               mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4899 +                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
4900 +                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
4901 +               if (!tg3_copper_is_advertising_all(tp, mask)) {
4902                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
4903  
4904                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
4905 @@ -9345,25 +10827,53 @@
4906  {
4907         unsigned char vpd_data[256];
4908         unsigned int i;
4909 +       u32 magic;
4910  
4911 -       if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
4912 -               /* Sun decided not to put the necessary bits in the
4913 -                * NVRAM of their onboard tg3 parts :(
4914 -                */
4915 -               strcpy(tp->board_part_number, "Sun 570X");
4916 -               return;
4917 -       }
4918 +       if (tg3_nvram_read_swab(tp, 0x0, &magic))
4919 +               goto out_not_found;
4920  
4921 -       for (i = 0; i < 256; i += 4) {
4922 -               u32 tmp;
4923 +       if (magic == TG3_EEPROM_MAGIC) {
4924 +               for (i = 0; i < 256; i += 4) {
4925 +                       u32 tmp;
4926 +
4927 +                       if (tg3_nvram_read(tp, 0x100 + i, &tmp))
4928 +                               goto out_not_found;
4929 +
4930 +                       vpd_data[i + 0] = ((tmp >>  0) & 0xff);
4931 +                       vpd_data[i + 1] = ((tmp >>  8) & 0xff);
4932 +                       vpd_data[i + 2] = ((tmp >> 16) & 0xff);
4933 +                       vpd_data[i + 3] = ((tmp >> 24) & 0xff);
4934 +               }
4935 +       } else {
4936 +               int vpd_cap;
4937  
4938 -               if (tg3_nvram_read(tp, 0x100 + i, &tmp))
4939 -                       goto out_not_found;
4940 +               vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
4941 +               for (i = 0; i < 256; i += 4) {
4942 +                       u32 tmp, j = 0;
4943 +                       u16 tmp16;
4944 +
4945 +                       pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
4946 +                                             i);
4947 +                       while (j++ < 100) {
4948 +                               pci_read_config_word(tp->pdev, vpd_cap +
4949 +                                                    PCI_VPD_ADDR, &tmp16);
4950 +                               if (tmp16 & 0x8000)
4951 +                                       break;
4952 +#if (LINUX_VERSION_CODE < 0x20607)
4953 +                               set_current_state(TASK_UNINTERRUPTIBLE);
4954 +                               schedule_timeout(1);
4955 +#else
4956 +                               msleep(1);
4957 +#endif
4958 +                       }
4959 +                       if (!(tmp16 & 0x8000))
4960 +                               goto out_not_found;
4961  
4962 -               vpd_data[i + 0] = ((tmp >>  0) & 0xff);
4963 -               vpd_data[i + 1] = ((tmp >>  8) & 0xff);
4964 -               vpd_data[i + 2] = ((tmp >> 16) & 0xff);
4965 -               vpd_data[i + 3] = ((tmp >> 24) & 0xff);
4966 +                       pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
4967 +                                             &tmp);
4968 +                       tmp = cpu_to_le32(tmp);
4969 +                       memcpy(&vpd_data[i], &tmp, 4);
4970 +               }
4971         }
4972  
4973         /* Now parse and find the part number. */
4974 @@ -9412,60 +10922,129 @@
4975         }
4976  
4977  out_not_found:
4978 -       strcpy(tp->board_part_number, "none");
4979 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
4980 +               strcpy(tp->board_part_number, "BCM95906");
4981 +       else
4982 +               strcpy(tp->board_part_number, "none");
4983  }
4984  
4985 -#ifdef CONFIG_SPARC64
4986 -static int __devinit tg3_is_sun_570X(struct tg3 *tp)
4987 +static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
4988  {
4989 -       struct pci_dev *pdev = tp->pdev;
4990 -       struct pcidev_cookie *pcp = pdev->sysdata;
4991 +       u32 val;
4992  
4993 -       if (pcp != NULL) {
4994 -               int node = pcp->prom_node;
4995 -               u32 venid;
4996 -               int err;
4997 -
4998 -               err = prom_getproperty(node, "subsystem-vendor-id",
4999 -                                      (char *) &venid, sizeof(venid));
5000 -               if (err == 0 || err == -1)
5001 -                       return 0;
5002 -               if (venid == PCI_VENDOR_ID_SUN)
5003 -                       return 1;
5004 +       if (tg3_nvram_read_swab(tp, offset, &val) ||
5005 +           (val & 0xfc000000) != 0x0c000000 ||
5006 +           tg3_nvram_read_swab(tp, offset + 4, &val) ||
5007 +           val != 0)
5008 +               return 0;
5009  
5010 -               /* TG3 chips onboard the SunBlade-2500 don't have the
5011 -                * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
5012 -                * are distinguishable from non-Sun variants by being
5013 -                * named "network" by the firmware.  Non-Sun cards will
5014 -                * show up as being named "ethernet".
5015 -                */
5016 -               if (!strcmp(pcp->prom_name, "network"))
5017 -                       return 1;
5018 +       return 1;
5019 +}
5020 +
5021 +static void __devinit tg3_read_fw_ver(struct tg3 *tp)
5022 +{
5023 +       u32 val, offset, start;
5024 +       u32 ver_offset;
5025 +       int i, bcnt;
5026 +
5027 +       if (tg3_nvram_read_swab(tp, 0, &val))
5028 +               return;
5029 +
5030 +       if (val != TG3_EEPROM_MAGIC)
5031 +               return;
5032 +
5033 +       if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
5034 +           tg3_nvram_read_swab(tp, 0x4, &start))
5035 +               return;
5036 +
5037 +       offset = tg3_nvram_logical_addr(tp, offset);
5038 +
5039 +       if (!tg3_fw_img_is_valid(tp, offset) ||
5040 +           tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
5041 +               return;
5042 +
5043 +       offset = offset + ver_offset - start;
5044 +       for (i = 0; i < 16; i += 4) {
5045 +               if (tg3_nvram_read(tp, offset + i, &val))
5046 +                       return;
5047 +
5048 +               val = le32_to_cpu(val);
5049 +               memcpy(tp->fw_ver + i, &val, 4);
5050         }
5051 -       return 0;
5052 +
5053 +       if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
5054 +               return;
5055 +
5056 +       for (offset = TG3_NVM_DIR_START;
5057 +            offset < TG3_NVM_DIR_END;
5058 +            offset += TG3_NVM_DIRENT_SIZE) {
5059 +               if (tg3_nvram_read_swab(tp, offset, &val))
5060 +                       return;
5061 +
5062 +               if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
5063 +                       break;
5064 +       }
5065 +
5066 +       if (offset == TG3_NVM_DIR_END)
5067 +               return;
5068 +
5069 +       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5070 +               start = 0x08000000;
5071 +       else if (tg3_nvram_read_swab(tp, offset - 4, &start))
5072 +               return;
5073 +
5074 +       if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
5075 +           !tg3_fw_img_is_valid(tp, offset) ||
5076 +           tg3_nvram_read_swab(tp, offset + 8, &val))
5077 +               return;
5078 +
5079 +       offset += val - start;
5080 +
5081 +       bcnt = strlen(tp->fw_ver);
5082 +
5083 +       tp->fw_ver[bcnt++] = ',';
5084 +       tp->fw_ver[bcnt++] = ' ';
5085 +
5086 +       for (i = 0; i < 4; i++) {
5087 +               if (tg3_nvram_read(tp, offset, &val))
5088 +                       return;
5089 +
5090 +               val = le32_to_cpu(val);
5091 +               offset += sizeof(val);
5092 +
5093 +               if (bcnt > TG3_VER_SIZE - sizeof(val)) {
5094 +                       memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
5095 +                       break;
5096 +               }
5097 +
5098 +               memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
5099 +               bcnt += sizeof(val);
5100 +       }
5101 +
5102 +       tp->fw_ver[TG3_VER_SIZE - 1] = 0;
5103  }
5104 -#endif
5105 +
5106 +static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
5107  
5108  static int __devinit tg3_get_invariants(struct tg3 *tp)
5109  {
5110 +#if (LINUX_VERSION_CODE >= 0x2060a)
5111         static struct pci_device_id write_reorder_chipsets[] = {
5112                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
5113                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
5114 +               { PCI_DEVICE(PCI_VENDOR_ID_AMD,
5115 +                            PCI_DEVICE_ID_AMD_8131_BRIDGE) },
5116                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
5117                              PCI_DEVICE_ID_VIA_8385_0) },
5118                 { },
5119         };
5120 +#endif
5121         u32 misc_ctrl_reg;
5122         u32 cacheline_sz_reg;
5123         u32 pci_state_reg, grc_misc_cfg;
5124         u32 val;
5125         u16 pci_cmd;
5126 -       int err;
5127 -
5128 -#ifdef CONFIG_SPARC64
5129 -       if (tg3_is_sun_570X(tp))
5130 -               tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
5131 -#endif
5132 +       int err, pcie_cap;
5133  
5134         /* Force memory write invalidate off.  If we leave it on,
5135          * then on 5700_BX chips we have to enable a workaround.
5136 @@ -9604,8 +11183,15 @@
5137         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
5138         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
5139  
5140 +       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
5141 +           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
5142 +               tp->pdev_peer = tg3_find_peer(tp);
5143 +
5144         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
5145             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5146 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5147 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5148 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
5149             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
5150                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
5151  
5152 @@ -9613,16 +11199,50 @@
5153             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
5154                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
5155  
5156 -       if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5157 -               tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
5158 +       if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5159 +               tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
5160 +               if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
5161 +                   GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
5162 +                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
5163 +                    tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
5164 +                    tp->pdev_peer == tp->pdev))
5165 +                       tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
5166 +
5167 +               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5168 +                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5169 +                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5170 +                       tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
5171 +                       tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
5172 +               } else {
5173 +                       tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
5174 +                       if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5175 +                               ASIC_REV_5750 &&
5176 +                           tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
5177 +                               tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
5178 +               }
5179 +       }
5180  
5181         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
5182             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
5183 -           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
5184 +           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
5185 +           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
5186 +           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
5187 +           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
5188                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
5189  
5190 -       if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
5191 +       pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
5192 +       if (pcie_cap != 0) {
5193                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
5194 +               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5195 +                       u16 lnkctl;
5196 +
5197 +                       pci_read_config_word(tp->pdev,
5198 +                                            pcie_cap + PCI_EXP_LNKCTL,
5199 +                                            &lnkctl);
5200 +                       if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
5201 +                               tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
5202 +               }
5203 +       }
5204  
5205         /* If we have an AMD 762 or VIA K8T800 chipset, write
5206          * reordering to the mailbox registers done by the host
5207 @@ -9630,7 +11250,16 @@
5208          * every mailbox register write to force the writes to be
5209          * posted to the chip in order.
5210          */
5211 +#if (LINUX_VERSION_CODE < 0x2060a)
5212 +       if ((pci_find_device(PCI_VENDOR_ID_AMD,
5213 +                            PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL) ||
5214 +            pci_find_device(PCI_VENDOR_ID_AMD,
5215 +                            PCI_DEVICE_ID_AMD_8131_BRIDGE, NULL) ||
5216 +            pci_find_device(PCI_VENDOR_ID_VIA,
5217 +                            PCI_DEVICE_ID_VIA_8385_0, NULL)) &&
5218 +#else
5219         if (pci_dev_present(write_reorder_chipsets) &&
5220 +#endif
5221             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5222                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5223  
5224 @@ -9647,10 +11276,24 @@
5225                                        cacheline_sz_reg);
5226         }
5227  
5228 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
5229 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
5230 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5231 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
5232 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
5233 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
5234 +               tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
5235 +               if (!tp->pcix_cap) {
5236 +                       printk(KERN_ERR PFX "Cannot find PCI-X "
5237 +                                           "capability, aborting.\n");
5238 +                       return -EIO;
5239 +               }
5240 +       }
5241 +
5242         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
5243                               &pci_state_reg);
5244  
5245 -       if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
5246 +       if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
5247                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
5248  
5249                 /* If this is a 5700 BX chipset, and we are in PCI-X
5250 @@ -9669,11 +11312,13 @@
5251                          * space registers clobbered due to this bug.
5252                          * So explicitly force the chip into D0 here.
5253                          */
5254 -                       pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
5255 +                       pci_read_config_dword(tp->pdev,
5256 +                                             tp->pm_cap + PCI_PM_CTRL,
5257                                               &pm_reg);
5258                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
5259                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
5260 -                       pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
5261 +                       pci_write_config_dword(tp->pdev,
5262 +                                              tp->pm_cap + PCI_PM_CTRL,
5263                                                pm_reg);
5264  
5265                         /* Also, force SERR#/PERR# in PCI command. */
5266 @@ -9689,17 +11334,6 @@
5267         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
5268                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
5269  
5270 -       /* Back to back register writes can cause problems on this chip,
5271 -        * the workaround is to read back all reg writes except those to
5272 -        * mailbox regs.  See tg3_write_indirect_reg32().
5273 -        *
5274 -        * PCI Express 5750_A0 rev chips need this workaround too.
5275 -        */
5276 -       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
5277 -           ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5278 -            tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
5279 -               tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
5280 -
5281         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
5282                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
5283         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
5284 @@ -9723,8 +11357,19 @@
5285         /* Various workaround register access methods */
5286         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
5287                 tp->write32 = tg3_write_indirect_reg32;
5288 -       else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
5289 +       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
5290 +                ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5291 +                 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
5292 +               /*
5293 +                * Back to back register writes can cause problems on these
5294 +                * chips, the workaround is to read back all reg writes
5295 +                * except those to mailbox regs.
5296 +                *
5297 +                * See tg3_write_indirect_reg32().
5298 +                */
5299                 tp->write32 = tg3_write_flush_reg32;
5300 +       }
5301 +
5302  
5303         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
5304             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
5305 @@ -9748,15 +11393,27 @@
5306                 pci_cmd &= ~PCI_COMMAND_MEMORY;
5307                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
5308         }
5309 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5310 +               tp->read32_mbox = tg3_read32_mbox_5906;
5311 +               tp->write32_mbox = tg3_write32_mbox_5906;
5312 +               tp->write32_tx_mbox = tg3_write32_mbox_5906;
5313 +               tp->write32_rx_mbox = tg3_write32_mbox_5906;
5314 +       }
5315 +
5316 +       if (tp->write32 == tg3_write_indirect_reg32 ||
5317 +           ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
5318 +            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
5319 +             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
5320 +               tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
5321  
5322         /* Get eeprom hw config before calling tg3_set_power_state().
5323 -        * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
5324 +        * In particular, the TG3_FLG2_IS_NIC flag must be
5325          * determined before calling tg3_set_power_state() so that
5326          * we know whether or not to switch out of Vaux power.
5327          * When the flag is set, it means that GPIO1 is used for eeprom
5328          * write protect and also implies that it is a LOM where GPIOs
5329          * are not used to switch power.
5330 -        */ 
5331 +        */
5332         tg3_get_eeprom_hw_cfg(tp);
5333  
5334         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
5335 @@ -9774,8 +11431,11 @@
5336         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5337                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
5338  
5339 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
5340 +               tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
5341 +
5342         /* Force the chip into D0. */
5343 -       err = tg3_set_power_state(tp, 0);
5344 +       err = tg3_set_power_state(tp, PCI_D0);
5345         if (err) {
5346                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
5347                        pci_name(tp->pdev));
5348 @@ -9788,15 +11448,6 @@
5349         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
5350                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
5351  
5352 -       /* Pseudo-header checksum is done by hardware logic and not
5353 -        * the offload processers, so make the chip do the pseudo-
5354 -        * header checksums on receive.  For transmit it is more
5355 -        * convenient to do the pseudo-header checksum in software
5356 -        * as Linux does that on transmit for us in all cases.
5357 -        */
5358 -       tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
5359 -       tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
5360 -
5361         /* Derive initial jumbo mode from MTU assigned in
5362          * ether_setup() via the alloc_etherdev() call
5363          */
5364 @@ -9819,6 +11470,7 @@
5365             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
5366              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
5367              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
5368 +           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
5369             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
5370                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
5371  
5372 @@ -9828,8 +11480,17 @@
5373         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
5374                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
5375  
5376 -       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5377 -               tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
5378 +       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5379 +               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5380 +                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
5381 +                       if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
5382 +                           tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
5383 +                               tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
5384 +                       if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
5385 +                               tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
5386 +               } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
5387 +                       tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
5388 +       }
5389  
5390         tp->coalesce_mode = 0;
5391         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
5392 @@ -9882,14 +11543,6 @@
5393         grc_misc_cfg = tr32(GRC_MISC_CFG);
5394         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
5395  
5396 -       /* Broadcom's driver says that CIOBE multisplit has a bug */
5397 -#if 0
5398 -       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5399 -           grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
5400 -               tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
5401 -               tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
5402 -       }
5403 -#endif
5404         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5405             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
5406              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
5407 @@ -9917,7 +11570,9 @@
5408               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
5409             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
5410              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
5411 -             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
5412 +             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
5413 +             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
5414 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
5415                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
5416  
5417         err = tg3_phy_probe(tp);
5418 @@ -9928,6 +11583,7 @@
5419         }
5420  
5421         tg3_read_partno(tp);
5422 +       tg3_read_fw_ver(tp);
5423  
5424         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5425                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
5426 @@ -9952,6 +11608,7 @@
5427          * upon subsystem IDs.
5428          */
5429         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
5430 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
5431             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5432                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
5433                                   TG3_FLAG_USE_LINKCHG_REG);
5434 @@ -9963,40 +11620,52 @@
5435         else
5436                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
5437  
5438 -       /* It seems all chips can get confused if TX buffers
5439 +       /* All chips before 5787 can get confused if TX buffers
5440          * straddle the 4GB address boundary in some cases.
5441          */
5442 -       tp->dev->hard_start_xmit = tg3_start_xmit;
5443 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5444 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5445 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
5446 +               tp->dev->hard_start_xmit = tg3_start_xmit;
5447 +       else
5448 +               tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
5449  
5450         tp->rx_offset = 2;
5451         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
5452             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
5453                 tp->rx_offset = 0;
5454  
5455 -       /* By default, disable wake-on-lan.  User can change this
5456 -        * using ETHTOOL_SWOL.
5457 +       tp->rx_std_max_post = TG3_RX_RING_SIZE;
5458 +
5459 +       /* Increment the rx prod index on the rx std ring by at most
5460 +        * 8 for these chips to workaround hw errata.
5461          */
5462 -       tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
5463 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
5464 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5465 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
5466 +               tp->rx_std_max_post = 8;
5467 +
5468 +       if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
5469 +               tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
5470 +                                    PCIE_PWR_MGMT_L1_THRESH_MSK;
5471  
5472         return err;
5473  }
5474  
5475 -#ifdef CONFIG_SPARC64
5476 +#ifdef CONFIG_SPARC
5477  static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
5478  {
5479         struct net_device *dev = tp->dev;
5480         struct pci_dev *pdev = tp->pdev;
5481 -       struct pcidev_cookie *pcp = pdev->sysdata;
5482 -
5483 -       if (pcp != NULL) {
5484 -               int node = pcp->prom_node;
5485 -
5486 -               if (prom_getproplen(node, "local-mac-address") == 6) {
5487 -                       prom_getproperty(node, "local-mac-address",
5488 -                                        dev->dev_addr, 6);
5489 -                       memcpy(dev->perm_addr, dev->dev_addr, 6);
5490 -                       return 0;
5491 -               }
5492 +       struct device_node *dp = pci_device_to_OF_node(pdev);
5493 +       const unsigned char *addr;
5494 +       int len;
5495 +
5496 +       addr = of_get_property(dp, "local-mac-address", &len);
5497 +       if (addr && len == 6) {
5498 +               memcpy(dev->dev_addr, addr, 6);
5499 +               memcpy(dev->perm_addr, dev->dev_addr, 6);
5500 +               return 0;
5501         }
5502         return -ENODEV;
5503  }
5504 @@ -10015,15 +11684,15 @@
5505  {
5506         struct net_device *dev = tp->dev;
5507         u32 hi, lo, mac_offset;
5508 +       int addr_ok = 0;
5509  
5510 -#ifdef CONFIG_SPARC64
5511 +#ifdef CONFIG_SPARC
5512         if (!tg3_get_macaddr_sparc(tp))
5513                 return 0;
5514  #endif
5515  
5516         mac_offset = 0x7c;
5517 -       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5518 -            !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
5519 +       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
5520             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
5521                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5522                         mac_offset = 0xcc;
5523 @@ -10032,6 +11701,8 @@
5524                 else
5525                         tg3_nvram_unlock(tp);
5526         }
5527 +       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
5528 +               mac_offset = 0x10;
5529  
5530         /* First try to get it from MAC address mailbox. */
5531         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
5532 @@ -10044,29 +11715,33 @@
5533                 dev->dev_addr[3] = (lo >> 16) & 0xff;
5534                 dev->dev_addr[4] = (lo >>  8) & 0xff;
5535                 dev->dev_addr[5] = (lo >>  0) & 0xff;
5536 +
5537 +               /* Some old bootcode may report a 0 MAC address in SRAM */
5538 +               addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
5539         }
5540 -       /* Next, try NVRAM. */
5541 -       else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
5542 -                !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
5543 -                !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
5544 -               dev->dev_addr[0] = ((hi >> 16) & 0xff);
5545 -               dev->dev_addr[1] = ((hi >> 24) & 0xff);
5546 -               dev->dev_addr[2] = ((lo >>  0) & 0xff);
5547 -               dev->dev_addr[3] = ((lo >>  8) & 0xff);
5548 -               dev->dev_addr[4] = ((lo >> 16) & 0xff);
5549 -               dev->dev_addr[5] = ((lo >> 24) & 0xff);
5550 -       }
5551 -       /* Finally just fetch it out of the MAC control regs. */
5552 -       else {
5553 -               hi = tr32(MAC_ADDR_0_HIGH);
5554 -               lo = tr32(MAC_ADDR_0_LOW);
5555 +       if (!addr_ok) {
5556 +               /* Next, try NVRAM. */
5557 +               if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
5558 +                   !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
5559 +                       dev->dev_addr[0] = ((hi >> 16) & 0xff);
5560 +                       dev->dev_addr[1] = ((hi >> 24) & 0xff);
5561 +                       dev->dev_addr[2] = ((lo >>  0) & 0xff);
5562 +                       dev->dev_addr[3] = ((lo >>  8) & 0xff);
5563 +                       dev->dev_addr[4] = ((lo >> 16) & 0xff);
5564 +                       dev->dev_addr[5] = ((lo >> 24) & 0xff);
5565 +               }
5566 +               /* Finally just fetch it out of the MAC control regs. */
5567 +               else {
5568 +                       hi = tr32(MAC_ADDR_0_HIGH);
5569 +                       lo = tr32(MAC_ADDR_0_LOW);
5570  
5571 -               dev->dev_addr[5] = lo & 0xff;
5572 -               dev->dev_addr[4] = (lo >> 8) & 0xff;
5573 -               dev->dev_addr[3] = (lo >> 16) & 0xff;
5574 -               dev->dev_addr[2] = (lo >> 24) & 0xff;
5575 -               dev->dev_addr[1] = hi & 0xff;
5576 -               dev->dev_addr[0] = (hi >> 8) & 0xff;
5577 +                       dev->dev_addr[5] = lo & 0xff;
5578 +                       dev->dev_addr[4] = (lo >> 8) & 0xff;
5579 +                       dev->dev_addr[3] = (lo >> 16) & 0xff;
5580 +                       dev->dev_addr[2] = (lo >> 24) & 0xff;
5581 +                       dev->dev_addr[1] = hi & 0xff;
5582 +                       dev->dev_addr[0] = (hi >> 8) & 0xff;
5583 +               }
5584         }
5585  
5586         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
5587 @@ -10076,7 +11751,9 @@
5588  #endif
5589                 return -EINVAL;
5590         }
5591 +#ifdef ETHTOOL_GPERMADDR
5592         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5593 +#endif
5594         return 0;
5595  }
5596  
5597 @@ -10333,6 +12010,7 @@
5598                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5599                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5600                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
5601 +                       u32 read_water = 0x7;
5602  
5603                         /* If the 5704 is behind the EPB bridge, we can
5604                          * do the less restrictive ONE_DMA workaround for
5605 @@ -10344,8 +12022,13 @@
5606                         else if (ccval == 0x6 || ccval == 0x7)
5607                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
5608  
5609 +                       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
5610 +                               read_water = 4;
5611                         /* Set bit 23 to enable PCIX hw bug fix */
5612 -                       tp->dma_rwctrl |= 0x009f0000;
5613 +                       tp->dma_rwctrl |=
5614 +                               (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
5615 +                               (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
5616 +                               (1 << 23);
5617                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
5618                         /* 5780 always in PCIX mode */
5619                         tp->dma_rwctrl |= 0x00144000;
5620 @@ -10457,17 +12140,25 @@
5621         }
5622         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
5623             DMA_RWCTRL_WRITE_BNDRY_16) {
5624 +#if (LINUX_VERSION_CODE >= 0x2060a)
5625                 static struct pci_device_id dma_wait_state_chipsets[] = {
5626                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
5627                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
5628                         { },
5629                 };
5630 +#endif
5631  
5632                 /* DMA test passed without adjusting DMA boundary,
5633                  * now look for chipsets that are known to expose the
5634                  * DMA bug without failing the test.
5635                  */
5636 -               if (pci_dev_present(dma_wait_state_chipsets)) {
5637 +#if (LINUX_VERSION_CODE < 0x2060a)
5638 +               if (pci_find_device(PCI_VENDOR_ID_APPLE,
5639 +                       PCI_DEVICE_ID_APPLE_UNI_N_PCI15, NULL))
5640 +#else
5641 +               if (pci_dev_present(dma_wait_state_chipsets))
5642 +#endif
5643 +               {
5644                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
5645                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
5646                 }
5647 @@ -10494,7 +12185,6 @@
5648         tp->link_config.speed = SPEED_INVALID;
5649         tp->link_config.duplex = DUPLEX_INVALID;
5650         tp->link_config.autoneg = AUTONEG_ENABLE;
5651 -       netif_carrier_off(tp->dev);
5652         tp->link_config.active_speed = SPEED_INVALID;
5653         tp->link_config.active_duplex = DUPLEX_INVALID;
5654         tp->link_config.phy_is_low_power = 0;
5655 @@ -10512,6 +12202,12 @@
5656                         DEFAULT_MB_MACRX_LOW_WATER_5705;
5657                 tp->bufmgr_config.mbuf_high_water =
5658                         DEFAULT_MB_HIGH_WATER_5705;
5659 +               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5660 +                       tp->bufmgr_config.mbuf_mac_rx_low_water =
5661 +                               DEFAULT_MB_MACRX_LOW_WATER_5906;
5662 +                       tp->bufmgr_config.mbuf_high_water =
5663 +                               DEFAULT_MB_HIGH_WATER_5906;
5664 +               }
5665  
5666                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
5667                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
5668 @@ -10553,6 +12249,10 @@
5669         case PHY_ID_BCM5752:    return "5752";
5670         case PHY_ID_BCM5714:    return "5714";
5671         case PHY_ID_BCM5780:    return "5780";
5672 +       case PHY_ID_BCM5755:    return "5755";
5673 +       case PHY_ID_BCM5787:    return "5787";
5674 +       case PHY_ID_BCM5756:    return "5722/5756";
5675 +       case PHY_ID_BCM5906:    return "5906";
5676         case PHY_ID_BCM8002:    return "8002/serdes";
5677         case 0:                 return "serdes";
5678         default:                return "unknown";
5679 @@ -10711,9 +12411,10 @@
5680         }
5681  
5682         SET_MODULE_OWNER(dev);
5683 +#if (LINUX_VERSION_CODE >= 0x20419)
5684         SET_NETDEV_DEV(dev, &pdev->dev);
5685 +#endif
5686  
5687 -       dev->features |= NETIF_F_LLTX;
5688  #if TG3_VLAN_TAG_USED
5689         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5690         dev->vlan_rx_register = tg3_vlan_rx_register;
5691 @@ -10755,9 +12456,12 @@
5692         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
5693  #endif
5694         spin_lock_init(&tp->lock);
5695 -       spin_lock_init(&tp->tx_lock);
5696         spin_lock_init(&tp->indirect_lock);
5697 +#if defined(INIT_DELAYED_WORK_DEFERRABLE) || defined(INIT_WORK_NAR)
5698 +       INIT_WORK(&tp->reset_task, tg3_reset_task);
5699 +#else
5700         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
5701 +#endif
5702  
5703         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
5704         if (tp->regs == 0UL) {
5705 @@ -10786,7 +12490,7 @@
5706         dev->watchdog_timeo = TG3_TX_TIMEOUT;
5707         dev->change_mtu = tg3_change_mtu;
5708         dev->irq = pdev->irq;
5709 -#ifdef CONFIG_NET_POLL_CONTROLLER
5710 +#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5711         dev->poll_controller = tg3_poll_controller;
5712  #endif
5713  
5714 @@ -10845,17 +12549,23 @@
5715         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
5716             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
5717             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
5718 +           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
5719             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
5720                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5721         } else {
5722 -               tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5723 +               tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
5724         }
5725  
5726 -       /* TSO is off by default, user can enable using ethtool.  */
5727 -#if 0
5728 -       if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
5729 +       /* TSO is on by default on chips that support hardware TSO.
5730 +        * Firmware TSO on older chips gives lower performance, so it
5731 +        * is off by default, but can be enabled using ethtool.
5732 +        */
5733 +       if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5734                 dev->features |= NETIF_F_TSO;
5735 -#endif
5736 +               if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
5737 +                   (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
5738 +                       dev->features |= NETIF_F_TSO6;
5739 +       }
5740  
5741  #endif
5742  
5743 @@ -10866,10 +12576,6 @@
5744                 tp->rx_pending = 63;
5745         }
5746  
5747 -       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
5748 -           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
5749 -               tp->pdev_peer = tg3_find_peer(tp);
5750 -
5751         err = tg3_get_device_address(tp);
5752         if (err) {
5753                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
5754 @@ -10884,7 +12590,6 @@
5755          */
5756         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
5757             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5758 -               pci_save_state(tp->pdev);
5759                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
5760                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5761         }
5762 @@ -10899,7 +12604,19 @@
5763          * checksumming.
5764          */
5765         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
5766 -               dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5767 +#ifdef NETIF_F_IPV6_CSUM
5768 +               dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
5769 +               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5770 +                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
5771 +                       dev->features |= NETIF_F_IPV6_CSUM;
5772 +#else
5773 +               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5774 +                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
5775 +                       dev->features |= NETIF_F_HW_CSUM;
5776 +               else
5777 +                       dev->features |= NETIF_F_IP_CSUM;
5778 +               dev->features |= NETIF_F_SG;
5779 +#endif
5780                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
5781         } else
5782                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
5783 @@ -10909,11 +12626,7 @@
5784  
5785         tg3_init_coal(tp);
5786  
5787 -       /* Now that we have fully setup the chip, save away a snapshot
5788 -        * of the PCI config space.  We need to restore this after
5789 -        * GRC_MISC_CFG core clock resets and some resume events.
5790 -        */
5791 -       pci_save_state(tp->pdev);
5792 +       pci_set_drvdata(pdev, dev);
5793  
5794         err = register_netdev(dev);
5795         if (err) {
5796 @@ -10922,29 +12635,27 @@
5797                 goto err_out_iounmap;
5798         }
5799  
5800 -       pci_set_drvdata(pdev, dev);
5801 -
5802 -       printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
5803 +       printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
5804                dev->name,
5805                tp->board_part_number,
5806                tp->pci_chip_rev_id,
5807                tg3_phy_string(tp),
5808                tg3_bus_string(tp, str),
5809 -              (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
5810 +              ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
5811 +               ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
5812 +                "10/100/1000Base-T")));
5813  
5814         for (i = 0; i < 6; i++)
5815                 printk("%2.2x%c", dev->dev_addr[i],
5816                        i == 5 ? '\n' : ':');
5817  
5818         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
5819 -              "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
5820 -              "TSOcap[%d] \n",
5821 +              "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
5822                dev->name,
5823                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
5824                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
5825                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
5826                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
5827 -              (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
5828                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
5829                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
5830         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
5831 @@ -10961,7 +12672,11 @@
5832         }
5833  
5834  err_out_free_dev:
5835 +#if (LINUX_VERSION_CODE >= 0x20418)
5836         free_netdev(dev);
5837 +#else
5838 +       kfree(dev);
5839 +#endif
5840  
5841  err_out_free_res:
5842         pci_release_regions(pdev);
5843 @@ -10979,20 +12694,30 @@
5844         if (dev) {
5845                 struct tg3 *tp = netdev_priv(dev);
5846  
5847 +#if (LINUX_VERSION_CODE >= 0x20600)
5848                 flush_scheduled_work();
5849 +#endif
5850                 unregister_netdev(dev);
5851                 if (tp->regs) {
5852                         iounmap(tp->regs);
5853                         tp->regs = NULL;
5854                 }
5855 +#if (LINUX_VERSION_CODE >= 0x20418)
5856                 free_netdev(dev);
5857 +#else
5858 +               kfree(dev);
5859 +#endif
5860                 pci_release_regions(pdev);
5861                 pci_disable_device(pdev);
5862                 pci_set_drvdata(pdev, NULL);
5863         }
5864  }
5865  
5866 +#if (LINUX_VERSION_CODE < 0x2060b)
5867 +static int tg3_suspend(struct pci_dev *pdev, u32 state)
5868 +#else
5869  static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
5870 +#endif
5871  {
5872         struct net_device *dev = pci_get_drvdata(pdev);
5873         struct tg3 *tp = netdev_priv(dev);
5874 @@ -11001,7 +12726,9 @@
5875         if (!netif_running(dev))
5876                 return 0;
5877  
5878 +#if (LINUX_VERSION_CODE >= 0x20600)
5879         flush_scheduled_work();
5880 +#endif
5881         tg3_netif_stop(tp);
5882  
5883         del_timer_sync(&tp->timer);
5884 @@ -11017,12 +12744,24 @@
5885         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5886         tg3_full_unlock(tp);
5887  
5888 +       /* Save MSI address and data for resume.  */
5889 +#if (LINUX_VERSION_CODE < 0x2060a)
5890 +       pci_save_state(pdev, tp->pci_cfg_state);
5891 +#else
5892 +       pci_save_state(pdev);
5893 +#endif
5894 +
5895 +#if (LINUX_VERSION_CODE < 0x2060b)
5896 +       err = tg3_set_power_state(tp, state);
5897 +#else
5898         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
5899 +#endif
5900         if (err) {
5901                 tg3_full_lock(tp, 0);
5902  
5903                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5904 -               tg3_init_hw(tp);
5905 +               if (tg3_restart_hw(tp, 1))
5906 +                       goto out;
5907  
5908                 tp->timer.expires = jiffies + tp->timer_offset;
5909                 add_timer(&tp->timer);
5910 @@ -11030,6 +12769,7 @@
5911                 netif_device_attach(dev);
5912                 tg3_netif_start(tp);
5913  
5914 +out:
5915                 tg3_full_unlock(tp);
5916         }
5917  
5918 @@ -11045,27 +12785,43 @@
5919         if (!netif_running(dev))
5920                 return 0;
5921  
5922 +#if (LINUX_VERSION_CODE < 0x2060a)
5923 +       pci_restore_state(tp->pdev, tp->pci_cfg_state);
5924 +#else
5925         pci_restore_state(tp->pdev);
5926 +#endif
5927  
5928 -       err = tg3_set_power_state(tp, 0);
5929 +       err = tg3_set_power_state(tp, PCI_D0);
5930         if (err)
5931                 return err;
5932  
5933 +       /* Hardware bug - MSI won't work if INTX disabled. */
5934 +       if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5935 +           (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
5936 +#if (LINUX_VERSION_CODE < 0x2060e)
5937 +               tg3_enable_intx(tp->pdev);
5938 +#else
5939 +               pci_intx(tp->pdev, 1);
5940 +#endif
5941 +
5942         netif_device_attach(dev);
5943  
5944         tg3_full_lock(tp, 0);
5945  
5946         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5947 -       tg3_init_hw(tp);
5948 +       err = tg3_restart_hw(tp, 1);
5949 +       if (err)
5950 +               goto out;
5951  
5952         tp->timer.expires = jiffies + tp->timer_offset;
5953         add_timer(&tp->timer);
5954  
5955         tg3_netif_start(tp);
5956  
5957 +out:
5958         tg3_full_unlock(tp);
5959  
5960 -       return 0;
5961 +       return err;
5962  }
5963  
5964  static struct pci_driver tg3_driver = {
5965 @@ -11079,7 +12835,11 @@
5966  
5967  static int __init tg3_init(void)
5968  {
5969 +#if (LINUX_VERSION_CODE < 0x020613)
5970         return pci_module_init(&tg3_driver);
5971 +#else
5972 +       return pci_register_driver(&tg3_driver);
5973 +#endif
5974  }
5975  
5976  static void __exit tg3_cleanup(void)
5977 diff -uNr linux-2.6.16.old/drivers/net/tg3.h linux-2.6.16/drivers/net/tg3.h
5978 --- linux-2.6.16.old/drivers/net/tg3.h  2006-03-20 06:53:29.000000000 +0100
5979 +++ linux-2.6.16/drivers/net/tg3.h      2007-09-15 01:53:08.000000000 +0200
5980 @@ -9,6 +9,442 @@
5981  #ifndef _T3_H
5982  #define _T3_H
5983  
5984 +#if !defined(__iomem)
5985 +#define __iomem
5986 +#endif
5987 +
5988 +#if !defined(PCI_DEVICE_ID_TIGON3_5704S_2)
5989 +#define PCI_DEVICE_ID_TIGON3_5704S_2   0x1649
5990 +#endif
5991 +
5992 +#if !defined(PCI_DEVICE_ID_TIGON3_5705F)
5993 +#define PCI_DEVICE_ID_TIGON3_5705F     0x166e
5994 +#endif
5995 +
5996 +#if !defined(PCI_DEVICE_ID_TIGON3_5720)
5997 +#define PCI_DEVICE_ID_TIGON3_5720      0x1658
5998 +#endif
5999 +
6000 +#if !defined(PCI_DEVICE_ID_TIGON3_5721)
6001 +#define PCI_DEVICE_ID_TIGON3_5721      0x1659
6002 +#endif
6003 +
6004 +#if !defined(PCI_DEVICE_ID_TIGON3_5750)
6005 +#define PCI_DEVICE_ID_TIGON3_5750      0x1676
6006 +#endif
6007 +
6008 +#if !defined(PCI_DEVICE_ID_TIGON3_5751)
6009 +#define PCI_DEVICE_ID_TIGON3_5751      0x1677
6010 +#endif
6011 +
6012 +#if !defined(PCI_DEVICE_ID_TIGON3_5750M)
6013 +#define PCI_DEVICE_ID_TIGON3_5750M     0x167c
6014 +#endif
6015 +
6016 +#if !defined(PCI_DEVICE_ID_TIGON3_5751M)
6017 +#define PCI_DEVICE_ID_TIGON3_5751M     0x167d
6018 +#endif
6019 +
6020 +#if !defined(PCI_DEVICE_ID_TIGON3_5751F)
6021 +#define PCI_DEVICE_ID_TIGON3_5751F     0x167e
6022 +#endif
6023 +
6024 +#if !defined(PCI_DEVICE_ID_TIGON3_5789)
6025 +#define        PCI_DEVICE_ID_TIGON3_5789       0x169d
6026 +#endif
6027 +
6028 +#if !defined(PCI_DEVICE_ID_TIGON3_5753)
6029 +#define PCI_DEVICE_ID_TIGON3_5753      0x16f7
6030 +#endif
6031 +
6032 +#if !defined(PCI_DEVICE_ID_TIGON3_5753M)
6033 +#define PCI_DEVICE_ID_TIGON3_5753M     0x16fd
6034 +#endif
6035 +
6036 +#if !defined(PCI_DEVICE_ID_TIGON3_5753F)
6037 +#define PCI_DEVICE_ID_TIGON3_5753F     0x16fe
6038 +#endif
6039 +
6040 +#if !defined(PCI_DEVICE_ID_TIGON3_5781)
6041 +#define PCI_DEVICE_ID_TIGON3_5781      0x16dd
6042 +#endif
6043 +
6044 +#if !defined(PCI_DEVICE_ID_TIGON3_5752)
6045 +#define PCI_DEVICE_ID_TIGON3_5752      0x1600
6046 +#endif
6047 +
6048 +#if !defined(PCI_DEVICE_ID_TIGON3_5752M)
6049 +#define PCI_DEVICE_ID_TIGON3_5752M     0x1601
6050 +#endif
6051 +
6052 +#if !defined(PCI_DEVICE_ID_TIGON3_5714)
6053 +#define PCI_DEVICE_ID_TIGON3_5714      0x1668
6054 +#endif
6055 +
6056 +#if !defined(PCI_DEVICE_ID_TIGON3_5714S)
6057 +#define PCI_DEVICE_ID_TIGON3_5714S     0x1669
6058 +#endif
6059 +
6060 +#if !defined(PCI_DEVICE_ID_TIGON3_5780)
6061 +#define PCI_DEVICE_ID_TIGON3_5780      0x166a
6062 +#endif
6063 +
6064 +#if !defined(PCI_DEVICE_ID_TIGON3_5780S)
6065 +#define PCI_DEVICE_ID_TIGON3_5780S     0x166b
6066 +#endif
6067 +
6068 +#if !defined(PCI_DEVICE_ID_TIGON3_5715)
6069 +#define PCI_DEVICE_ID_TIGON3_5715      0x1678
6070 +#endif
6071 +
6072 +#if !defined(PCI_DEVICE_ID_TIGON3_5715S)
6073 +#define PCI_DEVICE_ID_TIGON3_5715S     0x1679
6074 +#endif
6075 +
6076 +#if !defined(PCI_DEVICE_ID_TIGON3_5756)
6077 +#define PCI_DEVICE_ID_TIGON3_5756      0x1674
6078 +#endif
6079 +
6080 +#if !defined(PCI_DEVICE_ID_TIGON3_5754)
6081 +#define PCI_DEVICE_ID_TIGON3_5754      0x167a
6082 +#endif
6083 +
6084 +#if !defined(PCI_DEVICE_ID_TIGON3_5754M)
6085 +#define PCI_DEVICE_ID_TIGON3_5754M     0x1672
6086 +#endif
6087 +
6088 +#if !defined(PCI_DEVICE_ID_TIGON3_5755)
6089 +#define PCI_DEVICE_ID_TIGON3_5755      0x167b
6090 +#endif
6091 +
6092 +#if !defined(PCI_DEVICE_ID_TIGON3_5755M)
6093 +#define PCI_DEVICE_ID_TIGON3_5755M     0x1673
6094 +#endif
6095 +
6096 +#if !defined(PCI_DEVICE_ID_TIGON3_5722)
6097 +#define PCI_DEVICE_ID_TIGON3_5722      0x165a
6098 +#endif
6099 +
6100 +#if !defined(PCI_DEVICE_ID_TIGON3_5786)
6101 +#define PCI_DEVICE_ID_TIGON3_5786      0x169a
6102 +#endif
6103 +
6104 +#if !defined(PCI_DEVICE_ID_TIGON3_5787M)
6105 +#define PCI_DEVICE_ID_TIGON3_5787M     0x1693
6106 +#endif
6107 +
6108 +#if !defined(PCI_DEVICE_ID_TIGON3_5787)
6109 +#define PCI_DEVICE_ID_TIGON3_5787      0x169b
6110 +#endif
6111 +
6112 +#if !defined(PCI_DEVICE_ID_TIGON3_5787F)
6113 +#define PCI_DEVICE_ID_TIGON3_5787F     0x167f
6114 +#endif
6115 +
6116 +#if !defined(PCI_DEVICE_ID_TIGON3_5906)
6117 +#define PCI_DEVICE_ID_TIGON3_5906      0x1712
6118 +#endif
6119 +
6120 +#if !defined(PCI_DEVICE_ID_TIGON3_5906M)
6121 +#define PCI_DEVICE_ID_TIGON3_5906M     0x1713
6122 +#endif
6123 +
6124 +#if !defined(PCI_DEVICE_ID_APPLE_TIGON3)
6125 +#define PCI_DEVICE_ID_APPLE_TIGON3     0x1645
6126 +#endif
6127 +
6128 +#if !defined(PCI_DEVICE_ID_APPLE_UNI_N_PCI15)
6129 +#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15        0x002e
6130 +#endif
6131 +
6132 +#if !defined(PCI_DEVICE_ID_VIA_8385_0)
6133 +#define PCI_DEVICE_ID_VIA_8385_0       0x3188
6134 +#endif
6135 +
6136 +#if !defined(PCI_DEVICE_ID_AMD_8131_BRIDGE)
6137 +#define PCI_DEVICE_ID_AMD_8131_BRIDGE  0x7450
6138 +#endif
6139 +
6140 +#if !defined(PCI_DEVICE_ID_SERVERWORKS_EPB)
6141 +#define PCI_DEVICE_ID_SERVERWORKS_EPB  0x0103
6142 +#endif
6143 +
6144 +#if !defined(PCI_VENDOR_ID_ARIMA)
6145 +#define PCI_VENDOR_ID_ARIMA            0x161f
6146 +#endif
6147 +
6148 +#ifndef PCI_DEVICE
6149 +#define PCI_DEVICE(vend,dev) \
6150 +       .vendor = (vend), .device = (dev), \
6151 +       .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
6152 +#endif
6153 +
6154 +#if !defined(PCI_VPD_ADDR)
6155 +#define PCI_VPD_ADDR   2
6156 +#define PCI_VPD_DATA   4
6157 +#endif
6158 +
6159 +#ifndef NETDEV_TX_OK
6160 +#define NETDEV_TX_OK 0
6161 +#endif
6162 +
6163 +#ifndef NETDEV_TX_BUSY
6164 +#define NETDEV_TX_BUSY 1
6165 +#endif
6166 +
6167 +#ifndef NETDEV_TX_LOCKED
6168 +#define NETDEV_TX_LOCKED -1
6169 +#endif
6170 +
6171 +#ifdef NETIF_F_TSO
6172 +#ifndef NETIF_F_GSO
6173 +#define gso_size tso_size
6174 +#define gso_segs tso_segs
6175 +#endif
6176 +#ifndef NETIF_F_TSO6
6177 +#define NETIF_F_TSO6   0
6178 +#define BCM_NO_TSO6     1
6179 +#endif
6180 +
6181 +#if (LINUX_VERSION_CODE < 0x020616)
6182 +static inline int skb_transport_offset(const struct sk_buff *skb)
6183 +{
6184 +       return (int) (skb->h.raw - skb->data);
6185 +}
6186 +
6187 +static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
6188 +{
6189 +       return skb->nh.iph;
6190 +}
6191 +
6192 +static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
6193 +{
6194 +       return ip_hdr(skb)->ihl * 4;
6195 +}
6196 +
6197 +static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
6198 +{
6199 +       return skb->h.th;
6200 +}
6201 +
6202 +static inline unsigned int tcp_optlen(const struct sk_buff *skb)
6203 +{
6204 +       return (tcp_hdr(skb)->doff - 5) * 4;
6205 +}
6206 +#endif
6207 +#endif
6208 +
6209 +#ifndef CHECKSUM_PARTIAL
6210 +#define CHECKSUM_PARTIAL CHECKSUM_HW
6211 +#endif
6212 +
6213 +#ifndef DMA_64BIT_MASK
6214 +#define DMA_64BIT_MASK ((u64) 0xffffffffffffffffULL)
6215 +#define DMA_32BIT_MASK ((u64) 0x00000000ffffffffULL)
6216 +#endif
6217 +
6218 +#ifndef DMA_40BIT_MASK
6219 +#define DMA_40BIT_MASK ((u64) 0x000000ffffffffffULL)
6220 +#endif
6221 +
6222 +#ifndef mmiowb
6223 +#define mmiowb()
6224 +#endif
6225 +
6226 +#ifndef PCI_D0
6227 +typedef u32 pm_message_t;
6228 +typedef u32 pci_power_t;
6229 +#define PCI_D0         0
6230 +#define PCI_D1         1
6231 +#define PCI_D2         2
6232 +#define PCI_D3hot      3
6233 +#endif
6234 +
6235 +#ifndef WARN_ON
6236 +#define WARN_ON(x)
6237 +#endif
6238 +
6239 +#ifndef IRQ_RETVAL
6240 +typedef void irqreturn_t;
6241 +#define IRQ_RETVAL(x)
6242 +#define IRQ_HANDLED
6243 +#define IRQ_NONE
6244 +#endif
6245 +
6246 +#ifndef IRQF_SHARED
6247 +#define IRQF_SHARED SA_SHIRQ
6248 +#endif
6249 +
6250 +#ifndef IRQF_SAMPLE_RANDOM
6251 +#define IRQF_SAMPLE_RANDOM SA_SAMPLE_RANDOM
6252 +#endif
6253 +
6254 +#if (LINUX_VERSION_CODE < 0x020604)
6255 +#define MODULE_VERSION(version)
6256 +#endif
6257 +
6258 +#if (LINUX_VERSION_CODE <= 0x020600)
6259 +#define schedule_work(x)       schedule_task(x)
6260 +#define work_struct            tq_struct
6261 +#define INIT_WORK(x, y, z)     INIT_TQUEUE(x, y, z)
6262 +#endif
6263 +
6264 +#ifndef ADVERTISE_PAUSE
6265 +#define ADVERTISE_PAUSE_CAP            0x0400
6266 +#endif
6267 +#ifndef ADVERTISE_PAUSE_ASYM
6268 +#define ADVERTISE_PAUSE_ASYM           0x0800
6269 +#endif
6270 +#ifndef LPA_PAUSE
6271 +#define LPA_PAUSE_CAP                  0x0400
6272 +#endif
6273 +#ifndef LPA_PAUSE_ASYM
6274 +#define LPA_PAUSE_ASYM                 0x0800
6275 +#endif
6276 +#ifndef MII_CTRL1000
6277 +#define MII_CTRL1000                   0x9
6278 +#endif
6279 +#ifndef BMCR_SPEED1000
6280 +#define BMCR_SPEED1000                 0x40
6281 +#endif
6282 +#ifndef ADVERTISE_1000FULL
6283 +#define ADVERTISE_1000FULL             0x0200
6284 +#define ADVERTISE_1000HALF             0x0100
6285 +#endif
6286 +#ifndef ADVERTISE_1000XFULL
6287 +#define ADVERTISE_1000XFULL            0x20
6288 +#define ADVERTISE_1000XHALF            0x40
6289 +#define ADVERTISE_1000XPAUSE           0x80
6290 +#define ADVERTISE_1000XPSE_ASYM                0x100
6291 +#define LPA_1000XFULL                  0x20
6292 +#define LPA_1000XHALF                  0x40
6293 +#define LPA_1000XPAUSE                 0x80
6294 +#define LPA_1000XPAUSE_ASYM            0x100
6295 +#endif
6296 +
6297 +#if (LINUX_VERSION_CODE < 0x020605)
6298 +#define pci_dma_sync_single_for_cpu(pdev, map, len, dir)       \
6299 +       pci_dma_sync_single(pdev, map, len, dir)
6300 +
6301 +#define pci_dma_sync_single_for_device(pdev, map, len, dir)
6302 +#endif
6303 +
6304 +#if (LINUX_VERSION_CODE < 0x020600)
6305 +#define pci_get_device(x, y, z)        pci_find_device(x, y, z)
6306 +#define pci_get_slot(x, y)     pci_find_slot((x)->number, y)
6307 +#define pci_dev_put(x)
6308 +#endif
6309 +
6310 +#if (LINUX_VERSION_CODE < 0x020547)
6311 +#define pci_set_consistent_dma_mask(pdev, mask) (0)
6312 +#endif
6313 +
6314 +#ifndef PCI_CAP_ID_EXP
6315 +#define PCI_CAP_ID_EXP 0x10
6316 +#endif
6317 +#ifndef PCI_EXP_LNKCTL
6318 +#define PCI_EXP_LNKCTL 16
6319 +#endif
6320 +#ifndef PCI_EXP_LNKCTL_CLKREQ_EN
6321 +#define PCI_EXP_LNKCTL_CLKREQ_EN 0x100
6322 +#endif
6323 +
6324 +#if (LINUX_VERSION_CODE < 0x020612)
6325 +static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
6326 +               unsigned int length)
6327 +{
6328 +       struct sk_buff *skb = dev_alloc_skb(length);
6329 +       if (skb)
6330 +               skb->dev = dev;
6331 +       return skb;
6332 +}
6333 +#endif
6334 +
6335 +#ifndef NETIF_F_GSO
6336 +static inline void netif_tx_lock(struct net_device *dev)
6337 +{
6338 +       spin_lock(&dev->xmit_lock);
6339 +       dev->xmit_lock_owner = smp_processor_id();
6340 +}
6341 +
6342 +static inline void netif_tx_unlock(struct net_device *dev)
6343 +{
6344 +       dev->xmit_lock_owner = -1;
6345 +       spin_unlock(&dev->xmit_lock);
6346 +}
6347 +#endif
6348 +
6349 +#if !defined(HAVE_NETDEV_PRIV) && (LINUX_VERSION_CODE != 0x020603) && (LINUX_VERSION_CODE != 0x020604) && (LINUX_VERSION_CODE != 0x20605)
6350 +static inline void *netdev_priv(struct net_device *dev)
6351 +{
6352 +       return dev->priv;
6353 +}
6354 +#endif
6355 +
6356 +#ifdef OLD_NETIF
6357 +static inline void netif_poll_disable(struct net_device *dev)
6358 +{
6359 +       while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
6360 +               /* No hurry. */
6361 +               current->state = TASK_INTERRUPTIBLE;
6362 +               schedule_timeout(1);
6363 +       }
6364 +}
6365 +
6366 +static inline void netif_poll_enable(struct net_device *dev)
6367 +{
6368 +       clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
6369 +}
6370 +
6371 +static inline void netif_tx_disable(struct net_device *dev)
6372 +{
6373 +       spin_lock_bh(&dev->xmit_lock);
6374 +       netif_stop_queue(dev);
6375 +       spin_unlock_bh(&dev->xmit_lock);
6376 +}
6377 +
6378 +#endif
6379 +
6380 +#if (LINUX_VERSION_CODE < 0x2060c)
6381 +static inline int skb_header_cloned(struct sk_buff *skb) { return 0; }
6382 +#endif
6383 +
6384 +#if (LINUX_VERSION_CODE >= 0x20418) && (LINUX_VERSION_CODE < 0x2060c)
6385 +static int tg3_set_tx_hw_csum(struct net_device *dev, u32 data)
6386 +{
6387 +       if (data)
6388 +               dev->features |= NETIF_F_HW_CSUM;
6389 +       else
6390 +               dev->features &= ~NETIF_F_HW_CSUM;
6391 +
6392 +       return 0;
6393 +}
6394 +#endif
6395 +
6396 +#ifndef VLAN_GROUP_ARRAY_SPLIT_PARTS
6397 +static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
6398 +                                        struct net_device *dev)
6399 +{
6400 +       if (vg)
6401 +               vg->vlan_devices[vlan_id] = dev;
6402 +}
6403 +#endif
6404 +#if (LINUX_VERSION_CODE < 0x2060e)
6405 +static inline void tg3_enable_intx(struct pci_dev *pdev)
6406 +{
6407 +       u16 pci_command;
6408 +
6409 +       pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
6410 +       if (pci_command & PCI_COMMAND_INTX_DISABLE)
6411 +               pci_write_config_word(pdev, PCI_COMMAND,
6412 +                                     pci_command & ~PCI_COMMAND_INTX_DISABLE);
6413 +}
6414 +#endif
6415 +
6416 +#ifndef NETIF_F_LLTX
6417 +#define NETIF_F_LLTX   0
6418 +#endif
6419 +
6420  #define TG3_64BIT_REG_HIGH             0x00UL
6421  #define TG3_64BIT_REG_LOW              0x04UL
6422  
6423 @@ -24,6 +460,8 @@
6424  
6425  #define RX_COPY_THRESHOLD              256
6426  
6427 +#define TG3_RX_INTERNAL_RING_SZ_5906   32
6428 +
6429  #define RX_STD_MAX_SIZE                        1536
6430  #define RX_STD_MAX_SIZE_5705           512
6431  #define RX_JUMBO_MAX_SIZE              0xdeadbeef /* XXX */
6432 @@ -55,32 +493,10 @@
6433  #define TG3PCI_IRQ_PIN                 0x0000003d
6434  #define TG3PCI_MIN_GNT                 0x0000003e
6435  #define TG3PCI_MAX_LAT                 0x0000003f
6436 -#define TG3PCI_X_CAPS                  0x00000040
6437 -#define  PCIX_CAPS_RELAXED_ORDERING     0x00020000
6438 -#define  PCIX_CAPS_SPLIT_MASK           0x00700000
6439 -#define  PCIX_CAPS_SPLIT_SHIFT          20
6440 -#define  PCIX_CAPS_BURST_MASK           0x000c0000
6441 -#define  PCIX_CAPS_BURST_SHIFT          18
6442 -#define  PCIX_CAPS_MAX_BURST_CPIOB      2
6443 -#define TG3PCI_PM_CAP_PTR              0x00000041
6444 -#define TG3PCI_X_COMMAND               0x00000042
6445 -#define TG3PCI_X_STATUS                        0x00000044
6446 -#define TG3PCI_PM_CAP_ID               0x00000048
6447 -#define TG3PCI_VPD_CAP_PTR             0x00000049
6448 -#define TG3PCI_PM_CAPS                 0x0000004a
6449 -#define TG3PCI_PM_CTRL_STAT            0x0000004c
6450 -#define TG3PCI_BR_SUPP_EXT             0x0000004e
6451 -#define TG3PCI_PM_DATA                 0x0000004f
6452 -#define TG3PCI_VPD_CAP_ID              0x00000050
6453 -#define TG3PCI_MSI_CAP_PTR             0x00000051
6454 -#define TG3PCI_VPD_ADDR_FLAG           0x00000052
6455 -#define  VPD_ADDR_FLAG_WRITE           0x00008000
6456 -#define TG3PCI_VPD_DATA                        0x00000054
6457 -#define TG3PCI_MSI_CAP_ID              0x00000058
6458 -#define TG3PCI_NXT_CAP_PTR             0x00000059
6459 -#define TG3PCI_MSI_CTRL                        0x0000005a
6460 -#define TG3PCI_MSI_ADDR_LOW            0x0000005c
6461 -#define TG3PCI_MSI_ADDR_HIGH           0x00000060
6462 +#ifndef PCI_X_CMD_READ_2K
6463 +#define  PCI_X_CMD_READ_2K             0x0008
6464 +#endif
6465 +/* 0x40 --> 0x64 unused */
6466  #define TG3PCI_MSI_DATA                        0x00000064
6467  /* 0x66 --> 0x68 unused */
6468  #define TG3PCI_MISC_HOST_CTRL          0x00000068
6469 @@ -125,9 +541,12 @@
6470  #define  CHIPREV_ID_5750_A0             0x4000
6471  #define  CHIPREV_ID_5750_A1             0x4001
6472  #define  CHIPREV_ID_5750_A3             0x4003
6473 +#define  CHIPREV_ID_5750_C2             0x4202
6474  #define  CHIPREV_ID_5752_A0_HW          0x5000
6475  #define  CHIPREV_ID_5752_A0             0x6000
6476  #define  CHIPREV_ID_5752_A1             0x6001
6477 +#define  CHIPREV_ID_5714_A2             0x9002
6478 +#define  CHIPREV_ID_5906_A1             0xc001
6479  #define  GET_ASIC_REV(CHIP_REV_ID)     ((CHIP_REV_ID) >> 12)
6480  #define   ASIC_REV_5700                         0x07
6481  #define   ASIC_REV_5701                         0x00
6482 @@ -138,6 +557,9 @@
6483  #define   ASIC_REV_5752                         0x06
6484  #define   ASIC_REV_5780                         0x08
6485  #define   ASIC_REV_5714                         0x09
6486 +#define   ASIC_REV_5755                         0x0a
6487 +#define   ASIC_REV_5787                         0x0b
6488 +#define   ASIC_REV_5906                         0x0c
6489  #define  GET_CHIP_REV(CHIP_REV_ID)     ((CHIP_REV_ID) >> 8)
6490  #define   CHIPREV_5700_AX               0x70
6491  #define   CHIPREV_5700_BX               0x71
6492 @@ -455,6 +877,7 @@
6493  #define  RX_MODE_PROMISC                0x00000100
6494  #define  RX_MODE_NO_CRC_CHECK           0x00000200
6495  #define  RX_MODE_KEEP_VLAN_TAG          0x00000400
6496 +#define  RX_MODE_IPV6_CSUM_ENABLE       0x01000000
6497  #define MAC_RX_STATUS                  0x0000046c
6498  #define  RX_STATUS_REMOTE_TX_XOFFED     0x00000001
6499  #define  RX_STATUS_XOFF_RCVD            0x00000002
6500 @@ -642,7 +1065,8 @@
6501  #define  SNDDATAI_SCTRL_FORCE_ZERO      0x00000010
6502  #define SNDDATAI_STATSENAB             0x00000c0c
6503  #define SNDDATAI_STATSINCMASK          0x00000c10
6504 -/* 0xc14 --> 0xc80 unused */
6505 +#define ISO_PKT_TX                     0x00000c20
6506 +/* 0xc24 --> 0xc80 unused */
6507  #define SNDDATAI_COS_CNT_0             0x00000c80
6508  #define SNDDATAI_COS_CNT_1             0x00000c84
6509  #define SNDDATAI_COS_CNT_2             0x00000c88
6510 @@ -757,6 +1181,7 @@
6511  #define  RCVLPC_STATSCTRL_ENABLE        0x00000001
6512  #define  RCVLPC_STATSCTRL_FASTUPD       0x00000002
6513  #define RCVLPC_STATS_ENABLE            0x00002018
6514 +#define  RCVLPC_STATSENAB_DACK_FIX      0x00040000
6515  #define  RCVLPC_STATSENAB_LNGBRST_RFIX  0x00400000
6516  #define RCVLPC_STATS_INCMASK           0x0000201c
6517  /* 0x2020 --> 0x2100 unused */
6518 @@ -992,11 +1417,13 @@
6519  #define BUFMGR_MB_MACRX_LOW_WATER      0x00004414
6520  #define  DEFAULT_MB_MACRX_LOW_WATER      0x00000020
6521  #define  DEFAULT_MB_MACRX_LOW_WATER_5705  0x00000010
6522 +#define  DEFAULT_MB_MACRX_LOW_WATER_5906  0x00000004
6523  #define  DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
6524  #define  DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b
6525  #define BUFMGR_MB_HIGH_WATER           0x00004418
6526  #define  DEFAULT_MB_HIGH_WATER          0x00000060
6527  #define  DEFAULT_MB_HIGH_WATER_5705     0x00000060
6528 +#define  DEFAULT_MB_HIGH_WATER_5906     0x00000010
6529  #define  DEFAULT_MB_HIGH_WATER_JUMBO    0x0000017c
6530  #define  DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096
6531  #define BUFMGR_RX_MB_ALLOC_REQ         0x0000441c
6532 @@ -1133,7 +1560,17 @@
6533  #define TX_CPU_STATE                   0x00005404
6534  #define TX_CPU_PGMCTR                  0x0000541c
6535  
6536 +#define VCPU_STATUS                    0x00005100
6537 +#define  VCPU_STATUS_INIT_DONE          0x04000000
6538 +#define  VCPU_STATUS_DRV_RESET          0x08000000
6539 +
6540 +#define VCPU_CFGSHDW                   0x00005104
6541 +#define  VCPU_CFGSHDW_WOL_ENABLE        0x00000001
6542 +#define  VCPU_CFGSHDW_WOL_MAGPKT        0x00000004
6543 +#define  VCPU_CFGSHDW_ASPM_DBNC                 0x00001000
6544 +
6545  /* Mailboxes */
6546 +#define GRCMBOX_BASE                   0x00005600
6547  #define GRCMBOX_INTERRUPT_0            0x00005800 /* 64-bit */
6548  #define GRCMBOX_INTERRUPT_1            0x00005808 /* 64-bit */
6549  #define GRCMBOX_INTERRUPT_2            0x00005810 /* 64-bit */
6550 @@ -1333,12 +1770,16 @@
6551  #define  GRC_MISC_CFG_BOARD_ID_5788    0x00010000
6552  #define  GRC_MISC_CFG_BOARD_ID_5788M   0x00018000
6553  #define  GRC_MISC_CFG_BOARD_ID_AC91002A1 0x00018000
6554 +#define  GRC_MISC_CFG_BOARD_ID_5754    0x00008000
6555 +#define  GRC_MISC_CFG_BOARD_ID_5754M   0x0000c000
6556 +#define  GRC_MISC_CFG_EPHY_IDDQ                0x00200000
6557  #define  GRC_MISC_CFG_KEEP_GPHY_POWER  0x04000000
6558  #define GRC_LOCAL_CTRL                 0x00006808
6559  #define  GRC_LCLCTRL_INT_ACTIVE                0x00000001
6560  #define  GRC_LCLCTRL_CLEARINT          0x00000002
6561  #define  GRC_LCLCTRL_SETINT            0x00000004
6562  #define  GRC_LCLCTRL_INT_ON_ATTN       0x00000008
6563 +#define  GRC_LCLCTRL_GPIO_UART_SEL     0x00000010      /* 5755 only */
6564  #define  GRC_LCLCTRL_USE_SIG_DETECT    0x00000010      /* 5714/5780 only */
6565  #define  GRC_LCLCTRL_USE_EXT_SIG_DETECT        0x00000020      /* 5714/5780 only */
6566  #define  GRC_LCLCTRL_GPIO_INPUT3       0x00000020
6567 @@ -1392,7 +1833,11 @@
6568  #define GRC_EEPROM_CTRL                        0x00006840
6569  #define GRC_MDI_CTRL                   0x00006844
6570  #define GRC_SEEPROM_DELAY              0x00006848
6571 -/* 0x684c --> 0x6c00 unused */
6572 +/* 0x684c --> 0x6890 unused */
6573 +#define GRC_VCPU_EXT_CTRL              0x00006890
6574 +#define GRC_VCPU_EXT_CTRL_HALT_CPU      0x00400000
6575 +#define GRC_VCPU_EXT_CTRL_DISABLE_WOL   0x20000000
6576 +#define GRC_FASTBOOT_PC                        0x00006894      /* 5752, 5755, 5787 */
6577  
6578  /* 0x6c00 --> 0x7000 unused */
6579  
6580 @@ -1436,6 +1881,17 @@
6581  #define  FLASH_5752VENDOR_ST_M45PE10    0x02400000
6582  #define  FLASH_5752VENDOR_ST_M45PE20    0x02400002
6583  #define  FLASH_5752VENDOR_ST_M45PE40    0x02400001
6584 +#define  FLASH_5755VENDOR_ATMEL_FLASH_1         0x03400001
6585 +#define  FLASH_5755VENDOR_ATMEL_FLASH_2         0x03400002
6586 +#define  FLASH_5755VENDOR_ATMEL_FLASH_3         0x03400000
6587 +#define  FLASH_5755VENDOR_ATMEL_FLASH_4         0x00000003
6588 +#define  FLASH_5755VENDOR_ATMEL_FLASH_5         0x02000003
6589 +#define  FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ    0x03c00003
6590 +#define  FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ   0x03c00002
6591 +#define  FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ    0x03000003
6592 +#define  FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ   0x03000002
6593 +#define  FLASH_5787VENDOR_MICRO_EEPROM_64KHZ    0x03000000
6594 +#define  FLASH_5787VENDOR_MICRO_EEPROM_376KHZ   0x02000000
6595  #define  NVRAM_CFG1_5752PAGE_SIZE_MASK  0x70000000
6596  #define  FLASH_5752PAGE_SIZE_256        0x00000000
6597  #define  FLASH_5752PAGE_SIZE_512        0x10000000
6598 @@ -1468,9 +1924,26 @@
6599  #define NVRAM_WRITE1                   0x00007028
6600  /* 0x702c --> 0x7400 unused */
6601  
6602 -/* 0x7400 --> 0x8000 unused */
6603 +/* 0x7400 --> 0x7c00 unused */
6604 +#define PCIE_TRANSACTION_CFG           0x00007c04
6605 +#define PCIE_TRANS_CFG_1SHOT_MSI        0x20000000
6606 +#define PCIE_TRANS_CFG_LOM              0x00000020
6607 +
6608 +#define PCIE_PWR_MGMT_THRESH           0x00007d28
6609 +#define PCIE_PWR_MGMT_L1_THRESH_MSK     0x0000ff00
6610  
6611  #define TG3_EEPROM_MAGIC               0x669955aa
6612 +#define TG3_EEPROM_MAGIC_FW            0xa5000000
6613 +#define TG3_EEPROM_MAGIC_FW_MSK                0xff000000
6614 +#define TG3_EEPROM_MAGIC_HW            0xabcd
6615 +#define TG3_EEPROM_MAGIC_HW_MSK                0xffff
6616 +
6617 +#define TG3_NVM_DIR_START      0x18
6618 +#define TG3_NVM_DIR_END        0x78
6619 +#define TG3_NVM_DIRENT_SIZE    0xc
6620 +#define TG3_NVM_DIRTYPE_SHIFT   24
6621 +#define TG3_NVM_DIRTYPE_ASFINI   1
6622 +
6623  
6624  /* 32K Window into NIC internal memory */
6625  #define NIC_SRAM_WIN_BASE              0x00008000
6626 @@ -1520,6 +1993,7 @@
6627  #define  FWCMD_NICDRV_FIX_DMAR          0x00000005
6628  #define  FWCMD_NICDRV_FIX_DMAW          0x00000006
6629  #define  FWCMD_NICDRV_ALIVE2            0x0000000d
6630 +#define  FWCMD_NICDRV_ALIVE3            0x0000000e
6631  #define NIC_SRAM_FW_CMD_LEN_MBOX       0x00000b7c
6632  #define NIC_SRAM_FW_CMD_DATA_MBOX      0x00000b80
6633  #define NIC_SRAM_FW_ASF_STATUS_MBOX    0x00000c00
6634 @@ -1550,6 +2024,9 @@
6635  #define  SHASTA_EXT_LED_MAC             0x00010000
6636  #define  SHASTA_EXT_LED_COMBO           0x00018000
6637  
6638 +#define NIC_SRAM_DATA_CFG_3            0x00000d3c
6639 +#define  NIC_SRAM_ASPM_DEBOUNCE                 0x00000002
6640 +
6641  #define NIC_SRAM_RX_MINI_BUFFER_DESC   0x00001000
6642  
6643  #define NIC_SRAM_DMA_DESC_POOL_BASE    0x00002000
6644 @@ -1587,9 +2064,15 @@
6645  #define MII_TG3_DSP_RW_PORT            0x15 /* DSP coefficient read/write port */
6646  
6647  #define MII_TG3_DSP_ADDRESS            0x17 /* DSP address register */
6648 +#define MII_TG3_EPHY_PTEST             0x17 /* 5906 PHY register */
6649  
6650  #define MII_TG3_AUX_CTRL               0x18 /* auxilliary control register */
6651  
6652 +#define MII_TG3_AUXCTL_MISC_WREN       0x8000
6653 +#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX        0x0200
6654 +#define MII_TG3_AUXCTL_MISC_RDSEL_MISC 0x7000
6655 +#define MII_TG3_AUXCTL_SHDWSEL_MISC            0x0007
6656 +
6657  #define MII_TG3_AUX_STAT               0x19 /* auxilliary status register */
6658  #define MII_TG3_AUX_STAT_LPASS         0x0004
6659  #define MII_TG3_AUX_STAT_SPDMASK       0x0700
6660 @@ -1600,6 +2083,8 @@
6661  #define MII_TG3_AUX_STAT_100FULL       0x0500
6662  #define MII_TG3_AUX_STAT_1000HALF      0x0600
6663  #define MII_TG3_AUX_STAT_1000FULL      0x0700
6664 +#define MII_TG3_AUX_STAT_100           0x0008
6665 +#define MII_TG3_AUX_STAT_FULL          0x0001
6666  
6667  #define MII_TG3_ISTAT                  0x1a /* IRQ status register */
6668  #define MII_TG3_IMASK                  0x1b /* IRQ mask register */
6669 @@ -1610,6 +2095,16 @@
6670  #define MII_TG3_INT_DUPLEXCHG          0x0008
6671  #define MII_TG3_INT_ANEG_PAGE_RX       0x0400
6672  
6673 +#define MII_TG3_EPHY_TEST              0x1f /* 5906 PHY register */
6674 +#define MII_TG3_EPHY_SHADOW_EN         0x80
6675 +
6676 +#define MII_TG3_EPHYTST_MISCCTRL       0x10 /* 5906 EPHY misc ctrl shadow register */
6677 +#define MII_TG3_EPHYTST_MISCCTRL_MDIX  0x4000
6678 +
6679 +#define MII_TG3_TEST1                  0x1e
6680 +#define MII_TG3_TEST1_TRIM_EN          0x0010
6681 +#define MII_TG3_TEST1_CRC_EN           0x8000
6682 +
6683  /* There are two ways to manage the TX descriptors on the tigon3.
6684   * Either the descriptors are in host DMA'able memory, or they
6685   * exist only in the cards on-chip SRAM.  All 16 send bds are under
6686 @@ -1763,35 +2258,35 @@
6687  
6688  #define TG3_HW_STATUS_SIZE             0x50
6689  struct tg3_hw_status {
6690 -       u32                             status;
6691 +       volatile u32                    status;
6692  #define SD_STATUS_UPDATED              0x00000001
6693  #define SD_STATUS_LINK_CHG             0x00000002
6694  #define SD_STATUS_ERROR                        0x00000004
6695  
6696 -       u32                             status_tag;
6697 +       volatile u32                    status_tag;
6698  
6699  #ifdef __BIG_ENDIAN
6700 -       u16                             rx_consumer;
6701 -       u16                             rx_jumbo_consumer;
6702 +       volatile u16                    rx_consumer;
6703 +       volatile u16                    rx_jumbo_consumer;
6704  #else
6705 -       u16                             rx_jumbo_consumer;
6706 -       u16                             rx_consumer;
6707 +       volatile u16                    rx_jumbo_consumer;
6708 +       volatile u16                    rx_consumer;
6709  #endif
6710  
6711  #ifdef __BIG_ENDIAN
6712 -       u16                             reserved;
6713 -       u16                             rx_mini_consumer;
6714 +       volatile u16                    reserved;
6715 +       volatile u16                    rx_mini_consumer;
6716  #else
6717 -       u16                             rx_mini_consumer;
6718 -       u16                             reserved;
6719 +       volatile u16                    rx_mini_consumer;
6720 +       volatile u16                    reserved;
6721  #endif
6722         struct {
6723  #ifdef __BIG_ENDIAN
6724 -               u16                     tx_consumer;
6725 -               u16                     rx_producer;
6726 +               volatile u16            tx_consumer;
6727 +               volatile u16            rx_producer;
6728  #else
6729 -               u16                     rx_producer;
6730 -               u16                     tx_consumer;
6731 +               volatile u16            rx_producer;
6732 +               volatile u16            tx_consumer;
6733  #endif
6734         }                               idx[16];
6735  };
6736 @@ -2059,12 +2554,22 @@
6737  
6738         /* SMP locking strategy:
6739          *
6740 -        * lock: Held during all operations except TX packet
6741 -        *       processing.
6742 +        * lock: Held during reset, PHY access, timer, and when
6743 +        *       updating tg3_flags and tg3_flags2.
6744          *
6745 -        * tx_lock: Held during tg3_start_xmit and tg3_tx
6746 +        * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
6747 +        *                netif_tx_lock when it needs to call
6748 +        *                netif_wake_queue.
6749          *
6750          * Both of these locks are to be held with BH safety.
6751 +        *
6752 +        * Because the IRQ handler, tg3_poll, and tg3_start_xmit
6753 +        * are running lockless, it is necessary to completely
6754 +        * quiesce the chip with tg3_netif_stop and tg3_full_lock
6755 +        * before reconfiguring the device.
6756 +        *
6757 +        * indirect_lock: Held when accessing registers indirectly
6758 +        *                with IRQ disabling.
6759          */
6760         spinlock_t                      lock;
6761         spinlock_t                      indirect_lock;
6762 @@ -2091,8 +2596,6 @@
6763         u32                             tx_cons;
6764         u32                             tx_pending;
6765  
6766 -       spinlock_t                      tx_lock;
6767 -
6768         struct tg3_tx_buffer_desc       *tx_ring;
6769         struct tx_ring_info             *tx_buffers;
6770         dma_addr_t                      tx_desc_mapping;
6771 @@ -2112,6 +2615,7 @@
6772         struct tg3_rx_buffer_desc       *rx_std;
6773         struct ring_info                *rx_std_buffers;
6774         dma_addr_t                      rx_std_mapping;
6775 +       u32                             rx_std_max_post;
6776  
6777         struct tg3_rx_buffer_desc       *rx_jumbo;
6778         struct ring_info                *rx_jumbo_buffers;
6779 @@ -2138,13 +2642,9 @@
6780  #define TG3_FLAG_USE_LINKCHG_REG       0x00000008
6781  #define TG3_FLAG_USE_MI_INTERRUPT      0x00000010
6782  #define TG3_FLAG_ENABLE_ASF            0x00000020
6783 -#define TG3_FLAG_5701_REG_WRITE_BUG    0x00000040
6784 +#define TG3_FLAG_ASPM_WORKAROUND       0x00000040
6785  #define TG3_FLAG_POLL_SERDES           0x00000080
6786 -#if defined(CONFIG_X86)
6787  #define TG3_FLAG_MBOX_WRITE_REORDER    0x00000100
6788 -#else
6789 -#define TG3_FLAG_MBOX_WRITE_REORDER    0       /* disables code too */
6790 -#endif
6791  #define TG3_FLAG_PCIX_TARGET_HWBUG     0x00000200
6792  #define TG3_FLAG_WOL_SPEED_100MB       0x00000400
6793  #define TG3_FLAG_WOL_ENABLE            0x00000800
6794 @@ -2156,21 +2656,20 @@
6795  #define TG3_FLAG_PCIX_MODE             0x00020000
6796  #define TG3_FLAG_PCI_HIGH_SPEED                0x00040000
6797  #define TG3_FLAG_PCI_32BIT             0x00080000
6798 -#define TG3_FLAG_NO_TX_PSEUDO_CSUM     0x00100000
6799 -#define TG3_FLAG_NO_RX_PSEUDO_CSUM     0x00200000
6800 -#define TG3_FLAG_SERDES_WOL_CAP                0x00400000
6801 +#define TG3_FLAG_SRAM_USE_CONFIG       0x00100000
6802 +#define TG3_FLAG_TX_RECOVERY_PENDING   0x00200000
6803 +#define TG3_FLAG_WOL_CAP               0x00400000
6804  #define TG3_FLAG_JUMBO_RING_ENABLE     0x00800000
6805  #define TG3_FLAG_10_100_ONLY           0x01000000
6806  #define TG3_FLAG_PAUSE_AUTONEG         0x02000000
6807 -#define TG3_FLAG_IN_RESET_TASK         0x04000000
6808  #define TG3_FLAG_40BIT_DMA_BUG         0x08000000
6809  #define TG3_FLAG_BROKEN_CHECKSUMS      0x10000000
6810 -#define TG3_FLAG_GOT_SERDES_FLOWCTL    0x20000000
6811 -#define TG3_FLAG_SPLIT_MODE            0x40000000
6812 +#define TG3_FLAG_SUPPORT_MSI           0x20000000
6813 +#define TG3_FLAG_CHIP_RESETTING                0x40000000
6814  #define TG3_FLAG_INIT_COMPLETE         0x80000000
6815         u32                             tg3_flags2;
6816  #define TG3_FLG2_RESTART_TIMER         0x00000001
6817 -#define TG3_FLG2_SUN_570X              0x00000002
6818 +#define TG3_FLG2_TSO_BUG               0x00000002
6819  #define TG3_FLG2_NO_ETH_WIRE_SPEED     0x00000004
6820  #define TG3_FLG2_IS_5788               0x00000008
6821  #define TG3_FLG2_MAX_RXPEND_64         0x00000010
6822 @@ -2181,11 +2680,11 @@
6823  #define TG3_FLG2_PCI_EXPRESS           0x00000200
6824  #define TG3_FLG2_ASF_NEW_HANDSHAKE     0x00000400
6825  #define TG3_FLG2_HW_AUTONEG            0x00000800
6826 -#define TG3_FLG2_PHY_JUST_INITTED      0x00001000
6827 +#define TG3_FLG2_IS_NIC                        0x00001000
6828  #define TG3_FLG2_PHY_SERDES            0x00002000
6829  #define TG3_FLG2_CAPACITIVE_COUPLING   0x00004000
6830  #define TG3_FLG2_FLASH                 0x00008000
6831 -#define TG3_FLG2_HW_TSO                        0x00010000
6832 +#define TG3_FLG2_HW_TSO_1              0x00010000
6833  #define TG3_FLG2_SERDES_PREEMPHASIS    0x00020000
6834  #define TG3_FLG2_5705_PLUS             0x00040000
6835  #define TG3_FLG2_5750_PLUS             0x00080000
6836 @@ -2198,9 +2697,12 @@
6837  #define TG3_FLG2_PARALLEL_DETECT       0x01000000
6838  #define TG3_FLG2_ICH_WORKAROUND                0x02000000
6839  #define TG3_FLG2_5780_CLASS            0x04000000
6840 -
6841 -       u32                             split_mode_max_reqs;
6842 -#define SPLIT_MODE_5704_MAX_REQ                3
6843 +#define TG3_FLG2_HW_TSO_2              0x08000000
6844 +#define TG3_FLG2_HW_TSO                        (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
6845 +#define TG3_FLG2_1SHOT_MSI             0x10000000
6846 +#define TG3_FLG2_PHY_JITTER_BUG                0x20000000
6847 +#define TG3_FLG2_NO_FWARE_REPORTED     0x40000000
6848 +#define TG3_FLG2_PHY_ADJUST_TRIM       0x80000000
6849  
6850         struct timer_list               timer;
6851         u16                             timer_counter;
6852 @@ -2209,6 +2711,12 @@
6853         u16                             asf_counter;
6854         u16                             asf_multiplier;
6855  
6856 +       /* 1 second counter for transient serdes link events */
6857 +       u32                             serdes_counter;
6858 +#define SERDES_AN_TIMEOUT_5704S                2
6859 +#define SERDES_PARALLEL_DET_TIMEOUT    1
6860 +#define SERDES_AN_TIMEOUT_5714S                1
6861 +
6862         struct tg3_link_config          link_config;
6863         struct tg3_bufmgr_config        bufmgr_config;
6864  
6865 @@ -2222,6 +2730,7 @@
6866         u32                             grc_local_ctrl;
6867         u32                             dma_rwctrl;
6868         u32                             coalesce_mode;
6869 +       u32                             pwrmgmt_thresh;
6870  
6871         /* PCI block */
6872         u16                             pci_chip_rev_id;
6873 @@ -2229,9 +2738,11 @@
6874         u8                              pci_lat_timer;
6875         u8                              pci_hdr_type;
6876         u8                              pci_bist;
6877 +       u32                             pci_cfg_state[64 / sizeof(u32)];
6878  
6879         int                             pm_cap;
6880         int                             msi_cap;
6881 +       int                             pcix_cap;
6882  
6883         /* PHY info */
6884         u32                             phy_id;
6885 @@ -2247,6 +2758,10 @@
6886  #define PHY_ID_BCM5752                 0x60008100
6887  #define PHY_ID_BCM5714                 0x60008340
6888  #define PHY_ID_BCM5780                 0x60008350
6889 +#define PHY_ID_BCM5755                 0xbc050cc0
6890 +#define PHY_ID_BCM5787                 0xbc050ce0
6891 +#define PHY_ID_BCM5756                 0xbc050ed0
6892 +#define PHY_ID_BCM5906                 0xdc00ac40
6893  #define PHY_ID_BCM8002                 0x60010140
6894  #define PHY_ID_INVALID                 0xffffffff
6895  #define PHY_ID_REV_MASK                        0x0000000f
6896 @@ -2256,8 +2771,11 @@
6897  #define PHY_REV_BCM5411_X0             0x1 /* Found on Netgear GA302T */
6898  
6899         u32                             led_ctrl;
6900 +       u32                             pci_cmd;
6901  
6902         char                            board_part_number[24];
6903 +#define TG3_VER_SIZE 32
6904 +       char                            fw_ver[TG3_VER_SIZE];
6905         u32                             nic_sram_data_cfg;
6906         u32                             pci_clock_ctrl;
6907         struct pci_dev                  *pdev_peer;
6908 @@ -2271,7 +2789,9 @@
6909          (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
6910          (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
6911          (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \
6912 -        (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM8002)
6913 +        (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
6914 +        (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
6915 +        (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM8002)
6916  
6917         struct tg3_hw_stats             *hw_stats;
6918         dma_addr_t                      stats_mapping;
This page took 0.557873 seconds and 3 git commands to generate.