1 diff -uNr linux-2.6.16.orig/drivers/net/forcedeth.c linux-2.6.16/drivers/net/forcedeth.c
2 --- linux-2.6.16.orig/drivers/net/forcedeth.c   2006-03-20 06:53:29.000000000 +0100
3 +++ linux-2.6.16/drivers/net/forcedeth.c        2008-11-02 20:40:40.000000000 +0100
4 @@ -102,6 +102,19 @@
5   *     0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
6   *     0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
7   *     0.49: 10 Dec 2005: Fix tso for large buffers.
8 + *     0.50: 20 Jan 2006: Add 8021pq tagging support.
9 + *     0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
10 + *     0.52: 20 Jan 2006: Add MSI/MSIX support.
11 + *     0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
12 + *     0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
13 + *     0.55: 22 Mar 2006: Add flow control (pause frame).
14 + *     0.56: 22 Mar 2006: Additional ethtool and moduleparam support.
15 + *     0.57: 14 May 2006: Moved mac address writes to nv_probe and nv_remove.
16 + *     0.58: 20 May 2006: Optimized rx and tx data paths.
17 + *     0.59: 31 May 2006: Added support for sideband management unit.
18 + *     0.60: 31 May 2006: Added support for recoverable error.
19 + *     0.61: 18 Jul 2006: Added support for suspend/resume.
20 + *     0.62: 16 Jan 2007: Fixed statistics, mgmt communication, and low phy speed on S5.
21   *
22   * Known bugs:
23   * We suspect that on some hardware no TX done interrupts are generated.
24 @@ -113,8 +126,9 @@
25   * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
26   * superfluous timer interrupts from the nic.
27   */
28 -#define FORCEDETH_VERSION              "0.49"
29 +#define FORCEDETH_VERSION              "0.62-Driver Package V1.23"
30  #define DRV_NAME                       "forcedeth"
31 +#define DRV_DATE                       "2007/04/06"
32  
33  #include <linux/module.h>
34  #include <linux/types.h>
35 @@ -131,18 +145,240 @@
36  #include <linux/random.h>
37  #include <linux/init.h>
38  #include <linux/if_vlan.h>
39 +#include <linux/rtnetlink.h>
40 +#include <linux/reboot.h>
41 +#include <linux/version.h>
42 +
43 +#define RHES3                  0
44 +#define SLES9          1
45 +#define RHES4          2
46 +#define SUSE10         3 
47 +#define        FEDORA5         4 
48 +#define        FEDORA6         5
49 +
50 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17)
51 +#define NVVER FEDORA6
52 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13)
53 +#define NVVER FEDORA5          
54 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
55 +#define NVVER SUSE10           
56 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,6)
57 +#define NVVER RHES4    
58 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
59 +#define NVVER SLES9    
60 +#else
61 +#define NVVER RHES3    
62 +#endif
63 +
64 +#if NVVER > RHES3
65 +#include <linux/dma-mapping.h>
66 +#else
67 +#include <linux/forcedeth-compat.h>
68 +#endif
69  
70  #include <asm/irq.h>
71  #include <asm/io.h>
72  #include <asm/uaccess.h>
73  #include <asm/system.h>
74  
75 -#if 0
76 +#ifdef  NVLAN_DEBUG
77  #define dprintk                        printk
78  #else
79  #define dprintk(x...)          do { } while (0)
80  #endif
81  
82 +#define DPRINTK(nlevel,klevel,args...) (void)((debug & NETIF_MSG_##nlevel) && printk(klevel args))
83 +
84 +/* it should add in pci_ids.h */
85 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_12
86 +#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268 
87 +#endif
88 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_13
89 +#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269 
90 +#endif
91 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_14
92 +#define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372 
93 +#endif
94 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_15
95 +#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 
96 +#endif
97 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_16
98 +#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5
99 +#endif
100 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_17
101 +#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6 
102 +#endif
103 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_18
104 +#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE 
105 +#endif
106 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_19
107 +#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF 
108 +#endif
109 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_20
110 +#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450 
111 +#endif
112 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_21
113 +#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451 
114 +#endif
115 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_22
116 +#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452 
117 +#endif
118 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_23
119 +#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453 
120 +#endif
121 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_24
122 +#define PCI_DEVICE_ID_NVIDIA_NVENET_24 0x054c
123 +#endif
124 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_25
125 +#define PCI_DEVICE_ID_NVIDIA_NVENET_25 0x054d
126 +#endif
127 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_26
128 +#define PCI_DEVICE_ID_NVIDIA_NVENET_26 0x054e
129 +#endif
130 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_27
131 +#define PCI_DEVICE_ID_NVIDIA_NVENET_27 0x054f
132 +#endif
133 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_28
134 +#define PCI_DEVICE_ID_NVIDIA_NVENET_28 0x07DC
135 +#endif
136 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_29
137 +#define PCI_DEVICE_ID_NVIDIA_NVENET_29 0x07DD
138 +#endif
139 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_30
140 +#define PCI_DEVICE_ID_NVIDIA_NVENET_30 0x07DE
141 +#endif
142 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_31
143 +#define PCI_DEVICE_ID_NVIDIA_NVENET_31 0x07DF
144 +#endif
145 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_32
146 +#define PCI_DEVICE_ID_NVIDIA_NVENET_32 0x0760
147 +#endif
148 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_33
149 +#define PCI_DEVICE_ID_NVIDIA_NVENET_33 0x0761
150 +#endif
151 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_34
152 +#define PCI_DEVICE_ID_NVIDIA_NVENET_34 0x0762
153 +#endif
154 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_35
155 +#define PCI_DEVICE_ID_NVIDIA_NVENET_35 0x0763
156 +#endif
157 +
158 +/* it should add in mii.h */
159 +#ifndef ADVERTISE_1000HALF
160 +#define ADVERTISE_1000HALF     0x0100
161 +#endif
162 +#ifndef ADVERTISE_1000FULL
163 +#define ADVERTISE_1000FULL     0x0200
164 +#endif
165 +#ifndef ADVERTISE_PAUSE_CAP
166 +#define ADVERTISE_PAUSE_CAP    0x0400
167 +#endif
168 +#ifndef ADVERTISE_PAUSE_ASYM
169 +#define ADVERTISE_PAUSE_ASYM   0x0800
170 +#endif
171 +#ifndef MII_CTRL1000
172 +#define MII_CTRL1000           0x09 
173 +#endif
174 +#ifndef MII_STAT1000
175 +#define MII_STAT1000           0x0A 
176 +#endif
177 +#ifndef LPA_1000FULL
178 +#define LPA_1000FULL           0x0800
179 +#endif
180 +#ifndef LPA_1000HALF
181 +#define LPA_1000HALF           0x0400 
182 +#endif
183 +#ifndef LPA_PAUSE_CAP
184 +#define LPA_PAUSE_CAP          0x0400
185 +#endif
186 +#ifndef LPA_PAUSE_ASYM
187 +#define LPA_PAUSE_ASYM         0x0800
188 +#endif
189 +#ifndef BMCR_SPEED1000
190 +#define BMCR_SPEED1000         0x0040  /* MSB of Speed (1000)         */
191 +#endif
192 +
193 +#ifndef NETDEV_TX_OK
194 +#define NETDEV_TX_OK           0       /* driver took care of packet */
195 +#endif
196 +
197 +#ifndef NETDEV_TX_BUSY
198 +#define NETDEV_TX_BUSY                 1    /* driver tx path was busy*/
199 +#endif
200 +
201 +#ifndef DMA_39BIT_MASK
202 +#define DMA_39BIT_MASK         0x0000007fffffffffULL    
203 +#endif
204 +
205 +#ifndef __iomem
206 +#define __iomem 
207 +#endif
208 +
209 +#ifndef __bitwise
210 +#define __bitwise
211 +#endif
212 +
213 +#ifndef __force
214 +#define __force
215 +#endif
216 +
217 +#ifndef PCI_D0
218 +#define PCI_D0         ((int __bitwise __force) 0)
219 +#endif
220 +
221 +#ifndef PM_EVENT_SUSPEND 
222 +#define PM_EVENT_SUSPEND 2 
223 +#endif
224 +
225 +#if NVVER < SUSE10
226 +#define pm_message_t u32
227 +#endif
228 +
229 +/* rx/tx mac addr + type + vlan + align + slack*/
230 +#ifndef RX_NIC_BUFSIZE 
231 +#define RX_NIC_BUFSIZE         (ETH_DATA_LEN + 64)
232 +#endif
233 +/* even more slack */
234 +#ifndef RX_ALLOC_BUFSIZE       
235 +#define RX_ALLOC_BUFSIZE       (ETH_DATA_LEN + 128)
236 +#endif
237 +
238 +#ifndef PCI_DEVICE
239 +#define PCI_DEVICE(vend,dev) \
240 +       .vendor = (vend), .device = (dev), \
241 +       .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
242 +#endif
243 +
244 +#if NVVER < RHES4
245 +struct msix_entry {
246 +       u16 vector;     /* kernel uses to write allocated vector */
247 +       u16 entry;      /* driver uses to specify entry, OS writes */
248 +};
249 +#endif
250 +
251 +#ifndef PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET
252 +#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET 0x00
253 +#endif
254 +
255 +#ifndef PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 
256 +#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 0x04 
257 +#endif
258 +
259 +#ifndef PCI_MSIX_ENTRY_DATA_OFFSET
260 +#define PCI_MSIX_ENTRY_DATA_OFFSET 0x08
261 +#endif 
262 +
263 +#ifndef PCI_MSIX_ENTRY_SIZE
264 +#define PCI_MSIX_ENTRY_SIZE 16
265 +#endif
266 +
267 +#ifndef PCI_MSIX_FLAGS_BIRMASK
268 +#define PCI_MSIX_FLAGS_BIRMASK         (7 << 0)
269 +#endif
270 +
271 +#ifndef PCI_CAP_ID_MSIX
272 +#define PCI_CAP_ID_MSIX 0x11
273 +#endif
274  
275  /*
276   * Hardware access:
277 @@ -153,11 +389,40 @@
278  #define DEV_HAS_LARGEDESC      0x0004  /* device supports jumbo frames and needs packet format 2 */
279  #define DEV_HAS_HIGH_DMA        0x0008  /* device supports 64bit dma */
280  #define DEV_HAS_CHECKSUM        0x0010  /* device supports tx and rx checksum offloads */
281 +#define DEV_HAS_VLAN            0x0020  /* device supports vlan tagging and striping */
282 +#define DEV_HAS_MSI             0x0040  /* device supports MSI */
283 +#define DEV_HAS_MSI_X           0x0080  /* device supports MSI-X */
284 +#define DEV_HAS_POWER_CNTRL     0x0100  /* device supports power savings */
285 +#define DEV_HAS_PAUSEFRAME_TX   0x0200  /* device supports tx pause frames */
286 +#define DEV_HAS_STATISTICS_V1   0x0400  /* device supports hw statistics version 1 */
287 +#define DEV_HAS_STATISTICS_V2   0x0800  /* device supports hw statistics version 2 */
288 +#define DEV_HAS_TEST_EXTENDED   0x1000  /* device supports extended diagnostic test */
289 +#define DEV_HAS_MGMT_UNIT       0x2000  /* device supports management unit */
290 +#define DEV_HAS_CORRECT_MACADDR 0x4000  /* device supports correct mac address */
291 +
292 +#define NVIDIA_ETHERNET_ID(deviceid,nv_driver_data) {\
293 +               .vendor = PCI_VENDOR_ID_NVIDIA, \
294 +               .device = deviceid, \
295 +               .subvendor = PCI_ANY_ID, \
296 +               .subdevice = PCI_ANY_ID, \
297 +               .driver_data = nv_driver_data, \
298 +               },
299 +               
300 +#define Mv_LED_Control 16
301 +#define Mv_Page_Address 22
302 +#define Mv_LED_FORCE_OFF 0x88
303 +#define Mv_LED_DUAL_MODE3 0x40
304 +
305 +struct nvmsi_msg{
306 +       u32 address_lo;
307 +       u32 address_hi;
308 +       u32 data;
309 +};
310  
311  enum {
312         NvRegIrqStatus = 0x000,
313  #define NVREG_IRQSTAT_MIIEVENT 0x040
314 -#define NVREG_IRQSTAT_MASK             0x1ff
315 +#define NVREG_IRQSTAT_MASK             0x81ff
316         NvRegIrqMask = 0x004,
317  #define NVREG_IRQ_RX_ERROR             0x0001
318  #define NVREG_IRQ_RX                   0x0002
319 @@ -166,14 +431,18 @@
320  #define NVREG_IRQ_TX_OK                        0x0010
321  #define NVREG_IRQ_TIMER                        0x0020
322  #define NVREG_IRQ_LINK                 0x0040
323 -#define NVREG_IRQ_TX_ERROR             0x0080
324 -#define NVREG_IRQ_TX1                  0x0100
325 +#define NVREG_IRQ_RX_FORCED            0x0080
326 +#define NVREG_IRQ_TX_FORCED            0x0100
327 +#define NVREG_IRQ_RECOVER_ERROR                0x8000
328  #define NVREG_IRQMASK_THROUGHPUT       0x00df
329  #define NVREG_IRQMASK_CPU              0x0040
330 +#define NVREG_IRQ_TX_ALL               (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
331 +#define NVREG_IRQ_RX_ALL               (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
332 +#define NVREG_IRQ_OTHER                        (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
333  
334  #define NVREG_IRQ_UNKNOWN      (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
335 -                                       NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
336 -                                       NVREG_IRQ_TX1))
337 +                                       NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
338 +                                       NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))
339  
340         NvRegUnknownSetupReg6 = 0x008,
341  #define NVREG_UNKSETUP6_VAL            3
342 @@ -185,25 +454,45 @@
343         NvRegPollingInterval = 0x00c,
344  #define NVREG_POLL_DEFAULT_THROUGHPUT  970
345  #define NVREG_POLL_DEFAULT_CPU 13
346 +       NvRegMSIMap0 = 0x020,
347 +       NvRegMSIMap1 = 0x024,
348 +       NvRegMSIIrqMask = 0x030,
349 +#define NVREG_MSI_VECTOR_0_ENABLED 0x01
350         NvRegMisc1 = 0x080,
351 +#define NVREG_MISC1_PAUSE_TX   0x01
352  #define NVREG_MISC1_HD         0x02
353  #define NVREG_MISC1_FORCE      0x3b0f3c
354  
355 +       NvRegMacReset = 0x3c,
356 +#define NVREG_MAC_RESET_ASSERT 0x0F3
357         NvRegTransmitterControl = 0x084,
358  #define NVREG_XMITCTL_START    0x01
359 +#define NVREG_XMITCTL_MGMT_ST  0x40000000
360 +#define NVREG_XMITCTL_SYNC_MASK                0x000f0000
361 +#define NVREG_XMITCTL_SYNC_NOT_READY   0x0
362 +#define NVREG_XMITCTL_SYNC_PHY_INIT    0x00040000
363 +#define NVREG_XMITCTL_MGMT_SEMA_MASK   0x00000f00
364 +#define NVREG_XMITCTL_MGMT_SEMA_FREE   0x0
365 +#define NVREG_XMITCTL_HOST_SEMA_MASK   0x0000f000
366 +#define NVREG_XMITCTL_HOST_SEMA_ACQ    0x0000f000
367 +#define NVREG_XMITCTL_HOST_LOADED      0x00004000
368 +#define NVREG_XMITCTL_TX_PATH_EN       0x01000000
369         NvRegTransmitterStatus = 0x088,
370  #define NVREG_XMITSTAT_BUSY    0x01
371  
372         NvRegPacketFilterFlags = 0x8c,
373 -#define NVREG_PFF_ALWAYS       0x7F0008
374 +#define NVREG_PFF_PAUSE_RX     0x08
375 +#define NVREG_PFF_ALWAYS       0x7F0000
376  #define NVREG_PFF_PROMISC      0x80
377  #define NVREG_PFF_MYADDR       0x20
378 +#define NVREG_PFF_LOOPBACK     0x10
379  
380         NvRegOffloadConfig = 0x90,
381  #define NVREG_OFFLOAD_HOMEPHY  0x601
382  #define NVREG_OFFLOAD_NORMAL   RX_NIC_BUFSIZE
383         NvRegReceiverControl = 0x094,
384  #define NVREG_RCVCTL_START     0x01
385 +#define NVREG_RCVCTL_RX_PATH_EN        0x01000000
386         NvRegReceiverStatus = 0x98,
387  #define NVREG_RCVSTAT_BUSY     0x01
388  
389 @@ -213,10 +502,12 @@
390  #define NVREG_RNDSEED_FORCE2   0x2d00
391  #define NVREG_RNDSEED_FORCE3   0x7400
392  
393 -       NvRegUnknownSetupReg1 = 0xA0,
394 -#define NVREG_UNKSETUP1_VAL    0x16070f
395 -       NvRegUnknownSetupReg2 = 0xA4,
396 -#define NVREG_UNKSETUP2_VAL    0x16
397 +       NvRegTxDeferral = 0xA0,
398 +#define NVREG_TX_DEFERRAL_DEFAULT              0x15050f
399 +#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
400 +#define NVREG_TX_DEFERRAL_RGMII_1000   0x14050f
401 +       NvRegRxDeferral = 0xA4,
402 +#define NVREG_RX_DEFERRAL_DEFAULT      0x16
403         NvRegMacAddrA = 0xA8,
404         NvRegMacAddrB = 0xAC,
405         NvRegMulticastAddrA = 0xB0,
406 @@ -233,7 +524,8 @@
407         NvRegRingSizes = 0x108,
408  #define NVREG_RINGSZ_TXSHIFT 0
409  #define NVREG_RINGSZ_RXSHIFT 16
410 -       NvRegUnknownTransmitterReg = 0x10c,
411 +       NvRegTransmitPoll = 0x10c,
412 +#define NVREG_TRANSMITPOLL_MAC_ADDR_REV        0x00008000
413         NvRegLinkSpeed = 0x110,
414  #define NVREG_LINKSPEED_FORCE 0x10000
415  #define NVREG_LINKSPEED_10     1000
416 @@ -242,8 +534,10 @@
417  #define NVREG_LINKSPEED_MASK   (0xFFF)
418         NvRegUnknownSetupReg5 = 0x130,
419  #define NVREG_UNKSETUP5_BIT31  (1<<31)
420 -       NvRegUnknownSetupReg3 = 0x13c,
421 -#define NVREG_UNKSETUP3_VAL1   0x200010
422 +       NvRegTxWatermark = 0x13c,
423 +#define NVREG_TX_WM_DESC1_DEFAULT      0x0200010
424 +#define NVREG_TX_WM_DESC2_3_DEFAULT    0x1e08000
425 +#define NVREG_TX_WM_DESC2_3_1000       0xfe08000
426         NvRegTxRxControl = 0x144,
427  #define NVREG_TXRXCTL_KICK     0x0001
428  #define NVREG_TXRXCTL_BIT1     0x0002
429 @@ -252,15 +546,22 @@
430  #define NVREG_TXRXCTL_RESET    0x0010
431  #define NVREG_TXRXCTL_RXCHECK  0x0400
432  #define NVREG_TXRXCTL_DESC_1   0
433 -#define NVREG_TXRXCTL_DESC_2   0x02100
434 -#define NVREG_TXRXCTL_DESC_3   0x02200
435 +#define NVREG_TXRXCTL_DESC_2   0x002100
436 +#define NVREG_TXRXCTL_DESC_3   0xc02200
437 +#define NVREG_TXRXCTL_VLANSTRIP 0x00040
438 +#define NVREG_TXRXCTL_VLANINS  0x00080
439 +       NvRegTxRingPhysAddrHigh = 0x148,
440 +       NvRegRxRingPhysAddrHigh = 0x14C,
441 +       NvRegTxPauseFrame = 0x170,
442 +#define NVREG_TX_PAUSEFRAME_DISABLE    0x1ff0080
443 +#define NVREG_TX_PAUSEFRAME_ENABLE     0x0c00030
444         NvRegMIIStatus = 0x180,
445  #define NVREG_MIISTAT_ERROR            0x0001
446  #define NVREG_MIISTAT_LINKCHANGE       0x0008
447  #define NVREG_MIISTAT_MASK             0x000f
448  #define NVREG_MIISTAT_MASK2            0x000f
449 -       NvRegUnknownSetupReg4 = 0x184,
450 -#define NVREG_UNKSETUP4_VAL    8
451 +       NvRegMIIMask = 0x184,
452 +#define NVREG_MII_LINKCHANGE           0x0008
453  
454         NvRegAdapterControl = 0x188,
455  #define NVREG_ADAPTCTL_START   0x02
456 @@ -290,6 +591,7 @@
457  #define NVREG_WAKEUPFLAGS_ENABLE       0x1111
458  
459         NvRegPatternCRC = 0x204,
460 +#define NV_UNKNOWN_VAL  0x01
461         NvRegPatternMask = 0x208,
462         NvRegPowerCap = 0x268,
463  #define NVREG_POWERCAP_D3SUPP  (1<<30)
464 @@ -303,6 +605,43 @@
465  #define NVREG_POWERSTATE_D1            0x0001
466  #define NVREG_POWERSTATE_D2            0x0002
467  #define NVREG_POWERSTATE_D3            0x0003
468 +       NvRegTxCnt = 0x280,
469 +       NvRegTxZeroReXmt = 0x284,
470 +       NvRegTxOneReXmt = 0x288,
471 +       NvRegTxManyReXmt = 0x28c,
472 +       NvRegTxLateCol = 0x290,
473 +       NvRegTxUnderflow = 0x294,
474 +       NvRegTxLossCarrier = 0x298,
475 +       NvRegTxExcessDef = 0x29c,
476 +       NvRegTxRetryErr = 0x2a0,
477 +       NvRegRxFrameErr = 0x2a4,
478 +       NvRegRxExtraByte = 0x2a8,
479 +       NvRegRxLateCol = 0x2ac,
480 +       NvRegRxRunt = 0x2b0,
481 +       NvRegRxFrameTooLong = 0x2b4,
482 +       NvRegRxOverflow = 0x2b8,
483 +       NvRegRxFCSErr = 0x2bc,
484 +       NvRegRxFrameAlignErr = 0x2c0,
485 +       NvRegRxLenErr = 0x2c4,
486 +       NvRegRxUnicast = 0x2c8,
487 +       NvRegRxMulticast = 0x2cc,
488 +       NvRegRxBroadcast = 0x2d0,
489 +       NvRegTxDef = 0x2d4,
490 +       NvRegTxFrame = 0x2d8,
491 +       NvRegRxCnt = 0x2dc,
492 +       NvRegTxPause = 0x2e0,
493 +       NvRegRxPause = 0x2e4,
494 +       NvRegRxDropFrame = 0x2e8,
495 +
496 +       NvRegVlanControl = 0x300,
497 +#define NVREG_VLANCONTROL_ENABLE       0x2000
498 +       NvRegMSIXMap0 = 0x3e0,
499 +       NvRegMSIXMap1 = 0x3e4,
500 +       NvRegMSIXIrqStatus = 0x3f0,
501 +
502 +       NvRegPowerState2 = 0x600,
503 +#define NVREG_POWERSTATE2_POWERUP_MASK         0x0F11
504 +#define NVREG_POWERSTATE2_POWERUP_REV_A3       0x0001
505  };
506  
507  /* Big endian: should work, but is untested */
508 @@ -314,7 +653,7 @@
509  struct ring_desc_ex {
510         u32 PacketBufferHigh;
511         u32 PacketBufferLow;
512 -       u32 Reserved;
513 +       u32 TxVlan;
514         u32 FlagLen;
515  };
516  
517 @@ -335,7 +674,7 @@
518  #define NV_TX_CARRIERLOST      (1<<27)
519  #define NV_TX_LATECOLLISION    (1<<28)
520  #define NV_TX_UNDERFLOW                (1<<29)
521 -#define NV_TX_ERROR            (1<<30)
522 +#define NV_TX_ERROR            (1<<30) /* logical OR of all errors */
523  #define NV_TX_VALID            (1<<31)
524  
525  #define NV_TX2_LASTPACKET      (1<<29)
526 @@ -346,7 +685,7 @@
527  #define NV_TX2_LATECOLLISION   (1<<27)
528  #define NV_TX2_UNDERFLOW       (1<<28)
529  /* error and valid are the same for both */
530 -#define NV_TX2_ERROR           (1<<30)
531 +#define NV_TX2_ERROR           (1<<30) /* logical OR of all errors */
532  #define NV_TX2_VALID           (1<<31)
533  #define NV_TX2_TSO             (1<<28)
534  #define NV_TX2_TSO_SHIFT       14
535 @@ -355,6 +694,8 @@
536  #define NV_TX2_CHECKSUM_L3     (1<<27)
537  #define NV_TX2_CHECKSUM_L4     (1<<26)
538  
539 +#define NV_TX3_VLAN_TAG_PRESENT (1<<18)
540 +
541  #define NV_RX_DESCRIPTORVALID  (1<<16)
542  #define NV_RX_MISSEDFRAME      (1<<17)
543  #define NV_RX_SUBSTRACT1       (1<<18)
544 @@ -365,7 +706,7 @@
545  #define NV_RX_CRCERR           (1<<27)
546  #define NV_RX_OVERFLOW         (1<<28)
547  #define NV_RX_FRAMINGERR       (1<<29)
548 -#define NV_RX_ERROR            (1<<30)
549 +#define NV_RX_ERROR            (1<<30) /* logical OR of all errors */
550  #define NV_RX_AVAIL            (1<<31)
551  
552  #define NV_RX2_CHECKSUMMASK    (0x1C000000)
553 @@ -382,11 +723,16 @@
554  #define NV_RX2_OVERFLOW                (1<<23)
555  #define NV_RX2_FRAMINGERR      (1<<24)
556  /* error and avail are the same for both */
557 -#define NV_RX2_ERROR           (1<<30)
558 +#define NV_RX2_ERROR           (1<<30) /* logical OR of all errors */
559  #define NV_RX2_AVAIL           (1<<31)
560  
561 +#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
562 +#define NV_RX3_VLAN_TAG_MASK   (0x0000FFFF)
563 +
564  /* Miscelaneous hardware related defines: */
565 -#define NV_PCI_REGSZ           0x270
566 +#define NV_PCI_REGSZ_VER1              0x270
567 +#define NV_PCI_REGSZ_VER2              0x2d4
568 +#define NV_PCI_REGSZ_VER3              0x604
569  
570  /* various timeout delays: all in usec */
571  #define NV_TXRX_RESET_DELAY    4
572 @@ -403,6 +749,7 @@
573  #define NV_MIIBUSY_DELAY       50
574  #define NV_MIIPHY_DELAY        10
575  #define NV_MIIPHY_DELAYMAX     10000
576 +#define NV_MAC_RESET_DELAY     64
577  
578  #define NV_WAKEUPPATTERNS      5
579  #define NV_WAKEUPMASKENTRIES   4
580 @@ -410,16 +757,18 @@
581  /* General driver defaults */
582  #define NV_WATCHDOG_TIMEO      (5*HZ)
583  
584 -#define RX_RING                128
585 -#define TX_RING                256
586 +#define RX_RING_DEFAULT                128
587 +#define TX_RING_DEFAULT                64
588 +#define RX_RING_MIN            RX_RING_DEFAULT
589 +#define TX_RING_MIN            TX_RING_DEFAULT
590 +#define RING_MAX_DESC_VER_1    1024
591 +#define RING_MAX_DESC_VER_2_3  16384
592  /* 
593 - * If your nic mysteriously hangs then try to reduce the limits
594 - * to 1/0: It might be required to set NV_TX_LASTPACKET in the
595 - * last valid ring entry. But this would be impossible to
596 - * implement - probably a disassembly error.
597 + * Difference between the get and put pointers for the tx ring.
598 + * This is used to throttle the amount of data outstanding in the
599 + * tx ring.
600   */
601 -#define TX_LIMIT_STOP  255
602 -#define TX_LIMIT_START 254
603 +#define TX_LIMIT_DIFFERENCE    1
604  
605  /* rx/tx mac addr + type + vlan + align + slack*/
606  #define NV_RX_HEADERS          (64)
607 @@ -433,6 +782,7 @@
608  #define OOM_REFILL     (1+HZ/20)
609  #define POLL_WAIT      (1+HZ/100)
610  #define LINK_TIMEOUT   (3*HZ)
611 +#define STATS_INTERVAL (10*HZ)
612  
613  /* 
614   * desc_ver values:
615 @@ -448,16 +798,38 @@
616  /* PHY defines */
617  #define PHY_OUI_MARVELL        0x5043
618  #define PHY_OUI_CICADA 0x03f1
619 +#define PHY_OUI_VITESSE        0x01c1
620  #define PHYID1_OUI_MASK        0x03ff
621  #define PHYID1_OUI_SHFT        6
622  #define PHYID2_OUI_MASK        0xfc00
623  #define PHYID2_OUI_SHFT        10
624 -#define PHY_INIT1      0x0f000
625 -#define PHY_INIT2      0x0e00
626 -#define PHY_INIT3      0x01000
627 -#define PHY_INIT4      0x0200
628 -#define PHY_INIT5      0x0004
629 -#define PHY_INIT6      0x02000
630 +#define PHYID2_MODEL_MASK              0x03f0
631 +#define PHY_MODEL_MARVELL_E3016                0x220
632 +#define PHY_MODEL_MARVELL_E1011                0xb0
633 +#define PHY_MARVELL_E3016_INITMASK     0x0300
634 +#define PHY_CICADA_INIT1       0x0f000
635 +#define PHY_CICADA_INIT2       0x0e00
636 +#define PHY_CICADA_INIT3       0x01000
637 +#define PHY_CICADA_INIT4       0x0200
638 +#define PHY_CICADA_INIT5       0x0004
639 +#define PHY_CICADA_INIT6       0x02000
640 +#define PHY_VITESSE_INIT_REG1  0x1f
641 +#define PHY_VITESSE_INIT_REG2  0x10
642 +#define PHY_VITESSE_INIT_REG3  0x11
643 +#define PHY_VITESSE_INIT_REG4  0x12
644 +#define PHY_VITESSE_INIT_MSK1  0xc
645 +#define PHY_VITESSE_INIT_MSK2  0x0180
646 +#define PHY_VITESSE_INIT1      0x52b5
647 +#define PHY_VITESSE_INIT2      0xaf8a
648 +#define PHY_VITESSE_INIT3      0x8
649 +#define PHY_VITESSE_INIT4      0x8f8a
650 +#define PHY_VITESSE_INIT5      0xaf86
651 +#define PHY_VITESSE_INIT6      0x8f86
652 +#define PHY_VITESSE_INIT7      0xaf82
653 +#define PHY_VITESSE_INIT8      0x0100
654 +#define PHY_VITESSE_INIT9      0x8f82
655 +#define PHY_VITESSE_INIT10     0x0
656 +
657  #define PHY_GIGABIT    0x0100
658  
659  #define PHY_TIMEOUT    0x1
660 @@ -467,14 +839,148 @@
661  #define PHY_1000       0x2
662  #define PHY_HALF       0x100
663  
664 -/* FIXME: MII defines that should be added to <linux/mii.h> */
665 -#define MII_1000BT_CR  0x09
666 -#define MII_1000BT_SR  0x0a
667 -#define ADVERTISE_1000FULL     0x0200
668 -#define ADVERTISE_1000HALF     0x0100
669 -#define LPA_1000FULL   0x0800
670 -#define LPA_1000HALF   0x0400
671 +#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
672 +#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
673 +#define NV_PAUSEFRAME_RX_ENABLE  0x0004
674 +#define NV_PAUSEFRAME_TX_ENABLE  0x0008
675 +#define NV_PAUSEFRAME_RX_REQ     0x0010
676 +#define NV_PAUSEFRAME_TX_REQ     0x0020
677 +#define NV_PAUSEFRAME_AUTONEG    0x0040
678 +
679 +/* MSI/MSI-X defines */
680 +#define NV_MSI_X_MAX_VECTORS  8
681 +#define NV_MSI_X_VECTORS_MASK 0x000f
682 +#define NV_MSI_CAPABLE        0x0010
683 +#define NV_MSI_X_CAPABLE      0x0020
684 +#define NV_MSI_ENABLED        0x0040
685 +#define NV_MSI_X_ENABLED      0x0080
686 +
687 +#define NV_MSI_X_VECTOR_ALL   0x0
688 +#define NV_MSI_X_VECTOR_RX    0x0
689 +#define NV_MSI_X_VECTOR_TX    0x1
690 +#define NV_MSI_X_VECTOR_OTHER 0x2
691 +
692 +#define NVLAN_DISABLE_ALL_FEATURES  do { \
693 +       msi = NV_MSI_INT_DISABLED; \
694 +       msix = NV_MSIX_INT_DISABLED; \
695 +       scatter_gather = NV_SCATTER_GATHER_DISABLED; \
696 +       tso_offload = NV_TSO_DISABLED; \
697 +       tx_checksum_offload = NV_TX_CHECKSUM_DISABLED; \
698 +       rx_checksum_offload = NV_RX_CHECKSUM_DISABLED; \
699 +       tx_flow_control = NV_TX_FLOW_CONTROL_DISABLED; \
700 +       rx_flow_control = NV_RX_FLOW_CONTROL_DISABLED; \
701 +       wol = NV_WOL_DISABLED; \
702 +       tagging_8021pq = NV_8021PQ_DISABLED; \
703 +} while (0)
704 +   
705 +struct nv_ethtool_str {
706 +       char name[ETH_GSTRING_LEN];
707 +};
708 +
709 +static const struct nv_ethtool_str nv_estats_str[] = {
710 +       { "tx_dropped" },
711 +       { "tx_fifo_errors" },
712 +       { "tx_carrier_errors" },
713 +       { "tx_packets" },
714 +       { "tx_bytes" },
715 +       { "rx_crc_errors" },
716 +       { "rx_over_errors" },
717 +       { "rx_errors_total" },
718 +       { "rx_packets" },
719 +       { "rx_bytes" },
720 +       
721 +       /* hardware counters */
722 +       { "tx_zero_rexmt" },
723 +       { "tx_one_rexmt" },
724 +       { "tx_many_rexmt" },
725 +       { "tx_late_collision" },
726 +       { "tx_excess_deferral" },
727 +       { "tx_retry_error" },
728 +       { "rx_frame_error" },
729 +       { "rx_extra_byte" },
730 +       { "rx_late_collision" },
731 +       { "rx_runt" },
732 +       { "rx_frame_too_long" },
733 +       { "rx_frame_align_error" },
734 +       { "rx_length_error" },
735 +       { "rx_unicast" },
736 +       { "rx_multicast" },
737 +       { "rx_broadcast" },
738 +       { "tx_deferral" },
739 +       { "tx_pause" },
740 +       { "rx_pause" },
741 +       { "rx_drop_frame" }
742 +};
743 +
744 +struct nv_ethtool_stats {
745 +       u64 tx_dropped;
746 +       u64 tx_fifo_errors;
747 +       u64 tx_carrier_errors;
748 +       u64 tx_packets;
749 +       u64 tx_bytes;
750 +       u64 rx_crc_errors;
751 +       u64 rx_over_errors;
752 +       u64 rx_errors_total;
753 +       u64 rx_packets;
754 +       u64 rx_bytes;
755 +
756 +       /* hardware counters */
757 +       u64 tx_zero_rexmt;
758 +       u64 tx_one_rexmt;
759 +       u64 tx_many_rexmt;
760 +       u64 tx_late_collision;
761 +       u64 tx_excess_deferral;
762 +       u64 tx_retry_error;
763 +       u64 rx_frame_error;
764 +       u64 rx_extra_byte;
765 +       u64 rx_late_collision;
766 +       u64 rx_runt;
767 +       u64 rx_frame_too_long;
768 +       u64 rx_frame_align_error;
769 +       u64 rx_length_error;
770 +       u64 rx_unicast;
771 +       u64 rx_multicast;
772 +       u64 rx_broadcast;
773 +       u64 tx_deferral;
774 +       u64 tx_pause;
775 +       u64 rx_pause;
776 +       u64 rx_drop_frame;
777 +};
778 +#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
779 +#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 4)
780 +#define NV_DEV_STATISTICS_SW_COUNT 10
781 +
782 +/* diagnostics */
783 +#define NV_TEST_COUNT_BASE 3
784 +#define NV_TEST_COUNT_EXTENDED 4
785 +
786 +static const struct nv_ethtool_str nv_etests_str[] = {
787 +       { "link      (online/offline)" },
788 +       { "register  (offline)       " },
789 +       { "interrupt (offline)       " },
790 +       { "loopback  (offline)       " }
791 +};
792  
793 +struct register_test {
794 +       u32 reg;
795 +       u32 mask;
796 +};
797 +
798 +static const struct register_test nv_registers_test[] = {
799 +       { NvRegUnknownSetupReg6, 0x01 },
800 +       { NvRegMisc1, 0x03c },
801 +       { NvRegOffloadConfig, 0x03ff },
802 +       { NvRegMulticastAddrA, 0xffffffff },
803 +       { NvRegTxWatermark, 0x0ff },
804 +       { NvRegWakeUpFlags, 0x07777 },
805 +       { 0,0 }
806 +};
807 +
808 +struct nv_skb_map {
809 +       struct sk_buff *skb;
810 +       dma_addr_t dma;
811 +       unsigned int dma_len;
812 +};
813  
814  /*
815   * SMP locking:
816 @@ -489,57 +995,105 @@
817  
818  /* in dev: base, irq */
819  struct fe_priv {
820 +
821 +       /* fields used in fast path are grouped together 
822 +          for better cache performance
823 +       */
824         spinlock_t lock;
825 +       void __iomem *base;
826 +       struct pci_dev *pci_dev;
827 +       u32 txrxctl_bits;
828 +       int stop_tx;
829 +       int need_linktimer;
830 +       unsigned long link_timeout;
831 +       u32 irqmask;
832 +       u32 msi_flags;
833 +
834 +       unsigned int rx_buf_sz;
835 +       struct vlan_group *vlangrp;
836 +       int tx_ring_size;
837 +       int rx_csum;
838 +
839 +       /*
840 +        * rx specific fields in fast path
841 +        */
842 +       ring_type get_rx __attribute__((aligned(L1_CACHE_BYTES)));
843 +       ring_type put_rx, first_rx, last_rx;
844 +       struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
845 +       struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
846 +       
847 +       /*
848 +        * tx specific fields in fast path
849 +        */
850 +       ring_type get_tx __attribute__((aligned(L1_CACHE_BYTES)));
851 +       ring_type put_tx, first_tx, last_tx;
852 +       struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
853 +       struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
854 +
855 +       struct nv_skb_map *rx_skb;
856 +       struct nv_skb_map *tx_skb;
857  
858         /* General data:
859          * Locking: spin_lock(&np->lock); */
860         struct net_device_stats stats;
861 +       struct nv_ethtool_stats estats;
862         int in_shutdown;
863         u32 linkspeed;
864         int duplex;
865 +       int speed_duplex;
866         int autoneg;
867         int fixed_mode;
868         int phyaddr;
869         int wolenabled;
870         unsigned int phy_oui;
871 +       unsigned int phy_model;
872         u16 gigabit;
873 +       int intr_test;
874 +       int recover_error;
875  
876         /* General data: RO fields */
877         dma_addr_t ring_addr;
878 -       struct pci_dev *pci_dev;
879         u32 orig_mac[2];
880 -       u32 irqmask;
881         u32 desc_ver;
882 -       u32 txrxctl_bits;
883 -
884 -       void __iomem *base;
885 +       u32 vlanctl_bits;
886 +       u32 driver_data;
887 +       u32 register_size;
888 +       u32 mac_in_use;
889  
890         /* rx specific fields.
891          * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
892          */
893         ring_type rx_ring;
894 -       unsigned int cur_rx, refill_rx;
895 -       struct sk_buff *rx_skbuff[RX_RING];
896 -       dma_addr_t rx_dma[RX_RING];
897 -       unsigned int rx_buf_sz;
898         unsigned int pkt_limit;
899         struct timer_list oom_kick;
900         struct timer_list nic_poll;
901 -
902 -       /* media detection workaround.
903 -        * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
904 -        */
905 -       int need_linktimer;
906 -       unsigned long link_timeout;
907 +       struct timer_list stats_poll;
908 +       u32 nic_poll_irq;
909 +       int rx_ring_size;
910 +       u32 rx_len_errors;
911         /*
912          * tx specific fields.
913          */
914         ring_type tx_ring;
915 -       unsigned int next_tx, nic_tx;
916 -       struct sk_buff *tx_skbuff[TX_RING];
917 -       dma_addr_t tx_dma[TX_RING];
918 -       unsigned int tx_dma_len[TX_RING];
919         u32 tx_flags;
920 +       int tx_limit_start;
921 +       int tx_limit_stop;
922 +
923 +
924 +       /* msi/msi-x fields */
925 +       struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
926 +
927 +       /* flow control */
928 +       u32 pause_flags;
929 +       u32 led_stats[3];
930 +       u32 saved_config_space[64];
931 +       u32 saved_nvregphyinterface;
932 +#if NVVER < SUSE10
933 +       u32 pci_state[16];
934 +#endif
935 +       /* msix table */
936 +       struct nvmsi_msg nvmsg[NV_MSI_X_MAX_VECTORS];
937 +       unsigned long msix_pa_addr;
938  };
939  
940  /*
941 @@ -554,8 +1108,10 @@
942   * Throughput Mode: Every tx and rx packet will generate an interrupt.
943   * CPU Mode: Interrupts are controlled by a timer.
944   */
945 -#define NV_OPTIMIZATION_MODE_THROUGHPUT 0
946 -#define NV_OPTIMIZATION_MODE_CPU        1
947 +enum {
948 +       NV_OPTIMIZATION_MODE_THROUGHPUT, 
949 +       NV_OPTIMIZATION_MODE_CPU
950 +};
951  static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
952  
953  /*
954 @@ -567,14 +1123,221 @@
955   */
956  static int poll_interval = -1;
957  
958 +/*
959 + * MSI interrupts
960 + */
961 +enum {
962 +       NV_MSI_INT_DISABLED,
963 +       NV_MSI_INT_ENABLED
964 +};
965 +
966 +#ifdef CONFIG_PCI_MSI 
967 +static int msi = NV_MSI_INT_ENABLED;
968 +#else
969 +static int msi = NV_MSI_INT_DISABLED;
970 +#endif
971 +
972 +/*
973 + * MSIX interrupts
974 + */
975 +enum {
976 +       NV_MSIX_INT_DISABLED, 
977 +       NV_MSIX_INT_ENABLED
978 +};
979 +
980 +#ifdef CONFIG_PCI_MSI 
981 +static int msix = NV_MSIX_INT_ENABLED;
982 +#else
983 +static int msix = NV_MSIX_INT_DISABLED;
984 +#endif
985 +/*
986 + * PHY Speed and Duplex
987 + */
988 +enum {
989 +       NV_SPEED_DUPLEX_AUTO,
990 +       NV_SPEED_DUPLEX_10_HALF_DUPLEX,
991 +       NV_SPEED_DUPLEX_10_FULL_DUPLEX,
992 +       NV_SPEED_DUPLEX_100_HALF_DUPLEX,
993 +       NV_SPEED_DUPLEX_100_FULL_DUPLEX,
994 +       NV_SPEED_DUPLEX_1000_FULL_DUPLEX
995 +};
996 +static int speed_duplex = NV_SPEED_DUPLEX_AUTO;
997 +
998 +/*
999 + * PHY autonegotiation
1000 + */
1001 +static int autoneg = AUTONEG_ENABLE;
1002 +
1003 +/*
1004 + * Scatter gather
1005 + */
1006 +enum {
1007 +       NV_SCATTER_GATHER_DISABLED,
1008 +       NV_SCATTER_GATHER_ENABLED
1009 +};
1010 +static int scatter_gather = NV_SCATTER_GATHER_ENABLED;
1011 +
1012 +/*
1013 + * TCP Segmentation Offload (TSO)
1014 + */
1015 +enum {
1016 +       NV_TSO_DISABLED,
1017 +       NV_TSO_ENABLED
1018 +};
1019 +static int tso_offload = NV_TSO_ENABLED;
1020 +
1021 +/*
1022 + * MTU settings
1023 + */
1024 +static int mtu = ETH_DATA_LEN;
1025 +
1026 +/*
1027 + * Tx checksum offload
1028 + */
1029 +enum {
1030 +       NV_TX_CHECKSUM_DISABLED, 
1031 +       NV_TX_CHECKSUM_ENABLED 
1032 +};
1033 +static int tx_checksum_offload = NV_TX_CHECKSUM_ENABLED;
1034 +
1035 +/*
1036 + * Rx checksum offload
1037 + */
1038 +enum {
1039 +       NV_RX_CHECKSUM_DISABLED, 
1040 +       NV_RX_CHECKSUM_ENABLED 
1041 +};
1042 +static int rx_checksum_offload = NV_RX_CHECKSUM_ENABLED;
1043 +
1044 +/*
1045 + * Tx ring size
1046 + */
1047 +static int tx_ring_size = TX_RING_DEFAULT;
1048 +
1049 +/*
1050 + * Rx ring size
1051 + */
1052 +static int rx_ring_size = RX_RING_DEFAULT;
1053 +
1054 +/*
1055 + * Tx flow control
1056 + */
1057 +enum {
1058 +       NV_TX_FLOW_CONTROL_DISABLED, 
1059 +       NV_TX_FLOW_CONTROL_ENABLED
1060 +};
1061 +static int tx_flow_control = NV_TX_FLOW_CONTROL_ENABLED;
1062 +
1063 +/*
1064 + * Rx flow control
1065 + */
1066 +enum {
1067 +       NV_RX_FLOW_CONTROL_DISABLED, 
1068 +       NV_RX_FLOW_CONTROL_ENABLED
1069 +};
1070 +static int rx_flow_control = NV_RX_FLOW_CONTROL_ENABLED;
1071 +
1072 +/*
1073 + * DMA 64bit
1074 + */
1075 +enum {
1076 +       NV_DMA_64BIT_DISABLED,
1077 +       NV_DMA_64BIT_ENABLED
1078 +};
1079 +static int dma_64bit = NV_DMA_64BIT_ENABLED;
1080 +
1081 +/*
1082 + * Wake On Lan
1083 + */
1084 +enum {
1085 +       NV_WOL_DISABLED,
1086 +       NV_WOL_ENABLED
1087 +};
1088 +static int wol = NV_WOL_DISABLED;
1089 +
1090 +/*
1091 + * Tagging 802.1pq
1092 + */
1093 +enum {
1094 +       NV_8021PQ_DISABLED,
1095 +       NV_8021PQ_ENABLED
1096 +};
1097 +static int tagging_8021pq = NV_8021PQ_ENABLED;
1098 +
1099 +enum {
1100 +       NV_LOW_POWER_DISABLED,
1101 +       NV_LOW_POWER_ENABLED
1102 +};
1103 +static int lowpowerspeed = NV_LOW_POWER_ENABLED;
1104 +
1105 +static int debug = 0;
1106 +
1107 +#if NVVER < RHES4
1108 +static inline unsigned long nv_msecs_to_jiffies(const unsigned int m)
1109 +{
1110 +#if HZ <= 1000 && !(1000 % HZ)
1111 +        return (m + (1000 / HZ) - 1) / (1000 / HZ);
1112 +#elif HZ > 1000 && !(HZ % 1000)
1113 +        return m * (HZ / 1000);
1114 +#else
1115 +        return (m * HZ + 999) / 1000;
1116 +#endif
1117 +}
1118 +#endif
1119 +
1120 +static void nv_msleep(unsigned int msecs)
1121 +{
1122 +#if NVVER > SLES9 
1123 +       msleep(msecs);
1124 +#else
1125 +       unsigned long timeout = nv_msecs_to_jiffies(msecs);
1126 +
1127 +       while (timeout) {
1128 +               set_current_state(TASK_UNINTERRUPTIBLE);
1129 +               timeout = schedule_timeout(timeout);
1130 +       }
1131 +#endif
1132 +}
1133 +
1134  static inline struct fe_priv *get_nvpriv(struct net_device *dev)
1135  {
1136 +#if NVVER > RHES3 
1137         return netdev_priv(dev);
1138 +#else
1139 +       return (struct fe_priv *) dev->priv;
1140 +#endif
1141 +}
1142 +
1143 +static void __init quirk_nforce_network_class(struct pci_dev *pdev)
1144 +{
1145 +       /* Some implementations of the nVidia network controllers
1146 +        * show up as bridges, when we need to see them as network
1147 +        * devices.
1148 +        */
1149 +
1150 +       /* If this is already known as a network ctlr, do nothing. */
1151 +       if ((pdev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET)
1152 +               return;
1153 +
1154 +       if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_OTHER) {
1155 +               char    c;
1156 +
1157 +               /* Clearing bit 6 of the register at 0xf8
1158 +                * selects Ethernet device class
1159 +                */
1160 +               pci_read_config_byte(pdev, 0xf8, &c);
1161 +               c &= 0xbf;
1162 +               pci_write_config_byte(pdev, 0xf8, c);
1163 +
1164 +               /* sysfs needs pdev->class to be set correctly */
1165 +               pdev->class &= 0x0000ff;
1166 +               pdev->class |= (PCI_CLASS_NETWORK_ETHERNET << 8);
1167 +       }
1168  }
1169  
1170  static inline u8 __iomem *get_hwbase(struct net_device *dev)
1171  {
1172 -       return ((struct fe_priv *)netdev_priv(dev))->base;
1173 +       return ((struct fe_priv *)get_nvpriv(dev))->base;
1174  }
1175  
1176  static inline void pci_push(u8 __iomem *base)
1177 @@ -612,22 +1375,137 @@
1178         return 0;
1179  }
1180  
1181 -#define MII_READ       (-1)
1182 -/* mii_rw: read/write a register on the PHY.
1183 - *
1184 - * Caller must guarantee serialization
1185 - */
1186 -static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1187 +#define NV_SETUP_RX_RING 0x01
1188 +#define NV_SETUP_TX_RING 0x02
1189 +
1190 +static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
1191  {
1192 +       struct fe_priv *np = get_nvpriv(dev);
1193         u8 __iomem *base = get_hwbase(dev);
1194 -       u32 reg;
1195 -       int retval;
1196 -
1197 -       writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1198  
1199 -       reg = readl(base + NvRegMIIControl);
1200 -       if (reg & NVREG_MIICTL_INUSE) {
1201 -               writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
1202 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1203 +               if (rxtx_flags & NV_SETUP_RX_RING) {
1204 +                       writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
1205 +               }
1206 +               if (rxtx_flags & NV_SETUP_TX_RING) {
1207 +                       writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1208 +               }
1209 +       } else {
1210 +               if (rxtx_flags & NV_SETUP_RX_RING) {
1211 +                       writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
1212 +                       writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
1213 +               }
1214 +               if (rxtx_flags & NV_SETUP_TX_RING) {
1215 +                       writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1216 +                       writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
1217 +               }
1218 +       }
1219 +}
1220 +
1221 +static void free_rings(struct net_device *dev)
1222 +{
1223 +       struct fe_priv *np = get_nvpriv(dev);
1224 +
1225 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1226 +               if(np->rx_ring.orig)
1227 +                       pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
1228 +                                           np->rx_ring.orig, np->ring_addr);
1229 +       } else {
1230 +               if (np->rx_ring.ex)
1231 +                       pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
1232 +                                           np->rx_ring.ex, np->ring_addr);
1233 +       }
1234 +       if (np->rx_skb)
1235 +               kfree(np->rx_skb);
1236 +       if (np->tx_skb)
1237 +               kfree(np->tx_skb);      
1238 +}
1239 +
1240 +static int using_multi_irqs(struct net_device *dev)
1241 +{
1242 +       struct fe_priv *np = get_nvpriv(dev);
1243 +
1244 +       if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
1245 +           ((np->msi_flags & NV_MSI_X_ENABLED) && 
1246 +            ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
1247 +               return 0;
1248 +       else
1249 +               return 1;
1250 +}
1251 +
1252 +static void nv_enable_irq(struct net_device *dev)
1253 +{
1254 +       struct fe_priv *np = get_nvpriv(dev);
1255 +
1256 +       dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
1257 +       /* modify network device class id */    
1258 +       if (!using_multi_irqs(dev)) {
1259 +               if (np->msi_flags & NV_MSI_X_ENABLED)
1260 +                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1261 +               else
1262 +                       enable_irq(dev->irq);
1263 +       } else {
1264 +               enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1265 +               enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1266 +               enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1267 +       }
1268 +}
1269 +
1270 +static void nv_disable_irq(struct net_device *dev)
1271 +{
1272 +       struct fe_priv *np = get_nvpriv(dev);
1273 +
1274 +       dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
1275 +       if (!using_multi_irqs(dev)) {
1276 +               if (np->msi_flags & NV_MSI_X_ENABLED)
1277 +                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1278 +               else
1279 +                       disable_irq(dev->irq);
1280 +       } else {
1281 +               disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1282 +               disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1283 +               disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1284 +       }
1285 +}
1286 +
1287 +/* In MSIX mode, a write to irqmask behaves as XOR */
1288 +static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
1289 +{
1290 +       u8 __iomem *base = get_hwbase(dev);
1291 +
1292 +       writel(mask, base + NvRegIrqMask);
1293 +}
1294 +
1295 +static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
1296 +{
1297 +       struct fe_priv *np = get_nvpriv(dev);
1298 +       u8 __iomem *base = get_hwbase(dev);
1299 +
1300 +       if (np->msi_flags & NV_MSI_X_ENABLED) {
1301 +               writel(mask, base + NvRegIrqMask);
1302 +       } else {
1303 +               if (np->msi_flags & NV_MSI_ENABLED)
1304 +                       writel(0, base + NvRegMSIIrqMask);
1305 +               writel(0, base + NvRegIrqMask);
1306 +       }
1307 +}
1308 +
1309 +#define MII_READ       (-1)
1310 +/* mii_rw: read/write a register on the PHY.
1311 + *
1312 + * Caller must guarantee serialization
1313 + */
1314 +static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1315 +{
1316 +       u8 __iomem *base = get_hwbase(dev);
1317 +       u32 reg;
1318 +       int retval;
1319 +
1320 +       writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1321 +
1322 +       reg = readl(base + NvRegMIIControl);
1323 +       if (reg & NVREG_MIICTL_INUSE) {
1324 +               writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
1325                 udelay(NV_MIIBUSY_DELAY);
1326         }
1327  
1328 @@ -661,29 +1539,112 @@
1329         return retval;
1330  }
1331  
1332 -static int phy_reset(struct net_device *dev)
1333 +static void nv_save_LED_stats(struct net_device *dev)
1334 +{
1335 +       struct fe_priv *np = get_nvpriv(dev);
1336 +       u32 reg=0;
1337 +       u32 value=0;
1338 +       int i=0;
1339 +
1340 +       reg = Mv_Page_Address;
1341 +       value = 3;
1342 +       mii_rw(dev,np->phyaddr,reg,value);
1343 +       udelay(5);
1344 +
1345 +       reg = Mv_LED_Control;
1346 +       for(i=0;i<3;i++){
1347 +               np->led_stats[i]=mii_rw(dev,np->phyaddr,reg+i,MII_READ);        
1348 +               dprintk(KERN_DEBUG "%s: save LED reg%d: value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
1349 +       }
1350 +
1351 +}
1352 +
1353 +static void nv_restore_LED_stats(struct net_device *dev)
1354 +{
1355 +
1356 +       struct fe_priv *np = get_nvpriv(dev);
1357 +       u32 reg=0;
1358 +       u32 value=0;
1359 +       int i=0;
1360 +
1361 +       reg = Mv_Page_Address;
1362 +       value = 3;
1363 +       mii_rw(dev,np->phyaddr,reg,value);
1364 +       udelay(5);
1365 +
1366 +       reg = Mv_LED_Control;
1367 +       for(i=0;i<3;i++){
1368 +               mii_rw(dev,np->phyaddr,reg+i,np->led_stats[i]); 
1369 +               udelay(1);
1370 +               dprintk(KERN_DEBUG "%s: restore LED reg%d: value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
1371 +       }
1372 +
1373 +}
1374 +
1375 +static void nv_LED_on(struct net_device *dev)
1376 +{
1377 +       struct fe_priv *np = get_nvpriv(dev);
1378 +       u32 reg=0;
1379 +       u32 value=0;
1380 +
1381 +       reg = Mv_Page_Address;
1382 +       value = 3;
1383 +       mii_rw(dev,np->phyaddr,reg,value);
1384 +       udelay(5);
1385 +
1386 +       reg = Mv_LED_Control;
1387 +       mii_rw(dev,np->phyaddr,reg,Mv_LED_DUAL_MODE3);  
1388 +
1389 +}
1390 +
1391 +static void nv_LED_off(struct net_device *dev)
1392 +{
1393 +       struct fe_priv *np = get_nvpriv(dev);
1394 +       u32 reg=0;
1395 +       u32 value=0;
1396 +
1397 +       reg = Mv_Page_Address;
1398 +       value = 3;
1399 +       mii_rw(dev,np->phyaddr,reg,value);
1400 +       udelay(5);
1401 +
1402 +       reg = Mv_LED_Control;
1403 +       mii_rw(dev,np->phyaddr,reg,Mv_LED_FORCE_OFF);   
1404 +       udelay(1);
1405 +
1406 +}
1407 +
1408 +static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1409  {
1410 -       struct fe_priv *np = netdev_priv(dev);
1411 +       struct fe_priv *np = get_nvpriv(dev);
1412         u32 miicontrol;
1413         unsigned int tries = 0;
1414  
1415 -       miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1416 -       miicontrol |= BMCR_RESET;
1417 +       dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
1418 +       /**/
1419 +       if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011) {
1420 +               nv_save_LED_stats(dev);
1421 +       }
1422 +       miicontrol = BMCR_RESET | bmcr_setup;
1423         if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
1424                 return -1;
1425         }
1426  
1427         /* wait for 500ms */
1428 -       msleep(500);
1429 +       nv_msleep(500);
1430  
1431         /* must wait till reset is deasserted */
1432         while (miicontrol & BMCR_RESET) {
1433 -               msleep(10);
1434 +               nv_msleep(10);
1435                 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1436                 /* FIXME: 100 tries seem excessive */
1437                 if (tries++ > 100)
1438                         return -1;
1439         }
1440 +       if (np->phy_oui== PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011) {
1441 +               nv_restore_LED_stats(dev);
1442 +       }
1443 +
1444         return 0;
1445  }
1446  
1447 @@ -693,9 +1654,36 @@
1448         u8 __iomem *base = get_hwbase(dev);
1449         u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
1450  
1451 +       dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
1452 +       /* phy errata for E3016 phy */
1453 +       if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1454 +               reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1455 +               reg &= ~PHY_MARVELL_E3016_INITMASK;
1456 +               if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1457 +                       printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
1458 +                       return PHY_ERROR;
1459 +               }
1460 +       }
1461 +
1462         /* set advertise register */
1463         reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1464 -       reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
1465 +       reg &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1466 +       if (np->speed_duplex == NV_SPEED_DUPLEX_AUTO)
1467 +               reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL);
1468 +       if (np->speed_duplex == NV_SPEED_DUPLEX_10_HALF_DUPLEX)
1469 +               reg |= ADVERTISE_10HALF;
1470 +       if (np->speed_duplex == NV_SPEED_DUPLEX_10_FULL_DUPLEX)
1471 +               reg |= ADVERTISE_10FULL;
1472 +       if (np->speed_duplex == NV_SPEED_DUPLEX_100_HALF_DUPLEX)
1473 +               reg |= ADVERTISE_100HALF;
1474 +       if (np->speed_duplex == NV_SPEED_DUPLEX_100_FULL_DUPLEX)
1475 +               reg |= ADVERTISE_100FULL;
1476 +       if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
1477 +               reg |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1478 +       if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
1479 +               reg |= ADVERTISE_PAUSE_ASYM;
1480 +       np->fixed_mode = reg;
1481 +
1482         if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1483                 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
1484                 return PHY_ERROR;
1485 @@ -708,14 +1696,18 @@
1486         mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1487         if (mii_status & PHY_GIGABIT) {
1488                 np->gigabit = PHY_GIGABIT;
1489 -               mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
1490 +               mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
1491                 mii_control_1000 &= ~ADVERTISE_1000HALF;
1492 -               if (phyinterface & PHY_RGMII)
1493 +               if (phyinterface & PHY_RGMII && 
1494 +                   (np->speed_duplex == NV_SPEED_DUPLEX_AUTO || 
1495 +                    (np->speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && np->autoneg == AUTONEG_ENABLE)))
1496                         mii_control_1000 |= ADVERTISE_1000FULL;
1497 -               else
1498 +               else {
1499 +                       if (np->speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && np->autoneg == AUTONEG_DISABLE)
1500 +                               printk(KERN_INFO "%s: 1000mpbs full only allowed with autoneg\n", pci_name(np->pci_dev));
1501                         mii_control_1000 &= ~ADVERTISE_1000FULL;
1502 -
1503 -               if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
1504 +               }
1505 +               if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1506                         printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1507                         return PHY_ERROR;
1508                 }
1509 @@ -723,8 +1715,25 @@
1510         else
1511                 np->gigabit = 0;
1512  
1513 -       /* reset the phy */
1514 -       if (phy_reset(dev)) {
1515 +       mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1516 +       if (np->autoneg == AUTONEG_DISABLE){
1517 +               np->pause_flags &= ~(NV_PAUSEFRAME_RX_ENABLE | NV_PAUSEFRAME_TX_ENABLE);
1518 +               if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)
1519 +                       np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
1520 +               if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
1521 +                       np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
1522 +               mii_control &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
1523 +               if (reg & (ADVERTISE_10FULL|ADVERTISE_100FULL))
1524 +                       mii_control |= BMCR_FULLDPLX;
1525 +               if (reg & (ADVERTISE_100HALF|ADVERTISE_100FULL))
1526 +                       mii_control |= BMCR_SPEED100;
1527 +       } else {
1528 +               mii_control |= BMCR_ANENABLE;
1529 +       }
1530 +
1531 +       /* reset the phy and setup BMCR 
1532 +        * (certain phys need reset at same time new values are set) */
1533 +       if (phy_reset(dev, mii_control)) {
1534                 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
1535                 return PHY_ERROR;
1536         }
1537 @@ -732,14 +1741,14 @@
1538         /* phy vendor specific configuration */
1539         if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
1540                 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1541 -               phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
1542 -               phy_reserved |= (PHY_INIT3 | PHY_INIT4);
1543 +               phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1544 +               phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
1545                 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
1546                         printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1547                         return PHY_ERROR;
1548                 }
1549                 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1550 -               phy_reserved |= PHY_INIT5;
1551 +               phy_reserved |= PHY_CICADA_INIT5;
1552                 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
1553                         printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1554                         return PHY_ERROR;
1555 @@ -747,18 +1756,92 @@
1556         }
1557         if (np->phy_oui == PHY_OUI_CICADA) {
1558                 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1559 -               phy_reserved |= PHY_INIT6;
1560 +               phy_reserved |= PHY_CICADA_INIT6;
1561                 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
1562                         printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1563                         return PHY_ERROR;
1564                 }
1565         }
1566 +       if (np->phy_oui == PHY_OUI_VITESSE) {
1567 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
1568 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1569 +                       return PHY_ERROR;
1570 +               }               
1571 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
1572 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1573 +                       return PHY_ERROR;
1574 +               }               
1575 +               phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1576 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1577 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1578 +                       return PHY_ERROR;
1579 +               }               
1580 +               phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1581 +               phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1582 +               phy_reserved |= PHY_VITESSE_INIT3;
1583 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1584 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1585 +                       return PHY_ERROR;
1586 +               }               
1587 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
1588 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1589 +                       return PHY_ERROR;
1590 +               }               
1591 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
1592 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1593 +                       return PHY_ERROR;
1594 +               }               
1595 +               phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1596 +               phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1597 +               phy_reserved |= PHY_VITESSE_INIT3;
1598 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1599 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1600 +                       return PHY_ERROR;
1601 +               }               
1602 +               phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1603 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1604 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1605 +                       return PHY_ERROR;
1606 +               }               
1607 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
1608 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1609 +                       return PHY_ERROR;
1610 +               }               
1611 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
1612 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1613 +                       return PHY_ERROR;
1614 +               }               
1615 +               phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1616 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1617 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1618 +                       return PHY_ERROR;
1619 +               }               
1620 +               phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1621 +               phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1622 +               phy_reserved |= PHY_VITESSE_INIT8;
1623 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1624 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1625 +                       return PHY_ERROR;
1626 +               }               
1627 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
1628 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1629 +                       return PHY_ERROR;
1630 +               }               
1631 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
1632 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1633 +                       return PHY_ERROR;
1634 +               }               
1635 +       }
1636 +       /* some phys clear out the pause advertisement on reset; set it back */
1637 +       mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1638  
1639         /* restart auto negotiation */
1640 -       mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1641 -       mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1642 -       if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1643 -               return PHY_ERROR;
1644 +       if (np->autoneg == AUTONEG_ENABLE) {
1645 +               mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1646 +               mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1647 +               if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1648 +                       return PHY_ERROR;
1649 +               }
1650         }
1651  
1652         return 0;
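
The hunk above only forces speed and duplex when autonegotiation is disabled: it derives the BMCR bits from whatever remains in the advertisement register, and restarts autonegotiation only in the AUTONEG_ENABLE case. A minimal userspace sketch of that derivation, using the standard <linux/mii.h> bit values (the helper name forced_bmcr is hypothetical):

    #include <stdio.h>

    /* Derive forced-mode BMCR speed/duplex bits from an MII advertisement
     * mask, as the hunk above does when autoneg is disabled (BMCR_ANENABLE
     * is cleared separately there).  Constants follow <linux/mii.h>. */
    #define ADVERTISE_10FULL   0x0040
    #define ADVERTISE_100HALF  0x0080
    #define ADVERTISE_100FULL  0x0100
    #define BMCR_FULLDPLX      0x0100
    #define BMCR_SPEED100      0x2000

    static unsigned int forced_bmcr(unsigned int adv)
    {
        unsigned int bmcr = 0;
        if (adv & (ADVERTISE_10FULL | ADVERTISE_100FULL))
            bmcr |= BMCR_FULLDPLX;
        if (adv & (ADVERTISE_100HALF | ADVERTISE_100FULL))
            bmcr |= BMCR_SPEED100;
        return bmcr;
    }

    int main(void)
    {
        /* advertising only 100FULL forces 100 Mbit, full duplex */
        printf("%#x\n", forced_bmcr(ADVERTISE_100FULL)); /* prints 0x2100 */
        return 0;
    }
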
1653 @@ -766,18 +1849,24 @@
1654  
1655  static void nv_start_rx(struct net_device *dev)
1656  {
1657 -       struct fe_priv *np = netdev_priv(dev);
1658 +       struct fe_priv *np = get_nvpriv(dev);
1659         u8 __iomem *base = get_hwbase(dev);
1660 +       u32 rx_ctrl = readl(base + NvRegReceiverControl);
1661 +
1662 +       dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
1663  
1664 -       dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
1665         /* Already running? Stop it. */
1666 -       if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
1667 -               writel(0, base + NvRegReceiverControl);
1668 +       if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1669 +               rx_ctrl &= ~NVREG_RCVCTL_START;
1670 +               writel(rx_ctrl, base + NvRegReceiverControl);
1671                 pci_push(base);
1672         }
1673         writel(np->linkspeed, base + NvRegLinkSpeed);
1674         pci_push(base);
1675 -       writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
1676 +        rx_ctrl |= NVREG_RCVCTL_START;
1677 +        if (np->mac_in_use)
1678 +               rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1679 +       writel(rx_ctrl, base + NvRegReceiverControl);
1680         dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
1681                                 dev->name, np->duplex, np->linkspeed);
1682         pci_push(base);
1683 @@ -785,47 +1874,66 @@
1684  
1685  static void nv_stop_rx(struct net_device *dev)
1686  {
1687 +       struct fe_priv *np = get_nvpriv(dev);
1688         u8 __iomem *base = get_hwbase(dev);
1689 +       u32 rx_ctrl = readl(base + NvRegReceiverControl);
1690  
1691 -       dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
1692 -       writel(0, base + NvRegReceiverControl);
1693 +       dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
1694 +       if (!np->mac_in_use)
1695 +               rx_ctrl &= ~NVREG_RCVCTL_START;
1696 +       else
1697 +               rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1698 +       writel(rx_ctrl, base + NvRegReceiverControl);
1699         reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1700                         NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
1701                         KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
1702  
1703         udelay(NV_RXSTOP_DELAY2);
1704 +       if (!np->mac_in_use)
1705         writel(0, base + NvRegLinkSpeed);
1706  }
1707  
1708  static void nv_start_tx(struct net_device *dev)
1709  {
1710 +       struct fe_priv *np = get_nvpriv(dev);
1711         u8 __iomem *base = get_hwbase(dev);
1712 +       u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1713  
1714 -       dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
1715 -       writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
1716 +       dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
1717 +       tx_ctrl |= NVREG_XMITCTL_START;
1718 +       if (np->mac_in_use)
1719 +               tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
1720 +       writel(tx_ctrl, base + NvRegTransmitterControl);
1721         pci_push(base);
1722  }
1723  
1724  static void nv_stop_tx(struct net_device *dev)
1725  {
1726 +       struct fe_priv *np = get_nvpriv(dev);
1727         u8 __iomem *base = get_hwbase(dev);
1728 +       u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1729  
1730 -       dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
1731 -       writel(0, base + NvRegTransmitterControl);
1732 +       dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
1733 +       if (!np->mac_in_use)
1734 +               tx_ctrl &= ~NVREG_XMITCTL_START;
1735 +       else
1736 +               tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1737 +       writel(tx_ctrl, base + NvRegTransmitterControl);
1738         reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1739                         NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
1740                         KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
1741  
1742         udelay(NV_TXSTOP_DELAY2);
1743 -       writel(0, base + NvRegUnknownTransmitterReg);
1744 +       if (!np->mac_in_use)
1745 +               writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
1746  }
1747  
1748  static void nv_txrx_reset(struct net_device *dev)
1749  {
1750 -       struct fe_priv *np = netdev_priv(dev);
1751 +       struct fe_priv *np = get_nvpriv(dev);
1752         u8 __iomem *base = get_hwbase(dev);
1753  
1754 -       dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
1755 +       dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
1756         writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1757         pci_push(base);
1758         udelay(NV_TXRX_RESET_DELAY);
1759 @@ -833,140 +1941,301 @@
1760         pci_push(base);
1761  }
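
nv_start_rx/nv_stop_rx and their tx counterparts above all take the same shape once a sideband management unit may own the MAC: read the control register, flip NVREG_*_START only when the driver is the sole user, and otherwise just hand the RX/TX path over via the *_PATH_EN bits. A sketch of the start-side decision, with illustrative (not hardware-accurate) bit values:

    #include <stdio.h>

    #define RCVCTL_START       0x01u
    #define RCVCTL_RX_PATH_EN  0x02u  /* illustrative value, not the NIC's */

    /* Start-side logic of nv_start_rx(): when a management unit shares the
     * MAC (mac_in_use), the engine keeps running and the driver only
     * releases its claim on the RX path. */
    static unsigned int rx_ctrl_on_start(unsigned int ctrl, int mac_in_use)
    {
        ctrl |= RCVCTL_START;
        if (mac_in_use)
            ctrl &= ~RCVCTL_RX_PATH_EN;
        return ctrl;
    }

    int main(void)
    {
        printf("%#x\n", rx_ctrl_on_start(0, 0));                 /* 0x1: started */
        printf("%#x\n", rx_ctrl_on_start(RCVCTL_RX_PATH_EN, 1)); /* 0x1: path released */
        return 0;
    }

The real functions then writel() the result back and pci_push() it, a read-back over the BAR that flushes the posted PCI write before the next register access.
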
1762  
1763 +static void nv_mac_reset(struct net_device *dev)
1764 +{
1765 +       struct fe_priv *np = get_nvpriv(dev);
1766 +       u8 __iomem *base = get_hwbase(dev);
1767 +
1768 +       dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
1769 +       writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1770 +       pci_push(base);
1771 +       writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1772 +       pci_push(base);
1773 +       udelay(NV_MAC_RESET_DELAY);
1774 +       writel(0, base + NvRegMacReset);
1775 +       pci_push(base);
1776 +       udelay(NV_MAC_RESET_DELAY);
1777 +       writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1778 +       pci_push(base);
1779 +}
1780 +
1781 +#if NVVER < SLES9
1782 +static int nv_ethtool_ioctl(struct net_device *dev, void *useraddr)
1783 +{
1784 +       struct fe_priv *np = get_nvpriv(dev);
1785 +       u8 *base = get_hwbase(dev);
1786 +       u32 ethcmd;
1787 +
1788 +       if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
1789 +               return -EFAULT;
1790 +
1791 +       switch (ethcmd) {
1792 +               case ETHTOOL_GDRVINFO:
1793 +                       {
1794 +                               struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
1795 +                               strcpy(info.driver, "forcedeth");
1796 +                               strcpy(info.version, FORCEDETH_VERSION);
1797 +                               strcpy(info.bus_info, pci_name(np->pci_dev));
1798 +                               if (copy_to_user(useraddr, &info, sizeof (info)))
1799 +                                       return -EFAULT;
1800 +                               return 0;
1801 +                       }
1802 +               case ETHTOOL_GLINK:
1803 +                       {
1804 +                               struct ethtool_value edata = { ETHTOOL_GLINK };
1805 +
1806 +                               edata.data = !!netif_carrier_ok(dev);
1807 +
1808 +                               if (copy_to_user(useraddr, &edata, sizeof(edata)))
1809 +                                       return -EFAULT;
1810 +                               return 0;
1811 +                       }
1812 +               case ETHTOOL_GWOL:
1813 +                       {
1814 +                               struct ethtool_wolinfo wolinfo;
1815 +                               memset(&wolinfo, 0, sizeof(wolinfo));
1816 +                               wolinfo.supported = WAKE_MAGIC;
1817 +
1818 +                               spin_lock_irq(&np->lock);
1819 +                               if (np->wolenabled)
1820 +                                       wolinfo.wolopts = WAKE_MAGIC;
1821 +                               spin_unlock_irq(&np->lock);
1822 +
1823 +                               if (copy_to_user(useraddr, &wolinfo, sizeof(wolinfo)))
1824 +                                       return -EFAULT;
1825 +                               return 0;
1826 +                       }
1827 +               case ETHTOOL_SWOL:
1828 +                       {
1829 +                               struct ethtool_wolinfo wolinfo;
1830 +                               if (copy_from_user(&wolinfo, useraddr, sizeof(wolinfo)))
1831 +                                       return -EFAULT;
1832 +
1833 +                               spin_lock_irq(&np->lock);
1834 +                               if (wolinfo.wolopts == 0) {
1835 +                                       writel(0, base + NvRegWakeUpFlags);
1836 +                                       np->wolenabled = NV_WOL_DISABLED;
1837 +                               }
1838 +                               if (wolinfo.wolopts & WAKE_MAGIC) {
1839 +                                       writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
1840 +                                       np->wolenabled = NV_WOL_ENABLED;
1841 +                               }
1842 +                               spin_unlock_irq(&np->lock);
1843 +                               return 0;
1844 +                       }
1845 +
1846 +               default:
1847 +                       break;
1848 +       }
1849 +
1850 +       return -EOPNOTSUPP;
1851 +}
1852 +
1853  /*
1854 - * nv_get_stats: dev->get_stats function
1855 - * Get latest stats value from the nic.
1856 - * Called with read_lock(&dev_base_lock) held for read -
1857 - * only synchronized against unregister_netdevice.
1858 + * nv_ioctl: dev->do_ioctl function
1859 + * Called with rtnl_lock held.
1860   */
1861 -static struct net_device_stats *nv_get_stats(struct net_device *dev)
1862 +static int nv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1863  {
1864 -       struct fe_priv *np = netdev_priv(dev);
1865 +       switch(cmd) {
1866 +               case SIOCETHTOOL:
1867 +                       return nv_ethtool_ioctl(dev, rq->ifr_data);
1868  
1869 -       /* It seems that the nic always generates interrupts and doesn't
1870 -        * accumulate errors internally. Thus the current values in np->stats
1871 -        * are already up to date.
1872 -        */
1873 -       return &np->stats;
1874 +               default:
1875 +                       return -EOPNOTSUPP;
1876 +       }
1877  }
1878 +#endif
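
On kernels older than the SLES9 baseline, the patch routes SIOCETHTOOL through its own nv_ethtool_ioctl() instead of the ethtool_ops core: copy just the leading u32 command word from user space, switch on it, then copy the command-specific structure in or out. A userspace analogue of that dispatch, with memcpy standing in for copy_from_user() and command values from <linux/ethtool.h>:

    #include <stdio.h>
    #include <string.h>

    #define ETHTOOL_GDRVINFO 0x00000003
    #define ETHTOOL_GLINK    0x0000000a

    /* Peek at the leading command word, then branch; each case would
     * decode its own struct (ethtool_drvinfo, ethtool_value, ...). */
    static int dispatch(const unsigned char *useraddr)
    {
        unsigned int ethcmd;

        memcpy(&ethcmd, useraddr, sizeof(ethcmd));
        switch (ethcmd) {
        case ETHTOOL_GDRVINFO: return 1;
        case ETHTOOL_GLINK:    return 2;
        default:               return -95; /* -EOPNOTSUPP */
        }
    }

    int main(void)
    {
        unsigned int cmd = ETHTOOL_GLINK;
        printf("%d\n", dispatch((unsigned char *)&cmd)); /* prints 2 */
        return 0;
    }
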
1879  
1880  /*
1881   * nv_alloc_rx: fill rx ring entries.
1882   * Return 1 if the allocations for the skbs failed and the
1883   * rx engine is without Available descriptors
1884   */
1885 -static int nv_alloc_rx(struct net_device *dev)
1886 +static inline int nv_alloc_rx(struct net_device *dev)
1887  {
1888 -       struct fe_priv *np = netdev_priv(dev);
1889 -       unsigned int refill_rx = np->refill_rx;
1890 -       int nr;
1891 -
1892 -       while (np->cur_rx != refill_rx) {
1893 -               struct sk_buff *skb;
1894 -
1895 -               nr = refill_rx % RX_RING;
1896 -               if (np->rx_skbuff[nr] == NULL) {
1897 -
1898 -                       skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1899 -                       if (!skb)
1900 -                               break;
1901 +       struct fe_priv *np = get_nvpriv(dev);
1902 +       struct ring_desc* less_rx;
1903 +       struct sk_buff *skb;
1904  
1905 +       less_rx = np->get_rx.orig;
1906 +       if (less_rx-- == np->first_rx.orig)
1907 +               less_rx = np->last_rx.orig;
1908 +
1909 +       while (np->put_rx.orig != less_rx) {
1910 +               skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1911 +               if (skb) {
1912                         skb->dev = dev;
1913 -                       np->rx_skbuff[nr] = skb;
1914 +                       np->put_rx_ctx->skb = skb;
1915 +                       np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
1916 +                                                            skb->end-skb->data, PCI_DMA_FROMDEVICE);
1917 +                       np->put_rx_ctx->dma_len = skb->end-skb->data;
1918 +                       np->put_rx.orig->PacketBuffer = cpu_to_le32(np->put_rx_ctx->dma);
1919 +                       wmb();
1920 +                       np->put_rx.orig->FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1921 +                       if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1922 +                               np->put_rx.orig = np->first_rx.orig;
1923 +                       if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1924 +                               np->put_rx_ctx = np->first_rx_ctx;
1925                 } else {
1926 -                       skb = np->rx_skbuff[nr];
1927 +                       return 1;
1928                 }
1929 -               np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
1930 -                                       skb->end-skb->data, PCI_DMA_FROMDEVICE);
1931 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1932 -                       np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
1933 +       }
1934 +       return 0;
1935 +}
1936 +
1937 +static inline int nv_alloc_rx_optimized(struct net_device *dev)
1938 +{
1939 +       struct fe_priv *np = get_nvpriv(dev);
1940 +       struct ring_desc_ex* less_rx;
1941 +       struct sk_buff *skb;
1942 +
1943 +       less_rx = np->get_rx.ex;
1944 +       if (less_rx-- == np->first_rx.ex)
1945 +               less_rx = np->last_rx.ex;
1946 +
1947 +       while (np->put_rx.ex != less_rx) {
1948 +               skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1949 +               if (skb) {
1950 +                       skb->dev = dev;
1951 +                       np->put_rx_ctx->skb = skb;
1952 +                       np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
1953 +                                                            skb->end-skb->data, PCI_DMA_FROMDEVICE);
1954 +                       np->put_rx_ctx->dma_len = skb->end-skb->data;
1955 +                       np->put_rx.ex->PacketBufferHigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
1956 +                       np->put_rx.ex->PacketBufferLow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;                        
1957                         wmb();
1958 -                       np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1959 +                       np->put_rx.ex->FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1960 +                       if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1961 +                               np->put_rx.ex = np->first_rx.ex;
1962 +                       if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1963 +                               np->put_rx_ctx = np->first_rx_ctx;
1964                 } else {
1965 -                       np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
1966 -                       np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
1967 -                       wmb();
1968 -                       np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1969 +                       return 1;
1970                 }
1971 -               dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
1972 -                                       dev->name, refill_rx);
1973 -               refill_rx++;
1974         }
1975 -       np->refill_rx = refill_rx;
1976 -       if (np->cur_rx - refill_rx == RX_RING)
1977 -               return 1;
1978         return 0;
1979 +
1980  }
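
Both allocators above keep four cursors per ring (first, last, put, get) and advance with a post-increment that wraps at the last descriptor, replacing the modulo arithmetic the old `refill_rx % RX_RING` indexing needed. The idiom in isolation, with a hypothetical descriptor type:

    #include <stdio.h>

    /* Hypothetical descriptor; stands in for struct ring_desc /
     * struct ring_desc_ex in the hunks above. */
    struct desc { unsigned int flaglen; };

    /* Advance a ring cursor one slot, wrapping from last back to first --
     * the post-increment idiom nv_alloc_rx()/nv_alloc_rx_optimized() use
     * for put_rx and put_rx_ctx. */
    static struct desc *ring_advance(struct desc *cur,
                                     struct desc *first, struct desc *last)
    {
        if (cur++ == last)   /* just consumed the final slot: wrap */
            cur = first;
        return cur;
    }

    int main(void)
    {
        struct desc ring[4];
        struct desc *p = &ring[3];               /* at the last slot */
        p = ring_advance(p, &ring[0], &ring[3]);
        printf("%d\n", (int)(p - ring));         /* prints 0: wrapped */
        return 0;
    }
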
1981  
1982  static void nv_do_rx_refill(unsigned long data)
1983  {
1984         struct net_device *dev = (struct net_device *) data;
1985 -       struct fe_priv *np = netdev_priv(dev);
1986 +       struct fe_priv *np = get_nvpriv(dev);
1987 +       int retcode;
1988  
1989 -       disable_irq(dev->irq);
1990 -       if (nv_alloc_rx(dev)) {
1991 -               spin_lock(&np->lock);
1992 +       if (!using_multi_irqs(dev)) {
1993 +               if (np->msi_flags & NV_MSI_X_ENABLED)
1994 +                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1995 +               else
1996 +                       disable_irq(dev->irq);
1997 +       } else {
1998 +               disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1999 +       }
2000 +
2001 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2002 +               retcode = nv_alloc_rx(dev);
2003 +       else
2004 +               retcode = nv_alloc_rx_optimized(dev);
2005 +       if (retcode) {
2006 +               spin_lock_irq(&np->lock);
2007                 if (!np->in_shutdown)
2008                         mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2009 -               spin_unlock(&np->lock);
2010 +               spin_unlock_irq(&np->lock);
2011 +       }
2012 +       if (!using_multi_irqs(dev)) {
2013 +               if (np->msi_flags & NV_MSI_X_ENABLED)
2014 +                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
2015 +               else
2016 +                       enable_irq(dev->irq);
2017 +       } else {
2018 +               enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
2019         }
2020 -       enable_irq(dev->irq);
2021  }
2022  
2023  static void nv_init_rx(struct net_device *dev) 
2024  {
2025 -       struct fe_priv *np = netdev_priv(dev);
2026 +       struct fe_priv *np = get_nvpriv(dev);
2027         int i;
2028  
2029 -       np->cur_rx = RX_RING;
2030 -       np->refill_rx = 0;
2031 -       for (i = 0; i < RX_RING; i++)
2032 +       np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
2033                 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2034 +               np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
2035 +       else
2036 +               np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
2037 +       np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
2038 +       np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
2039 +
2040 +       for (i = 0; i < np->rx_ring_size; i++) {
2041 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2042                         np->rx_ring.orig[i].FlagLen = 0;
2043 -               else
2044 +                       np->rx_ring.orig[i].PacketBuffer = 0;
2045 +               } else {
2046                         np->rx_ring.ex[i].FlagLen = 0;
2047 +                       np->rx_ring.ex[i].TxVlan = 0;
2048 +                       np->rx_ring.ex[i].PacketBufferHigh = 0;
2049 +                       np->rx_ring.ex[i].PacketBufferLow = 0;
2050 +               }
2051 +               np->rx_skb[i].skb = NULL;
2052 +               np->rx_skb[i].dma = 0;
2053 +       }
2054  }
2055  
2056  static void nv_init_tx(struct net_device *dev)
2057  {
2058 -       struct fe_priv *np = netdev_priv(dev);
2059 +       struct fe_priv *np = get_nvpriv(dev);
2060         int i;
2061  
2062 -       np->next_tx = np->nic_tx = 0;
2063 -       for (i = 0; i < TX_RING; i++) {
2064 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2065 +       np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
2066 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2067 +               np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
2068 +       else
2069 +               np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
2070 +       np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
2071 +       np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
2072 +
2073 +       for (i = 0; i < np->tx_ring_size; i++) {
2074 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2075                         np->tx_ring.orig[i].FlagLen = 0;
2076 -               else
2077 +                       np->tx_ring.orig[i].PacketBuffer = 0;
2078 +               } else {
2079                         np->tx_ring.ex[i].FlagLen = 0;
2080 -               np->tx_skbuff[i] = NULL;
2081 -               np->tx_dma[i] = 0;
2082 +                       np->tx_ring.ex[i].TxVlan = 0;
2083 +                       np->tx_ring.ex[i].PacketBufferHigh = 0;
2084 +                       np->tx_ring.ex[i].PacketBufferLow = 0;
2085 +               }
2086 +               np->tx_skb[i].skb = NULL;
2087 +               np->tx_skb[i].dma = 0;
2088         }
2089  }
2090  
2091  static int nv_init_ring(struct net_device *dev)
2092  {
2093 +       struct fe_priv *np = get_nvpriv(dev);
2094         nv_init_tx(dev);
2095         nv_init_rx(dev);
2096 -       return nv_alloc_rx(dev);
2097 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2098 +               return nv_alloc_rx(dev);
2099 +       else
2100 +               return nv_alloc_rx_optimized(dev);
2101  }
2102  
2103  static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
2104  {
2105 -       struct fe_priv *np = netdev_priv(dev);
2106 +       struct fe_priv *np = get_nvpriv(dev);
2107  
2108         dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
2109                 dev->name, skbnr);
2110  
2111 -       if (np->tx_dma[skbnr]) {
2112 -               pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
2113 -                              np->tx_dma_len[skbnr],
2114 +       if (np->tx_skb[skbnr].dma) {
2115 +               pci_unmap_page(np->pci_dev, np->tx_skb[skbnr].dma,
2116 +                              np->tx_skb[skbnr].dma_len,
2117                                PCI_DMA_TODEVICE);
2118 -               np->tx_dma[skbnr] = 0;
2119 +               np->tx_skb[skbnr].dma = 0;
2120         }
2121 -
2122 -       if (np->tx_skbuff[skbnr]) {
2123 -               dev_kfree_skb_irq(np->tx_skbuff[skbnr]);
2124 -               np->tx_skbuff[skbnr] = NULL;
2125 +       if (np->tx_skb[skbnr].skb) {
2126 +               dev_kfree_skb_any(np->tx_skb[skbnr].skb);
2127 +               np->tx_skb[skbnr].skb = NULL;
2128                 return 1;
2129         } else {
2130                 return 0;
2131 @@ -975,14 +2244,19 @@
2132  
2133  static void nv_drain_tx(struct net_device *dev)
2134  {
2135 -       struct fe_priv *np = netdev_priv(dev);
2136 +       struct fe_priv *np = get_nvpriv(dev);
2137         unsigned int i;
2138         
2139 -       for (i = 0; i < TX_RING; i++) {
2140 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2141 +       for (i = 0; i < np->tx_ring_size; i++) {
2142 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2143                         np->tx_ring.orig[i].FlagLen = 0;
2144 -               else
2145 +                       np->tx_ring.orig[i].PacketBuffer = 0;
2146 +               } else {
2147                         np->tx_ring.ex[i].FlagLen = 0;
2148 +                       np->tx_ring.ex[i].TxVlan = 0;
2149 +                       np->tx_ring.ex[i].PacketBufferHigh = 0;
2150 +                       np->tx_ring.ex[i].PacketBufferLow = 0;
2151 +               }
2152                 if (nv_release_txskb(dev, i))
2153                         np->stats.tx_dropped++;
2154         }
2155 @@ -990,20 +2264,25 @@
2156  
2157  static void nv_drain_rx(struct net_device *dev)
2158  {
2159 -       struct fe_priv *np = netdev_priv(dev);
2160 +       struct fe_priv *np = get_nvpriv(dev);
2161         int i;
2162 -       for (i = 0; i < RX_RING; i++) {
2163 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2164 +       for (i = 0; i < np->rx_ring_size; i++) {
2165 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2166                         np->rx_ring.orig[i].FlagLen = 0;
2167 -               else
2168 +                       np->rx_ring.orig[i].PacketBuffer = 0;
2169 +               } else {
2170                         np->rx_ring.ex[i].FlagLen = 0;
2171 +                       np->rx_ring.ex[i].TxVlan = 0;
2172 +                       np->rx_ring.ex[i].PacketBufferHigh = 0;
2173 +                       np->rx_ring.ex[i].PacketBufferLow = 0;
2174 +               }
2175                 wmb();
2176 -               if (np->rx_skbuff[i]) {
2177 -                       pci_unmap_single(np->pci_dev, np->rx_dma[i],
2178 -                                               np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
2179 +               if (np->rx_skb[i].skb) {
2180 +                       pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
2181 +                                               np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
2182                                                 PCI_DMA_FROMDEVICE);
2183 -                       dev_kfree_skb(np->rx_skbuff[i]);
2184 -                       np->rx_skbuff[i] = NULL;
2185 +                       dev_kfree_skb(np->rx_skb[i].skb);
2186 +                       np->rx_skb[i].skb = NULL;
2187                 }
2188         }
2189  }
2190 @@ -1020,52 +2299,51 @@
2191   */
2192  static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2193  {
2194 -       struct fe_priv *np = netdev_priv(dev);
2195 +       struct fe_priv *np = get_nvpriv(dev);
2196         u32 tx_flags = 0;
2197         u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2198         unsigned int fragments = skb_shinfo(skb)->nr_frags;
2199 -       unsigned int nr = (np->next_tx - 1) % TX_RING;
2200 -       unsigned int start_nr = np->next_tx % TX_RING;
2201         unsigned int i;
2202         u32 offset = 0;
2203         u32 bcnt;
2204         u32 size = skb->len-skb->data_len;
2205         u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2206 +       u32 empty_slots;
2207 +       struct ring_desc* put_tx;
2208 +       struct ring_desc* start_tx;
2209 +       struct ring_desc* prev_tx;
2210 +       struct nv_skb_map* prev_tx_ctx;
2211  
2212 +       dprintk("%s:%s\n",dev->name,__FUNCTION__);
2213         /* add fragments to entries count */
2214         for (i = 0; i < fragments; i++) {
2215                 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
2216                            ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2217         }
2218  
2219 -       spin_lock_irq(&np->lock);
2220 +       empty_slots = (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
2221 +       if (likely(empty_slots > entries)) {
2222  
2223 -       if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) {
2224 -               spin_unlock_irq(&np->lock);
2225 -               netif_stop_queue(dev);
2226 -               return NETDEV_TX_BUSY;
2227 -       }
2228 +       start_tx = put_tx = np->put_tx.orig;
2229  
2230         /* setup the header buffer */
2231         do {
2232 +               prev_tx = put_tx;
2233 +               prev_tx_ctx = np->put_tx_ctx;
2234                 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2235 -               nr = (nr + 1) % TX_RING;
2236 -
2237 -               np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2238 +               np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2239                                                 PCI_DMA_TODEVICE);
2240 -               np->tx_dma_len[nr] = bcnt;
2241 +               np->put_tx_ctx->dma_len = bcnt;
2242 +               put_tx->PacketBuffer = cpu_to_le32(np->put_tx_ctx->dma);
2243 +               put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2244  
2245 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2246 -                       np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
2247 -                       np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2248 -               } else {
2249 -                       np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
2250 -                       np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
2251 -                       np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2252 -               }
2253                 tx_flags = np->tx_flags;
2254                 offset += bcnt;
2255                 size -= bcnt;
2256 +               if (unlikely(put_tx++ == np->last_tx.orig))
2257 +                       put_tx = np->first_tx.orig;
2258 +               if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2259 +                       np->put_tx_ctx = np->first_tx_ctx;
2260         } while(size);
2261  
2262         /* setup the fragments */
2263 @@ -1075,68 +2353,174 @@
2264                 offset = 0;
2265  
2266                 do {
2267 +                       prev_tx = put_tx;
2268 +                       prev_tx_ctx = np->put_tx_ctx;
2269                         bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2270 -                       nr = (nr + 1) % TX_RING;
2271  
2272 -                       np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2273 -                                                     PCI_DMA_TODEVICE);
2274 -                       np->tx_dma_len[nr] = bcnt;
2275 +                       np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2276 +                                                          PCI_DMA_TODEVICE);
2277 +                       np->put_tx_ctx->dma_len = bcnt;
2278  
2279 -                       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2280 -                               np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
2281 -                               np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2282 -                       } else {
2283 -                               np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
2284 -                               np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
2285 -                               np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2286 -                       }
2287 +                       put_tx->PacketBuffer = cpu_to_le32(np->put_tx_ctx->dma);
2288 +                       put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2289                         offset += bcnt;
2290                         size -= bcnt;
2291 +                       if (unlikely(put_tx++ == np->last_tx.orig))
2292 +                               put_tx = np->first_tx.orig;
2293 +                       if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2294 +                               np->put_tx_ctx = np->first_tx_ctx;
2295                 } while (size);
2296         }
2297  
2298         /* set last fragment flag  */
2299 -       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2300 -               np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
2301 +       prev_tx->FlagLen |= cpu_to_le32(tx_flags_extra);
2302 +
2303 +       /* save skb in this slot's context area */
2304 +       prev_tx_ctx->skb = skb;
2305 +
2306 +#ifdef NETIF_F_TSO
2307 +#if NVVER > FEDORA5 
2308 +       if (skb_shinfo(skb)->gso_size)
2309 +               tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2310 +#else
2311 +       if (skb_shinfo(skb)->tso_size)
2312 +               tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
2313 +#endif
2314 +       else
2315 +#endif
2316 +       tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
2317 +
2318 +       start_tx->FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
2319 +       np->put_tx.orig = put_tx;
2320 +
2321 +       dev->trans_start = jiffies;
2322 +       writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2323 +       return NETDEV_TX_OK;
2324         } else {
2325 -               np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
2326 +               netif_stop_queue(dev);
2327 +               np->stop_tx = 1;
2328 +               return NETDEV_TX_BUSY;
2329 +       }
2330 +}
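
Instead of the old `next_tx - nic_tx > TX_LIMIT_STOP` test under np->lock, both transmit paths now size the free space lock-free from the context cursors: `tx_ring_size - ((tx_ring_size + (put - get)) % tx_ring_size)`. A worked example of that arithmetic (the names are stand-ins for the driver's fields):

    #include <stdio.h>

    /* Free-slot arithmetic used by nv_start_xmit() and
     * nv_start_xmit_optimized(); put and get stand in for the
     * put_tx_ctx / get_tx_ctx cursor positions. */
    static int empty_slots(int ring_size, int put, int get)
    {
        /* adding ring_size first keeps the dividend non-negative after
         * the put cursor has wrapped behind the get cursor */
        return ring_size - ((ring_size + (put - get)) % ring_size);
    }

    int main(void)
    {
        /* 256-slot ring, producer wrapped to 10, consumer at 250:
         * 16 descriptors in flight, 240 free */
        printf("%d\n", empty_slots(256, 10, 250)); /* prints 240 */
        /* put == get: the ring is empty, all 256 slots free */
        printf("%d\n", empty_slots(256, 42, 42));  /* prints 256 */
        return 0;
    }

Because the queue is stopped unless empty_slots > entries, at least one slot always stays unused, so a full ring (put catching up to get) is never mistaken for an empty one.
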
2331 +
2332 +static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
2333 +{
2334 +       struct fe_priv *np = get_nvpriv(dev);
2335 +       u32 tx_flags = 0;
2336 +       u32 tx_flags_extra;
2337 +       unsigned int fragments = skb_shinfo(skb)->nr_frags;
2338 +       unsigned int i;
2339 +       u32 offset = 0;
2340 +       u32 bcnt;
2341 +       u32 size = skb->len-skb->data_len;
2342 +       u32 empty_slots;
2343 +       struct ring_desc_ex* put_tx;
2344 +       struct ring_desc_ex* start_tx;
2345 +       struct ring_desc_ex* prev_tx;
2346 +       struct nv_skb_map* prev_tx_ctx;
2347 +
2348 +       u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2349 +
2350 +       dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
2351 +       /* add fragments to entries count */
2352 +       for (i = 0; i < fragments; i++) {
2353 +               entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
2354 +                          ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2355 +       }
2356 +
2357 +       empty_slots = (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
2358 +       if (likely(empty_slots > entries)) {
2359 +
2360 +       start_tx = put_tx = np->put_tx.ex;
2361 +
2362 +       /* setup the header buffer */
2363 +       do {
2364 +               prev_tx = put_tx;
2365 +               prev_tx_ctx = np->put_tx_ctx;
2366 +               bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2367 +               np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2368 +                                               PCI_DMA_TODEVICE);
2369 +               np->put_tx_ctx->dma_len = bcnt;
2370 +               put_tx->PacketBufferHigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
2371 +               put_tx->PacketBufferLow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
2372 +               put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2373 +
2374 +               tx_flags = NV_TX2_VALID;
2375 +               offset += bcnt;
2376 +               size -= bcnt;
2377 +               if (unlikely(put_tx++ == np->last_tx.ex))
2378 +                       put_tx = np->first_tx.ex;
2379 +               if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2380 +                       np->put_tx_ctx = np->first_tx_ctx;
2381 +       } while(size);
2382 +       /* setup the fragments */
2383 +       for (i = 0; i < fragments; i++) {
2384 +               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2385 +               u32 size = frag->size;
2386 +               offset = 0;
2387 +
2388 +               do {
2389 +                       prev_tx = put_tx;
2390 +                       prev_tx_ctx = np->put_tx_ctx;
2391 +                       bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2392 +
2393 +                       np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2394 +                                                          PCI_DMA_TODEVICE);
2395 +                       np->put_tx_ctx->dma_len = bcnt;
2396 +
2397 +                       put_tx->PacketBufferHigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
2398 +                       put_tx->PacketBufferLow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
2399 +                       put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2400 +                       offset += bcnt;
2401 +                       size -= bcnt;
2402 +                       if (unlikely(put_tx++ == np->last_tx.ex))
2403 +                               put_tx = np->first_tx.ex;
2404 +                       if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2405 +                               np->put_tx_ctx = np->first_tx_ctx;
2406 +               } while (size);
2407         }
2408  
2409 -       np->tx_skbuff[nr] = skb;
2410 +       /* set last fragment flag  */
2411 +       prev_tx->FlagLen |= cpu_to_le32(NV_TX2_LASTPACKET);
2412 +
2413 +       /* save skb in this slot's context area */
2414 +       prev_tx_ctx->skb = skb;
2415  
2416  #ifdef NETIF_F_TSO
2417 +#if NVVER > FEDORA5 
2418 +       if (skb_shinfo(skb)->gso_size)
2419 +               tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2420 +#else
2421         if (skb_shinfo(skb)->tso_size)
2422                 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
2423 +#endif
2424         else
2425  #endif
2426         tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
2427  
2428 -       /* set tx flags */
2429 -       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2430 -               np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
2431 +       /* vlan tag */
2432 +       if (likely(!np->vlangrp)) {
2433 +               start_tx->TxVlan = 0;
2434         } else {
2435 -               np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
2436 -       }       
2437 -
2438 -       dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
2439 -               dev->name, np->next_tx, entries, tx_flags_extra);
2440 -       {
2441 -               int j;
2442 -               for (j=0; j<64; j++) {
2443 -                       if ((j%16) == 0)
2444 -                               dprintk("\n%03x:", j);
2445 -                       dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2446 -               }
2447 -               dprintk("\n");
2448 +               if (vlan_tx_tag_present(skb))
2449 +                       start_tx->TxVlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
2450 +               else
2451 +                       start_tx->TxVlan = 0;
2452         }
2453  
2454 -       np->next_tx += entries;
2455 +       /* set tx flags */
2456 +       start_tx->FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
2457 +       np->put_tx.ex = put_tx;
2458  
2459         dev->trans_start = jiffies;
2460 -       spin_unlock_irq(&np->lock);
2461         writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2462 -       pci_push(get_hwbase(dev));
2463         return NETDEV_TX_OK;
2464 +
2465 +       } else {
2466 +               netif_stop_queue(dev);
2467 +               np->stop_tx = 1;
2468 +               return NETDEV_TX_BUSY;
2469 +       }
2470  }
2471  
2472  /*
2473 @@ -1144,30 +2528,26 @@
2474   *
2475   * Caller must own np->lock.
2476   */
2477 -static void nv_tx_done(struct net_device *dev)
2478 +static inline void nv_tx_done(struct net_device *dev)
2479  {
2480 -       struct fe_priv *np = netdev_priv(dev);
2481 +       struct fe_priv *np = get_nvpriv(dev);
2482         u32 Flags;
2483 -       unsigned int i;
2484 -       struct sk_buff *skb;
2485 +       struct ring_desc* orig_get_tx = np->get_tx.orig;
2486 +       struct ring_desc* put_tx = np->put_tx.orig;
2487  
2488 -       while (np->nic_tx != np->next_tx) {
2489 -               i = np->nic_tx % TX_RING;
2490 +       dprintk("%s:%s\n",dev->name,__FUNCTION__);
2491 +       while ((np->get_tx.orig != put_tx) &&
2492 +              !((Flags = le32_to_cpu(np->get_tx.orig->FlagLen)) & NV_TX_VALID)) {
2493 +               dprintk(KERN_DEBUG "%s: nv_tx_done:NVLAN tx done\n", dev->name);
2494  
2495 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2496 -                       Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
2497 -               else
2498 -                       Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
2499 +               pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2500 +                              np->get_tx_ctx->dma_len,
2501 +                              PCI_DMA_TODEVICE);
2502 +               np->get_tx_ctx->dma = 0;
2503  
2504 -               dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
2505 -                                       dev->name, np->nic_tx, Flags);
2506 -               if (Flags & NV_TX_VALID)
2507 -                       break;
2508                 if (np->desc_ver == DESC_VER_1) {
2509                         if (Flags & NV_TX_LASTPACKET) {
2510 -                               skb = np->tx_skbuff[i];
2511 -                               if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
2512 -                                            NV_TX_UNDERFLOW|NV_TX_ERROR)) {
2513 +                               if (Flags & NV_TX_ERROR) {
2514                                         if (Flags & NV_TX_UNDERFLOW)
2515                                                 np->stats.tx_fifo_errors++;
2516                                         if (Flags & NV_TX_CARRIERLOST)
2517 @@ -1175,14 +2555,15 @@
2518                                         np->stats.tx_errors++;
2519                                 } else {
2520                                         np->stats.tx_packets++;
2521 -                                       np->stats.tx_bytes += skb->len;
2522 +                                       np->stats.tx_bytes += np->get_tx_ctx->skb->len;
2523                                 }
2524 +                               dev_kfree_skb_any(np->get_tx_ctx->skb);
2525 +                               np->get_tx_ctx->skb = NULL;
2526 +
2527                         }
2528                 } else {
2529                         if (Flags & NV_TX2_LASTPACKET) {
2530 -                               skb = np->tx_skbuff[i];
2531 -                               if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
2532 -                                            NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
2533 +                               if (Flags & NV_TX2_ERROR) {
2534                                         if (Flags & NV_TX2_UNDERFLOW)
2535                                                 np->stats.tx_fifo_errors++;
2536                                         if (Flags & NV_TX2_CARRIERLOST)
2537 @@ -1190,15 +2571,58 @@
2538                                         np->stats.tx_errors++;
2539                                 } else {
2540                                         np->stats.tx_packets++;
2541 -                                       np->stats.tx_bytes += skb->len;
2542 +                                       np->stats.tx_bytes += np->get_tx_ctx->skb->len;
2543                                 }                               
2544 +                               dev_kfree_skb_any(np->get_tx_ctx->skb);
2545 +                               np->get_tx_ctx->skb = NULL;
2546 +                       }
2547 +               }
2548 +
2549 +               if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2550 +                       np->get_tx.orig = np->first_tx.orig;
2551 +               if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2552 +                       np->get_tx_ctx = np->first_tx_ctx;
2553 +       }
2554 +       if (unlikely((np->stop_tx == 1) && (np->get_tx.orig != orig_get_tx))) {
2555 +               np->stop_tx = 0;
2556 +               netif_wake_queue(dev);
2557 +       }
2558 +}
2559 +
2560 +static inline void nv_tx_done_optimized(struct net_device *dev, int max_work)
2561 +{
2562 +       struct fe_priv *np = get_nvpriv(dev);
2563 +       u32 Flags;
2564 +       struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
2565 +       struct ring_desc_ex* put_tx = np->put_tx.ex;
2566 +
2567 +       while ((np->get_tx.ex != put_tx) &&
2568 +              !((Flags = le32_to_cpu(np->get_tx.ex->FlagLen)) & NV_TX_VALID) &&
2569 +              (max_work-- > 0)) {
2570 +               dprintk(KERN_DEBUG "%s: nv_tx_done_optimized:NVLAN tx done\n", dev->name);
2571 +
2572 +               pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2573 +                              np->get_tx_ctx->dma_len,
2574 +                              PCI_DMA_TODEVICE);
2575 +               np->get_tx_ctx->dma = 0;
2576 +
2577 +               if (Flags & NV_TX2_LASTPACKET) {
2578 +                       if (!(Flags & NV_TX2_ERROR)) {
2579 +                               np->stats.tx_packets++;
2580                         }
2581 +                       dev_kfree_skb_any(np->get_tx_ctx->skb);
2582 +                       np->get_tx_ctx->skb = NULL;
2583                 }
2584 -               nv_release_txskb(dev, i);
2585 -               np->nic_tx++;
2586 +               
2587 +               if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2588 +                       np->get_tx.ex = np->first_tx.ex;
2589 +               if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2590 +                       np->get_tx_ctx = np->first_tx_ctx;
2591         }
2592 -       if (np->next_tx - np->nic_tx < TX_LIMIT_START)
2593 +       if (unlikely((np->stop_tx == 1) && (np->get_tx.ex != orig_get_tx))) {
2594 +               np->stop_tx = 0;
2595                 netif_wake_queue(dev);
2596 +       }
2597  }
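
nv_tx_done_optimized() walks completed descriptors with three stop conditions: the producer cursor, a descriptor the NIC still owns (NV_TX_VALID set), or the max_work budget that bounds time spent in the interrupt path. A self-contained model of that loop (flags[], OWNED_BY_HW and the indices are illustrative stand-ins):

    #include <stdio.h>

    #define RING 8
    #define OWNED_BY_HW 0x1u  /* stands in for NV_TX_VALID */

    static unsigned int flags[RING]; /* per-descriptor flag words */

    /* Bounded completion scan: stop at the producer index, at a slot the
     * NIC still owns, or after max_work entries. */
    static int reap_tx(int *get, int put, int max_work)
    {
        int done = 0;
        while (*get != put && !(flags[*get] & OWNED_BY_HW) && max_work-- > 0) {
            /* real driver: pci_unmap_page() + dev_kfree_skb_any() here */
            *get = (*get + 1) % RING;
            done++;
        }
        return done;
    }

    int main(void)
    {
        int get = 0;
        flags[3] = OWNED_BY_HW;              /* NIC still owns slot 3 */
        printf("%d\n", reap_tx(&get, 6, 8)); /* prints 3: slots 0..2 reaped */
        return 0;
    }
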
2598  
2599  /*
2600 @@ -1207,20 +2631,34 @@
2601   */
2602  static void nv_tx_timeout(struct net_device *dev)
2603  {
2604 -       struct fe_priv *np = netdev_priv(dev);
2605 +       struct fe_priv *np = get_nvpriv(dev);
2606         u8 __iomem *base = get_hwbase(dev);
2607 +       u32 status;
2608 +
2609 +       if (!netif_running(dev))
2610 +               return;
2611 +
2612 +       if (np->msi_flags & NV_MSI_X_ENABLED)
2613 +               status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2614 +       else
2615 +               status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2616  
2617 -       printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
2618 -                       readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
2619 +       printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
2620  
2621         {
2622                 int i;
2623  
2624 -               printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
2625 -                               dev->name, (unsigned long)np->ring_addr,
2626 -                               np->next_tx, np->nic_tx);
2627 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2628 +                       printk(KERN_INFO "%s: Ring at %lx: get %lx put %lx\n",
2629 +                              dev->name, (unsigned long)np->tx_ring.orig,
2630 +                              (unsigned long)np->get_tx.orig, (unsigned long)np->put_tx.orig);
2631 +               } else {
2632 +                       printk(KERN_INFO "%s: Ring at %lx: get %lx put %lx\n",
2633 +                              dev->name, (unsigned long)np->tx_ring.ex,
2634 +                              (unsigned long)np->get_tx.ex, (unsigned long)np->put_tx.ex);
2635 +               }
2636                 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
2637 -               for (i=0;i<0x400;i+= 32) {
2638 +               for (i=0;i<=np->register_size;i+= 32) {
2639                         printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2640                                         i,
2641                                         readl(base + i + 0), readl(base + i + 4),
2642 @@ -1229,7 +2667,7 @@
2643                                         readl(base + i + 24), readl(base + i + 28));
2644                 }
2645                 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2646 -               for (i=0;i<TX_RING;i+= 4) {
2647 +               for (i=0;i<np->tx_ring_size;i+= 4) {
2648                         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2649                                 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2650                                        i, 
2651 @@ -1260,29 +2698,36 @@
2652                 }
2653         }
2654  
2655 +       nv_disable_irq(dev);
2656         spin_lock_irq(&np->lock);
2657  
2658         /* 1) stop tx engine */
2659         nv_stop_tx(dev);
2660  
2661         /* 2) check that the packets were not sent already: */
2662 -       nv_tx_done(dev);
2663 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2664 +               nv_tx_done(dev);
2665 +       else
2666 +               nv_tx_done_optimized(dev, np->tx_ring_size);
2667  
2668         /* 3) if there are dead entries: clear everything */
2669 -       if (np->next_tx != np->nic_tx) {
2670 +       if (np->get_tx_ctx != np->put_tx_ctx) {
2671                 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
2672                 nv_drain_tx(dev);
2673 -               np->next_tx = np->nic_tx = 0;
2674                 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2675 -                       writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
2676 +                       np->get_tx.orig = np->put_tx.orig = np->first_tx.orig;
2677                 else
2678 -                       writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
2679 -               netif_wake_queue(dev);
2680 +                       np->get_tx.ex = np->put_tx.ex = np->first_tx.ex;
2681 +               np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx;
2682 +               setup_hw_rings(dev, NV_SETUP_TX_RING);
2683         }
2684  
2685 +       netif_wake_queue(dev);
2686         /* 4) restart tx engine */
2687         nv_start_tx(dev);
2688 +
2689         spin_unlock_irq(&np->lock);
2690 +       nv_enable_irq(dev);
2691  }
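
The timeout handler's register dump above prints the mapped BAR eight 32-bit words per row, now bounded by np->register_size instead of the hard-coded 0x400. The same formatting loop over plain memory (regs[] stands in for readl() on the mapped registers):

    #include <stdio.h>

    static void dump_regs(const unsigned int *regs, unsigned int size)
    {
        unsigned int i;
        /* byte offset in the left column, eight words per row */
        for (i = 0; i < size / 4; i += 8) {
            printf("%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
                   i * 4,
                   regs[i],     regs[i + 1], regs[i + 2], regs[i + 3],
                   regs[i + 4], regs[i + 5], regs[i + 6], regs[i + 7]);
        }
    }

    int main(void)
    {
        unsigned int regs[16] = { 0xdeadbeef, 1, 2, 3, 4, 5, 6, 7,
                                  8, 9, 10, 11, 12, 13, 14, 15 };
        dump_regs(regs, sizeof(regs)); /* prints two 32-byte rows */
        return 0;
    }
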
2692  
2693  /*
2694 @@ -1338,41 +2783,23 @@
2695         }
2696  }
2697  
2698 -static void nv_rx_process(struct net_device *dev)
2699 +static inline void nv_rx_process(struct net_device *dev)
2700  {
2701 -       struct fe_priv *np = netdev_priv(dev);
2702 +       struct fe_priv *np = get_nvpriv(dev);
2703         u32 Flags;
2704 +       struct sk_buff *skb;
2705 +       int len;
2706  
2707 -       for (;;) {
2708 -               struct sk_buff *skb;
2709 -               int len;
2710 -               int i;
2711 -               if (np->cur_rx - np->refill_rx >= RX_RING)
2712 -                       break;  /* we scanned the whole ring - do not continue */
2713 -
2714 -               i = np->cur_rx % RX_RING;
2715 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2716 -                       Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
2717 -                       len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
2718 -               } else {
2719 -                       Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
2720 -                       len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
2721 -               }
2722 -
2723 -               dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
2724 -                                       dev->name, np->cur_rx, Flags);
2725 -
2726 -               if (Flags & NV_RX_AVAIL)
2727 -                       break;  /* still owned by hardware, */
2728 +       dprintk("%s:%s\n",dev->name,__FUNCTION__);
2729 +       while((np->get_rx.orig != np->put_rx.orig) &&
2730 +             !((Flags = le32_to_cpu(np->get_rx.orig->FlagLen)) & NV_RX_AVAIL)) {
2731 +
2732 +               pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2733 +                                np->get_rx_ctx->dma_len,
2734 +                                PCI_DMA_FROMDEVICE);
2735  
2736 -               /*
2737 -                * the packet is for us - immediately tear down the pci mapping.
2738 -                * TODO: check if a prefetch of the first cacheline improves
2739 -                * the performance.
2740 -                */
2741 -               pci_unmap_single(np->pci_dev, np->rx_dma[i],
2742 -                               np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
2743 -                               PCI_DMA_FROMDEVICE);
2744 +               skb = np->get_rx_ctx->skb;
2745 +               np->get_rx_ctx->skb = NULL;
2746  
2747                 {
2748                         int j;
2749 @@ -1380,112 +2807,197 @@
2750                         for (j=0; j<64; j++) {
2751                                 if ((j%16) == 0)
2752                                         dprintk("\n%03x:", j);
2753 -                               dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
2754 +                               dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2755                         }
2756                         dprintk("\n");
2757                 }
2758 -               /* look at what we actually got: */
2759 +
2760                 if (np->desc_ver == DESC_VER_1) {
2761 -                       if (!(Flags & NV_RX_DESCRIPTORVALID))
2762 -                               goto next_pkt;
2763  
2764 -                       if (Flags & NV_RX_ERROR) {
2765 -                               if (Flags & NV_RX_MISSEDFRAME) {
2766 -                                       np->stats.rx_missed_errors++;
2767 -                                       np->stats.rx_errors++;
2768 -                                       goto next_pkt;
2769 -                               }
2770 -                               if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
2771 -                                       np->stats.rx_errors++;
2772 -                                       goto next_pkt;
2773 -                               }
2774 -                               if (Flags & NV_RX_CRCERR) {
2775 -                                       np->stats.rx_crc_errors++;
2776 -                                       np->stats.rx_errors++;
2777 -                                       goto next_pkt;
2778 -                               }
2779 -                               if (Flags & NV_RX_OVERFLOW) {
2780 -                                       np->stats.rx_over_errors++;
2781 -                                       np->stats.rx_errors++;
2782 -                                       goto next_pkt;
2783 +                       if (likely(Flags & NV_RX_DESCRIPTORVALID)) {
2784 +                               len = Flags & LEN_MASK_V1;
2785 +                               if (unlikely(Flags & NV_RX_ERROR)) {
2786 +                                       if (Flags & NV_RX_ERROR4) {
2787 +                                               len = nv_getlen(dev, skb->data, len);
2788 +                                               if (len < 0) {
2789 +                                                       np->stats.rx_errors++;
2790 +                                                       dev_kfree_skb(skb);
2791 +                                                       goto next_pkt;
2792 +                                               }
2793 +                                       }
2794 +                                       /* framing errors are soft errors */
2795 +                                       else if (Flags & NV_RX_FRAMINGERR) {
2796 +                                               if (Flags & NV_RX_SUBSTRACT1) {
2797 +                                                       len--;
2798 +                                               }
2799 +                                       }
2800 +                                       /* the rest are hard errors */
2801 +                                       else {
2802 +                                               if (Flags & NV_RX_MISSEDFRAME)
2803 +                                                       np->stats.rx_missed_errors++;
2804 +                                               if (Flags & NV_RX_CRCERR)
2805 +                                                       np->stats.rx_crc_errors++;
2806 +                                               if (Flags & NV_RX_OVERFLOW)
2807 +                                                       np->stats.rx_over_errors++;
2808 +                                               np->stats.rx_errors++;
2809 +                                               dev_kfree_skb(skb);
2810 +                                               goto next_pkt;
2811 +                                       }
2812                                 }
2813 -                               if (Flags & NV_RX_ERROR4) {
2814 -                                       len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
2815 -                                       if (len < 0) {
2816 +                       } else {
2817 +                               dev_kfree_skb(skb);
2818 +                               goto next_pkt;
2819 +                       }
2820 +               } else {
2821 +                       if (likely(Flags & NV_RX2_DESCRIPTORVALID)) {
2822 +                               len = Flags & LEN_MASK_V2;
2823 +                               if (unlikely(Flags & NV_RX2_ERROR)) {
2824 +                                       if (Flags & NV_RX2_ERROR4) {
2825 +                                               len = nv_getlen(dev, skb->data, len);
2826 +                                               if (len < 0) {
2827 +                                                       np->stats.rx_errors++;
2828 +                                                       dev_kfree_skb(skb);
2829 +                                                       goto next_pkt;
2830 +                                               }
2831 +                                       }
2832 +                                       /* framing errors are soft errors */
2833 +                                       else if (Flags & NV_RX2_FRAMINGERR) {
2834 +                                               if (Flags & NV_RX2_SUBSTRACT1) {
2835 +                                                       len--;
2836 +                                               }
2837 +                                       }
2838 +                                       /* the rest are hard errors */
2839 +                                       else {
2840 +                                               if (Flags & NV_RX2_CRCERR)
2841 +                                                       np->stats.rx_crc_errors++;
2842 +                                               if (Flags & NV_RX2_OVERFLOW)
2843 +                                                       np->stats.rx_over_errors++;
2844                                                 np->stats.rx_errors++;
2845 +                                               dev_kfree_skb(skb);
2846                                                 goto next_pkt;
2847                                         }
2848                                 }
2849 -                               /* framing errors are soft errors. */
2850 -                               if (Flags & NV_RX_FRAMINGERR) {
2851 -                                       if (Flags & NV_RX_SUBSTRACT1) {
2852 -                                               len--;
2853 +                               if ((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2) { /* ip and tcp */
2854 +                                       skb->ip_summed = CHECKSUM_UNNECESSARY;
2855 +                               } else {
2856 +                                       if ((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
2857 +                                           (Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
2858 +                                               skb->ip_summed = CHECKSUM_UNNECESSARY;
2859                                         }
2860                                 }
2861 -                       }
2862 -               } else {
2863 -                       if (!(Flags & NV_RX2_DESCRIPTORVALID))
2864 +                       } else {
2865 +                               dev_kfree_skb(skb);
2866                                 goto next_pkt;
2867 +                       }
2868 +               }
2869  
2870 -                       if (Flags & NV_RX2_ERROR) {
2871 -                               if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
2872 -                                       np->stats.rx_errors++;
2873 -                                       goto next_pkt;
2874 -                               }
2875 -                               if (Flags & NV_RX2_CRCERR) {
2876 -                                       np->stats.rx_crc_errors++;
2877 -                                       np->stats.rx_errors++;
2878 -                                       goto next_pkt;
2879 -                               }
2880 -                               if (Flags & NV_RX2_OVERFLOW) {
2881 -                                       np->stats.rx_over_errors++;
2882 -                                       np->stats.rx_errors++;
2883 -                                       goto next_pkt;
2884 -                               }
2885 +               /* got a valid packet - forward it to the network core */
2886 +               dprintk(KERN_DEBUG "%s: nv_rx_process:NVLAN rx done\n", dev->name);
2887 +               skb_put(skb, len);
2888 +               skb->protocol = eth_type_trans(skb, dev);
2889 +               netif_rx(skb);
2890 +               dev->last_rx = jiffies;
2891 +               np->stats.rx_packets++;
2892 +               np->stats.rx_bytes += len;
2893 +next_pkt:
2894 +               if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2895 +                       np->get_rx.orig = np->first_rx.orig;
2896 +               if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2897 +                       np->get_rx_ctx = np->first_rx_ctx;
2898 +       }
2899 +}
2900 +
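
The next_pkt bookkeeping above replaces modular arithmetic on a fixed RX_RING with first/last pointer comparisons. A minimal sketch of that post-increment wraparound, with ring_slot and ring_advance() as hypothetical stand-ins for np->get_rx.orig and the inline updates:

    struct ring_slot { unsigned int flaglen; };

    static struct ring_slot *ring_advance(struct ring_slot *cur,
                                          struct ring_slot *first,
                                          struct ring_slot *last)
    {
            if (cur++ == last)      /* consumed the final slot: wrap */
                    cur = first;
            return cur;
    }
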
2901 +static inline int nv_rx_process_optimized(struct net_device *dev, int max_work)
2902 +{
2903 +       struct fe_priv *np = get_nvpriv(dev);
2904 +       u32 Flags;
2905 +       u32 vlanflags = 0;
2906 +       u32 rx_processed_cnt = 0;
2907 +       struct sk_buff *skb;
2908 +       int len;
2909 +
2910 +       while((np->get_rx.ex != np->put_rx.ex) &&
2911 +             !((Flags = le32_to_cpu(np->get_rx.ex->FlagLen)) & NV_RX2_AVAIL) &&
2912 +             (rx_processed_cnt++ < max_work)) {
2913 +
2914 +               pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2915 +                                np->get_rx_ctx->dma_len,
2916 +                                PCI_DMA_FROMDEVICE);
2917 +
2918 +               skb = np->get_rx_ctx->skb;
2919 +               np->get_rx_ctx->skb = NULL;
2920 +
2921 +               /* look at what we actually got: */
2922 +               if (likely(Flags & NV_RX2_DESCRIPTORVALID)) {
2923 +                       len = Flags & LEN_MASK_V2;
2924 +                       if (unlikely(Flags & NV_RX2_ERROR)) {
2925                                 if (Flags & NV_RX2_ERROR4) {
2926 -                                       len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
2927 +                                       len = nv_getlen(dev, skb->data, len);
2928                                         if (len < 0) {
2929 -                                               np->stats.rx_errors++;
2930 +                                               np->rx_len_errors++;
2931 +                                               dev_kfree_skb(skb);
2932                                                 goto next_pkt;
2933                                         }
2934                                 }
2935                                 /* framing errors are soft errors */
2936 -                               if (Flags & NV_RX2_FRAMINGERR) {
2937 +                               else if (Flags & NV_RX2_FRAMINGERR) {
2938                                         if (Flags & NV_RX2_SUBSTRACT1) {
2939                                                 len--;
2940                                         }
2941                                 }
2942 +                               /* the rest are hard errors */
2943 +                               else {
2944 +                                       dev_kfree_skb(skb);
2945 +                                       goto next_pkt;
2946 +                               }
2947 +                       }
2948 +
2949 +                       if (likely(np->rx_csum)) {
2950 +                               if (likely((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)) {
2951 +                                       /* ip and tcp */
2952 +                                       skb->ip_summed = CHECKSUM_UNNECESSARY;
2953 +                               } else {
2954 +                                       if ((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
2955 +                                           (Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
2956 +                                               skb->ip_summed = CHECKSUM_UNNECESSARY;
2957 +                                       }
2958 +                               }
2959                         }
2960 -                       Flags &= NV_RX2_CHECKSUMMASK;
2961 -                       if (Flags == NV_RX2_CHECKSUMOK1 ||
2962 -                                       Flags == NV_RX2_CHECKSUMOK2 ||
2963 -                                       Flags == NV_RX2_CHECKSUMOK3) {
2964 -                               dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
2965 -                               np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
2966 +                       dprintk(KERN_DEBUG "%s: nv_rx_process_optimized:NVLAN rx done\n", dev->name);
2967 +
2968 +                       /* got a valid packet - forward it to the network core */
2969 +                       skb_put(skb, len);
2970 +                       skb->protocol = eth_type_trans(skb, dev);
2971 +                       prefetch(skb->data);
2972 +
2973 +                       if (likely(!np->vlangrp)) {
2974 +                               netif_rx(skb);
2975                         } else {
2976 -                               dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
2977 +                               vlanflags = le32_to_cpu(np->get_rx.ex->PacketBufferLow);
2978 +                               if (vlanflags & NV_RX3_VLAN_TAG_PRESENT)
2979 +                                       vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
2980 +                               else
2981 +                                       netif_rx(skb);
2982                         }
2983 -               }
2984 -               /* got a valid packet - forward it to the network core */
2985 -               skb = np->rx_skbuff[i];
2986 -               np->rx_skbuff[i] = NULL;
2987  
2988 -               skb_put(skb, len);
2989 -               skb->protocol = eth_type_trans(skb, dev);
2990 -               dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
2991 -                                       dev->name, np->cur_rx, len, skb->protocol);
2992 -               netif_rx(skb);
2993 -               dev->last_rx = jiffies;
2994 -               np->stats.rx_packets++;
2995 -               np->stats.rx_bytes += len;
2996 +                       dev->last_rx = jiffies;
2997 +                       np->stats.rx_packets++;
2998 +                       np->stats.rx_bytes += len;
2999 +               } else {
3000 +                       dev_kfree_skb(skb);
3001 +               }
3002  next_pkt:
3003 -               np->cur_rx++;
3004 +               if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
3005 +                       np->get_rx.ex = np->first_rx.ex;
3006 +               if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
3007 +                       np->get_rx_ctx = np->first_rx_ctx;
3008         }
3009 +       return rx_processed_cnt;
3010  }
3011  
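
Both receive paths above fold the hardware checksum verdict into one test: any of the three CHECKSUMOK codes lets the stack skip software verification via CHECKSUM_UNNECESSARY. A sketch of that mapping; the CSUM_* values are illustrative stand-ins for the NV_RX2_CHECKSUM* constants defined earlier in the driver:

    #include <stdint.h>

    #define CSUM_MASK 0x1C000000u
    #define CSUM_OK1  0x10000000u
    #define CSUM_OK2  0x14000000u
    #define CSUM_OK3  0x18000000u

    static int hw_csum_verified(uint32_t flags)
    {
            uint32_t c = flags & CSUM_MASK;

            return c == CSUM_OK1 || c == CSUM_OK2 || c == CSUM_OK3;
    }
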
3012  static void set_bufsize(struct net_device *dev)
3013  {
3014 -       struct fe_priv *np = netdev_priv(dev);
3015 +       struct fe_priv *np = get_nvpriv(dev);
3016  
3017         if (dev->mtu <= ETH_DATA_LEN)
3018                 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
3019 @@ -1499,7 +3011,7 @@
3020   */
3021  static int nv_change_mtu(struct net_device *dev, int new_mtu)
3022  {
3023 -       struct fe_priv *np = netdev_priv(dev);
3024 +       struct fe_priv *np = get_nvpriv(dev);
3025         int old_mtu;
3026  
3027         if (new_mtu < 64 || new_mtu > np->pkt_limit)
3028 @@ -1523,8 +3035,12 @@
3029                  * guessed, there is probably a simpler approach.
3030                  * Changing the MTU is a rare event, it shouldn't matter.
3031                  */
3032 -               disable_irq(dev->irq);
3033 +               nv_disable_irq(dev);
3034 +#if NVVER > FEDORA5
3035 +               netif_tx_lock_bh(dev);
3036 +#else
3037                 spin_lock_bh(&dev->xmit_lock);
3038 +#endif
3039                 spin_lock(&np->lock);
3040                 /* stop engines */
3041                 nv_stop_rx(dev);
3042 @@ -1534,22 +3050,15 @@
3043                 nv_drain_rx(dev);
3044                 nv_drain_tx(dev);
3045                 /* reinit driver view of the rx queue */
3046 -               nv_init_rx(dev);
3047 -               nv_init_tx(dev);
3048 -               /* alloc new rx buffers */
3049                 set_bufsize(dev);
3050 -               if (nv_alloc_rx(dev)) {
3051 +               if (nv_init_ring(dev)) {
3052                         if (!np->in_shutdown)
3053                                 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3054                 }
3055                 /* reinit nic view of the rx queue */
3056                 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3057 -               writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
3058 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
3059 -                       writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
3060 -               else
3061 -                       writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
3062 -               writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
3063 +               setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3064 +               writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3065                         base + NvRegRingSizes);
3066                 pci_push(base);
3067                 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3068 @@ -1559,8 +3068,12 @@
3069                 nv_start_rx(dev);
3070                 nv_start_tx(dev);
3071                 spin_unlock(&np->lock);
3072 +#if NVVER > FEDORA5
3073 +               netif_tx_unlock_bh(dev);
3074 +#else
3075                 spin_unlock_bh(&dev->xmit_lock);
3076 -               enable_irq(dev->irq);
3077 +#endif
3078 +               nv_enable_irq(dev);
3079         }
3080         return 0;
3081  }
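
nv_change_mtu above only touches a running interface after quiescing it, and reverses the steps on the way back up. A compilable sketch of that ordering, with no-op stand-ins for the nv_* helpers (all names below are illustrative):

    static void irqs_off(void) {}
    static void irqs_on(void) {}
    static void engines_stop(void) {}
    static void engines_start(void) {}
    static void rings_drain(void) {}
    static int  rings_reinit(void) { return 0; }
    static void rings_program_hw(void) {}
    static void arm_oom_timer(void) {}

    static void change_mtu_quiesced(void)
    {
            irqs_off();               /* nv_disable_irq + tx lock + np->lock */
            engines_stop();           /* nv_stop_rx / nv_stop_tx, nv_txrx_reset */
            rings_drain();            /* nv_drain_rx / nv_drain_tx */
            if (rings_reinit() != 0)  /* nv_init_ring */
                    arm_oom_timer();  /* mod_timer(&np->oom_kick, ...) */
            rings_program_hw();       /* setup_hw_rings + NvRegRingSizes */
            engines_start();          /* nv_start_rx / nv_start_tx */
            irqs_on();                /* unlock + nv_enable_irq */
    }

Getting this order wrong risks the NIC DMAing into rx buffers that have already been freed.
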
3082 @@ -1571,11 +3084,11 @@
3083         u32 mac[2];
3084  
3085         mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
3086 -                       (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
3087 +               (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
3088         mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
3089 -
3090         writel(mac[0], base + NvRegMacAddrA);
3091         writel(mac[1], base + NvRegMacAddrB);
3092 +
3093  }
3094  
3095  /*
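
nv_copy_mac_to_hw above packs the six dev_addr bytes little-endian into the two 32-bit MacAddrA/MacAddrB registers. The same packing as a standalone sketch (mac_to_regs is a hypothetical name):

    #include <stdint.h>

    static void mac_to_regs(const uint8_t addr[6], uint32_t mac[2])
    {
            mac[0] = ((uint32_t)addr[0] << 0)  | ((uint32_t)addr[1] << 8) |
                     ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
            mac[1] = ((uint32_t)addr[4] << 0)  | ((uint32_t)addr[5] << 8);
    }
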
3096 @@ -1584,17 +3097,22 @@
3097   */
3098  static int nv_set_mac_address(struct net_device *dev, void *addr)
3099  {
3100 -       struct fe_priv *np = netdev_priv(dev);
3101 +       struct fe_priv *np = get_nvpriv(dev);
3102         struct sockaddr *macaddr = (struct sockaddr*)addr;
3103  
3104         if(!is_valid_ether_addr(macaddr->sa_data))
3105                 return -EADDRNOTAVAIL;
3106  
3107 +       dprintk("%s:%s\n",dev->name,__FUNCTION__);
3108         /* synchronized against open : rtnl_lock() held by caller */
3109         memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
3110  
3111         if (netif_running(dev)) {
3112 +#if NVVER > FEDORA5
3113 +               netif_tx_lock_bh(dev);
3114 +#else
3115                 spin_lock_bh(&dev->xmit_lock);
3116 +#endif
3117                 spin_lock_irq(&np->lock);
3118  
3119                 /* stop rx engine */
3120 @@ -1606,7 +3124,11 @@
3121                 /* restart rx engine */
3122                 nv_start_rx(dev);
3123                 spin_unlock_irq(&np->lock);
3124 +#if NVVER > FEDORA5
3125 +               netif_tx_unlock_bh(dev);
3126 +#else
3127                 spin_unlock_bh(&dev->xmit_lock);
3128 +#endif
3129         } else {
3130                 nv_copy_mac_to_hw(dev);
3131         }
3132 @@ -1619,20 +3141,20 @@
3133   */
3134  static void nv_set_multicast(struct net_device *dev)
3135  {
3136 -       struct fe_priv *np = netdev_priv(dev);
3137 +       struct fe_priv *np = get_nvpriv(dev);
3138         u8 __iomem *base = get_hwbase(dev);
3139         u32 addr[2];
3140         u32 mask[2];
3141 -       u32 pff;
3142 +       u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
3143  
3144         memset(addr, 0, sizeof(addr));
3145         memset(mask, 0, sizeof(mask));
3146  
3147         if (dev->flags & IFF_PROMISC) {
3148 -               printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
3149 -               pff = NVREG_PFF_PROMISC;
3150 +               dprintk(KERN_DEBUG "%s: Promiscuous mode enabled.\n", dev->name);
3151 +               pff |= NVREG_PFF_PROMISC;
3152         } else {
3153 -               pff = NVREG_PFF_MYADDR;
3154 +               pff |= NVREG_PFF_MYADDR;
3155  
3156                 if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
3157                         u32 alwaysOff[2];
3158 @@ -1677,6 +3199,35 @@
3159         spin_unlock_irq(&np->lock);
3160  }
3161  
3162 +static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3163 +{
3164 +       struct fe_priv *np = get_nvpriv(dev);
3165 +       u8 __iomem *base = get_hwbase(dev);
3166 +
3167 +       np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3168 +
3169 +       if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3170 +               u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3171 +               if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3172 +                       writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3173 +                       np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3174 +               } else {
3175 +                       writel(pff, base + NvRegPacketFilterFlags);
3176 +               }
3177 +       }
3178 +       if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3179 +               u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3180 +               if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3181 +                       writel(NVREG_TX_PAUSEFRAME_ENABLE,  base + NvRegTxPauseFrame);
3182 +                       writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3183 +                       np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3184 +               } else {
3185 +                       writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
3186 +                       writel(regmisc, base + NvRegMisc1);
3187 +               }
3188 +       }
3189 +}
3190 +
3191  /**
3192   * nv_update_linkspeed: Setup the MAC according to the link partner
3193   * @dev: Network device to be configured
3194 @@ -1690,14 +3241,16 @@
3195   */
3196  static int nv_update_linkspeed(struct net_device *dev)
3197  {
3198 -       struct fe_priv *np = netdev_priv(dev);
3199 +       struct fe_priv *np = get_nvpriv(dev);
3200         u8 __iomem *base = get_hwbase(dev);
3201 -       int adv, lpa;
3202 +       int adv = 0;
3203 +       int lpa = 0;
3204 +       int adv_lpa, adv_pause, lpa_pause;
3205         int newls = np->linkspeed;
3206         int newdup = np->duplex;
3207         int mii_status;
3208         int retval = 0;
3209 -       u32 control_1000, status_1000, phyreg;
3210 +       u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3211  
3212         /* BMSR_LSTATUS is latched, read it twice:
3213          * we want the current value.
3214 @@ -1714,7 +3267,7 @@
3215                 goto set_speed;
3216         }
3217  
3218 -       if (np->autoneg == 0) {
3219 +       if (np->autoneg == AUTONEG_DISABLE) {
3220                 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
3221                                 dev->name, np->fixed_mode);
3222                 if (np->fixed_mode & LPA_100FULL) {
3223 @@ -1743,10 +3296,14 @@
3224                 goto set_speed;
3225         }
3226  
3227 +       adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3228 +       lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3229 +       dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
3230 +                               dev->name, adv, lpa);
3231         retval = 1;
3232         if (np->gigabit == PHY_GIGABIT) {
3233 -               control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
3234 -               status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ);
3235 +               control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3236 +               status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3237  
3238                 if ((control_1000 & ADVERTISE_1000FULL) &&
3239                         (status_1000 & LPA_1000FULL)) {
3240 @@ -1758,27 +3315,22 @@
3241                 }
3242         }
3243  
3244 -       adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3245 -       lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3246 -       dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
3247 -                               dev->name, adv, lpa);
3248 -
3249         /* FIXME: handle parallel detection properly */
3250 -       lpa = lpa & adv;
3251 -       if (lpa & LPA_100FULL) {
3252 +       adv_lpa = lpa & adv;
3253 +       if (adv_lpa & LPA_100FULL) {
3254                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3255                 newdup = 1;
3256 -       } else if (lpa & LPA_100HALF) {
3257 +       } else if (adv_lpa & LPA_100HALF) {
3258                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3259                 newdup = 0;
3260 -       } else if (lpa & LPA_10FULL) {
3261 +       } else if (adv_lpa & LPA_10FULL) {
3262                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3263                 newdup = 1;
3264 -       } else if (lpa & LPA_10HALF) {
3265 +       } else if (adv_lpa & LPA_10HALF) {
3266                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3267                 newdup = 0;
3268         } else {
3269 -               dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
3270 +               dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
3271                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3272                 newdup = 0;
3273         }
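
The chain above is standard autoneg resolution: AND the local advertisement with the link partner's abilities and pick the best surviving mode, falling back to 10 Mbit half duplex when nothing matches. A sketch using the standard MII LPA_* bit values (resolve_link is a hypothetical helper):

    #define LPA_10HALF  0x0020
    #define LPA_10FULL  0x0040
    #define LPA_100HALF 0x0080
    #define LPA_100FULL 0x0100

    static void resolve_link(int adv, int lpa, int *speed, int *full_duplex)
    {
            int adv_lpa = adv & lpa;        /* modes both ends can do */

            if (adv_lpa & LPA_100FULL)      { *speed = 100; *full_duplex = 1; }
            else if (adv_lpa & LPA_100HALF) { *speed = 100; *full_duplex = 0; }
            else if (adv_lpa & LPA_10FULL)  { *speed = 10;  *full_duplex = 1; }
            else                            { *speed = 10;  *full_duplex = 0; }
    }
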
3274 @@ -1815,12 +3367,71 @@
3275                 phyreg |= PHY_1000;
3276         writel(phyreg, base + NvRegPhyInterface);
3277  
3278 +       if (phyreg & PHY_RGMII) {
3279 +               if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3280 +                       txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3281 +               else
3282 +                       txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3283 +       } else {
3284 +               txreg = NVREG_TX_DEFERRAL_DEFAULT;
3285 +       }
3286 +       writel(txreg, base + NvRegTxDeferral);
3287 +
3288 +       if (np->desc_ver == DESC_VER_1) {
3289 +               txreg = NVREG_TX_WM_DESC1_DEFAULT;
3290 +       } else {
3291 +               if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3292 +                       txreg = NVREG_TX_WM_DESC2_3_1000;
3293 +               else
3294 +                       txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3295 +       }
3296 +       writel(txreg, base + NvRegTxWatermark);
3297         writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
3298                 base + NvRegMisc1);
3299         pci_push(base);
3300         writel(np->linkspeed, base + NvRegLinkSpeed);
3301         pci_push(base);
3302  
3303 +       pause_flags = 0;
3304 +       /* setup pause frame */
3305 +       if (np->duplex != 0) {
3306 +               if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3307 +                       adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3308 +                       lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3309 +
3310 +                       switch (adv_pause) {
3311 +                       case (ADVERTISE_PAUSE_CAP):
3312 +                               if (lpa_pause & LPA_PAUSE_CAP) {
3313 +                                       pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3314 +                                       if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3315 +                                               pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3316 +                               }
3317 +                               break;
3318 +                       case (ADVERTISE_PAUSE_ASYM):
3319 +                               if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3320 +                               {
3321 +                                       pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3322 +                               }
3323 +                               break;
3324 +                       case (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM):
3325 +                               if (lpa_pause & LPA_PAUSE_CAP)
3326 +                               {
3327 +                                       pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3328 +                                       if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3329 +                                               pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3330 +                               }
3331 +                               if (lpa_pause == LPA_PAUSE_ASYM)
3332 +                               {
3333 +                                       pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3334 +                               }
3335 +                               break;
3336 +                       }
3337 +               } else {
3338 +                       pause_flags = np->pause_flags;
3339 +               }
3340 +       }
3341 +       nv_update_pause(dev, pause_flags);
3342 +
3343         return retval;
3344  }
3345  
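
The switch above is the flow-control resolution for full-duplex links: which pause directions get enabled depends on the symmetric/asymmetric bits both ends advertised. A compacted restatement, with hypothetical PAUSE_* macros standing in for the ADVERTISE_PAUSE_*, LPA_PAUSE_* and NV_PAUSEFRAME_* bits:

    #define PAUSE_CAP  0x1          /* symmetric pause advertised */
    #define PAUSE_ASYM 0x2          /* asymmetric pause advertised */
    #define PAUSE_RX   0x4          /* NV_PAUSEFRAME_RX_ENABLE stand-in */
    #define PAUSE_TX   0x8          /* NV_PAUSEFRAME_TX_ENABLE stand-in */

    static unsigned int resolve_pause(unsigned int adv, unsigned int lpa,
                                      int tx_requested)
    {
            unsigned int flags = 0;

            adv &= PAUSE_CAP | PAUSE_ASYM;
            lpa &= PAUSE_CAP | PAUSE_ASYM;

            switch (adv) {
            case PAUSE_CAP:                         /* symmetric only */
                    if (lpa & PAUSE_CAP) {
                            flags |= PAUSE_RX;
                            if (tx_requested)
                                    flags |= PAUSE_TX;
                    }
                    break;
            case PAUSE_ASYM:                        /* asymmetric only */
                    if (lpa == (PAUSE_CAP | PAUSE_ASYM))
                            flags |= PAUSE_TX;
                    break;
            case PAUSE_CAP | PAUSE_ASYM:
                    if (lpa & PAUSE_CAP) {
                            flags |= PAUSE_RX;
                            if (tx_requested)
                                    flags |= PAUSE_TX;
                    }
                    if (lpa == PAUSE_ASYM)
                            flags |= PAUSE_RX;
                    break;
            }
            return flags;
    }
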
3346 @@ -1858,24 +3469,28 @@
3347  static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
3348  {
3349         struct net_device *dev = (struct net_device *) data;
3350 -       struct fe_priv *np = netdev_priv(dev);
3351 +       struct fe_priv *np = get_nvpriv(dev);
3352         u8 __iomem *base = get_hwbase(dev);
3353 -       u32 events;
3354 +       u32 events,mask;
3355         int i;
3356  
3357 -       dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
3358 +       dprintk("%s:%s\n",dev->name,__FUNCTION__);
3359  
3360         for (i=0; ; i++) {
3361 -               events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3362 -               writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3363 +               if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3364 +                       events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3365 +                       writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3366 +               } else {
3367 +                       events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3368 +                       writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3369 +               }
3370                 pci_push(base);
3371                 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3372 -               if (!(events & np->irqmask))
3373 +               mask = readl(base + NvRegIrqMask);
3374 +               if (!(events & mask))
3375                         break;
3376  
3377 -               spin_lock(&np->lock);
3378                 nv_tx_done(dev);
3379 -               spin_unlock(&np->lock);
3380                 
3381                 nv_rx_process(dev);
3382                 if (nv_alloc_rx(dev)) {
3383 @@ -1907,11 +3522,16 @@
3384                 if (i > max_interrupt_work) {
3385                         spin_lock(&np->lock);
3386                         /* disable interrupts on the nic */
3387 -                       writel(0, base + NvRegIrqMask);
3388 +                       if (!(np->msi_flags & NV_MSI_X_ENABLED))
3389 +                               writel(0, base + NvRegIrqMask);
3390 +                       else
3391 +                               writel(np->irqmask, base + NvRegIrqMask);
3392                         pci_push(base);
3393  
3394 -                       if (!np->in_shutdown)
3395 +                       if (!np->in_shutdown) {
3396 +                               np->nic_poll_irq = np->irqmask;
3397                                 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3398 +                       }
3399                         printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3400                         spin_unlock(&np->lock);
3401                         break;
3402 @@ -1923,310 +3543,1950 @@
3403         return IRQ_RETVAL(i);
3404  }
3405  
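
nv_nic_irq above is a bounded-work ISR: read-and-ack the status register, service the events, and once the loop exceeds max_interrupt_work, mask the source and fall back to the nic_poll timer so a misbehaving NIC cannot monopolize the CPU. A self-contained sketch of the pattern (all names illustrative; the simulated register stands in for readl/writel on NvRegIrqStatus):

    #include <stdint.h>

    #define MAX_WORK 15

    static uint32_t pending;                   /* simulated status register */

    static uint32_t read_and_ack_status(void)
    {
            uint32_t ev = pending;
            pending = 0;                       /* ack clears the events */
            return ev;
    }

    static void service_events(uint32_t events) { (void)events; }
    static void defer_to_poll_timer(void) { /* mask irq, mod_timer(...) */ }

    int isr(void)
    {
            int i;

            for (i = 0; ; i++) {
                    uint32_t events = read_and_ack_status();

                    if (!events)
                            break;             /* not our interrupt */
                    service_events(events);
                    if (i > MAX_WORK) {        /* runaway source: back off */
                            defer_to_poll_timer();
                            break;
                    }
            }
            return i != 0;                     /* IRQ_RETVAL(i) analogue */
    }
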
3406 -static void nv_do_nic_poll(unsigned long data)
3407 +#define TX_WORK_PER_LOOP  64
3408 +#define RX_WORK_PER_LOOP  64
3409 +static irqreturn_t nv_nic_irq_optimized(int foo, void *data, struct pt_regs *regs)
3410  {
3411         struct net_device *dev = (struct net_device *) data;
3412 -       struct fe_priv *np = netdev_priv(dev);
3413 +       struct fe_priv *np = get_nvpriv(dev);
3414         u8 __iomem *base = get_hwbase(dev);
3415 +       u32 events,mask;
3416 +       int i = 1;
3417  
3418 -       disable_irq(dev->irq);
3419 -       /* FIXME: Do we need synchronize_irq(dev->irq) here? */
3420 -       /*
3421 -        * reenable interrupts on the nic, we have to do this before calling
3422 -        * nv_nic_irq because that may decide to do otherwise
3423 -        */
3424 -       writel(np->irqmask, base + NvRegIrqMask);
3425 -       pci_push(base);
3426 -       nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
3427 -       enable_irq(dev->irq);
3428 -}
3429 +       do {
3430 +               if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3431 +                       events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3432 +                       writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3433 +               } else {
3434 +                       events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3435 +                       writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3436 +               }
3437  
3438 -#ifdef CONFIG_NET_POLL_CONTROLLER
3439 -static void nv_poll_controller(struct net_device *dev)
3440 -{
3441 -       nv_do_nic_poll((unsigned long) dev);
3442 -}
3443 -#endif
3444 +               mask = readl(base + NvRegIrqMask);
3445 +               if (events & mask) {
3446  
3447 -static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3448 -{
3449 -       struct fe_priv *np = netdev_priv(dev);
3450 -       strcpy(info->driver, "forcedeth");
3451 -       strcpy(info->version, FORCEDETH_VERSION);
3452 -       strcpy(info->bus_info, pci_name(np->pci_dev));
3453 +                       nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3454 +
3455 +                       if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3456 +                               if (unlikely(nv_alloc_rx_optimized(dev))) {
3457 +                                       spin_lock(&np->lock);
3458 +                                       if (!np->in_shutdown)
3459 +                                               mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3460 +                                       spin_unlock(&np->lock);
3461 +                               }
3462 +                       }
3463 +                       if (unlikely(events & NVREG_IRQ_LINK)) {
3464 +                               spin_lock(&np->lock);
3465 +                               nv_link_irq(dev);
3466 +                               spin_unlock(&np->lock);
3467 +                       }
3468 +                       if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3469 +                               spin_lock(&np->lock);
3470 +                               nv_linkchange(dev);
3471 +                               spin_unlock(&np->lock);
3472 +                               np->link_timeout = jiffies + LINK_TIMEOUT;
3473 +                       }
3474 +                       if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3475 +                               spin_lock(&np->lock);
3476 +                               /* disable interrupts on the nic */
3477 +                               if (!(np->msi_flags & NV_MSI_X_ENABLED))
3478 +                                       writel(0, base + NvRegIrqMask);
3479 +                               else
3480 +                                       writel(np->irqmask, base + NvRegIrqMask);
3481 +                               pci_push(base);
3482 +
3483 +                               if (!np->in_shutdown) {
3484 +                                       np->nic_poll_irq = np->irqmask;
3485 +                                       np->recover_error = 1;
3486 +                                       mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3487 +                               }
3488 +                               spin_unlock(&np->lock);
3489 +                               break;
3490 +                       }
3491 +               } else
3492 +                       break;
3493 +       }
3494 +       while (i++ <= max_interrupt_work);
3495 +
3496 +       return IRQ_RETVAL(i);
3497  }
3498  
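
Every handler above starts the same way: pick NvRegMSIXIrqStatus when MSI-X is enabled, NvRegIrqStatus otherwise, then write the mask back to acknowledge the events. A sketch of that read-and-ack step (the mask value and names are illustrative):

    #include <stdint.h>

    #define IRQSTAT_MASK 0x000001ffu

    static uint32_t read_and_ack(volatile uint32_t *legacy_stat,
                                 volatile uint32_t *msix_stat,
                                 int msix_enabled)
    {
            volatile uint32_t *stat = msix_enabled ? msix_stat : legacy_stat;
            uint32_t events = *stat & IRQSTAT_MASK;

            *stat = IRQSTAT_MASK;       /* write-back acks the events */
            return events;
    }
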
3499 -static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3500 +static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
3501  {
3502 -       struct fe_priv *np = netdev_priv(dev);
3503 -       wolinfo->supported = WAKE_MAGIC;
3504 +       struct net_device *dev = (struct net_device *) data;
3505 +       struct fe_priv *np = get_nvpriv(dev);
3506 +       u8 __iomem *base = get_hwbase(dev);
3507 +       u32 events;
3508 +       int i;
3509  
3510 -       spin_lock_irq(&np->lock);
3511 -       if (np->wolenabled)
3512 -               wolinfo->wolopts = WAKE_MAGIC;
3513 -       spin_unlock_irq(&np->lock);
3514 +       dprintk("%s:%s\n",dev->name,__FUNCTION__);
3515 +
3516 +       for (i=0; ; i++) {
3517 +               events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3518 +               writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3519 +               dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3520 +               if (!(events & np->irqmask))
3521 +                       break;
3522 +
3523 +               nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3524 +               
3525 +               if (events & (NVREG_IRQ_TX_ERR)) {
3526 +                       dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3527 +                                               dev->name, events);
3528 +               }
3529 +               if (i > max_interrupt_work) {
3530 +                       spin_lock_irq(&np->lock);
3531 +                       /* disable interrupts on the nic */
3532 +                       writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3533 +                       pci_push(base);
3534 +
3535 +                       if (!np->in_shutdown) {
3536 +                               np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3537 +                               mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3538 +                       }
3539 +                       printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
3540 +                       spin_unlock_irq(&np->lock);
3541 +                       break;
3542 +               }
3543 +
3544 +       }
3545 +       dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3546 +
3547 +       return IRQ_RETVAL(i);
3548  }
3549  
3550 -static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3551 +static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
3552  {
3553 -       struct fe_priv *np = netdev_priv(dev);
3554 +       struct net_device *dev = (struct net_device *) data;
3555 +       struct fe_priv *np = get_nvpriv(dev);
3556         u8 __iomem *base = get_hwbase(dev);
3557 +       u32 events;
3558 +       int i;
3559 +
3560 +       dprintk("%s:%s\n",dev->name,__FUNCTION__);
3561 +
3562 +       for (i=0; ; i++) {
3563 +               events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3564 +               writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3565 +               dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3566 +               if (!(events & np->irqmask))
3567 +                       break;
3568 +               
3569 +               if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3570 +                       if (unlikely(nv_alloc_rx_optimized(dev))) {
3571 +                               spin_lock_irq(&np->lock);
3572 +                               if (!np->in_shutdown)
3573 +                                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3574 +                               spin_unlock_irq(&np->lock);
3575 +                       }
3576 +               }
3577 +               
3578 +               if (i > max_interrupt_work) {
3579 +                       spin_lock_irq(&np->lock);
3580 +                       /* disable interrupts on the nic */
3581 +                       writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3582 +                       pci_push(base);
3583 +
3584 +                       if (!np->in_shutdown) {
3585 +                               np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3586 +                               mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3587 +                       }
3588 +                       printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
3589 +                       spin_unlock_irq(&np->lock);
3590 +                       break;
3591 +               }
3592  
3593 -       spin_lock_irq(&np->lock);
3594 -       if (wolinfo->wolopts == 0) {
3595 -               writel(0, base + NvRegWakeUpFlags);
3596 -               np->wolenabled = 0;
3597 -       }
3598 -       if (wolinfo->wolopts & WAKE_MAGIC) {
3599 -               writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
3600 -               np->wolenabled = 1;
3601         }
3602 -       spin_unlock_irq(&np->lock);
3603 -       return 0;
3604 +       dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3605 +
3606 +       return IRQ_RETVAL(i);
3607  }
3608  
3609 -static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3610 +static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
3611  {
3612 -       struct fe_priv *np = netdev_priv(dev);
3613 -       int adv;
3614 +       struct net_device *dev = (struct net_device *) data;
3615 +       struct fe_priv *np = get_nvpriv(dev);
3616 +       u8 __iomem *base = get_hwbase(dev);
3617 +       u32 events;
3618 +       int i;
3619  
3620 -       spin_lock_irq(&np->lock);
3621 -       ecmd->port = PORT_MII;
3622 -       if (!netif_running(dev)) {
3623 -               /* We do not track link speed / duplex setting if the
3624 -                * interface is disabled. Force a link check */
3625 -               nv_update_linkspeed(dev);
3626 -       }
3627 -       switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
3628 -               case NVREG_LINKSPEED_10:
3629 -                       ecmd->speed = SPEED_10;
3630 +       dprintk("%s:%s\n",dev->name,__FUNCTION__);
3631 +
3632 +       for (i=0; ; i++) {
3633 +               events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3634 +               writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3635 +               dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3636 +               if (!(events & np->irqmask))
3637                         break;
3638 -               case NVREG_LINKSPEED_100:
3639 -                       ecmd->speed = SPEED_100;
3640 +               
3641 +               if (events & NVREG_IRQ_LINK) {
3642 +                       spin_lock_irq(&np->lock);
3643 +                       nv_link_irq(dev);
3644 +                       spin_unlock_irq(&np->lock);
3645 +               }
3646 +               if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3647 +                       spin_lock_irq(&np->lock);
3648 +                       nv_linkchange(dev);
3649 +                       spin_unlock_irq(&np->lock);
3650 +                       np->link_timeout = jiffies + LINK_TIMEOUT;
3651 +               }
3652 +               if (events & NVREG_IRQ_RECOVER_ERROR) {
3653 +                       spin_lock_irq(&np->lock);
3654 +                       /* disable interrupts on the nic */
3655 +                       writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3656 +                       pci_push(base);
3657 +                       
3658 +                       if (!np->in_shutdown) {
3659 +                               np->nic_poll_irq |= NVREG_IRQ_OTHER;
3660 +                               np->recover_error = 1;
3661 +                               mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3662 +                       }
3663 +                       spin_unlock_irq(&np->lock);
3664                         break;
3665 -               case NVREG_LINKSPEED_1000:
3666 -                       ecmd->speed = SPEED_1000;
3667 +               }
3668 +               if (events & (NVREG_IRQ_UNKNOWN)) {
3669 +                       printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3670 +                                               dev->name, events);
3671 +               }
3672 +               if (i > max_interrupt_work) {
3673 +                       spin_lock_irq(&np->lock);
3674 +                       /* disable interrupts on the nic */
3675 +                       writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3676 +                       pci_push(base);
3677 +
3678 +                       if (!np->in_shutdown) {
3679 +                               np->nic_poll_irq |= NVREG_IRQ_OTHER;
3680 +                               mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3681 +                       }
3682 +                       printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
3683 +                       spin_unlock_irq(&np->lock);
3684                         break;
3685 +               }
3686 +
3687         }
3688 -       ecmd->duplex = DUPLEX_HALF;
3689 -       if (np->duplex)
3690 -               ecmd->duplex = DUPLEX_FULL;
3691 +       dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3692  
3693 -       ecmd->autoneg = np->autoneg;
3694 +       return IRQ_RETVAL(i);
3695 +}
3696  
3697 -       ecmd->advertising = ADVERTISED_MII;
3698 -       if (np->autoneg) {
3699 -               ecmd->advertising |= ADVERTISED_Autoneg;
3700 -               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3701 -       } else {
3702 -               adv = np->fixed_mode;
3703 -       }
3704 -       if (adv & ADVERTISE_10HALF)
3705 -               ecmd->advertising |= ADVERTISED_10baseT_Half;
3706 -       if (adv & ADVERTISE_10FULL)
3707 -               ecmd->advertising |= ADVERTISED_10baseT_Full;
3708 -       if (adv & ADVERTISE_100HALF)
3709 -               ecmd->advertising |= ADVERTISED_100baseT_Half;
3710 -       if (adv & ADVERTISE_100FULL)
3711 -               ecmd->advertising |= ADVERTISED_100baseT_Full;
3712 -       if (np->autoneg && np->gigabit == PHY_GIGABIT) {
3713 -               adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
3714 -               if (adv & ADVERTISE_1000FULL)
3715 -                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
3716 -       }
3717 +static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs)
3718 +{
3719 +       struct net_device *dev = (struct net_device *) data;
3720 +       struct fe_priv *np = get_nvpriv(dev);
3721 +       u8 __iomem *base = get_hwbase(dev);
3722 +       u32 events;
3723  
3724 -       ecmd->supported = (SUPPORTED_Autoneg |
3725 -               SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
3726 -               SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
3727 -               SUPPORTED_MII);
3728 -       if (np->gigabit == PHY_GIGABIT)
3729 -               ecmd->supported |= SUPPORTED_1000baseT_Full;
3730 +       dprintk(KERN_DEBUG "%s:%s\n",dev->name,__FUNCTION__);
3731  
3732 -       ecmd->phy_address = np->phyaddr;
3733 -       ecmd->transceiver = XCVR_EXTERNAL;
3734 +       if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3735 +               events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3736 +               writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3737 +       } else {
3738 +               events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3739 +               writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3740 +       }
3741 +       pci_push(base);
3742 +       dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3743 +       if (!(events & NVREG_IRQ_TIMER))
3744 +               return IRQ_RETVAL(0);
3745 +       
3746 +       spin_lock(&np->lock);
3747 +       np->intr_test = 1;
3748 +       spin_unlock(&np->lock);
3749 +               
3750 +       dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3751  
3752 -       /* ignore maxtxpkt, maxrxpkt for now */
3753 -       spin_unlock_irq(&np->lock);
3754 -       return 0;
3755 +       return IRQ_RETVAL(1);
3756  }
3757  
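
nv_nic_irq_test above exists only to prove the interrupt line is wired up: it acks the timer event and raises np->intr_test, which the code that armed the test then polls. A sketch of that handshake (IRQ_TIMER and the flag name are illustrative stand-ins):

    #include <stdint.h>

    #define IRQ_TIMER 0x0020u               /* NVREG_IRQ_TIMER stand-in */

    static volatile int intr_test_seen;     /* np->intr_test analogue */

    static int test_isr(uint32_t events)
    {
            if (!(events & IRQ_TIMER))
                    return 0;               /* not the test interrupt */
            intr_test_seen = 1;
            return 1;
    }
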
3758 -static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3759 +#ifdef CONFIG_PCI_MSI
3760 +static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3761  {
3762 -       struct fe_priv *np = netdev_priv(dev);
3763 +       u8 __iomem *base = get_hwbase(dev);
3764 +       int i;
3765 +       u32 msixmap = 0;
3766  
3767 -       if (ecmd->port != PORT_MII)
3768 -               return -EINVAL;
3769 -       if (ecmd->transceiver != XCVR_EXTERNAL)
3770 -               return -EINVAL;
3771 -       if (ecmd->phy_address != np->phyaddr) {
3772 -               /* TODO: support switching between multiple phys. Should be
3773 -                * trivial, but not enabled due to lack of test hardware. */
3774 -               return -EINVAL;
3775 +       /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
3776 +        * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3777 +        * the remaining 8 interrupts.
3778 +        */
3779 +       for (i = 0; i < 8; i++) {
3780 +               if ((irqmask >> i) & 0x1) {
3781 +                       msixmap |= vector << (i << 2);
3782 +               }
3783         }
3784 -       if (ecmd->autoneg == AUTONEG_ENABLE) {
3785 -               u32 mask;
3786 +       writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3787  
3788 -               mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3789 -                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
3790 -               if (np->gigabit == PHY_GIGABIT)
3791 -                       mask |= ADVERTISED_1000baseT_Full;
3792 +       msixmap = 0;
3793 +       for (i = 0; i < 8; i++) {
3794 +               if ((irqmask >> (i + 8)) & 0x1) {
3795 +                       msixmap |= vector << (i << 2);
3796 +               }
3797 +       }
3798 +       writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3799 +}
3800 +#endif
3801  
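
set_msix_vector_map above packs one 4-bit vector number per interrupt bit: MSIXMap0 covers interrupt bits 0-7 and MSIXMap1 bits 8-15, each bit owning a nibble. A sketch of the per-register packing (msix_map_word is a hypothetical helper; pass base_bit 0 for Map0 and 8 for Map1, then OR the result into the register):

    #include <stdint.h>

    static uint32_t msix_map_word(uint32_t vector, uint32_t irqmask,
                                  int base_bit)
    {
            uint32_t msixmap = 0;
            int i;

            for (i = 0; i < 8; i++) {
                    if ((irqmask >> (base_bit + i)) & 0x1)
                            msixmap |= vector << (i << 2);  /* nibble i */
            }
            return msixmap;
    }
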
3802 -               if ((ecmd->advertising & mask) == 0)
3803 -                       return -EINVAL;
3804 +static int nv_request_irq(struct net_device *dev, int intr_test)
3805 +{
3806 +       struct fe_priv *np = get_nvpriv(dev);
3807 +       int ret = 1;
3808  
3809 -       } else if (ecmd->autoneg == AUTONEG_DISABLE) {
3810 -               /* Note: autonegotiation disable, speed 1000 intentionally
3811 -                * forbidden - noone should need that. */
3812 +#if NVVER > SLES9
3813 +       u8 __iomem *base = get_hwbase(dev);
3814 +       int i;
3815  
3816 -               if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
3817 -                       return -EINVAL;
3818 -               if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
3819 +       if (np->msi_flags & NV_MSI_X_CAPABLE) {
3820 +               for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3821 +                       np->msi_x_entry[i].entry = i;
3822 +               }
3823 +               if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
3824 +                       np->msi_flags |= NV_MSI_X_ENABLED;
3825 +                       if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3826 +                               /* Request irq for rx handling */
3827 +                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
3828 +                                       printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3829 +                                       pci_disable_msix(np->pci_dev);
3830 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3831 +                                       goto out_err;
3832 +                               }
3833 +                               /* Request irq for tx handling */
3834 +                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
3835 +                                       printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3836 +                                       pci_disable_msix(np->pci_dev);
3837 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3838 +                                       goto out_free_rx;
3839 +                               }
3840 +                               /* Request irq for link and timer handling */
3841 +                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
3842 +                                       printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3843 +                                       pci_disable_msix(np->pci_dev);
3844 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3845 +                                       goto out_free_tx;
3846 +                               }
3847 +                               /* map interrupts to their respective vector */
3848 +                               writel(0, base + NvRegMSIXMap0);
3849 +                               writel(0, base + NvRegMSIXMap1);
3850 +#ifdef CONFIG_PCI_MSI
3851 +                               set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3852 +                               set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3853 +                               set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3854 +#endif
3855 +                       } else {
3856 +                               /* Request irq for all interrupts */
3857 +                               if ((!intr_test && np->desc_ver == DESC_VER_3 &&
3858 +                                    request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_optimized, SA_SHIRQ, dev->name, dev) != 0) ||
3859 +                                   (!intr_test && np->desc_ver != DESC_VER_3 &&
3860 +                                    request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
3861 +                                   (intr_test &&
3862 +                                    request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
3863 +                                       printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3864 +                                       pci_disable_msix(np->pci_dev);
3865 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3866 +                                       goto out_err;
3867 +                               }
3868 +
3869 +                               /* map interrupts to vector 0 */
3870 +                               writel(0, base + NvRegMSIXMap0);
3871 +                               writel(0, base + NvRegMSIXMap1);
3872 +                       }
3873 +               }
3874 +       }
3875 +       if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3876 +               if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3877 +                       np->msi_flags |= NV_MSI_ENABLED;
3878 +                       if ((!intr_test && np->desc_ver == DESC_VER_3 &&
3879 +                            request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, SA_SHIRQ, dev->name, dev) != 0) ||
3880 +                           (!intr_test && np->desc_ver != DESC_VER_3 &&
3881 +                            request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
3882 +                           (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
3883 +                               printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3884 +                               pci_disable_msi(np->pci_dev);
3885 +                               np->msi_flags &= ~NV_MSI_ENABLED;
3886 +                               goto out_err;
3887 +                       }
3888 +
3889 +                       /* map interrupts to vector 0 */
3890 +                       writel(0, base + NvRegMSIMap0);
3891 +                       writel(0, base + NvRegMSIMap1);
3892 +                       /* enable msi vector 0 */
3893 +                       writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3894 +               }
3895 +       }
3896 +#else
3897 +#ifdef CONFIG_PCI_MSI
3898 +       u8 __iomem *base = get_hwbase(dev);
3899 +       int i;
3900 +
3901 +       if (np->msi_flags & NV_MSI_X_CAPABLE) {
3902 +               for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3903 +                       np->msi_x_entry[i].entry = i;
3904 +               }
3905 +               if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3906 +                       np->msi_flags |= NV_MSI_X_ENABLED;
3907 +                       if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3908 +                               msi_alloc_vectors(np->pci_dev, (int *)np->msi_x_entry, 2);
3909 +                               /* Request irq for rx handling */
3910 +                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
3911 +                                       printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3912 +                                       pci_disable_msi(np->pci_dev);
3913 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3914 +                                       goto out_err;
3915 +                               }
3916 +                               /* Request irq for tx handling */
3917 +                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
3918 +                                       printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3919 +                                       pci_disable_msi(np->pci_dev);
3920 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3921 +                                       goto out_free_rx;
3922 +                               }
3923 +                               /* Request irq for link and timer handling */
3924 +                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
3925 +                                       printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3926 +                                       pci_disable_msi(np->pci_dev);
3927 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3928 +                                       goto out_free_tx;
3929 +                               }
3930 +                               /* map interrupts to their respective vector */
3931 +                               writel(0, base + NvRegMSIXMap0);
3932 +                               writel(0, base + NvRegMSIXMap1);
3933 +#ifdef CONFIG_PCI_MSI
3934 +                               set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3935 +                               set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3936 +                               set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3937 +#endif
3938 +                       } else {
3939 +                               /* Request irq for all interrupts */
3940 +                               if ((!intr_test && np->desc_ver == DESC_VER_3 &&
3941 +                                    request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_optimized, SA_SHIRQ, dev->name, dev) != 0) ||
3942 +                                   (!intr_test && np->desc_ver != DESC_VER_3 &&
3943 +                                    request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
3944 +                                   (intr_test &&
3945 +                                    request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
3946 +                                       printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3947 +                                       pci_disable_msi(np->pci_dev);
3948 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3949 +                                       goto out_err;
3950 +                               }
3951 +
3952 +                               /* map interrupts to vector 0 */
3953 +                               writel(0, base + NvRegMSIXMap0);
3954 +                               writel(0, base + NvRegMSIXMap1);
3955 +                       }
3956 +               }
3957 +       }
3958 +       if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3959 +
3960 +               if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3961 +                       np->msi_flags |= NV_MSI_ENABLED;
3962 +                       if ((!intr_test && np->desc_ver == DESC_VER_3 &&
3963 +                            request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, SA_SHIRQ, dev->name, dev) != 0) ||
3964 +                           (!intr_test && np->desc_ver != DESC_VER_3 &&
3965 +                            request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
3966 +                           (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
3967 +                               printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3968 +                               pci_disable_msi(np->pci_dev);
3969 +                               np->msi_flags &= ~NV_MSI_ENABLED;
3970 +                               goto out_err;
3971 +                       }
3972 +
3973 +                       /* map interrupts to vector 0 */
3974 +                       writel(0, base + NvRegMSIMap0);
3975 +                       writel(0, base + NvRegMSIMap1);
3976 +                       /* enable msi vector 0 */
3977 +                       writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3978 +               }
3979 +       }
3980 +#endif
3981 +#endif
3982 +       if (ret != 0) {
3983 +               if ((!intr_test && np->desc_ver == DESC_VER_3 &&
3984 +                    request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, SA_SHIRQ, dev->name, dev) != 0) ||
3985 +                   (!intr_test && np->desc_ver != DESC_VER_3 &&
3986 +                    request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
3987 +                   (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0))
3988 +                       goto out_err;
3989 +
3990 +       }
3991 +
3992 +       return 0;
3993 +
3994 +#if NVVER > SLES9
3995 +out_free_tx:
3996 +       free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3997 +out_free_rx:
3998 +       free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
3999 +#else
4000 +#ifdef CONFIG_PCI_MSI  
4001 +out_free_tx:
4002 +       free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
4003 +out_free_rx:
4004 +       free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
4005 +#endif
4006 +#endif
4007 +out_err:
4008 +       return 1;
4009 +}
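+
+/* nv_request_irq() falls back through three strategies: MSI-X first
+ * (either one vector each for rx/tx/other in throughput mode, or a single
+ * shared vector), then plain MSI, and finally the legacy INTx line.  A
+ * minimal caller sketch (hypothetical, mirroring how the open path is
+ * expected to use it):
+ *
+ *     if (nv_request_irq(dev, 0))    // 0 = normal operation, not a test
+ *             goto out_drain;        // no interrupt source available
+ *
+ * Passing intr_test = 1 swaps in nv_nic_irq_test so the ethtool self-test
+ * can verify that a timer interrupt is actually delivered.
+ */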
4010 +
4011 +#if NVVER > SLES9
4012 +static void nv_free_irq(struct net_device *dev)
4013 +{
4014 +       struct fe_priv *np = get_nvpriv(dev);
4015 +       int i;
4016 +       
4017 +       if (np->msi_flags & NV_MSI_X_ENABLED) {
4018 +               for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
4019 +                       free_irq(np->msi_x_entry[i].vector, dev);
4020 +               }
4021 +               pci_disable_msix(np->pci_dev);
4022 +               np->msi_flags &= ~NV_MSI_X_ENABLED;
4023 +       } else {
4024 +               free_irq(np->pci_dev->irq, dev);
4025 +               if (np->msi_flags & NV_MSI_ENABLED) {
4026 +                       pci_disable_msi(np->pci_dev);
4027 +                       np->msi_flags &= ~NV_MSI_ENABLED;
4028 +               }
4029 +       }
4030 +}
4031 +#else
4032 +static void nv_free_irq(struct net_device *dev)
4033 +{
4034 +       struct fe_priv *np = get_nvpriv(dev);
4035 +       
4036 +#ifdef CONFIG_PCI_MSI          
4037 +       int i;
4038 +
4039 +       if (np->msi_flags & NV_MSI_X_ENABLED) {
4040 +               for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
4041 +                       free_irq(np->msi_x_entry[i].vector, dev);
4042 +               }
4043 +               pci_disable_msi(np->pci_dev);
4044 +               np->msi_flags &= ~NV_MSI_X_ENABLED;
4045 +       } else {
4046 +               free_irq(np->pci_dev->irq, dev);
4047 +
4048 +               if (np->msi_flags & NV_MSI_ENABLED) {
4049 +                       pci_disable_msi(np->pci_dev);
4050 +                       np->msi_flags &= ~NV_MSI_ENABLED;
4051 +               }
4052 +       }
4053 +#else
4054 +       free_irq(np->pci_dev->irq, dev);
4055 +#endif
4056 +       
4057 +}
4058 +#endif 
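+
+/* nv_free_irq() undoes whichever path nv_request_irq() took: every MSI-X
+ * vector is released before pci_disable_msix()/pci_disable_msi(), and
+ * otherwise the single MSI or INTx line is freed, with MSI disabled if it
+ * had been enabled.
+ */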
4059 +
4060 +static void nv_do_nic_poll(unsigned long data)
4061 +{
4062 +       struct net_device *dev = (struct net_device *) data;
4063 +       struct fe_priv *np = get_nvpriv(dev);
4064 +       u8 __iomem *base = get_hwbase(dev);
4065 +       u32 mask = 0;
4066 +
4067 +       /*
4068 +        * First disable the device irq(s), then re-enable interrupts
4069 +        * on the nic; this must happen before calling nv_nic_irq,
4070 +        * because that handler may decide to mask them differently.
4071 +        */
4072 +
4073 +       if (!using_multi_irqs(dev)) {
4074 +               if (np->msi_flags & NV_MSI_X_ENABLED)
4075 +                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4076 +               else
4077 +                       disable_irq(dev->irq);
4078 +               mask = np->irqmask;
4079 +       } else {
4080 +               if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4081 +                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4082 +                       mask |= NVREG_IRQ_RX_ALL;
4083 +               }
4084 +               if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4085 +                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4086 +                       mask |= NVREG_IRQ_TX_ALL;
4087 +               }
4088 +               if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4089 +                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4090 +                       mask |= NVREG_IRQ_OTHER;
4091 +               }
4092 +       }
4093 +       np->nic_poll_irq = 0;
4094 +
4095 +       if (np->recover_error) {
4096 +               np->recover_error = 0;
4097 +               printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
4098 +               if (netif_running(dev)) {
4099 +#if NVVER > FEDORA5
4100 +                       netif_tx_lock_bh(dev);
4101 +#else
4102 +                       spin_lock_bh(&dev->xmit_lock);
4103 +#endif
4104 +                       spin_lock(&np->lock);
4105 +                       /* stop engines */
4106 +                       nv_stop_rx(dev);
4107 +                       nv_stop_tx(dev);
4108 +                       nv_txrx_reset(dev);
4109 +                       /* drain rx queue */
4110 +                       nv_drain_rx(dev);
4111 +                       nv_drain_tx(dev);
4112 +                       /* reinit driver view of the rx queue */
4113 +                       set_bufsize(dev);
4114 +                       if (nv_init_ring(dev)) {
4115 +                               if (!np->in_shutdown)
4116 +                                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4117 +                       }
4118 +                       /* reinit nic view of the rx queue */
4119 +                       writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4120 +                       setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4121 +                       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4122 +                               base + NvRegRingSizes);
4123 +                       pci_push(base);
4124 +                       writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4125 +                       pci_push(base);
4126 +
4127 +                       /* restart rx engine */
4128 +                       nv_start_rx(dev);
4129 +                       nv_start_tx(dev);
4130 +                       spin_unlock(&np->lock);
4131 +#if NVVER > FEDORA5
4132 +                       netif_tx_unlock_bh(dev);
4133 +#else
4134 +                       spin_unlock_bh(&dev->xmit_lock);
4135 +#endif
4136 +               }
4137 +       }
4138 +       /* FIXME: Do we need synchronize_irq(dev->irq) here? */
4139 +       
4140 +       writel(mask, base + NvRegIrqMask);
4141 +       pci_push(base);
4142 +
4143 +       if (!using_multi_irqs(dev)) {
4144 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
4145 +                       nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
4146 +               else
4147 +                       nv_nic_irq_optimized((int) 0, (void *) data, (struct pt_regs *) NULL);
4148 +               if (np->msi_flags & NV_MSI_X_ENABLED)
4149 +                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4150 +               else
4151 +                       enable_irq(dev->irq);
4152 +       } else {
4153 +               if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4154 +                       nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
4155 +                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4156 +               }
4157 +               if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4158 +                       nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
4159 +                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4160 +               }
4161 +               if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4162 +                       nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
4163 +                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4164 +               }
4165 +       }
4166 +}
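+
+/* When np->recover_error is set, the poll above performs a full datapath
+ * restart under the tx lock: stop both engines, reset, drain and rebuild
+ * the rings, reprogram the ring addresses and sizes, kick TxRxControl,
+ * then restart.  The same stop/drain/reinit sequence reappears in
+ * nv_set_ringparam() below, so changes here should be mirrored there.
+ */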
4167 +
4168 +#if NVVER > RHES3
4169 +#ifdef CONFIG_NET_POLL_CONTROLLER
4170 +static void nv_poll_controller(struct net_device *dev)
4171 +{
4172 +       nv_do_nic_poll((unsigned long) dev);
4173 +}
4174 +#endif
4175 +#else
4176 +static void nv_poll_controller(struct net_device *dev)
4177 +{
4178 +       nv_do_nic_poll((unsigned long) dev);
4179 +}
4180 +#endif
4181 +
4182 +static void nv_do_stats_poll(unsigned long data)
4183 +{
4184 +       struct net_device *dev = (struct net_device *) data;
4185 +       struct fe_priv *np = get_nvpriv(dev);
4186 +       u8 __iomem *base = get_hwbase(dev);
4187 +
4188 +       spin_lock_irq(&np->lock);
4189 +       
4190 +       np->estats.tx_dropped = np->stats.tx_dropped;
4191 +       if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
4192 +               np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
4193 +               np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
4194 +               np->estats.tx_bytes += readl(base + NvRegTxCnt);
4195 +               np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
4196 +               np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
4197 +               np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
4198 +               np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
4199 +               np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
4200 +               np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
4201 +               np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
4202 +               np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
4203 +               np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
4204 +               np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
4205 +               np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
4206 +               np->estats.rx_runt += readl(base + NvRegRxRunt);
4207 +               np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
4208 +               np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
4209 +               np->estats.rx_length_error += readl(base + NvRegRxLenErr);
4210 +               np->estats.rx_unicast += readl(base + NvRegRxUnicast);
4211 +               np->estats.rx_multicast += readl(base + NvRegRxMulticast);
4212 +               np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
4213 +               np->estats.rx_packets = 
4214 +                       np->estats.rx_unicast + 
4215 +                       np->estats.rx_multicast + 
4216 +                       np->estats.rx_broadcast;
4217 +               np->estats.rx_errors_total = 
4218 +                       np->estats.rx_crc_errors +
4219 +                       np->estats.rx_over_errors +
4220 +                       np->estats.rx_frame_error +
4221 +                       (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
4222 +                       np->estats.rx_late_collision +
4223 +                       np->estats.rx_runt +
4224 +                       np->estats.rx_frame_too_long +
4225 +                       np->rx_len_errors;
4226 +
4227 +               if (np->driver_data & DEV_HAS_STATISTICS_V2) {
4228 +                       np->estats.tx_deferral += readl(base + NvRegTxDef);
4229 +                       np->estats.tx_packets += readl(base + NvRegTxFrame);
4230 +                       np->estats.rx_bytes += readl(base + NvRegRxCnt);
4231 +                       np->estats.tx_pause += readl(base + NvRegTxPause);
4232 +                       np->estats.rx_pause += readl(base + NvRegRxPause);
4233 +                       np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
4234 +               }
4235 +
4236 +               /* copy to net_device stats */
4237 +               np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
4238 +               np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
4239 +               np->stats.tx_bytes = np->estats.tx_bytes;
4240 +               np->stats.rx_crc_errors = np->estats.rx_crc_errors;
4241 +               np->stats.rx_over_errors = np->estats.rx_over_errors;
4242 +               np->stats.rx_packets = np->estats.rx_packets;
4243 +               np->stats.rx_errors = np->estats.rx_errors_total;
4244 +               
4245 +       } else {
4246 +               np->estats.tx_packets = np->stats.tx_packets;
4247 +               np->estats.tx_fifo_errors = np->stats.tx_fifo_errors;
4248 +               np->estats.tx_carrier_errors = np->stats.tx_carrier_errors;
4249 +               np->estats.tx_bytes = np->stats.tx_bytes;
4250 +               np->estats.rx_bytes = np->stats.rx_bytes;
4251 +               np->estats.rx_crc_errors = np->stats.rx_crc_errors;
4252 +               np->estats.rx_over_errors = np->stats.rx_over_errors;
4253 +               np->estats.rx_packets = np->stats.rx_packets;
4254 +               np->estats.rx_errors_total = np->stats.rx_errors;
4255 +       }
4256 +
4257 +       if (!np->in_shutdown && netif_running(dev))
4258 +               mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
4259 +       spin_unlock_irq(&np->lock);
4260 +}
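+
+/* The "+=" accumulation above suggests the hardware counters are
+ * clear-on-read (an assumption based on this code pattern, not on
+ * documented register semantics).  Derived values are then recomputed
+ * each poll, e.g. rx_packets = rx_unicast + rx_multicast + rx_broadcast,
+ * so the ethtool statistics stay self-consistent.
+ */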
4261 +
4262 +/*
4263 + * nv_get_stats: dev->get_stats function
4264 + * Get latest stats value from the nic.
4265 + * Called with read_lock(&dev_base_lock) held for read -
4266 + * only synchronized against unregister_netdevice.
4267 + */
4268 +static struct net_device_stats *nv_get_stats(struct net_device *dev)
4269 +{
4270 +       struct fe_priv *np = get_nvpriv(dev);
4271 +
4272 +       /* It seems that the nic always generates interrupts and doesn't
4273 +        * accumulate errors internally. Thus the current values in np->stats
4274 +        * are already up to date.
4275 +        */
4276 +       nv_do_stats_poll((unsigned long)dev);
4277 +       return &np->stats;
4278 +}
4279 +
4280 +static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4281 +{
4282 +       struct fe_priv *np = get_nvpriv(dev);
4283 +       strcpy(info->driver, "forcedeth");
4284 +       strcpy(info->version, FORCEDETH_VERSION);
4285 +       strcpy(info->bus_info, pci_name(np->pci_dev));
4286 +}
4287 +
4288 +static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4289 +{
4290 +       struct fe_priv *np = get_nvpriv(dev);
4291 +       wolinfo->supported = WAKE_MAGIC;
4292 +
4293 +       spin_lock_irq(&np->lock);
4294 +       if (np->wolenabled)
4295 +               wolinfo->wolopts = WAKE_MAGIC;
4296 +       spin_unlock_irq(&np->lock);
4297 +}
4298 +
4299 +static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4300 +{
4301 +       struct fe_priv *np = get_nvpriv(dev);
4302 +       u8 __iomem *base = get_hwbase(dev);
4303 +       u32 flags = 0;
4304 +
4305 +       if (wolinfo->wolopts == 0) {
4306 +               np->wolenabled = 0;
4307 +       } else if (wolinfo->wolopts & WAKE_MAGIC) {
4308 +               np->wolenabled = 1;
4309 +               flags = NVREG_WAKEUPFLAGS_ENABLE;
4310 +       }
4311 +       if (netif_running(dev)) {
4312 +               spin_lock_irq(&np->lock);
4313 +               writel(flags, base + NvRegWakeUpFlags);
4314 +               spin_unlock_irq(&np->lock);
4315 +       }
4316 +       return 0;
4317 +}
4318 +
4319 +static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4320 +{
4321 +       struct fe_priv *np = get_nvpriv(dev);
4322 +       int adv;
4323 +
4324 +       spin_lock_irq(&np->lock);
4325 +       ecmd->port = PORT_MII;
4326 +       if (!netif_running(dev)) {
4327 +               /* We do not track link speed / duplex setting if the
4328 +                * interface is disabled. Force a link check */
4329 +               if (nv_update_linkspeed(dev)) {
4330 +                       if (!netif_carrier_ok(dev))
4331 +                               netif_carrier_on(dev);
4332 +               } else {
4333 +                       if (netif_carrier_ok(dev))
4334 +                               netif_carrier_off(dev);
4335 +               }
4336 +       }
4337 +
4338 +       if (netif_carrier_ok(dev)) {
4339 +               switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4340 +               case NVREG_LINKSPEED_10:
4341 +                       ecmd->speed = SPEED_10;
4342 +                       break;
4343 +               case NVREG_LINKSPEED_100:
4344 +                       ecmd->speed = SPEED_100;
4345 +                       break;
4346 +               case NVREG_LINKSPEED_1000:
4347 +                       ecmd->speed = SPEED_1000;
4348 +                       break;
4349 +               }
4350 +               ecmd->duplex = DUPLEX_HALF;
4351 +               if (np->duplex)
4352 +                       ecmd->duplex = DUPLEX_FULL;
4353 +       } else {
4354 +               ecmd->speed = -1;
4355 +               ecmd->duplex = -1;
4356 +       }
4357 +
4358 +       ecmd->autoneg = np->autoneg;
4359 +
4360 +       ecmd->advertising = ADVERTISED_MII;
4361 +       if (np->autoneg) {
4362 +               ecmd->advertising |= ADVERTISED_Autoneg;
4363 +               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4364 +               if (adv & ADVERTISE_10HALF)
4365 +                       ecmd->advertising |= ADVERTISED_10baseT_Half;
4366 +               if (adv & ADVERTISE_10FULL)
4367 +                       ecmd->advertising |= ADVERTISED_10baseT_Full;
4368 +               if (adv & ADVERTISE_100HALF)
4369 +                       ecmd->advertising |= ADVERTISED_100baseT_Half;
4370 +               if (adv & ADVERTISE_100FULL)
4371 +                       ecmd->advertising |= ADVERTISED_100baseT_Full;
4372 +               if (np->gigabit == PHY_GIGABIT) {
4373 +                       adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4374 +                       if (adv & ADVERTISE_1000FULL)
4375 +                               ecmd->advertising |= ADVERTISED_1000baseT_Full;
4376 +               }
4377 +       }
4378 +       ecmd->supported = (SUPPORTED_Autoneg |
4379 +               SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4380 +               SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4381 +               SUPPORTED_MII);
4382 +       if (np->gigabit == PHY_GIGABIT)
4383 +               ecmd->supported |= SUPPORTED_1000baseT_Full;
4384 +
4385 +       ecmd->phy_address = np->phyaddr;
4386 +       ecmd->transceiver = XCVR_EXTERNAL;
4387 +
4388 +       /* ignore maxtxpkt, maxrxpkt for now */
4389 +       spin_unlock_irq(&np->lock);
4390 +       return 0;
4391 +}
4392 +
4393 +static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4394 +{
4395 +       struct fe_priv *np = get_nvpriv(dev);
4396 +
4397 +       dprintk(KERN_DEBUG "%s: nv_set_settings\n", dev->name);
4398 +       if (ecmd->port != PORT_MII)
4399 +               return -EINVAL;
4400 +       if (ecmd->transceiver != XCVR_EXTERNAL)
4401 +               return -EINVAL;
4402 +       if (ecmd->phy_address != np->phyaddr) {
4403 +               /* TODO: support switching between multiple phys. Should be
4404 +                * trivial, but not enabled due to lack of test hardware. */
4405 +               return -EINVAL;
4406 +       }
4407 +       if (ecmd->autoneg == AUTONEG_ENABLE) {
4408 +               u32 mask;
4409 +
4410 +               mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4411 +                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4412 +               if (np->gigabit == PHY_GIGABIT)
4413 +                       mask |= ADVERTISED_1000baseT_Full;
4414 +
4415 +               if ((ecmd->advertising & mask) == 0)
4416                         return -EINVAL;
4417 +
4418 +       } else if (ecmd->autoneg == AUTONEG_DISABLE) {
4419 +               /* Note: with autonegotiation disabled, speed 1000 is intentionally
4420 +                * forbidden - no one should need that. */
4421 +
4422 +               if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
4423 +                       return -EINVAL;
4424 +               if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
4425 +                       return -EINVAL;
4426 +       } else {
4427 +               return -EINVAL;
4428 +       }
4429 +
4430 +       netif_carrier_off(dev);
4431 +       if (netif_running(dev)) {
4432 +               nv_disable_irq(dev);
4433 +#if NVVER > FEDORA5
4434 +               netif_tx_lock_bh(dev);
4435 +#else
4436 +               spin_lock_bh(&dev->xmit_lock);
4437 +#endif
4438 +               spin_lock(&np->lock);
4439 +               /* stop engines */
4440 +               nv_stop_rx(dev);
4441 +               nv_stop_tx(dev);
4442 +               spin_unlock(&np->lock);
4443 +#if NVVER > FEDORA5
4444 +               netif_tx_unlock_bh(dev);
4445 +#else
4446 +               spin_unlock_bh(&dev->xmit_lock);
4447 +#endif
4448 +       }
4449 +
4450 +       if (ecmd->autoneg == AUTONEG_ENABLE) {
4451 +               int adv, bmcr;
4452 +
4453 +               np->autoneg = 1;
4454 +
4455 +               /* advertise only what has been requested */
4456 +               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4457 +               adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4458 +               if (ecmd->advertising & ADVERTISED_10baseT_Half) {
4459 +                       adv |= ADVERTISE_10HALF;
4460 +                       np->speed_duplex = NV_SPEED_DUPLEX_10_HALF_DUPLEX;
4461 +               }
4462 +               if (ecmd->advertising & ADVERTISED_10baseT_Full) {
4463 +                       adv |= ADVERTISE_10FULL;
4464 +                       np->speed_duplex = NV_SPEED_DUPLEX_10_FULL_DUPLEX;
4465 +               }
4466 +               if (ecmd->advertising & ADVERTISED_100baseT_Half) {
4467 +                       adv |= ADVERTISE_100HALF;
4468 +                       np->speed_duplex = NV_SPEED_DUPLEX_100_HALF_DUPLEX;
4469 +               }
4470 +               if (ecmd->advertising & ADVERTISED_100baseT_Full) {
4471 +                       adv |= ADVERTISE_100FULL;
4472 +                       np->speed_duplex = NV_SPEED_DUPLEX_100_FULL_DUPLEX;
4473 +               }
4474 +               if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4475 +                       adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4476 +               if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4477 +                       adv |=  ADVERTISE_PAUSE_ASYM;
4478 +               mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4479 +
4480 +               if (np->gigabit == PHY_GIGABIT) {
4481 +                       adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4482 +                       adv &= ~ADVERTISE_1000FULL;
4483 +                       if (ecmd->advertising & ADVERTISED_1000baseT_Full) {
4484 +                               adv |= ADVERTISE_1000FULL;
4485 +                               np->speed_duplex = NV_SPEED_DUPLEX_1000_FULL_DUPLEX;
4486 +                       }
4487 +                       mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4488 +
4489 +                       if (ecmd->advertising & (ADVERTISED_10baseT_Half|ADVERTISED_10baseT_Full|ADVERTISED_100baseT_Half|ADVERTISED_100baseT_Full|ADVERTISED_1000baseT_Full))
4490 +                               np->speed_duplex = NV_SPEED_DUPLEX_AUTO;
4491 +               } else {
4492 +                       if (ecmd->advertising & (ADVERTISED_10baseT_Half|ADVERTISED_10baseT_Full|ADVERTISED_100baseT_Half|ADVERTISED_100baseT_Full))
4493 +                               np->speed_duplex = NV_SPEED_DUPLEX_AUTO;
4494 +               }
4495 +
4496 +               if (netif_running(dev))
4497 +                       printk(KERN_INFO "%s: link down.\n", dev->name);
4498 +               bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4499 +               if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4500 +                       bmcr |= BMCR_ANENABLE;
4501 +                       /* reset the phy in order for settings to stick,
4502 +                        * and cause autoneg to start */
4503 +                       if (phy_reset(dev, bmcr)) {
4504 +                               printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4505 +                               return -EINVAL;
4506 +                       }
4507 +               } else {
4508 +                       bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4509 +                       mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4510 +               }
4511 +       } else {
4512 +               int adv, bmcr;
4513 +
4514 +               np->autoneg = 0;
4515 +
4516 +               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4517 +               adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4518 +               if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) {
4519 +                       adv |= ADVERTISE_10HALF;
4520 +                       np->speed_duplex = NV_SPEED_DUPLEX_10_HALF_DUPLEX;
4521 +               }
4522 +               if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) {
4523 +                       adv |= ADVERTISE_10FULL;
4524 +                       np->speed_duplex = NV_SPEED_DUPLEX_10_FULL_DUPLEX;
4525 +               }
4526 +               if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) {
4527 +                       adv |= ADVERTISE_100HALF;
4528 +                       np->speed_duplex = NV_SPEED_DUPLEX_100_HALF_DUPLEX;
4529 +               }
4530 +               if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) {
4531 +                       adv |= ADVERTISE_100FULL;
4532 +                       np->speed_duplex = NV_SPEED_DUPLEX_100_FULL_DUPLEX;
4533 +               }
4534 +               np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4535 +               if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
4536 +                       adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4537 +                       np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4538 +               }
4539 +               if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4540 +                       adv |=  ADVERTISE_PAUSE_ASYM;
4541 +                       np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4542 +               }
4543 +               mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4544 +               np->fixed_mode = adv;
4545 +
4546 +               if (np->gigabit == PHY_GIGABIT) {
4547 +                       adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4548 +                       adv &= ~ADVERTISE_1000FULL;
4549 +                       mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4550 +               }
4551 +
4552 +               bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4553 +               bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4554 +               if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4555 +                       bmcr |= BMCR_FULLDPLX;
4556 +               if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4557 +                       bmcr |= BMCR_SPEED100;
4558 +               if (np->phy_oui == PHY_OUI_MARVELL) {
4559 +                       /* reset the phy in order for forced mode settings to stick */
4560 +                       if (phy_reset(dev, bmcr)) {
4561 +                               printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4562 +                               return -EINVAL;
4563 +                       }
4564 +               } else {
4565 +                       mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4566 +                       if (netif_running(dev)) {
4567 +                               /* Wait a bit and then reconfigure the nic. */
4568 +                               udelay(10);
4569 +                               nv_linkchange(dev);
4570 +                       }
4571 +               }
4572 +       }
4573 +
4574 +       if (netif_running(dev)) {
4575 +               nv_start_rx(dev);
4576 +               nv_start_tx(dev);
4577 +               nv_enable_irq(dev);
4578 +       }
4579 +
4580 +       return 0;
4581 +}
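+
+/* nv_set_settings() is reached through the standard ethtool ioctl, e.g.
+ * (assuming the interface is named eth0):
+ *
+ *     ethtool -s eth0 autoneg on
+ *     ethtool -s eth0 speed 100 duplex full autoneg off
+ *
+ * Forcing 1000 Mbit/s with autoneg off is rejected above by design, and
+ * Marvell PHYs need the extra phy_reset() before new settings stick.
+ */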
4582 +
4583 +#define FORCEDETH_REGS_VER     1
4584 +
4585 +static int nv_get_regs_len(struct net_device *dev)
4586 +{
4587 +       struct fe_priv *np = get_nvpriv(dev);
4588 +       return np->register_size;
4589 +}
4590 +
4591 +static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4592 +{
4593 +       struct fe_priv *np = get_nvpriv(dev);
4594 +       u8 __iomem *base = get_hwbase(dev);
4595 +       u32 *rbuf = buf;
4596 +       int i;
4597 +
4598 +       regs->version = FORCEDETH_REGS_VER;
4599 +       spin_lock_irq(&np->lock);
4600 +       for (i = 0; i < np->register_size/sizeof(u32); i++)
4601 +               rbuf[i] = readl(base + i*sizeof(u32));
4602 +       spin_unlock_irq(&np->lock);
4603 +}
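+
+/* The dump length handed to ethtool comes from nv_get_regs_len() above,
+ * so the copy loop must stay strictly below register_size/sizeof(u32):
+ * the ethtool core allocates exactly register_size bytes for this buffer.
+ */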
4604 +
4605 +static int nv_nway_reset(struct net_device *dev)
4606 +{
4607 +       struct fe_priv *np = get_nvpriv(dev);
4608 +       int ret;
4609 +
4610 +       if (np->autoneg) {
4611 +               int bmcr;
4612 +
4613 +               netif_carrier_off(dev);
4614 +               if (netif_running(dev)) {
4615 +                       nv_disable_irq(dev);
4616 +#if NVVER > FEDORA5
4617 +                       netif_tx_lock_bh(dev);
4618 +#else
4619 +                       spin_lock_bh(&dev->xmit_lock);
4620 +#endif
4621 +                       spin_lock(&np->lock);
4622 +                       /* stop engines */
4623 +                       nv_stop_rx(dev);
4624 +                       nv_stop_tx(dev);
4625 +                       spin_unlock(&np->lock);
4626 +#if NVVER > FEDORA5
4627 +                       netif_tx_unlock_bh(dev);
4628 +#else
4629 +                       spin_unlock_bh(&dev->xmit_lock);
4630 +#endif
4631 +                       printk(KERN_INFO "%s: link down.\n", dev->name);
4632 +               }
4633 +
4634 +               bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4635 +               if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4636 +                       bmcr |= BMCR_ANENABLE;
4637 +                       /* reset the phy in order for settings to stick*/
4638 +                       if (phy_reset(dev, bmcr)) {
4639 +                               printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4640 +                               return -EINVAL;
4641 +                       }
4642 +               } else {
4643 +                       bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4644 +                       mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4645 +               }
4646 +
4647 +               if (netif_running(dev)) {
4648 +                       nv_start_rx(dev);
4649 +                       nv_start_tx(dev);
4650 +                       nv_enable_irq(dev);
4651 +               }
4652 +               ret = 0;
4653 +       } else {
4654 +               ret = -EINVAL;
4655 +       }
4656 +
4657 +       return ret;
4658 +}
4659 +
4660 +static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4661 +{
4662 +       struct fe_priv *np = get_nvpriv(dev);
4663 +
4664 +       ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4665 +       ring->rx_mini_max_pending = 0;
4666 +       ring->rx_jumbo_max_pending = 0;
4667 +       ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4668 +
4669 +       ring->rx_pending = np->rx_ring_size;
4670 +       ring->rx_mini_pending = 0;
4671 +       ring->rx_jumbo_pending = 0;
4672 +       ring->tx_pending = np->tx_ring_size;
4673 +}
4674 +
4675 +static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4676 +{
4677 +       struct fe_priv *np = get_nvpriv(dev);
4678 +       u8 __iomem *base = get_hwbase(dev);
4679 +       u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4680 +       dma_addr_t ring_addr;
4681 +
4682 +       if (ring->rx_pending < RX_RING_MIN ||
4683 +           ring->tx_pending < TX_RING_MIN ||
4684 +           ring->rx_mini_pending != 0 ||
4685 +           ring->rx_jumbo_pending != 0 ||
4686 +           (np->desc_ver == DESC_VER_1 && 
4687 +            (ring->rx_pending > RING_MAX_DESC_VER_1 || 
4688 +             ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4689 +           (np->desc_ver != DESC_VER_1 && 
4690 +            (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 
4691 +             ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4692 +               return -EINVAL;
4693 +       }
4694 +
4695 +       /* allocate new rings */
4696 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4697 +               rxtx_ring = pci_alloc_consistent(np->pci_dev,
4698 +                                           sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4699 +                                           &ring_addr);
4700 +       } else {
4701 +               rxtx_ring = pci_alloc_consistent(np->pci_dev,
4702 +                                           sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4703 +                                           &ring_addr);
4704 +       }
4705 +       rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4706 +       tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4707 +
4708 +       if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4709 +               /* fall back to old rings */
4710 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4711 +                       if (rxtx_ring)
4712 +                               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4713 +                                                   rxtx_ring, ring_addr);
4714 +               } else {
4715 +                       if (rxtx_ring)
4716 +                               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4717 +                                                   rxtx_ring, ring_addr);
4718 +               }
4719 +               if (rx_skbuff)
4720 +                       kfree(rx_skbuff);
4721 +               if (tx_skbuff)
4722 +                       kfree(tx_skbuff);
4723 +               goto exit;
4724 +       }
4725 +
4726 +       if (netif_running(dev)) {
4727 +               nv_disable_irq(dev);
4728 +#if NVVER > FEDORA5
4729 +               netif_tx_lock_bh(dev);
4730 +#else
4731 +               spin_lock_bh(&dev->xmit_lock);
4732 +#endif
4733 +               spin_lock(&np->lock);
4734 +               /* stop engines */
4735 +               nv_stop_rx(dev);
4736 +               nv_stop_tx(dev);
4737 +               nv_txrx_reset(dev);
4738 +               /* drain queues */
4739 +               nv_drain_rx(dev);
4740 +               nv_drain_tx(dev);
4741 +               /* delete queues */
4742 +               free_rings(dev);
4743 +       }
4744 +       
4745 +       /* set new values */
4746 +       np->rx_ring_size = ring->rx_pending;
4747 +       np->tx_ring_size = ring->tx_pending;
4748 +       np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
4749 +       np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
4750 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4751 +               np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
4752 +               np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4753 +       } else {
4754 +               np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
4755 +               np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4756 +       }
4757 +       np->rx_skb = (struct nv_skb_map*)rx_skbuff;
4758 +       np->tx_skb = (struct nv_skb_map*)tx_skbuff;
4759 +       np->ring_addr = ring_addr;
4760 +       
4761 +       memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4762 +       memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4763 +
4764 +       if (netif_running(dev)) {
4765 +               /* reinit driver view of the queues */
4766 +               set_bufsize(dev);
4767 +               if (nv_init_ring(dev)) {
4768 +                       if (!np->in_shutdown)
4769 +                               mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4770 +               }
4771 +               
4772 +               /* reinit nic view of the queues */
4773 +               writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4774 +               setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4775 +               writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4776 +                       base + NvRegRingSizes);
4777 +               pci_push(base);
4778 +               writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4779 +               pci_push(base);
4780 +               
4781 +               /* restart engines */
4782 +               nv_start_rx(dev);
4783 +               nv_start_tx(dev);
4784 +               spin_unlock(&np->lock);
4785 +#if NVVER > FEDORA5
4786 +               netif_tx_unlock_bh(dev);
4787 +#else
4788 +               spin_unlock_bh(&dev->xmit_lock);
4789 +#endif
4790 +               nv_enable_irq(dev);
4791 +       }
4792 +       return 0;
4793 +exit:
4794 +       return -ENOMEM;
4795 +}
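+
+/* The new descriptor ring and skb maps are allocated before the live ones
+ * are torn down, so an allocation failure leaves the device running on
+ * its old rings.  Typical invocation (assuming eth0):
+ *
+ *     ethtool -G eth0 rx 512 tx 512
+ */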
4796 +
4797 +static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4798 +{
4799 +       struct fe_priv *np = get_nvpriv(dev);
4800 +
4801 +       pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4802 +       pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4803 +       pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4804 +}
4805 +
4806 +static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4807 +{
4808 +       struct fe_priv *np = get_nvpriv(dev);
4809 +       int adv, bmcr;
4810 +
4811 +       if ((!np->autoneg && np->duplex == 0) ||
4812 +           (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4813 +               printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n",
4814 +                      dev->name);
4815 +               return -EINVAL;
4816 +       }
4817 +       if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4818 +               printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
4819 +               return -EINVAL;
4820 +       }
4821 +
4822 +       netif_carrier_off(dev);
4823 +       if (netif_running(dev)) {
4824 +               nv_disable_irq(dev);
4825 +#if NVVER > FEDORA5
4826 +               netif_tx_lock_bh(dev);
4827 +#else
4828 +               spin_lock_bh(&dev->xmit_lock);
4829 +#endif
4830 +               spin_lock(&np->lock);
4831 +               /* stop engines */
4832 +               nv_stop_rx(dev);
4833 +               nv_stop_tx(dev);
4834 +               spin_unlock(&np->lock);
4835 +#if NVVER > FEDORA5
4836 +               netif_tx_unlock_bh(dev);
4837 +#else
4838 +               spin_unlock_bh(&dev->xmit_lock);
4839 +#endif
4840 +       }
4841 +
4842 +       np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4843 +       if (pause->rx_pause)
4844 +               np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4845 +       if (pause->tx_pause)
4846 +               np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4847 +
4848 +       if (np->autoneg && pause->autoneg) {
4849 +               np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4850 +
4851 +               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4852 +               adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4853 +               if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4854 +                       adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4855 +               if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4856 +                       adv |=  ADVERTISE_PAUSE_ASYM;
4857 +               mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4858 +
4859 +               if (netif_running(dev))
4860 +                       printk(KERN_INFO "%s: link down.\n", dev->name);
4861 +               bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4862 +               bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4863 +               mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4864 +       } else {
4865 +               np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4866 +               if (pause->rx_pause)
4867 +                       np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4868 +               if (pause->tx_pause)
4869 +                       np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4870 +
4871 +               if (!netif_running(dev))
4872 +                       nv_update_linkspeed(dev);
4873 +               else
4874 +                       nv_update_pause(dev, np->pause_flags);
4875 +       }
4876 +
4877 +       if (netif_running(dev)) {
4878 +               nv_start_rx(dev);
4879 +               nv_start_tx(dev);
4880 +               nv_enable_irq(dev);
4881 +       }
4882 +       return 0;
4883 +}
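+
+/* Pause advertisement follows the usual 802.3x pattern: rx pause
+ * advertises both PAUSE_CAP and PAUSE_ASYM while keeping tx pause off,
+ * and tx-only pause advertises PAUSE_ASYM alone.  Typical invocation
+ * (assuming eth0):
+ *
+ *     ethtool -A eth0 autoneg on rx on tx off
+ */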
4884 +
4885 +static u32 nv_get_rx_csum(struct net_device *dev)
4886 +{
4887 +       struct fe_priv *np = get_nvpriv(dev);
4888 +       return (np->rx_csum) != 0;
4889 +}
4890 +
4891 +static int nv_set_rx_csum(struct net_device *dev, u32 data)
4892 +{
4893 +       struct fe_priv *np = get_nvpriv(dev);
4894 +       u8 __iomem *base = get_hwbase(dev);
4895 +       int retcode = 0;
4896 +
4897 +       if (np->driver_data & DEV_HAS_CHECKSUM) {
4898 +
4899 +               if (data) {
4900 +                       np->rx_csum = 1;
4901 +                       np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4902 +               } else {
4903 +                       np->rx_csum = 0;
4904 +                       /* vlan is dependent on rx checksum offload */
4905 +                       if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
4906 +                               np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4907 +               }
4908 +
4909 +               if (netif_running(dev)) {
4910 +                       spin_lock_irq(&np->lock);
4911 +                       writel(np->txrxctl_bits, base + NvRegTxRxControl);
4912 +                       spin_unlock_irq(&np->lock);
4913 +               }
4914         } else {
4915                 return -EINVAL;
4916         }
4917  
4918 +       return retcode;
4919 +}
4920 +
4921 +#ifdef NETIF_F_TSO
4922 +static int nv_set_tso(struct net_device *dev, u32 data)
4923 +{
4924 +       struct fe_priv *np = get_nvpriv(dev);
4925 +       
4926 +       if (np->driver_data & DEV_HAS_CHECKSUM) {
4927 +#if NVVER < SUSE10
4928 +               if (data) {
4929 +                       if (ethtool_op_get_sg(dev) == 0)
4930 +                               return -EINVAL;
4931 +               }
4932 +#endif
4933 +               return ethtool_op_set_tso(dev, data);
4934 +       } else
4935 +               return -EINVAL;
4936 +}
4937 +#endif
4938 +
4939 +static int nv_set_sg(struct net_device *dev, u32 data)
4940 +{
4941 +       struct fe_priv *np = get_nvpriv(dev);
4942 +       
4943 +       if (np->driver_data & DEV_HAS_CHECKSUM) {
4944 +#if NVVER < SUSE10
4945 +               if (data) {
4946 +                       if (ethtool_op_get_tx_csum(dev) == 0)
4947 +                               return -EINVAL;
4948 +               }
4949 +#ifdef NETIF_F_TSO
4950 +               if (!data)
4951 +                       /* set tso off */
4952 +                       nv_set_tso(dev, data);
4953 +#endif
4954 +#endif
4955 +               return ethtool_op_set_sg(dev, data);
4956 +       } else
4957 +               return -EINVAL;
4958 +}
4959 +
4960 +static int nv_set_tx_csum(struct net_device *dev, u32 data)
4961 +{
4962 +       struct fe_priv *np = get_nvpriv(dev);
4963 +
4964 +#if NVVER < SUSE10
4965 +       /* set sg off if tx off */
4966 +       if (!data)
4967 +               nv_set_sg(dev, data);
4968 +#endif
4969 +       if (np->driver_data & DEV_HAS_CHECKSUM)
4970 +#if NVVER > RHES4 
4971 +               return ethtool_op_set_tx_hw_csum(dev, data);
4972 +#else
4973 +       {
4974 +               if (data)
4975 +                       dev->features |= NETIF_F_IP_CSUM;
4976 +               else
4977 +                       dev->features &= ~NETIF_F_IP_CSUM;
4978 +               return 0;
4979 +       }
4980 +#endif
4981 +       else
4982 +               return -EINVAL;
4983 +}
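+
+/* On pre-SUSE10 kernels the three offloads form a dependency chain:
+ * TSO requires scatter-gather, which requires tx checksumming.  The
+ * helpers above keep the chain consistent in both directions: enabling a
+ * feature checks its prerequisite, and disabling one cascades downward
+ * (tx csum off -> sg off -> tso off).
+ */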
4984 +
4985 +static int nv_get_stats_count(struct net_device *dev)
4986 +{
4987 +       struct fe_priv *np = get_nvpriv(dev);
4988 +
4989 +       if (np->driver_data & DEV_HAS_STATISTICS_V1)
4990 +               return NV_DEV_STATISTICS_V1_COUNT;
4991 +       else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4992 +               return NV_DEV_STATISTICS_V2_COUNT;
4993 +       else
4994 +               return NV_DEV_STATISTICS_SW_COUNT;
4995 +}
4996 +
4997 +static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4998 +{
4999 +       struct fe_priv *np = get_nvpriv(dev);
5000 +
5001 +       /* update stats */
5002 +       nv_do_stats_poll((unsigned long)dev);
5003 +
5004 +       memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
5005 +}
5006 +
5007 +static int nv_self_test_count(struct net_device *dev)
5008 +{
5009 +       struct fe_priv *np = get_nvpriv(dev);
5010 +
5011 +       if (np->driver_data & DEV_HAS_TEST_EXTENDED)
5012 +               return NV_TEST_COUNT_EXTENDED;
5013 +       else
5014 +               return NV_TEST_COUNT_BASE;
5015 +}
5016 +
5017 +static int nv_link_test(struct net_device *dev)
5018 +{
5019 +       struct fe_priv *np = get_nvpriv(dev);
5020 +       int mii_status;
5021 +
5022 +       mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5023 +       mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5024 +
5025 +       /* check phy link status */
5026 +       if (!(mii_status & BMSR_LSTATUS))
5027 +               return 0;
5028 +       else
5029 +               return 1;
5030 +}
5031 +
5032 +static int nv_register_test(struct net_device *dev)
5033 +{
5034 +       u8 __iomem *base = get_hwbase(dev);
5035 +       int i = 0;
5036 +       u32 orig_read, new_read;
5037 +
5038 +       do {
5039 +               orig_read = readl(base + nv_registers_test[i].reg);
5040 +
5041 +               /* xor with mask to toggle bits */
5042 +               orig_read ^= nv_registers_test[i].mask;
5043 +
5044 +               writel(orig_read, base + nv_registers_test[i].reg);
5045 +
5046 +               new_read = readl(base + nv_registers_test[i].reg);
5047 +
5048 +               if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
5049 +                       return 0;
5050 +
5051 +               /* restore original value */
5052 +               orig_read ^= nv_registers_test[i].mask;
5053 +               writel(orig_read, base + nv_registers_test[i].reg);
5054 +
5055 +       } while (nv_registers_test[++i].reg != 0);
5056 +
5057 +       return 1;
5058 +}
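+
+/* The register self-test XORs each entry's mask into the current value,
+ * writes it back, and checks that the masked bits actually toggled before
+ * restoring the original.  nv_registers_test[] is assumed to be a
+ * zero-terminated { reg, mask } table defined earlier in this patch.
+ */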
5059 +
5060 +static int nv_interrupt_test(struct net_device *dev)
5061 +{
5062 +       struct fe_priv *np = get_nvpriv(dev);
5063 +       u8 __iomem *base = get_hwbase(dev);
5064 +       int ret = 1;
5065 +       int testcnt;
5066 +       u32 save_msi_flags, save_poll_interval = 0;
5067 +
5068 +       if (netif_running(dev)) {
5069 +               /* free current irq */
5070 +               nv_free_irq(dev);
5071 +               save_poll_interval = readl(base+NvRegPollingInterval);
5072 +       }
5073 +
5074 +       /* flag to test interrupt handler */
5075 +       np->intr_test = 0;
5076 +
5077 +       /* setup test irq */
5078 +       save_msi_flags = np->msi_flags;
5079 +       np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
5080 +       np->msi_flags |= 0x001; /* setup 1 vector */
5081 +       if (nv_request_irq(dev, 1))
5082 +               return 0;
5083 +
5084 +       /* setup timer interrupt */
5085 +       writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5086 +       writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5087 +
5088 +       nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
5089 +
5090 +       /* wait for at least one interrupt */
5091 +       nv_msleep(100);
5092 +
5093         spin_lock_irq(&np->lock);
5094 -       if (ecmd->autoneg == AUTONEG_ENABLE) {
5095 -               int adv, bmcr;
5096  
5097 -               np->autoneg = 1;
5098 +       /* flag should be set within ISR */
5099 +       testcnt = np->intr_test;
5100 +       if (!testcnt)
5101 +               ret = 2;
5102  
5103 -               /* advertise only what has been requested */
5104 -               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
5105 -               adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
5106 -               if (ecmd->advertising & ADVERTISED_10baseT_Half)
5107 -                       adv |= ADVERTISE_10HALF;
5108 -               if (ecmd->advertising & ADVERTISED_10baseT_Full)
5109 -                       adv |= ADVERTISE_10FULL;
5110 -               if (ecmd->advertising & ADVERTISED_100baseT_Half)
5111 -                       adv |= ADVERTISE_100HALF;
5112 -               if (ecmd->advertising & ADVERTISED_100baseT_Full)
5113 -                       adv |= ADVERTISE_100FULL;
5114 -               mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
5115 +       nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
5116 +       if (!(np->msi_flags & NV_MSI_X_ENABLED))
5117 +               writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5118 +       else
5119 +               writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5120  
5121 -               if (np->gigabit == PHY_GIGABIT) {
5122 -                       adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
5123 -                       adv &= ~ADVERTISE_1000FULL;
5124 -                       if (ecmd->advertising & ADVERTISED_1000baseT_Full)
5125 -                               adv |= ADVERTISE_1000FULL;
5126 -                       mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
5127 -               }
5128 +       spin_unlock_irq(&np->lock);
5129  
5130 -               bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
5131 -               bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
5132 -               mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
5133 +       nv_free_irq(dev);
5134 +
5135 +       np->msi_flags = save_msi_flags;
5136 +
5137 +       if (netif_running(dev)) {
5138 +               writel(save_poll_interval, base + NvRegPollingInterval);
5139 +               writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5140 +               /* restore original irq */
5141 +               if (nv_request_irq(dev, 0))
5142 +                       return 0;
5143 +       }
5144  
5145 +       return ret;
5146 +}
5147 +
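+/*
+ * Loopback self-test: put the MAC into internal loopback (NVREG_PFF_LOOPBACK
+ * with the link forced via NVREG_MISC1_FORCE), transmit one ETH_DATA_LEN
+ * frame filled with a counting byte pattern through descriptor 0, and verify
+ * that it comes back on the rx ring with the expected length and payload.
+ * The original filter and misc1 settings are restored afterwards.
+ */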
5148 +static int nv_loopback_test(struct net_device *dev)
5149 +{
5150 +       struct fe_priv *np = get_nvpriv(dev);
5151 +       u8 __iomem *base = get_hwbase(dev);
5152 +       struct sk_buff *tx_skb, *rx_skb;
5153 +       dma_addr_t test_dma_addr;
5154 +       u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
5155 +       u32 Flags;
5156 +       int len, i, pkt_len;
5157 +       u8 *pkt_data;
5158 +       u32 filter_flags = 0;
5159 +       u32 misc1_flags = 0;
5160 +       int ret = 1;
5161 +
5162 +       dprintk(KERN_DEBUG "%s:%s\n", dev->name, __FUNCTION__);
5163 +
5164 +       if (netif_running(dev)) {
5165 +               nv_disable_irq(dev);
5166 +               filter_flags = readl(base + NvRegPacketFilterFlags);
5167 +               misc1_flags = readl(base + NvRegMisc1);
5168         } else {
5169 -               int adv, bmcr;
5170 +               nv_txrx_reset(dev);
5171 +       }
5172  
5173 -               np->autoneg = 0;
5174 +       /* reinit driver view of the rx queue */
5175 +       set_bufsize(dev);
5176 +       nv_init_ring(dev);
5177  
5178 -               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
5179 -               adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
5180 -               if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
5181 -                       adv |= ADVERTISE_10HALF;
5182 -               if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
5183 -                       adv |= ADVERTISE_10FULL;
5184 -               if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
5185 -                       adv |= ADVERTISE_100HALF;
5186 -               if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
5187 -                       adv |= ADVERTISE_100FULL;
5188 -               mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
5189 -               np->fixed_mode = adv;
5190 +       /* setup hardware for loopback */
5191 +       writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
5192 +       writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
5193  
5194 -               if (np->gigabit == PHY_GIGABIT) {
5195 -                       adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
5196 -                       adv &= ~ADVERTISE_1000FULL;
5197 -                       mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
5198 -               }
5199 +       /* reinit nic view of the rx queue */
5200 +       writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5201 +       setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5202 +       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5203 +               base + NvRegRingSizes);
5204 +       pci_push(base);
5205  
5206 -               bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
5207 -               bmcr |= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
5208 -               if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
5209 -                       bmcr |= BMCR_FULLDPLX;
5210 -               if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
5211 -                       bmcr |= BMCR_SPEED100;
5212 -               mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
5213 +       /* restart rx engine */
5214 +       nv_start_rx(dev);
5215 +       nv_start_tx(dev);
5216  
5217 -               if (netif_running(dev)) {
5218 -                       /* Wait a bit and then reconfigure the nic. */
5219 -                       udelay(10);
5220 -                       nv_linkchange(dev);
5221 +       /* setup packet for tx */
5222 +       pkt_len = ETH_DATA_LEN;
5223 +       tx_skb = dev_alloc_skb(pkt_len);
+       if (!tx_skb) {
+               dprintk(KERN_DEBUG "%s: loopback - dev_alloc_skb failed\n", dev->name);
+               ret = 0;
+               goto out;
+       }
5224 +       pkt_data = skb_put(tx_skb, pkt_len);
5225 +       for (i = 0; i < pkt_len; i++)
5226 +               pkt_data[i] = (u8)(i & 0xff);
5227 +       test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
5228 +                                      tx_skb->end - tx_skb->data, PCI_DMA_TODEVICE);
5229 +
5230 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
5231 +               np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
5232 +               np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5233 +       } else {
5234 +               np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
5235 +               np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
5236 +               np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5237 +       }
5238 +       writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5239 +       pci_push(get_hwbase(dev));
5240 +
5241 +       nv_msleep(500);
5242 +
5243 +       /* check for rx of the packet */
5244 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
5245 +               Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
5246 +               len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
5247 +
5248 +       } else {
5249 +               Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
5250 +               len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
5251 +       }
5252 +
5253 +       if (Flags & NV_RX_AVAIL) {
5254 +               ret = 0;
5255 +       } else if (np->desc_ver == DESC_VER_1) {
5256 +               if (Flags & NV_RX_ERROR)
5257 +                       ret = 0;
5258 +       } else {
5259 +               if (Flags & NV_RX2_ERROR) {
5260 +                       ret = 0;
5261                 }
5262         }
5263 -       spin_unlock_irq(&np->lock);
5264  
5265 -       return 0;
5266 +       if (ret) {
5267 +               if (len != pkt_len) {
5268 +                       ret = 0;
5269 +                       dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
5270 +                               dev->name, len, pkt_len);
5271 +               } else {
5272 +                       rx_skb = np->rx_skb[0].skb;
5273 +                       for (i = 0; i < pkt_len; i++) {
5274 +                               if (rx_skb->data[i] != (u8)(i & 0xff)) {
5275 +                                       ret = 0;
5276 +                                       dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
5277 +                                               dev->name, i);
5278 +                                       break;
5279 +                               }
5280 +                       }
5281 +               }
5282 +       } else {
5283 +               dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
5284 +       }
5285 +
5286 +       pci_unmap_single(np->pci_dev, test_dma_addr,
5287 +                      tx_skb->end - tx_skb->data,
5288 +                      PCI_DMA_TODEVICE);
5289 +       dev_kfree_skb_any(tx_skb);
5290 +
+out:
5291 +       /* stop engines */
5292 +       nv_stop_rx(dev);
5293 +       nv_stop_tx(dev);
5294 +       nv_txrx_reset(dev);
5295 +       /* drain rx queue */
5296 +       nv_drain_rx(dev);
5297 +       nv_drain_tx(dev);
5298 +
5299 +       if (netif_running(dev)) {
5300 +               writel(misc1_flags, base + NvRegMisc1);
5301 +               writel(filter_flags, base + NvRegPacketFilterFlags);
5302 +               nv_enable_irq(dev);
5303 +       }
5304 +
5305 +       return ret;
5306  }
5307  
5308 -#define FORCEDETH_REGS_VER     1
5309 -#define FORCEDETH_REGS_SIZE    0x400 /* 256 32-bit registers */
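+/*
+ * ethtool self-test entry point. buffer[0..3] flag link, register,
+ * interrupt and loopback failures respectively. The offline tests quiesce
+ * the hardware first, then rebuild the rings and restart the engines if
+ * the interface was up.
+ */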
5310 +static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
5311 +{
5312 +       struct fe_priv *np = get_nvpriv(dev);
5313 +       u8 __iomem *base = get_hwbase(dev);
5314 +       int result;
5315 +       memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
5316  
5317 -static int nv_get_regs_len(struct net_device *dev)
5318 +       dprintk(KERN_DEBUG "%s:%s\n", dev->name, __FUNCTION__);
5319 +
5320 +       if (!nv_link_test(dev)) {
5321 +               test->flags |= ETH_TEST_FL_FAILED;
5322 +               buffer[0] = 1;
5323 +       }
5324 +
5325 +       if (test->flags & ETH_TEST_FL_OFFLINE) {
5326 +               if (netif_running(dev)) {
5327 +                       netif_stop_queue(dev);
5328 +#if NVVER > FEDORA5
5329 +                       netif_tx_lock_bh(dev);
5330 +#else
5331 +                       spin_lock_bh(&dev->xmit_lock);
5332 +#endif
5333 +                       spin_lock_irq(&np->lock);
5334 +                       nv_disable_hw_interrupts(dev, np->irqmask);
5335 +                       if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
5336 +                               writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5337 +                       } else {
5338 +                               writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5339 +                       }
5340 +                       /* stop engines */
5341 +                       nv_stop_rx(dev);
5342 +                       nv_stop_tx(dev);
5343 +                       nv_txrx_reset(dev);
5344 +                       /* drain rx queue */
5345 +                       nv_drain_rx(dev);
5346 +                       nv_drain_tx(dev);
5347 +                       spin_unlock_irq(&np->lock);
5348 +#if NVVER > FEDORA5
5349 +                       netif_tx_unlock_bh(dev);
5350 +#else
5351 +                       spin_unlock_bh(&dev->xmit_lock);
5352 +#endif
5353 +               }
5354 +
5355 +               if (!nv_register_test(dev)) {
5356 +                       test->flags |= ETH_TEST_FL_FAILED;
5357 +                       buffer[1] = 1;
5358 +               }
5359 +
5360 +               result = nv_interrupt_test(dev);
5361 +               if (result != 1) {
5362 +                       test->flags |= ETH_TEST_FL_FAILED;
5363 +                       buffer[2] = 1;
5364 +               }
5365 +               if (result == 0) {
5366 +                       /* bail out */
5367 +                       return;
5368 +               }
5369 +
5370 +               if (!nv_loopback_test(dev)) {
5371 +                       test->flags |= ETH_TEST_FL_FAILED;
5372 +                       buffer[3] = 1;
5373 +               }
5374 +
5375 +               if (netif_running(dev)) {
5376 +                       /* reinit driver view of the rx queue */
5377 +                       set_bufsize(dev);
5378 +                       if (nv_init_ring(dev)) {
5379 +                               if (!np->in_shutdown)
5380 +                                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5381 +                       }
5382 +                       /* reinit nic view of the rx queue */
5383 +                       writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5384 +                       setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5385 +                       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5386 +                               base + NvRegRingSizes);
5387 +                       pci_push(base);
5388 +                       writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5389 +                       pci_push(base);
5390 +                       /* restart rx engine */
5391 +                       nv_start_rx(dev);
5392 +                       nv_start_tx(dev);
5393 +                       netif_start_queue(dev);
5394 +                       nv_enable_hw_interrupts(dev, np->irqmask);
5395 +               }
5396 +       }
5397 +}
5398 +
5399 +static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
5400  {
5401 -       return FORCEDETH_REGS_SIZE;
5402 +       switch (stringset) {
5403 +       case ETH_SS_STATS:
5404 +               memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
5405 +               break;
5406 +       case ETH_SS_TEST:
5407 +               memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
5408 +               break;
5409 +       }
5410  }
5411  
5412 -static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
5413 +static struct ethtool_ops ops = {
5414 +       .get_drvinfo = nv_get_drvinfo,
5415 +       .get_link = ethtool_op_get_link,
5416 +       .get_wol = nv_get_wol,
5417 +       .set_wol = nv_set_wol,
5418 +       .get_settings = nv_get_settings,
5419 +       .set_settings = nv_set_settings,
5420 +       .get_regs_len = nv_get_regs_len,
5421 +       .get_regs = nv_get_regs,
5422 +       .nway_reset = nv_nway_reset,
5423 +#if NVVER > SUSE10
5424 +       .get_perm_addr = ethtool_op_get_perm_addr,
5425 +#endif
5426 +       .get_ringparam = nv_get_ringparam,
5427 +       .set_ringparam = nv_set_ringparam,
5428 +       .get_pauseparam = nv_get_pauseparam,
5429 +       .set_pauseparam = nv_set_pauseparam,
5430 +       .get_rx_csum = nv_get_rx_csum,
5431 +       .set_rx_csum = nv_set_rx_csum,
5432 +       .get_tx_csum = ethtool_op_get_tx_csum,
5433 +       .set_tx_csum = nv_set_tx_csum,
5434 +       .get_sg = ethtool_op_get_sg,
5435 +       .set_sg = nv_set_sg,
5436 +#ifdef NETIF_F_TSO
5437 +       .get_tso = ethtool_op_get_tso,
5438 +       .set_tso = nv_set_tso,
5439 +#endif
5440 +       .get_strings = nv_get_strings,
5441 +       .get_stats_count = nv_get_stats_count,
5442 +       .get_ethtool_stats = nv_get_ethtool_stats,
5443 +       .self_test_count = nv_self_test_count,
5444 +       .self_test = nv_self_test,
5445 +};
5446 +
5447 +static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5448  {
5449 -       struct fe_priv *np = netdev_priv(dev);
5450 -       u8 __iomem *base = get_hwbase(dev);
5451 -       u32 *rbuf = buf;
5452 -       int i;
5453 +       struct fe_priv *np = get_nvpriv(dev);
5454  
5455 -       regs->version = FORCEDETH_REGS_VER;
5456         spin_lock_irq(&np->lock);
5457 -       for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
5458 -               rbuf[i] = readl(base + i*sizeof(u32));
5459 +
5460 +       /* save vlan group */
5461 +       np->vlangrp = grp;
5462 +
5463 +       if (grp) {
5464 +               /* enable vlan on MAC */
5465 +               np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
5466 +               /* vlan is dependent on rx checksum */
5467 +               np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5468 +       } else {
5469 +               /* disable vlan on MAC */
5470 +               np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
5471 +               np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
5472 +               if (!np->rx_csum)
5473 +                       np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
5474 +       }
5475 +
5476 +       writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5477 +
5478         spin_unlock_irq(&np->lock);
5479 -}
5480 +}
5481  
5482 -static int nv_nway_reset(struct net_device *dev)
5483 +static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
5484 +{
5485 +       /* nothing to do */
5486 +}
5487 +
5488 +/* The mgmt unit and driver use a semaphore to access the phy during init */
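+/* Acquisition: poll up to 10 x 500ms for the mgmt side to release its
+ * semaphore, then set the host-acquire bit and read back to confirm the
+ * mgmt unit did not grab it in the meantime. Returns 1 on success, 0 if
+ * the semaphore could not be taken.
+ */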
5489 +static int nv_mgmt_acquire_sema(struct net_device *dev)
5490  {
5491 -       struct fe_priv *np = netdev_priv(dev);
5492 -       int ret;
5493 +       u8 __iomem *base = get_hwbase(dev);
5494 +       int i;
5495 +       u32 tx_ctrl, mgmt_sema;
5496  
5497 -       spin_lock_irq(&np->lock);
5498 -       if (np->autoneg) {
5499 -               int bmcr;
5500 +       for (i = 0; i < 10; i++) {
5501 +               mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5502 +               if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) {
5503 +                       dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: sema is free\n");
5504 +                       break;
5505 +               }
5506 +               nv_msleep(500);
5507 +       }
5508  
5509 -               bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
5510 -               bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
5511 -               mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
5512 +       if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) {
5513 +               dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: sema is not free\n");
5514 +               return 0;
5515 +       }
5516  
5517 -               ret = 0;
5518 -       } else {
5519 -               ret = -EINVAL;
5520 +       for (i = 0; i < 2; i++) {
5521 +               tx_ctrl = readl(base + NvRegTransmitterControl);
5522 +               tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5523 +               writel(tx_ctrl, base + NvRegTransmitterControl);
5524 +
5525 +               /* verify that semaphore was acquired */
5526 +               tx_ctrl = readl(base + NvRegTransmitterControl);
5527 +               if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5528 +                   ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5529 +                       dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: acquired sema\n");
5530 +                       return 1;
5531 +               } else
5532 +                       udelay(50);
5533         }
5534 -       spin_unlock_irq(&np->lock);
5535  
5536 -       return ret;
5537 +       dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: exit\n");
5538 +       return 0;
5539  }
5540  
5541 -static struct ethtool_ops ops = {
5542 -       .get_drvinfo = nv_get_drvinfo,
5543 -       .get_link = ethtool_op_get_link,
5544 -       .get_wol = nv_get_wol,
5545 -       .set_wol = nv_set_wol,
5546 -       .get_settings = nv_get_settings,
5547 -       .set_settings = nv_set_settings,
5548 -       .get_regs_len = nv_get_regs_len,
5549 -       .get_regs = nv_get_regs,
5550 -       .nway_reset = nv_nway_reset,
5551 -       .get_perm_addr = ethtool_op_get_perm_addr,
5552 -};
5553 -
5554  static int nv_open(struct net_device *dev)
5555  {
5556 -       struct fe_priv *np = netdev_priv(dev);
5557 +       struct fe_priv *np = get_nvpriv(dev);
5558         u8 __iomem *base = get_hwbase(dev);
5559 -       int ret, oom, i;
5560 +       int ret = 1;
5561 +       int oom, i;
5562  
5563         dprintk(KERN_DEBUG "nv_open: begin\n");
5564  
5565 -       /* 1) erase previous misconfiguration */
5566 -       /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
5567 +       /* erase previous misconfiguration */
5568 +       if (np->driver_data & DEV_HAS_POWER_CNTRL)
5569 +               nv_mac_reset(dev);
5570 +       /* stop adapter: ignored, 4.3 seems to be overkill */
5571         writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5572         writel(0, base + NvRegMulticastAddrB);
5573         writel(0, base + NvRegMulticastMaskA);
5574 @@ -2238,44 +5498,44 @@
5575  
5576         writel(0, base + NvRegAdapterControl);
5577  
5578 -       /* 2) initialize descriptor rings */
5579 +       if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5580 +               writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
5581 +
5582 +       /* initialize descriptor rings */
5583         set_bufsize(dev);
5584         oom = nv_init_ring(dev);
5585  
5586         writel(0, base + NvRegLinkSpeed);
5587 -       writel(0, base + NvRegUnknownTransmitterReg);
5588 +       writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5589         nv_txrx_reset(dev);
5590         writel(0, base + NvRegUnknownSetupReg6);
5591  
5592         np->in_shutdown = 0;
5593  
5594 -       /* 3) set mac address */
5595 -       nv_copy_mac_to_hw(dev);
5596 -
5597 -       /* 4) give hw rings */
5598 -       writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
5599 -       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
5600 -               writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
5601 -       else
5602 -               writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
5603 -       writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
5604 +       /* give hw rings */
5605 +       setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5606 +       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5607                 base + NvRegRingSizes);
5608  
5609 -       /* 5) continue setup */
5610 +       /* continue setup */
5611         writel(np->linkspeed, base + NvRegLinkSpeed);
5612 -       writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
5613 +       if (np->desc_ver == DESC_VER_1)
5614 +               writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5615 +       else
5616 +               writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5617         writel(np->txrxctl_bits, base + NvRegTxRxControl);
5618 +       writel(np->vlanctl_bits, base + NvRegVlanControl);
5619         pci_push(base);
5620         writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5621         reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5622                         NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
5623                         KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
5624  
5625 -       writel(0, base + NvRegUnknownSetupReg4);
5626 +       writel(0, base + NvRegMIIMask);
5627         writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5628         writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
5629  
5630 -       /* 6) continue setup */
5631 +       /* continue setup */
5632         writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5633         writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5634         writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5635 @@ -2284,8 +5544,8 @@
5636         writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5637         get_random_bytes(&i, sizeof(i));
5638         writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
5639 -       writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
5640 -       writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
5641 +       writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5642 +       writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5643         if (poll_interval == -1) {
5644                 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5645                         writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5646 @@ -2298,8 +5558,9 @@
5647         writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5648                         base + NvRegAdapterControl);
5649         writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5650 -       writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
5651 -       writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
5652 +       writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5653 +       if (np->wolenabled)
5654 +               writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
5655  
5656         i = readl(base + NvRegPowerState);
5657         if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
5658 @@ -2309,18 +5570,18 @@
5659         udelay(10);
5660         writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5661  
5662 -       writel(0, base + NvRegIrqMask);
5663 +       nv_disable_hw_interrupts(dev, np->irqmask);
5664         pci_push(base);
5665         writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
5666         writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5667         pci_push(base);
5668  
5669 -       ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev);
5670 -       if (ret)
5671 +       if (nv_request_irq(dev, 0)) {
5672                 goto out_drain;
5673 +       }
5674  
5675         /* ask for interrupts */
5676 -       writel(np->irqmask, base + NvRegIrqMask);
5677 +       nv_enable_hw_interrupts(dev, np->irqmask);
5678  
5679         spin_lock_irq(&np->lock);
5680         writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5681 @@ -2347,11 +5608,15 @@
5682         if (ret) {
5683                 netif_carrier_on(dev);
5684         } else {
5685 -               printk("%s: no link during initialization.\n", dev->name);
5686 +               dprintk(KERN_DEBUG "%s: no link during initialization.\n", dev->name);
5687                 netif_carrier_off(dev);
5688         }
5689         if (oom)
5690                 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5691 +
5692 +       /* start statistics timer */
5693 +       mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
5694 +
5695         spin_unlock_irq(&np->lock);
5696  
5697         return 0;
5698 @@ -2362,16 +5627,23 @@
5699  
5700  static int nv_close(struct net_device *dev)
5701  {
5702 -       struct fe_priv *np = netdev_priv(dev);
5703 +       struct fe_priv *np = get_nvpriv(dev);
5704         u8 __iomem *base;
5705  
5706 +       dprintk(KERN_DEBUG "nv_close: begin\n");
5707         spin_lock_irq(&np->lock);
5708         np->in_shutdown = 1;
5709         spin_unlock_irq(&np->lock);
5710 +
5711 +#if NVVER > RHES3
5712         synchronize_irq(dev->irq);
5713 +#else
5714 +       synchronize_irq();
5715 +#endif
5716  
5717         del_timer_sync(&np->oom_kick);
5718         del_timer_sync(&np->nic_poll);
5719 +       del_timer_sync(&np->stats_poll);
5720  
5721         netif_stop_queue(dev);
5722         spin_lock_irq(&np->lock);
5723 @@ -2381,25 +5653,19 @@
5724  
5725         /* disable interrupts on the nic or we will lock up */
5726         base = get_hwbase(dev);
5727 -       writel(0, base + NvRegIrqMask);
5728 +       nv_disable_hw_interrupts(dev, np->irqmask);
5729         pci_push(base);
5730         dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
5731  
5732         spin_unlock_irq(&np->lock);
5733  
5734 -       free_irq(dev->irq, dev);
5735 +       nv_free_irq(dev);
5736  
5737         drain_ring(dev);
5738  
5739         if (np->wolenabled)
5740                 nv_start_rx(dev);
5741  
5742 -       /* special op: write back the misordered MAC address - otherwise
5743 -        * the next nv_probe would see a wrong address.
5744 -        */
5745 -       writel(np->orig_mac[0], base + NvRegMacAddrA);
5746 -       writel(np->orig_mac[1], base + NvRegMacAddrB);
5747 -
5748         /* FIXME: power down nic */
5749  
5750         return 0;
5751 @@ -2412,13 +5678,18 @@
5752         unsigned long addr;
5753         u8 __iomem *base;
5754         int err, i;
5755 +       u32 powerstate, phystate_orig = 0, phystate, txreg;
5756 +       int phyinitialized = 0;
5757  
5758 +       /* modify network device class id */
5759 +       quirk_nforce_network_class(pci_dev);
5760         dev = alloc_etherdev(sizeof(struct fe_priv));
5761         err = -ENOMEM;
5762         if (!dev)
5763                 goto out;
5764  
5765 -       np = netdev_priv(dev);
5766 +       dprintk(KERN_DEBUG "%s:%s\n", dev->name, __FUNCTION__);
5767 +       np = get_nvpriv(dev);
5768         np->pci_dev = pci_dev;
5769         spin_lock_init(&np->lock);
5770         SET_MODULE_OWNER(dev);
5771 @@ -2430,6 +5701,9 @@
5772         init_timer(&np->nic_poll);
5773         np->nic_poll.data = (unsigned long) dev;
5774         np->nic_poll.function = &nv_do_nic_poll;        /* timer handler */
5775 +       init_timer(&np->stats_poll);
5776 +       np->stats_poll.data = (unsigned long) dev;
5777 +       np->stats_poll.function = &nv_do_stats_poll;    /* timer handler */
5778  
5779         err = pci_enable_device(pci_dev);
5780         if (err) {
5781 @@ -2444,15 +5718,22 @@
5782         if (err < 0)
5783                 goto out_disable;
5784  
5785 +       if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
5786 +               np->register_size = NV_PCI_REGSZ_VER3;
5787 +       else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5788 +               np->register_size = NV_PCI_REGSZ_VER2;
5789 +       else
5790 +               np->register_size = NV_PCI_REGSZ_VER1;
5791 +
5792         err = -EINVAL;
5793         addr = 0;
5794         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5795                 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
5796                                 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
5797 -                               pci_resource_len(pci_dev, i),
5798 -                               pci_resource_flags(pci_dev, i));
5799 +                               (long)pci_resource_len(pci_dev, i),
5800 +                               (long)pci_resource_flags(pci_dev, i));
5801                 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5802 -                               pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) {
5803 +                               pci_resource_len(pci_dev, i) >= np->register_size) {
5804                         addr = pci_resource_start(pci_dev, i);
5805                         break;
5806                 }
5807 @@ -2463,17 +5744,29 @@
5808                 goto out_relreg;
5809         }
5810  
5811 +       /* copy of driver data */
5812 +       np->driver_data = id->driver_data;
5813 +
5814         /* handle different descriptor versions */
5815         if (id->driver_data & DEV_HAS_HIGH_DMA) {
5816                 /* packet format 3: supports 40-bit addressing */
5817                 np->desc_ver = DESC_VER_3;
5818 -               if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
5819 -                       printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
5820 -                                       pci_name(pci_dev));
5821 -               } else {
5822 -                       dev->features |= NETIF_F_HIGHDMA;
5823 -               }
5824                 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
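+               /* DESC_VER_3 descriptors carry extended buffer addresses, but
+                * only a 39-bit mask is requested here (DMA_39BIT_MASK covers
+                * the same range as the old hard-coded 0x0000007fffffffffULL);
+                * if the platform rejects it, the device keeps the default
+                * 32-bit mask. */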
5825 +               if (dma_64bit) {
5826 +                       if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
5827 +                               printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
5828 +                                      pci_name(pci_dev));
5829 +                       } else {
5830 +                               dev->features |= NETIF_F_HIGHDMA;
5831 +                               printk(KERN_INFO "forcedeth: using HIGHDMA\n");
5832 +                       }
5833 +#if NVVER > RHES3
5834 +                       if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
5835 +                               printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
5836 +                                      pci_name(pci_dev));
5837 +                       }
5838 +#endif
5839 +               }
5840         } else if (id->driver_data & DEV_HAS_LARGEDESC) {
5841                 /* packet format 2: supports jumbo frames */
5842                 np->desc_ver = DESC_VER_2;
5843 @@ -2487,49 +5780,153 @@
5844         np->pkt_limit = NV_PKTLIMIT_1;
5845         if (id->driver_data & DEV_HAS_LARGEDESC)
5846                 np->pkt_limit = NV_PKTLIMIT_2;
5847 +       if (mtu > np->pkt_limit) {
5848 +               printk(KERN_INFO "forcedeth: MTU value of %d is too large. Setting to maximum value of %d\n",
5849 +                      mtu, np->pkt_limit);
5850 +               dev->mtu = np->pkt_limit;
5851 +       } else {
5852 +               dev->mtu = mtu;
5853 +       }
5854  
5855         if (id->driver_data & DEV_HAS_CHECKSUM) {
5856 -               np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5857 -               dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5858 +               if (rx_checksum_offload) {
5859 +                       np->rx_csum = 1;
5860 +                       np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5861 +               }
5862 +
5863 +               if (tx_checksum_offload)
5864 +#if NVVER > RHES4
5865 +                       dev->features |= NETIF_F_HW_CSUM;
5866 +#else
5867 +                       dev->features |= NETIF_F_IP_CSUM;
5868 +#endif
5869 +
5870 +               if (scatter_gather)
5871 +                       dev->features |= NETIF_F_SG;
5872  #ifdef NETIF_F_TSO
5873 -               dev->features |= NETIF_F_TSO;
5874 +               if (tso_offload)
5875 +                       dev->features |= NETIF_F_TSO;
5876  #endif
5877         }
5878  
5879 +       np->vlanctl_bits = 0;
5880 +       if ((id->driver_data & DEV_HAS_VLAN) && tagging_8021pq) {
5881 +               np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5882 +               dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
5883 +               dev->vlan_rx_register = nv_vlan_rx_register;
5884 +               dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
5885 +               /* vlan needs rx checksum support, so force it */
5886 +               np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5887 +       }
5888 +
5889 +       np->msi_flags = 0;
5890 +       if ((id->driver_data & DEV_HAS_MSI) && msi) {
5891 +               np->msi_flags |= NV_MSI_CAPABLE;
5892 +       }
5893 +       if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5894 +               np->msi_flags |= NV_MSI_X_CAPABLE;
5895 +       }
5896 +
5897 +       np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE;
5898 +       if (rx_flow_control == NV_RX_FLOW_CONTROL_ENABLED)
5899 +               np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
5900 +       if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
5901 +               np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE;
5902 +               if (tx_flow_control == NV_TX_FLOW_CONTROL_ENABLED)
5903 +                       np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
5904 +       }
5905 +       if (autoneg == AUTONEG_ENABLE) {
5906 +               np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
5907 +       } else if (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX) {
5908 +               printk(KERN_INFO "forcedeth: speed_duplex of 1000 full cannot be enabled if autoneg is disabled\n");
5909 +               goto out_relreg;
5910 +       }
5911 +
5912 +       /* save phy config */
5913 +       np->autoneg = autoneg;
5914 +       np->speed_duplex = speed_duplex;
5915 +
5916         err = -ENOMEM;
5917 -       np->base = ioremap(addr, NV_PCI_REGSZ);
5918 +       np->base = ioremap(addr, np->register_size);
5919         if (!np->base)
5920                 goto out_relreg;
5921         dev->base_addr = (unsigned long)np->base;
5922  
5923         dev->irq = pci_dev->irq;
5924  
5925 +       if (np->desc_ver == DESC_VER_1) {
5926 +               if (rx_ring_size > RING_MAX_DESC_VER_1) {
5927 +                       printk(KERN_INFO "forcedeth: rx_ring_size of %d is too large. Setting to maximum of %d\n",
5928 +                              rx_ring_size, RING_MAX_DESC_VER_1);
5929 +                       rx_ring_size = RING_MAX_DESC_VER_1;
5930 +               }
5931 +               if (tx_ring_size > RING_MAX_DESC_VER_1) {
5932 +                       printk(KERN_INFO "forcedeth: tx_ring_size of %d is too large. Setting to maximum of %d\n",
5933 +                              tx_ring_size, RING_MAX_DESC_VER_1);
5934 +                       tx_ring_size = RING_MAX_DESC_VER_1;
5935 +               }
5936 +       } else {
5937 +               if (rx_ring_size > RING_MAX_DESC_VER_2_3) {
5938 +                       printk(KERN_INFO "forcedeth: rx_ring_size of %d is too large. Setting to maximum of %d\n",
5939 +                              rx_ring_size, RING_MAX_DESC_VER_2_3);
5940 +                       rx_ring_size = RING_MAX_DESC_VER_2_3;
5941 +               }
5942 +               if (tx_ring_size > RING_MAX_DESC_VER_2_3) {
5943 +                       printk(KERN_INFO "forcedeth: tx_ring_size of %d is too large. Setting to maximum of %d\n",
5944 +                              tx_ring_size, RING_MAX_DESC_VER_2_3);
5945 +                       tx_ring_size = RING_MAX_DESC_VER_2_3;
5946 +               }
5947 +       }
5948 +       np->rx_ring_size = rx_ring_size;
5949 +       np->tx_ring_size = tx_ring_size;
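+       /* thresholds for the transmit path: stop the queue once fewer than
+        * TX_LIMIT_DIFFERENCE descriptors remain free, wake it again one
+        * descriptor below that */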
5950 +       np->tx_limit_stop = tx_ring_size - TX_LIMIT_DIFFERENCE;
5951 +       np->tx_limit_start = tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
5952 +
5953         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
5954                 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5955 -                                       sizeof(struct ring_desc) * (RX_RING + TX_RING),
5956 +                                       sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5957                                         &np->ring_addr);
5958                 if (!np->rx_ring.orig)
5959                         goto out_unmap;
5960 -               np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
5961 +               np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5962         } else {
5963                 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5964 -                                       sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
5965 +                                       sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5966                                         &np->ring_addr);
5967                 if (!np->rx_ring.ex)
5968                         goto out_unmap;
5969 -               np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
5970 +               np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5971         }
5972 +       np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
5973 +       np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
5974 +       if (!np->rx_skb || !np->tx_skb)
5975 +               goto out_freering;
5976 +       memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
5977 +       memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
5978  
5979         dev->open = nv_open;
5980         dev->stop = nv_close;
5981 -       dev->hard_start_xmit = nv_start_xmit;
5982 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
5983 +               dev->hard_start_xmit = nv_start_xmit;
5984 +       else
5985 +               dev->hard_start_xmit = nv_start_xmit_optimized;
5986         dev->get_stats = nv_get_stats;
5987         dev->change_mtu = nv_change_mtu;
5988         dev->set_mac_address = nv_set_mac_address;
5989         dev->set_multicast_list = nv_set_multicast;
5990 +
5991 +#if NVVER < SLES9
5992 +       dev->do_ioctl = nv_ioctl;
5993 +#endif
5994 +
5995 +#if NVVER > RHES3
5996  #ifdef CONFIG_NET_POLL_CONTROLLER
5997         dev->poll_controller = nv_poll_controller;
5998  #endif
5999 +#else
6000 +       dev->poll_controller = nv_poll_controller;
6001 +#endif
6002 +
6003         SET_ETHTOOL_OPS(dev, &ops);
6004         dev->tx_timeout = nv_tx_timeout;
6005         dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
6006 @@ -2541,15 +5938,37 @@
6007         np->orig_mac[0] = readl(base + NvRegMacAddrA);
6008         np->orig_mac[1] = readl(base + NvRegMacAddrB);
6009  
6010 -       dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
6011 -       dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
6012 -       dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
6013 -       dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
6014 -       dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
6015 -       dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
6016 +       /* check the workaround bit for correct mac address order */
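+       /* older boards store the MAC reversed in NvRegMacAddrA/B; the
+        * NVREG_TRANSMITPOLL_MAC_ADDR_REV bit records whether it has already
+        * been put into the correct order (newer parts instead set
+        * DEV_HAS_CORRECT_MACADDR in driver_data) */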
6017 +       txreg = readl(base + NvRegTransmitPoll);
6018 +       if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) ||
6019 +          (id->driver_data & DEV_HAS_CORRECT_MACADDR)) {
6020 +               /* mac address is already in correct order */
6021 +               dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
6022 +               dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
6023 +               dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
6024 +               dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
6025 +               dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
6026 +               dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
6027 +       } else {
6028 +               dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
6029 +               dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
6030 +               dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
6031 +               dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
6032 +               dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
6033 +               dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
6034 +               /* set permanent address to be correct as well */
6035 +               np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
6036 +                       (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
6037 +               np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
6038 +               writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
6039 +       }
6040 +#if NVVER > SUSE10
6041         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6042  
6043         if (!is_valid_ether_addr(dev->perm_addr)) {
6044 +#else
6045 +       if (!is_valid_ether_addr(dev->dev_addr)) {
6046 +#endif
6047                 /*
6048                  * Bad mac address. At least one bios sets the mac address
6049                  * to 01:23:45:67:89:ab
6050 @@ -2568,20 +5987,41 @@
6051         dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
6052                         dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
6053                         dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
6054 +       /* set mac address */
6055 +       nv_copy_mac_to_hw(dev);
6056  
6057         /* disable WOL */
6058         writel(0, base + NvRegWakeUpFlags);
6059 -       np->wolenabled = 0;
6060 +       np->wolenabled = wol;
6061 +
6062 +       if (id->driver_data & DEV_HAS_POWER_CNTRL) {
6063 +               u8 revision_id;
6064 +               pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);
6065 +
6066 +               /* take phy and nic out of low power mode */
6067 +               powerstate = readl(base + NvRegPowerState2);
6068 +               powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
6069 +               if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
6070 +                    id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
6071 +                   revision_id >= 0xA3)
6072 +                       powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
6073 +               writel(powerstate, base + NvRegPowerState2);
6074 +       }
6075  
6076         if (np->desc_ver == DESC_VER_1) {
6077                 np->tx_flags = NV_TX_VALID;
6078         } else {
6079                 np->tx_flags = NV_TX2_VALID;
6080         }
6081 -       if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
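+       /* the low bits of msi_flags encode how many MSI-X vectors to request:
+        * three in throughput mode (presumably one each for rx, tx and other
+        * events), one shared vector in cpu mode */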
6082 +       if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
6083                 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
6084 -       else
6085 +               if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
6086 +                       np->msi_flags |= 0x0003;
6087 +       } else {
6088                 np->irqmask = NVREG_IRQMASK_CPU;
6089 +               if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
6090 +                       np->msi_flags |= 0x0001;
6091 +       }
6092  
6093         if (id->driver_data & DEV_NEED_TIMERIRQ)
6094                 np->irqmask |= NVREG_IRQ_TIMER;
6095 @@ -2594,6 +6034,41 @@
6096                 np->need_linktimer = 0;
6097         }
6098  
6099 +       /* clear phy state and temporarily halt phy interrupts */
6100 +       writel(0, base + NvRegMIIMask);
6101 +       phystate = readl(base + NvRegAdapterControl);
6102 +       if (phystate & NVREG_ADAPTCTL_RUNNING) {
6103 +               phystate_orig = 1;
6104 +               phystate &= ~NVREG_ADAPTCTL_RUNNING;
6105 +               writel(phystate, base + NvRegAdapterControl);
6106 +       }
6107 +       writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
6108 +
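+       /* if a sideband management unit shares the MAC/PHY, wait up to ~5
+        * seconds for nv_mgmt_acquire_sema() before touching the phy, and
+        * skip phy_init() when the firmware has already set it up */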
6109 +       if (id->driver_data & DEV_HAS_MGMT_UNIT) {
6110 +               /* management unit running on the mac? */
6111 +               if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
6112 +                       np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
6113 +                       dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
6114 +                       for (i = 0; i < 5000; i++) {
6115 +                               nv_msleep(1);
6116 +                               if (nv_mgmt_acquire_sema(dev)) {
6117 +                                       /* has the management unit set up the phy already? */
6118 +                                       if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
6119 +                                           NVREG_XMITCTL_SYNC_PHY_INIT) {
6120 +                                               if (np->mac_in_use) {
6121 +                                                       /* phy was initialized by the mgmt unit */
6122 +                                                       phyinitialized = 1;
6123 +                                                       dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
6124 +                                               }
6125 +                                       } else {
6126 +                                               /* we still need to init the phy */
6127 +                                       }
6128 +                                       break;
6129 +                               }
6130 +                       }
6131 +               }
6132 +       }
6133 +
6134         /* find a suitable phy */
6135         for (i = 1; i <= 32; i++) {
6136                 int id1, id2;
6137 @@ -2610,32 +6085,45 @@
6138                 if (id2 < 0 || id2 == 0xffff)
6139                         continue;
6140  
6141 +               np->phy_model = id2 & PHYID2_MODEL_MASK;
6142                 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
6143                 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
6144                 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
6145 -                       pci_name(pci_dev), id1, id2, phyaddr);
6146 +                               pci_name(pci_dev), id1, id2, phyaddr);
6147                 np->phyaddr = phyaddr;
6148                 np->phy_oui = id1 | id2;
6149                 break;
6150         }
6151         if (i == 33) {
6152                 printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
6153 -                      pci_name(pci_dev));
6154 -               goto out_freering;
6155 +                               pci_name(pci_dev));
6156 +               goto out_error;
6157         }
6158         
6159 -       /* reset it */
6160 -       phy_init(dev);
6161 +       if (!phyinitialized) {
6162 +               /* reset it */
6163 +               phy_init(dev);
6164 +       } else {
6165 +               /* see if it is a gigabit phy */
6166 +               u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
6167 +               if (mii_status & PHY_GIGABIT) {
6168 +                       np->gigabit = PHY_GIGABIT;
6169 +               }
6170 +       }
6171 +
6172 +       if (np->phy_oui == PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011 && np->pci_dev->subsystem_vendor == 0x108E && np->pci_dev->subsystem_device == 0x6676) {
6173 +               nv_LED_on(dev);
6174 +       }
6175  
6176         /* set default link speed settings */
6177         np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
6178         np->duplex = 0;
6179 -       np->autoneg = 1;
6180 +       np->autoneg = autoneg;
6181  
6182         err = register_netdev(dev);
6183         if (err) {
6184                 printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
6185 -               goto out_freering;
6186 +               goto out_error;
6187         }
6188         printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
6189                         dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
6190 @@ -2643,14 +6131,12 @@
6191  
6192         return 0;
6193  
6194 -out_freering:
6195 -       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
6196 -               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
6197 -                                   np->rx_ring.orig, np->ring_addr);
6198 -       else
6199 -               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
6200 -                                   np->rx_ring.ex, np->ring_addr);
6201 +out_error:
6202 +       if (phystate_orig)
6203 +               writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
6204         pci_set_drvdata(pci_dev, NULL);
6205 +out_freering:
6206 +       free_rings(dev);
6207  out_unmap:
6208         iounmap(get_hwbase(dev));
6209  out_relreg:
6210 @@ -2663,18 +6149,27 @@
6211         return err;
6212  }
6213  
6214 +#ifdef CONFIG_PM
6215 +static void nv_set_low_speed(struct net_device *dev);
6216 +#endif
6217  static void __devexit nv_remove(struct pci_dev *pci_dev)
6218  {
6219         struct net_device *dev = pci_get_drvdata(pci_dev);
6220 -       struct fe_priv *np = netdev_priv(dev);
6221 +       struct fe_priv *np = get_nvpriv(dev);
6222 +       u8 __iomem *base = get_hwbase(dev);
6223  
6224 +       if (np->phy_oui == PHY_OUI_MARVELL && np->phy_model == PHY_MODEL_MARVELL_E1011 && np->pci_dev->subsystem_vendor == 0x108E && np->pci_dev->subsystem_device == 0x6676) {
6225 +               nv_LED_off(dev);
6226 +       }
6227         unregister_netdev(dev);
6228 +       /* special op: write back the misordered MAC address - otherwise
6229 +        * the next nv_probe would see a wrong address.
6230 +        */
6231 +       writel(np->orig_mac[0], base + NvRegMacAddrA);
6232 +       writel(np->orig_mac[1], base + NvRegMacAddrB);
6233  
6234         /* free all structures */
6235 -       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
6236 -               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
6237 -       else
6238 -               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
6239 +       free_rings(dev);
6240         iounmap(get_hwbase(dev));
6241         pci_release_regions(pci_dev);
6242         pci_disable_device(pci_dev);
6243 @@ -2713,65 +6208,471 @@
6244         },
6245         {       /* CK804 Ethernet Controller */
6246                 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
6247 -               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
6248 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
6249         },
6250         {       /* CK804 Ethernet Controller */
6251                 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
6252 -               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
6253 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
6254         },
6255         {       /* MCP04 Ethernet Controller */
6256                 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
6257 -               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
6258 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
6259         },
6260         {       /* MCP04 Ethernet Controller */
6261                 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
6262 -               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
6263 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
6264         },
6265         {       /* MCP51 Ethernet Controller */
6266                 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
6267 -               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
6268 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
6269         },
6270         {       /* MCP51 Ethernet Controller */
6271                 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
6272 -               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
6273 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
6274         },
6275         {       /* MCP55 Ethernet Controller */
6276                 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
6277 -               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
6278 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6279         },
6280         {       /* MCP55 Ethernet Controller */
6281                 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
6282 -               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
6283 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6284 +       },
6285 +       {       /* MCP61 Ethernet Controller */
6286 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
6287 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6288 +       },
6289 +       {       /* MCP61 Ethernet Controller */
6290 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
6291 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6292 +       },
6293 +       {       /* MCP61 Ethernet Controller */
6294 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
6295 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6296 +       },
6297 +       {       /* MCP61 Ethernet Controller */
6298 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
6299 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6300 +       },
6301 +       {       /* MCP65 Ethernet Controller */
6302 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
6303 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6304 +       },
6305 +       {       /* MCP65 Ethernet Controller */
6306 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
6307 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6308 +       },
6309 +       {       /* MCP65 Ethernet Controller */
6310 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
6311 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6312 +       },
6313 +       {       /* MCP65 Ethernet Controller */
6314 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
6315 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6316 +       },
6317 +       {       /* MCP67 Ethernet Controller */
6318 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
6319 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6320 +       },
6321 +       {       /* MCP67 Ethernet Controller */
6322 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
6323 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6324 +       },
6325 +       {       /* MCP67 Ethernet Controller */
6326 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
6327 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6328 +       },
6329 +       {       /* MCP67 Ethernet Controller */
6330 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
6331 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6332 +       },
6333 +        {      /* MCP73 Ethernet Controller */
6334 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
6335 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6336 +       },
6337 +       {       /* MCP73 Ethernet Controller */
6338 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
6339 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6340 +       },
6341 +       {       /* MCP73 Ethernet Controller */
6342 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
6343 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6344 +       },
6345 +       {       /* MCP73 Ethernet Controller */
6346 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
6347 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6348 +       },
6349 +       {       /* MCP77 Ethernet Controller */
6350 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
6351 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6352 +       },
6353 +       {       /* MCP77 Ethernet Controller */
6354 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
6355 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6356 +       },
6357 +       {       /* MCP77 Ethernet Controller */
6358 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
6359 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6360 +       },
6361 +       {       /* MCP77 Ethernet Controller */
6362 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
6363 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6364         },
6365         {0,},
6366  };
6367  
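+/* Illustrative sketch (hypothetical locals np and id): nv_probe-style
+ * code consumes the driver_data feature flags above roughly like this:
+ *
+ *     if (id->driver_data & DEV_HAS_MSI)
+ *             np->msi_flags |= NV_MSI_CAPABLE;
+ *     if (id->driver_data & DEV_HAS_VLAN)
+ *             dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ */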
6368 -static struct pci_driver driver = {
6369 +#ifdef CONFIG_PM
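+/*
+ * Renegotiate the link down to 10 or 100 Mbps before suspend so a
+ * gigabit link does not keep burning power while the system sleeps.
+ */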
6370 +static void nv_set_low_speed(struct net_device *dev)
6371 +{
6372 +       struct fe_priv *np = get_nvpriv(dev);
6373 +       int adv = 0;
6374 +       int lpa = 0;
6375 +       int adv_lpa, bmcr, tries = 0;
6376 +       int mii_status;
6377 +       u32 control_1000;
6378 +
6379 +       if (np->autoneg == 0 || ((np->linkspeed & 0xFFF) != NVREG_LINKSPEED_1000))
6380 +               return;
6381 +
6382 +       adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
6383 +       lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
6384 +       control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
6385 +
6386 +       adv_lpa = lpa & adv;
6387 +
6388 +       if ((adv_lpa & LPA_10FULL) || (adv_lpa & LPA_10HALF)) {
6389 +               adv &= ~(ADVERTISE_100BASE4 | ADVERTISE_100FULL | ADVERTISE_100HALF);
6390 +               control_1000 &= ~(ADVERTISE_1000FULL|ADVERTISE_1000HALF);
6391 +               printk(KERN_INFO "forcedeth %s: set low speed to 10 Mbps\n", dev->name);
6392 +       } else if ((adv_lpa & LPA_100FULL) || (adv_lpa & LPA_100HALF)) {
6393 +               control_1000 &= ~(ADVERTISE_1000FULL|ADVERTISE_1000HALF);
6394 +       } else
6395 +               return;
6396 +
6397 +       /* set new advertisements */
6398 +       mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
6399 +       mii_rw(dev, np->phyaddr, MII_CTRL1000, control_1000);
6400 +
6401 +       bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
6402 +       if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
6403 +               bmcr |= BMCR_ANENABLE;
6404 +               /* reset the phy in order for settings to stick,
6405 +                * and cause autoneg to start */
6406 +               if (phy_reset(dev, bmcr)) {
6407 +                       printk(KERN_INFO "%s: phy reset failed\n", dev->name);
6408 +                       return;
6409 +               }
6410 +       } else {
6411 +               bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
6412 +               mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
6413 +       }
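+       /* BMSR latches link-down events: read it twice so the second
+        * read reflects current status, then poll up to ~5 seconds
+        * (50 x 100 ms) for autonegotiation to complete.
+        */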
6414 +       mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
6415 +       mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
6416 +       while (!(mii_status & BMSR_ANEGCOMPLETE)) {
6417 +               nv_msleep(100);
6418 +               mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
6419 +               if (tries++ > 50)
6420 +                       break;
6421 +       }
6422 +       
6423 +       nv_update_linkspeed(dev);
6424 +
6425 +       return;
6426 +}
6427 +
6428 +static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
6429 +{
6430 +       struct net_device *dev = pci_get_drvdata(pdev);
6431 +       struct fe_priv *np = get_nvpriv(dev);
6432 +       u8 __iomem *base = get_hwbase(dev);
6433 +       int i;
6434 +
6435 +       dprintk(KERN_INFO "forcedeth: nv_suspend\n");
6436 +
6437 +       /* save msix table */
6438 +       {
6439 +               unsigned long phys_addr;        
6440 +               void __iomem *base_addr;        
6441 +               void __iomem *base;     
6442 +               unsigned int  bir,len;
6443 +               unsigned int i;
6444 +               int pos;
6445 +               u32 table_offset;
6446 +
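+               /* Per the PCI spec, the dword at MSI-X capability offset
+                * 0x04 packs the table's BAR indicator (BIR) into its low
+                * bits; the remainder is the table offset within that BAR.
+                * Each vector's address/data pair is saved here so that
+                * nv_resume can rewrite the table after power loss.
+                * (Assumes the device actually exposes an MSI-X
+                * capability; pci_find_capability returns 0 otherwise.)
+                */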
6447 +               pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
6448 +               pci_read_config_dword(pdev, pos + 0x04, &table_offset);
6449 +               bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
6450 +               table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
6451 +               phys_addr = pci_resource_start(pdev, bir) + table_offset;
6452 +               np->msix_pa_addr = phys_addr;
6453 +               len = NV_MSI_X_MAX_VECTORS * PCI_MSIX_ENTRY_SIZE;
6454 +               base_addr = ioremap_nocache(phys_addr, len);
6455 +
6456 +               for (i = 0; i < NV_MSI_X_MAX_VECTORS; i++) {
6457 +                       base = base_addr + i*PCI_MSIX_ENTRY_SIZE;
6458 +                       np->nvmsg[i].address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
6459 +                       np->nvmsg[i].address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
6460 +                       np->nvmsg[i].data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
6461 +               }
6462 +
6463 +               iounmap(base_addr);
6464 +       }
6465 +
6466 +       nv_update_linkspeed(dev);
6467 +
6468 +       if (netif_running(dev)) {
6469 +               netif_device_detach(dev);
6470 +               /* bring down the adapter */
6471 +               nv_close(dev);
6472 +       }
6473 +
6474 +       /* set phy to a lower speed to conserve power */
6475 +       if ((lowpowerspeed == NV_LOW_POWER_ENABLED) && !np->mac_in_use)
6476 +               nv_set_low_speed(dev);
6477 +
6478 +#if NVVER > RHES4
6479 +       pci_save_state(pdev);
6480 +#else
6481 +       pci_save_state(pdev, np->pci_state);
6482 +#endif
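+       /* save the PHY interface register and the full 256-byte PCI
+        * config space (64 dwords); nv_resume writes them back verbatim.
+        */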
6483 +       np->saved_nvregphyinterface = readl(base + NvRegPhyInterface);
6484 +       for (i = 0; i < 64; i++) {
6485 +               pci_read_config_dword(pdev, i * 4, &np->saved_config_space[i]);
6486 +       }
6487 +#if NVVER > RHES4
6488 +       pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
6489 +#else
6490 +       pci_enable_wake(pdev, state, np->wolenabled);
6491 +#endif
6492 +       pci_disable_device(pdev);
6493 +
6494 +#if NVVER > RHES4
6495 +       pci_set_power_state(pdev, pci_choose_state(pdev, state));
6496 +#else
6497 +       pci_set_power_state(pdev, state);
6498 +#endif
6499 +       
6500 +       return 0;
6501 +}
6502 +
6503 +static int nv_resume(struct pci_dev *pdev)
6504 +{
6505 +       struct net_device *dev = pci_get_drvdata(pdev);
6506 +       int rc = 0;
6507 +       struct fe_priv *np = get_nvpriv(dev);
6508 +       u8 __iomem *base = get_hwbase(dev);
6509 +       int i;
6510 +       u32 txreg; 
6511 +
6512 +       dprintk(KERN_INFO "forcedeth: nv_resume\n");
6513 +
6514 +       pci_set_power_state(pdev, PCI_D0);
6515 +#if NVVER > RHES4
6516 +       pci_restore_state(pdev);
6517 +#else
6518 +       pci_restore_state(pdev, np->pci_state);
6519 +#endif
6520 +       for (i = 0; i < 64; i++) {
6521 +               pci_write_config_dword(pdev, i * 4, np->saved_config_space[i]);
6522 +       }
6523 +       pci_enable_device(pdev);
6524 +       pci_set_master(pdev);
6525 +
6526 +       txreg = readl(base + NvRegTransmitPoll);
6527 +       txreg |= NVREG_TRANSMITPOLL_MAC_ADDR_REV;
6528 +       writel(txreg, base + NvRegTransmitPoll);
6529 +       writel(np->saved_nvregphyinterface, base + NvRegPhyInterface);
6530 +       writel(np->orig_mac[0], base + NvRegMacAddrA);
6531 +       writel(np->orig_mac[1], base + NvRegMacAddrB);
6532 +
6533 +       /* restore msix table */
6534 +       {
6535 +               unsigned long phys_addr;        
6536 +               void __iomem *base_addr;        
6537 +               void __iomem *base;     
6538 +               unsigned int  len;
6539 +               unsigned int i;
6540 +       
6541 +               len = NV_MSI_X_MAX_VECTORS * PCI_MSIX_ENTRY_SIZE;
6542 +               phys_addr = np->msix_pa_addr;
6543 +               base_addr = ioremap_nocache(phys_addr, len);
6544 +               for (i = 0; i < NV_MSI_X_MAX_VECTORS; i++) {
6545 +                       base = base_addr + i * PCI_MSIX_ENTRY_SIZE;
6546 +                       writel(np->nvmsg[i].address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
6547 +                       writel(np->nvmsg[i].address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
6548 +                       writel(np->nvmsg[i].data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
6549 +               }
6550 +
6551 +               iounmap(base_addr);
6552 +       }
6553 +
6554 +       if (lowpowerspeed == NV_LOW_POWER_ENABLED) {
6555 +               /* re-initialize the phy */
6556 +               phy_init(dev);
6557 +               udelay(10);
6558 +       }
6559 +       /* bring up the adapter */
6560 +       if (netif_running(dev)) {
6561 +               rc = nv_open(dev);
6562 +       }
6563 +       netif_device_attach(dev);
6564 +       
6565 +       return rc;
6566 +}
6567 +
6568 +#endif /* CONFIG_PM */
6569 +static struct pci_driver nv_eth_driver = {
6570         .name = "forcedeth",
6571         .id_table = pci_tbl,
6572         .probe = nv_probe,
6573         .remove = __devexit_p(nv_remove),
6574 +#ifdef CONFIG_PM
6575 +       .suspend        = nv_suspend,
6576 +       .resume         = nv_resume,
6577 +#endif
6578  };
6579  
6580 +#ifdef CONFIG_PM
6581 +static int nv_reboot_handler(struct notifier_block *nb, unsigned long event, void *p)
6582 +{
6583 +       struct pci_dev *pdev = NULL;
6584 +       pm_message_t state = { PM_EVENT_SUSPEND };
6585 +
6586 +       switch (event) {
6588 +               case SYS_POWER_OFF:
6589 +               case SYS_HALT:
6590 +               case SYS_DOWN:
6591 +                       while ((pdev = pci_find_device(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, pdev)) != NULL) {
6592 +                               if (pci_dev_driver(pdev) == &nv_eth_driver) {
6593 +                                       nv_suspend(pdev, state);
6594 +                               }
6595 +                       }
6596 +       }
6597 +
6598 +       return NOTIFY_DONE;
6599 +}
6600 +
6601 +/*
6602 + * Reboot notification: suspend every NIC bound to this driver on
6603 + * shutdown/halt so its wake-on-LAN state is programmed before poweroff.
6604 + */
6605 +static struct notifier_block nv_reboot_notifier = {
6606 +       .notifier_call  = nv_reboot_handler,
6607 +       .next           = NULL,
6608 +       .priority       = 0,
6609 +};
6610 +#endif
6611  
6612  static int __init init_nic(void)
6613  {
6614 +       int status;
6615         printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
6616 -       return pci_module_init(&driver);
6617 +       DPRINTK(DRV, KERN_DEBUG, "forcedeth:%s\n", DRV_DATE);
6618 +       status = pci_module_init(&nv_eth_driver);
6619 +#ifdef CONFIG_PM
6620 +       if (status >= 0)
6621 +               register_reboot_notifier(&nv_reboot_notifier);
6622 +#endif
6623 +       return status;
6624  }
6625  
6626  static void __exit exit_nic(void)
6627  {
6628 -       pci_unregister_driver(&driver);
6629 +#ifdef CONFIG_PM
6630 +       unregister_reboot_notifier(&nv_reboot_notifier);
6631 +#endif
6632 +       pci_unregister_driver(&nv_eth_driver);
6633  }
6634  
6635 +#if NVVER > SLES9
6636 +module_param(debug, int, 0);
6637 +module_param(lowpowerspeed, int, 0);
6638 +MODULE_PARM_DESC(lowpowerspeed, "Low Power State Link Speed is enabled by setting to 1 and disabled by setting to 0.");
6639  module_param(max_interrupt_work, int, 0);
6640  MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6641  module_param(optimization_mode, int, 0);
6642  MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
6643  module_param(poll_interval, int, 0);
6644  MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
6645 -
6646 +module_param(msi, int, 0);
6647 +MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6648 +module_param(msix, int, 0);
6649 +MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
6650 +
6651 +module_param(speed_duplex, int, 0);
6652 +MODULE_PARM_DESC(speed_duplex, "PHY speed and duplex settings. Auto = 0, 10mbps half = 1, 10mbps full = 2, 100mbps half = 3, 100mbps full = 4, 1000mbps full = 5.");
6653 +module_param(autoneg, int, 0);
6654 +MODULE_PARM_DESC(autoneg, "PHY autonegotiation is enabled by setting to 1 and disabled by setting to 0.");
6655 +module_param(scatter_gather, int, 0);
6656 +MODULE_PARM_DESC(scatter_gather, "Scatter gather is enabled by setting to 1 and disabled by setting to 0.");
6657 +module_param(tso_offload, int, 0);
6658 +MODULE_PARM_DESC(tso_offload, "TCP Segmentation offload is enabled by setting to 1 and disabled by setting to 0.");
6659 +module_param(mtu, int, 0);
6660 +MODULE_PARM_DESC(mtu, "MTU value. Maximum value of 1500 or 9100 depending on hardware.");
6661 +module_param(tx_checksum_offload, int, 0);
6662 +MODULE_PARM_DESC(tx_checksum_offload, "Tx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
6663 +module_param(rx_checksum_offload, int, 0);
6664 +MODULE_PARM_DESC(rx_checksum_offload, "Rx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
6665 +module_param(tx_ring_size, int, 0);
6666 +MODULE_PARM_DESC(tx_ring_size, "Tx ring size. Maximum value of 1024 or 16384 depending on hardware.");
6667 +module_param(rx_ring_size, int, 0);
6668 +MODULE_PARM_DESC(rx_ring_size, "Rx ring size. Maximum value of 1024 or 16384 depending on hardware.");
6669 +module_param(tx_flow_control, int, 0);
6670 +MODULE_PARM_DESC(tx_flow_control, "Tx flow control is enabled by setting to 1 and disabled by setting to 0.");
6671 +module_param(rx_flow_control, int, 0);
6672 +MODULE_PARM_DESC(rx_flow_control, "Rx flow control is enabled by setting to 1 and disabled by setting to 0.");
6673 +module_param(dma_64bit, int, 0);
6674 +MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6675 +module_param(wol, int, 0);
6676 +MODULE_PARM_DESC(wol, "Wake-On-LAN is enabled by setting to 1 and disabled by setting to 0.");
6677 +module_param(tagging_8021pq, int, 0);
6678 +MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0.");
6679 +#else
6680 +MODULE_PARM(debug, "i");
6681 +MODULE_PARM(lowpowerspeed, "i");
6682 +MODULE_PARM_DESC(lowpowerspeed, "Low Power State Link Speed is enabled by setting to 1 and disabled by setting to 0.");
6683 +MODULE_PARM(max_interrupt_work, "i");
6684 +MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6685 +MODULE_PARM(optimization_mode, "i");
6686 +MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
6687 +MODULE_PARM(poll_interval, "i");
6688 +MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
6689 +#ifdef CONFIG_PCI_MSI
6690 +MODULE_PARM(msi, "i");
6691 +MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6692 +MODULE_PARM(msix, "i");
6693 +MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
6694 +#endif
6695 +MODULE_PARM(speed_duplex, "i");
6696 +MODULE_PARM_DESC(speed_duplex, "PHY speed and duplex settings. Auto = 0, 10mbps half = 1, 10mbps full = 2, 100mbps half = 3, 100mbps full = 4, 1000mbps full = 5.");
6697 +MODULE_PARM(autoneg, "i");
6698 +MODULE_PARM_DESC(autoneg, "PHY autonegotiation is enabled by setting to 1 and disabled by setting to 0.");
6699 +MODULE_PARM(scatter_gather, "i");
6700 +MODULE_PARM_DESC(scatter_gather, "Scatter gather is enabled by setting to 1 and disabled by setting to 0.");
6701 +MODULE_PARM(tso_offload, "i");
6702 +MODULE_PARM_DESC(tso_offload, "TCP Segmentation offload is enabled by setting to 1 and disabled by setting to 0.");
6703 +MODULE_PARM(mtu, "i");
6704 +MODULE_PARM_DESC(mtu, "MTU value. Maximum value of 1500 or 9100 depending on hardware.");
6705 +MODULE_PARM(tx_checksum_offload, "i");
6706 +MODULE_PARM_DESC(tx_checksum_offload, "Tx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
6707 +MODULE_PARM(rx_checksum_offload, "i");
6708 +MODULE_PARM_DESC(rx_checksum_offload, "Rx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
6709 +MODULE_PARM(tx_ring_size, "i");
6710 +MODULE_PARM_DESC(tx_ring_size, "Tx ring size. Maximum value of 1024 or 16384 depending on hardware.");
6711 +MODULE_PARM(rx_ring_size, "i");
6712 +MODULE_PARM_DESC(rx_ring_size, "Rx ring size. Maximum value of 1024 or 16384 depending on hardware.");
6713 +MODULE_PARM(tx_flow_control, "i");
6714 +MODULE_PARM_DESC(tx_flow_control, "Tx flow control is enabled by setting to 1 and disabled by setting to 0.");
6715 +MODULE_PARM(rx_flow_control, "i");
6716 +MODULE_PARM_DESC(rx_flow_control, "Rx flow control is enabled by setting to 1 and disabled by setting to 0.");
6717 +MODULE_PARM(dma_64bit, "i");
6718 +MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6719 +MODULE_PARM(wol, "i");
6720 +MODULE_PARM_DESC(wol, "Wake-On-LAN is enabled by setting to 1 and disabled by setting to 0.");
6721 +MODULE_PARM(tagging_8021pq, "i");
6722 +MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0.");
6723 +#endif
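+/*
+ * Example usage (hypothetical values): load the driver with MSI-X and
+ * the low-power suspend link speed enabled:
+ *
+ *     modprobe forcedeth msix=1 lowpowerspeed=1
+ */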
6724  MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6725  MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6726  MODULE_LICENSE("GPL");
6727 diff -uNr linux-2.6.16.orig/drivers/scsi/sata_nv.c linux-2.6.16/drivers/scsi/sata_nv.c
6728 --- linux-2.6.16.orig/drivers/scsi/sata_nv.c    2008-11-02 19:51:53.000000000 +0100
6729 +++ linux-2.6.16/drivers/scsi/sata_nv.c 2008-11-03 01:02:50.000000000 +0100
6730 @@ -1,630 +1,1284 @@
6731 -/*
6732 - *  sata_nv.c - NVIDIA nForce SATA
6733 - *
6734 - *  Copyright 2004 NVIDIA Corp.  All rights reserved.
6735 - *  Copyright 2004 Andrew Chew
6736 - *
6737 - *
6738 - *  This program is free software; you can redistribute it and/or modify
6739 - *  it under the terms of the GNU General Public License as published by
6740 - *  the Free Software Foundation; either version 2, or (at your option)
6741 - *  any later version.
6742 - *
6743 - *  This program is distributed in the hope that it will be useful,
6744 - *  but WITHOUT ANY WARRANTY; without even the implied warranty of
6745 - *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
6746 - *  GNU General Public License for more details.
6747 - *
6748 - *  You should have received a copy of the GNU General Public License
6749 - *  along with this program; see the file COPYING.  If not, write to
6750 - *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
6751 - *
6752 - *
6753 - *  libata documentation is available via 'make {ps|pdf}docs',
6754 - *  as Documentation/DocBook/libata.*
6755 - *
6756 - *  No hardware documentation available outside of NVIDIA.
6757 - *  This driver programs the NVIDIA SATA controller in a similar
6758 - *  fashion as with other PCI IDE BMDMA controllers, with a few
6759 - *  NV-specific details such as register offsets, SATA phy location,
6760 - *  hotplug info, etc.
6761 - *
6762 - *  0.10
6763 - *     - Fixed spurious interrupts issue seen with the Maxtor 6H500F0 500GB
6764 - *       drive.  Also made the check_hotplug() callbacks return whether there
6765 - *       was a hotplug interrupt or not.  This was not the source of the
6766 - *       spurious interrupts, but is the right thing to do anyway.
6767 - *
6768 - *  0.09
6769 - *     - Fixed bug introduced by 0.08's MCP51 and MCP55 support.
6770 - *
6771 - *  0.08
6772 - *     - Added support for MCP51 and MCP55.
6773 - *
6774 - *  0.07
6775 - *     - Added support for RAID class code.
6776 - *
6777 - *  0.06
6778 - *     - Added generic SATA support by using a pci_device_id that filters on
6779 - *       the IDE storage class code.
6780 - *
6781 - *  0.03
6782 - *     - Fixed a bug where the hotplug handlers for non-CK804/MCP04 were using
6783 - *       mmio_base, which is only set for the CK804/MCP04 case.
6784 - *
6785 - *  0.02
6786 - *     - Added support for CK804 SATA controller.
6787 - *
6788 - *  0.01
6789 - *     - Initial revision.
6790 - */
6791 -
6792 -#include <linux/config.h>
6793 -#include <linux/kernel.h>
6794 -#include <linux/module.h>
6795 -#include <linux/pci.h>
6796 -#include <linux/init.h>
6797 -#include <linux/blkdev.h>
6798 -#include <linux/delay.h>
6799 -#include <linux/interrupt.h>
6800 -#include <linux/device.h>
6801 -#include <scsi/scsi_host.h>
6802 -#include <linux/libata.h>
6803 -
6804 -#define DRV_NAME                       "sata_nv"
6805 -#define DRV_VERSION                    "0.8"
6806 -
6807 -#define NV_PORTS                       2
6808 -#define NV_PIO_MASK                    0x1f
6809 -#define NV_MWDMA_MASK                  0x07
6810 -#define NV_UDMA_MASK                   0x7f
6811 -#define NV_PORT0_SCR_REG_OFFSET                0x00
6812 -#define NV_PORT1_SCR_REG_OFFSET                0x40
6813 -
6814 -#define NV_INT_STATUS                  0x10
6815 -#define NV_INT_STATUS_CK804            0x440
6816 -#define NV_INT_STATUS_PDEV_INT         0x01
6817 -#define NV_INT_STATUS_PDEV_PM          0x02
6818 -#define NV_INT_STATUS_PDEV_ADDED       0x04
6819 -#define NV_INT_STATUS_PDEV_REMOVED     0x08
6820 -#define NV_INT_STATUS_SDEV_INT         0x10
6821 -#define NV_INT_STATUS_SDEV_PM          0x20
6822 -#define NV_INT_STATUS_SDEV_ADDED       0x40
6823 -#define NV_INT_STATUS_SDEV_REMOVED     0x80
6824 -#define NV_INT_STATUS_PDEV_HOTPLUG     (NV_INT_STATUS_PDEV_ADDED | \
6825 -                                       NV_INT_STATUS_PDEV_REMOVED)
6826 -#define NV_INT_STATUS_SDEV_HOTPLUG     (NV_INT_STATUS_SDEV_ADDED | \
6827 -                                       NV_INT_STATUS_SDEV_REMOVED)
6828 -#define NV_INT_STATUS_HOTPLUG          (NV_INT_STATUS_PDEV_HOTPLUG | \
6829 -                                       NV_INT_STATUS_SDEV_HOTPLUG)
6830 -
6831 -#define NV_INT_ENABLE                  0x11
6832 -#define NV_INT_ENABLE_CK804            0x441
6833 -#define NV_INT_ENABLE_PDEV_MASK                0x01
6834 -#define NV_INT_ENABLE_PDEV_PM          0x02
6835 -#define NV_INT_ENABLE_PDEV_ADDED       0x04
6836 -#define NV_INT_ENABLE_PDEV_REMOVED     0x08
6837 -#define NV_INT_ENABLE_SDEV_MASK                0x10
6838 -#define NV_INT_ENABLE_SDEV_PM          0x20
6839 -#define NV_INT_ENABLE_SDEV_ADDED       0x40
6840 -#define NV_INT_ENABLE_SDEV_REMOVED     0x80
6841 -#define NV_INT_ENABLE_PDEV_HOTPLUG     (NV_INT_ENABLE_PDEV_ADDED | \
6842 -                                       NV_INT_ENABLE_PDEV_REMOVED)
6843 -#define NV_INT_ENABLE_SDEV_HOTPLUG     (NV_INT_ENABLE_SDEV_ADDED | \
6844 -                                       NV_INT_ENABLE_SDEV_REMOVED)
6845 -#define NV_INT_ENABLE_HOTPLUG          (NV_INT_ENABLE_PDEV_HOTPLUG | \
6846 -                                       NV_INT_ENABLE_SDEV_HOTPLUG)
6847 -
6848 -#define NV_INT_CONFIG                  0x12
6849 -#define NV_INT_CONFIG_METHD            0x01 // 0 = INT, 1 = SMI
6850 -
6851 -// For PCI config register 20
6852 -#define NV_MCP_SATA_CFG_20             0x50
6853 -#define NV_MCP_SATA_CFG_20_SATA_SPACE_EN       0x04
6854 -
6855 -static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
6856 -static irqreturn_t nv_interrupt (int irq, void *dev_instance,
6857 -                                struct pt_regs *regs);
6858 -static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
6859 -static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
6860 -static void nv_host_stop (struct ata_host_set *host_set);
6861 -static void nv_enable_hotplug(struct ata_probe_ent *probe_ent);
6862 -static void nv_disable_hotplug(struct ata_host_set *host_set);
6863 -static int nv_check_hotplug(struct ata_host_set *host_set);
6864 -static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent);
6865 -static void nv_disable_hotplug_ck804(struct ata_host_set *host_set);
6866 -static int nv_check_hotplug_ck804(struct ata_host_set *host_set);
6867 -
6868 -enum nv_host_type
6869 -{
6870 -       GENERIC,
6871 -       NFORCE2,
6872 -       NFORCE3,
6873 -       CK804
6874 -};
6875 -
6876 -static const struct pci_device_id nv_pci_tbl[] = {
6877 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
6878 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
6879 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
6880 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
6881 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
6882 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
6883 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
6884 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
6885 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
6886 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
6887 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
6888 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
6889 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
6890 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
6891 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
6892 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6893 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
6894 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6895 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
6896 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6897 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
6898 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6899 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
6900 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6901 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
6902 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6903 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
6904 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6905 -       { PCI_VENDOR_ID_NVIDIA, 0x045c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6906 -       { PCI_VENDOR_ID_NVIDIA, 0x045d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6907 -       { PCI_VENDOR_ID_NVIDIA, 0x045e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6908 -       { PCI_VENDOR_ID_NVIDIA, 0x045f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6909 -       { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
6910 -               PCI_ANY_ID, PCI_ANY_ID,
6911 -               PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
6912 -       { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
6913 -               PCI_ANY_ID, PCI_ANY_ID,
6914 -               PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
6915 -       { 0, } /* terminate list */
6916 -};
6917 -
6918 -#define NV_HOST_FLAGS_SCR_MMIO 0x00000001
6919 -
6920 -struct nv_host_desc
6921 -{
6922 -       enum nv_host_type       host_type;
6923 -       void                    (*enable_hotplug)(struct ata_probe_ent *probe_ent);
6924 -       void                    (*disable_hotplug)(struct ata_host_set *host_set);
6925 -       int                     (*check_hotplug)(struct ata_host_set *host_set);
6926 -
6927 -};
6928 -static struct nv_host_desc nv_device_tbl[] = {
6929 -       {
6930 -               .host_type      = GENERIC,
6931 -               .enable_hotplug = NULL,
6932 -               .disable_hotplug= NULL,
6933 -               .check_hotplug  = NULL,
6934 -       },
6935 -       {
6936 -               .host_type      = NFORCE2,
6937 -               .enable_hotplug = nv_enable_hotplug,
6938 -               .disable_hotplug= nv_disable_hotplug,
6939 -               .check_hotplug  = nv_check_hotplug,
6940 -       },
6941 -       {
6942 -               .host_type      = NFORCE3,
6943 -               .enable_hotplug = nv_enable_hotplug,
6944 -               .disable_hotplug= nv_disable_hotplug,
6945 -               .check_hotplug  = nv_check_hotplug,
6946 -       },
6947 -       {       .host_type      = CK804,
6948 -               .enable_hotplug = nv_enable_hotplug_ck804,
6949 -               .disable_hotplug= nv_disable_hotplug_ck804,
6950 -               .check_hotplug  = nv_check_hotplug_ck804,
6951 -       },
6952 -};
6953 -
6954 -struct nv_host
6955 -{
6956 -       struct nv_host_desc     *host_desc;
6957 -       unsigned long           host_flags;
6958 -};
6959 -
6960 -static struct pci_driver nv_pci_driver = {
6961 -       .name                   = DRV_NAME,
6962 -       .id_table               = nv_pci_tbl,
6963 -       .probe                  = nv_init_one,
6964 -       .remove                 = ata_pci_remove_one,
6965 -};
6966 -
6967 -static struct scsi_host_template nv_sht = {
6968 -       .module                 = THIS_MODULE,
6969 -       .name                   = DRV_NAME,
6970 -       .ioctl                  = ata_scsi_ioctl,
6971 -       .queuecommand           = ata_scsi_queuecmd,
6972 -       .eh_strategy_handler    = ata_scsi_error,
6973 -       .can_queue              = ATA_DEF_QUEUE,
6974 -       .this_id                = ATA_SHT_THIS_ID,
6975 -       .sg_tablesize           = LIBATA_MAX_PRD,
6976 -       .max_sectors            = ATA_MAX_SECTORS,
6977 -       .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
6978 -       .emulated               = ATA_SHT_EMULATED,
6979 -       .use_clustering         = ATA_SHT_USE_CLUSTERING,
6980 -       .proc_name              = DRV_NAME,
6981 -       .dma_boundary           = ATA_DMA_BOUNDARY,
6982 -       .slave_configure        = ata_scsi_slave_config,
6983 -       .bios_param             = ata_std_bios_param,
6984 -};
6985 -
6986 -static const struct ata_port_operations nv_ops = {
6987 -       .port_disable           = ata_port_disable,
6988 -       .tf_load                = ata_tf_load,
6989 -       .tf_read                = ata_tf_read,
6990 -       .exec_command           = ata_exec_command,
6991 -       .check_status           = ata_check_status,
6992 -       .dev_select             = ata_std_dev_select,
6993 -       .phy_reset              = sata_phy_reset,
6994 -       .bmdma_setup            = ata_bmdma_setup,
6995 -       .bmdma_start            = ata_bmdma_start,
6996 -       .bmdma_stop             = ata_bmdma_stop,
6997 -       .bmdma_status           = ata_bmdma_status,
6998 -       .qc_prep                = ata_qc_prep,
6999 -       .qc_issue               = ata_qc_issue_prot,
7000 -       .eng_timeout            = ata_eng_timeout,
7001 -       .irq_handler            = nv_interrupt,
7002 -       .irq_clear              = ata_bmdma_irq_clear,
7003 -       .scr_read               = nv_scr_read,
7004 -       .scr_write              = nv_scr_write,
7005 -       .port_start             = ata_port_start,
7006 -       .port_stop              = ata_port_stop,
7007 -       .host_stop              = nv_host_stop,
7008 -};
7009 -
7010 -/* FIXME: The hardware provides the necessary SATA PHY controls
7011 - * to support ATA_FLAG_SATA_RESET.  However, it is currently
7012 - * necessary to disable that flag, to solve misdetection problems.
7013 - * See http://bugme.osdl.org/show_bug.cgi?id=3352 for more info.
7014 - *
7015 - * This problem really needs to be investigated further.  But in the
7016 - * meantime, we avoid ATA_FLAG_SATA_RESET to get people working.
7017 - */
7018 -static struct ata_port_info nv_port_info = {
7019 -       .sht            = &nv_sht,
7020 -       .host_flags     = ATA_FLAG_SATA |
7021 -                         /* ATA_FLAG_SATA_RESET | */
7022 -                         ATA_FLAG_SRST |
7023 -                         ATA_FLAG_NO_LEGACY,
7024 -       .pio_mask       = NV_PIO_MASK,
7025 -       .mwdma_mask     = NV_MWDMA_MASK,
7026 -       .udma_mask      = NV_UDMA_MASK,
7027 -       .port_ops       = &nv_ops,
7028 -};
7029 -
7030 -MODULE_AUTHOR("NVIDIA");
7031 -MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
7032 -MODULE_LICENSE("GPL");
7033 -MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
7034 -MODULE_VERSION(DRV_VERSION);
7035 -
7036 -static irqreturn_t nv_interrupt (int irq, void *dev_instance,
7037 -                                struct pt_regs *regs)
7038 -{
7039 -       struct ata_host_set *host_set = dev_instance;
7040 -       struct nv_host *host = host_set->private_data;
7041 -       unsigned int i;
7042 -       unsigned int handled = 0;
7043 -       unsigned long flags;
7044 -
7045 -       spin_lock_irqsave(&host_set->lock, flags);
7046 -
7047 -       for (i = 0; i < host_set->n_ports; i++) {
7048 -               struct ata_port *ap;
7049 -
7050 -               ap = host_set->ports[i];
7051 -               if (ap &&
7052 -                   !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
7053 -                       struct ata_queued_cmd *qc;
7054 -
7055 -                       qc = ata_qc_from_tag(ap, ap->active_tag);
7056 -                       if (qc && (!(qc->tf.ctl & ATA_NIEN)))
7057 -                               handled += ata_host_intr(ap, qc);
7058 -                       else
7059 -                               // No request pending?  Clear interrupt status
7060 -                               // anyway, in case there's one pending.
7061 -                               ap->ops->check_status(ap);
7062 -               }
7063 -
7064 -       }
7065 -
7066 -       if (host->host_desc->check_hotplug)
7067 -               handled += host->host_desc->check_hotplug(host_set);
7068 -
7069 -       spin_unlock_irqrestore(&host_set->lock, flags);
7070 -
7071 -       return IRQ_RETVAL(handled);
7072 -}
7073 -
7074 -static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
7075 -{
7076 -       struct ata_host_set *host_set = ap->host_set;
7077 -       struct nv_host *host = host_set->private_data;
7078 -
7079 -       if (sc_reg > SCR_CONTROL)
7080 -               return 0xffffffffU;
7081 -
7082 -       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
7083 -               return readl((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
7084 -       else
7085 -               return inl(ap->ioaddr.scr_addr + (sc_reg * 4));
7086 -}
7087 -
7088 -static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
7089 -{
7090 -       struct ata_host_set *host_set = ap->host_set;
7091 -       struct nv_host *host = host_set->private_data;
7092 -
7093 -       if (sc_reg > SCR_CONTROL)
7094 -               return;
7095 -
7096 -       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
7097 -               writel(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
7098 -       else
7099 -               outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
7100 -}
7101 -
7102 -static void nv_host_stop (struct ata_host_set *host_set)
7103 -{
7104 -       struct nv_host *host = host_set->private_data;
7105 -       struct pci_dev *pdev = to_pci_dev(host_set->dev);
7106 -
7107 -       // Disable hotplug event interrupts.
7108 -       if (host->host_desc->disable_hotplug)
7109 -               host->host_desc->disable_hotplug(host_set);
7110 -
7111 -       kfree(host);
7112 -
7113 -       if (host_set->mmio_base)
7114 -               pci_iounmap(pdev, host_set->mmio_base);
7115 -}
7116 -
7117 -static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
7118 -{
7119 -       static int printed_version = 0;
7120 -       struct nv_host *host;
7121 -       struct ata_port_info *ppi;
7122 -       struct ata_probe_ent *probe_ent;
7123 -       int pci_dev_busy = 0;
7124 -       int rc;
7125 -       u32 bar;
7126 -
7127 -        // Make sure this is a SATA controller by counting the number of bars
7128 -        // (NVIDIA SATA controllers will always have six bars).  Otherwise,
7129 -        // it's an IDE controller and we ignore it.
7130 -       for (bar=0; bar<6; bar++)
7131 -               if (pci_resource_start(pdev, bar) == 0)
7132 -                       return -ENODEV;
7133 -
7134 -       if (!printed_version++)
7135 -               dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
7136 -
7137 -       rc = pci_enable_device(pdev);
7138 -       if (rc)
7139 -               goto err_out;
7140 -
7141 -       rc = pci_request_regions(pdev, DRV_NAME);
7142 -       if (rc) {
7143 -               pci_dev_busy = 1;
7144 -               goto err_out_disable;
7145 -       }
7146 -
7147 -       rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
7148 -       if (rc)
7149 -               goto err_out_regions;
7150 -       rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
7151 -       if (rc)
7152 -               goto err_out_regions;
7153 -
7154 -       rc = -ENOMEM;
7155 -
7156 -       ppi = &nv_port_info;
7157 -       probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
7158 -       if (!probe_ent)
7159 -               goto err_out_regions;
7160 -
7161 -       host = kmalloc(sizeof(struct nv_host), GFP_KERNEL);
7162 -       if (!host)
7163 -               goto err_out_free_ent;
7164 -
7165 -       memset(host, 0, sizeof(struct nv_host));
7166 -       host->host_desc = &nv_device_tbl[ent->driver_data];
7167 -
7168 -       probe_ent->private_data = host;
7169 -
7170 -       if (pci_resource_flags(pdev, 5) & IORESOURCE_MEM)
7171 -               host->host_flags |= NV_HOST_FLAGS_SCR_MMIO;
7172 -
7173 -       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) {
7174 -               unsigned long base;
7175 -
7176 -               probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
7177 -               if (probe_ent->mmio_base == NULL) {
7178 -                       rc = -EIO;
7179 -                       goto err_out_free_host;
7180 -               }
7181 -
7182 -               base = (unsigned long)probe_ent->mmio_base;
7183 -
7184 -               probe_ent->port[0].scr_addr =
7185 -                       base + NV_PORT0_SCR_REG_OFFSET;
7186 -               probe_ent->port[1].scr_addr =
7187 -                       base + NV_PORT1_SCR_REG_OFFSET;
7188 -       } else {
7189 -
7190 -               probe_ent->port[0].scr_addr =
7191 -                       pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET;
7192 -               probe_ent->port[1].scr_addr =
7193 -                       pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET;
7194 -       }
7195 -
7196 -       pci_set_master(pdev);
7197 -
7198 -       rc = ata_device_add(probe_ent);
7199 -       if (rc != NV_PORTS)
7200 -               goto err_out_iounmap;
7201 -
7202 -       // Enable hotplug event interrupts.
7203 -       if (host->host_desc->enable_hotplug)
7204 -               host->host_desc->enable_hotplug(probe_ent);
7205 -
7206 -       kfree(probe_ent);
7207 -
7208 -       return 0;
7209 -
7210 -err_out_iounmap:
7211 -       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
7212 -               pci_iounmap(pdev, probe_ent->mmio_base);
7213 -err_out_free_host:
7214 -       kfree(host);
7215 -err_out_free_ent:
7216 -       kfree(probe_ent);
7217 -err_out_regions:
7218 -       pci_release_regions(pdev);
7219 -err_out_disable:
7220 -       if (!pci_dev_busy)
7221 -               pci_disable_device(pdev);
7222 -err_out:
7223 -       return rc;
7224 -}
7225 -
7226 -static void nv_enable_hotplug(struct ata_probe_ent *probe_ent)
7227 -{
7228 -       u8 intr_mask;
7229 -
7230 -       outb(NV_INT_STATUS_HOTPLUG,
7231 -               probe_ent->port[0].scr_addr + NV_INT_STATUS);
7232 -
7233 -       intr_mask = inb(probe_ent->port[0].scr_addr + NV_INT_ENABLE);
7234 -       intr_mask |= NV_INT_ENABLE_HOTPLUG;
7235 -
7236 -       outb(intr_mask, probe_ent->port[0].scr_addr + NV_INT_ENABLE);
7237 -}
7238 -
7239 -static void nv_disable_hotplug(struct ata_host_set *host_set)
7240 -{
7241 -       u8 intr_mask;
7242 -
7243 -       intr_mask = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
7244 -
7245 -       intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
7246 -
7247 -       outb(intr_mask, host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
7248 -}
7249 -
7250 -static int nv_check_hotplug(struct ata_host_set *host_set)
7251 -{
7252 -       u8 intr_status;
7253 -
7254 -       intr_status = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
7255 -
7256 -       // Clear interrupt status.
7257 -       outb(0xff, host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
7258 -
7259 -       if (intr_status & NV_INT_STATUS_HOTPLUG) {
7260 -               if (intr_status & NV_INT_STATUS_PDEV_ADDED)
7261 -                       printk(KERN_WARNING "nv_sata: "
7262 -                               "Primary device added\n");
7263 -
7264 -               if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
7265 -                       printk(KERN_WARNING "nv_sata: "
7266 -                               "Primary device removed\n");
7267 -
7268 -               if (intr_status & NV_INT_STATUS_SDEV_ADDED)
7269 -                       printk(KERN_WARNING "nv_sata: "
7270 -                               "Secondary device added\n");
7271 -
7272 -               if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
7273 -                       printk(KERN_WARNING "nv_sata: "
7274 -                               "Secondary device removed\n");
7275 -
7276 -               return 1;
7277 -       }
7278 -
7279 -       return 0;
7280 -}
7281 -
7282 -static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent)
7283 -{
7284 -       struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
7285 -       u8 intr_mask;
7286 -       u8 regval;
7287 -
7288 -       pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
7289 -       regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
7290 -       pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
7291 -
7292 -       writeb(NV_INT_STATUS_HOTPLUG, probe_ent->mmio_base + NV_INT_STATUS_CK804);
7293 -
7294 -       intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_CK804);
7295 -       intr_mask |= NV_INT_ENABLE_HOTPLUG;
7296 -
7297 -       writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_CK804);
7298 -}
7299 -
7300 -static void nv_disable_hotplug_ck804(struct ata_host_set *host_set)
7301 -{
7302 -       struct pci_dev *pdev = to_pci_dev(host_set->dev);
7303 -       u8 intr_mask;
7304 -       u8 regval;
7305 -
7306 -       intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_CK804);
7307 -
7308 -       intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
7309 -
7310 -       writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_CK804);
7311 -
7312 -       pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
7313 -       regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
7314 -       pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
7315 -}
7316 -
7317 -static int nv_check_hotplug_ck804(struct ata_host_set *host_set)
7318 -{
7319 -       u8 intr_status;
7320 -
7321 -       intr_status = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
7322 -
7323 -       // Clear interrupt status.
7324 -       writeb(0xff, host_set->mmio_base + NV_INT_STATUS_CK804);
7325 -
7326 -       if (intr_status & NV_INT_STATUS_HOTPLUG) {
7327 -               if (intr_status & NV_INT_STATUS_PDEV_ADDED)
7328 -                       printk(KERN_WARNING "nv_sata: "
7329 -                               "Primary device added\n");
7330 -
7331 -               if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
7332 -                       printk(KERN_WARNING "nv_sata: "
7333 -                               "Primary device removed\n");
7334 -
7335 -               if (intr_status & NV_INT_STATUS_SDEV_ADDED)
7336 -                       printk(KERN_WARNING "nv_sata: "
7337 -                               "Secondary device added\n");
7338 -
7339 -               if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
7340 -                       printk(KERN_WARNING "nv_sata: "
7341 -                               "Secondary device removed\n");
7342 -
7343 -               return 1;
7344 -       }
7345 -
7346 -       return 0;
7347 -}
7348 -
7349 -static int __init nv_init(void)
7350 -{
7351 -       return pci_module_init(&nv_pci_driver);
7352 -}
7353 -
7354 -static void __exit nv_exit(void)
7355 -{
7356 -       pci_unregister_driver(&nv_pci_driver);
7357 -}
7358 -
7359 -module_init(nv_init);
7360 -module_exit(nv_exit);
7361 +/*
7362 + *  sata_nv.c - NVIDIA nForce SATA
7363 + *
7364 + *  Copyright 2004 NVIDIA Corp.  All rights reserved.
7365 + *  Copyright 2004 Andrew Chew
7366 + *
7367 + *  The contents of this file are subject to the Open
7368 + *  Software License version 1.1 that can be found at
7369 + *  http://www.opensource.org/licenses/osl-1.1.txt and is included herein
7370 + *  by reference.
7371 + *
7372 + *  Alternatively, the contents of this file may be used under the terms
7373 + *  of the GNU General Public License version 2 (the "GPL") as distributed
7374 + *  in the kernel source COPYING file, in which case the provisions of
7375 + *  the GPL are applicable instead of the above.  If you wish to allow
7376 + *  the use of your version of this file only under the terms of the
7377 + *  GPL and not to allow others to use your version of this file under
7378 + *  the OSL, indicate your decision by deleting the provisions above and
7379 + *  replace them with the notice and other provisions required by the GPL.
7380 + *  If you do not delete the provisions above, a recipient may use your
7381 + *  version of this file under either the OSL or the GPL.
7382 + *
7383 + *  0.11
7384 + *     - Added sgpio support
7385 + *
7386 + *  0.10
7387 + *     - Fixed spurious interrupts issue seen with the Maxtor 6H500F0 500GB
7388 + *       drive.  Also made the check_hotplug() callbacks return whether there
7389 + *       was a hotplug interrupt or not.  This was not the source of the
7390 + *       spurious interrupts, but is the right thing to do anyway.
7391 + *
7392 + *  0.09
7393 + *     - Fixed bug introduced by 0.08's MCP51 and MCP55 support.
7394 + *
7395 + *  0.08
7396 + *     - Added support for MCP51 and MCP55.
7397 + *
7398 + *  0.07
7399 + *     - Added support for RAID class code.
7400 + *
7401 + *  0.06
7402 + *     - Added generic SATA support by using a pci_device_id that filters on
7403 + *       the IDE storage class code.
7404 + *
7405 + *  0.03
7406 + *     - Fixed a bug where the hotplug handlers for non-CK804/MCP04 were using
7407 + *       mmio_base, which is only set for the CK804/MCP04 case.
7408 + *
7409 + *  0.02
7410 + *     - Added support for CK804 SATA controller.
7411 + *
7412 + *  0.01
7413 + *     - Initial revision.
7414 + */
7415 +
7416 +#include <linux/config.h>
7417 +#include <linux/version.h>
7418 +#include <linux/kernel.h>
7419 +#include <linux/module.h>
7420 +#include <linux/pci.h>
7421 +#include <linux/init.h>
7422 +#include <linux/blkdev.h>
7423 +#include <linux/delay.h>
7424 +#include <linux/interrupt.h>
7425 +#include "scsi.h"
7426 +#include <scsi/scsi_host.h>
7427 +#include <linux/libata.h>
7428 +
7429 +#define DRV_NAME                       "sata_nv"
7430 +#define DRV_VERSION                    "0.11-Driver Package V1.23"
7431 +
7432 +#define NV_PORTS                       2
7433 +#define NV_PIO_MASK                    0x1f
7434 +#define NV_MWDMA_MASK                  0x07
7435 +#define NV_UDMA_MASK                   0x7f
7436 +#define NV_PORT0_SCR_REG_OFFSET                0x00
7437 +#define NV_PORT1_SCR_REG_OFFSET                0x40
7438 +
7439 +#define NV_INT_STATUS                  0x10
7440 +#define NV_INT_STATUS_CK804            0x440
7441 +#define NV_INT_STATUS_MCP55            0x440
7442 +#define NV_INT_STATUS_PDEV_INT         0x01
7443 +#define NV_INT_STATUS_PDEV_PM          0x02
7444 +#define NV_INT_STATUS_PDEV_ADDED       0x04
7445 +#define NV_INT_STATUS_PDEV_REMOVED     0x08
7446 +#define NV_INT_STATUS_SDEV_INT         0x10
7447 +#define NV_INT_STATUS_SDEV_PM          0x20
7448 +#define NV_INT_STATUS_SDEV_ADDED       0x40
7449 +#define NV_INT_STATUS_SDEV_REMOVED     0x80
7450 +#define NV_INT_STATUS_PDEV_HOTPLUG     (NV_INT_STATUS_PDEV_ADDED | \
7451 +                                       NV_INT_STATUS_PDEV_REMOVED)
7452 +#define NV_INT_STATUS_SDEV_HOTPLUG     (NV_INT_STATUS_SDEV_ADDED | \
7453 +                                       NV_INT_STATUS_SDEV_REMOVED)
7454 +#define NV_INT_STATUS_HOTPLUG          (NV_INT_STATUS_PDEV_HOTPLUG | \
7455 +                                       NV_INT_STATUS_SDEV_HOTPLUG)
7456 +
7457 +#define NV_INT_ENABLE                  0x11
7458 +#define NV_INT_ENABLE_CK804            0x441
7459 +#define NV_INT_ENABLE_MCP55            0x444
7460 +#define NV_INT_ENABLE_PDEV_MASK                0x01
7461 +#define NV_INT_ENABLE_PDEV_PM          0x02
7462 +#define NV_INT_ENABLE_PDEV_ADDED       0x04
7463 +#define NV_INT_ENABLE_PDEV_REMOVED     0x08
7464 +#define NV_INT_ENABLE_SDEV_MASK                0x10
7465 +#define NV_INT_ENABLE_SDEV_PM          0x20
7466 +#define NV_INT_ENABLE_SDEV_ADDED       0x40
7467 +#define NV_INT_ENABLE_SDEV_REMOVED     0x80
7468 +#define NV_INT_ENABLE_PDEV_HOTPLUG     (NV_INT_ENABLE_PDEV_ADDED | \
7469 +                                       NV_INT_ENABLE_PDEV_REMOVED)
7470 +#define NV_INT_ENABLE_SDEV_HOTPLUG     (NV_INT_ENABLE_SDEV_ADDED | \
7471 +                                       NV_INT_ENABLE_SDEV_REMOVED)
7472 +#define NV_INT_ENABLE_HOTPLUG          (NV_INT_ENABLE_PDEV_HOTPLUG | \
7473 +                                       NV_INT_ENABLE_SDEV_HOTPLUG)
7474 +
7475 +#define NV_INT_CONFIG                  0x12
7476 +#define NV_INT_CONFIG_METHD            0x01 // 0 = INT, 1 = SMI
7477 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E
7478 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2        0x037F
7479 +
7480 +// For PCI config register 20
7481 +#define NV_MCP_SATA_CFG_20             0x50
7482 +#define NV_MCP_SATA_CFG_20_SATA_SPACE_EN       0x04
7483 +
7484 +
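+/* Distro/API selectors (assumption, inferred from the #ifdef blocks below):
+ * RHAS3U7 marks pre-2.6 kernels with the old SCSI-template registration
+ * API; SLES10 marks 2.6.16, where this driver uses struct
+ * scsi_host_template instead of the older Scsi_Host_Template typedef. */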
7485 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
7486 +#define RHAS3U7
7487 +#endif
7488 +#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,16)
7489 +#define SLES10
7490 +#endif
7491 +
7492 +// SGPIO (Serial GPIO) definitions
7493 +
7494 +// SGPIO state defines
7495 +#define NV_SGPIO_STATE_RESET           0
7496 +#define NV_SGPIO_STATE_OPERATIONAL     1
7497 +#define NV_SGPIO_STATE_ERROR           2
7498 +
7499 +// SGPIO command opcodes
7500 +#define NV_SGPIO_CMD_RESET             0
7501 +#define NV_SGPIO_CMD_READ_PARAMS       1
7502 +#define NV_SGPIO_CMD_READ_DATA         2
7503 +#define NV_SGPIO_CMD_WRITE_DATA                3
7504 +
7505 +// SGPIO command status defines
7506 +#define NV_SGPIO_CMD_OK                        0
7507 +#define NV_SGPIO_CMD_ACTIVE            1
7508 +#define NV_SGPIO_CMD_ERR               2
7509 +
7510 +#define NV_SGPIO_UPDATE_TICK           90
7511 +#define NV_SGPIO_MIN_UPDATE_DELTA      33
7512 +#define NV_CNTRLR_SHARE_INIT           2
7513 +#define NV_SGPIO_MAX_ACTIVITY_ON       20
7514 +#define NV_SGPIO_MIN_FORCE_OFF         5
7515 +#define NV_SGPIO_PCI_CSR_OFFSET                0x58
7516 +#define NV_SGPIO_PCI_CB_OFFSET         0x5C
7517 +#define NV_SGPIO_DFLT_CB_SIZE          256
7518 +#define NV_ON 1
7519 +#define NV_OFF 0
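+/* Older kernels provide no bool type; fall back to u8.  Note that
+ * #ifndef only tests for a macro named bool, so this always fires
+ * unless something earlier has #defined bool. */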
7520 +#ifndef bool
7521 +#define bool u8
7522 +#endif
7523 +
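+/* Local copy of jiffies_to_msecs(), presumably carried here so the same
+ * source also builds on older supported kernels that lack the helper. */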
7524 +static inline unsigned int jiffies_to_msecs1(const unsigned long j)
7525 +{
7526 +#if HZ <= 1000 && !(1000 % HZ)
7527 +        return (1000 / HZ) * j;
7528 +#elif HZ > 1000 && !(HZ % 1000)
7529 +        return (j + (HZ / 1000) - 1)/(HZ / 1000);
7530 +#else
7531 +        return (j * 1000) / HZ;
7532 +#endif
7533 +}
7534 +
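+/* Bit-field helpers: extract or insert a bc-bit-wide field at bit offset
+ * off.  The insert variants do not mask the inserted value, so callers
+ * must pass values that already fit in bc bits. */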
7535 +#define BF_EXTRACT(v, off, bc) \
7536 +       ((((u8)(v)) >> (off)) & ((1 << (bc)) - 1))
7537 +
7538 +#define BF_INS(v, ins, off, bc)                                \
7539 +       (((v) & ~((((1 << (bc)) - 1)) << (off))) |      \
7540 +       (((u8)(ins)) << (off)))
7541 +
7542 +#define BF_EXTRACT_U32(v, off, bc)     \
7543 +       ((((u32)(v)) >> (off)) & ((1 << (bc)) - 1))
7544 +
7545 +#define BF_INS_U32(v, ins, off, bc)                    \
7546 +       (((v) & ~((((1 << (bc)) - 1)) << (off))) |      \
7547 +       (((u32)(ins)) << (off)))
7548 +
7549 +#define GET_SGPIO_STATUS(v)    BF_EXTRACT(v, 0, 2)
7550 +#define GET_CMD_STATUS(v)      BF_EXTRACT(v, 3, 2)
7551 +#define GET_CMD(v)             BF_EXTRACT(v, 5, 3)
7552 +#define SET_CMD(v, cmd)                BF_INS(v, cmd, 5, 3) 
7553 +
7554 +#define GET_ENABLE(v)          BF_EXTRACT_U32(v, 23, 1)
7555 +#define SET_ENABLE(v)          BF_INS_U32(v, 1, 23, 1)
7556 +
7557 +// The activity field lives in a u8 tx-port byte, so use the u8 bit-field helpers.
7558 +#define GET_ACTIVITY(v)                BF_EXTRACT(v, 5, 3)
7559 +#define SET_ACTIVITY(v, on_off)        BF_INS(v, on_off, 5, 3)
7560 +
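+/* The structures below appear to mirror the BIOS-resident SGPIO control
+ * block that nv_sgpio_init() maps via phys_to_virt(); the layout is
+ * taken from this driver, not from a published specification. */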
7561 +union nv_sgpio_nvcr 
7562 +{
7563 +       struct {
7564 +               u8      init_cnt;
7565 +               u8      cb_size;
7566 +               u8      cbver;
7567 +               u8      rsvd;
7568 +       } bit;
7569 +       u32     all;
7570 +};
7571 +
7572 +union nv_sgpio_tx 
7573 +{
7574 +       u8      tx_port[4];
7575 +       u32     all;
7576 +};
7577 +
7578 +struct nv_sgpio_cb 
7579 +{
7580 +       u64                     scratch_space;
7581 +       union nv_sgpio_nvcr     nvcr;
7582 +       u32                     cr0;
7583 +       u32                     rsvd[4];
7584 +       union nv_sgpio_tx       tx[2];
7585 +};
7586 +
7587 +struct nv_sgpio_host_share
7588 +{
7589 +       spinlock_t      *plock;
7590 +       unsigned long   *ptstamp;
7591 +};
7592 +
7593 +struct nv_sgpio_host_flags
7594 +{
7595 +       u8      sgpio_enabled:1;
7596 +       u8      need_update:1;
7597 +       u8      rsvd:6;
7598 +};
7599 +       
7600 +struct nv_host_sgpio
7601 +{
7602 +       struct nv_sgpio_host_flags      flags;
7603 +       u8                              *pcsr;
7604 +       struct nv_sgpio_cb              *pcb;   
7605 +       struct nv_sgpio_host_share      share;
7606 +       struct timer_list               sgpio_timer;
7607 +};
7608 +
7609 +struct nv_sgpio_port_flags
7610 +{
7611 +       u8      last_state:1;
7612 +       u8      recent_activity:1;
7613 +       u8      rsvd:6;
7614 +};
7615 +
7616 +struct nv_sgpio_led 
7617 +{
7618 +       struct nv_sgpio_port_flags      flags;
7619 +       u8                              force_off;
7620 +       u8                              last_cons_active;
7621 +};
7622 +
7623 +struct nv_port_sgpio
7624 +{
7625 +       struct nv_sgpio_led     activity;
7626 +};
7627 +
7628 +static spinlock_t      nv_sgpio_lock;
7629 +static unsigned long   nv_sgpio_tstamp;
7630 +
7631 +static inline void nv_sgpio_set_csr(u8 csr, unsigned long pcsr)
7632 +{
7633 +       outb(csr, pcsr);
7634 +}
7635 +
7636 +static inline u8 nv_sgpio_get_csr(unsigned long pcsr)
7637 +{
7638 +       return inb(pcsr);
7639 +}
7640 +
7641 +static inline u8 nv_sgpio_get_func(struct ata_host_set *host_set)
7642 +{
7643 +       u8 devfn = (to_pci_dev(host_set->dev))->devfn;
7644 +       return (PCI_FUNC(devfn));
7645 +}
7646 +
7647 +static inline u8 nv_sgpio_tx_host_offset(struct ata_host_set *host_set)
7648 +{
7649 +       return (nv_sgpio_get_func(host_set)/NV_CNTRLR_SHARE_INIT);
7650 +}
7651 +
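+/* Each 32-bit tx word holds one LED byte per port and is shared by two
+ * controller functions; this computes which byte within the word belongs
+ * to the given controller/channel pair (bytes are assigned high to low). */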
7652 +static inline u8 nv_sgpio_calc_tx_offset(u8 cntrlr, u8 channel)
7653 +{
7654 +       return (sizeof(union nv_sgpio_tx) - (NV_CNTRLR_SHARE_INIT *
7655 +               (cntrlr % NV_CNTRLR_SHARE_INIT)) - channel - 1);
7656 +}
7657 +
7658 +static inline u8 nv_sgpio_tx_port_offset(struct ata_port *ap)
7659 +{
7660 +       u8 cntrlr = nv_sgpio_get_func(ap->host_set);
7661 +       return (nv_sgpio_calc_tx_offset(cntrlr, ap->port_no));
7662 +}
7663 +
7664 +static inline bool nv_sgpio_capable(const struct pci_device_id *ent)
7665 +{
7666 +       return (ent->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2);
7670 +}
7671 +
7672 +
7677 +static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
7678 +static irqreturn_t nv_interrupt (int irq, void *dev_instance,
7679 +                                struct pt_regs *regs);
7680 +static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
7681 +static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
7682 +static void nv_host_stop (struct ata_host_set *host_set);
7683 +static int nv_port_start(struct ata_port *ap);
7684 +static void nv_port_stop(struct ata_port *ap);
7685 +static int nv_qc_issue(struct ata_queued_cmd *qc);
7686 +static void nv_enable_hotplug(struct ata_probe_ent *probe_ent);
7687 +static void nv_disable_hotplug(struct ata_host_set *host_set);
7688 +static void nv_check_hotplug(struct ata_host_set *host_set);
7689 +static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent);
7690 +static void nv_disable_hotplug_ck804(struct ata_host_set *host_set);
7691 +static void nv_check_hotplug_ck804(struct ata_host_set *host_set);
7692 +static void nv_enable_hotplug_mcp55(struct ata_probe_ent *probe_ent);
7693 +static void nv_disable_hotplug_mcp55(struct ata_host_set *host_set);
7694 +static void nv_check_hotplug_mcp55(struct ata_host_set *host_set);
7695 +enum nv_host_type
7696 +{
7697 +       GENERIC,
7698 +       NFORCE2,
7699 +       NFORCE3,
7700 +       CK804,
7701 +       MCP55
7702 +};
7703 +
7704 +static struct pci_device_id nv_pci_tbl[] = {
7705 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
7706 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
7707 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
7708 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
7709 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
7710 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
7711 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
7712 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
7713 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
7714 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
7715 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
7716 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
7717 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
7718 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
7719 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
7720 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
7721 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
7722 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
7723 +       { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
7724 +               PCI_ANY_ID, PCI_ANY_ID,
7725 +               PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
7726 +       { 0, } /* terminate list */
7727 +};
7728 +
7729 +#define NV_HOST_FLAGS_SCR_MMIO 0x00000001
7730 +
7731 +struct nv_host_desc
7732 +{
7733 +       enum nv_host_type       host_type;
7734 +       void                    (*enable_hotplug)(struct ata_probe_ent *probe_ent);
7735 +       void                    (*disable_hotplug)(struct ata_host_set *host_set);
7736 +       void                    (*check_hotplug)(struct ata_host_set *host_set);
7737 +
7738 +};
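+/* Indexed by enum nv_host_type through pci_device_id.driver_data, so the
+ * entries must stay in the same order as the enum above. */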
7739 +static struct nv_host_desc nv_device_tbl[] = {
7740 +       {
7741 +               .host_type      = GENERIC,
7742 +               .enable_hotplug = NULL,
7743 +               .disable_hotplug= NULL,
7744 +               .check_hotplug  = NULL,
7745 +       },
7746 +       {
7747 +               .host_type      = NFORCE2,
7748 +               .enable_hotplug = nv_enable_hotplug,
7749 +               .disable_hotplug= nv_disable_hotplug,
7750 +               .check_hotplug  = nv_check_hotplug,
7751 +       },
7752 +       {
7753 +               .host_type      = NFORCE3,
7754 +               .enable_hotplug = nv_enable_hotplug,
7755 +               .disable_hotplug= nv_disable_hotplug,
7756 +               .check_hotplug  = nv_check_hotplug,
7757 +       },
7758 +       {       .host_type      = CK804,
7759 +               .enable_hotplug = nv_enable_hotplug_ck804,
7760 +               .disable_hotplug= nv_disable_hotplug_ck804,
7761 +               .check_hotplug  = nv_check_hotplug_ck804,
7762 +       },
7763 +       {       .host_type      = MCP55,
7764 +               .enable_hotplug = nv_enable_hotplug_mcp55,
7765 +               .disable_hotplug= nv_disable_hotplug_mcp55,
7766 +               .check_hotplug  = nv_check_hotplug_mcp55,
7767 +       },
7768 +};
7769 +
7770 +
7771 +struct nv_host
7772 +{
7773 +       struct nv_host_desc     *host_desc;
7774 +       unsigned long           host_flags;
7775 +       struct nv_host_sgpio    host_sgpio;
7776 +       struct pci_dev          *pdev;
7777 +};
7778 +
7779 +struct nv_port
7780 +{
7781 +       struct nv_port_sgpio    port_sgpio;
7782 +};
7783 +
7784 +// SGPIO function prototypes
7785 +static void nv_sgpio_init(struct pci_dev *pdev, struct nv_host *phost);
7786 +static void nv_sgpio_reset(u8 *pcsr);
7787 +static void nv_sgpio_set_timer(struct timer_list *ptimer, 
7788 +                               unsigned int timeout_msec);
7789 +static void nv_sgpio_timer_handler(unsigned long ptr);
7790 +static void nv_sgpio_host_cleanup(struct nv_host *host);
7791 +static bool nv_sgpio_update_led(struct nv_sgpio_led *led, bool *on_off);
7792 +static void nv_sgpio_clear_all_leds(struct ata_port *ap);
7793 +static bool nv_sgpio_send_cmd(struct nv_host *host, u8 cmd);
7794 +
7795 +
7796 +static struct pci_driver nv_pci_driver = {
7797 +       .name                   = DRV_NAME,
7798 +       .id_table               = nv_pci_tbl,
7799 +       .probe                  = nv_init_one,
7800 +       .remove                 = ata_pci_remove_one,
7801 +};
7802 +
7803 +
7804 +#ifdef SLES10
7805 +static  struct scsi_host_template nv_sht = {
7806 +#else
7807 +static  Scsi_Host_Template nv_sht = {
7808 +#endif
7809 +       .module                 = THIS_MODULE,
7810 +       .name                   = DRV_NAME,
7811 +#ifdef RHAS3U7
7812 +       .detect                 = ata_scsi_detect,
7813 +       .release                = ata_scsi_release,
7814 +#endif
7815 +       .ioctl                  = ata_scsi_ioctl,
7816 +       .queuecommand           = ata_scsi_queuecmd,
7817 +       .eh_strategy_handler    = ata_scsi_error,
7818 +       .can_queue              = ATA_DEF_QUEUE,
7819 +       .this_id                = ATA_SHT_THIS_ID,
7820 +       .sg_tablesize           = LIBATA_MAX_PRD,
7821 +       .max_sectors            = ATA_MAX_SECTORS,
7822 +       .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
7823 +#ifdef RHAS3U7
7824 +       .use_new_eh_code        = ATA_SHT_NEW_EH_CODE,
7825 +#endif
7826 +       .emulated               = ATA_SHT_EMULATED,
7827 +       .use_clustering         = ATA_SHT_USE_CLUSTERING,
7828 +       .proc_name              = DRV_NAME,
7829 +#ifndef RHAS3U7
7830 +       .dma_boundary           = ATA_DMA_BOUNDARY,
7831 +       .slave_configure        = ata_scsi_slave_config,
7832 +#endif
7833 +       .bios_param             = ata_std_bios_param,
7834 +};
7835 +
7836 +static struct ata_port_operations nv_ops = {
7837 +       .port_disable           = ata_port_disable,
7838 +       .tf_load                = ata_tf_load,
7839 +       .tf_read                = ata_tf_read,
7840 +       .exec_command           = ata_exec_command,
7841 +       .check_status           = ata_check_status,
7842 +       .dev_select             = ata_std_dev_select,
7843 +       .phy_reset              = sata_phy_reset,
7844 +       .bmdma_setup            = ata_bmdma_setup,
7845 +       .bmdma_start            = ata_bmdma_start,
7846 +       .bmdma_stop             = ata_bmdma_stop,
7847 +       .bmdma_status           = ata_bmdma_status,
7848 +       .qc_prep                = ata_qc_prep,
7849 +       .qc_issue               = nv_qc_issue,
7850 +       .eng_timeout            = ata_eng_timeout,
7851 +       .irq_handler            = nv_interrupt,
7852 +       .irq_clear              = ata_bmdma_irq_clear,
7853 +       .scr_read               = nv_scr_read,
7854 +       .scr_write              = nv_scr_write,
7855 +       .port_start             = nv_port_start,
7856 +       .port_stop              = nv_port_stop,
7857 +       .host_stop              = nv_host_stop,
7858 +};
7859 +
7860 +/* FIXME: The hardware provides the necessary SATA PHY controls
7861 + * to support ATA_FLAG_SATA_RESET.  However, it is currently
7862 + * necessary to disable that flag, to solve misdetection problems.
7863 + * See http://bugme.osdl.org/show_bug.cgi?id=3352 for more info.
7864 + *
7865 + * This problem really needs to be investigated further.  But in the
7866 + * meantime, we avoid ATA_FLAG_SATA_RESET to get people working.
7867 + */
7868 +static struct ata_port_info nv_port_info = {
7869 +       .sht            = &nv_sht,
7870 +       .host_flags     = ATA_FLAG_SATA |
7871 +                         /* ATA_FLAG_SATA_RESET | */
7872 +                         ATA_FLAG_SRST |
7873 +                         ATA_FLAG_NO_LEGACY,
7874 +       .pio_mask       = NV_PIO_MASK,
7875 +       .mwdma_mask     = NV_MWDMA_MASK,
7876 +       .udma_mask      = NV_UDMA_MASK,
7877 +       .port_ops       = &nv_ops,
7878 +};
7879 +
7880 +MODULE_AUTHOR("NVIDIA");
7881 +MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
7882 +MODULE_LICENSE("GPL");
7883 +MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
7884 +MODULE_VERSION(DRV_VERSION);
7885 +
7886 +static irqreturn_t nv_interrupt (int irq, void *dev_instance,
7887 +                                struct pt_regs *regs)
7888 +{
7889 +       struct ata_host_set *host_set = dev_instance;
7890 +       struct nv_host *host = host_set->private_data;
7891 +       unsigned int i;
7892 +       unsigned int handled = 0;
7893 +       unsigned long flags;
7894 +
7895 +       spin_lock_irqsave(&host_set->lock, flags);
7896 +
7897 +       for (i = 0; i < host_set->n_ports; i++) {
7898 +               struct ata_port *ap;
7899 +
7900 +               ap = host_set->ports[i];
7901 +#ifdef ATA_FLAG_NOINTR
7902 +               if (ap &&
7903 +                   !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
7904 +#else
7905 +               if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
7906 +#endif
7907 +                       struct ata_queued_cmd *qc;
7908 +
7909 +                       qc = ata_qc_from_tag(ap, ap->active_tag);
7910 +                       if (qc && (!(qc->tf.ctl & ATA_NIEN)))
7911 +                               handled += ata_host_intr(ap, qc);
7912 +                       else
7913 +                               /* No command is active; clear the interrupt
7914 +                                * status anyway in case a stale one is pending. */
7915 +                               ap->ops->check_status(ap);
7916 +               }
7917 +
7918 +       }
7919 +
7920 +       if (host->host_desc->check_hotplug)
7921 +               host->host_desc->check_hotplug(host_set);
7922 +
7923 +       spin_unlock_irqrestore(&host_set->lock, flags);
7924 +
7925 +       return IRQ_RETVAL(handled);
7926 +}
7927 +
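+/* The SCR registers live in BAR5, which is memory-mapped on some chips
+ * and I/O-mapped on others; NV_HOST_FLAGS_SCR_MMIO, set at probe time
+ * from the BAR flags, selects the accessor. */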
7928 +static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
7929 +{
7930 +       struct ata_host_set *host_set = ap->host_set;
7931 +       struct nv_host *host = host_set->private_data;
7932 +
7933 +       if (sc_reg > SCR_CONTROL)
7934 +               return 0xffffffffU;
7935 +
7936 +       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
7937 +               return readl((void*)ap->ioaddr.scr_addr + (sc_reg * 4));
7938 +       else
7939 +               return inl(ap->ioaddr.scr_addr + (sc_reg * 4));
7940 +}
7941 +
7942 +static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
7943 +{
7944 +       struct ata_host_set *host_set = ap->host_set;
7945 +       struct nv_host *host = host_set->private_data;
7946 +
7947 +       if (sc_reg > SCR_CONTROL)
7948 +               return;
7949 +
7950 +       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
7951 +               writel(val, (void*)ap->ioaddr.scr_addr + (sc_reg * 4));
7952 +       else
7953 +               outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
7954 +}
7955 +
7956 +static void nv_host_stop (struct ata_host_set *host_set)
7957 +{
7958 +       struct nv_host *host = host_set->private_data;
7959 +
7960 +       // Disable hotplug event interrupts.
7961 +       if (host->host_desc->disable_hotplug)
7962 +               host->host_desc->disable_hotplug(host_set);
7963 +
7964 +       nv_sgpio_host_cleanup(host);
7965 +       kfree(host);
7966 +#ifdef RHAS3U7
7967 +
7968 +       ata_host_stop(host_set);
7969 +#endif
7970 +}
7971 +
7972 +static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
7973 +{
7974 +       static int printed_version = 0;
7975 +       struct nv_host *host;
7976 +       struct ata_port_info *ppi;
7977 +       struct ata_probe_ent *probe_ent;
7978 +       int pci_dev_busy = 0;
7979 +       int rc;
7980 +       u32 bar;
7981 +
7982 +        // Make sure this is a SATA controller by counting the number of bars
7983 +        // (NVIDIA SATA controllers will always have six bars).  Otherwise,
7984 +        // it's an IDE controller and we ignore it.
7985 +       for (bar=0; bar<6; bar++)
7986 +               if (pci_resource_start(pdev, bar) == 0)
7987 +                       return -ENODEV;
7988 +
7989 +       if (!printed_version++)
7990 +               printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
7991 +
7992 +       rc = pci_enable_device(pdev);
7993 +       if (rc)
7994 +               goto err_out;
7995 +
7996 +       rc = pci_request_regions(pdev, DRV_NAME);
7997 +       if (rc) {
7998 +               pci_dev_busy = 1;
7999 +               goto err_out_disable;
8000 +       }
8001 +
8002 +       rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
8003 +       if (rc)
8004 +               goto err_out_regions;
8005 +#ifndef RHAS3U7
8006 +       rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
8007 +       if (rc)
8008 +               goto err_out_regions;
8009 +#endif
8010 +       rc = -ENOMEM;
8011 +
8012 +       ppi = &nv_port_info;
8013 +
8014 +       probe_ent = ata_pci_init_native_mode(pdev, &ppi,
8015 +                       ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
8016 +       if (!probe_ent)
8017 +               goto err_out_regions;
8018 +
8019 +       host = kmalloc(sizeof(struct nv_host), GFP_KERNEL);
8020 +       if (!host)
8021 +               goto err_out_free_ent;
8022 +
8023 +       memset(host, 0, sizeof(struct nv_host));
8024 +       host->host_desc = &nv_device_tbl[ent->driver_data];
8025 +
8026 +       probe_ent->private_data = host;
8027 +
8028 +       if (pci_resource_flags(pdev, 5) & IORESOURCE_MEM)
8029 +               host->host_flags |= NV_HOST_FLAGS_SCR_MMIO;
8030 +
8031 +       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) {
8032 +               unsigned long base;
8033 +
8034 +               probe_ent->mmio_base = ioremap(pci_resource_start(pdev, 5),
8035 +                               pci_resource_len(pdev, 5));
8036 +               if (probe_ent->mmio_base == NULL) {
8037 +                       rc = -EIO;
8038 +                       goto err_out_free_host;
8039 +               }
8040 +
8041 +               base = (unsigned long)probe_ent->mmio_base;
8042 +
8043 +               probe_ent->port[0].scr_addr =
8044 +                       base + NV_PORT0_SCR_REG_OFFSET;
8045 +               probe_ent->port[1].scr_addr =
8046 +                       base + NV_PORT1_SCR_REG_OFFSET;
8047 +       } else {
8048 +
8049 +               probe_ent->port[0].scr_addr =
8050 +                       pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET;
8051 +               probe_ent->port[1].scr_addr =
8052 +                       pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET;
8053 +       }
8054 +
8055 +       pci_set_master(pdev);
8056 +#ifdef RHAS3U7
8057 +       ata_add_to_probe_list(probe_ent);
8058 +       
8059 +       if (nv_sgpio_capable(ent))
8060 +               nv_sgpio_init(pdev, host);
8061 +       // Enable hotplug event interrupts.
8062 +       if (host->host_desc->enable_hotplug)
8063 +               host->host_desc->enable_hotplug(probe_ent);
8064 +
8065 +       return 0;
8066 +#else
8067 +       rc = ata_device_add(probe_ent);
8068 +       if (rc != NV_PORTS)
8069 +               goto err_out_iounmap;
8070 +       
8071 +       if (nv_sgpio_capable(ent))
8072 +               nv_sgpio_init(pdev, host);
8073 +       // Enable hotplug event interrupts.
8074 +       if (host->host_desc->enable_hotplug)
8075 +               host->host_desc->enable_hotplug(probe_ent);
8076 +
8077 +       kfree(probe_ent);
8078 +
8079 +       return 0;
8080 +
8081 +err_out_iounmap:
8082 +       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
8083 +               iounmap(probe_ent->mmio_base);
8084 +#endif
8085 +err_out_free_host:
8086 +       kfree(host);
8087 +err_out_free_ent:
8088 +       kfree(probe_ent);
8089 +err_out_regions:
8090 +       pci_release_regions(pdev);
8091 +err_out_disable:
8092 +       if (!pci_dev_busy)
8093 +               pci_disable_device(pdev);
8094 +err_out:
8095 +       return rc;
8096 +}
8097 +
8098 +
8099 +static int nv_port_start(struct ata_port *ap)
8100 +{
8101 +       int stat;
8102 +       struct nv_port *port;
8103 +
8104 +       stat = ata_port_start(ap);
8105 +       if (stat) {
8106 +               return stat;
8107 +       }
8108 +
8109 +       port = kmalloc(sizeof(struct nv_port), GFP_KERNEL);
8110 +       if (!port) {
8111 +               ata_port_stop(ap);      /* release the PRD table from ata_port_start() */
8112 +               return -ENOMEM;
8113 +       }
8114 +
8115 +       memset(port, 0, sizeof(struct nv_port));
8116 +       ap->private_data = port;
8117 +       return 0;
8120 +}
8121 +
8122 +static void nv_port_stop(struct ata_port *ap)
8123 +{
8124 +       nv_sgpio_clear_all_leds(ap);
8125 +
8126 +       if (ap->private_data) {
8127 +               kfree(ap->private_data);
8128 +               ap->private_data = NULL;
8129 +       }
8130 +       ata_port_stop(ap);
8131 +}
8132 +
8133 +static int nv_qc_issue(struct ata_queued_cmd *qc)
8134 +{
8135 +       struct nv_port *port = qc->ap->private_data;
8136 +
8137 +       if (port) 
8138 +               port->port_sgpio.activity.flags.recent_activity = 1;
8139 +       return (ata_qc_issue_prot(qc));
8140 +}
8141 +
8142 +
8143 +
8144 +
8145 +static void nv_enable_hotplug(struct ata_probe_ent *probe_ent)
8146 +{
8147 +       u8 intr_mask;
8148 +
8149 +       outb(NV_INT_STATUS_HOTPLUG,
8150 +               probe_ent->port[0].scr_addr + NV_INT_STATUS);
8151 +
8152 +       intr_mask = inb(probe_ent->port[0].scr_addr + NV_INT_ENABLE);
8153 +       intr_mask |= NV_INT_ENABLE_HOTPLUG;
8154 +
8155 +       outb(intr_mask, probe_ent->port[0].scr_addr + NV_INT_ENABLE);
8156 +}
8157 +
8158 +static void nv_disable_hotplug(struct ata_host_set *host_set)
8159 +{
8160 +       u8 intr_mask;
8161 +
8162 +       intr_mask = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
8163 +
8164 +       intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
8165 +
8166 +       outb(intr_mask, host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
8167 +}
8168 +
8169 +static void nv_check_hotplug(struct ata_host_set *host_set)
8170 +{
8171 +       u8 intr_status;
8172 +
8173 +       intr_status = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
8174 +
8175 +       // Clear interrupt status.
8176 +       outb(0xff, host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
8177 +
8178 +       if (intr_status & NV_INT_STATUS_HOTPLUG) {
8179 +               if (intr_status & NV_INT_STATUS_PDEV_ADDED)
8180 +                       printk(KERN_WARNING "nv_sata: "
8181 +                               "Primary device added\n");
8182 +
8183 +               if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
8184 +                       printk(KERN_WARNING "nv_sata: "
8185 +                               "Primary device removed\n");
8186 +
8187 +               if (intr_status & NV_INT_STATUS_SDEV_ADDED)
8188 +                       printk(KERN_WARNING "nv_sata: "
8189 +                               "Secondary device added\n");
8190 +
8191 +               if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
8192 +                       printk(KERN_WARNING "nv_sata: "
8193 +                               "Secondary device removed\n");
8194 +       }
8195 +}
8196 +
8197 +static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent)
8198 +{
8199 +       struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
8200 +       u8 intr_mask;
8201 +       u8 regval;
8202 +
8203 +       pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
8204 +       regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
8205 +       pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
8206 +
8207 +       writeb(NV_INT_STATUS_HOTPLUG, probe_ent->mmio_base + NV_INT_STATUS_CK804);
8208 +
8209 +       intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_CK804);
8210 +       intr_mask |= NV_INT_ENABLE_HOTPLUG;
8211 +
8212 +       writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_CK804);
8213 +}
8214 +
8215 +static void nv_disable_hotplug_ck804(struct ata_host_set *host_set)
8216 +{
8217 +       struct pci_dev *pdev = to_pci_dev(host_set->dev);
8218 +       u8 intr_mask;
8219 +       u8 regval;
8220 +
8221 +       intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_CK804);
8222 +
8223 +       intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
8224 +
8225 +       writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_CK804);
8226 +
8227 +       pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
8228 +       regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
8229 +       pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
8230 +}
8231 +
8232 +static void nv_check_hotplug_ck804(struct ata_host_set *host_set)
8233 +{
8234 +       u8 intr_status;
8235 +
8236 +       intr_status = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
8237 +
8238 +       // Clear interrupt status.
8239 +       writeb(0xff, host_set->mmio_base + NV_INT_STATUS_CK804);
8240 +
8241 +       if (intr_status & NV_INT_STATUS_HOTPLUG) {
8242 +               if (intr_status & NV_INT_STATUS_PDEV_ADDED)
8243 +                       printk(KERN_WARNING "nv_sata: "
8244 +                               "Primary device added\n");
8245 +
8246 +               if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
8247 +                       printk(KERN_WARNING "nv_sata: "
8248 +                               "Primary device removed\n");
8249 +
8250 +               if (intr_status & NV_INT_STATUS_SDEV_ADDED)
8251 +                       printk(KERN_WARNING "nv_sata: "
8252 +                               "Secondary device added\n");
8253 +
8254 +               if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
8255 +                       printk(KERN_WARNING "nv_sata: "
8256 +                               "Secondary device removed\n");
8257 +       }
8258 +}
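+
+/* MCP55 exposes per-port hotplug bits at NV_INT_STATUS_MCP55 and at
+ * offset +2 for the second port; 0x0c is the added|removed pair
+ * (NV_INT_STATUS_PDEV_ADDED | NV_INT_STATUS_PDEV_REMOVED). */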
8259 +static void nv_enable_hotplug_mcp55(struct ata_probe_ent *probe_ent)
8260 +{
8261 +       struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
8262 +       u8 intr_mask;
8263 +       u8 regval;
8264 +
8265 +       pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
8266 +       regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
8267 +       pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
8268 +
8269 +       writeb(0x0c, probe_ent->mmio_base + NV_INT_STATUS_MCP55);
8270 +       writeb(0x0c, probe_ent->mmio_base + NV_INT_STATUS_MCP55+2);
8271 +
8272 +       intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_MCP55);
8273 +       intr_mask |= 0x0c;
8274 +       writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_MCP55);
8275 +
8276 +       intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_MCP55+2);
8277 +       intr_mask |= 0x0c;
8278 +       writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_MCP55+2);
8279 +}
8280 +
8281 +static void nv_disable_hotplug_mcp55(struct ata_host_set *host_set)
8282 +{
8283 +       struct pci_dev *pdev = to_pci_dev(host_set->dev);
8284 +       u8 intr_mask;
8285 +       u8 regval;
8286 +
8287 +       intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_MCP55);
8288 +       intr_mask &= ~(0x0C);
8289 +       writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_MCP55);
8290 +       
8291 +       intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_MCP55+2);
8292 +       intr_mask &= ~(0x0C);
8293 +       writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_MCP55+2);
8294 +
8295 +       pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
8296 +       regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
8297 +       pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
8298 +}
8299 +
8300 +static void nv_check_hotplug_mcp55(struct ata_host_set *host_set)
8301 +{
8302 +       u8 intr_status,intr_status1;
8303 +
8304 +       intr_status = readb(host_set->mmio_base + NV_INT_STATUS_MCP55);
8305 +       intr_status1 = readb(host_set->mmio_base + NV_INT_STATUS_MCP55+2);
8306 +
8307 +       // Clear interrupt status.
8308 +       writeb(0xff, host_set->mmio_base + NV_INT_STATUS_MCP55);
8309 +       writeb(0xff, host_set->mmio_base + NV_INT_STATUS_MCP55+2);      
8310 +
8311 +       if ((intr_status & 0x0c) || (intr_status1&0x0c)) {
8312 +               if (intr_status & 0x04)
8313 +                       printk(KERN_WARNING "nv_sata: "
8314 +                               "Primary device added\n");
8315 +
8316 +               if (intr_status & 0x08)
8317 +                       printk(KERN_WARNING "nv_sata: "
8318 +                               "Primary device removed\n");
8319 +
8320 +               if (intr_status1 & 0x04)
8321 +                       printk(KERN_WARNING "nv_sata: "
8322 +                               "Secondary device added\n");
8323 +
8324 +               if (intr_status1 & 0x08)
8325 +                       printk(KERN_WARNING "nv_sata: "
8326 +                               "Secondary device removed\n");
8327 +       }
8328 +}
8329 +
8330 +
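+/* Read the SGPIO CSR port and control-block address that the BIOS
+ * publishes in PCI config space, validate them, and arm the update
+ * timer.  The first controller function to get here initializes the
+ * lock and timestamp shared through the control block's scratch_space. */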
8331 +static void nv_sgpio_init(struct pci_dev *pdev, struct nv_host *phost)
8332 +{
8333 +       u16 csr_add; 
8334 +       u32 cb_add, temp32;
8335 +       struct device *dev = pci_dev_to_dev(pdev);
8336 +       struct ata_host_set *host_set = dev_get_drvdata(dev);
8337 +       u8 pro=0;
8338 +       pci_read_config_word(pdev, NV_SGPIO_PCI_CSR_OFFSET, &csr_add);
8339 +       pci_read_config_dword(pdev, NV_SGPIO_PCI_CB_OFFSET, &cb_add);
8340 +       pci_read_config_byte(pdev, 0xA4, &pro);
8341 +
8342 +       if (csr_add == 0 || cb_add == 0)
8343 +               return;
8344 +
8345 +       /* Bit 6 of config register 0xA4 appears to gate SGPIO support. */
8346 +       if (!(pro & 0x40))
8347 +               return;
8348 +
8349 +       /* Sanity-check the CSR I/O port and control-block physical address
8350 +        * (vendor-chosen ranges) before mapping or dereferencing them. */
8351 +       temp32 = csr_add;
8352 +       if (temp32 <= 0x200 || temp32 >= 0xFFFE)
8353 +               return;
8354 +       if (cb_add <= 0x80000 || cb_add >= 0x9FC00)
8355 +               return;
8356 +
8357 +       phost->host_sgpio.pcsr = (void *)temp32;
8358 +       phost->host_sgpio.pcb = phys_to_virt(cb_add);
8359 +
8360 +       if (phost->host_sgpio.pcb->nvcr.bit.init_cnt != 0x2 ||
8361 +           phost->host_sgpio.pcb->nvcr.bit.cbver != 0x0)
8362 +               return;
8363 +
8364 +               
8365 +       if (phost->host_sgpio.pcb->scratch_space == 0) {
8366 +               spin_lock_init(&nv_sgpio_lock);
8367 +               phost->host_sgpio.share.plock = &nv_sgpio_lock;
8368 +               phost->host_sgpio.share.ptstamp = &nv_sgpio_tstamp;
8369 +               phost->host_sgpio.pcb->scratch_space = 
8370 +                       (unsigned long)&phost->host_sgpio.share;
8371 +               spin_lock(phost->host_sgpio.share.plock);
8372 +               nv_sgpio_reset(phost->host_sgpio.pcsr);
8373 +               phost->host_sgpio.pcb->cr0 = 
8374 +                       SET_ENABLE(phost->host_sgpio.pcb->cr0);
8375 +
8376 +               spin_unlock(phost->host_sgpio.share.plock);
8377 +       }
8378 +
8379 +       phost->host_sgpio.share =
8380 +               *(struct nv_sgpio_host_share *)(unsigned long)
8381 +               phost->host_sgpio.pcb->scratch_space;
8382 +       phost->host_sgpio.flags.sgpio_enabled = 1;
8383 +       phost->pdev = pdev;
8384 +       init_timer(&phost->host_sgpio.sgpio_timer);
8385 +       phost->host_sgpio.sgpio_timer.data = (unsigned long)phost;
8386 +       nv_sgpio_set_timer(&phost->host_sgpio.sgpio_timer, 
8387 +                               NV_SGPIO_UPDATE_TICK);
8388 +}
8389 +
8390 +static void __nv_sgpio_timer_handler(unsigned long context);
8391 +static void nv_sgpio_set_timer(struct timer_list *ptimer, unsigned int timeout_msec)
8392 +{
8393 +       if (!ptimer)
8394 +               return;
8395 +       ptimer->function = __nv_sgpio_timer_handler;
8396 +       ptimer->expires = msecs_to_jiffies(timeout_msec) + jiffies;
8397 +       add_timer(ptimer);
8398 +}
8399 +static void __nv_sgpio_timer_handler(unsigned long context)
8400 +{
8401 +       struct nv_host *phost = (struct nv_host*)context;
8402 +       struct device *dev = pci_dev_to_dev(phost->pdev);
8403 +       struct ata_host_set *host_set = dev_get_drvdata(dev);
8404 +       
8405 +       if (!host_set)
8406 +               nv_sgpio_set_timer(&phost->host_sgpio.sgpio_timer, 
8407 +                               NV_SGPIO_UPDATE_TICK);
8408 +       else
8409 +               nv_sgpio_timer_handler(host_set);
8410 +       
8411 +}
8412 +
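+/* Periodic worker: snapshot this controller's tx word, fold in the LED
+ * state of every active port, then either push a WRITE_DATA command to
+ * the SGPIO unit or simply rearm the timer for the next tick. */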
8413 +static void nv_sgpio_timer_handler(unsigned long context)
8414 +{
8415 +
8416 +       struct ata_host_set *host_set = (struct ata_host_set *)context;
8417 +       struct nv_host *host;
8418 +       u8 count, host_offset, port_offset;
8419 +       union nv_sgpio_tx tx;
8420 +       bool on_off;
8421 +       unsigned long mask = 0xFFFF;
8422 +       struct nv_port *port;
8423 +
8424 +       if (!host_set)
8425 +               goto err_out;
8426 +       host = (struct nv_host *)host_set->private_data;
8428 +
8429 +       if (!host->host_sgpio.flags.sgpio_enabled)
8430 +               goto err_out;
8431 +
8432 +       host_offset = nv_sgpio_tx_host_offset(host_set);
8433 +
8434 +       spin_lock(host->host_sgpio.share.plock);
8435 +       tx = host->host_sgpio.pcb->tx[host_offset];
8436 +       spin_unlock(host->host_sgpio.share.plock);
8437 +
8438 +       for (count = 0; count < host_set->n_ports; count++) {
8439 +               struct ata_port *ap; 
8440 +
8441 +               ap = host_set->ports[count];
8442 +
8443 +               if (!ap || (ap->flags & ATA_FLAG_PORT_DISABLED))
8444 +                       continue;
8445 +
8446 +               port = (struct nv_port *)ap->private_data;
8447 +               if (!port)
8448 +                       continue;
8449 +               port_offset = nv_sgpio_tx_port_offset(ap);
8450 +               on_off = GET_ACTIVITY(tx.tx_port[port_offset]);
8451 +               if (nv_sgpio_update_led(&port->port_sgpio.activity, &on_off)) {
8452 +                       tx.tx_port[port_offset] = 
8453 +                               SET_ACTIVITY(tx.tx_port[port_offset], on_off);
8454 +                       host->host_sgpio.flags.need_update = 1;
8455 +               }
8456 +       }
8457 +
8458 +       /* The 16-bit mask splits each tx word between the two controller functions sharing it. */
8459 +       if (host->host_sgpio.flags.need_update) {
8460 +               spin_lock(host->host_sgpio.share.plock);    
8461 +               if (nv_sgpio_get_func(host_set) 
8462 +                       % NV_CNTRLR_SHARE_INIT == 0) {
8463 +                       host->host_sgpio.pcb->tx[host_offset].all &= mask;
8464 +                       mask = mask << 16;
8465 +                       tx.all &= mask;
8466 +               } else {
8467 +                       tx.all &= mask;
8468 +                       mask = mask << 16;
8469 +                       host->host_sgpio.pcb->tx[host_offset].all &= mask;
8470 +               }
8471 +               host->host_sgpio.pcb->tx[host_offset].all |= tx.all;
8472 +               spin_unlock(host->host_sgpio.share.plock);     
8473 +
8474 +               if (nv_sgpio_send_cmd(host, NV_SGPIO_CMD_WRITE_DATA)) { 
8475 +                       host->host_sgpio.flags.need_update = 0;
8476 +                       return;
8477 +               }
8478 +       } else {
8479 +               nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer, 
8480 +                               NV_SGPIO_UPDATE_TICK);
8481 +       }
8482 +err_out:
8483 +       return;
8484 +}
8485 +
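+/* Issue an SGPIO command, rate-limited to one command per
+ * NV_SGPIO_MIN_UPDATE_DELTA ms across all controllers sharing the
+ * timestamp.  Returns 1 if the interval had elapsed (whether or not the
+ * unit accepted a command), 0 if the timer was rearmed to retry later. */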
8486 +static bool nv_sgpio_send_cmd(struct nv_host *host, u8 cmd)
8487 +{
8488 +       u8 csr;
8489 +       unsigned long *ptstamp;
8490 +
8491 +       spin_lock(host->host_sgpio.share.plock);    
8492 +       ptstamp = host->host_sgpio.share.ptstamp;
8493 +       if (jiffies_to_msecs1(jiffies - *ptstamp) >= NV_SGPIO_MIN_UPDATE_DELTA) {
8494 +               csr = nv_sgpio_get_csr((unsigned long)host->host_sgpio.pcsr);
8496 +               if ((GET_SGPIO_STATUS(csr) != NV_SGPIO_STATE_OPERATIONAL) ||
8497 +                       (GET_CMD_STATUS(csr) == NV_SGPIO_CMD_ACTIVE)) {
8498 +                       //nv_sgpio_reset(host->host_sgpio.pcsr);
8499 +               } else {
8500 +                       host->host_sgpio.pcb->cr0 = 
8501 +                               SET_ENABLE(host->host_sgpio.pcb->cr0);
8502 +                       csr = 0;
8503 +                       csr = SET_CMD(csr, cmd);
8504 +                       nv_sgpio_set_csr(csr, 
8505 +                               (unsigned long)host->host_sgpio.pcsr);
8506 +                       *ptstamp = jiffies;
8507 +               }
8508 +               spin_unlock(host->host_sgpio.share.plock);
8509 +               nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer, 
8510 +                       NV_SGPIO_UPDATE_TICK);
8511 +               return 1;
8512 +       } else {
8513 +               spin_unlock(host->host_sgpio.share.plock);
8514 +               nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer, 
8515 +                               (NV_SGPIO_MIN_UPDATE_DELTA - 
8516 +                               jiffies_to_msecs1(jiffies - *ptstamp)));
8517 +               return 0;
8518 +       }
8519 +}
8520 +
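+/* LED blink policy: mirror recent command activity onto the LED, but
+ * after NV_SGPIO_MAX_ACTIVITY_ON consecutive active ticks force it off
+ * for NV_SGPIO_MIN_FORCE_OFF ticks so a continuously busy drive still
+ * visibly blinks. */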
8521 +static bool nv_sgpio_update_led(struct nv_sgpio_led *led, bool *on_off)
8522 +{
8523 +       bool need_update = 0;
8524 +
8525 +       if (led->force_off > 0) {
8526 +               led->force_off--;
8527 +       } else if (led->flags.recent_activity ^ led->flags.last_state) {
8528 +               *on_off = led->flags.recent_activity;
8529 +               led->flags.last_state = led->flags.recent_activity;
8530 +               need_update = 1;
8531 +       } else if ((led->flags.recent_activity & led->flags.last_state) &&
8532 +               (led->last_cons_active >= NV_SGPIO_MAX_ACTIVITY_ON)) {
8533 +               *on_off = NV_OFF;
8534 +               led->flags.last_state = NV_OFF;
8535 +               led->force_off = NV_SGPIO_MIN_FORCE_OFF;
8536 +               need_update = 1;
8537 +       }
8538 +
8539 +       if (*on_off) 
8540 +               led->last_cons_active++;        
8541 +       else
8542 +               led->last_cons_active = 0;
8543 +
8544 +       led->flags.recent_activity = 0;
8545 +       return need_update;
8546 +}
8547 +
8548 +static void nv_sgpio_reset(u8  *pcsr)
8549 +{
8550 +       u8 csr;
8551 +
8552 +       csr = nv_sgpio_get_csr((unsigned long)pcsr);
8553 +       if (GET_SGPIO_STATUS(csr) == NV_SGPIO_STATE_RESET) {
8554 +               csr = 0;
8555 +               csr = SET_CMD(csr, NV_SGPIO_CMD_RESET);
8556 +               nv_sgpio_set_csr(csr, (unsigned long)pcsr);
8557 +       }
8558 +       csr = 0;
8559 +       csr = SET_CMD(csr, NV_SGPIO_CMD_READ_PARAMS);
8560 +       nv_sgpio_set_csr(csr, (unsigned long)pcsr);
8561 +}
8562 +
8563 +static void nv_sgpio_host_cleanup(struct nv_host *host)
8564 +{
8565 +       u8 csr;
8566 +       if (!host)
8567 +               return;
8568 +
8569 +       if (host->host_sgpio.flags.sgpio_enabled) {
8570 +               spin_lock(host->host_sgpio.share.plock);
8571 +               host->host_sgpio.pcb->cr0 = 
8572 +                       SET_ENABLE(host->host_sgpio.pcb->cr0);
8573 +               csr = 0;
8574 +               csr = SET_CMD(csr, NV_SGPIO_CMD_WRITE_DATA);
8575 +               nv_sgpio_set_csr(csr, 
8576 +                       (unsigned long)host->host_sgpio.pcsr);
8577 +               spin_unlock(host->host_sgpio.share.plock);
8578 +       
8579 +               if (timer_pending(&host->host_sgpio.sgpio_timer))
8580 +                       del_timer(&host->host_sgpio.sgpio_timer);
8581 +               host->host_sgpio.flags.sgpio_enabled = 0;
8582 +               host->host_sgpio.pcb->scratch_space = 0;
8583 +       }
8584 +       
8585 +}
8586 +
8587 +static void nv_sgpio_clear_all_leds(struct ata_port *ap)
8588 +{
8589 +       struct nv_port *port = ap->private_data;
8590 +       struct nv_host *host;
8591 +       u8 host_offset, port_offset;
8592 +
8593 +       if (!port || !ap->host_set)
8594 +               return;
8595 +       if (!ap->host_set->private_data)
8596 +               return;
8597 +
8598 +       host = ap->host_set->private_data;
8599 +       if (!host->host_sgpio.flags.sgpio_enabled)
8600 +               return;
8601 +
8602 +       host_offset = nv_sgpio_tx_host_offset(ap->host_set);
8603 +       port_offset = nv_sgpio_tx_port_offset(ap);
8604 +
8605 +       spin_lock(host->host_sgpio.share.plock);
8606 +       host->host_sgpio.pcb->tx[host_offset].tx_port[port_offset] = 0;
8607 +       host->host_sgpio.flags.need_update = 1;
8608 +       spin_unlock(host->host_sgpio.share.plock);
8609 +}
8610 +
8611 +
8612 +
8613 +static int __init nv_init(void)
8614 +{
8615 +#ifdef RHAS3U7
8616 +       int rc;
8617 +       rc = pci_module_init(&nv_pci_driver);
8618 +       if (rc)
8619 +               return rc;
8620 +       
8621 +       rc = scsi_register_module(MODULE_SCSI_HA, &nv_sht);
8622 +       if (rc) {
8623 +               pci_unregister_driver(&nv_pci_driver);
8624 +               /* TODO: does scsi_register_module return errno val? */
8625 +               return -ENODEV;
8626 +       }
8627 +
8628 +       return 0;
8629 +#else
8630 +       return pci_module_init(&nv_pci_driver);
8631 +#endif
8632 +}
8633 +
8634 +static void __exit nv_exit(void)
8635 +{
8636 +#ifdef RHAS3U7
8637 +       scsi_unregister_module(MODULE_SCSI_HA, &nv_sht);
8638 +#endif
8639 +       pci_unregister_driver(&nv_pci_driver);
8641 +}
8642 +
8643 +module_init(nv_init);
8644 +module_exit(nv_exit);