1 diff -uNr linux-2.6.16.orig/drivers/net/forcedeth.c linux-2.6.16/drivers/net/forcedeth.c
2 --- linux-2.6.16.orig/drivers/net/forcedeth.c   2007-06-23 20:16:01.572248000 +0200
3 +++ linux-2.6.16/drivers/net/forcedeth.c        2006-10-21 14:44:00.000000000 +0200
4 @@ -102,6 +102,17 @@
5   *     0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
6   *     0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
7   *     0.49: 10 Dec 2005: Fix tso for large buffers.
8 + *     0.50: 20 Jan 2006: Add 8021pq tagging support.
9 + *     0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
10 + *     0.52: 20 Jan 2006: Add MSI/MSIX support.
11 + *     0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
12 + *     0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
13 + *     0.55: 22 Mar 2006: Add flow control (pause frame).
14 + *     0.56: 22 Mar 2006: Additional ethtool and moduleparam support.
15 + *     0.57: 14 May 2006: Moved mac address writes to nv_probe and nv_remove.
16 + *     0.58: 20 May 2006: Optimized rx and tx data paths.
17 + *     0.59: 31 May 2006: Added support for sideband management unit.
18 + *     0.60: 31 May 2006: Added support for recoverable error.
19   *
20   * Known bugs:
21   * We suspect that on some hardware no TX done interrupts are generated.
22 @@ -113,7 +124,7 @@
23   * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
24   * superfluous timer interrupts from the nic.
25   */
26 -#define FORCEDETH_VERSION              "0.49"
27 +#define FORCEDETH_VERSION              "0.60-Driver Package V1.21"
28  #define DRV_NAME                       "forcedeth"
29  
30  #include <linux/module.h>
31 @@ -131,34 +142,189 @@
32  #include <linux/random.h>
33  #include <linux/init.h>
34  #include <linux/if_vlan.h>
35 +#include <linux/rtnetlink.h>
36 +#include <linux/version.h>
37 +
38 +#define RHES3                  0
39 +#define SLES9                  1
40 +#define RHES4                  2
41 +#define SUSE10                 3
42 +#define FEDORA5                4
43 +
44
45 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13)
46 +#define NVVER FEDORA5
47 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
48 +#define NVVER SUSE10
49 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,6)
50 +#define NVVER RHES4
51 +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
52 +#define NVVER SLES9
53 +#else
54 +#define NVVER RHES3
55 +#endif
56 +
57 +#if NVVER > RHES3
58 +#include <linux/dma-mapping.h>
59 +#else
60 +#include <linux/forcedeth-compat.h>
61 +#endif
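
The block above maps LINUX_VERSION_CODE onto distro-era labels (RHES3 through FEDORA5) and then gates includes and compat code on those labels. The same idiom in isolation, as a minimal sketch; the helper nv_alloc_coherent() is hypothetical and not part of this patch:

#include <linux/version.h>
#include <linux/pci.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
#include <linux/dma-mapping.h>
/* modern kernels: generic DMA API */
static inline void *nv_alloc_coherent(struct pci_dev *pdev, size_t sz,
                                      dma_addr_t *handle)
{
        return dma_alloc_coherent(&pdev->dev, sz, handle, GFP_KERNEL);
}
#else
/* older kernels: PCI-specific allocator */
static inline void *nv_alloc_coherent(struct pci_dev *pdev, size_t sz,
                                      dma_addr_t *handle)
{
        return pci_alloc_consistent(pdev, sz, handle);
}
#endif
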
62  
63  #include <asm/irq.h>
64  #include <asm/io.h>
65  #include <asm/uaccess.h>
66  #include <asm/system.h>
67  
68 -#if 0
69 +#ifdef  NVLAN_DEBUG
70  #define dprintk                        printk
71  #else
72  #define dprintk(x...)          do { } while (0)
73  #endif
74  
75 +/* these really belong in pci_ids.h */
76 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_12
77 +#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268 
78 +#endif
79 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_13
80 +#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269 
81 +#endif
82 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_14
83 +#define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372 
84 +#endif
85 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_15
86 +#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 
87 +#endif
88 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_16
89 +#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5
90 +#endif
91 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_17
92 +#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6 
93 +#endif
94 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_18
95 +#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE 
96 +#endif
97 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_19
98 +#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF 
99 +#endif
100 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_20
101 +#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450 
102 +#endif
103 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_21
104 +#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451 
105 +#endif
106 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_22
107 +#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452 
108 +#endif
109 +#ifndef PCI_DEVICE_ID_NVIDIA_NVENET_23
110 +#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453 
111 +#endif
112 +
113 +/* these really belong in mii.h */
114 +#ifndef ADVERTISE_1000HALF
115 +#define ADVERTISE_1000HALF     0x0100
116 +#endif
117 +#ifndef ADVERTISE_1000FULL
118 +#define ADVERTISE_1000FULL     0x0200
119 +#endif
120 +#ifndef ADVERTISE_PAUSE_CAP
121 +#define ADVERTISE_PAUSE_CAP    0x0400
122 +#endif
123 +#ifndef ADVERTISE_PAUSE_ASYM
124 +#define ADVERTISE_PAUSE_ASYM   0x0800
125 +#endif
126 +#ifndef MII_CTRL1000
127 +#define MII_CTRL1000           0x09 
128 +#endif
129 +#ifndef MII_STAT1000
130 +#define MII_STAT1000           0x0A 
131 +#endif
132 +#ifndef LPA_1000FULL
133 +#define LPA_1000FULL           0x0800
134 +#endif
135 +#ifndef LPA_1000HALF
136 +#define LPA_1000HALF           0x0400 
137 +#endif
138 +#ifndef LPA_PAUSE_CAP
139 +#define LPA_PAUSE_CAP          0x0400
140 +#endif
141 +#ifndef LPA_PAUSE_ASYM
142 +#define LPA_PAUSE_ASYM         0x0800
143 +#endif
144 +#ifndef BMCR_SPEED1000
145 +#define BMCR_SPEED1000         0x0040  /* MSB of Speed (1000)         */
146 +#endif
147 +
148 +#ifndef NETDEV_TX_OK
149 +#define NETDEV_TX_OK           0       /* driver took care of packet */
150 +#endif
151 +
152 +#ifndef NETDEV_TX_BUSY
153 +#define NETDEV_TX_BUSY                 1    /* driver tx path was busy */
154 +#endif
155 +
156 +#ifndef DMA_39BIT_MASK
157 +#define DMA_39BIT_MASK         0x0000007fffffffffULL    
158 +#endif
159 +
160 +#ifndef __iomem
161 +#define __iomem 
162 +#endif
163 +
164 +/* rx/tx mac addr + type + vlan + align + slack */
165 +#ifndef RX_NIC_BUFSIZE 
166 +#define RX_NIC_BUFSIZE         (ETH_DATA_LEN + 64)
167 +#endif
168 +/* even more slack */
169 +#ifndef RX_ALLOC_BUFSIZE       
170 +#define RX_ALLOC_BUFSIZE       (ETH_DATA_LEN + 128)
171 +#endif
172 +
173 +#ifndef PCI_DEVICE
174 +#define PCI_DEVICE(vend,dev) \
175 +       .vendor = (vend), .device = (dev), \
176 +       .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
177 +#endif
178 +
179 +#if NVVER < RHES4
180 +struct msix_entry {
181 +       u16 vector;     /* kernel uses to write allocated vector */
182 +       u16 entry;      /* driver uses to specify entry, OS writes */
183 +};
184 +#endif
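
On kernels older than the RHES4 baseline the patch supplies its own msix_entry so the MSI-X code still compiles; on newer kernels the struct comes from <linux/pci.h>. A sketch of the expected calling pattern, assuming the 2.6-era pci_enable_msix() API; the helper name nv_request_msix() is hypothetical, error handling is trimmed, and the fe_priv fields are the ones declared later in this patch:

static int nv_request_msix(struct fe_priv *np)
{
        int i;

        /* the driver fills .entry to pick MSI-X table slots ... */
        for (i = 0; i < NV_MSI_X_MAX_VECTORS; i++)
                np->msi_x_entry[i].entry = i;

        /* ... and on success the kernel fills .vector for each slot */
        return pci_enable_msix(np->pci_dev, np->msi_x_entry,
                               NV_MSI_X_MAX_VECTORS);
}
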
185  
186  /*
187   * Hardware access:
188   */
189  
190 -#define DEV_NEED_TIMERIRQ      0x0000  /* work-around for Wake-On-Lan */
191 -#define DEV_NEED_TIMERIRQ_ORIG 0x0001  /* set the timer irq flag in the irq mask */
192 +#define DEV_NEED_TIMERIRQ      0x0001  /* set the timer irq flag in the irq mask */
193  #define DEV_NEED_LINKTIMER     0x0002  /* poll link settings. Relies on the timer irq */
194  #define DEV_HAS_LARGEDESC      0x0004  /* device supports jumbo frames and needs packet format 2 */
195  #define DEV_HAS_HIGH_DMA        0x0008  /* device supports 64bit dma */
196  #define DEV_HAS_CHECKSUM        0x0010  /* device supports tx and rx checksum offloads */
197 +#define DEV_HAS_VLAN            0x0020  /* device supports vlan tagging and striping */
198 +#define DEV_HAS_MSI             0x0040  /* device supports MSI */
199 +#define DEV_HAS_MSI_X           0x0080  /* device supports MSI-X */
200 +#define DEV_HAS_POWER_CNTRL     0x0100  /* device supports power savings */
201 +#define DEV_HAS_PAUSEFRAME_TX   0x0200  /* device supports tx pause frames */
202 +#define DEV_HAS_STATISTICS      0x0400  /* device supports hw statistics */
203 +#define DEV_HAS_TEST_EXTENDED   0x0800  /* device supports extended diagnostic test */
204 +#define DEV_HAS_MGMT_UNIT       0x1000  /* device supports management unit */
205 +
206 +#define NVIDIA_ETHERNET_ID(deviceid,nv_driver_data) {\
207 +               .vendor = PCI_VENDOR_ID_NVIDIA, \
208 +               .device = deviceid, \
209 +               .subvendor = PCI_ANY_ID, \
210 +               .subdevice = PCI_ANY_ID, \
211 +               .driver_data = nv_driver_data, \
212 +               },
213 +               
214 +#define Mv_LED_Control 16
215 +#define Mv_Page_Address 22
216  
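
NVIDIA_ETHERNET_ID expands to a complete pci_device_id initializer, braces and trailing comma included, so table entries need no comma of their own. An illustrative entry; the real table and its flag combinations appear further down in the patch:

static struct pci_device_id pci_tbl[] = {
        /* the flags shown are illustrative, not the patch's actual choice */
        NVIDIA_ETHERNET_ID(PCI_DEVICE_ID_NVIDIA_NVENET_12,
                           DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM)
        { 0, }  /* terminator */
};
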
217  enum {
218         NvRegIrqStatus = 0x000,
219  #define NVREG_IRQSTAT_MIIEVENT 0x040
220 -#define NVREG_IRQSTAT_MASK             0x1ff
221 +#define NVREG_IRQSTAT_MASK             0x81ff
222         NvRegIrqMask = 0x004,
223  #define NVREG_IRQ_RX_ERROR             0x0001
224  #define NVREG_IRQ_RX                   0x0002
225 @@ -167,14 +333,18 @@
226  #define NVREG_IRQ_TX_OK                        0x0010
227  #define NVREG_IRQ_TIMER                        0x0020
228  #define NVREG_IRQ_LINK                 0x0040
229 -#define NVREG_IRQ_TX_ERROR             0x0080
230 -#define NVREG_IRQ_TX1                  0x0100
231 +#define NVREG_IRQ_RX_FORCED            0x0080
232 +#define NVREG_IRQ_TX_FORCED            0x0100
233 +#define NVREG_IRQ_RECOVER_ERROR                0x8000
234  #define NVREG_IRQMASK_THROUGHPUT       0x00df
235  #define NVREG_IRQMASK_CPU              0x0040
236 +#define NVREG_IRQ_TX_ALL               (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
237 +#define NVREG_IRQ_RX_ALL               (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
238 +#define NVREG_IRQ_OTHER                        (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
239  
240  #define NVREG_IRQ_UNKNOWN      (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
241 -                                       NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
242 -                                       NVREG_IRQ_TX1))
243 +                                       NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
244 +                                       NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))
245  
246         NvRegUnknownSetupReg6 = 0x008,
247  #define NVREG_UNKSETUP6_VAL            3
248 @@ -186,25 +356,45 @@
249         NvRegPollingInterval = 0x00c,
250  #define NVREG_POLL_DEFAULT_THROUGHPUT  970
251  #define NVREG_POLL_DEFAULT_CPU 13
252 +       NvRegMSIMap0 = 0x020,
253 +       NvRegMSIMap1 = 0x024,
254 +       NvRegMSIIrqMask = 0x030,
255 +#define NVREG_MSI_VECTOR_0_ENABLED 0x01
256         NvRegMisc1 = 0x080,
257 +#define NVREG_MISC1_PAUSE_TX   0x01
258  #define NVREG_MISC1_HD         0x02
259  #define NVREG_MISC1_FORCE      0x3b0f3c
260  
261 +       NvRegMacReset = 0x3c,
262 +#define NVREG_MAC_RESET_ASSERT 0x0F3
263         NvRegTransmitterControl = 0x084,
264  #define NVREG_XMITCTL_START    0x01
265 +#define NVREG_XMITCTL_MGMT_ST  0x40000000
266 +#define NVREG_XMITCTL_SYNC_MASK                0x000f0000
267 +#define NVREG_XMITCTL_SYNC_NOT_READY   0x0
268 +#define NVREG_XMITCTL_SYNC_PHY_INIT    0x00040000
269 +#define NVREG_XMITCTL_MGMT_SEMA_MASK   0x00000f00
270 +#define NVREG_XMITCTL_MGMT_SEMA_FREE   0x0
271 +#define NVREG_XMITCTL_HOST_SEMA_MASK   0x0000f000
272 +#define NVREG_XMITCTL_HOST_SEMA_ACQ    0x0000f000
273 +#define NVREG_XMITCTL_HOST_LOADED      0x00004000
274 +#define NVREG_XMITCTL_TX_PATH_EN       0x01000000
275         NvRegTransmitterStatus = 0x088,
276  #define NVREG_XMITSTAT_BUSY    0x01
277  
278         NvRegPacketFilterFlags = 0x8c,
279 -#define NVREG_PFF_ALWAYS       0x7F0008
280 +#define NVREG_PFF_PAUSE_RX     0x08
281 +#define NVREG_PFF_ALWAYS       0x7F0000
282  #define NVREG_PFF_PROMISC      0x80
283  #define NVREG_PFF_MYADDR       0x20
284 +#define NVREG_PFF_LOOPBACK     0x10
285  
286         NvRegOffloadConfig = 0x90,
287  #define NVREG_OFFLOAD_HOMEPHY  0x601
288  #define NVREG_OFFLOAD_NORMAL   RX_NIC_BUFSIZE
289         NvRegReceiverControl = 0x094,
290  #define NVREG_RCVCTL_START     0x01
291 +#define NVREG_RCVCTL_RX_PATH_EN        0x01000000
292         NvRegReceiverStatus = 0x98,
293  #define NVREG_RCVSTAT_BUSY     0x01
294  
295 @@ -214,10 +404,12 @@
296  #define NVREG_RNDSEED_FORCE2   0x2d00
297  #define NVREG_RNDSEED_FORCE3   0x7400
298  
299 -       NvRegUnknownSetupReg1 = 0xA0,
300 -#define NVREG_UNKSETUP1_VAL    0x16070f
301 -       NvRegUnknownSetupReg2 = 0xA4,
302 -#define NVREG_UNKSETUP2_VAL    0x16
303 +       NvRegTxDeferral = 0xA0,
304 +#define NVREG_TX_DEFERRAL_DEFAULT              0x15050f
305 +#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
306 +#define NVREG_TX_DEFERRAL_RGMII_1000   0x14050f
307 +       NvRegRxDeferral = 0xA4,
308 +#define NVREG_RX_DEFERRAL_DEFAULT      0x16
309         NvRegMacAddrA = 0xA8,
310         NvRegMacAddrB = 0xAC,
311         NvRegMulticastAddrA = 0xB0,
312 @@ -234,7 +426,8 @@
313         NvRegRingSizes = 0x108,
314  #define NVREG_RINGSZ_TXSHIFT 0
315  #define NVREG_RINGSZ_RXSHIFT 16
316 -       NvRegUnknownTransmitterReg = 0x10c,
317 +       NvRegTransmitPoll = 0x10c,
318 +#define NVREG_TRANSMITPOLL_MAC_ADDR_REV        0x00008000
319         NvRegLinkSpeed = 0x110,
320  #define NVREG_LINKSPEED_FORCE 0x10000
321  #define NVREG_LINKSPEED_10     1000
322 @@ -243,8 +436,10 @@
323  #define NVREG_LINKSPEED_MASK   (0xFFF)
324         NvRegUnknownSetupReg5 = 0x130,
325  #define NVREG_UNKSETUP5_BIT31  (1<<31)
326 -       NvRegUnknownSetupReg3 = 0x13c,
327 -#define NVREG_UNKSETUP3_VAL1   0x200010
328 +       NvRegTxWatermark = 0x13c,
329 +#define NVREG_TX_WM_DESC1_DEFAULT      0x0200010
330 +#define NVREG_TX_WM_DESC2_3_DEFAULT    0x1e08000
331 +#define NVREG_TX_WM_DESC2_3_1000       0xfe08000
332         NvRegTxRxControl = 0x144,
333  #define NVREG_TXRXCTL_KICK     0x0001
334  #define NVREG_TXRXCTL_BIT1     0x0002
335 @@ -253,15 +448,22 @@
336  #define NVREG_TXRXCTL_RESET    0x0010
337  #define NVREG_TXRXCTL_RXCHECK  0x0400
338  #define NVREG_TXRXCTL_DESC_1   0
339 -#define NVREG_TXRXCTL_DESC_2   0x02100
340 -#define NVREG_TXRXCTL_DESC_3   0x02200
341 +#define NVREG_TXRXCTL_DESC_2   0x002100
342 +#define NVREG_TXRXCTL_DESC_3   0xc02200
343 +#define NVREG_TXRXCTL_VLANSTRIP 0x00040
344 +#define NVREG_TXRXCTL_VLANINS  0x00080
345 +       NvRegTxRingPhysAddrHigh = 0x148,
346 +       NvRegRxRingPhysAddrHigh = 0x14C,
347 +       NvRegTxPauseFrame = 0x170,
348 +#define NVREG_TX_PAUSEFRAME_DISABLE    0x1ff0080
349 +#define NVREG_TX_PAUSEFRAME_ENABLE     0x0c00030
350         NvRegMIIStatus = 0x180,
351  #define NVREG_MIISTAT_ERROR            0x0001
352  #define NVREG_MIISTAT_LINKCHANGE       0x0008
353  #define NVREG_MIISTAT_MASK             0x000f
354  #define NVREG_MIISTAT_MASK2            0x000f
355 -       NvRegUnknownSetupReg4 = 0x184,
356 -#define NVREG_UNKSETUP4_VAL    8
357 +       NvRegMIIMask = 0x184,
358 +#define NVREG_MII_LINKCHANGE           0x0008
359  
360         NvRegAdapterControl = 0x188,
361  #define NVREG_ADAPTCTL_START   0x02
362 @@ -291,6 +493,7 @@
363  #define NVREG_WAKEUPFLAGS_ENABLE       0x1111
364  
365         NvRegPatternCRC = 0x204,
366 +#define NV_UNKNOWN_VAL  0x01
367         NvRegPatternMask = 0x208,
368         NvRegPowerCap = 0x268,
369  #define NVREG_POWERCAP_D3SUPP  (1<<30)
370 @@ -304,6 +507,43 @@
371  #define NVREG_POWERSTATE_D1            0x0001
372  #define NVREG_POWERSTATE_D2            0x0002
373  #define NVREG_POWERSTATE_D3            0x0003
374 +       NvRegTxCnt = 0x280,
375 +       NvRegTxZeroReXmt = 0x284,
376 +       NvRegTxOneReXmt = 0x288,
377 +       NvRegTxManyReXmt = 0x28c,
378 +       NvRegTxLateCol = 0x290,
379 +       NvRegTxUnderflow = 0x294,
380 +       NvRegTxLossCarrier = 0x298,
381 +       NvRegTxExcessDef = 0x29c,
382 +       NvRegTxRetryErr = 0x2a0,
383 +       NvRegRxFrameErr = 0x2a4,
384 +       NvRegRxExtraByte = 0x2a8,
385 +       NvRegRxLateCol = 0x2ac,
386 +       NvRegRxRunt = 0x2b0,
387 +       NvRegRxFrameTooLong = 0x2b4,
388 +       NvRegRxOverflow = 0x2b8,
389 +       NvRegRxFCSErr = 0x2bc,
390 +       NvRegRxFrameAlignErr = 0x2c0,
391 +       NvRegRxLenErr = 0x2c4,
392 +       NvRegRxUnicast = 0x2c8,
393 +       NvRegRxMulticast = 0x2cc,
394 +       NvRegRxBroadcast = 0x2d0,
395 +       NvRegTxDef = 0x2d4,
396 +       NvRegTxFrame = 0x2d8,
397 +       NvRegRxCnt = 0x2dc,
398 +       NvRegTxPause = 0x2e0,
399 +       NvRegRxPause = 0x2e4,
400 +       NvRegRxDropFrame = 0x2e8,
401 +
402 +       NvRegVlanControl = 0x300,
403 +#define NVREG_VLANCONTROL_ENABLE       0x2000
404 +       NvRegMSIXMap0 = 0x3e0,
405 +       NvRegMSIXMap1 = 0x3e4,
406 +       NvRegMSIXIrqStatus = 0x3f0,
407 +
408 +       NvRegPowerState2 = 0x600,
409 +#define NVREG_POWERSTATE2_POWERUP_MASK         0x0F11
410 +#define NVREG_POWERSTATE2_POWERUP_REV_A3       0x0001
411  };
412  
413  /* Big endian: should work, but is untested */
414 @@ -315,7 +555,7 @@
415  struct ring_desc_ex {
416         u32 PacketBufferHigh;
417         u32 PacketBufferLow;
418 -       u32 Reserved;
419 +       u32 TxVlan;
420         u32 FlagLen;
421  };
422  
423 @@ -336,7 +576,7 @@
424  #define NV_TX_CARRIERLOST      (1<<27)
425  #define NV_TX_LATECOLLISION    (1<<28)
426  #define NV_TX_UNDERFLOW                (1<<29)
427 -#define NV_TX_ERROR            (1<<30)
428 +#define NV_TX_ERROR            (1<<30) /* logical OR of all errors */
429  #define NV_TX_VALID            (1<<31)
430  
431  #define NV_TX2_LASTPACKET      (1<<29)
432 @@ -347,7 +587,7 @@
433  #define NV_TX2_LATECOLLISION   (1<<27)
434  #define NV_TX2_UNDERFLOW       (1<<28)
435  /* error and valid are the same for both */
436 -#define NV_TX2_ERROR           (1<<30)
437 +#define NV_TX2_ERROR           (1<<30) /* logical OR of all errors */
438  #define NV_TX2_VALID           (1<<31)
439  #define NV_TX2_TSO             (1<<28)
440  #define NV_TX2_TSO_SHIFT       14
441 @@ -356,6 +596,8 @@
442  #define NV_TX2_CHECKSUM_L3     (1<<27)
443  #define NV_TX2_CHECKSUM_L4     (1<<26)
444  
445 +#define NV_TX3_VLAN_TAG_PRESENT (1<<18)
446 +
447  #define NV_RX_DESCRIPTORVALID  (1<<16)
448  #define NV_RX_MISSEDFRAME      (1<<17)
449  #define NV_RX_SUBSTRACT1       (1<<18)
450 @@ -366,7 +608,7 @@
451  #define NV_RX_CRCERR           (1<<27)
452  #define NV_RX_OVERFLOW         (1<<28)
453  #define NV_RX_FRAMINGERR       (1<<29)
454 -#define NV_RX_ERROR            (1<<30)
455 +#define NV_RX_ERROR            (1<<30) /* logical OR of all errors */
456  #define NV_RX_AVAIL            (1<<31)
457  
458  #define NV_RX2_CHECKSUMMASK    (0x1C000000)
459 @@ -383,11 +625,15 @@
460  #define NV_RX2_OVERFLOW                (1<<23)
461  #define NV_RX2_FRAMINGERR      (1<<24)
462  /* error and avail are the same for both */
463 -#define NV_RX2_ERROR           (1<<30)
464 +#define NV_RX2_ERROR           (1<<30) /* logical OR of all errors */
465  #define NV_RX2_AVAIL           (1<<31)
466  
467 +#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
468 +#define NV_RX3_VLAN_TAG_MASK   (0x0000FFFF)
469 +
470  /* Miscelaneous hardware related defines: */
471 -#define NV_PCI_REGSZ           0x270
472 +#define NV_PCI_REGSZ_VER1              0x270
473 +#define NV_PCI_REGSZ_VER2              0x604
474  
475  /* various timeout delays: all in usec */
476  #define NV_TXRX_RESET_DELAY    4
477 @@ -404,6 +650,7 @@
478  #define NV_MIIBUSY_DELAY       50
479  #define NV_MIIPHY_DELAY        10
480  #define NV_MIIPHY_DELAYMAX     10000
481 +#define NV_MAC_RESET_DELAY     64
482  
483  #define NV_WAKEUPPATTERNS      5
484  #define NV_WAKEUPMASKENTRIES   4
485 @@ -411,16 +658,18 @@
486  /* General driver defaults */
487  #define NV_WATCHDOG_TIMEO      (5*HZ)
488  
489 -#define RX_RING                128
490 -#define TX_RING                256
491 +#define RX_RING_DEFAULT                128
492 +#define TX_RING_DEFAULT                64
493 +#define RX_RING_MIN            RX_RING_DEFAULT
494 +#define TX_RING_MIN            TX_RING_DEFAULT
495 +#define RING_MAX_DESC_VER_1    1024
496 +#define RING_MAX_DESC_VER_2_3  16384
497  /* 
498 - * If your nic mysteriously hangs then try to reduce the limits
499 - * to 1/0: It might be required to set NV_TX_LASTPACKET in the
500 - * last valid ring entry. But this would be impossible to
501 - * implement - probably a disassembly error.
502 + * Difference between the get and put pointers for the tx ring.
503 + * This is used to throttle the amount of data outstanding in the
504 + * tx ring.
505   */
506 -#define TX_LIMIT_STOP  255
507 -#define TX_LIMIT_START 254
508 +#define TX_LIMIT_DIFFERENCE    1
509  
510  /* rx/tx mac addr + type + vlan + align + slack*/
511  #define NV_RX_HEADERS          (64)
512 @@ -434,6 +683,7 @@
513  #define OOM_REFILL     (1+HZ/20)
514  #define POLL_WAIT      (1+HZ/100)
515  #define LINK_TIMEOUT   (3*HZ)
516 +#define STATS_INTERVAL (10*HZ)
517  
518  /* 
519   * desc_ver values:
520 @@ -449,16 +699,37 @@
521  /* PHY defines */
522  #define PHY_OUI_MARVELL        0x5043
523  #define PHY_OUI_CICADA 0x03f1
524 +#define PHY_OUI_VITESSE        0x01c1
525  #define PHYID1_OUI_MASK        0x03ff
526  #define PHYID1_OUI_SHFT        6
527  #define PHYID2_OUI_MASK        0xfc00
528  #define PHYID2_OUI_SHFT        10
529 -#define PHY_INIT1      0x0f000
530 -#define PHY_INIT2      0x0e00
531 -#define PHY_INIT3      0x01000
532 -#define PHY_INIT4      0x0200
533 -#define PHY_INIT5      0x0004
534 -#define PHY_INIT6      0x02000
535 +#define PHYID2_MODEL_MASK              0x03f0
536 +#define PHY_MODEL_MARVELL_E3016                0x220
537 +#define PHY_MARVELL_E3016_INITMASK     0x0300
538 +#define PHY_CICADA_INIT1       0x0f000
539 +#define PHY_CICADA_INIT2       0x0e00
540 +#define PHY_CICADA_INIT3       0x01000
541 +#define PHY_CICADA_INIT4       0x0200
542 +#define PHY_CICADA_INIT5       0x0004
543 +#define PHY_CICADA_INIT6       0x02000
544 +#define PHY_VITESSE_INIT_REG1  0x1f
545 +#define PHY_VITESSE_INIT_REG2  0x10
546 +#define PHY_VITESSE_INIT_REG3  0x11
547 +#define PHY_VITESSE_INIT_REG4  0x12
548 +#define PHY_VITESSE_INIT_MSK1  0xc
549 +#define PHY_VITESSE_INIT_MSK2  0x0180
550 +#define PHY_VITESSE_INIT1      0x52b5
551 +#define PHY_VITESSE_INIT2      0xaf8a
552 +#define PHY_VITESSE_INIT3      0x8
553 +#define PHY_VITESSE_INIT4      0x8f8a
554 +#define PHY_VITESSE_INIT5      0xaf86
555 +#define PHY_VITESSE_INIT6      0x8f86
556 +#define PHY_VITESSE_INIT7      0xaf82
557 +#define PHY_VITESSE_INIT8      0x0100
558 +#define PHY_VITESSE_INIT9      0x8f82
559 +#define PHY_VITESSE_INIT10     0x0
560 +
561  #define PHY_GIGABIT    0x0100
562  
563  #define PHY_TIMEOUT    0x1
564 @@ -468,14 +739,148 @@
565  #define PHY_1000       0x2
566  #define PHY_HALF       0x100
567  
568 -/* FIXME: MII defines that should be added to <linux/mii.h> */
569 -#define MII_1000BT_CR  0x09
570 -#define MII_1000BT_SR  0x0a
571 -#define ADVERTISE_1000FULL     0x0200
572 -#define ADVERTISE_1000HALF     0x0100
573 -#define LPA_1000FULL   0x0800
574 -#define LPA_1000HALF   0x0400
575 +#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
576 +#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
577 +#define NV_PAUSEFRAME_RX_ENABLE  0x0004
578 +#define NV_PAUSEFRAME_TX_ENABLE  0x0008
579 +#define NV_PAUSEFRAME_RX_REQ     0x0010
580 +#define NV_PAUSEFRAME_TX_REQ     0x0020
581 +#define NV_PAUSEFRAME_AUTONEG    0x0040
582 +
583 +/* MSI/MSI-X defines */
584 +#define NV_MSI_X_MAX_VECTORS  8
585 +#define NV_MSI_X_VECTORS_MASK 0x000f
586 +#define NV_MSI_CAPABLE        0x0010
587 +#define NV_MSI_X_CAPABLE      0x0020
588 +#define NV_MSI_ENABLED        0x0040
589 +#define NV_MSI_X_ENABLED      0x0080
590 +
591 +#define NV_MSI_X_VECTOR_ALL   0x0
592 +#define NV_MSI_X_VECTOR_RX    0x0
593 +#define NV_MSI_X_VECTOR_TX    0x1
594 +#define NV_MSI_X_VECTOR_OTHER 0x2
595 +
596 +/* statistics */
597 +#define NV_STATS_COUNT_SW 10
598 +
599 +#define NVLAN_DISABLE_ALL_FEATURES  do { \
600 +       msi = NV_MSI_INT_DISABLED; \
601 +       msix = NV_MSIX_INT_DISABLED; \
602 +       scatter_gather = NV_SCATTER_GATHER_DISABLED; \
603 +       tso_offload = NV_TSO_DISABLED; \
604 +       tx_checksum_offload = NV_TX_CHECKSUM_DISABLED; \
605 +       rx_checksum_offload = NV_RX_CHECKSUM_DISABLED; \
606 +       tx_flow_control = NV_TX_FLOW_CONTROL_DISABLED; \
607 +       rx_flow_control = NV_RX_FLOW_CONTROL_DISABLED; \
608 +       wol = NV_WOL_DISABLED; \
609 +       tagging_8021pq = NV_8021PQ_DISABLED; \
610 +} while (0)
611 +   
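
The do { ... } while (0) wrapper makes the multi-statement macro behave as a single statement, which matters when it is used as an unbraced if/else body. A minimal illustration; forced_safe_mode is a hypothetical flag:

if (forced_safe_mode)
        NVLAN_DISABLE_ALL_FEATURES;     /* one statement; the ';' closes the while (0) */
else
        printk(KERN_INFO "forcedeth: features left at defaults\n");
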
612 +struct nv_ethtool_str {
613 +       char name[ETH_GSTRING_LEN];
614 +};
615 +
616 +static const struct nv_ethtool_str nv_estats_str[] = {
617 +       { "tx_dropped" },
618 +       { "tx_fifo_errors" },
619 +       { "tx_carrier_errors" },
620 +       { "tx_packets" },
621 +       { "tx_bytes" },
622 +       { "rx_crc_errors" },
623 +       { "rx_over_errors" },
624 +       { "rx_errors_total" },
625 +       { "rx_packets" },
626 +       { "rx_bytes" },
627 +       
628 +       /* hardware counters */
629 +       { "tx_zero_rexmt" },
630 +       { "tx_one_rexmt" },
631 +       { "tx_many_rexmt" },
632 +       { "tx_late_collision" },
633 +       { "tx_excess_deferral" },
634 +       { "tx_retry_error" },
635 +       { "rx_frame_error" },
636 +       { "rx_extra_byte" },
637 +       { "rx_late_collision" },
638 +       { "rx_runt" },
639 +       { "rx_frame_too_long" },
640 +       { "rx_frame_align_error" },
641 +       { "rx_length_error" },
642 +       { "rx_unicast" },
643 +       { "rx_multicast" },
644 +       { "rx_broadcast" },
645 +       { "tx_deferral" },
646 +       { "tx_pause" },
647 +       { "rx_pause" },
648 +       { "rx_drop_frame" }
649 +};
650 +
651 +struct nv_ethtool_stats {
652 +       u64 tx_dropped;
653 +       u64 tx_fifo_errors;
654 +       u64 tx_carrier_errors;
655 +       u64 tx_packets;
656 +       u64 tx_bytes;
657 +       u64 rx_crc_errors;
658 +       u64 rx_over_errors;
659 +       u64 rx_errors_total;
660 +       u64 rx_packets;
661 +       u64 rx_bytes;
662 +
663 +       /* hardware counters */
664 +       u64 tx_zero_rexmt;
665 +       u64 tx_one_rexmt;
666 +       u64 tx_many_rexmt;
667 +       u64 tx_late_collision;
668 +       u64 tx_excess_deferral;
669 +       u64 tx_retry_error;
670 +       u64 rx_frame_error;
671 +       u64 rx_extra_byte;
672 +       u64 rx_late_collision;
673 +       u64 rx_runt;
674 +       u64 rx_frame_too_long;
675 +       u64 rx_frame_align_error;
676 +       u64 rx_length_error;
677 +       u64 rx_unicast;
678 +       u64 rx_multicast;
679 +       u64 rx_broadcast;
680 +       u64 tx_deferral;
681 +       u64 tx_pause;
682 +       u64 rx_pause;
683 +       u64 rx_drop_frame;
684 +};
685 +
686 +/* diagnostics */
687 +#define NV_TEST_COUNT_BASE 3
688 +#define NV_TEST_COUNT_EXTENDED 4
689 +
690 +static const struct nv_ethtool_str nv_etests_str[] = {
691 +       { "link      (online/offline)" },
692 +       { "register  (offline)       " },
693 +       { "interrupt (offline)       " },
694 +       { "loopback  (offline)       " }
695 +};
696 +
697 +struct register_test {
698 +       u32 reg;
699 +       u32 mask;
700 +};
701 +
702 +static const struct register_test nv_registers_test[] = {
703 +       { NvRegUnknownSetupReg6, 0x01 },
704 +       { NvRegMisc1, 0x03c },
705 +       { NvRegOffloadConfig, 0x03ff },
706 +       { NvRegMulticastAddrA, 0xffffffff },
707 +       { NvRegTxWatermark, 0x0ff },
708 +       { NvRegWakeUpFlags, 0x07777 },
709 +       { 0, 0 }
710 +};
711  
712 +struct nv_skb_map {
713 +       struct sk_buff *skb;
714 +       dma_addr_t dma;
715 +       unsigned int dma_len;
716 +};
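
Each ring slot gets an nv_skb_map so the completion path can find and unmap the buffer it belongs to. A sketch of how a tx slot would be filled, assuming the PCI DMA API of the era and the put_tx_ctx pointer declared below:

np->put_tx_ctx->skb = skb;
np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
                                     skb->len, PCI_DMA_TODEVICE);
np->put_tx_ctx->dma_len = skb->len;
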
717  
718  /*
719   * SMP locking:
720 @@ -490,11 +895,48 @@
721  
722  /* in dev: base, irq */
723  struct fe_priv {
724 +
725 +       /* fields used in the fast path are grouped together
726 +        * for better cache performance
727 +        */
728         spinlock_t lock;
729 +       void __iomem *base;
730 +       struct pci_dev *pci_dev;
731 +       u32 txrxctl_bits;
732 +       int stop_tx;
733 +       int need_linktimer;
734 +       unsigned long link_timeout;
735 +       u32 irqmask;
736 +       u32 msi_flags;
737 +
738 +       unsigned int rx_buf_sz;
739 +       struct vlan_group *vlangrp;
740 +       int tx_ring_size;
741 +       int rx_csum;
742 +
743 +       /*
744 +        * rx specific fields in fast path
745 +        */
746 +       ring_type get_rx __attribute__((aligned(L1_CACHE_BYTES)));
747 +       ring_type put_rx, first_rx, last_rx;
748 +       struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
749 +       struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
750 +       
751 +       /*
752 +        * tx specific fields in fast path
753 +        */
754 +       ring_type get_tx __attribute__((aligned(L1_CACHE_BYTES)));
755 +       ring_type put_tx, first_tx, last_tx;
756 +       struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
757 +       struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
758 +
759 +       struct nv_skb_map *rx_skb;
760 +       struct nv_skb_map *tx_skb;
761  
762         /* General data:
763          * Locking: spin_lock(&np->lock); */
764         struct net_device_stats stats;
765 +       struct nv_ethtool_stats estats;
766         int in_shutdown;
767         u32 linkspeed;
768         int duplex;
769 @@ -503,44 +945,46 @@
770         int phyaddr;
771         int wolenabled;
772         unsigned int phy_oui;
773 +       unsigned int phy_model;
774         u16 gigabit;
775 +       int intr_test;
776 +       int recover_error;
777  
778         /* General data: RO fields */
779         dma_addr_t ring_addr;
780 -       struct pci_dev *pci_dev;
781         u32 orig_mac[2];
782 -       u32 irqmask;
783         u32 desc_ver;
784 -       u32 txrxctl_bits;
785 -
786 -       void __iomem *base;
787 +       u32 vlanctl_bits;
788 +       u32 driver_data;
789 +       u32 register_size;
790 +       u32 mac_in_use;
791  
792         /* rx specific fields.
793          * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
794          */
795         ring_type rx_ring;
796 -       unsigned int cur_rx, refill_rx;
797 -       struct sk_buff *rx_skbuff[RX_RING];
798 -       dma_addr_t rx_dma[RX_RING];
799 -       unsigned int rx_buf_sz;
800         unsigned int pkt_limit;
801         struct timer_list oom_kick;
802         struct timer_list nic_poll;
803 -
804 -       /* media detection workaround.
805 -        * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
806 -        */
807 -       int need_linktimer;
808 -       unsigned long link_timeout;
809 +       struct timer_list stats_poll;
810 +       u32 nic_poll_irq;
811 +       int rx_ring_size;
812 +       u32 rx_len_errors;
813         /*
814          * tx specific fields.
815          */
816         ring_type tx_ring;
817 -       unsigned int next_tx, nic_tx;
818 -       struct sk_buff *tx_skbuff[TX_RING];
819 -       dma_addr_t tx_dma[TX_RING];
820 -       unsigned int tx_dma_len[TX_RING];
821         u32 tx_flags;
822 +       int tx_limit_start;
823 +       int tx_limit_stop;
824 +
825 +
826 +       /* msi/msi-x fields */
827 +       struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
828 +
829 +       /* flow control */
830 +       u32 pause_flags;
831 +       u32 led_stats[3];
832  };
833  
834  /*
835 @@ -555,8 +999,10 @@
836   * Throughput Mode: Every tx and rx packet will generate an interrupt.
837   * CPU Mode: Interrupts are controlled by a timer.
838   */
839 -#define NV_OPTIMIZATION_MODE_THROUGHPUT 0
840 -#define NV_OPTIMIZATION_MODE_CPU        1
841 +enum {
842 +       NV_OPTIMIZATION_MODE_THROUGHPUT, 
843 +       NV_OPTIMIZATION_MODE_CPU
844 +};
845  static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
846  
847  /*
848 @@ -568,14 +1014,213 @@
849   */
850  static int poll_interval = -1;
851  
852 +/*
853 + * MSI interrupts
854 + */
855 +enum {
856 +       NV_MSI_INT_DISABLED,
857 +       NV_MSI_INT_ENABLED
858 +};
859 +
860 +#ifdef CONFIG_PCI_MSI 
861 +static int msi = NV_MSI_INT_ENABLED;
862 +#else
863 +static int msi = NV_MSI_INT_DISABLED;
864 +#endif
865 +
866 +/*
867 + * MSIX interrupts
868 + */
869 +enum {
870 +       NV_MSIX_INT_DISABLED, 
871 +       NV_MSIX_INT_ENABLED
872 +};
873 +
874 +#ifdef CONFIG_PCI_MSI 
875 +static int msix = NV_MSIX_INT_ENABLED;
876 +#else
877 +static int msix = NV_MSIX_INT_DISABLED;
878 +#endif
879 +/*
880 + * PHY Speed and Duplex
881 + */
882 +enum {
883 +       NV_SPEED_DUPLEX_AUTO,
884 +       NV_SPEED_DUPLEX_10_HALF_DUPLEX,
885 +       NV_SPEED_DUPLEX_10_FULL_DUPLEX,
886 +       NV_SPEED_DUPLEX_100_HALF_DUPLEX,
887 +       NV_SPEED_DUPLEX_100_FULL_DUPLEX,
888 +       NV_SPEED_DUPLEX_1000_FULL_DUPLEX
889 +};
890 +static int speed_duplex = NV_SPEED_DUPLEX_AUTO;
891 +
892 +/*
893 + * PHY autonegotiation
894 + */
895 +static int autoneg = AUTONEG_ENABLE;
896 +
897 +/*
898 + * Scatter gather
899 + */
900 +enum {
901 +       NV_SCATTER_GATHER_DISABLED,
902 +       NV_SCATTER_GATHER_ENABLED
903 +};
904 +static int scatter_gather = NV_SCATTER_GATHER_ENABLED;
905 +
906 +/*
907 + * TCP Segmentation Offload (TSO)
908 + */
909 +enum {
910 +       NV_TSO_DISABLED,
911 +       NV_TSO_ENABLED
912 +};
913 +static int tso_offload = NV_TSO_ENABLED;
914 +
915 +/*
916 + * MTU settings
917 + */
918 +static int mtu = ETH_DATA_LEN;
919 +
920 +/*
921 + * Tx checksum offload
922 + */
923 +enum {
924 +       NV_TX_CHECKSUM_DISABLED, 
925 +       NV_TX_CHECKSUM_ENABLED 
926 +};
927 +static int tx_checksum_offload = NV_TX_CHECKSUM_ENABLED;
928 +
929 +/*
930 + * Rx checksum offload
931 + */
932 +enum {
933 +       NV_RX_CHECKSUM_DISABLED, 
934 +       NV_RX_CHECKSUM_ENABLED 
935 +};
936 +static int rx_checksum_offload = NV_RX_CHECKSUM_ENABLED;
937 +
938 +/*
939 + * Tx ring size
940 + */
941 +static int tx_ring_size = TX_RING_DEFAULT;
942 +
943 +/*
944 + * Rx ring size
945 + */
946 +static int rx_ring_size = RX_RING_DEFAULT;
947 +
948 +/*
949 + * Tx flow control
950 + */
951 +enum {
952 +       NV_TX_FLOW_CONTROL_DISABLED, 
953 +       NV_TX_FLOW_CONTROL_ENABLED
954 +};
955 +static int tx_flow_control = NV_TX_FLOW_CONTROL_ENABLED;
956 +
957 +/*
958 + * Rx flow control
959 + */
960 +enum {
961 +       NV_RX_FLOW_CONTROL_DISABLED, 
962 +       NV_RX_FLOW_CONTROL_ENABLED
963 +};
964 +static int rx_flow_control = NV_RX_FLOW_CONTROL_ENABLED;
965 +
966 +/*
967 + * DMA 64bit
968 + */
969 +enum {
970 +       NV_DMA_64BIT_DISABLED,
971 +       NV_DMA_64BIT_ENABLED
972 +};
973 +static int dma_64bit = NV_DMA_64BIT_ENABLED;
974 +
975 +/*
976 + * Wake On Lan
977 + */
978 +enum {
979 +       NV_WOL_DISABLED,
980 +       NV_WOL_ENABLED
981 +};
982 +static int wol = NV_WOL_DISABLED;
983 +
984 +/*
985 + * Tagging 802.1pq
986 + */
987 +enum {
988 +       NV_8021PQ_DISABLED,
989 +       NV_8021PQ_ENABLED
990 +};
991 +static int tagging_8021pq = NV_8021PQ_ENABLED;
992 +
993 +#if NVVER < RHES4
994 +static inline unsigned long nv_msecs_to_jiffies(const unsigned int m)
995 +{
996 +#if HZ <= 1000 && !(1000 % HZ)
997 +        return (m + (1000 / HZ) - 1) / (1000 / HZ);
998 +#elif HZ > 1000 && !(HZ % 1000)
999 +        return m * (HZ / 1000);
1000 +#else
1001 +        return (m * HZ + 999) / 1000;
1002 +#endif
1003 +}
1004 +#endif
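
All three branches compute ceil(m * HZ / 1000) while avoiding overflow or a needless division; a worked example:

/* Assuming HZ = 250 (1000/HZ = 4 ms per jiffy):
 *   nv_msecs_to_jiffies(10) = (10 + 4 - 1) / 4 = 3 jiffies = 12 ms
 * The (1000/HZ - 1) term rounds the division up, so the resulting delay
 * is never shorter than requested. */
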
1005 +
1006 +static void nv_msleep(unsigned int msecs)
1007 +{
1008 +#if NVVER > SLES9 
1009 +       msleep(msecs);
1010 +#else
1011 +       unsigned long timeout = nv_msecs_to_jiffies(msecs);
1012 +
1013 +       while (timeout) {
1014 +               set_current_state(TASK_UNINTERRUPTIBLE);
1015 +               timeout = schedule_timeout(timeout);
1016 +       }
1017 +#endif
1018 +}
1019 +
1020  static inline struct fe_priv *get_nvpriv(struct net_device *dev)
1021  {
1022 +#if NVVER > RHES3 
1023         return netdev_priv(dev);
1024 +#else
1025 +       return (struct fe_priv *) dev->priv;
1026 +#endif
1027 +}
1028 +
1029 +static void __init quirk_nforce_network_class(struct pci_dev *pdev)
1030 +{
1031 +       /* Some implementations of the nVidia network controllers
1032 +        * show up as bridges when we need to see them as network
1033 +        * devices.
1034 +        */
1035 +
1036 +       /* If this is already known as a network ctlr, do nothing. */
1037 +       if ((pdev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET)
1038 +               return;
1039 +
1040 +       if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_OTHER) {
1041 +               u8      c;
1042 +
1043 +               /* Clearing bit 6 of the register at 0xf8
1044 +                * selects Ethernet device class
1045 +                */
1046 +               pci_read_config_byte(pdev, 0xf8, &c);
1047 +               c &= 0xbf;
1048 +               pci_write_config_byte(pdev, 0xf8, c);
1049 +
1050 +               /* sysfs needs pdev->class to be set correctly */
1051 +               pdev->class &= 0x0000ff;
1052 +               pdev->class |= (PCI_CLASS_NETWORK_ETHERNET << 8);
1053 +       }
1054  }
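
Quirks of this kind normally run from the PCI fixup tables before the driver probes. A sketch of how such a hook is registered; whether this patch wires it up exactly this way is not shown in this excerpt, and the device ID is illustrative:

DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
                        PCI_DEVICE_ID_NVIDIA_NVENET_15,
                        quirk_nforce_network_class);
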
1055  
1056  static inline u8 __iomem *get_hwbase(struct net_device *dev)
1057  {
1058 -       return ((struct fe_priv *)netdev_priv(dev))->base;
1059 +       return ((struct fe_priv *)get_nvpriv(dev))->base;
1060  }
1061  
1062  static inline void pci_push(u8 __iomem *base)
1063 @@ -613,78 +1258,247 @@
1064         return 0;
1065  }
1066  
1067 -#define MII_READ       (-1)
1068 -/* mii_rw: read/write a register on the PHY.
1069 - *
1070 - * Caller must guarantee serialization
1071 - */
1072 -static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1073 +#define NV_SETUP_RX_RING 0x01
1074 +#define NV_SETUP_TX_RING 0x02
1075 +
1076 +static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
1077  {
1078 +       struct fe_priv *np = get_nvpriv(dev);
1079         u8 __iomem *base = get_hwbase(dev);
1080 -       u32 reg;
1081 -       int retval;
1082 -
1083 -       writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1084  
1085 -       reg = readl(base + NvRegMIIControl);
1086 -       if (reg & NVREG_MIICTL_INUSE) {
1087 -               writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
1088 -               udelay(NV_MIIBUSY_DELAY);
1089 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1090 +               if (rxtx_flags & NV_SETUP_RX_RING) {
1091 +                       writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
1092 +               }
1093 +               if (rxtx_flags & NV_SETUP_TX_RING) {
1094 +                       writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1095 +               }
1096 +       } else {
1097 +               if (rxtx_flags & NV_SETUP_RX_RING) {
1098 +                       writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
1099 +                       writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
1100 +               }
1101 +               if (rxtx_flags & NV_SETUP_TX_RING) {
1102 +                       writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1103 +                       writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
1104 +               }
1105         }
1106 +}
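
For descriptor version 3 hardware the ring base is programmed as two 32-bit registers. The 64-bit split in isolation, ignoring the byte-order conversion used above; np and base are as in setup_hw_rings:

u64 addr = (u64)np->ring_addr;

writel((u32)addr,         base + NvRegRxRingPhysAddr);     /* low 32 bits  */
writel((u32)(addr >> 32), base + NvRegRxRingPhysAddrHigh); /* high 32 bits */
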
1107  
1108 -       reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
1109 -       if (value != MII_READ) {
1110 -               writel(value, base + NvRegMIIData);
1111 -               reg |= NVREG_MIICTL_WRITE;
1112 -       }
1113 -       writel(reg, base + NvRegMIIControl);
1114 +static void free_rings(struct net_device *dev)
1115 +{
1116 +       struct fe_priv *np = get_nvpriv(dev);
1117  
1118 -       if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
1119 -                       NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
1120 -               dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
1121 -                               dev->name, miireg, addr);
1122 -               retval = -1;
1123 -       } else if (value != MII_READ) {
1124 -               /* it was a write operation - fewer failures are detectable */
1125 -               dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
1126 -                               dev->name, value, miireg, addr);
1127 -               retval = 0;
1128 -       } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
1129 -               dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
1130 -                               dev->name, miireg, addr);
1131 -               retval = -1;
1132 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1133 +               if(np->rx_ring.orig)
1134 +                       pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
1135 +                                           np->rx_ring.orig, np->ring_addr);
1136         } else {
1137 -               retval = readl(base + NvRegMIIData);
1138 -               dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
1139 -                               dev->name, miireg, addr, retval);
1140 +               if (np->rx_ring.ex)
1141 +                       pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
1142 +                                           np->rx_ring.ex, np->ring_addr);
1143         }
1144 -
1145 -       return retval;
1146 +       if (np->rx_skb)
1147 +               kfree(np->rx_skb);
1148 +       if (np->tx_skb)
1149 +               kfree(np->tx_skb);      
1150  }
1151  
1152 -static int phy_reset(struct net_device *dev)
1153 +static int using_multi_irqs(struct net_device *dev)
1154  {
1155 -       struct fe_priv *np = netdev_priv(dev);
1156 -       u32 miicontrol;
1157 -       unsigned int tries = 0;
1158 +       struct fe_priv *np = get_nvpriv(dev);
1159 +
1160 +       if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
1161 +           ((np->msi_flags & NV_MSI_X_ENABLED) && 
1162 +            ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
1163 +               return 0;
1164 +       else
1165 +               return 1;
1166 +}
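
The condition reads more easily inverted: multiple irqs are in play only when MSI-X is enabled with more than one vector. A logically equivalent form, assuming at least one vector is always granted whenever MSI-X is enabled:

static int using_multi_irqs(struct net_device *dev)
{
        struct fe_priv *np = get_nvpriv(dev);

        return (np->msi_flags & NV_MSI_X_ENABLED) &&
               (np->msi_flags & NV_MSI_X_VECTORS_MASK) > 1;
}
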
1167 +
1168 +static void nv_enable_irq(struct net_device *dev)
1169 +{
1170 +       struct fe_priv *np = get_nvpriv(dev);
1171 +
1172 +       dprintk(KERN_DEBUG "%s: nv_enable_irq: begin\n", dev->name);
1173 +       /* enable whichever irq lines this device is configured to use */
1174 +       if (!using_multi_irqs(dev)) {
1175 +               if (np->msi_flags & NV_MSI_X_ENABLED)
1176 +                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1177 +               else
1178 +                       enable_irq(dev->irq);
1179 +       } else {
1180 +               enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1181 +               enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1182 +               enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1183 +       }
1184 +}
1185 +
1186 +static void nv_disable_irq(struct net_device *dev)
1187 +{
1188 +       struct fe_priv *np = get_nvpriv(dev);
1189 +
1190 +       dprintk(KERN_DEBUG "%s: nv_disable_irq: begin\n", dev->name);
1191 +       if (!using_multi_irqs(dev)) {
1192 +               if (np->msi_flags & NV_MSI_X_ENABLED)
1193 +                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1194 +               else
1195 +                       disable_irq(dev->irq);
1196 +       } else {
1197 +               disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1198 +               disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1199 +               disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1200 +       }
1201 +}
1202 +
1203 +/* In MSIX mode, a write to irqmask behaves as XOR */
1204 +static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
1205 +{
1206 +       u8 __iomem *base = get_hwbase(dev);
1207 +
1208 +       writel(mask, base + NvRegIrqMask);
1209 +}
1210 +
1211 +static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
1212 +{
1213 +       struct fe_priv *np = get_nvpriv(dev);
1214 +       u8 __iomem *base = get_hwbase(dev);
1215 +
1216 +       if (np->msi_flags & NV_MSI_X_ENABLED) {
1217 +               writel(mask, base + NvRegIrqMask);
1218 +       } else {
1219 +               if (np->msi_flags & NV_MSI_ENABLED)
1220 +                       writel(0, base + NvRegMSIIrqMask);
1221 +               writel(0, base + NvRegIrqMask);
1222 +       }
1223 +}
1224 +
1225 +#define MII_READ       (-1)
1226 +/* mii_rw: read/write a register on the PHY.
1227 + *
1228 + * Caller must guarantee serialization
1229 + */
1230 +static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1231 +{
1232 +       u8 __iomem *base = get_hwbase(dev);
1233 +       u32 reg;
1234 +       int retval;
1235 +
1236 +       writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1237 +
1238 +       reg = readl(base + NvRegMIIControl);
1239 +       if (reg & NVREG_MIICTL_INUSE) {
1240 +               writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
1241 +               udelay(NV_MIIBUSY_DELAY);
1242 +       }
1243 +
1244 +       reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
1245 +       if (value != MII_READ) {
1246 +               writel(value, base + NvRegMIIData);
1247 +               reg |= NVREG_MIICTL_WRITE;
1248 +       }
1249 +       writel(reg, base + NvRegMIIControl);
1250 +
1251 +       if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
1252 +                       NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
1253 +               dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
1254 +                               dev->name, miireg, addr);
1255 +               retval = -1;
1256 +       } else if (value != MII_READ) {
1257 +               /* it was a write operation - fewer failures are detectable */
1258 +               dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
1259 +                               dev->name, value, miireg, addr);
1260 +               retval = 0;
1261 +       } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
1262 +               dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
1263 +                               dev->name, miireg, addr);
1264 +               retval = -1;
1265 +       } else {
1266 +               retval = readl(base + NvRegMIIData);
1267 +               dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
1268 +                               dev->name, miireg, addr, retval);
1269 +       }
1270 +
1271 +       return retval;
1272 +}
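
The value argument doubles as the opcode: MII_READ (-1) performs a read, anything else is written to the register. Typical calls, illustrative only; the caller must already hold the required lock:

int bmsr;

bmsr = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);    /* read link status */
if (bmsr != -1 && (bmsr & BMSR_LSTATUS))
        mii_rw(dev, np->phyaddr, MII_BMCR,
               BMCR_ANENABLE | BMCR_ANRESTART);          /* restart autonegotiation */
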
1273 +
1274 +static void nv_save_LED_stats(struct net_device *dev)
1275 +{
1276 +       struct fe_priv *np = get_nvpriv(dev);
1277 +       u32 reg = 0;
1278 +       u32 value = 0;
1279 +       int i = 0;
1280 +
1281 +       reg = Mv_Page_Address;
1282 +       value = 3;
1283 +       mii_rw(dev, np->phyaddr, reg, value);
1284 +       udelay(5);
1285 +
1286 +       reg = Mv_LED_Control;
1287 +       for (i = 0; i < 3; i++) {
1288 +               np->led_stats[i] = mii_rw(dev, np->phyaddr, reg + i, MII_READ);
1289 +               dprintk(KERN_DEBUG "%s: save LED reg%d: value=0x%x\n", dev->name, reg + i, np->led_stats[i]);
1290 +       }
1291 +
1292 +       reg = Mv_Page_Address;
1293 +       value = 0;
1294 +       mii_rw(dev, np->phyaddr, reg, value);
1295 +       udelay(5);
1296 +}
1297 +
1298 +static void nv_restore_LED_stats(struct net_device *dev)
1299 +{
1300 +
1301 +       struct fe_priv *np = get_nvpriv(dev);
1302 +       u32 reg = 0;
1303 +       u32 value = 0;
1304 +       int i = 0;
1305  
1306 -       miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1307 -       miicontrol |= BMCR_RESET;
1308 +       reg = Mv_Page_Address;
1309 +       value = 3;
1310 +       mii_rw(dev, np->phyaddr, reg, value);
1311 +       udelay(5);
1312 +
1313 +       reg = Mv_LED_Control;
1314 +       for (i = 0; i < 3; i++) {
1315 +               mii_rw(dev, np->phyaddr, reg + i, np->led_stats[i]);
1316 +               udelay(1);
1317 +               dprintk(KERN_DEBUG "%s: restore LED reg%d: value=0x%x\n", dev->name, reg + i, np->led_stats[i]);
1318 +       }
1319 +
1320 +       reg = Mv_Page_Address;
1321 +       value = 0;
1322 +       mii_rw(dev, np->phyaddr, reg, value);
1323 +       udelay(5);
1324 +}
1325 +
1326 +static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1327 +{
1328 +       struct fe_priv *np = get_nvpriv(dev);
1329 +       u32 miicontrol;
1330 +       unsigned int tries = 0;
1331 +
1332 +       dprintk(KERN_DEBUG "%s: phy_reset: begin\n", dev->name);
1333 +       /* save the Marvell LED registers: the reset below clobbers them */
1334 +       nv_save_LED_stats(dev);
1335 +       miicontrol = BMCR_RESET | bmcr_setup;
1336         if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
1337                 return -1;
1338         }
1339  
1340         /* wait for 500ms */
1341 -       msleep(500);
1342 +       nv_msleep(500);
1343  
1344         /* must wait till reset is deasserted */
1345         while (miicontrol & BMCR_RESET) {
1346 -               msleep(10);
1347 +               nv_msleep(10);
1348                 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1349                 /* FIXME: 100 tries seem excessive */
1350                 if (tries++ > 100)
1351                         return -1;
1352         }
1353 +       nv_restore_LED_stats(dev);
1354 +
1355         return 0;
1356  }
1357  
1358 @@ -694,9 +1508,36 @@
1359         u8 __iomem *base = get_hwbase(dev);
1360         u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
1361  
1362 +       dprintk(KERN_DEBUG "%s: phy_init: begin\n", dev->name);
1363 +       /* phy errata for E3016 phy */
1364 +       if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1365 +               reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1366 +               reg &= ~PHY_MARVELL_E3016_INITMASK;
1367 +               if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1368 +                       printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
1369 +                       return PHY_ERROR;
1370 +               }
1371 +       }
1372 +
1373         /* set advertise register */
1374         reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1375 -       reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
1376 +       reg &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1377 +       if (speed_duplex == NV_SPEED_DUPLEX_AUTO)
1378 +               reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL);
1379 +       if (speed_duplex == NV_SPEED_DUPLEX_10_HALF_DUPLEX)
1380 +               reg |= ADVERTISE_10HALF;
1381 +       if (speed_duplex == NV_SPEED_DUPLEX_10_FULL_DUPLEX)
1382 +               reg |= ADVERTISE_10FULL;
1383 +       if (speed_duplex == NV_SPEED_DUPLEX_100_HALF_DUPLEX)
1384 +               reg |= ADVERTISE_100HALF;
1385 +       if (speed_duplex == NV_SPEED_DUPLEX_100_FULL_DUPLEX)
1386 +               reg |= ADVERTISE_100FULL;
1387 +       if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
1388 +               reg |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1389 +       if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
1390 +               reg |= ADVERTISE_PAUSE_ASYM;
1391 +       np->fixed_mode = reg;
1392 +
1393         if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1394                 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
1395                 return PHY_ERROR;
1396 @@ -709,14 +1550,18 @@
1397         mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1398         if (mii_status & PHY_GIGABIT) {
1399                 np->gigabit = PHY_GIGABIT;
1400 -               mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
1401 +               mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
1402                 mii_control_1000 &= ~ADVERTISE_1000HALF;
1403 -               if (phyinterface & PHY_RGMII)
1404 +               if (phyinterface & PHY_RGMII && 
1405 +                   (speed_duplex == NV_SPEED_DUPLEX_AUTO || 
1406 +                    (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && autoneg == AUTONEG_ENABLE)))
1407                         mii_control_1000 |= ADVERTISE_1000FULL;
1408 -               else
1409 +               else {
1410 +                       if (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && autoneg == AUTONEG_DISABLE)
1411 +                               printk(KERN_INFO "%s: 1000 Mbps full duplex only allowed with autoneg\n", pci_name(np->pci_dev));
1412                         mii_control_1000 &= ~ADVERTISE_1000FULL;
1413 -
1414 -               if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
1415 +               }
1416 +               if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1417                         printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1418                         return PHY_ERROR;
1419                 }
1420 @@ -724,8 +1569,25 @@
1421         else
1422                 np->gigabit = 0;
1423  
1424 -       /* reset the phy */
1425 -       if (phy_reset(dev)) {
1426 +       mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1427 +       if (autoneg == AUTONEG_DISABLE){
1428 +               np->pause_flags &= ~(NV_PAUSEFRAME_RX_ENABLE | NV_PAUSEFRAME_TX_ENABLE);
1429 +               if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)
1430 +                       np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
1431 +               if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
1432 +                       np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
1433 +               mii_control &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
1434 +               if (reg & (ADVERTISE_10FULL|ADVERTISE_100FULL))
1435 +                       mii_control |= BMCR_FULLDPLX;
1436 +               if (reg & (ADVERTISE_100HALF|ADVERTISE_100FULL))
1437 +                       mii_control |= BMCR_SPEED100;
1438 +       } else {
1439 +               mii_control |= BMCR_ANENABLE;
1440 +       }
1441 +
1442 +       /* reset the phy and set up BMCR
1443 +        * (certain PHYs need the reset issued together with the new values) */
1444 +       if (phy_reset(dev, mii_control)) {
1445                 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
1446                 return PHY_ERROR;
1447         }
1448 @@ -733,14 +1595,14 @@
1449         /* phy vendor specific configuration */
1450         if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
1451                 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1452 -               phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
1453 -               phy_reserved |= (PHY_INIT3 | PHY_INIT4);
1454 +               phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1455 +               phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
1456                 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
1457                         printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1458                         return PHY_ERROR;
1459                 }
1460                 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1461 -               phy_reserved |= PHY_INIT5;
1462 +               phy_reserved |= PHY_CICADA_INIT5;
1463                 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
1464                         printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1465                         return PHY_ERROR;
1466 @@ -748,18 +1610,92 @@
1467         }
1468         if (np->phy_oui == PHY_OUI_CICADA) {
1469                 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1470 -               phy_reserved |= PHY_INIT6;
1471 +               phy_reserved |= PHY_CICADA_INIT6;
1472                 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
1473                         printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1474                         return PHY_ERROR;
1475                 }
1476         }
1477 +       if (np->phy_oui == PHY_OUI_VITESSE) {
1478 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
1479 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1480 +                       return PHY_ERROR;
1481 +               }               
1482 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
1483 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1484 +                       return PHY_ERROR;
1485 +               }               
1486 +               phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1487 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1488 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1489 +                       return PHY_ERROR;
1490 +               }               
1491 +               phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1492 +               phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1493 +               phy_reserved |= PHY_VITESSE_INIT3;
1494 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1495 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1496 +                       return PHY_ERROR;
1497 +               }
1498 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
1499 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1500 +                       return PHY_ERROR;
1501 +               }
1502 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
1503 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1504 +                       return PHY_ERROR;
1505 +               }
1506 +               phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1507 +               phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1508 +               phy_reserved |= PHY_VITESSE_INIT3;
1509 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1510 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1511 +                       return PHY_ERROR;
1512 +               }
1513 +               phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1514 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1515 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1516 +                       return PHY_ERROR;
1517 +               }
1518 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
1519 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1520 +                       return PHY_ERROR;
1521 +               }
1522 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
1523 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1524 +                       return PHY_ERROR;
1525 +               }
1526 +               phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1527 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1528 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1529 +                       return PHY_ERROR;
1530 +               }
1531 +               phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1532 +               phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1533 +               phy_reserved |= PHY_VITESSE_INIT8;
1534 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1535 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1536 +                       return PHY_ERROR;
1537 +               }
1538 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
1539 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1540 +                       return PHY_ERROR;
1541 +               }
1542 +               if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
1543 +                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1544 +                       return PHY_ERROR;
1545 +               }
1546 +       }
1547 +       /* some phys clear out pause advertisement on reset, set it back */
1548 +       mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1549  
1550         /* restart auto negotiation */
1551 -       mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1552 -       mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1553 -       if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1554 -               return PHY_ERROR;
1555 +       if (autoneg == AUTONEG_ENABLE) {
1556 +               mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1557 +               mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1558 +               if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1559 +                       return PHY_ERROR;
1560 +               }
1561         }
1562  
1563         return 0;
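/*
 * [Editor's sketch, not part of the upstream patch] The Cicada and
 * Vitesse init sequences above repeat one read-modify-write idiom on
 * PHY registers via mii_rw(). A hypothetical helper capturing it;
 * mii_rw(), MII_READ, PHY_ERROR and get_nvpriv() are the driver's own
 * symbols, phy_rmw() itself is assumed:
 */
static int phy_rmw(struct net_device *dev, int reg, u32 clear, u32 set)
{
	struct fe_priv *np = get_nvpriv(dev);
	u32 val = mii_rw(dev, np->phyaddr, reg, MII_READ);

	val &= ~clear;
	val |= set;
	if (mii_rw(dev, np->phyaddr, reg, val)) {
		printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}
	return 0;
}
/* e.g. one PHY_VITESSE_INIT_REG3 step above would become:
 *	if (phy_rmw(dev, PHY_VITESSE_INIT_REG3, PHY_VITESSE_INIT_MSK1,
 *		    PHY_VITESSE_INIT3))
 *		return PHY_ERROR;
 */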
1564 @@ -767,18 +1703,23 @@
1565  
1566  static void nv_start_rx(struct net_device *dev)
1567  {
1568 -       struct fe_priv *np = netdev_priv(dev);
1569 +       struct fe_priv *np = get_nvpriv(dev);
1570         u8 __iomem *base = get_hwbase(dev);
1571 +       u32 rx_ctrl = readl(base + NvRegReceiverControl);
1572  
1573         dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
1574         /* Already running? Stop it. */
1575 -       if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
1576 -               writel(0, base + NvRegReceiverControl);
1577 +       if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1578 +               rx_ctrl &= ~NVREG_RCVCTL_START;
1579 +               writel(rx_ctrl, base + NvRegReceiverControl);
1580                 pci_push(base);
1581         }
1582         writel(np->linkspeed, base + NvRegLinkSpeed);
1583         pci_push(base);
1584 -       writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
1585 +       rx_ctrl |= NVREG_RCVCTL_START;
1586 +       if (np->mac_in_use)
1587 +               rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1588 +       writel(rx_ctrl, base + NvRegReceiverControl);
1589         dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
1590                                 dev->name, np->duplex, np->linkspeed);
1591         pci_push(base);
1592 @@ -786,44 +1727,63 @@
1593  
1594  static void nv_stop_rx(struct net_device *dev)
1595  {
1596 +       struct fe_priv *np = get_nvpriv(dev);
1597         u8 __iomem *base = get_hwbase(dev);
1598 +       u32 rx_ctrl = readl(base + NvRegReceiverControl);
1599  
1600         dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
1601 -       writel(0, base + NvRegReceiverControl);
1602 +       if (!np->mac_in_use)
1603 +               rx_ctrl &= ~NVREG_RCVCTL_START;
1604 +       else
1605 +               rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1606 +       writel(rx_ctrl, base + NvRegReceiverControl);
1607         reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1608                         NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
1609                         KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
1610  
1611         udelay(NV_RXSTOP_DELAY2);
1612 +       if (!np->mac_in_use)
1613         writel(0, base + NvRegLinkSpeed);
1614  }
1615  
1616  static void nv_start_tx(struct net_device *dev)
1617  {
1618 +       struct fe_priv *np = get_nvpriv(dev);
1619         u8 __iomem *base = get_hwbase(dev);
1620 +       u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1621  
1622         dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
1623 -       writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
1624 +       tx_ctrl |= NVREG_XMITCTL_START;
1625 +       if (np->mac_in_use)
1626 +               tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
1627 +       writel(tx_ctrl, base + NvRegTransmitterControl);
1628         pci_push(base);
1629  }
1630  
1631  static void nv_stop_tx(struct net_device *dev)
1632  {
1633 +       struct fe_priv *np = get_nvpriv(dev);
1634         u8 __iomem *base = get_hwbase(dev);
1635 +       u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1636  
1637         dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
1638 -       writel(0, base + NvRegTransmitterControl);
1639 +       if (!np->mac_in_use)
1640 +               tx_ctrl &= ~NVREG_XMITCTL_START;
1641 +       else
1642 +               tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1643 +       writel(tx_ctrl, base + NvRegTransmitterControl);
1644         reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1645                         NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
1646                         KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
1647  
1648         udelay(NV_TXSTOP_DELAY2);
1649 -       writel(0, base + NvRegUnknownTransmitterReg);
1650 +       if (!np->mac_in_use)
1651 +               writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
1652  }
1653  
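/*
 * [Editor's sketch, not part of the upstream patch] With a sideband
 * management unit sharing the MAC, the four start/stop routines above
 * switch from writing absolute values to read-modify-write so the
 * PATH_EN bits owned by the management firmware survive. A condensed
 * model of that pattern (helper name and parameters are assumptions):
 */
static void nv_engine_ctrl(u8 __iomem *reg, u32 start_bit, u32 path_bit,
			   int start, int mac_in_use)
{
	u32 ctrl = readl(reg);

	if (start) {
		ctrl |= start_bit;
		if (mac_in_use)		/* management unit drives the data path */
			ctrl &= ~path_bit;
	} else {
		if (!mac_in_use)
			ctrl &= ~start_bit;
		else			/* leave START alone, hand the path back */
			ctrl |= path_bit;
	}
	writel(ctrl, reg);
}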
1654  static void nv_txrx_reset(struct net_device *dev)
1655  {
1656 -       struct fe_priv *np = netdev_priv(dev);
1657 +       struct fe_priv *np = get_nvpriv(dev);
1658         u8 __iomem *base = get_hwbase(dev);
1659  
1660         dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
1661 @@ -834,140 +1794,301 @@
1662         pci_push(base);
1663  }
1664  
1665 +static void nv_mac_reset(struct net_device *dev)
1666 +{
1667 +       struct fe_priv *np = get_nvpriv(dev);
1668 +       u8 __iomem *base = get_hwbase(dev);
1669 +
1670 +       dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
1671 +       writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1672 +       pci_push(base);
1673 +       writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1674 +       pci_push(base);
1675 +       udelay(NV_MAC_RESET_DELAY);
1676 +       writel(0, base + NvRegMacReset);
1677 +       pci_push(base);
1678 +       udelay(NV_MAC_RESET_DELAY);
1679 +       writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1680 +       pci_push(base);
1681 +}
1682 +
1683 +#if NVVER < SLES9
1684 +static int nv_ethtool_ioctl(struct net_device *dev, void *useraddr)
1685 +{
1686 +       struct fe_priv *np = get_nvpriv(dev);
1687 +       u8 *base = get_hwbase(dev);
1688 +       u32 ethcmd;
1689 +
1690 +       if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
1691 +               return -EFAULT;
1692 +
1693 +       switch (ethcmd) {
1694 +               case ETHTOOL_GDRVINFO:
1695 +                       {
1696 +                               struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
1697 +                               strcpy(info.driver, "forcedeth");
1698 +                               strcpy(info.version, FORCEDETH_VERSION);
1699 +                               strcpy(info.bus_info, pci_name(np->pci_dev));
1700 +                               if (copy_to_user(useraddr, &info, sizeof (info)))
1701 +                                       return -EFAULT;
1702 +                               return 0;
1703 +                       }
1704 +               case ETHTOOL_GLINK:
1705 +                       {
1706 +                               struct ethtool_value edata = { ETHTOOL_GLINK };
1707 +
1708 +                               edata.data = !!netif_carrier_ok(dev);
1709 +
1710 +                               if (copy_to_user(useraddr, &edata, sizeof(edata)))
1711 +                                       return -EFAULT;
1712 +                               return 0;
1713 +                       }
1714 +               case ETHTOOL_GWOL:
1715 +                       {
1716 +                               struct ethtool_wolinfo wolinfo;
1717 +                               memset(&wolinfo, 0, sizeof(wolinfo));
1718 +                               wolinfo.supported = WAKE_MAGIC;
1719 +
1720 +                               spin_lock_irq(&np->lock);
1721 +                               if (np->wolenabled)
1722 +                                       wolinfo.wolopts = WAKE_MAGIC;
1723 +                               spin_unlock_irq(&np->lock);
1724 +
1725 +                               if (copy_to_user(useraddr, &wolinfo, sizeof(wolinfo)))
1726 +                                       return -EFAULT;
1727 +                               return 0;
1728 +                       }
1729 +               case ETHTOOL_SWOL:
1730 +                       {
1731 +                               struct ethtool_wolinfo wolinfo;
1732 +                               if (copy_from_user(&wolinfo, useraddr, sizeof(wolinfo)))
1733 +                                       return -EFAULT;
1734 +
1735 +                               spin_lock_irq(&np->lock);
1736 +                               if (wolinfo.wolopts == 0) {
1737 +                                       writel(0, base + NvRegWakeUpFlags);
1738 +                                       np->wolenabled = NV_WOL_DISABLED;
1739 +                               }
1740 +                               if (wolinfo.wolopts & WAKE_MAGIC) {
1741 +                                       writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
1742 +                                       np->wolenabled = NV_WOL_ENABLED;
1743 +                               }
1744 +                               spin_unlock_irq(&np->lock);
1745 +                               return 0;
1746 +                       }
1747 +
1748 +               default:
1749 +                       break;
1750 +       }
1751 +
1752 +       return -EOPNOTSUPP;
1753 +}
1754 +
1755  /*
1756 - * nv_get_stats: dev->get_stats function
1757 - * Get latest stats value from the nic.
1758 - * Called with read_lock(&dev_base_lock) held for read -
1759 - * only synchronized against unregister_netdevice.
1760 + * nv_ioctl: dev->do_ioctl function
1761 + * Called with rtnl_lock held.
1762   */
1763 -static struct net_device_stats *nv_get_stats(struct net_device *dev)
1764 +static int nv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1765  {
1766 -       struct fe_priv *np = netdev_priv(dev);
1767 +       switch(cmd) {
1768 +               case SIOCETHTOOL:
1769 +                       return nv_ethtool_ioctl(dev, rq->ifr_data);
1770  
1771 -       /* It seems that the nic always generates interrupts and doesn't
1772 -        * accumulate errors internally. Thus the current values in np->stats
1773 -        * are already up to date.
1774 -        */
1775 -       return &np->stats;
1776 +               default:
1777 +                       return -EOPNOTSUPP;
1778 +       }
1779  }
1780 +#endif
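/*
 * [Editor's sketch, not part of the upstream patch] On the pre-SLES9
 * kernels this #if branch targets, ethtool requests arrive through the
 * generic SIOCETHTOOL ioctl with the command word in ifr_data, which is
 * exactly what nv_ethtool_ioctl() parses. A minimal userspace probe of
 * ETHTOOL_GLINK ("eth0" is a placeholder interface name):
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&edata;	/* driver copies the reply back here */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("link: %s\n", edata.data ? "up" : "down");
	close(fd);
	return 0;
}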
1781  
1782  /*
1783   * nv_alloc_rx: fill rx ring entries.
1784   * Return 1 if the allocations for the skbs failed and the
1785   * rx engine is without Available descriptors
1786   */
1787 -static int nv_alloc_rx(struct net_device *dev)
1788 +static inline int nv_alloc_rx(struct net_device *dev)
1789  {
1790 -       struct fe_priv *np = netdev_priv(dev);
1791 -       unsigned int refill_rx = np->refill_rx;
1792 -       int nr;
1793 -
1794 -       while (np->cur_rx != refill_rx) {
1795 -               struct sk_buff *skb;
1796 -
1797 -               nr = refill_rx % RX_RING;
1798 -               if (np->rx_skbuff[nr] == NULL) {
1799 -
1800 -                       skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1801 -                       if (!skb)
1802 -                               break;
1803 +       struct fe_priv *np = get_nvpriv(dev);
1804 +       struct ring_desc* less_rx;
1805 +       struct sk_buff *skb;
1806  
1807 +       less_rx = np->get_rx.orig;
1808 +       if (less_rx-- == np->first_rx.orig)
1809 +               less_rx = np->last_rx.orig;
1810 +
1811 +       while (np->put_rx.orig != less_rx) {
1812 +               skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1813 +               if (skb) {
1814                         skb->dev = dev;
1815 -                       np->rx_skbuff[nr] = skb;
1816 +                       np->put_rx_ctx->skb = skb;
1817 +                       np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
1818 +                                                            skb->end-skb->data, PCI_DMA_FROMDEVICE);
1819 +                       np->put_rx_ctx->dma_len = skb->end-skb->data;
1820 +                       np->put_rx.orig->PacketBuffer = cpu_to_le32(np->put_rx_ctx->dma);
1821 +                       wmb();
1822 +                       np->put_rx.orig->FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1823 +                       if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1824 +                               np->put_rx.orig = np->first_rx.orig;
1825 +                       if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1826 +                               np->put_rx_ctx = np->first_rx_ctx;
1827                 } else {
1828 -                       skb = np->rx_skbuff[nr];
1829 +                       return 1;
1830                 }
1831 -               np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
1832 -                                       skb->end-skb->data, PCI_DMA_FROMDEVICE);
1833 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1834 -                       np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
1835 +       }
1836 +       return 0;
1837 +}
1838 +
1839 +static inline int nv_alloc_rx_optimized(struct net_device *dev)
1840 +{
1841 +       struct fe_priv *np = get_nvpriv(dev);
1842 +       struct ring_desc_ex* less_rx;
1843 +       struct sk_buff *skb;
1844 +
1845 +       less_rx = np->get_rx.ex;
1846 +       if (less_rx-- == np->first_rx.ex)
1847 +               less_rx = np->last_rx.ex;
1848 +
1849 +       while (np->put_rx.ex != less_rx) {
1850 +               skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1851 +               if (skb) {
1852 +                       skb->dev = dev;
1853 +                       np->put_rx_ctx->skb = skb;
1854 +                       np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
1855 +                                                            skb->end-skb->data, PCI_DMA_FROMDEVICE);
1856 +                       np->put_rx_ctx->dma_len = skb->end-skb->data;
1857 +                       np->put_rx.ex->PacketBufferHigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
1858 +                       np->put_rx.ex->PacketBufferLow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
1859                         wmb();
1860 -                       np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1861 +                       np->put_rx.ex->FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1862 +                       if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1863 +                               np->put_rx.ex = np->first_rx.ex;
1864 +                       if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1865 +                               np->put_rx_ctx = np->first_rx_ctx;
1866                 } else {
1867 -                       np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
1868 -                       np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
1869 -                       wmb();
1870 -                       np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1871 +                       return 1;
1872                 }
1873 -               dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
1874 -                                       dev->name, refill_rx);
1875 -               refill_rx++;
1876         }
1877 -       np->refill_rx = refill_rx;
1878 -       if (np->cur_rx - refill_rx == RX_RING)
1879 -               return 1;
1880         return 0;
1881 +
1882  }
1883  
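/*
 * [Editor's sketch, not part of the upstream patch] Both allocators
 * above stop filling at "less_rx", one descriptor before get_rx, so a
 * completely full ring is never confused with a completely empty one
 * (put == get would be ambiguous otherwise). The same invariant on
 * plain indices (hypothetical helper):
 */
static inline int rx_ring_can_put(unsigned int put, unsigned int get,
				  unsigned int size)
{
	/* refuse the slot that would make put catch up with get */
	return ((put + 1) % size) != get;
}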
1884  static void nv_do_rx_refill(unsigned long data)
1885  {
1886         struct net_device *dev = (struct net_device *) data;
1887 -       struct fe_priv *np = netdev_priv(dev);
1888 +       struct fe_priv *np = get_nvpriv(dev);
1889 +       int retcode;
1890  
1891 -       disable_irq(dev->irq);
1892 -       if (nv_alloc_rx(dev)) {
1893 -               spin_lock(&np->lock);
1894 +       if (!using_multi_irqs(dev)) {
1895 +               if (np->msi_flags & NV_MSI_X_ENABLED)
1896 +                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1897 +               else
1898 +                       disable_irq(dev->irq);
1899 +       } else {
1900 +               disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1901 +       }
1902 +
1903 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1904 +               retcode = nv_alloc_rx(dev);
1905 +       else
1906 +               retcode = nv_alloc_rx_optimized(dev);
1907 +       if (retcode) {
1908 +               spin_lock_irq(&np->lock);
1909                 if (!np->in_shutdown)
1910                         mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1911 -               spin_unlock(&np->lock);
1912 +               spin_unlock_irq(&np->lock);
1913 +       }
1914 +       if (!using_multi_irqs(dev)) {
1915 +               if (np->msi_flags & NV_MSI_X_ENABLED)
1916 +                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1917 +               else
1918 +                       enable_irq(dev->irq);
1919 +       } else {
1920 +               enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1921         }
1922 -       enable_irq(dev->irq);
1923  }
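/*
 * [Editor's sketch, not part of the upstream patch] nv_do_rx_refill()
 * brackets the refill with the narrowest interrupt source available:
 * only the RX MSI-X vector when per-event vectors are in use, otherwise
 * the single MSI-X "all" vector or the legacy dev->irq. The decision,
 * factored into an assumed helper (all other symbols are the driver's own):
 */
static int nv_refill_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (using_multi_irqs(dev))
		return np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector;
	if (np->msi_flags & NV_MSI_X_ENABLED)
		return np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector;
	return dev->irq;
}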
1924  
1925  static void nv_init_rx(struct net_device *dev) 
1926  {
1927 -       struct fe_priv *np = netdev_priv(dev);
1928 +       struct fe_priv *np = get_nvpriv(dev);
1929         int i;
1930  
1931 -       np->cur_rx = RX_RING;
1932 -       np->refill_rx = 0;
1933 -       for (i = 0; i < RX_RING; i++)
1934 +       np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1935                 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1936 +               np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1937 +       else
1938 +               np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1939 +       np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1940 +       np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1941 +
1942 +       for (i = 0; i < np->rx_ring_size; i++) {
1943 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1944                         np->rx_ring.orig[i].FlagLen = 0;
1945 -               else
1946 +                       np->rx_ring.orig[i].PacketBuffer = 0;
1947 +               } else {
1948                         np->rx_ring.ex[i].FlagLen = 0;
1949 +                       np->rx_ring.ex[i].TxVlan = 0;
1950 +                       np->rx_ring.ex[i].PacketBufferHigh = 0;
1951 +                       np->rx_ring.ex[i].PacketBufferLow = 0;
1952 +               }
1953 +               np->rx_skb[i].skb = NULL;
1954 +               np->rx_skb[i].dma = 0;
1955 +       }
1956  }
1957  
1958  static void nv_init_tx(struct net_device *dev)
1959  {
1960 -       struct fe_priv *np = netdev_priv(dev);
1961 +       struct fe_priv *np = get_nvpriv(dev);
1962         int i;
1963  
1964 -       np->next_tx = np->nic_tx = 0;
1965 -       for (i = 0; i < TX_RING; i++) {
1966 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1967 +       np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1968 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1969 +               np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1970 +       else
1971 +               np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1972 +       np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1973 +       np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1974 +
1975 +       for (i = 0; i < np->tx_ring_size; i++) {
1976 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1977                         np->tx_ring.orig[i].FlagLen = 0;
1978 -               else
1979 +                       np->tx_ring.orig[i].PacketBuffer = 0;
1980 +               } else {
1981                         np->tx_ring.ex[i].FlagLen = 0;
1982 -               np->tx_skbuff[i] = NULL;
1983 -               np->tx_dma[i] = 0;
1984 +                       np->tx_ring.ex[i].TxVlan = 0;
1985 +                       np->tx_ring.ex[i].PacketBufferHigh = 0;
1986 +                       np->tx_ring.ex[i].PacketBufferLow = 0;
1987 +               }
1988 +               np->tx_skb[i].skb = NULL;
1989 +               np->tx_skb[i].dma = 0;
1990         }
1991  }
1992  
1993  static int nv_init_ring(struct net_device *dev)
1994  {
1995 +       struct fe_priv *np = get_nvpriv(dev);
1996         nv_init_tx(dev);
1997         nv_init_rx(dev);
1998 -       return nv_alloc_rx(dev);
1999 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2000 +               return nv_alloc_rx(dev);
2001 +       else
2002 +               return nv_alloc_rx_optimized(dev);
2003  }
2004  
2005  static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
2006  {
2007 -       struct fe_priv *np = netdev_priv(dev);
2008 +       struct fe_priv *np = get_nvpriv(dev);
2009  
2010         dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
2011                 dev->name, skbnr);
2012  
2013 -       if (np->tx_dma[skbnr]) {
2014 -               pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
2015 -                              np->tx_dma_len[skbnr],
2016 +       if (np->tx_skb[skbnr].dma) {
2017 +               pci_unmap_page(np->pci_dev, np->tx_skb[skbnr].dma,
2018 +                              np->tx_skb[skbnr].dma_len,
2019                                PCI_DMA_TODEVICE);
2020 -               np->tx_dma[skbnr] = 0;
2021 +               np->tx_skb[skbnr].dma = 0;
2022         }
2023 -
2024 -       if (np->tx_skbuff[skbnr]) {
2025 -               dev_kfree_skb_irq(np->tx_skbuff[skbnr]);
2026 -               np->tx_skbuff[skbnr] = NULL;
2027 +       if (np->tx_skb[skbnr].skb) {
2028 +               dev_kfree_skb_any(np->tx_skb[skbnr].skb);
2029 +               np->tx_skb[skbnr].skb = NULL;
2030                 return 1;
2031         } else {
2032                 return 0;
2033 @@ -976,14 +2097,19 @@
2034  
2035  static void nv_drain_tx(struct net_device *dev)
2036  {
2037 -       struct fe_priv *np = netdev_priv(dev);
2038 +       struct fe_priv *np = get_nvpriv(dev);
2039         unsigned int i;
2040         
2041 -       for (i = 0; i < TX_RING; i++) {
2042 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2043 +       for (i = 0; i < np->tx_ring_size; i++) {
2044 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2045                         np->tx_ring.orig[i].FlagLen = 0;
2046 -               else
2047 +                       np->tx_ring.orig[i].PacketBuffer = 0;
2048 +               } else {
2049                         np->tx_ring.ex[i].FlagLen = 0;
2050 +                       np->tx_ring.ex[i].TxVlan = 0;
2051 +                       np->tx_ring.ex[i].PacketBufferHigh = 0;
2052 +                       np->tx_ring.ex[i].PacketBufferLow = 0;
2053 +               }
2054                 if (nv_release_txskb(dev, i))
2055                         np->stats.tx_dropped++;
2056         }
2057 @@ -991,20 +2117,25 @@
2058  
2059  static void nv_drain_rx(struct net_device *dev)
2060  {
2061 -       struct fe_priv *np = netdev_priv(dev);
2062 +       struct fe_priv *np = get_nvpriv(dev);
2063         int i;
2064 -       for (i = 0; i < RX_RING; i++) {
2065 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2066 +       for (i = 0; i < np->rx_ring_size; i++) {
2067 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2068                         np->rx_ring.orig[i].FlagLen = 0;
2069 -               else
2070 +                       np->rx_ring.orig[i].PacketBuffer = 0;
2071 +               } else {
2072                         np->rx_ring.ex[i].FlagLen = 0;
2073 +                       np->rx_ring.ex[i].TxVlan = 0;
2074 +                       np->rx_ring.ex[i].PacketBufferHigh = 0;
2075 +                       np->rx_ring.ex[i].PacketBufferLow = 0;
2076 +               }
2077                 wmb();
2078 -               if (np->rx_skbuff[i]) {
2079 -                       pci_unmap_single(np->pci_dev, np->rx_dma[i],
2080 -                                               np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
2081 +               if (np->rx_skb[i].skb) {
2082 +                       pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
2083 +                                               np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
2084                                                 PCI_DMA_FROMDEVICE);
2085 -                       dev_kfree_skb(np->rx_skbuff[i]);
2086 -                       np->rx_skbuff[i] = NULL;
2087 +                       dev_kfree_skb(np->rx_skb[i].skb);
2088 +                       np->rx_skb[i].skb = NULL;
2089                 }
2090         }
2091  }
2092 @@ -1021,52 +2152,51 @@
2093   */
2094  static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2095  {
2096 -       struct fe_priv *np = netdev_priv(dev);
2097 +       struct fe_priv *np = get_nvpriv(dev);
2098         u32 tx_flags = 0;
2099         u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2100         unsigned int fragments = skb_shinfo(skb)->nr_frags;
2101 -       unsigned int nr = (np->next_tx - 1) % TX_RING;
2102 -       unsigned int start_nr = np->next_tx % TX_RING;
2103         unsigned int i;
2104         u32 offset = 0;
2105         u32 bcnt;
2106         u32 size = skb->len-skb->data_len;
2107         u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2108 +       u32 empty_slots;
2109 +       struct ring_desc* put_tx;
2110 +       struct ring_desc* start_tx;
2111 +       struct ring_desc* prev_tx;
2112 +       struct nv_skb_map* prev_tx_ctx;
2113  
2114 +       /* dprintk(KERN_DEBUG "%s: nv_start_xmit \n", dev->name); */
2115         /* add fragments to entries count */
2116         for (i = 0; i < fragments; i++) {
2117                 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
2118                            ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2119         }
2120  
2121 -       spin_lock_irq(&np->lock);
2122 +       empty_slots = (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
2123 +       if (likely(empty_slots > entries)) {
2124  
2125 -       if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) {
2126 -               spin_unlock_irq(&np->lock);
2127 -               netif_stop_queue(dev);
2128 -               return NETDEV_TX_BUSY;
2129 -       }
2130 +       start_tx = put_tx = np->put_tx.orig;
2131  
2132         /* setup the header buffer */
2133         do {
2134 +               prev_tx = put_tx;
2135 +               prev_tx_ctx = np->put_tx_ctx;
2136                 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2137 -               nr = (nr + 1) % TX_RING;
2138 -
2139 -               np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2140 +               np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2141                                                 PCI_DMA_TODEVICE);
2142 -               np->tx_dma_len[nr] = bcnt;
2143 +               np->put_tx_ctx->dma_len = bcnt;
2144 +               put_tx->PacketBuffer = cpu_to_le32(np->put_tx_ctx->dma);
2145 +               put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2146  
2147 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2148 -                       np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
2149 -                       np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2150 -               } else {
2151 -                       np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
2152 -                       np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
2153 -                       np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2154 -               }
2155                 tx_flags = np->tx_flags;
2156                 offset += bcnt;
2157                 size -= bcnt;
2158 +               if (unlikely(put_tx++ == np->last_tx.orig))
2159 +                       put_tx = np->first_tx.orig;
2160 +               if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2161 +                       np->put_tx_ctx = np->first_tx_ctx;
2162         } while(size);
2163  
2164         /* setup the fragments */
2165 @@ -1076,34 +2206,133 @@
2166                 offset = 0;
2167  
2168                 do {
2169 +                       prev_tx = put_tx;
2170 +                       prev_tx_ctx = np->put_tx_ctx;
2171                         bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2172 -                       nr = (nr + 1) % TX_RING;
2173  
2174 -                       np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2175 -                                                     PCI_DMA_TODEVICE);
2176 -                       np->tx_dma_len[nr] = bcnt;
2177 +                       np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2178 +                                                          PCI_DMA_TODEVICE);
2179 +                       np->put_tx_ctx->dma_len = bcnt;
2180  
2181 -                       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2182 -                               np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
2183 -                               np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2184 -                       } else {
2185 -                               np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
2186 -                               np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
2187 -                               np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2188 -                       }
2189 +                       put_tx->PacketBuffer = cpu_to_le32(np->put_tx_ctx->dma);
2190 +                       put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2191                         offset += bcnt;
2192                         size -= bcnt;
2193 +                       if (unlikely(put_tx++ == np->last_tx.orig))
2194 +                               put_tx = np->first_tx.orig;
2195 +                       if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2196 +                               np->put_tx_ctx = np->first_tx_ctx;
2197                 } while (size);
2198         }
2199  
2200         /* set last fragment flag  */
2201 -       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2202 -               np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
2203 +       prev_tx->FlagLen |= cpu_to_le32(tx_flags_extra);
2204 +
2205 +       /* save skb in this slot's context area */
2206 +       prev_tx_ctx->skb = skb;
2207 +
2208 +#ifdef NETIF_F_TSO
2209 +       if (skb_shinfo(skb)->tso_size)
2210 +               tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
2211 +       else
2212 +#endif
2213 +       tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
2214 +
2215 +       start_tx->FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
2216 +       np->put_tx.orig = put_tx;
2217 +
2218 +       dev->trans_start = jiffies;
2219 +       writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2220 +       return NETDEV_TX_OK;
2221         } else {
2222 -               np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
2223 +               netif_stop_queue(dev);
2224 +               np->stop_tx = 1;
2225 +               return NETDEV_TX_BUSY;
2226 +       }
2227 +}
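/*
 * [Editor's sketch, not part of the upstream patch] The empty_slots
 * test used by nv_start_xmit() above (and by the optimized variant
 * below) is ordinary modular ring arithmetic on the tx context
 * pointers. With indices instead of pointers (assumed helper):
 */
static inline u32 tx_empty_slots(u32 put, u32 get, u32 size)
{
	u32 used = (size + put - get) % size;	/* in-flight, wrap-safe */

	return size - used;	/* e.g. size=256, put=10, get=250 -> 240 free */
}
/* the queue is stopped unless tx_empty_slots(...) > entries needed */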
2228 +
2229 +static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
2230 +{
2231 +       struct fe_priv *np = get_nvpriv(dev);
2232 +       u32 tx_flags = 0;
2233 +       u32 tx_flags_extra;
2234 +       unsigned int fragments = skb_shinfo(skb)->nr_frags;
2235 +       unsigned int i;
2236 +       u32 offset = 0;
2237 +       u32 bcnt;
2238 +       u32 size = skb->len-skb->data_len;
2239 +       u32 empty_slots;
2240 +       struct ring_desc_ex* put_tx;
2241 +       struct ring_desc_ex* start_tx;
2242 +       struct ring_desc_ex* prev_tx;
2243 +       struct nv_skb_map* prev_tx_ctx;
2244 +
2245 +       u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2246 +
2247 +       /* dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized \n", dev->name); */
2248 +       /* add fragments to entries count */
2249 +       for (i = 0; i < fragments; i++) {
2250 +               entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
2251 +                          ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2252 +       }
2253 +
2254 +       empty_slots = (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
2255 +       if (likely(empty_slots > entries)) {
2256 +
2257 +       start_tx = put_tx = np->put_tx.ex;
2258 +
2259 +       /* setup the header buffer */
2260 +       do {
2261 +               prev_tx = put_tx;
2262 +               prev_tx_ctx = np->put_tx_ctx;
2263 +               bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2264 +               np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2265 +                                               PCI_DMA_TODEVICE);
2266 +               np->put_tx_ctx->dma_len = bcnt;
2267 +               put_tx->PacketBufferHigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
2268 +               put_tx->PacketBufferLow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
2269 +               put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2270 +
2271 +               tx_flags = NV_TX2_VALID;
2272 +               offset += bcnt;
2273 +               size -= bcnt;
2274 +               if (unlikely(put_tx++ == np->last_tx.ex))
2275 +                       put_tx = np->first_tx.ex;
2276 +               if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2277 +                       np->put_tx_ctx = np->first_tx_ctx;
2278 +       } while(size);
2279 +       /* setup the fragments */
2280 +       for (i = 0; i < fragments; i++) {
2281 +               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2282 +               u32 size = frag->size;
2283 +               offset = 0;
2284 +
2285 +               do {
2286 +                       prev_tx = put_tx;
2287 +                       prev_tx_ctx = np->put_tx_ctx;
2288 +                       bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2289 +
2290 +                       np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2291 +                                                          PCI_DMA_TODEVICE);
2292 +                       np->put_tx_ctx->dma_len = bcnt;
2293 +
2294 +                       put_tx->PacketBufferHigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
2295 +                       put_tx->PacketBufferLow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
2296 +                       put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
2297 +                       offset += bcnt;
2298 +                       size -= bcnt;
2299 +                       if (unlikely(put_tx++ == np->last_tx.ex))
2300 +                               put_tx = np->first_tx.ex;
2301 +                       if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2302 +                               np->put_tx_ctx = np->first_tx_ctx;
2303 +               } while (size);
2304         }
2305  
2306 -       np->tx_skbuff[nr] = skb;
2307 +       /* set last fragment flag */
2308 +       prev_tx->FlagLen |= cpu_to_le32(NV_TX2_LASTPACKET);
2309 +
2310 +       /* save skb in this slot's context area */
2311 +       prev_tx_ctx->skb = skb;
2312  
2313  #ifdef NETIF_F_TSO
2314         if (skb_shinfo(skb)->tso_size)
2315 @@ -1112,32 +2341,29 @@
2316  #endif
2317         tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
2318  
2319 -       /* set tx flags */
2320 -       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2321 -               np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
2322 +       /* vlan tag */
2323 +       if (likely(!np->vlangrp)) {
2324 +               start_tx->TxVlan = 0;
2325         } else {
2326 -               np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
2327 -       }       
2328 -
2329 -       dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
2330 -               dev->name, np->next_tx, entries, tx_flags_extra);
2331 -       {
2332 -               int j;
2333 -               for (j=0; j<64; j++) {
2334 -                       if ((j%16) == 0)
2335 -                               dprintk("\n%03x:", j);
2336 -                       dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2337 -               }
2338 -               dprintk("\n");
2339 +               if (vlan_tx_tag_present(skb))
2340 +                       start_tx->TxVlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
2341 +               else
2342 +                       start_tx->TxVlan = 0;
2343         }
2344  
2345 -       np->next_tx += entries;
2346 +       /* set tx flags */
2347 +       start_tx->FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
2348 +       np->put_tx.ex = put_tx;
2349  
2350         dev->trans_start = jiffies;
2351 -       spin_unlock_irq(&np->lock);
2352         writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2353 -       pci_push(get_hwbase(dev));
2354         return NETDEV_TX_OK;
2355 +
2356 +       } else {
2357 +               netif_stop_queue(dev);
2358 +               np->stop_tx = 1;
2359 +               return NETDEV_TX_BUSY;
2360 +       }
2361  }
2362  
2363  /*
2364 @@ -1145,30 +2371,26 @@
2365   *
2366   * Caller must own np->lock.
2367   */
2368 -static void nv_tx_done(struct net_device *dev)
2369 +static inline void nv_tx_done(struct net_device *dev)
2370  {
2371 -       struct fe_priv *np = netdev_priv(dev);
2372 +       struct fe_priv *np = get_nvpriv(dev);
2373         u32 Flags;
2374 -       unsigned int i;
2375 -       struct sk_buff *skb;
2376 +       struct ring_desc* orig_get_tx = np->get_tx.orig;
2377 +       struct ring_desc* put_tx = np->put_tx.orig;
2378  
2379 -       while (np->nic_tx != np->next_tx) {
2380 -               i = np->nic_tx % TX_RING;
2381 +       /* dprintk(KERN_DEBUG "%s: nv_tx_done \n", dev->name); */
2382 +       while ((np->get_tx.orig != put_tx) &&
2383 +              !((Flags = le32_to_cpu(np->get_tx.orig->FlagLen)) & NV_TX_VALID)) {
2384 +               dprintk(KERN_DEBUG "%s: nv_tx_done:NVLAN tx done\n", dev->name);
2385  
2386 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2387 -                       Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
2388 -               else
2389 -                       Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
2390 +               pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2391 +                              np->get_tx_ctx->dma_len,
2392 +                              PCI_DMA_TODEVICE);
2393 +               np->get_tx_ctx->dma = 0;
2394  
2395 -               dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
2396 -                                       dev->name, np->nic_tx, Flags);
2397 -               if (Flags & NV_TX_VALID)
2398 -                       break;
2399                 if (np->desc_ver == DESC_VER_1) {
2400                         if (Flags & NV_TX_LASTPACKET) {
2401 -                               skb = np->tx_skbuff[i];
2402 -                               if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
2403 -                                            NV_TX_UNDERFLOW|NV_TX_ERROR)) {
2404 +                               if (Flags & NV_TX_ERROR) {
2405                                         if (Flags & NV_TX_UNDERFLOW)
2406                                                 np->stats.tx_fifo_errors++;
2407                                         if (Flags & NV_TX_CARRIERLOST)
2408 @@ -1176,14 +2398,15 @@
2409                                         np->stats.tx_errors++;
2410                                 } else {
2411                                         np->stats.tx_packets++;
2412 -                                       np->stats.tx_bytes += skb->len;
2413 +                                       np->stats.tx_bytes += np->get_tx_ctx->skb->len;
2414                                 }
2415 +                               dev_kfree_skb_any(np->get_tx_ctx->skb);
2416 +                               np->get_tx_ctx->skb = NULL;
2417 +
2418                         }
2419                 } else {
2420                         if (Flags & NV_TX2_LASTPACKET) {
2421 -                               skb = np->tx_skbuff[i];
2422 -                               if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
2423 -                                            NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
2424 +                               if (Flags & NV_TX2_ERROR) {
2425                                         if (Flags & NV_TX2_UNDERFLOW)
2426                                                 np->stats.tx_fifo_errors++;
2427                                         if (Flags & NV_TX2_CARRIERLOST)
2428 @@ -1191,15 +2414,59 @@
2429                                         np->stats.tx_errors++;
2430                                 } else {
2431                                         np->stats.tx_packets++;
2432 -                                       np->stats.tx_bytes += skb->len;
2433 +                                       np->stats.tx_bytes += np->get_tx_ctx->skb->len;
2434                                 }                               
2435 +                               dev_kfree_skb_any(np->get_tx_ctx->skb);
2436 +                               np->get_tx_ctx->skb = NULL;
2437 +                       }
2438 +               }
2439 +
2440 +               if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2441 +                       np->get_tx.orig = np->first_tx.orig;
2442 +               if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2443 +                       np->get_tx_ctx = np->first_tx_ctx;
2444 +       }
2445 +       if (unlikely((np->stop_tx == 1) && (np->get_tx.orig != orig_get_tx))) {
2446 +               np->stop_tx = 0;
2447 +               netif_wake_queue(dev);
2448 +       }
2449 +}
2450 +
2451 +static inline void nv_tx_done_optimized(struct net_device *dev, int max_work)
2452 +{
2453 +       struct fe_priv *np = get_nvpriv(dev);
2454 +       u32 Flags;
2455 +       struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
2456 +       struct ring_desc_ex* put_tx = np->put_tx.ex;
2457 +
2458 +       /* dprintk(KERN_DEBUG "%s: nv_tx_done_optimized \n", dev->name); */
2459 +       while ((np->get_tx.ex != put_tx) &&
2460 +              !((Flags = le32_to_cpu(np->get_tx.ex->FlagLen)) & NV_TX_VALID) &&
2461 +              (max_work-- > 0)) {
2462 +               dprintk(KERN_DEBUG "%s: nv_tx_done_optimized:NVLAN tx done\n", dev->name);
2463 +
2464 +               pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2465 +                              np->get_tx_ctx->dma_len,
2466 +                              PCI_DMA_TODEVICE);
2467 +               np->get_tx_ctx->dma = 0;
2468 +
2469 +               if (Flags & NV_TX2_LASTPACKET) {
2470 +                       if (!(Flags & NV_TX2_ERROR)) {
2471 +                               np->stats.tx_packets++;
2472                         }
2473 +                       dev_kfree_skb_any(np->get_tx_ctx->skb);
2474 +                       np->get_tx_ctx->skb = NULL;
2475                 }
2476 -               nv_release_txskb(dev, i);
2477 -               np->nic_tx++;
2478 +
2479 +               if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2480 +                       np->get_tx.ex = np->first_tx.ex;
2481 +               if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2482 +                       np->get_tx_ctx = np->first_tx_ctx;
2483         }
2484 -       if (np->next_tx - np->nic_tx < TX_LIMIT_START)
2485 +       if (unlikely((np->stop_tx == 1) && (np->get_tx.ex != orig_get_tx))) {
2486 +               np->stop_tx = 0;
2487                 netif_wake_queue(dev);
2488 +       }
2489  }
2490  
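/*
 * [Editor's sketch, not part of the upstream patch] Both completion
 * routines above end with the same handshake against the xmit paths:
 * when nv_start_xmit*() runs out of descriptors it stops the queue and
 * sets np->stop_tx, and the next completion pass that reclaims anything
 * reopens it. Condensed into a hypothetical helper:
 */
static inline void nv_tx_maybe_wake(struct net_device *dev,
				    struct fe_priv *np, int reclaimed)
{
	if (np->stop_tx && reclaimed) {
		np->stop_tx = 0;
		netif_wake_queue(dev);
	}
}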
2491  /*
2492 @@ -1208,20 +2475,34 @@
2493   */
2494  static void nv_tx_timeout(struct net_device *dev)
2495  {
2496 -       struct fe_priv *np = netdev_priv(dev);
2497 +       struct fe_priv *np = get_nvpriv(dev);
2498         u8 __iomem *base = get_hwbase(dev);
2499 +       u32 status;
2500  
2501 -       printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
2502 -                       readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
2503 +       if (!netif_running(dev))
2504 +               return;
2505 +
2506 +       if (np->msi_flags & NV_MSI_X_ENABLED)
2507 +               status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2508 +       else
2509 +               status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2510 +
2511 +       printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
2512  
2513         {
2514                 int i;
2515  
2516 -               printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
2517 -                               dev->name, (unsigned long)np->ring_addr,
2518 -                               np->next_tx, np->nic_tx);
2519 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2520 +                       printk(KERN_INFO "%s: Ring at %lx: get %lx put %lx\n",
2521 +                              dev->name, (unsigned long)np->tx_ring.orig,
2522 +                              (unsigned long)np->get_tx.orig, (unsigned long)np->put_tx.orig);
2523 +               } else {
2524 +                       printk(KERN_INFO "%s: Ring at %lx: get %lx put %lx\n",
2525 +                              dev->name, (unsigned long)np->tx_ring.ex,
2526 +                              (unsigned long)np->get_tx.ex, (unsigned long)np->put_tx.ex);
2527 +               }
2528                 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
2529 -               for (i=0;i<0x400;i+= 32) {
2530 +               for (i=0;i<=np->register_size;i+= 32) {
2531                         printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2532                                         i,
2533                                         readl(base + i + 0), readl(base + i + 4),
2534 @@ -1230,7 +2511,7 @@
2535                                         readl(base + i + 24), readl(base + i + 28));
2536                 }
2537                 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2538 -               for (i=0;i<TX_RING;i+= 4) {
2539 +               for (i=0;i<np->tx_ring_size;i+= 4) {
2540                         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2541                                 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2542                                        i, 
2543 @@ -1261,29 +2542,35 @@
2544                 }
2545         }
2546  
2547 +       nv_disable_irq(dev);
2548         spin_lock_irq(&np->lock);
2549  
2550         /* 1) stop tx engine */
2551         nv_stop_tx(dev);
2552  
2553         /* 2) check that the packets were not sent already: */
2554 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2555         nv_tx_done(dev);
2556 +       else
2557 +               nv_tx_done_optimized(dev, np->tx_ring_size);
2558  
2559         /* 3) if there are dead entries: clear everything */
2560 -       if (np->next_tx != np->nic_tx) {
2561 +       if (np->get_tx_ctx != np->put_tx_ctx) {
2562                 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
2563                 nv_drain_tx(dev);
2564 -               np->next_tx = np->nic_tx = 0;
2565                 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2566 -                       writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
2567 +                       np->get_tx.orig = np->put_tx.orig = np->first_tx.orig;
2568                 else
2569 -                       writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
2570 +                       np->get_tx.ex = np->put_tx.ex = np->first_tx.ex;
2571 +               np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx;
2572 +               setup_hw_rings(dev, NV_SETUP_TX_RING);
2573                 netif_wake_queue(dev);
2574         }
2575  
2576         /* 4) restart tx engine */
2577         nv_start_tx(dev);
2578         spin_unlock_irq(&np->lock);
2579 +       nv_enable_irq(dev);
2580  }
2581  
2582  /*
2583 @@ -1339,41 +2626,23 @@
2584         }
2585  }
2586  
2587 -static void nv_rx_process(struct net_device *dev)
2588 +static inline void nv_rx_process(struct net_device *dev)
2589  {
2590 -       struct fe_priv *np = netdev_priv(dev);
2591 +       struct fe_priv *np = get_nvpriv(dev);
2592         u32 Flags;
2593 +       struct sk_buff *skb;
2594 +       int len;
2595  
2596 -       for (;;) {
2597 -               struct sk_buff *skb;
2598 -               int len;
2599 -               int i;
2600 -               if (np->cur_rx - np->refill_rx >= RX_RING)
2601 -                       break;  /* we scanned the whole ring - do not continue */
2602 -
2603 -               i = np->cur_rx % RX_RING;
2604 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2605 -                       Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
2606 -                       len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
2607 -               } else {
2608 -                       Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
2609 -                       len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
2610 -               }
2611 -
2612 -               dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
2613 -                                       dev->name, np->cur_rx, Flags);
2614 -
2615 -               if (Flags & NV_RX_AVAIL)
2616 -                       break;  /* still owned by hardware, */
2617 +       /* dprintk(KERN_DEBUG "%s: nv_rx_process \n", dev->name); */
2618 +       while((np->get_rx.orig != np->put_rx.orig) &&
2619 +             !((Flags = le32_to_cpu(np->get_rx.orig->FlagLen)) & NV_RX_AVAIL)) {
2620 +
2621 +               pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2622 +                                np->get_rx_ctx->dma_len,
2623 +                                PCI_DMA_FROMDEVICE);
2624  
2625 -               /*
2626 -                * the packet is for us - immediately tear down the pci mapping.
2627 -                * TODO: check if a prefetch of the first cacheline improves
2628 -                * the performance.
2629 -                */
2630 -               pci_unmap_single(np->pci_dev, np->rx_dma[i],
2631 -                               np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
2632 -                               PCI_DMA_FROMDEVICE);
2633 +               skb = np->get_rx_ctx->skb;
2634 +               np->get_rx_ctx->skb = NULL;
2635  
2636                 {
2637                         int j;
2638 @@ -1381,112 +2650,198 @@
2639                         for (j=0; j<64; j++) {
2640                                 if ((j%16) == 0)
2641                                         dprintk("\n%03x:", j);
2642 -                               dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
2643 +                               dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2644                         }
2645                         dprintk("\n");
2646                 }
2647 -               /* look at what we actually got: */
2648 +
2649                 if (np->desc_ver == DESC_VER_1) {
2650 -                       if (!(Flags & NV_RX_DESCRIPTORVALID))
2651 -                               goto next_pkt;
2652  
2653 -                       if (Flags & NV_RX_ERROR) {
2654 -                               if (Flags & NV_RX_MISSEDFRAME) {
2655 -                                       np->stats.rx_missed_errors++;
2656 -                                       np->stats.rx_errors++;
2657 -                                       goto next_pkt;
2658 -                               }
2659 -                               if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
2660 -                                       np->stats.rx_errors++;
2661 -                                       goto next_pkt;
2662 -                               }
2663 -                               if (Flags & NV_RX_CRCERR) {
2664 -                                       np->stats.rx_crc_errors++;
2665 -                                       np->stats.rx_errors++;
2666 -                                       goto next_pkt;
2667 -                               }
2668 -                               if (Flags & NV_RX_OVERFLOW) {
2669 -                                       np->stats.rx_over_errors++;
2670 -                                       np->stats.rx_errors++;
2671 -                                       goto next_pkt;
2672 +                       if (likely(Flags & NV_RX_DESCRIPTORVALID)) {
2673 +                               len = Flags & LEN_MASK_V1;
2674 +                               if (unlikely(Flags & NV_RX_ERROR)) {
2675 +                                       if (Flags & NV_RX_ERROR4) {
2676 +                                               len = nv_getlen(dev, skb->data, len);
2677 +                                               if (len < 0) {
2678 +                                                       np->stats.rx_errors++;
2679 +                                                       dev_kfree_skb(skb);
2680 +                                                       goto next_pkt;
2681 +                                               }
2682 +                                       }
2683 +                                       /* framing errors are soft errors */
2684 +                                       else if (Flags & NV_RX_FRAMINGERR) {
2685 +                                               if (Flags & NV_RX_SUBSTRACT1) {
2686 +                                                       len--;
2687 +                                               }
2688 +                                       }
2689 +                                       /* the rest are hard errors */
2690 +                                       else {
2691 +                                               if (Flags & NV_RX_MISSEDFRAME)
2692 +                                                       np->stats.rx_missed_errors++;
2693 +                                               if (Flags & NV_RX_CRCERR)
2694 +                                                       np->stats.rx_crc_errors++;
2695 +                                               if (Flags & NV_RX_OVERFLOW)
2696 +                                                       np->stats.rx_over_errors++;
2697 +                                               np->stats.rx_errors++;
2698 +                                               dev_kfree_skb(skb);
2699 +                                               goto next_pkt;
2700 +                                       }
2701                                 }
2702 -                               if (Flags & NV_RX_ERROR4) {
2703 -                                       len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
2704 -                                       if (len < 0) {
2705 +                       } else {
2706 +                               dev_kfree_skb(skb);
2707 +                               goto next_pkt;
2708 +                       }
2709 +               } else {
2710 +                       if (likely(Flags & NV_RX2_DESCRIPTORVALID)) {
2711 +                               len = Flags & LEN_MASK_V2;
2712 +                               if (unlikely(Flags & NV_RX2_ERROR)) {
2713 +                                       if (Flags & NV_RX2_ERROR4) {
2714 +                                               len = nv_getlen(dev, skb->data, len);
2715 +                                               if (len < 0) {
2716 +                                                       np->stats.rx_errors++;
2717 +                                                       dev_kfree_skb(skb);
2718 +                                                       goto next_pkt;
2719 +                                               }
2720 +                                       }
2721 +                                       /* framing errors are soft errors */
2722 +                                       else if (Flags & NV_RX2_FRAMINGERR) {
2723 +                                               if (Flags & NV_RX2_SUBSTRACT1) {
2724 +                                                       len--;
2725 +                                               }
2726 +                                       }
2727 +                                       /* the rest are hard errors */
2728 +                                       else {
2729 +                                               if (Flags & NV_RX2_CRCERR)
2730 +                                                       np->stats.rx_crc_errors++;
2731 +                                               if (Flags & NV_RX2_OVERFLOW)
2732 +                                                       np->stats.rx_over_errors++;
2733                                                 np->stats.rx_errors++;
2734 +                                               dev_kfree_skb(skb);
2735                                                 goto next_pkt;
2736                                         }
2737                                 }
2738 -                               /* framing errors are soft errors. */
2739 -                               if (Flags & NV_RX_FRAMINGERR) {
2740 -                                       if (Flags & NV_RX_SUBSTRACT1) {
2741 -                                               len--;
2742 +                               if ((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2) { /* IP and TCP */
2743 +                                       skb->ip_summed = CHECKSUM_UNNECESSARY;
2744 +                               } else {
2745 +                                       if ((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
2746 +                                           (Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
2747 +                                               skb->ip_summed = CHECKSUM_UNNECESSARY;
2748                                         }
2749                                 }
2750 -                       }
2751 -               } else {
2752 -                       if (!(Flags & NV_RX2_DESCRIPTORVALID))
2753 +                       } else {
2754 +                               dev_kfree_skb(skb);
2755                                 goto next_pkt;
2756 +                       }
2757 +               }
2758  
2759 -                       if (Flags & NV_RX2_ERROR) {
2760 -                               if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
2761 -                                       np->stats.rx_errors++;
2762 -                                       goto next_pkt;
2763 -                               }
2764 -                               if (Flags & NV_RX2_CRCERR) {
2765 -                                       np->stats.rx_crc_errors++;
2766 -                                       np->stats.rx_errors++;
2767 -                                       goto next_pkt;
2768 -                               }
2769 -                               if (Flags & NV_RX2_OVERFLOW) {
2770 -                                       np->stats.rx_over_errors++;
2771 -                                       np->stats.rx_errors++;
2772 -                                       goto next_pkt;
2773 -                               }
2774 +               /* got a valid packet - forward it to the network core */
2775 +               dprintk(KERN_DEBUG "%s: nv_rx_process: NVLAN rx done\n", dev->name);
2776 +               skb_put(skb, len);
2777 +               skb->protocol = eth_type_trans(skb, dev);
2778 +               netif_rx(skb);
2779 +               dev->last_rx = jiffies;
2780 +               np->stats.rx_packets++;
2781 +               np->stats.rx_bytes += len;
2782 +next_pkt:
2783 +               if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2784 +                       np->get_rx.orig = np->first_rx.orig;
2785 +               if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2786 +                       np->get_rx_ctx = np->first_rx_ctx;
2787 +       }
2788 +}
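
The rewritten receive path above replaces the old cur_rx % RX_RING indexing
with paired ring pointers: get_rx chases put_rx, and both wrap from last_rx
back to first_rx. A minimal user-space sketch of that traversal, with the
descriptor array and ownership flag as stand-ins for the hardware ring
(illustration only, not driver code):

    /* Toy model of the get/put ring walk used by nv_rx_process() above. */
    #include <stdio.h>

    #define RING_SIZE   8
    #define OWNED_BY_HW 0x1          /* stand-in for NV_RX_AVAIL */

    struct desc { unsigned int flags; int len; };

    static struct desc ring[RING_SIZE];
    static struct desc *first = ring;                  /* np->first_rx */
    static struct desc *last  = ring + RING_SIZE - 1;  /* np->last_rx  */
    static struct desc *get   = ring;                  /* np->get_rx   */
    static struct desc *put   = ring + 5;              /* np->put_rx   */

    int main(void)
    {
            /* reap completed descriptors until we meet put_rx or hit a
             * descriptor still owned by the hardware */
            while (get != put && !(get->flags & OWNED_BY_HW)) {
                    printf("reaped descriptor %ld (len %d)\n",
                           (long)(get - ring), get->len);
                    if (get++ == last)   /* wrap exactly like np->get_rx */
                            get = first;
            }
            return 0;
    }
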
2789 +
2790 +static inline int nv_rx_process_optimized(struct net_device *dev, int max_work)
2791 +{
2792 +       struct fe_priv *np = get_nvpriv(dev);
2793 +       u32 Flags;
2794 +       u32 vlanflags = 0;
2795 +       u32 rx_processed_cnt = 0;
2796 +       struct sk_buff *skb;
2797 +       int len;
2798 +
2799 +       dprintk(KERN_DEBUG "%s: nv_rx_process_optimized\n", dev->name);
2800 +       while ((np->get_rx.ex != np->put_rx.ex) &&
2801 +              !((Flags = le32_to_cpu(np->get_rx.ex->FlagLen)) & NV_RX2_AVAIL) &&
2802 +              (rx_processed_cnt++ < max_work)) {
2803 +
2804 +               pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2805 +                                np->get_rx_ctx->dma_len,
2806 +                                PCI_DMA_FROMDEVICE);
2807 +
2808 +               skb = np->get_rx_ctx->skb;
2809 +               np->get_rx_ctx->skb = NULL;
2810 +
2811 +               /* look at what we actually got: */
2812 +               if (likely(Flags & NV_RX2_DESCRIPTORVALID)) {
2813 +                       len = Flags & LEN_MASK_V2;
2814 +                       if (unlikely(Flags & NV_RX2_ERROR)) {
2815                                 if (Flags & NV_RX2_ERROR4) {
2816 -                                       len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
2817 +                                       len = nv_getlen(dev, skb->data, len);
2818                                         if (len < 0) {
2819 -                                               np->stats.rx_errors++;
2820 +                                               np->rx_len_errors++;
2821 +                                               dev_kfree_skb(skb);
2822                                                 goto next_pkt;
2823                                         }
2824                                 }
2825                                 /* framing errors are soft errors */
2826 -                               if (Flags & NV_RX2_FRAMINGERR) {
2827 +                               else if (Flags & NV_RX2_FRAMINGERR) {
2828                                         if (Flags & NV_RX2_SUBSTRACT1) {
2829                                                 len--;
2830                                         }
2831                                 }
2832 +                               /* the rest are hard errors */
2833 +                               else {
2834 +                                       dev_kfree_skb(skb);
2835 +                                       goto next_pkt;
2836 +                               }
2837 +                       }
2838 +
2839 +                       if (likely(np->rx_csum)) {
2840 +                               if (likely((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)) {
2841 +                                       /* IP and TCP */
2842 +                                       skb->ip_summed = CHECKSUM_UNNECESSARY;
2843 +                               } else {
2844 +                                       if ((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
2845 +                                           (Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
2846 +                                               skb->ip_summed = CHECKSUM_UNNECESSARY;
2847 +                                       }
2848 +                               }
2849                         }
2850 -                       Flags &= NV_RX2_CHECKSUMMASK;
2851 -                       if (Flags == NV_RX2_CHECKSUMOK1 ||
2852 -                                       Flags == NV_RX2_CHECKSUMOK2 ||
2853 -                                       Flags == NV_RX2_CHECKSUMOK3) {
2854 -                               dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
2855 -                               np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
2856 +                       dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: NVLAN rx done\n", dev->name);
2857 +
2858 +                       /* got a valid packet - forward it to the network core */
2859 +                       skb_put(skb, len);
2860 +                       skb->protocol = eth_type_trans(skb, dev);
2861 +                       prefetch(skb->data);
2862 +
2863 +                       if (likely(!np->vlangrp)) {
2864 +                               netif_rx(skb);
2865                         } else {
2866 -                               dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
2867 +                               vlanflags = le32_to_cpu(np->get_rx.ex->PacketBufferLow);
2868 +                               if (vlanflags & NV_RX3_VLAN_TAG_PRESENT)
2869 +                                       vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
2870 +                               else
2871 +                                       netif_rx(skb);
2872                         }
2873 -               }
2874 -               /* got a valid packet - forward it to the network core */
2875 -               skb = np->rx_skbuff[i];
2876 -               np->rx_skbuff[i] = NULL;
2877  
2878 -               skb_put(skb, len);
2879 -               skb->protocol = eth_type_trans(skb, dev);
2880 -               dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
2881 -                                       dev->name, np->cur_rx, len, skb->protocol);
2882 -               netif_rx(skb);
2883 -               dev->last_rx = jiffies;
2884 -               np->stats.rx_packets++;
2885 -               np->stats.rx_bytes += len;
2886 +                       dev->last_rx = jiffies;
2887 +                       np->stats.rx_packets++;
2888 +                       np->stats.rx_bytes += len;
2889 +               } else {
2890 +                       dev_kfree_skb(skb);
2891 +               }
2892  next_pkt:
2893 -               np->cur_rx++;
2894 +               if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2895 +                       np->get_rx.ex = np->first_rx.ex;
2896 +               if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2897 +                       np->get_rx_ctx = np->first_rx_ctx;
2898         }
2899 +       return rx_processed_cnt;
2900  }
2901  
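
nv_rx_process_optimized() caps each pass at max_work descriptors and returns
how many it consumed, so the interrupt handlers further down can tell a
drained ring (zero) from an exhausted budget. A small user-space model of
that contract (process_rx is a hypothetical stand-in, not the driver call):

    #include <stdio.h>

    /* Consume up to "budget" pending items; return how many were taken. */
    static int process_rx(int *pending, int budget)
    {
            int done = 0;

            while (*pending > 0 && done < budget) {
                    (*pending)--;
                    done++;
            }
            return done;
    }

    int main(void)
    {
            int pending = 150;          /* packets waiting in the ring */

            /* keep polling in 64-descriptor slices, like RX_WORK_PER_LOOP;
             * a zero return means the ring is drained */
            for (;;) {
                    int done = process_rx(&pending, 64);

                    printf("pass reaped %d packets\n", done);
                    if (done == 0)
                            break;
            }
            return 0;
    }
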
2902  static void set_bufsize(struct net_device *dev)
2903  {
2904 -       struct fe_priv *np = netdev_priv(dev);
2905 +       struct fe_priv *np = get_nvpriv(dev);
2906  
2907         if (dev->mtu <= ETH_DATA_LEN)
2908                 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2909 @@ -1500,7 +2855,7 @@
2910   */
2911  static int nv_change_mtu(struct net_device *dev, int new_mtu)
2912  {
2913 -       struct fe_priv *np = netdev_priv(dev);
2914 +       struct fe_priv *np = get_nvpriv(dev);
2915         int old_mtu;
2916  
2917         if (new_mtu < 64 || new_mtu > np->pkt_limit)
2918 @@ -1524,7 +2879,7 @@
2919                  * guessed, there is probably a simpler approach.
2920                  * Changing the MTU is a rare event, it shouldn't matter.
2921                  */
2922 -               disable_irq(dev->irq);
2923 +               nv_disable_irq(dev);
2924                 spin_lock_bh(&dev->xmit_lock);
2925                 spin_lock(&np->lock);
2926                 /* stop engines */
2927 @@ -1535,22 +2890,15 @@
2928                 nv_drain_rx(dev);
2929                 nv_drain_tx(dev);
2930                 /* reinit driver view of the rx queue */
2931 -               nv_init_rx(dev);
2932 -               nv_init_tx(dev);
2933 -               /* alloc new rx buffers */
2934                 set_bufsize(dev);
2935 -               if (nv_alloc_rx(dev)) {
2936 +               if (nv_init_ring(dev)) {
2937                         if (!np->in_shutdown)
2938                                 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2939                 }
2940                 /* reinit nic view of the rx queue */
2941                 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2942 -               writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
2943 -               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2944 -                       writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
2945 -               else
2946 -                       writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
2947 -               writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
2948 +               setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2949 +               writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2950                         base + NvRegRingSizes);
2951                 pci_push(base);
2952                 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2953 @@ -1561,7 +2909,7 @@
2954                 nv_start_tx(dev);
2955                 spin_unlock(&np->lock);
2956                 spin_unlock_bh(&dev->xmit_lock);
2957 -               enable_irq(dev->irq);
2958 +               nv_enable_irq(dev);
2959         }
2960         return 0;
2961  }
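
When the MTU changes on a running interface, the hunk above quiesces the
device before touching ring geometry and restarts it in reverse order. A
user-space outline of that ordering (each step is a printf where the driver
does real work; the labels paraphrase the calls, they are not driver APIs):

    #include <stdio.h>

    static void step(const char *what) { printf("%s\n", what); }

    int main(void)
    {
            step("nv_disable_irq   - mask nic interrupts");
            step("stop rx/tx       - quiesce the dma engines");
            step("drain rings      - free every in-flight skb");
            step("set_bufsize      - pick rx_buf_sz for the new mtu");
            step("nv_init_ring     - rebuild and refill both rings");
            step("setup_hw_rings   - reprogram ring base addresses");
            step("start rx/tx      - dma engines back on");
            step("nv_enable_irq    - unmask nic interrupts");
            return 0;
    }
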
2962 @@ -1585,12 +2933,13 @@
2963   */
2964  static int nv_set_mac_address(struct net_device *dev, void *addr)
2965  {
2966 -       struct fe_priv *np = netdev_priv(dev);
2967 +       struct fe_priv *np = get_nvpriv(dev);
2968         struct sockaddr *macaddr = (struct sockaddr*)addr;
2969  
2970         if(!is_valid_ether_addr(macaddr->sa_data))
2971                 return -EADDRNOTAVAIL;
2972  
2973 +       dprintk(KERN_DEBUG "%s: nv_set_mac_address\n", dev->name);
2974         /* synchronized against open : rtnl_lock() held by caller */
2975         memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
2976  
2977 @@ -1620,20 +2969,20 @@
2978   */
2979  static void nv_set_multicast(struct net_device *dev)
2980  {
2981 -       struct fe_priv *np = netdev_priv(dev);
2982 +       struct fe_priv *np = get_nvpriv(dev);
2983         u8 __iomem *base = get_hwbase(dev);
2984         u32 addr[2];
2985         u32 mask[2];
2986 -       u32 pff;
2987 +       u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
2988  
2989         memset(addr, 0, sizeof(addr));
2990         memset(mask, 0, sizeof(mask));
2991  
2992         if (dev->flags & IFF_PROMISC) {
2993 -               printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
2994 -               pff = NVREG_PFF_PROMISC;
2995 +               dprintk(KERN_DEBUG "%s: Promiscuous mode enabled.\n", dev->name);
2996 +               pff |= NVREG_PFF_PROMISC;
2997         } else {
2998 -               pff = NVREG_PFF_MYADDR;
2999 +               pff |= NVREG_PFF_MYADDR;
3000  
3001                 if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
3002                         u32 alwaysOff[2];
3003 @@ -1678,6 +3027,35 @@
3004         spin_unlock_irq(&np->lock);
3005  }
3006  
3007 +static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3008 +{
3009 +       struct fe_priv *np = get_nvpriv(dev);
3010 +       u8 __iomem *base = get_hwbase(dev);
3011 +
3012 +       np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3013 +
3014 +       if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3015 +               u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3016 +               if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3017 +                       writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3018 +                       np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3019 +               } else {
3020 +                       writel(pff, base + NvRegPacketFilterFlags);
3021 +               }
3022 +       }
3023 +       if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3024 +               u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3025 +               if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3026 +                       writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
3027 +                       writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3028 +                       np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3029 +               } else {
3030 +                       writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
3031 +                       writel(regmisc, base + NvRegMisc1);
3032 +               }
3033 +       }
3034 +}
3035 +
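
nv_update_pause() uses a read-modify-write on each register: read the
current value, clear the pause bit, then set it back only when requested, so
unrelated bits survive the update. A toy model of the pattern (the register,
bit value, and function name are stand-ins, not the hardware's):

    #include <stdio.h>

    #define PFF_PAUSE_RX 0x08        /* stand-in pause-enable bit */

    static unsigned int reg = 0xFF;  /* models NvRegPacketFilterFlags */

    static void set_rx_pause(int enable)
    {
            unsigned int pff = reg & ~PFF_PAUSE_RX;  /* clear first */

            if (enable)
                    pff |= PFF_PAUSE_RX;             /* then opt back in */
            reg = pff;                               /* writel() stand-in */
    }

    int main(void)
    {
            set_rx_pause(1);
            printf("reg = 0x%02x\n", reg);   /* 0xff: bit set, rest kept */
            set_rx_pause(0);
            printf("reg = 0x%02x\n", reg);   /* 0xf7: only pause cleared */
            return 0;
    }
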
3036  /**
3037   * nv_update_linkspeed: Setup the MAC according to the link partner
3038   * @dev: Network device to be configured
3039 @@ -1691,14 +3069,16 @@
3040   */
3041  static int nv_update_linkspeed(struct net_device *dev)
3042  {
3043 -       struct fe_priv *np = netdev_priv(dev);
3044 +       struct fe_priv *np = get_nvpriv(dev);
3045         u8 __iomem *base = get_hwbase(dev);
3046 -       int adv, lpa;
3047 +       int adv = 0;
3048 +       int lpa = 0;
3049 +       int adv_lpa, adv_pause, lpa_pause;
3050         int newls = np->linkspeed;
3051         int newdup = np->duplex;
3052         int mii_status;
3053         int retval = 0;
3054 -       u32 control_1000, status_1000, phyreg;
3055 +       u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3056  
3057         /* BMSR_LSTATUS is latched, read it twice:
3058          * we want the current value.
3059 @@ -1715,7 +3095,7 @@
3060                 goto set_speed;
3061         }
3062  
3063 -       if (np->autoneg == 0) {
3064 +       if (np->autoneg == AUTONEG_DISABLE) {
3065                 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
3066                                 dev->name, np->fixed_mode);
3067                 if (np->fixed_mode & LPA_100FULL) {
3068 @@ -1744,10 +3124,14 @@
3069                 goto set_speed;
3070         }
3071  
3072 +       adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3073 +       lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3074 +       dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
3075 +                               dev->name, adv, lpa);
3076         retval = 1;
3077         if (np->gigabit == PHY_GIGABIT) {
3078 -               control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
3079 -               status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ);
3080 +               control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3081 +               status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3082  
3083                 if ((control_1000 & ADVERTISE_1000FULL) &&
3084                         (status_1000 & LPA_1000FULL)) {
3085 @@ -1759,27 +3143,22 @@
3086                 }
3087         }
3088  
3089 -       adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3090 -       lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3091 -       dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
3092 -                               dev->name, adv, lpa);
3093 -
3094         /* FIXME: handle parallel detection properly */
3095 -       lpa = lpa & adv;
3096 -       if (lpa & LPA_100FULL) {
3097 +       adv_lpa = lpa & adv;
3098 +       if (adv_lpa & LPA_100FULL) {
3099                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3100                 newdup = 1;
3101 -       } else if (lpa & LPA_100HALF) {
3102 +       } else if (adv_lpa & LPA_100HALF) {
3103                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3104                 newdup = 0;
3105 -       } else if (lpa & LPA_10FULL) {
3106 +       } else if (adv_lpa & LPA_10FULL) {
3107                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3108                 newdup = 1;
3109 -       } else if (lpa & LPA_10HALF) {
3110 +       } else if (adv_lpa & LPA_10HALF) {
3111                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3112                 newdup = 0;
3113         } else {
3114 -               dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
3115 +               dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
3116                 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3117                 newdup = 0;
3118         }
3119 @@ -1816,13 +3195,72 @@
3120                 phyreg |= PHY_1000;
3121         writel(phyreg, base + NvRegPhyInterface);
3122  
3123 +       if (phyreg & PHY_RGMII) {
3124 +               if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3125 +                       txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3126 +               else
3127 +                       txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3128 +       } else {
3129 +               txreg = NVREG_TX_DEFERRAL_DEFAULT;
3130 +       }
3131 +       writel(txreg, base + NvRegTxDeferral);
3132 +
3133 +       if (np->desc_ver == DESC_VER_1) {
3134 +               txreg = NVREG_TX_WM_DESC1_DEFAULT;
3135 +       } else {
3136 +               if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3137 +                       txreg = NVREG_TX_WM_DESC2_3_1000;
3138 +               else
3139 +                       txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3140 +       }
3141 +       writel(txreg, base + NvRegTxWatermark);
3142         writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
3143                 base + NvRegMisc1);
3144         pci_push(base);
3145         writel(np->linkspeed, base + NvRegLinkSpeed);
3146         pci_push(base);
3147  
3148 -       return retval;
3149 +       pause_flags = 0;
3150 +       /* setup pause frame */
3151 +       if (np->duplex != 0) {
3152 +               if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3153 +                       adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3154 +                       lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3155 +
3156 +                       switch (adv_pause) {
3157 +                       case (ADVERTISE_PAUSE_CAP):
3158 +                               if (lpa_pause & LPA_PAUSE_CAP) {
3159 +                                       pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3160 +                                       if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3161 +                                               pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3162 +                               }
3163 +                               break;
3164 +                       case (ADVERTISE_PAUSE_ASYM):
3165 +                               if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3166 +                               {
3167 +                                       pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3168 +                               }
3169 +                               break;
3170 +                       case (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM):
3171 +                               if (lpa_pause & LPA_PAUSE_CAP)
3172 +                               {
3173 +                                       pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3174 +                                       if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3175 +                                               pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3176 +                               }
3177 +                               if (lpa_pause == LPA_PAUSE_ASYM)
3178 +                               {
3179 +                                       pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3180 +                               }
3181 +                               break;
3182 +                       }
3183 +               } else {
3184 +                       pause_flags = np->pause_flags;
3185 +               }
3186 +       }
3187 +       nv_update_pause(dev, pause_flags);
3188 +
3189 +       return retval;
3190  }
3191  
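
The pause setup above is the usual IEEE 802.3 flow-control resolution: the
local PAUSE/ASM_DIR advertisement is crossed with the link partner's LPA
bits to decide whether RX pause, TX pause, or both get enabled. A compact
user-space rendering of that table (bit values as in linux/mii.h; the
result flags and function name are invented for illustration):

    #include <stdio.h>

    #define PAUSE_CAP  0x0400   /* ADVERTISE_PAUSE_CAP / LPA_PAUSE_CAP   */
    #define PAUSE_ASYM 0x0800   /* ADVERTISE_PAUSE_ASYM / LPA_PAUSE_ASYM */

    #define RX_EN 0x1
    #define TX_EN 0x2

    /* Mirrors the driver's switch; tx_req models NV_PAUSEFRAME_TX_REQ. */
    static int resolve_pause(int adv, int lpa, int tx_req)
    {
            int adv_pause = adv & (PAUSE_CAP | PAUSE_ASYM);
            int lpa_pause = lpa & (PAUSE_CAP | PAUSE_ASYM);
            int out = 0;

            switch (adv_pause) {
            case PAUSE_CAP:
                    if (lpa_pause & PAUSE_CAP) {
                            out |= RX_EN;
                            if (tx_req)
                                    out |= TX_EN;
                    }
                    break;
            case PAUSE_ASYM:
                    if (lpa_pause == (PAUSE_CAP | PAUSE_ASYM))
                            out |= TX_EN;
                    break;
            case PAUSE_CAP | PAUSE_ASYM:
                    if (lpa_pause & PAUSE_CAP) {
                            out |= RX_EN;
                            if (tx_req)
                                    out |= TX_EN;
                    }
                    if (lpa_pause == PAUSE_ASYM)
                            out |= RX_EN;
                    break;
            }
            return out;
    }

    int main(void)
    {
            /* both ends symmetric-capable, TX requested locally -> 0x3 */
            printf("flags = 0x%x\n", resolve_pause(PAUSE_CAP, PAUSE_CAP, 1));
            return 0;
    }
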
3192  static void nv_linkchange(struct net_device *dev)
3193 @@ -1859,7 +3297,7 @@
3194  static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
3195  {
3196         struct net_device *dev = (struct net_device *) data;
3197 -       struct fe_priv *np = netdev_priv(dev);
3198 +       struct fe_priv *np = get_nvpriv(dev);
3199         u8 __iomem *base = get_hwbase(dev);
3200         u32 events;
3201         int i;
3202 @@ -1867,16 +3305,19 @@
3203         dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
3204  
3205         for (i=0; ; i++) {
3206 -               events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3207 -               writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3208 +               if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3209 +                       events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3210 +                       writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3211 +               } else {
3212 +                       events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3213 +                       writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3214 +               }
3215                 pci_push(base);
3216                 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3217                 if (!(events & np->irqmask))
3218                         break;
3219  
3220 -               spin_lock(&np->lock);
3221                 nv_tx_done(dev);
3222 -               spin_unlock(&np->lock);
3223                 
3224                 nv_rx_process(dev);
3225                 if (nv_alloc_rx(dev)) {
3226 @@ -1908,11 +3349,16 @@
3227                 if (i > max_interrupt_work) {
3228                         spin_lock(&np->lock);
3229                         /* disable interrupts on the nic */
3230 -                       writel(0, base + NvRegIrqMask);
3231 +                       if (!(np->msi_flags & NV_MSI_X_ENABLED))
3232 +                               writel(0, base + NvRegIrqMask);
3233 +                       else
3234 +                               writel(np->irqmask, base + NvRegIrqMask);
3235                         pci_push(base);
3236  
3237 -                       if (!np->in_shutdown)
3238 +                       if (!np->in_shutdown) {
3239 +                               np->nic_poll_irq = np->irqmask;
3240                                 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3241 +                       }
3242                         printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3243                         spin_unlock(&np->lock);
3244                         break;
3245 @@ -1924,285 +3370,1749 @@
3246         return IRQ_RETVAL(i);
3247  }
3248  
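
Note the storm guard in the handler above: once the loop exceeds
max_interrupt_work passes, the NIC is left masked and nic_poll is armed, so
a timer finishes the work instead of hard-irq context spinning forever. A
toy model of that bail-out (all names and counts are stand-ins):

    #include <stdio.h>

    #define MAX_INTERRUPT_WORK 5

    int main(void)
    {
            int pending = 8;    /* interrupt causes the hardware raises */
            int i;

            for (i = 0; ; i++) {
                    int events = (pending-- > 0); /* NvRegIrqStatus stand-in */

                    if (!events)
                            break;                /* nothing left: done */
                    /* ... tx reaping / rx processing would run here ... */
                    if (i > MAX_INTERRUPT_WORK) {
                            printf("too many iterations (%d), deferring to timer\n", i);
                            break;  /* sources stay masked; timer re-enables */
                    }
            }
            return 0;
    }
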
3249 -static void nv_do_nic_poll(unsigned long data)
3250 +#define TX_WORK_PER_LOOP  64
3251 +#define RX_WORK_PER_LOOP  64
3252 +static irqreturn_t nv_nic_irq_optimized(int foo, void *data, struct pt_regs *regs)
3253  {
3254         struct net_device *dev = (struct net_device *) data;
3255 -       struct fe_priv *np = netdev_priv(dev);
3256 +       struct fe_priv *np = get_nvpriv(dev);
3257         u8 __iomem *base = get_hwbase(dev);
3258 +       u32 events;
3259 +       int i = 1;
3260  
3261 -       disable_irq(dev->irq);
3262 -       /* FIXME: Do we need synchronize_irq(dev->irq) here? */
3263 -       /*
3264 -        * reenable interrupts on the nic, we have to do this before calling
3265 -        * nv_nic_irq because that may decide to do otherwise
3266 -        */
3267 -       writel(np->irqmask, base + NvRegIrqMask);
3268 -       pci_push(base);
3269 -       nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
3270 -       enable_irq(dev->irq);
3271 -}
3272 +       do {
3273 +               if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3274 +                       events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3275 +                       writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3276 +               } else {
3277 +                       events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3278 +                       writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3279 +               }
3280 +               if (events & np->irqmask) {
3281  
3282 -#ifdef CONFIG_NET_POLL_CONTROLLER
3283 -static void nv_poll_controller(struct net_device *dev)
3284 -{
3285 -       nv_do_nic_poll((unsigned long) dev);
3286 -}
3287 -#endif
3288 +                       nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3289  
3290 -static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3291 -{
3292 -       struct fe_priv *np = netdev_priv(dev);
3293 -       strcpy(info->driver, "forcedeth");
3294 -       strcpy(info->version, FORCEDETH_VERSION);
3295 -       strcpy(info->bus_info, pci_name(np->pci_dev));
3296 +                       if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3297 +                               if (unlikely(nv_alloc_rx_optimized(dev))) {
3298 +                                       spin_lock(&np->lock);
3299 +                                       if (!np->in_shutdown)
3300 +                                               mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3301 +                                       spin_unlock(&np->lock);
3302 +                               }
3303 +                       }
3304 +                       if (unlikely(events & NVREG_IRQ_LINK)) {
3305 +                               spin_lock(&np->lock);
3306 +                               nv_link_irq(dev);
3307 +                               spin_unlock(&np->lock);
3308 +                       }
3309 +                       if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3310 +                               spin_lock(&np->lock);
3311 +                               nv_linkchange(dev);
3312 +                               spin_unlock(&np->lock);
3313 +                               np->link_timeout = jiffies + LINK_TIMEOUT;
3314 +                       }
3315 +                       if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3316 +                               spin_lock(&np->lock);
3317 +                               /* disable interrupts on the nic */
3318 +                               if (!(np->msi_flags & NV_MSI_X_ENABLED))
3319 +                                       writel(0, base + NvRegIrqMask);
3320 +                               else
3321 +                                       writel(np->irqmask, base + NvRegIrqMask);
3322 +                               pci_push(base);
3323 +
3324 +                               if (!np->in_shutdown) {
3325 +                                       np->nic_poll_irq = np->irqmask;
3326 +                                       np->recover_error = 1;
3327 +                                       mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3328 +                               }
3329 +                               spin_unlock(&np->lock);
3330 +                               break;
3331 +                       }
3332 +               } else
3333 +                       break;
3334 +       } while (i++ <= max_interrupt_work);
3336 +
3337 +       return IRQ_RETVAL(i);
3338  }
3339  
3340 -static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3341 +static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
3342  {
3343 -       struct fe_priv *np = netdev_priv(dev);
3344 -       wolinfo->supported = WAKE_MAGIC;
3345 +       struct net_device *dev = (struct net_device *) data;
3346 +       struct fe_priv *np = get_nvpriv(dev);
3347 +       u8 __iomem *base = get_hwbase(dev);
3348 +       u32 events;
3349 +       int i;
3350  
3351 -       spin_lock_irq(&np->lock);
3352 -       if (np->wolenabled)
3353 -               wolinfo->wolopts = WAKE_MAGIC;
3354 -       spin_unlock_irq(&np->lock);
3355 +       dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3356 +
3357 +       for (i=0; ; i++) {
3358 +               events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3359 +               writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3360 +               dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3361 +               if (!(events & np->irqmask))
3362 +                       break;
3363 +
3364 +               nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3365 +
3366 +               if (events & (NVREG_IRQ_TX_ERR)) {
3367 +                       dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3368 +                                               dev->name, events);
3369 +               }
3370 +               if (i > max_interrupt_work) {
3371 +                       spin_lock_irq(&np->lock);
3372 +                       /* disable interrupts on the nic */
3373 +                       writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3374 +                       pci_push(base);
3375 +
3376 +                       if (!np->in_shutdown) {
3377 +                               np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3378 +                               mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3379 +                       }
3380 +                       printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
3381 +                       spin_unlock_irq(&np->lock);
3382 +                       break;
3383 +               }
3384 +
3385 +       }
3386 +       dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3387 +
3388 +       return IRQ_RETVAL(i);
3389  }
3390  
3391 -static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3392 +static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
3393  {
3394 -       struct fe_priv *np = netdev_priv(dev);
3395 +       struct net_device *dev = (struct net_device *) data;
3396 +       struct fe_priv *np = get_nvpriv(dev);
3397         u8 __iomem *base = get_hwbase(dev);
3398 +       u32 events;
3399 +       int i;
3400 +
3401 +       dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3402 +
3403 +       for (i=0; ; i++) {
3404 +               events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3405 +               writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3406 +               dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3407 +               if (!(events & np->irqmask))
3408 +                       break;
3409 +
3410 +               if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3411 +                       if (unlikely(nv_alloc_rx_optimized(dev))) {
3412 +                               spin_lock_irq(&np->lock);
3413 +                               if (!np->in_shutdown)
3414 +                                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3415 +                               spin_unlock_irq(&np->lock);
3416 +                       }
3417 +               }
3418 +
3419 +               if (i > max_interrupt_work) {
3420 +                       spin_lock_irq(&np->lock);
3421 +                       /* disable interrupts on the nic */
3422 +                       writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3423 +                       pci_push(base);
3424 +
3425 +                       if (!np->in_shutdown) {
3426 +                               np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3427 +                               mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3428 +                       }
3429 +                       printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
3430 +                       spin_unlock_irq(&np->lock);
3431 +                       break;
3432 +               }
3433  
3434 -       spin_lock_irq(&np->lock);
3435 -       if (wolinfo->wolopts == 0) {
3436 -               writel(0, base + NvRegWakeUpFlags);
3437 -               np->wolenabled = 0;
3438 -       }
3439 -       if (wolinfo->wolopts & WAKE_MAGIC) {
3440 -               writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
3441 -               np->wolenabled = 1;
3442         }
3443 -       spin_unlock_irq(&np->lock);
3444 -       return 0;
3445 +       dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3446 +
3447 +       return IRQ_RETVAL(i);
3448  }
3449  
3450 -static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3451 +static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
3452  {
3453 -       struct fe_priv *np = netdev_priv(dev);
3454 -       int adv;
3455 +       struct net_device *dev = (struct net_device *) data;
3456 +       struct fe_priv *np = get_nvpriv(dev);
3457 +       u8 __iomem *base = get_hwbase(dev);
3458 +       u32 events;
3459 +       int i;
3460  
3461 -       spin_lock_irq(&np->lock);
3462 -       ecmd->port = PORT_MII;
3463 -       if (!netif_running(dev)) {
3464 -               /* We do not track link speed / duplex setting if the
3465 -                * interface is disabled. Force a link check */
3466 -               nv_update_linkspeed(dev);
3467 -       }
3468 -       switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
3469 -               case NVREG_LINKSPEED_10:
3470 -                       ecmd->speed = SPEED_10;
3471 +       dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3472 +
3473 +       for (i=0; ; i++) {
3474 +               events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3475 +               writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3476 +               dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3477 +               if (!(events & np->irqmask))
3478                         break;
3479 -               case NVREG_LINKSPEED_100:
3480 -                       ecmd->speed = SPEED_100;
3481 +
3482 +               if (events & NVREG_IRQ_LINK) {
3483 +                       spin_lock_irq(&np->lock);
3484 +                       nv_link_irq(dev);
3485 +                       spin_unlock_irq(&np->lock);
3486 +               }
3487 +               if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3488 +                       spin_lock_irq(&np->lock);
3489 +                       nv_linkchange(dev);
3490 +                       spin_unlock_irq(&np->lock);
3491 +                       np->link_timeout = jiffies + LINK_TIMEOUT;
3492 +               }
3493 +               if (events & NVREG_IRQ_RECOVER_ERROR) {
3494 +                       spin_lock_irq(&np->lock);
3495 +                       /* disable interrupts on the nic */
3496 +                       writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3497 +                       pci_push(base);
3498 +
3499 +                       if (!np->in_shutdown) {
3500 +                               np->nic_poll_irq |= NVREG_IRQ_OTHER;
3501 +                               np->recover_error = 1;
3502 +                               mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3503 +                       }
3504 +                       spin_unlock_irq(&np->lock);
3505                         break;
3506 -               case NVREG_LINKSPEED_1000:
3507 -                       ecmd->speed = SPEED_1000;
3508 +               }
3509 +               if (events & (NVREG_IRQ_UNKNOWN)) {
3510 +                       printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3511 +                                               dev->name, events);
3512 +               }
3513 +               if (i > max_interrupt_work) {
3514 +                       spin_lock_irq(&np->lock);
3515 +                       /* disable interrupts on the nic */
3516 +                       writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3517 +                       pci_push(base);
3518 +
3519 +                       if (!np->in_shutdown) {
3520 +                               np->nic_poll_irq |= NVREG_IRQ_OTHER;
3521 +                               mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3522 +                       }
3523 +                       printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
3524 +                       spin_unlock_irq(&np->lock);
3525                         break;
3526 +               }
3527 +
3528         }
3529 -       ecmd->duplex = DUPLEX_HALF;
3530 -       if (np->duplex)
3531 -               ecmd->duplex = DUPLEX_FULL;
3532 +       dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3533  
3534 -       ecmd->autoneg = np->autoneg;
3535 +       return IRQ_RETVAL(i);
3536 +}
3537  
3538 -       ecmd->advertising = ADVERTISED_MII;
3539 -       if (np->autoneg) {
3540 -               ecmd->advertising |= ADVERTISED_Autoneg;
3541 -               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3542 -       } else {
3543 -               adv = np->fixed_mode;
3544 -       }
3545 -       if (adv & ADVERTISE_10HALF)
3546 -               ecmd->advertising |= ADVERTISED_10baseT_Half;
3547 -       if (adv & ADVERTISE_10FULL)
3548 -               ecmd->advertising |= ADVERTISED_10baseT_Full;
3549 -       if (adv & ADVERTISE_100HALF)
3550 -               ecmd->advertising |= ADVERTISED_100baseT_Half;
3551 -       if (adv & ADVERTISE_100FULL)
3552 -               ecmd->advertising |= ADVERTISED_100baseT_Full;
3553 -       if (np->autoneg && np->gigabit == PHY_GIGABIT) {
3554 -               adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
3555 -               if (adv & ADVERTISE_1000FULL)
3556 -                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
3557 -       }
3558 +static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs)
3559 +{
3560 +       struct net_device *dev = (struct net_device *) data;
3561 +       struct fe_priv *np = get_nvpriv(dev);
3562 +       u8 __iomem *base = get_hwbase(dev);
3563 +       u32 events;
3564  
3565 -       ecmd->supported = (SUPPORTED_Autoneg |
3566 -               SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
3567 -               SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
3568 -               SUPPORTED_MII);
3569 -       if (np->gigabit == PHY_GIGABIT)
3570 -               ecmd->supported |= SUPPORTED_1000baseT_Full;
3571 +       dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3572  
3573 -       ecmd->phy_address = np->phyaddr;
3574 -       ecmd->transceiver = XCVR_EXTERNAL;
3575 +       if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3576 +               events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3577 +               writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3578 +       } else {
3579 +               events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3580 +               writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3581 +       }
3582 +       pci_push(base);
3583 +       dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3584 +       if (!(events & NVREG_IRQ_TIMER))
3585 +               return IRQ_RETVAL(0);
3586 +
3587 +       spin_lock(&np->lock);
3588 +       np->intr_test = 1;
3589 +       spin_unlock(&np->lock);
3590 +
3591 +       dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3592  
3593 -       /* ignore maxtxpkt, maxrxpkt for now */
3594 -       spin_unlock_irq(&np->lock);
3595 -       return 0;
3596 +       return IRQ_RETVAL(1);
3597  }
3598  
3599 -static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3600 +#ifdef CONFIG_PCI_MSI
3601 +static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3602  {
3603 -       struct fe_priv *np = netdev_priv(dev);
3604 +       u8 __iomem *base = get_hwbase(dev);
3605 +       int i;
3606 +       u32 msixmap = 0;
3607  
3608 -       if (ecmd->port != PORT_MII)
3609 -               return -EINVAL;
3610 -       if (ecmd->transceiver != XCVR_EXTERNAL)
3611 -               return -EINVAL;
3612 -       if (ecmd->phy_address != np->phyaddr) {
3613 -               /* TODO: support switching between multiple phys. Should be
3614 -                * trivial, but not enabled due to lack of test hardware. */
3615 -               return -EINVAL;
3616 +       /* Each interrupt bit can be mapped to an MSI-X vector (4 bits).
3617 +        * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3618 +        * the remaining 8 interrupts.
3619 +        */
3620 +       for (i = 0; i < 8; i++) {
3621 +               if ((irqmask >> i) & 0x1) {
3622 +                       msixmap |= vector << (i << 2);
3623 +               }
3624         }
3625 -       if (ecmd->autoneg == AUTONEG_ENABLE) {
3626 -               u32 mask;
3627 +       writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3628  
3629 -               mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3630 -                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
3631 -               if (np->gigabit == PHY_GIGABIT)
3632 -                       mask |= ADVERTISED_1000baseT_Full;
3633 +       msixmap = 0;
3634 +       for (i = 0; i < 8; i++) {
3635 +               if ((irqmask >> (i + 8)) & 0x1) {
3636 +                       msixmap |= vector << (i << 2);
3637 +               }
3638 +       }
3639 +       writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3640 +}
3641 +#endif
3642  
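
The two map registers pack one 4-bit vector number per interrupt source,
eight sources per 32-bit register: MSIXMap0 for sources 0-7, MSIXMap1 for
8-15. set_msix_vector_map() above fills every nibble whose bit is set in
irqmask. The same packing in self-contained form (build_msix_map is a
hypothetical helper mirroring the loop above):

    #include <stdio.h>

    /* Pack "vector" into the 4-bit field of every source whose bit is
     * set in irqmask, for the 8 sources starting at first_source. */
    static unsigned int build_msix_map(unsigned int vector,
                                       unsigned int irqmask,
                                       int first_source)
    {
            unsigned int msixmap = 0;
            int i;

            for (i = 0; i < 8; i++)
                    if ((irqmask >> (i + first_source)) & 0x1)
                            msixmap |= vector << (i << 2);
            return msixmap;
    }

    int main(void)
    {
            /* map sources 0 and 2 to vector 3: nibbles 0 and 2 get 0x3 */
            printf("MSIXMap0 = 0x%08x\n", build_msix_map(3, 0x5, 0));
            return 0;
    }
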
3643 -               if ((ecmd->advertising & mask) == 0)
3644 -                       return -EINVAL;
3645 +static int nv_request_irq(struct net_device *dev, int intr_test)
3646 +{
3647 +       struct fe_priv *np = get_nvpriv(dev);
3648 +       int ret = 1;
3649  
3650 -       } else if (ecmd->autoneg == AUTONEG_DISABLE) {
3651 -               /* Note: autonegotiation disable, speed 1000 intentionally
3652 -                * forbidden - noone should need that. */
3653 +#if NVVER > SLES9
3654 +       u8 __iomem *base = get_hwbase(dev);
3655 +       int i;
3656  
3657 -               if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
3658 -                       return -EINVAL;
3659 -               if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
3660 -                       return -EINVAL;
3661 -       } else {
3662 -               return -EINVAL;
3663 -       }
3664 +       if (np->msi_flags & NV_MSI_X_CAPABLE) {
3665 +               for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3666 +                       np->msi_x_entry[i].entry = i;
3667 +               }
3668 +               if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
3669 +                       np->msi_flags |= NV_MSI_X_ENABLED;
3670 +                       if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3671 +                               /* Request irq for rx handling */
3672 +                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
3673 +                                       printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3674 +                                       pci_disable_msix(np->pci_dev);
3675 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3676 +                                       goto out_err;
3677 +                               }
3678 +                               /* Request irq for tx handling */
3679 +                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
3680 +                                       printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3681 +                                       pci_disable_msix(np->pci_dev);
3682 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3683 +                                       goto out_free_rx;
3684 +                               }
3685 +                               /* Request irq for link and timer handling */
3686 +                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
3687 +                                       printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3688 +                                       pci_disable_msix(np->pci_dev);
3689 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3690 +                                       goto out_free_tx;
3691 +                               }
3692 +                               /* map interrupts to their respective vector */
3693 +                               writel(0, base + NvRegMSIXMap0);
3694 +                               writel(0, base + NvRegMSIXMap1);
3695 +#ifdef CONFIG_PCI_MSI
3696 +                               set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3697 +                               set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3698 +                               set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3699 +#endif
3700 +                       } else {
3701 +                               /* Request irq for all interrupts */
3702 +                               if ((!intr_test && np->desc_ver == DESC_VER_3 &&
3703 +                                    request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_optimized, SA_SHIRQ, dev->name, dev) != 0) ||
3704 +                                   (!intr_test && np->desc_ver != DESC_VER_3 &&
3705 +                                    request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
3706 +                                   (intr_test &&
3707 +                                    request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
3708 +                                       printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3709 +                                       pci_disable_msix(np->pci_dev);
3710 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3711 +                                       goto out_err;
3712 +                               }
3713  
3714 -       spin_lock_irq(&np->lock);
3715 -       if (ecmd->autoneg == AUTONEG_ENABLE) {
3716 -               int adv, bmcr;
3717 +                               /* map interrupts to vector 0 */
3718 +                               writel(0, base + NvRegMSIXMap0);
3719 +                               writel(0, base + NvRegMSIXMap1);
3720 +                       }
3721 +               }
3722 +       }
3723 +       if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3724 +               if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3725 +                       np->msi_flags |= NV_MSI_ENABLED;
3726 +                       if ((!intr_test && np->desc_ver == DESC_VER_3 &&
3727 +                            request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, SA_SHIRQ, dev->name, dev) != 0) ||
3728 +                           (!intr_test && np->desc_ver != DESC_VER_3 &&
3729 +                            request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
3730 +                           (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
3731 +                               printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3732 +                               pci_disable_msi(np->pci_dev);
3733 +                               np->msi_flags &= ~NV_MSI_ENABLED;
3734 +                               goto out_err;
3735 +                       }
3736  
3737 -               np->autoneg = 1;
3738 +                       /* map interrupts to vector 0 */
3739 +                       writel(0, base + NvRegMSIMap0);
3740 +                       writel(0, base + NvRegMSIMap1);
3741 +                       /* enable msi vector 0 */
3742 +                       writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3743 +               }
3744 +       }
3745 +#else
3746 +#ifdef CONFIG_PCI_MSI
3747 +       u8 __iomem *base = get_hwbase(dev);
3748 +       int i;
3749 +
3750 +       if (np->msi_flags & NV_MSI_X_CAPABLE) {
3751 +               for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3752 +                       np->msi_x_entry[i].entry = i;
3753 +               }
3754 +               if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3755 +                       np->msi_flags |= NV_MSI_X_ENABLED;
3756 +                       if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3757 +                       msi_alloc_vectors(np->pci_dev, (int *)np->msi_x_entry, 2);
3758 +                               /* Request irq for rx handling */
3759 +                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
3760 +                                       printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3761 +                                       pci_disable_msi(np->pci_dev);
3762 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3763 +                                       goto out_err;
3764 +                               }
3765 +                               /* Request irq for tx handling */
3766 +                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
3767 +                                       printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3768 +                                       pci_disable_msi(np->pci_dev);
3769 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3770 +                                       goto out_free_rx;
3771 +                               }
3772 +                               /* Request irq for link and timer handling */
3773 +                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
3774 +                                       printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3775 +                                       pci_disable_msi(np->pci_dev);
3776 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3777 +                                       goto out_free_tx;
3778 +                               }
3779 +                               /* map interrupts to their respective vector */
3780 +                               writel(0, base + NvRegMSIXMap0);
3781 +                               writel(0, base + NvRegMSIXMap1);
3782 +#ifdef CONFIG_PCI_MSI
3783 +                               set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3784 +                               set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3785 +                               set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3786 +#endif
3787 +                       } else {
3788 +                               /* Request irq for all interrupts */
3789 +                               if ((!intr_test && np->desc_ver == DESC_VER_3 &&
3790 +                                    request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_optimized, SA_SHIRQ, dev->name, dev) != 0) ||
3791 +                                   (!intr_test && np->desc_ver != DESC_VER_3 &&
3792 +                                    request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
3793 +                                   (intr_test &&
3794 +                                    request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
3795 +                                       printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3796 +                                       pci_disable_msi(np->pci_dev);
3797 +                                       np->msi_flags &= ~NV_MSI_X_ENABLED;
3798 +                                       goto out_err;
3799 +                               }
3800 +
3801 +                               /* map interrupts to vector 0 */
3802 +                               writel(0, base + NvRegMSIXMap0);
3803 +                               writel(0, base + NvRegMSIXMap1);
3804 +                       }
3805 +               }
3806 +       }
3807 +       if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3808 +
3809 +               if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3810 +                       np->msi_flags |= NV_MSI_ENABLED;
3811 +                       if ((!intr_test && np->desc_ver == DESC_VER_3 &&
3812 +                            request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, SA_SHIRQ, dev->name, dev) != 0) ||
3813 +                           (!intr_test && np->desc_ver != DESC_VER_3 &&
3814 +                            request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
3815 +                           (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
3816 +                               printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3817 +                               pci_disable_msi(np->pci_dev);
3818 +                               np->msi_flags &= ~NV_MSI_ENABLED;
3819 +                               goto out_err;
3820 +                       }
3821 +
3822 +                       /* map interrupts to vector 0 */
3823 +                       writel(0, base + NvRegMSIMap0);
3824 +                       writel(0, base + NvRegMSIMap1);
3825 +                       /* enable msi vector 0 */
3826 +                       writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3827 +               }
3828 +       }
3829 +#endif
3830 +#endif
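+       /*
+        * Neither MSI-X nor MSI could be enabled (or the device is not
+        * capable of them), so fall back to the shared legacy irq line.
+        */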
3831 +       if (ret != 0) {
3832 +               if ((!intr_test && np->desc_ver == DESC_VER_3 &&
3833 +                    request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, SA_SHIRQ, dev->name, dev) != 0) ||
3834 +                   (!intr_test && np->desc_ver != DESC_VER_3 &&
3835 +                    request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
3836 +                   (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0))
3837 +                       goto out_err;
3838 +
3839 +       }
3840 +
3841 +       return 0;
3842 +
3843 +#if NVVER > SLES9
3844 +out_free_tx:
3845 +       free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3846 +out_free_rx:
3847 +       free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
3848 +#else
3849 +#ifdef CONFIG_PCI_MSI  
3850 +out_free_tx:
3851 +       free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3852 +out_free_rx:
3853 +       free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
3854 +#endif
3855 +#endif
3856 +out_err:
3857 +       return 1;
3858 +}
3859 +
3860 +#if NVVER > SLES9
3861 +static void nv_free_irq(struct net_device *dev)
3862 +{
3863 +       struct fe_priv *np = get_nvpriv(dev);
3864 +       int i;
3865 +       
3866 +       if (np->msi_flags & NV_MSI_X_ENABLED) {
3867 +               for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3868 +                       free_irq(np->msi_x_entry[i].vector, dev);
3869 +               }
3870 +               pci_disable_msix(np->pci_dev);
3871 +               np->msi_flags &= ~NV_MSI_X_ENABLED;
3872 +       } else {
3873 +               free_irq(np->pci_dev->irq, dev);
3874 +               if (np->msi_flags & NV_MSI_ENABLED) {
3875 +                       pci_disable_msi(np->pci_dev);
3876 +                       np->msi_flags &= ~NV_MSI_ENABLED;
3877 +               }
3878 +       }
3879 +}
3880 +#else
3881 +static void nv_free_irq(struct net_device *dev)
3882 +{
3883 +       struct fe_priv *np = get_nvpriv(dev);
3884 +       
3885 +#ifdef CONFIG_PCI_MSI          
3886 +       int i;
3887 +
3888 +       if (np->msi_flags & NV_MSI_X_ENABLED) {
3889 +               for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3890 +                       free_irq(np->msi_x_entry[i].vector, dev);
3891 +               }
3892 +               pci_disable_msi(np->pci_dev);
3893 +               np->msi_flags &= ~NV_MSI_X_ENABLED;
3894 +       } else {
3895 +               free_irq(np->pci_dev->irq, dev);
3896 +
3897 +               if (np->msi_flags & NV_MSI_ENABLED) {
3898 +                       pci_disable_msi(np->pci_dev);
3899 +                       np->msi_flags &= ~NV_MSI_ENABLED;
3900 +               }
3901 +       }
3902 +#else
3903 +       free_irq(np->pci_dev->irq, dev);
3904 +#endif
3905 +       
3906 +}
3907 +#endif 
3908 +
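+/*
+ * nv_do_nic_poll runs as a timer callback when the normal interrupt path
+ * has been temporarily disabled. It masks the irq line(s) at the CPU,
+ * reinitializes the rings if the MAC reported a recoverable error, then
+ * calls the relevant ISR(s) by hand and re-enables the interrupt sources
+ * it handled.
+ */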
3909 +static void nv_do_nic_poll(unsigned long data)
3910 +{
3911 +       struct net_device *dev = (struct net_device *) data;
3912 +       struct fe_priv *np = get_nvpriv(dev);
3913 +       u8 __iomem *base = get_hwbase(dev);
3914 +       u32 mask = 0;
3915 +
3916 +       /*
3917 +        * First disable the irq(s) at the CPU and only then re-enable the
3918 +        * interrupt sources on the nic; this must happen before calling
3919 +        * nv_nic_irq because that handler may decide to do otherwise.
3920 +        */
3921 +
3922 +       if (!using_multi_irqs(dev)) {
3923 +               if (np->msi_flags & NV_MSI_X_ENABLED)
3924 +                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3925 +               else
3926 +                       disable_irq(dev->irq);
3927 +               mask = np->irqmask;
3928 +       } else {
3929 +               if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3930 +                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3931 +                       mask |= NVREG_IRQ_RX_ALL;
3932 +               }
3933 +               if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3934 +                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3935 +                       mask |= NVREG_IRQ_TX_ALL;
3936 +               }
3937 +               if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3938 +                       disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3939 +                       mask |= NVREG_IRQ_OTHER;
3940 +               }
3941 +       }
3942 +       np->nic_poll_irq = 0;
3943 +
3944 +       if (np->recover_error) {
3945 +               np->recover_error = 0;
3946 +               printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
3947 +               if (netif_running(dev)) {
3948 +                       spin_lock_bh(&dev->xmit_lock);
3949 +                       spin_lock(&np->lock);
3950 +                       /* stop engines */
3951 +                       nv_stop_rx(dev);
3952 +                       nv_stop_tx(dev);
3953 +                       nv_txrx_reset(dev);
3954 +                       /* drain rx queue */
3955 +                       nv_drain_rx(dev);
3956 +                       nv_drain_tx(dev);
3957 +                       /* reinit driver view of the rx queue */
3958 +                       set_bufsize(dev);
3959 +                       if (nv_init_ring(dev)) {
3960 +                               if (!np->in_shutdown)
3961 +                                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3962 +                       }
3963 +                       /* reinit nic view of the rx queue */
3964 +                       writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3965 +                       setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3966 +                       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3967 +                               base + NvRegRingSizes);
3968 +                       pci_push(base);
3969 +                       writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3970 +                       pci_push(base);
3971 +
3972 +                       /* restart rx engine */
3973 +                       nv_start_rx(dev);
3974 +                       nv_start_tx(dev);
3975 +                       spin_unlock(&np->lock);
3976 +                       spin_unlock_bh(&dev->xmit_lock);
3977 +               }
3978 +       }
3979 +       /* FIXME: Do we need synchronize_irq(dev->irq) here? */
3980 +       
3981 +       writel(mask, base + NvRegIrqMask);
3982 +       pci_push(base);
3983 +
3984 +       if (!using_multi_irqs(dev)) {
3985 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
3986 +                       nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
3987 +               else
3988 +                       nv_nic_irq_optimized((int) 0, (void *) data, (struct pt_regs *) NULL);
3989 +               if (np->msi_flags & NV_MSI_X_ENABLED)
3990 +                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3991 +               else
3992 +                       enable_irq(dev->irq);
3993 +       } else {
3994 +               if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3995 +                       nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
3996 +                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3997 +               }
3998 +               if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3999 +                       nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
4000 +                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4001 +               }
4002 +               if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4003 +                       nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
4004 +                       enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4005 +               }
4006 +       }
4007 +}
4008 +
4009 +#if NVVER > RHES3
4010 +#ifdef CONFIG_NET_POLL_CONTROLLER
4011 +static void nv_poll_controller(struct net_device *dev)
4012 +{
4013 +       nv_do_nic_poll((unsigned long) dev);
4014 +}
4015 +#endif
4016 +#else
4017 +static void nv_poll_controller(struct net_device *dev)
4018 +{
4019 +       nv_do_nic_poll((unsigned long) dev);
4020 +}
4021 +#endif
4022 +
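+/*
+ * The hardware statistics registers appear to be clear-on-read: each poll
+ * reads the deltas and accumulates them into np->estats (hence the "+=").
+ * Totals such as rx_packets and rx_errors_total are then derived and the
+ * results mirrored into the generic net_device stats.
+ */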
4023 +static void nv_do_stats_poll(unsigned long data)
4024 +{
4025 +       struct net_device *dev = (struct net_device *) data;
4026 +       struct fe_priv *np = get_nvpriv(dev);
4027 +       u8 __iomem *base = get_hwbase(dev);
4028 +
4029 +       spin_lock_irq(&np->lock);
4030 +       
4031 +       np->estats.tx_dropped = np->stats.tx_dropped;
4032 +       if (np->driver_data & DEV_HAS_STATISTICS) {
4033 +               np->estats.tx_packets += readl(base + NvRegTxFrame);
4034 +               np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
4035 +               np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
4036 +               np->estats.tx_bytes += readl(base + NvRegTxCnt);
4037 +               np->estats.rx_bytes += readl(base + NvRegRxCnt);
4038 +               np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
4039 +               np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
4040 +
4041 +               np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
4042 +               np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
4043 +               np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
4044 +               np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
4045 +               np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
4046 +               np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
4047 +               np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
4048 +               np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
4049 +               np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
4050 +               np->estats.rx_runt += readl(base + NvRegRxRunt);
4051 +               np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
4052 +               np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
4053 +               np->estats.rx_length_error += readl(base + NvRegRxLenErr);
4054 +               np->estats.rx_unicast += readl(base + NvRegRxUnicast);
4055 +               np->estats.rx_multicast += readl(base + NvRegRxMulticast);
4056 +               np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
4057 +               np->estats.tx_deferral += readl(base + NvRegTxDef);
4058 +               np->estats.tx_pause += readl(base + NvRegTxPause);
4059 +               np->estats.rx_pause += readl(base + NvRegRxPause);
4060 +               np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
4061 +               np->estats.rx_packets = 
4062 +                       np->estats.rx_unicast + 
4063 +                       np->estats.rx_multicast + 
4064 +                       np->estats.rx_broadcast;
4065 +               np->estats.rx_errors_total = 
4066 +                       np->estats.rx_crc_errors +
4067 +                       np->estats.rx_over_errors +
4068 +                       np->estats.rx_frame_error +
4069 +                       (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
4070 +                       np->estats.rx_late_collision +
4071 +                       np->estats.rx_runt +
4072 +                       np->estats.rx_frame_too_long +
4073 +                       np->rx_len_errors;
4074 +
4075 +               /* copy to net_device stats */
4076 +               np->stats.tx_packets = np->estats.tx_packets;
4077 +               np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
4078 +               np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
4079 +               np->stats.tx_bytes = np->estats.tx_bytes;
4080 +               np->stats.rx_bytes = np->estats.rx_bytes;
4081 +               np->stats.rx_crc_errors = np->estats.rx_crc_errors;
4082 +               np->stats.rx_over_errors = np->estats.rx_over_errors;
4083 +               np->stats.rx_packets = np->estats.rx_packets;
4084 +               np->stats.rx_errors = np->estats.rx_errors_total;
4085 +               
4086 +       } else {
4087 +               np->estats.tx_packets = np->stats.tx_packets;
4088 +               np->estats.tx_fifo_errors = np->stats.tx_fifo_errors;
4089 +               np->estats.tx_carrier_errors = np->stats.tx_carrier_errors;
4090 +               np->estats.tx_bytes = np->stats.tx_bytes;
4091 +               np->estats.rx_bytes = np->stats.rx_bytes;
4092 +               np->estats.rx_crc_errors = np->stats.rx_crc_errors;
4093 +               np->estats.rx_over_errors = np->stats.rx_over_errors;
4094 +               np->estats.rx_packets = np->stats.rx_packets;
4095 +               np->estats.rx_errors_total = np->stats.rx_errors;
4096 +       }
4097 +
4098 +       if (!np->in_shutdown && netif_running(dev))
4099 +               mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
4100 +       spin_unlock_irq(&np->lock);
4101 +}
4102 +
4103 +/*
4104 + * nv_get_stats: dev->get_stats function
4105 + * Get latest stats value from the nic.
4106 + * Called with read_lock(&dev_base_lock) held for read -
4107 + * only synchronized against unregister_netdevice.
4108 + */
4109 +static struct net_device_stats *nv_get_stats(struct net_device *dev)
4110 +{
4111 +       struct fe_priv *np = get_nvpriv(dev);
4112 +
4113 +       /* It seems that the nic always generates interrupts and doesn't
4114 +        * accumulate errors internally. Thus the current values in np->stats
4115 +        * are already up to date.
4116 +        */
4117 +       nv_do_stats_poll((unsigned long)dev);
4118 +       return &np->stats;
4119 +}
4120 +
4121 +static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4122 +{
4123 +       struct fe_priv *np = get_nvpriv(dev);
4124 +       strcpy(info->driver, "forcedeth");
4125 +       strcpy(info->version, FORCEDETH_VERSION);
4126 +       strcpy(info->bus_info, pci_name(np->pci_dev));
4127 +}
4128 +
4129 +static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4130 +{
4131 +       struct fe_priv *np = get_nvpriv(dev);
4132 +       wolinfo->supported = WAKE_MAGIC;
4133 +
4134 +       spin_lock_irq(&np->lock);
4135 +       if (np->wolenabled)
4136 +               wolinfo->wolopts = WAKE_MAGIC;
4137 +       spin_unlock_irq(&np->lock);
4138 +}
4139 +
4140 +static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4141 +{
4142 +       struct fe_priv *np = get_nvpriv(dev);
4143 +       u8 __iomem *base = get_hwbase(dev);
4144 +       u32 flags = 0;
4145 +
4146 +       if (wolinfo->wolopts == 0) {
4147 +               np->wolenabled = 0;
4148 +       } else if (wolinfo->wolopts & WAKE_MAGIC) {
4149 +               np->wolenabled = 1;
4150 +               flags = NVREG_WAKEUPFLAGS_ENABLE;
4151 +       }
4152 +       if (netif_running(dev)) {
4153 +               spin_lock_irq(&np->lock);
4154 +               writel(flags, base + NvRegWakeUpFlags);
4155 +               spin_unlock_irq(&np->lock);
4156 +       }
4157 +       return 0;
4158 +}
4159 +
4160 +static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4161 +{
4162 +       struct fe_priv *np = get_nvpriv(dev);
4163 +       int adv;
4164 +
4165 +       spin_lock_irq(&np->lock);
4166 +       ecmd->port = PORT_MII;
4167 +       if (!netif_running(dev)) {
4168 +               /* We do not track link speed / duplex setting if the
4169 +                * interface is disabled. Force a link check */
4170 +               if (nv_update_linkspeed(dev)) {
4171 +                       if (!netif_carrier_ok(dev))
4172 +                               netif_carrier_on(dev);
4173 +               } else {
4174 +                       if (netif_carrier_ok(dev))
4175 +                               netif_carrier_off(dev);
4176 +               }
4177 +       }
4178 +
4179 +       if (netif_carrier_ok(dev)) {
4180 +               switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4181 +               case NVREG_LINKSPEED_10:
4182 +                       ecmd->speed = SPEED_10;
4183 +                       break;
4184 +               case NVREG_LINKSPEED_100:
4185 +                       ecmd->speed = SPEED_100;
4186 +                       break;
4187 +               case NVREG_LINKSPEED_1000:
4188 +                       ecmd->speed = SPEED_1000;
4189 +                       break;
4190 +               }
4191 +               ecmd->duplex = DUPLEX_HALF;
4192 +               if (np->duplex)
4193 +                       ecmd->duplex = DUPLEX_FULL;
4194 +       } else {
4195 +               ecmd->speed = -1;
4196 +               ecmd->duplex = -1;
4197 +       }
4198 +
4199 +       ecmd->autoneg = np->autoneg;
4200 +
4201 +       ecmd->advertising = ADVERTISED_MII;
4202 +       if (np->autoneg) {
4203 +               ecmd->advertising |= ADVERTISED_Autoneg;
4204 +               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4205 +               if (adv & ADVERTISE_10HALF)
4206 +                       ecmd->advertising |= ADVERTISED_10baseT_Half;
4207 +               if (adv & ADVERTISE_10FULL)
4208 +                       ecmd->advertising |= ADVERTISED_10baseT_Full;
4209 +               if (adv & ADVERTISE_100HALF)
4210 +                       ecmd->advertising |= ADVERTISED_100baseT_Half;
4211 +               if (adv & ADVERTISE_100FULL)
4212 +                       ecmd->advertising |= ADVERTISED_100baseT_Full;
4213 +               if (np->gigabit == PHY_GIGABIT) {
4214 +                       adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4215 +                       if (adv & ADVERTISE_1000FULL)
4216 +                               ecmd->advertising |= ADVERTISED_1000baseT_Full;
4217 +               }
4218 +       }
4219 +       ecmd->supported = (SUPPORTED_Autoneg |
4220 +               SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4221 +               SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4222 +               SUPPORTED_MII);
4223 +       if (np->gigabit == PHY_GIGABIT)
4224 +               ecmd->supported |= SUPPORTED_1000baseT_Full;
4225 +
4226 +       ecmd->phy_address = np->phyaddr;
4227 +       ecmd->transceiver = XCVR_EXTERNAL;
4228 +
4229 +       /* ignore maxtxpkt, maxrxpkt for now */
4230 +       spin_unlock_irq(&np->lock);
4231 +       return 0;
4232 +}
4233 +
4234 +static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4235 +{
4236 +       struct fe_priv *np = get_nvpriv(dev);
4237 +
4238 +       dprintk(KERN_DEBUG "%s: nv_set_settings\n", dev->name);
4239 +       if (ecmd->port != PORT_MII)
4240 +               return -EINVAL;
4241 +       if (ecmd->transceiver != XCVR_EXTERNAL)
4242 +               return -EINVAL;
4243 +       if (ecmd->phy_address != np->phyaddr) {
4244 +               /* TODO: support switching between multiple phys. Should be
4245 +                * trivial, but not enabled due to lack of test hardware. */
4246 +               return -EINVAL;
4247 +       }
4248 +       if (ecmd->autoneg == AUTONEG_ENABLE) {
4249 +               u32 mask;
4250 +
4251 +               mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4252 +                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4253 +               if (np->gigabit == PHY_GIGABIT)
4254 +                       mask |= ADVERTISED_1000baseT_Full;
4255 +
4256 +               if ((ecmd->advertising & mask) == 0)
4257 +                       return -EINVAL;
4258 +
4259 +       } else if (ecmd->autoneg == AUTONEG_DISABLE) {
4260 +               /* Note: autonegotiation disabled; forcing speed 1000 is
4261 +                * intentionally forbidden - no one should need that. */
4262 +
4263 +               if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
4264 +                       return -EINVAL;
4265 +               if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
4266 +                       return -EINVAL;
4267 +       } else {
4268 +               return -EINVAL;
4269 +       }
4270 +
4271 +       netif_carrier_off(dev);
4272 +       if (netif_running(dev)) {
4273 +               nv_disable_irq(dev);
4274 +               spin_lock_bh(&dev->xmit_lock);
4275 +               spin_lock(&np->lock);
4276 +               /* stop engines */
4277 +               nv_stop_rx(dev);
4278 +               nv_stop_tx(dev);
4279 +               spin_unlock(&np->lock);
4280 +               spin_unlock_bh(&dev->xmit_lock);
4281 +       }
4282 +
4283 +       if (ecmd->autoneg == AUTONEG_ENABLE) {
4284 +               int adv, bmcr;
4285 +
4286 +               np->autoneg = 1;
4287 +
4288 +               /* advertise only what has been requested */
4289 +               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4290 +               adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4291 +               if (ecmd->advertising & ADVERTISED_10baseT_Half)
4292 +                       adv |= ADVERTISE_10HALF;
4293 +               if (ecmd->advertising & ADVERTISED_10baseT_Full)
4294 +                       adv |= ADVERTISE_10FULL;
4295 +               if (ecmd->advertising & ADVERTISED_100baseT_Half)
4296 +                       adv |= ADVERTISE_100HALF;
4297 +               if (ecmd->advertising & ADVERTISED_100baseT_Full)
4298 +                       adv |= ADVERTISE_100FULL;
4299 +               if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisement bits but disable tx pause */
4300 +                       adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4301 +               if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4302 +                       adv |=  ADVERTISE_PAUSE_ASYM;
4303 +               mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4304 +
4305 +               if (np->gigabit == PHY_GIGABIT) {
4306 +                       adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4307 +                       adv &= ~ADVERTISE_1000FULL;
4308 +                       if (ecmd->advertising & ADVERTISED_1000baseT_Full)
4309 +                               adv |= ADVERTISE_1000FULL;
4310 +                       mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4311 +               }
4312 +
4313 +               if (netif_running(dev))
4314 +                       printk(KERN_INFO "%s: link down.\n", dev->name);
4315 +               bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4316 +               if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4317 +                       bmcr |= BMCR_ANENABLE;
4318 +                       /* reset the phy in order for settings to stick,
4319 +                        * and cause autoneg to start */
4320 +                       if (phy_reset(dev, bmcr)) {
4321 +                               printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4322 +                               return -EINVAL;
4323 +                       }
4324 +               } else {
4325 +                       bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4326 +                       mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4327 +               }
4328 +       } else {
4329 +               int adv, bmcr;
4330 +
4331 +               np->autoneg = 0;
4332 +
4333 +               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4334 +               adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4335 +               if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
4336 +                       adv |= ADVERTISE_10HALF;
4337 +               if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
4338 +                       adv |= ADVERTISE_10FULL;
4339 +               if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
4340 +                       adv |= ADVERTISE_100HALF;
4341 +               if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
4342 +                       adv |= ADVERTISE_100FULL;
4343 +               np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4344 +               if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisement bits but disable tx pause */
4345 +                       adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4346 +                       np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4347 +               }
4348 +               if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4349 +                       adv |=  ADVERTISE_PAUSE_ASYM;
4350 +                       np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4351 +               }
4352 +               mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4353 +               np->fixed_mode = adv;
4354 +
4355 +               if (np->gigabit == PHY_GIGABIT) {
4356 +                       adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4357 +                       adv &= ~ADVERTISE_1000FULL;
4358 +                       mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4359 +               }
4360 +
4361 +               bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4362 +               bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4363 +               if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4364 +                       bmcr |= BMCR_FULLDPLX;
4365 +               if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4366 +                       bmcr |= BMCR_SPEED100;
4367 +               if (np->phy_oui == PHY_OUI_MARVELL) {
4368 +                       /* reset the phy in order for forced mode settings to stick */
4369 +                       if (phy_reset(dev, bmcr)) {
4370 +                               printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4371 +                               return -EINVAL;
4372 +                       }
4373 +               } else {
4374 +                       mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4375 +                       if (netif_running(dev)) {
4376 +                               /* Wait a bit and then reconfigure the nic. */
4377 +                               udelay(10);
4378 +                               nv_linkchange(dev);
4379 +                       }
4380 +               }
4381 +       }
4382 +
4383 +       if (netif_running(dev)) {
4384 +               nv_start_rx(dev);
4385 +               nv_start_tx(dev);
4386 +               nv_enable_irq(dev);
4387 +       }
4388 +
4389 +       return 0;
4390 +}
4391 +
4392 +#define FORCEDETH_REGS_VER     1
4393 +
4394 +static int nv_get_regs_len(struct net_device *dev)
4395 +{
4396 +       struct fe_priv *np = get_nvpriv(dev);
4397 +       return np->register_size;
4398 +}
4399 +
4400 +static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4401 +{
4402 +       struct fe_priv *np = get_nvpriv(dev);
4403 +       u8 __iomem *base = get_hwbase(dev);
4404 +       u32 *rbuf = buf;
4405 +       int i;
4406 +
4407 +       regs->version = FORCEDETH_REGS_VER;
4408 +       spin_lock_irq(&np->lock);
4409 +       for (i = 0; i < np->register_size/sizeof(u32); i++)
4410 +               rbuf[i] = readl(base + i*sizeof(u32));
4411 +       spin_unlock_irq(&np->lock);
4412 +}
4413 +
4414 +static int nv_nway_reset(struct net_device *dev)
4415 +{
4416 +       struct fe_priv *np = get_nvpriv(dev);
4417 +       int ret;
4418 +
4419 +       if (np->autoneg) {
4420 +               int bmcr;
4421 +
4422 +               netif_carrier_off(dev);
4423 +               if (netif_running(dev)) {
4424 +                       nv_disable_irq(dev);
4425 +                       spin_lock_bh(&dev->xmit_lock);
4426 +                       spin_lock(&np->lock);
4427 +                       /* stop engines */
4428 +                       nv_stop_rx(dev);
4429 +                       nv_stop_tx(dev);
4430 +                       spin_unlock(&np->lock);
4431 +                       spin_unlock_bh(&dev->xmit_lock);
4432 +                       printk(KERN_INFO "%s: link down.\n", dev->name);
4433 +               }
4434 +
4435 +               bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4436 +               if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4437 +                       bmcr |= BMCR_ANENABLE;
4438 +                       /* reset the phy in order for settings to stick*/
4439 +                       if (phy_reset(dev, bmcr)) {
4440 +                               printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4441 +                               return -EINVAL;
4442 +                       }
4443 +               } else {
4444 +                       bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4445 +                       mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4446 +               }
4447 +
4448 +               if (netif_running(dev)) {
4449 +                       nv_start_rx(dev);
4450 +                       nv_start_tx(dev);
4451 +                       nv_enable_irq(dev);
4452 +               }
4453 +               ret = 0;
4454 +       } else {
4455 +               ret = -EINVAL;
4456 +       }
4457 +
4458 +       return ret;
4459 +}
4460 +
4461 +static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4462 +{
4463 +       struct fe_priv *np = get_nvpriv(dev);
4464 +
4465 +       ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4466 +       ring->rx_mini_max_pending = 0;
4467 +       ring->rx_jumbo_max_pending = 0;
4468 +       ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4469 +
4470 +       ring->rx_pending = np->rx_ring_size;
4471 +       ring->rx_mini_pending = 0;
4472 +       ring->rx_jumbo_pending = 0;
4473 +       ring->tx_pending = np->tx_ring_size;
4474 +}
4475 +
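+/*
+ * nv_set_ringparam resizes the rx/tx rings: the new descriptor ring and
+ * skb maps are allocated first, and only if all allocations succeed are
+ * the engines stopped, the old rings freed and the new ones swapped in,
+ * so a failed resize leaves the running configuration untouched.
+ */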
4476 +static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4477 +{
4478 +       struct fe_priv *np = get_nvpriv(dev);
4479 +       u8 __iomem *base = get_hwbase(dev);
4480 +       u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4481 +       dma_addr_t ring_addr;
4482  
4483 -               /* advertise only what has been requested */
4484 -               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4485 -               adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
4486 -               if (ecmd->advertising & ADVERTISED_10baseT_Half)
4487 -                       adv |= ADVERTISE_10HALF;
4488 -               if (ecmd->advertising & ADVERTISED_10baseT_Full)
4489 -                       adv |= ADVERTISE_10FULL;
4490 -               if (ecmd->advertising & ADVERTISED_100baseT_Half)
4491 -                       adv |= ADVERTISE_100HALF;
4492 -               if (ecmd->advertising & ADVERTISED_100baseT_Full)
4493 -                       adv |= ADVERTISE_100FULL;
4494 -               mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4495 +       if (ring->rx_pending < RX_RING_MIN ||
4496 +           ring->tx_pending < TX_RING_MIN ||
4497 +           ring->rx_mini_pending != 0 ||
4498 +           ring->rx_jumbo_pending != 0 ||
4499 +           (np->desc_ver == DESC_VER_1 && 
4500 +            (ring->rx_pending > RING_MAX_DESC_VER_1 || 
4501 +             ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4502 +           (np->desc_ver != DESC_VER_1 && 
4503 +            (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 
4504 +             ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4505 +               return -EINVAL;
4506 +       }
4507  
4508 -               if (np->gigabit == PHY_GIGABIT) {
4509 -                       adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
4510 -                       adv &= ~ADVERTISE_1000FULL;
4511 -                       if (ecmd->advertising & ADVERTISED_1000baseT_Full)
4512 -                               adv |= ADVERTISE_1000FULL;
4513 -                       mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
4514 +       /* allocate new rings */
4515 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4516 +               rxtx_ring = pci_alloc_consistent(np->pci_dev,
4517 +                                           sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4518 +                                           &ring_addr);
4519 +       } else {
4520 +               rxtx_ring = pci_alloc_consistent(np->pci_dev,
4521 +                                           sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4522 +                                           &ring_addr);
4523 +       }
4524 +       rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4525 +       tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4526 +
4527 +       if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4528 +               /* fall back to old rings */
4529 +               if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4530 +                       if(rxtx_ring)
4531 +                               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4532 +                                                   rxtx_ring, ring_addr);
4533 +               } else {
4534 +                       if (rxtx_ring)
4535 +                               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4536 +                                                   rxtx_ring, ring_addr);
4537 +               }
4538 +               if (rx_skbuff)
4539 +                       kfree(rx_skbuff);
4540 +               if (tx_skbuff)
4541 +                       kfree(tx_skbuff);
4542 +               goto exit;
4543 +       }
4544 +
4545 +       if (netif_running(dev)) {
4546 +               nv_disable_irq(dev);
4547 +               spin_lock_bh(&dev->xmit_lock);
4548 +               spin_lock(&np->lock);
4549 +               /* stop engines */
4550 +               nv_stop_rx(dev);
4551 +               nv_stop_tx(dev);
4552 +               nv_txrx_reset(dev);
4553 +               /* drain queues */
4554 +               nv_drain_rx(dev);
4555 +               nv_drain_tx(dev);
4556 +               /* delete queues */
4557 +               free_rings(dev);
4558 +       }
4559 +       
4560 +       /* set new values */
4561 +       np->rx_ring_size = ring->rx_pending;
4562 +       np->tx_ring_size = ring->tx_pending;
4563 +       np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
4564 +       np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
4565 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4566 +               np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
4567 +               np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4568 +       } else {
4569 +               np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
4570 +               np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4571 +       }
4572 +       np->rx_skb = (struct nv_skb_map*)rx_skbuff;
4573 +       np->tx_skb = (struct nv_skb_map*)tx_skbuff;
4574 +       np->ring_addr = ring_addr;
4575 +       
4576 +       memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4577 +       memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4578 +
4579 +       if (netif_running(dev)) {
4580 +               /* reinit driver view of the queues */
4581 +               set_bufsize(dev);
4582 +               if (nv_init_ring(dev)) {
4583 +                       if (!np->in_shutdown)
4584 +                               mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4585                 }
4586 +               
4587 +               /* reinit nic view of the queues */
4588 +               writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4589 +               setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4590 +               writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4591 +                       base + NvRegRingSizes);
4592 +               pci_push(base);
4593 +               writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4594 +               pci_push(base);
4595 +               
4596 +               /* restart engines */
4597 +               nv_start_rx(dev);
4598 +               nv_start_tx(dev);
4599 +               spin_unlock(&np->lock);
4600 +               spin_unlock_bh(&dev->xmit_lock);
4601 +               nv_enable_irq(dev);
4602 +       }
4603 +       return 0;
4604 +exit:
4605 +       return -ENOMEM;
4606 +}
4607  
4608 +static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4609 +{
4610 +       struct fe_priv *np = get_nvpriv(dev);
4611 +
4612 +       pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4613 +       pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4614 +       pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4615 +}
4616 +
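+/*
+ * nv_set_pauseparam: with autoneg, the requested rx/tx pause modes are
+ * expressed through the PAUSE_CAP/PAUSE_ASYM advertisement bits and a
+ * renegotiation is kicked off; without autoneg the pause configuration
+ * is programmed directly via nv_update_pause().
+ */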
4617 +static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4618 +{
4619 +       struct fe_priv *np = get_nvpriv(dev);
4620 +       int adv, bmcr;
4621 +
4622 +       if ((!np->autoneg && np->duplex == 0) ||
4623 +           (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4624 +               printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n",
4625 +                      dev->name);
4626 +               return -EINVAL;
4627 +       }
4628 +       if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4629 +               printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
4630 +               return -EINVAL;
4631 +       }
4632 +
4633 +       netif_carrier_off(dev);
4634 +       if (netif_running(dev)) {
4635 +               nv_disable_irq(dev);
4636 +               spin_lock_bh(&dev->xmit_lock);
4637 +               spin_lock(&np->lock);
4638 +               /* stop engines */
4639 +               nv_stop_rx(dev);
4640 +               nv_stop_tx(dev);
4641 +               spin_unlock(&np->lock);
4642 +               spin_unlock_bh(&dev->xmit_lock);
4643 +       }
4644 +
4645 +       np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4646 +       if (pause->rx_pause)
4647 +               np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4648 +       if (pause->tx_pause)
4649 +               np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4650 +
4651 +       if (np->autoneg && pause->autoneg) {
4652 +               np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4653 +
4654 +               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4655 +               adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4656 +               if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisement bits but disable tx pause */
4657 +                       adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4658 +               if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4659 +                       adv |=  ADVERTISE_PAUSE_ASYM;
4660 +               mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4661 +
4662 +               if (netif_running(dev))
4663 +                       printk(KERN_INFO "%s: link down.\n", dev->name);
4664                 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4665                 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4666                 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4667 +       } else {
4668 +               np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4669 +               if (pause->rx_pause)
4670 +                       np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4671 +               if (pause->tx_pause)
4672 +                       np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4673 +
4674 +               if (!netif_running(dev))
4675 +                       nv_update_linkspeed(dev);
4676 +               else
4677 +                       nv_update_pause(dev, np->pause_flags);
4678 +       }
4679 +
4680 +       if (netif_running(dev)) {
4681 +               nv_start_rx(dev);
4682 +               nv_start_tx(dev);
4683 +               nv_enable_irq(dev);
4684 +       }
4685 +       return 0;
4686 +}
4687 +
4688 +static u32 nv_get_rx_csum(struct net_device *dev)
4689 +{
4690 +       struct fe_priv *np = get_nvpriv(dev);
4691 +       return (np->rx_csum) != 0;
4692 +}
4693 +
4694 +static int nv_set_rx_csum(struct net_device *dev, u32 data)
4695 +{
4696 +       struct fe_priv *np = get_nvpriv(dev);
4697 +       u8 __iomem *base = get_hwbase(dev);
4698 +       int retcode = 0;
4699  
4700 +       if (np->driver_data & DEV_HAS_CHECKSUM) {
4701 +
4702 +               if (data) {
4703 +                       np->rx_csum = 1;
4704 +                       np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4705 +               } else {
4706 +                       np->rx_csum = 0;
4707 +                       /* vlan is dependent on rx checksum offload */
4708 +                       if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
4709 +                               np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4710 +               }
4711 +
4712 +               if (netif_running(dev)) {
4713 +                       spin_lock_irq(&np->lock);
4714 +                       writel(np->txrxctl_bits, base + NvRegTxRxControl);
4715 +                       spin_unlock_irq(&np->lock);
4716 +               }
4717         } else {
4718 -               int adv, bmcr;
4719 +               return -EINVAL;
4720 +       }
4721  
4722 -               np->autoneg = 0;
4723 +       return retcode;
4724 +}
4725  
4726 -               adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4727 -               adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
4728 -               if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
4729 -                       adv |= ADVERTISE_10HALF;
4730 -               if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
4731 -                       adv |= ADVERTISE_10FULL;
4732 -               if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
4733 -                       adv |= ADVERTISE_100HALF;
4734 -               if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
4735 -                       adv |= ADVERTISE_100FULL;
4736 -               mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4737 -               np->fixed_mode = adv;
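+/*
+ * Offload dependencies on older kernels (NVVER < SUSE10): TSO requires
+ * scatter-gather, and scatter-gather requires tx checksumming, so the
+ * setters below refuse to enable a feature whose prerequisite is off and
+ * cascade a disable down to the dependent features.
+ */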
4738 +#ifdef NETIF_F_TSO
4739 +static int nv_set_tso(struct net_device *dev, u32 data)
4740 +{
4741 +       struct fe_priv *np = get_nvpriv(dev);
4742 +       
4743 +       if (np->driver_data & DEV_HAS_CHECKSUM) {
4744 +#if NVVER < SUSE10
4745 +               if (data) {
4746 +                       if (ethtool_op_get_sg(dev) == 0)
4747 +                               return -EINVAL;
4748 +               }
4749 +#endif
4750 +               return ethtool_op_set_tso(dev, data);
4751 +       } else
4752 +               return -EINVAL;
4753 +}
4754 +#endif
4755 +
4756 +static int nv_set_sg(struct net_device *dev, u32 data)
4757 +{
4758 +       struct fe_priv *np = get_nvpriv(dev);
4759 +       
4760 +       if (np->driver_data & DEV_HAS_CHECKSUM) {
4761 +#if NVVER < SUSE10
4762 +               if (data) {
4763 +                       if (ethtool_op_get_tx_csum(dev) == 0)
4764 +                               return -EINVAL;
4765 +               }
4766 +#ifdef NETIF_F_TSO
4767 +               /* set tso off */
4768 +               if (!data)
4769 +                       nv_set_tso(dev, data);
4770 +#endif
4771 +#endif
4772 +               return ethtool_op_set_sg(dev, data);
4773 +       } else
4774 +               return -EINVAL;
4775 +}
4776 +
4777 +static int nv_set_tx_csum(struct net_device *dev, u32 data)
4778 +{
4779 +       struct fe_priv *np = get_nvpriv(dev);
4780 +
4781 +#if NVVER < SUSE10
4782 +       /* set sg off if tx off */
4783 +       if (!data)
4784 +               nv_set_sg(dev, data);
4785 +#endif
4786 +       if (np->driver_data & DEV_HAS_CHECKSUM)
4787 +#if NVVER > RHES4
4788 +               return ethtool_op_set_tx_hw_csum(dev, data);
4789 +#else
4790 +       {
4791 +               if (data)
4792 +                       dev->features |= NETIF_F_IP_CSUM;
4793 +               else
4794 +                       dev->features &= ~NETIF_F_IP_CSUM;
4795 +               return 0;
4796 +       }
4797 +#endif
4798 +       else
4799 +               return -EINVAL;
4800 +}
4801 +
4802 +static int nv_get_stats_count(struct net_device *dev)
4803 +{
4804 +       struct fe_priv *np = get_nvpriv(dev);
4805 +
4806 +       if (np->driver_data & DEV_HAS_STATISTICS)
4807 +               return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
4808 +       else
4809 +               return NV_STATS_COUNT_SW;
4810 +}
4811 +
4812 +static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4813 +{
4814 +       struct fe_priv *np = get_nvpriv(dev);
4815 +
4816 +       /* update stats */
4817 +       nv_do_stats_poll((unsigned long)dev);
4818 +
4819 +       memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
4820 +}
4821 +
4822 +static int nv_self_test_count(struct net_device *dev)
4823 +{
4824 +       struct fe_priv *np = get_nvpriv(dev);
4825 +
4826 +       if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4827 +               return NV_TEST_COUNT_EXTENDED;
4828 +       else
4829 +               return NV_TEST_COUNT_BASE;
4830 +}
4831 +
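+/* The link status bit in BMSR is latched low, so it is read twice and
+ * the second (current) value is used. Returns 1 if the link is up. */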
4832 +static int nv_link_test(struct net_device *dev)
4833 +{
4834 +       struct fe_priv *np = get_nvpriv(dev);
4835 +       int mii_status;
4836 +
4837 +       mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4838 +       mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4839 +
4840 +       /* check phy link status */
4841 +       if (!(mii_status & BMSR_LSTATUS))
4842 +               return 0;
4843 +       else
4844 +               return 1;
4845 +}
4846 +
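+/*
+ * nv_register_test walks the nv_registers_test[] table (terminated by a
+ * zero register offset), XOR-toggles the maskable bits of each register,
+ * verifies they read back toggled, and restores the original value.
+ * Returns 1 on success, 0 on the first mismatch.
+ */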
4847 +static int nv_register_test(struct net_device *dev)
4848 +{
4849 +       u8 __iomem *base = get_hwbase(dev);
4850 +       int i = 0;
4851 +       u32 orig_read, new_read;
4852 +
4853 +       do {
4854 +               orig_read = readl(base + nv_registers_test[i].reg);
4855 +
4856 +               /* xor with mask to toggle bits */
4857 +               orig_read ^= nv_registers_test[i].mask;
4858 +
4859 +               writel(orig_read, base + nv_registers_test[i].reg);
4860 +
4861 +               new_read = readl(base + nv_registers_test[i].reg);
4862 +
4863 +               if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4864 +                       return 0;
4865 +
4866 +               /* restore original value */
4867 +               orig_read ^= nv_registers_test[i].mask;
4868 +               writel(orig_read, base + nv_registers_test[i].reg);
4869 +
4870 +       } while (nv_registers_test[++i].reg != 0);
4871 +
4872 +       return 1;
4873 +}
4874 +
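+/*
+ * nv_interrupt_test temporarily swaps in the nv_nic_irq_test handler on a
+ * single vector, arms the timer interrupt and waits for the handler to set
+ * np->intr_test. Returns 1 on pass, 2 if no interrupt was seen, and 0 if
+ * the test irq could not be set up or the original irq not restored.
+ */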
4875 +static int nv_interrupt_test(struct net_device *dev)
4876 +{
4877 +       struct fe_priv *np = get_nvpriv(dev);
4878 +       u8 __iomem *base = get_hwbase(dev);
4879 +       int ret = 1;
4880 +       int testcnt;
4881 +       u32 save_msi_flags, save_poll_interval = 0;
4882 +
4883 +       if (netif_running(dev)) {
4884 +               /* free current irq */
4885 +               nv_free_irq(dev);
4886 +               save_poll_interval = readl(base+NvRegPollingInterval);
4887 +       }
4888 +
4889 +       /* flag to test interrupt handler */
4890 +       np->intr_test = 0;
4891 +
4892 +       /* setup test irq */
4893 +       save_msi_flags = np->msi_flags;
4894 +       np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4895 +       np->msi_flags |= 0x001; /* setup 1 vector */
4896 +       if (nv_request_irq(dev, 1))
4897 +               return 0;
4898 +
4899 +       /* setup timer interrupt */
4900 +       writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4901 +       writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4902 +
4903 +       nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4904 +
4905 +       /* wait for at least one interrupt */
4906 +       nv_msleep(100);
4907 +
4908 +       spin_lock_irq(&np->lock);
4909 +
4910 +       /* flag should be set within ISR */
4911 +       testcnt = np->intr_test;
4912 +       if (!testcnt)
4913 +               ret = 2;
4914 +
4915 +       nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4916 +       if (!(np->msi_flags & NV_MSI_X_ENABLED))
4917 +               writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4918 +       else
4919 +               writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4920 +
4921 +       spin_unlock_irq(&np->lock);
4922 +
4923 +       nv_free_irq(dev);
4924 +
4925 +       np->msi_flags = save_msi_flags;
4926 +
4927 +       if (netif_running(dev)) {
4928 +               writel(save_poll_interval, base + NvRegPollingInterval);
4929 +               writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4930 +               /* restore original irq */
4931 +               if (nv_request_irq(dev, 0))
4932 +                       return 0;
4933 +       }
4934 +
4935 +       return ret;
4936 +}
4937 +
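+/*
+ * nv_loopback_test forces the MAC into loopback (NVREG_MISC1_FORCE plus
+ * NVREG_PFF_LOOPBACK), transmits a single ETH_DATA_LEN frame filled with
+ * a counting byte pattern, then checks that the received descriptor
+ * carries the same length and payload. Returns 1 on pass, 0 on failure.
+ */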
4938 +static int nv_loopback_test(struct net_device *dev)
4939 +{
4940 +       struct fe_priv *np = get_nvpriv(dev);
4941 +       u8 __iomem *base = get_hwbase(dev);
4942 +       struct sk_buff *tx_skb, *rx_skb;
4943 +       dma_addr_t test_dma_addr;
4944 +       u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
4945 +       u32 Flags;
4946 +       int len, i, pkt_len;
4947 +       u8 *pkt_data;
4948 +       u32 filter_flags = 0;
4949 +       u32 misc1_flags = 0;
4950 +       int ret = 1;
4951 +
4952 +       if (netif_running(dev)) {
4953 +               nv_disable_irq(dev);
4954 +               filter_flags = readl(base + NvRegPacketFilterFlags);
4955 +               misc1_flags = readl(base + NvRegMisc1);
4956 +       } else {
4957 +               nv_txrx_reset(dev);
4958 +       }
4959 +
4960 +       /* reinit driver view of the rx queue */
4961 +       set_bufsize(dev);
4962 +       nv_init_ring(dev);
4963 +
4964 +       /* setup hardware for loopback */
4965 +       writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
4966 +       writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
4967 +
4968 +       /* reinit nic view of the rx queue */
4969 +       writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4970 +       setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4971 +       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4972 +               base + NvRegRingSizes);
4973 +       pci_push(base);
4974 +
4975 +       /* restart rx engine */
4976 +       nv_start_rx(dev);
4977 +       nv_start_tx(dev);
4978  
4979 -               if (np->gigabit == PHY_GIGABIT) {
4980 -                       adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
4981 -                       adv &= ~ADVERTISE_1000FULL;
4982 -                       mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
4983 -               }
4984 +       /* setup packet for tx */
4985 +       pkt_len = ETH_DATA_LEN;
4986 +       tx_skb = dev_alloc_skb(pkt_len);
4987 +       pkt_data = skb_put(tx_skb, pkt_len);
4988 +       for (i = 0; i < pkt_len; i++)
4989 +               pkt_data[i] = (u8)(i & 0xff);
4990 +       /* the NIC reads this buffer, so map it to-device (matches the unmap) */
4991 +       test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
4992 +                                      tx_skb->end-tx_skb->data, PCI_DMA_TODEVICE);
4992  
4993 -               bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4994 -               bmcr |= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
4995 -               if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4996 -                       bmcr |= BMCR_FULLDPLX;
4997 -               if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4998 -                       bmcr |= BMCR_SPEED100;
4999 -               mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
5000 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
5001 +               np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
5002 +               np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5003 +       } else {
5004 +               np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
5005 +               np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
5006 +               np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5007 +       }
5008 +       writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5009 +       pci_push(get_hwbase(dev));
5010  
5011 -               if (netif_running(dev)) {
5012 -                       /* Wait a bit and then reconfigure the nic. */
5013 -                       udelay(10);
5014 -                       nv_linkchange(dev);
5015 +       nv_msleep(500);
5016 +
5017 +       /* check for rx of the packet */
5018 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
5019 +               Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
5020 +               len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
5022 +       } else {
5023 +               Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
5024 +               len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
5025 +       }
5026 +
5027 +       if (Flags & NV_RX_AVAIL) {
5028 +               ret = 0;
5029 +       } else if (np->desc_ver == DESC_VER_1) {
5030 +               if (Flags & NV_RX_ERROR)
5031 +                       ret = 0;
5032 +       } else {
5033 +               if (Flags & NV_RX2_ERROR) {
5034 +                       ret = 0;
5035                 }
5036         }
5037 -       spin_unlock_irq(&np->lock);
5038  
5039 -       return 0;
5040 -}
5041 +       if (ret) {              
5042 +               if (len != pkt_len) {
5043 +                       ret = 0;
5044 +                       dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 
5045 +                               dev->name, len, pkt_len);
5046 +               } else {
5047 +                       rx_skb = np->rx_skb[0].skb;
5048 +                       for (i = 0; i < pkt_len; i++) {
5049 +                               if (rx_skb->data[i] != (u8)(i & 0xff)) {
5050 +                                       ret = 0;
5051 +                                       dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", 
5052 +                                               dev->name, i);
5053 +                                       break;
5054 +                               }
5055 +                       }
5056 +               }
5057 +       } else {
5058 +               dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
5059 +       }
5060  
5061 -#define FORCEDETH_REGS_VER     1
5062 -#define FORCEDETH_REGS_SIZE    0x400 /* 256 32-bit registers */
5063 +       pci_unmap_single(np->pci_dev, test_dma_addr,
5064 +                        tx_skb->end-tx_skb->data,
5065 +                        PCI_DMA_TODEVICE);
5066 +       dev_kfree_skb_any(tx_skb);
5067 +       
5068 +       /* stop engines */
5069 +       nv_stop_rx(dev);
5070 +       nv_stop_tx(dev);
5071 +       nv_txrx_reset(dev);
5072 +       /* drain rx and tx queues */
5073 +       nv_drain_rx(dev);
5074 +       nv_drain_tx(dev);
5075  
5076 -static int nv_get_regs_len(struct net_device *dev)
5077 -{
5078 -       return FORCEDETH_REGS_SIZE;
5079 +       if (netif_running(dev)) {
5080 +               writel(misc1_flags, base + NvRegMisc1);
5081 +               writel(filter_flags, base + NvRegPacketFilterFlags);
5082 +               nv_enable_irq(dev);
5083 +       }
5084 +
5085 +       return ret;
5086  }
5087  
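nv_loopback_test() above exercises the full MAC datapath: it puts the chip into loopback via NvRegPacketFilterFlags, transmits one ETH_DATA_LEN frame whose payload is the byte pattern (i & 0xff), and verifies both length and payload on the receive side. The payload check, pulled out as a standalone helper for clarity (illustration only, not part of the patch):

    /* Returns the index of the first bad byte, or -1 if the pattern is intact. */
    static int check_pattern(const unsigned char *buf, int len)
    {
            int i;

            for (i = 0; i < len; i++)
                    if (buf[i] != (unsigned char)(i & 0xff))
                            return i;
            return -1;
    }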
5088 -static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
5089 +static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
5090  {
5091 -       struct fe_priv *np = netdev_priv(dev);
5092 +       struct fe_priv *np = get_nvpriv(dev);
5093         u8 __iomem *base = get_hwbase(dev);
5094 -       u32 *rbuf = buf;
5095 -       int i;
5096 +       int result;
5097 +       memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
5098  
5099 -       regs->version = FORCEDETH_REGS_VER;
5100 -       spin_lock_irq(&np->lock);
5101 -       for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
5102 -               rbuf[i] = readl(base + i*sizeof(u32));
5103 -       spin_unlock_irq(&np->lock);
5104 -}
5105 +       if (!nv_link_test(dev)) {
5106 +               test->flags |= ETH_TEST_FL_FAILED;
5107 +               buffer[0] = 1;
5108 +       }
5109  
5110 -static int nv_nway_reset(struct net_device *dev)
5111 -{
5112 -       struct fe_priv *np = netdev_priv(dev);
5113 -       int ret;
5114 +       if (test->flags & ETH_TEST_FL_OFFLINE) {
5115 +               if (netif_running(dev)) {
5116 +                       netif_stop_queue(dev);
5117 +                       spin_lock_bh(&dev->xmit_lock);
5118 +                       spin_lock_irq(&np->lock);
5119 +                       nv_disable_hw_interrupts(dev, np->irqmask);
5120 +                       if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
5121 +                               writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5122 +                       } else {
5123 +                               writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5124 +                       }
5125 +                       /* stop engines */
5126 +                       nv_stop_rx(dev);
5127 +                       nv_stop_tx(dev);
5128 +                       nv_txrx_reset(dev);
5129 +                       /* drain rx and tx queues */
5130 +                       nv_drain_rx(dev);
5131 +                       nv_drain_tx(dev);
5132 +                       spin_unlock_irq(&np->lock);
5133 +                       spin_unlock_bh(&dev->xmit_lock);
5134 +               }
5135  
5136 -       spin_lock_irq(&np->lock);
5137 -       if (np->autoneg) {
5138 -               int bmcr;
5139 +               if (!nv_register_test(dev)) {
5140 +                       test->flags |= ETH_TEST_FL_FAILED;
5141 +                       buffer[1] = 1;
5142 +               }
5143  
5144 -               bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
5145 -               bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
5146 -               mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
5147 +               result = nv_interrupt_test(dev);
5148 +               if (result != 1) {
5149 +                       test->flags |= ETH_TEST_FL_FAILED;
5150 +                       buffer[2] = 1;
5151 +               }
5152 +               if (result == 0) {
5153 +                       /* bail out */
5154 +                       return;
5155 +               }
5156  
5157 -               ret = 0;
5158 -       } else {
5159 -               ret = -EINVAL;
5160 +               if (!nv_loopback_test(dev)) {
5161 +                       test->flags |= ETH_TEST_FL_FAILED;
5162 +                       buffer[3] = 1;
5163 +               }
5164 +
5165 +               if (netif_running(dev)) {
5166 +                       /* reinit driver view of the rx queue */
5167 +                       set_bufsize(dev);
5168 +                       if (nv_init_ring(dev)) {
5169 +                               if (!np->in_shutdown)
5170 +                                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5171 +                       }
5172 +                       /* reinit nic view of the rx queue */
5173 +                       writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5174 +                       setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5175 +                       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5176 +                               base + NvRegRingSizes);
5177 +                       pci_push(base);
5178 +                       writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5179 +                       pci_push(base);
5180 +                       /* restart rx engine */
5181 +                       nv_start_rx(dev);
5182 +                       nv_start_tx(dev);
5183 +                       netif_start_queue(dev);
5184 +                       nv_enable_hw_interrupts(dev, np->irqmask);
5185 +               }
5186         }
5187 -       spin_unlock_irq(&np->lock);
5188 +}
5189  
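The buffer[] slots filled by nv_self_test() map, in order, to the link, register, interrupt and loopback tests; a non-zero slot marks a failure, mirrored by ETH_TEST_FL_FAILED in test->flags. User space reaches this handler through the standard ETHTOOL_TEST ioctl. A minimal userspace sketch (editor's illustration, not part of the patch; the interface name "eth0" and the count of four tests are assumptions, and CAP_NET_ADMIN is required):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ifreq ifr;
            struct ethtool_test *t;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            int i, ntests = 4;      /* link, register, interrupt, loopback */

            t = calloc(1, sizeof(*t) + ntests * sizeof(__u64));
            if (fd < 0 || !t)
                    return 1;
            t->cmd = ETHTOOL_TEST;
            t->flags = ETH_TEST_FL_OFFLINE; /* also run the intrusive tests */

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (char *)t;

            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    for (i = 0; i < ntests; i++)    /* kernel fills t->len/data */
                            printf("test %d: %s\n", i, t->data[i] ? "FAILED" : "ok");
            return 0;
    }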
5190 -       return ret;
5191 +static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
5192 +{
5193 +       switch (stringset) {
5194 +       case ETH_SS_STATS:
5195 +               memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
5196 +               break;
5197 +       case ETH_SS_TEST:
5198 +               memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
5199 +               break;
5200 +       }
5201  }
5202  
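The two tables copied by nv_get_strings(), nv_estats_str and nv_etests_str, are defined earlier in the patch (outside this excerpt) and must stay in the same order as the buffers filled by nv_get_ethtool_stats() and nv_self_test(). Each entry presumably carries a single ETH_GSTRING_LEN-sized name, matching the mainline driver:

    /* Presumed layout of the string-table entries (not shown in this excerpt). */
    struct nv_ethtool_str {
            char name[ETH_GSTRING_LEN];
    };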
5203  static struct ethtool_ops ops = {
5204 @@ -2215,68 +5125,175 @@
5205         .get_regs_len = nv_get_regs_len,
5206         .get_regs = nv_get_regs,
5207         .nway_reset = nv_nway_reset,
5208 +#if NVVER > SUSE10
5209         .get_perm_addr = ethtool_op_get_perm_addr,
5210 +#endif
5211 +       .get_ringparam = nv_get_ringparam,
5212 +       .set_ringparam = nv_set_ringparam,
5213 +       .get_pauseparam = nv_get_pauseparam,
5214 +       .set_pauseparam = nv_set_pauseparam,
5215 +       .get_rx_csum = nv_get_rx_csum,
5216 +       .set_rx_csum = nv_set_rx_csum,
5217 +       .get_tx_csum = ethtool_op_get_tx_csum,
5218 +       .set_tx_csum = nv_set_tx_csum,
5219 +       .get_sg = ethtool_op_get_sg,
5220 +       .set_sg = nv_set_sg,
5221 +#ifdef NETIF_F_TSO
5222 +       .get_tso = ethtool_op_get_tso,
5223 +       .set_tso = nv_set_tso,
5224 +#endif
5225 +       .get_strings = nv_get_strings,
5226 +       .get_stats_count = nv_get_stats_count,
5227 +       .get_ethtool_stats = nv_get_ethtool_stats,
5228 +       .self_test_count = nv_self_test_count,
5229 +       .self_test = nv_self_test,
5230 +};
5231 +
5232 +static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5233 +{
5234 +       struct fe_priv *np = get_nvpriv(dev);
5235 +
5236 +       spin_lock_irq(&np->lock);
5237 +
5238 +       /* save vlan group */
5239 +       np->vlangrp = grp;
5240 +
5241 +       if (grp) {
5242 +               /* enable vlan on MAC */
5243 +               np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
5244 +               np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5245 +       } else {
5246 +               /* disable vlan on MAC */
5247 +               np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
5248 +               np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
5249 +               if (!np->rx_csum)
5250 +                       np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
5251 +       }
5252 +
5253 +       writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5254 +
5255 +       spin_unlock_irq(&np->lock);
5256 +};
5257 +
5258 +static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
5259 +{
5260 +       /* nothing to do */
5261  };
5262  
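Once a vlan_group is registered above, the MAC strips tags on receive and inserts them on transmit; the stripped tag then has to be handed back to the stack from the rx path. In the mainline driver of this era that hand-off looks roughly like the sketch below (NV_RX3_VLAN_TAG_PRESENT and NV_RX3_VLAN_TAG_MASK come from the descriptor's third word and are not shown in this excerpt):

    if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
            vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
    else
            netif_rx(skb);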
5263 +/* The mgmt unit and driver use a semaphore to access the phy during init */
5264 +static int nv_mgmt_acquire_sema(struct net_device *dev)
5265 +{
5266 +       u8 __iomem *base = get_hwbase(dev);
5267 +       int i;
5268 +       u32 tx_ctrl, mgmt_sema;
5269 +
5270 +       for (i = 0; i < 10; i++) {
5271 +               mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5272 +               if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) {
5273 +                        dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: sema is free\n");
5274 +                       break;
5275 +                }
5276 +               nv_msleep(500);
5277 +       }
5278 +
5279 +       if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) {
5280 +                dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: sema is not free\n");
5281 +               return 0;
5282 +        }
5283 +
5284 +       for (i = 0; i < 2; i++) {
5285 +               tx_ctrl = readl(base + NvRegTransmitterControl);
5286 +               tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5287 +               writel(tx_ctrl, base + NvRegTransmitterControl);
5288 +
5289 +               /* verify that semaphore was acquired */
5290 +               tx_ctrl = readl(base + NvRegTransmitterControl);
5291 +               if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5292 +                   ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5293 +                        dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: acquired sema\n");
5294 +                       return 1;
5295 +               } else
5296 +                       udelay(50);
5297 +       }
5298 +
5299 +        dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: exit\n");
5300 +       return 0;
5301 +}
5302 +
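The acquire protocol above is: poll until the management unit reports the semaphore free, then set the host-acquire bits and read back to verify. This excerpt never releases the semaphore explicitly; a release helper, if one were needed, would presumably just clear the host bits (hypothetical sketch, not part of the patch):

    static void nv_mgmt_release_sema_sketch(struct net_device *dev)
    {
            u8 __iomem *base = get_hwbase(dev);
            u32 tx_ctrl = readl(base + NvRegTransmitterControl);

            tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;   /* drop the host claim */
            writel(tx_ctrl, base + NvRegTransmitterControl);
    }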
5303 +/* Indicate to mgmt unit whether driver is loaded or not */
5304 +static void nv_mgmt_driver_loaded(struct net_device *dev, int loaded)
5305 +{
5306 +       u8 __iomem *base = get_hwbase(dev);
5307 +       u32 tx_ctrl;
5308 +
5309 +       tx_ctrl = readl(base + NvRegTransmitterControl);
5310 +       if (loaded)
5311 +               tx_ctrl |= NVREG_XMITCTL_HOST_LOADED;
5312 +       else
5313 +               tx_ctrl &= ~NVREG_XMITCTL_HOST_LOADED;
5314 +       writel(tx_ctrl, base + NvRegTransmitterControl);
5315 +}
5316 +
5317  static int nv_open(struct net_device *dev)
5318  {
5319 -       struct fe_priv *np = netdev_priv(dev);
5320 +       struct fe_priv *np = get_nvpriv(dev);
5321         u8 __iomem *base = get_hwbase(dev);
5322 -       int ret, oom, i;
5323 +       int ret = 1;
5324 +       int oom, i;
5325  
5326         dprintk(KERN_DEBUG "nv_open: begin\n");
5327  
5328 -       /* 1) erase previous misconfiguration */
5329 -       /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
5330 +       /* erase previous misconfiguration */
5331 +       if (np->driver_data & DEV_HAS_POWER_CNTRL)
5332 +               nv_mac_reset(dev);
5333 +       /* stop adapter: ignored, 4.3 seems to be overkill */
5334         writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5335         writel(0, base + NvRegMulticastAddrB);
5336         writel(0, base + NvRegMulticastMaskA);
5337         writel(0, base + NvRegMulticastMaskB);
5338         writel(0, base + NvRegPacketFilterFlags);
5339  
5340 -       writel(0, base + NvRegTransmitterControl);
5341 -       writel(0, base + NvRegReceiverControl);
5342 +       nv_stop_tx(dev);
5343 +       nv_stop_rx(dev);
5344  
5345         writel(0, base + NvRegAdapterControl);
5346  
5347 -       /* 2) initialize descriptor rings */
5348 +       if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5349 +               writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
5350 +
5351 +       /* initialize descriptor rings */
5352         set_bufsize(dev);
5353         oom = nv_init_ring(dev);
5354  
5355 -       writel(0, base + NvRegLinkSpeed);
5356 -       writel(0, base + NvRegUnknownTransmitterReg);
5357         nv_txrx_reset(dev);
5358         writel(0, base + NvRegUnknownSetupReg6);
5359  
5360         np->in_shutdown = 0;
5361  
5362 -       /* 3) set mac address */
5363 -       nv_copy_mac_to_hw(dev);
5364 -
5365 -       /* 4) give hw rings */
5366 -       writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
5367 -       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
5368 -               writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
5369 -       else
5370 -               writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
5371 -       writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
5372 +       /* give hw rings */
5373 +       setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5374 +       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5375                 base + NvRegRingSizes);
5376  
5377 -       /* 5) continue setup */
5378 +       /* continue setup */
5379         writel(np->linkspeed, base + NvRegLinkSpeed);
5380 -       writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
5381 +       if (np->desc_ver == DESC_VER_1)
5382 +               writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5383 +       else
5384 +               writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5385         writel(np->txrxctl_bits, base + NvRegTxRxControl);
5386 +       writel(np->vlanctl_bits, base + NvRegVlanControl);
5387         pci_push(base);
5388         writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5389         reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5390                         NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
5391                         KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
5392  
5393 -       writel(0, base + NvRegUnknownSetupReg4);
5394 +       writel(0, base + NvRegMIIMask);
5395         writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5396         writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
5397  
5398 -       /* 6) continue setup */
5399 +       /* continue setup */
5400         writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5401         writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5402         writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5403 @@ -2285,8 +5302,8 @@
5404         writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5405         get_random_bytes(&i, sizeof(i));
5406         writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
5407 -       writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
5408 -       writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
5409 +       writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5410 +       writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5411         if (poll_interval == -1) {
5412                 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5413                         writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5414 @@ -2299,8 +5316,9 @@
5415         writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5416                         base + NvRegAdapterControl);
5417         writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5418 -       writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
5419 -       writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
5420 +       writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5421 +       if (np->wolenabled)
5422 +               writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
5423  
5424         i = readl(base + NvRegPowerState);
5425         if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
5426 @@ -2310,18 +5328,18 @@
5427         udelay(10);
5428         writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5429  
5430 -       writel(0, base + NvRegIrqMask);
5431 +       nv_disable_hw_interrupts(dev, np->irqmask);
5432         pci_push(base);
5433         writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
5434         writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5435         pci_push(base);
5436  
5437 -       ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev);
5438 -       if (ret)
5439 +       if (nv_request_irq(dev, 0)) {
5440                 goto out_drain;
5441 +       }
5442  
5443         /* ask for interrupts */
5444 -       writel(np->irqmask, base + NvRegIrqMask);
5445 +       nv_enable_hw_interrupts(dev, np->irqmask);
5446  
5447         spin_lock_irq(&np->lock);
5448         writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5449 @@ -2348,11 +5366,15 @@
5450         if (ret) {
5451                 netif_carrier_on(dev);
5452         } else {
5453 -               printk("%s: no link during initialization.\n", dev->name);
5454 +               dprintk(KERN_DEBUG "%s: no link during initialization.\n", dev->name);
5455                 netif_carrier_off(dev);
5456         }
5457         if (oom)
5458                 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5459 +
5460 +       /* start statistics timer */
5461 +       mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
5462 +
5463         spin_unlock_irq(&np->lock);
5464  
5465         return 0;
5466 @@ -2363,16 +5385,23 @@
5467  
5468  static int nv_close(struct net_device *dev)
5469  {
5470 -       struct fe_priv *np = netdev_priv(dev);
5471 +       struct fe_priv *np = get_nvpriv(dev);
5472         u8 __iomem *base;
5473  
5474 +       dprintk(KERN_DEBUG "nv_close: begin\n");
5475         spin_lock_irq(&np->lock);
5476         np->in_shutdown = 1;
5477         spin_unlock_irq(&np->lock);
5478 +
5479 +#if NVVER > RHES3
5480         synchronize_irq(dev->irq);
5481 +#else
5482 +       synchronize_irq();
5483 +#endif
5484  
5485         del_timer_sync(&np->oom_kick);
5486         del_timer_sync(&np->nic_poll);
5487 +       del_timer_sync(&np->stats_poll);
5488  
5489         netif_stop_queue(dev);
5490         spin_lock_irq(&np->lock);
5491 @@ -2382,25 +5411,19 @@
5492  
5493         /* disable interrupts on the nic or we will lock up */
5494         base = get_hwbase(dev);
5495 -       writel(0, base + NvRegIrqMask);
5496 +       nv_disable_hw_interrupts(dev, np->irqmask);
5497         pci_push(base);
5498         dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
5499  
5500         spin_unlock_irq(&np->lock);
5501  
5502 -       free_irq(dev->irq, dev);
5503 +       nv_free_irq(dev);
5504  
5505         drain_ring(dev);
5506  
5507         if (np->wolenabled)
5508                 nv_start_rx(dev);
5509  
5510 -       /* special op: write back the misordered MAC address - otherwise
5511 -        * the next nv_probe would see a wrong address.
5512 -        */
5513 -       writel(np->orig_mac[0], base + NvRegMacAddrA);
5514 -       writel(np->orig_mac[1], base + NvRegMacAddrB);
5515 -
5516         /* FIXME: power down nic */
5517  
5518         return 0;
5519 @@ -2413,13 +5436,19 @@
5520         unsigned long addr;
5521         u8 __iomem *base;
5522         int err, i;
5523 +       u32 powerstate, phystate_orig = 0, phystate, txreg;
5524 +       int phyinitialized = 0;
5525  
5527 +       /* modify the network device class id before probing */
5528 +       quirk_nforce_network_class(pci_dev);
5529         dev = alloc_etherdev(sizeof(struct fe_priv));
5530         err = -ENOMEM;
5531         if (!dev)
5532                 goto out;
5533  
5534 -       np = netdev_priv(dev);
5535 +       dprintk(KERN_DEBUG "%s:nv_probe: begin\n",dev->name);
5536 +       np = get_nvpriv(dev);
5537         np->pci_dev = pci_dev;
5538         spin_lock_init(&np->lock);
5539         SET_MODULE_OWNER(dev);
5540 @@ -2431,6 +5460,9 @@
5541         init_timer(&np->nic_poll);
5542         np->nic_poll.data = (unsigned long) dev;
5543         np->nic_poll.function = &nv_do_nic_poll;        /* timer handler */
5544 +       init_timer(&np->stats_poll);
5545 +       np->stats_poll.data = (unsigned long) dev;
5546 +       np->stats_poll.function = &nv_do_stats_poll;    /* timer handler */
5547  
5548         err = pci_enable_device(pci_dev);
5549         if (err) {
5550 @@ -2445,6 +5477,11 @@
5551         if (err < 0)
5552                 goto out_disable;
5553  
5554 +       if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
5555 +               np->register_size = NV_PCI_REGSZ_VER2;
5556 +       else
5557 +               np->register_size = NV_PCI_REGSZ_VER1;
5558 +
5559         err = -EINVAL;
5560         addr = 0;
5561         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5562 @@ -2453,7 +5490,7 @@
5563                                 pci_resource_len(pci_dev, i),
5564                                 pci_resource_flags(pci_dev, i));
5565                 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5566 -                               pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) {
5567 +                               pci_resource_len(pci_dev, i) >= np->register_size) {
5568                         addr = pci_resource_start(pci_dev, i);
5569                         break;
5570                 }
5571 @@ -2464,17 +5501,29 @@
5572                 goto out_relreg;
5573         }
5574  
5575 +       /* copy of driver data */
5576 +       np->driver_data = id->driver_data;
5577 +
5578         /* handle different descriptor versions */
5579         if (id->driver_data & DEV_HAS_HIGH_DMA) {
5580                 /* packet format 3: supports 40-bit addressing */
5581                 np->desc_ver = DESC_VER_3;
5582 -               if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
5583 -                       printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
5584 -                                       pci_name(pci_dev));
5585 -               } else {
5586 -                       dev->features |= NETIF_F_HIGHDMA;
5587 -               }
5588                 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5589 +               if (dma_64bit) {
5590 +                       if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
5591 +                               printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
5592 +                                      pci_name(pci_dev));
5593 +                       } else {
5594 +                               dev->features |= NETIF_F_HIGHDMA;
5595 +                               printk(KERN_INFO "forcedeth: using HIGHDMA\n");
5596 +                       }
5597 +#if NVVER > RHES3
5598 +                       if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
5599 +                               printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
5600 +                                      pci_name(pci_dev));
5601 +                       }
5602 +#endif
5603 +               }
5604         } else if (id->driver_data & DEV_HAS_LARGEDESC) {
5605                 /* packet format 2: supports jumbo frames */
5606                 np->desc_ver = DESC_VER_2;
5607 @@ -2488,49 +5537,149 @@
5608         np->pkt_limit = NV_PKTLIMIT_1;
5609         if (id->driver_data & DEV_HAS_LARGEDESC)
5610                 np->pkt_limit = NV_PKTLIMIT_2;
5611 +       if (mtu > np->pkt_limit) {
5612 +               printk(KERN_INFO "forcedeth: MTU value of %d is too large. Setting to maximum value of %d\n",
5613 +                      mtu, np->pkt_limit);
5614 +               dev->mtu = np->pkt_limit;
5615 +       } else {
5616 +               dev->mtu = mtu;
5617 +       }
5618  
5619         if (id->driver_data & DEV_HAS_CHECKSUM) {
5620 -               np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5621 -               dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5622 +               if (rx_checksum_offload) {
5623 +                       np->rx_csum = 1;
5624 +                       np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5625 +               }
5626 +
5627 +               if (tx_checksum_offload)        
5628 +#if NVVER > RHES4
5629 +                       dev->features |= NETIF_F_HW_CSUM;
5630 +#else
5631 +                       dev->features |= NETIF_F_IP_CSUM;
5632 +#endif
5633 +
5634 +               if (scatter_gather)
5635 +                       dev->features |= NETIF_F_SG;
5636  #ifdef NETIF_F_TSO
5637 -               dev->features |= NETIF_F_TSO;
5638 +               if (tso_offload)
5639 +                       dev->features |= NETIF_F_TSO;
5640  #endif
5641         }
5642  
5643 +       np->vlanctl_bits = 0;
5644 +       if ((id->driver_data & DEV_HAS_VLAN) && tagging_8021pq) {
5645 +               np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5646 +               dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
5647 +               dev->vlan_rx_register = nv_vlan_rx_register;
5648 +               dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
5649 +               /* vlan needs rx checksum support, so force it */
5650 +               np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5651 +       }
5652 +
5653 +       np->msi_flags = 0;
5654 +       if ((id->driver_data & DEV_HAS_MSI) && msi) {
5655 +               np->msi_flags |= NV_MSI_CAPABLE;
5656 +       }
5657 +       if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5658 +               np->msi_flags |= NV_MSI_X_CAPABLE;
5659 +       }
5660 +       
5661 +       np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE;
5662 +       if (rx_flow_control == NV_RX_FLOW_CONTROL_ENABLED)
5663 +               np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
5664 +       if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
5665 +               np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE;
5666 +               if (tx_flow_control == NV_TX_FLOW_CONTROL_ENABLED)
5667 +                       np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
5668 +       }
5669 +       if (autoneg == AUTONEG_ENABLE) {
5670 +               np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
5671 +       } else if (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX) {
5672 +               printk(KERN_INFO "forcedeth: speed_duplex of 1000 full cannot be enabled if autoneg is disabled\n");
5673 +               goto out_relreg;
5674 +       }
5675 +
5676         err = -ENOMEM;
5677 -       np->base = ioremap(addr, NV_PCI_REGSZ);
5678 +       np->base = ioremap(addr, np->register_size);
5679         if (!np->base)
5680                 goto out_relreg;
5681         dev->base_addr = (unsigned long)np->base;
5682  
5683         dev->irq = pci_dev->irq;
5684  
5685 +       if (np->desc_ver == DESC_VER_1) {
5686 +               if (rx_ring_size > RING_MAX_DESC_VER_1) {
5687 +                       printk(KERN_INFO "forcedeth: rx_ring_size of %d is too large. Setting to maximum of %d\n",
5688 +                              rx_ring_size, RING_MAX_DESC_VER_1);
5689 +                       rx_ring_size = RING_MAX_DESC_VER_1;
5690 +               }
5691 +               if (tx_ring_size > RING_MAX_DESC_VER_1) {
5692 +                       printk(KERN_INFO "forcedeth: tx_ring_size of %d is too large. Setting to maximum of %d\n",
5693 +                              tx_ring_size, RING_MAX_DESC_VER_1);
5694 +                       tx_ring_size = RING_MAX_DESC_VER_1;
5695 +               }
5696 +       } else {
5697 +               if (rx_ring_size > RING_MAX_DESC_VER_2_3) {
5698 +                       printk(KERN_INFO "forcedeth: rx_ring_size of %d is too large. Setting to maximum of %d\n",
5699 +                              rx_ring_size, RING_MAX_DESC_VER_2_3);
5700 +                       rx_ring_size = RING_MAX_DESC_VER_2_3;
5701 +               }
5702 +               if (tx_ring_size > RING_MAX_DESC_VER_2_3) {
5703 +                       printk(KERN_INFO "forcedeth: tx_ring_size of %d is too large. Setting to maximum of %d\n",
5704 +                              tx_ring_size, RING_MAX_DESC_VER_2_3);
5705 +                       tx_ring_size = RING_MAX_DESC_VER_2_3;
5706 +               }
5707 +       }
5708 +       np->rx_ring_size = rx_ring_size;
5709 +       np->tx_ring_size = tx_ring_size;
5710 +       np->tx_limit_stop = tx_ring_size - TX_LIMIT_DIFFERENCE;
5711 +       np->tx_limit_start = tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
5712 +
5713         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
5714                 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5715 -                                       sizeof(struct ring_desc) * (RX_RING + TX_RING),
5716 +                                       sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5717                                         &np->ring_addr);
5718                 if (!np->rx_ring.orig)
5719                         goto out_unmap;
5720 -               np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
5721 +               np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5722         } else {
5723                 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5724 -                                       sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
5725 +                                       sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5726                                         &np->ring_addr);
5727                 if (!np->rx_ring.ex)
5728                         goto out_unmap;
5729 -               np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
5730 +               np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5731         }
5732 +       np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
5733 +       np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
5734 +       if (!np->rx_skb || !np->tx_skb)
5735 +               goto out_freering;
5736 +       memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
5737 +       memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
5738  
5739         dev->open = nv_open;
5740         dev->stop = nv_close;
5741 -       dev->hard_start_xmit = nv_start_xmit;
5742 +       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
5743 +               dev->hard_start_xmit = nv_start_xmit;
5744 +       else
5745 +               dev->hard_start_xmit = nv_start_xmit_optimized;
5746         dev->get_stats = nv_get_stats;
5747         dev->change_mtu = nv_change_mtu;
5748         dev->set_mac_address = nv_set_mac_address;
5749         dev->set_multicast_list = nv_set_multicast;
5750 +
5751 +#if NVVER < SLES9
5752 +       dev->do_ioctl = nv_ioctl;
5753 +#endif
5754 +
5755 +#if NVVER > RHES3
5756  #ifdef CONFIG_NET_POLL_CONTROLLER
5757         dev->poll_controller = nv_poll_controller;
5758  #endif
5759 +#else
5760 +       dev->poll_controller = nv_poll_controller;
5761 +#endif
5762 +
5763         SET_ETHTOOL_OPS(dev, &ops);
5764         dev->tx_timeout = nv_tx_timeout;
5765         dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5766 @@ -2542,15 +5691,36 @@
5767         np->orig_mac[0] = readl(base + NvRegMacAddrA);
5768         np->orig_mac[1] = readl(base + NvRegMacAddrB);
5769  
5770 +       /* check the workaround bit for correct mac address order */
5771 +       txreg = readl(base + NvRegTransmitPoll);
5772 +       if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5773 +               /* mac address is already in correct order */
5774 +               dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
5775 +               dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
5776 +               dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5777 +               dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5778 +               dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
5779 +               dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
5780 +       } else {
5781         dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
5782         dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
5783         dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5784         dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5785         dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
5786         dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
5787 +               /* set the permanent address correctly as well */
5788 +               np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
5789 +                       (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
5790 +               np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
5791 +               writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5792 +       }
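/*
 * Worked example (editor's illustration): for the MAC 00:11:22:33:44:55
 * stored in the legacy reversed order, the registers read back as
 * orig_mac[0] = 0x22334455 and orig_mac[1] = 0x....0011, and the else
 * branch above extracts 00:11:22:33:44:55 from them. The write-back then
 * stores orig_mac[0] = 0x33221100, orig_mac[1] = 0x00005544 and sets
 * NVREG_TRANSMITPOLL_MAC_ADDR_REV, so the next probe takes the first
 * branch and reads the address out directly.
 */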
5793 +#if NVVER > SUSE10
5794         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5795  
5796         if (!is_valid_ether_addr(dev->perm_addr)) {
5797 +#else
5798 +       if (!is_valid_ether_addr(dev->dev_addr)) {              
5799 +#endif 
5800                 /*
5801                  * Bad mac address. At least one bios sets the mac address
5802                  * to 01:23:45:67:89:ab
5803 @@ -2569,22 +5739,43 @@
5804         dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
5805                         dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
5806                         dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
5807 +       /* set mac address */
5808 +       nv_copy_mac_to_hw(dev);
5809  
5810         /* disable WOL */
5811         writel(0, base + NvRegWakeUpFlags);
5812 -       np->wolenabled = 0;
5813 +       np->wolenabled = wol;
5814 +
5815 +       if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5816 +               u8 revision_id;
5817 +               pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);
5818 +
5819 +               /* take phy and nic out of low power mode */
5820 +               powerstate = readl(base + NvRegPowerState2);
5821 +               powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5822 +               if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
5823 +                    id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
5824 +                   revision_id >= 0xA3)
5825 +                       powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5826 +               writel(powerstate, base + NvRegPowerState2);
5827 +       }
5828  
5829         if (np->desc_ver == DESC_VER_1) {
5830                 np->tx_flags = NV_TX_VALID;
5831         } else {
5832                 np->tx_flags = NV_TX2_VALID;
5833         }
5834 -       if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5835 +       if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
5836                 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5837 -       else
5838 +               if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5839 +                       np->msi_flags |= 0x0003;
5840 +       } else {
5841                 np->irqmask = NVREG_IRQMASK_CPU;
5842 +               if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5843 +                       np->msi_flags |= 0x0001;
5844 +       }
5845  
5846 -       if (id->driver_data & DEV_NEED_TIMERIRQ_ORIG)
5847 +       if (id->driver_data & DEV_NEED_TIMERIRQ)
5848                 np->irqmask |= NVREG_IRQ_TIMER;
5849         if (id->driver_data & DEV_NEED_LINKTIMER) {
5850                 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
5851 @@ -2595,6 +5786,59 @@
5852                 np->need_linktimer = 0;
5853         }
5854  
5855 +       /* clear phy state and temporarily halt phy interrupts */
5856 +       writel(0, base + NvRegMIIMask);
5857 +       phystate = readl(base + NvRegAdapterControl);
5858 +       if (phystate & NVREG_ADAPTCTL_RUNNING) {
5859 +               phystate_orig = 1;
5860 +               phystate &= ~NVREG_ADAPTCTL_RUNNING;
5861 +               writel(phystate, base + NvRegAdapterControl);
5862 +       }
5863 +       writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
5864 +
5865 +       if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5866 +               writel(NV_UNKNOWN_VAL, base + NvRegPatternCRC);
5867 +               pci_push(base);
5868 +               nv_msleep(500);
5869 +               /* management unit running on the mac? */
5870 +               np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
5871 +               if (np->mac_in_use) {
5872 +                       u32 mgmt_sync;
5873 +                       dprintk(KERN_DEBUG "%s: probe: mac in use\n",dev->name);
5874 +                       /* management unit setup the phy already? */
5875 +                       mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
5876 +                       if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) {
5877 +                               dprintk(KERN_DEBUG "%s: probe: sync not ready\n", dev->name);
5878 +                               if (!nv_mgmt_acquire_sema(dev)) {
5879 +                                       dprintk(KERN_DEBUG"%s: probe: could not acquire sema\n",dev->name);
5880 +                                       for (i = 0; i < 5000; i++) {
5881 +                                               nv_msleep(1);
5882 +                                               mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
5883 +                                               if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY)
5884 +                                                       continue;
5885 +                                               if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) {
5886 +                                                       dprintk(KERN_DEBUG"%s: probe: phy inited by SMU 1\n",dev->name);
5887 +                                                       phyinitialized = 1;
5888 +                                               }
5889 +                                               dprintk(KERN_DEBUG "%s: probe: breaking out of loop\n", dev->name);
5890 +                                               break;
5891 +                                       }
5892 +                               } else {
5893 +                                       /* we need to init the phy */
5894 +                                       dprintk(KERN_DEBUG"%s: probe: we need to init phy 1\n",dev->name);
5895 +                               }
5896 +                       } else if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) {
5897 +                               dprintk(KERN_DEBUG"%s: probe: phy inited by SMU 2\n",dev->name);
5898 +                               /* phy is inited by SMU */
5899 +                               phyinitialized = 1;
5900 +                       } else {
5901 +                               /* we need to init the phy */
5902 +                               dprintk(KERN_DEBUG"%s: probe: we need to init phy 2\n",dev->name);
5903 +                       }
5904 +               } else
5905 +                       dprintk(KERN_DEBUG"%s: probe: mac not in use\n",dev->name);
5906 +       }
5907 +
5908         /* find a suitable phy */
5909         for (i = 1; i <= 32; i++) {
5910                 int id1, id2;
5911 @@ -2611,6 +5855,7 @@
5912                 if (id2 < 0 || id2 == 0xffff)
5913                         continue;
5914  
5915 +               np->phy_model = id2 & PHYID2_MODEL_MASK;
5916                 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5917                 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5918                 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
5919 @@ -2622,21 +5867,32 @@
5920         if (i == 33) {
5921                 printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
5922                        pci_name(pci_dev));
5923 -               goto out_freering;
5924 +               goto out_error;
5925         }
5926         
5927 +       if (!phyinitialized) {          
5928         /* reset it */
5929         phy_init(dev);
5930 +       } else {
5931 +               /* see if gigabit phy */
5932 +               u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5933 +               if (mii_status & PHY_GIGABIT) {
5934 +                       np->gigabit = PHY_GIGABIT;
5935 +               }
5936 +       }
5937 +       if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5938 +               nv_mgmt_driver_loaded(dev, 1);
5939 +       }
5940  
5941         /* set default link speed settings */
5942         np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5943         np->duplex = 0;
5944 -       np->autoneg = 1;
5945 +       np->autoneg = autoneg;
5946  
5947         err = register_netdev(dev);
5948         if (err) {
5949                 printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
5950 -               goto out_freering;
5951 +               goto out_error;
5952         }
5953         printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
5954                         dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
5955 @@ -2644,14 +5900,14 @@
5956  
5957         return 0;
5958  
5959 -out_freering:
5960 -       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
5961 -               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
5962 -                                   np->rx_ring.orig, np->ring_addr);
5963 -       else
5964 -               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
5965 -                                   np->rx_ring.ex, np->ring_addr);
5966 +out_error:
5967 +       if (phystate_orig)
5968 +               writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
5969 +       if (np->mac_in_use)
5970 +               nv_mgmt_driver_loaded(dev, 0);
5971         pci_set_drvdata(pci_dev, NULL);
5972 +out_freering:
5973 +       free_rings(dev);
5974  out_unmap:
5975         iounmap(get_hwbase(dev));
5976  out_relreg:
5977 @@ -2667,15 +5923,20 @@
5978  static void __devexit nv_remove(struct pci_dev *pci_dev)
5979  {
5980         struct net_device *dev = pci_get_drvdata(pci_dev);
5981 -       struct fe_priv *np = netdev_priv(dev);
5982 +       struct fe_priv *np = get_nvpriv(dev);
5983 +       u8 __iomem *base = get_hwbase(dev);
5984  
5985         unregister_netdev(dev);
5986 +       /* special op: write back the misordered MAC address - otherwise
5987 +        * the next nv_probe would see a wrong address.
5988 +        */
5989 +       writel(np->orig_mac[0], base + NvRegMacAddrA);
5990 +       writel(np->orig_mac[1], base + NvRegMacAddrB);
5991 +       if (np->mac_in_use)
5992 +               nv_mgmt_driver_loaded(dev, 0);
5993  
5994         /* free all structures */
5995 -       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
5996 -               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
5997 -       else
5998 -               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
5999 +       free_rings(dev);
6000         iounmap(get_hwbase(dev));
6001         pci_release_regions(pci_dev);
6002         pci_disable_device(pci_dev);
6003 @@ -2730,19 +5991,51 @@
6004         },
6005         {       /* MCP51 Ethernet Controller */
6006                 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
6007 -               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
6008 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
6009         },
6010         {       /* MCP51 Ethernet Controller */
6011                 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
6012 -               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
6013 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
6014         },
6015         {       /* MCP55 Ethernet Controller */
6016                 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
6017 -               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
6018 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6019         },
6020         {       /* MCP55 Ethernet Controller */
6021                 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
6022 -               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
6023 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6024 +       },
6025 +       {       /* MCP61 Ethernet Controller */
6026 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
6027 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6028 +       },
6029 +       {       /* MCP61 Ethernet Controller */
6030 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
6031 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6032 +       },
6033 +       {       /* MCP61 Ethernet Controller */
6034 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
6035 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6036 +       },
6037 +       {       /* MCP61 Ethernet Controller */
6038 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
6039 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6040 +       },
6041 +       {       /* MCP65 Ethernet Controller */
6042 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
6043 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6044 +       },
6045 +       {       /* MCP65 Ethernet Controller */
6046 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
6047 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6048 +       },
6049 +       {       /* MCP65 Ethernet Controller */
6050 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
6051 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6052 +       },
6053 +       {       /* MCP65 Ethernet Controller */
6054 +               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
6055 +               .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
6056         },
6057         {0,},
6058  };
6059 @@ -2758,6 +6051,7 @@
6060  static int __init init_nic(void)
6061  {
6062         printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
6063 +       dprintk(KERN_DEBUG "DEBUG VERSION\n");
6064         return pci_module_init(&driver);
6065  }
6066  
6067 @@ -2766,15 +6060,90 @@
6068         pci_unregister_driver(&driver);
6069  }
6070  
6071 +#if NVVER > SLES9
6072  module_param(max_interrupt_work, int, 0);
6073  MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6074  module_param(optimization_mode, int, 0);
6075  MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
6076  module_param(poll_interval, int, 0);
6077  MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
6078 -
6079 +module_param(msi, int, 0);
6080 +MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6081 +module_param(msix, int, 0);
6082 +MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
6083 +
6084 +module_param(speed_duplex, int, 0);
6085 +MODULE_PARM_DESC(speed_duplex, "PHY speed and duplex settings. Auto = 0, 10mbps half = 1, 10mbps full = 2, 100mbps half = 3, 100mbps full = 4, 1000mbps full = 5.");
6086 +module_param(autoneg, int, 0);
6087 +MODULE_PARM_DESC(autoneg, "PHY autonegotiate is enabled by setting to 1 and disabled by setting to 0.");
6088 +module_param(scatter_gather, int, 0);
6089 +MODULE_PARM_DESC(scatter_gather, "Scatter gather is enabled by setting to 1 and disabled by setting to 0.");
6090 +module_param(tso_offload, int, 0);
6091 +MODULE_PARM_DESC(tso_offload, "TCP Segmentation offload is enabled by setting to 1 and disabled by setting to 0.");
6092 +module_param(mtu, int, 0);
6093 +MODULE_PARM_DESC(mtu, "MTU value. Maximum value of 1500 or 9100 depending on hardware.");
6094 +module_param(tx_checksum_offload, int, 0);
6095 +MODULE_PARM_DESC(tx_checksum_offload, "Tx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
6096 +module_param(rx_checksum_offload, int, 0);
6097 +MODULE_PARM_DESC(rx_checksum_offload, "Rx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
6098 +module_param(tx_ring_size, int, 0);
6099 +MODULE_PARM_DESC(tx_ring_size, "Tx ring size. Maximum value of 1024 or 16384 depending on hardware.");
6100 +module_param(rx_ring_size, int, 0);
6101 +MODULE_PARM_DESC(rx_ring_size, "Rx ring size. Maximum value of 1024 or 16384 depending on hardware.");
6102 +module_param(tx_flow_control, int, 0);
6103 +MODULE_PARM_DESC(tx_flow_control, "Tx flow control is enabled by setting to 1 and disabled by setting to 0.");
6104 +module_param(rx_flow_control, int, 0);
6105 +MODULE_PARM_DESC(rx_flow_control, "Rx flow control is enabled by setting to 1 and disabled by setting to 0.");
6106 +module_param(dma_64bit, int, 0);
6107 +MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6108 +module_param(wol, int, 0);
6109 +MODULE_PARM_DESC(wol, "Wake-On-Lan is enabled by setting to 1 and disabled by setting to 0.");
6110 +module_param(tagging_8021pq, int, 0);
6111 +MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0.");
6112 +#else
6113 +MODULE_PARM(max_interrupt_work, "i");
6114 +MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6115 +MODULE_PARM(optimization_mode, "i");
6116 +MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
6117 +MODULE_PARM(poll_interval, "i");
6118 +MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
6119 +#ifdef CONFIG_PCI_MSI
6120 +MODULE_PARM(msi, "i");
6121 +MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6122 +MODULE_PARM(msix, "i");
6123 +MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
6124 +#endif
6125 +MODULE_PARM(speed_duplex, "i");
6126 +MODULE_PARM_DESC(speed_duplex, "PHY speed and duplex settings. Auto = 0, 10mbps half = 1, 10mbps full = 2, 100mbps half = 3, 100mbps full = 4, 1000mbps full = 5.");
6127 +MODULE_PARM(autoneg, "i");
6128 +MODULE_PARM_DESC(autoneg, "PHY autonegotiate is enabled by setting to 1 and disabled by setting to 0.");
6129 +MODULE_PARM(scatter_gather, "i");
6130 +MODULE_PARM_DESC(scatter_gather, "Scatter gather is enabled by setting to 1 and disabled by setting to 0.");
6131 +MODULE_PARM(tso_offload, "i");
6132 +MODULE_PARM_DESC(tso_offload, "TCP Segmentation offload is enabled by setting to 1 and disabled by setting to 0.");
6133 +MODULE_PARM(mtu, "i");
6134 +MODULE_PARM_DESC(mtu, "MTU value. Maximum value of 1500 or 9100 depending on hardware.");
6135 +MODULE_PARM(tx_checksum_offload, "i");
6136 +MODULE_PARM_DESC(tx_checksum_offload, "Tx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
6137 +MODULE_PARM(rx_checksum_offload, "i");
6138 +MODULE_PARM_DESC(rx_checksum_offload, "Rx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
6139 +MODULE_PARM(tx_ring_size, "i");
6140 +MODULE_PARM_DESC(tx_ring_size, "Tx ring size. Maximum value of 1024 or 16384 depending on hardware.");
6141 +MODULE_PARM(rx_ring_size, "i");
6142 +MODULE_PARM_DESC(rx_ring_size, "Rx ring size. Maximum value of 1024 or 16384 depending on hardware.");
6143 +MODULE_PARM(tx_flow_control, "i");
6144 +MODULE_PARM_DESC(tx_flow_control, "Tx flow control is enabled by setting to 1 and disabled by setting to 0.");
6145 +MODULE_PARM(rx_flow_control, "i");
6146 +MODULE_PARM_DESC(rx_flow_control, "Rx flow control is enabled by setting to 1 and disabled by setting to 0.");
6147 +MODULE_PARM(dma_64bit, "i");
6148 +MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6149 +MODULE_PARM(wol, "i");
6150 +MODULE_PARM_DESC(wol, "Wake-On-LAN is enabled by setting to 1 and disabled by setting to 0.");
6151 +MODULE_PARM(tagging_8021pq, "i");
6152 +MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0.");
6153 +#endif
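The #if/#else ladder above declares every parameter twice, once with the 2.6 module_param() API and once with the legacy MODULE_PARM() API. A minimal sketch of how the two forms could be folded into a single declaration site; NV_MODULE_PARAM_INT is a hypothetical helper, not part of this patch:

    #include <linux/version.h>
    #include <linux/module.h>

    #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
    #define NV_MODULE_PARAM_INT(name, desc)     \
            module_param(name, int, 0);         \
            MODULE_PARM_DESC(name, desc)
    #else
    #define NV_MODULE_PARAM_INT(name, desc)     \
            MODULE_PARM(name, "i");             \
            MODULE_PARM_DESC(name, desc)
    #endif

    /* Example use, assuming the int variable is already defined: */
    NV_MODULE_PARAM_INT(wol, "Wake-On-LAN is enabled by setting to 1 and disabled by setting to 0.");

Either way, the parameter is supplied at load time, e.g. "modprobe forcedeth wol=1".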
6154  MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6155 -MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver (TIMERIRQ DISABLED)");
6156 +MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6157  MODULE_LICENSE("GPL");
6158  
6159  MODULE_DEVICE_TABLE(pci, pci_tbl);
6160 diff -uNr linux-2.6.16.orig/drivers/scsi/sata_nv.c linux-2.6.16/drivers/scsi/sata_nv.c
6161 --- linux-2.6.16.orig/drivers/scsi/sata_nv.c    2007-06-23 20:15:59.919947000 +0200
6162 +++ linux-2.6.16/drivers/scsi/sata_nv.c 2006-10-21 14:45:00.000000000 +0200
6163 @@ -1,630 +1,1284 @@
6164 -/*
6165 - *  sata_nv.c - NVIDIA nForce SATA
6166 - *
6167 - *  Copyright 2004 NVIDIA Corp.  All rights reserved.
6168 - *  Copyright 2004 Andrew Chew
6169 - *
6170 - *
6171 - *  This program is free software; you can redistribute it and/or modify
6172 - *  it under the terms of the GNU General Public License as published by
6173 - *  the Free Software Foundation; either version 2, or (at your option)
6174 - *  any later version.
6175 - *
6176 - *  This program is distributed in the hope that it will be useful,
6177 - *  but WITHOUT ANY WARRANTY; without even the implied warranty of
6178 - *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
6179 - *  GNU General Public License for more details.
6180 - *
6181 - *  You should have received a copy of the GNU General Public License
6182 - *  along with this program; see the file COPYING.  If not, write to
6183 - *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
6184 - *
6185 - *
6186 - *  libata documentation is available via 'make {ps|pdf}docs',
6187 - *  as Documentation/DocBook/libata.*
6188 - *
6189 - *  No hardware documentation available outside of NVIDIA.
6190 - *  This driver programs the NVIDIA SATA controller in a similar
6191 - *  fashion as with other PCI IDE BMDMA controllers, with a few
6192 - *  NV-specific details such as register offsets, SATA phy location,
6193 - *  hotplug info, etc.
6194 - *
6195 - *  0.10
6196 - *     - Fixed spurious interrupts issue seen with the Maxtor 6H500F0 500GB
6197 - *       drive.  Also made the check_hotplug() callbacks return whether there
6198 - *       was a hotplug interrupt or not.  This was not the source of the
6199 - *       spurious interrupts, but is the right thing to do anyway.
6200 - *
6201 - *  0.09
6202 - *     - Fixed bug introduced by 0.08's MCP51 and MCP55 support.
6203 - *
6204 - *  0.08
6205 - *     - Added support for MCP51 and MCP55.
6206 - *
6207 - *  0.07
6208 - *     - Added support for RAID class code.
6209 - *
6210 - *  0.06
6211 - *     - Added generic SATA support by using a pci_device_id that filters on
6212 - *       the IDE storage class code.
6213 - *
6214 - *  0.03
6215 - *     - Fixed a bug where the hotplug handlers for non-CK804/MCP04 were using
6216 - *       mmio_base, which is only set for the CK804/MCP04 case.
6217 - *
6218 - *  0.02
6219 - *     - Added support for CK804 SATA controller.
6220 - *
6221 - *  0.01
6222 - *     - Initial revision.
6223 - */
6224 -
6225 -#include <linux/config.h>
6226 -#include <linux/kernel.h>
6227 -#include <linux/module.h>
6228 -#include <linux/pci.h>
6229 -#include <linux/init.h>
6230 -#include <linux/blkdev.h>
6231 -#include <linux/delay.h>
6232 -#include <linux/interrupt.h>
6233 -#include <linux/device.h>
6234 -#include <scsi/scsi_host.h>
6235 -#include <linux/libata.h>
6236 -
6237 -#define DRV_NAME                       "sata_nv"
6238 -#define DRV_VERSION                    "0.8"
6239 -
6240 -#define NV_PORTS                       2
6241 -#define NV_PIO_MASK                    0x1f
6242 -#define NV_MWDMA_MASK                  0x07
6243 -#define NV_UDMA_MASK                   0x7f
6244 -#define NV_PORT0_SCR_REG_OFFSET                0x00
6245 -#define NV_PORT1_SCR_REG_OFFSET                0x40
6246 -
6247 -#define NV_INT_STATUS                  0x10
6248 -#define NV_INT_STATUS_CK804            0x440
6249 -#define NV_INT_STATUS_PDEV_INT         0x01
6250 -#define NV_INT_STATUS_PDEV_PM          0x02
6251 -#define NV_INT_STATUS_PDEV_ADDED       0x04
6252 -#define NV_INT_STATUS_PDEV_REMOVED     0x08
6253 -#define NV_INT_STATUS_SDEV_INT         0x10
6254 -#define NV_INT_STATUS_SDEV_PM          0x20
6255 -#define NV_INT_STATUS_SDEV_ADDED       0x40
6256 -#define NV_INT_STATUS_SDEV_REMOVED     0x80
6257 -#define NV_INT_STATUS_PDEV_HOTPLUG     (NV_INT_STATUS_PDEV_ADDED | \
6258 -                                       NV_INT_STATUS_PDEV_REMOVED)
6259 -#define NV_INT_STATUS_SDEV_HOTPLUG     (NV_INT_STATUS_SDEV_ADDED | \
6260 -                                       NV_INT_STATUS_SDEV_REMOVED)
6261 -#define NV_INT_STATUS_HOTPLUG          (NV_INT_STATUS_PDEV_HOTPLUG | \
6262 -                                       NV_INT_STATUS_SDEV_HOTPLUG)
6263 -
6264 -#define NV_INT_ENABLE                  0x11
6265 -#define NV_INT_ENABLE_CK804            0x441
6266 -#define NV_INT_ENABLE_PDEV_MASK                0x01
6267 -#define NV_INT_ENABLE_PDEV_PM          0x02
6268 -#define NV_INT_ENABLE_PDEV_ADDED       0x04
6269 -#define NV_INT_ENABLE_PDEV_REMOVED     0x08
6270 -#define NV_INT_ENABLE_SDEV_MASK                0x10
6271 -#define NV_INT_ENABLE_SDEV_PM          0x20
6272 -#define NV_INT_ENABLE_SDEV_ADDED       0x40
6273 -#define NV_INT_ENABLE_SDEV_REMOVED     0x80
6274 -#define NV_INT_ENABLE_PDEV_HOTPLUG     (NV_INT_ENABLE_PDEV_ADDED | \
6275 -                                       NV_INT_ENABLE_PDEV_REMOVED)
6276 -#define NV_INT_ENABLE_SDEV_HOTPLUG     (NV_INT_ENABLE_SDEV_ADDED | \
6277 -                                       NV_INT_ENABLE_SDEV_REMOVED)
6278 -#define NV_INT_ENABLE_HOTPLUG          (NV_INT_ENABLE_PDEV_HOTPLUG | \
6279 -                                       NV_INT_ENABLE_SDEV_HOTPLUG)
6280 -
6281 -#define NV_INT_CONFIG                  0x12
6282 -#define NV_INT_CONFIG_METHD            0x01 // 0 = INT, 1 = SMI
6283 -
6284 -// For PCI config register 20
6285 -#define NV_MCP_SATA_CFG_20             0x50
6286 -#define NV_MCP_SATA_CFG_20_SATA_SPACE_EN       0x04
6287 -
6288 -static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
6289 -static irqreturn_t nv_interrupt (int irq, void *dev_instance,
6290 -                                struct pt_regs *regs);
6291 -static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
6292 -static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
6293 -static void nv_host_stop (struct ata_host_set *host_set);
6294 -static void nv_enable_hotplug(struct ata_probe_ent *probe_ent);
6295 -static void nv_disable_hotplug(struct ata_host_set *host_set);
6296 -static int nv_check_hotplug(struct ata_host_set *host_set);
6297 -static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent);
6298 -static void nv_disable_hotplug_ck804(struct ata_host_set *host_set);
6299 -static int nv_check_hotplug_ck804(struct ata_host_set *host_set);
6300 -
6301 -enum nv_host_type
6302 -{
6303 -       GENERIC,
6304 -       NFORCE2,
6305 -       NFORCE3,
6306 -       CK804
6307 -};
6308 -
6309 -static const struct pci_device_id nv_pci_tbl[] = {
6310 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
6311 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
6312 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
6313 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
6314 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
6315 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
6316 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
6317 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
6318 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
6319 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
6320 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
6321 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
6322 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
6323 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
6324 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
6325 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6326 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
6327 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6328 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
6329 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6330 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
6331 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6332 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
6333 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6334 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
6335 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6336 -       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
6337 -               PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6338 -       { PCI_VENDOR_ID_NVIDIA, 0x045c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6339 -       { PCI_VENDOR_ID_NVIDIA, 0x045d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6340 -       { PCI_VENDOR_ID_NVIDIA, 0x045e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6341 -       { PCI_VENDOR_ID_NVIDIA, 0x045f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
6342 -       { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
6343 -               PCI_ANY_ID, PCI_ANY_ID,
6344 -               PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
6345 -       { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
6346 -               PCI_ANY_ID, PCI_ANY_ID,
6347 -               PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
6348 -       { 0, } /* terminate list */
6349 -};
6350 -
6351 -#define NV_HOST_FLAGS_SCR_MMIO 0x00000001
6352 -
6353 -struct nv_host_desc
6354 -{
6355 -       enum nv_host_type       host_type;
6356 -       void                    (*enable_hotplug)(struct ata_probe_ent *probe_ent);
6357 -       void                    (*disable_hotplug)(struct ata_host_set *host_set);
6358 -       int                     (*check_hotplug)(struct ata_host_set *host_set);
6359 -
6360 -};
6361 -static struct nv_host_desc nv_device_tbl[] = {
6362 -       {
6363 -               .host_type      = GENERIC,
6364 -               .enable_hotplug = NULL,
6365 -               .disable_hotplug= NULL,
6366 -               .check_hotplug  = NULL,
6367 -       },
6368 -       {
6369 -               .host_type      = NFORCE2,
6370 -               .enable_hotplug = nv_enable_hotplug,
6371 -               .disable_hotplug= nv_disable_hotplug,
6372 -               .check_hotplug  = nv_check_hotplug,
6373 -       },
6374 -       {
6375 -               .host_type      = NFORCE3,
6376 -               .enable_hotplug = nv_enable_hotplug,
6377 -               .disable_hotplug= nv_disable_hotplug,
6378 -               .check_hotplug  = nv_check_hotplug,
6379 -       },
6380 -       {       .host_type      = CK804,
6381 -               .enable_hotplug = nv_enable_hotplug_ck804,
6382 -               .disable_hotplug= nv_disable_hotplug_ck804,
6383 -               .check_hotplug  = nv_check_hotplug_ck804,
6384 -       },
6385 -};
6386 -
6387 -struct nv_host
6388 -{
6389 -       struct nv_host_desc     *host_desc;
6390 -       unsigned long           host_flags;
6391 -};
6392 -
6393 -static struct pci_driver nv_pci_driver = {
6394 -       .name                   = DRV_NAME,
6395 -       .id_table               = nv_pci_tbl,
6396 -       .probe                  = nv_init_one,
6397 -       .remove                 = ata_pci_remove_one,
6398 -};
6399 -
6400 -static struct scsi_host_template nv_sht = {
6401 -       .module                 = THIS_MODULE,
6402 -       .name                   = DRV_NAME,
6403 -       .ioctl                  = ata_scsi_ioctl,
6404 -       .queuecommand           = ata_scsi_queuecmd,
6405 -       .eh_strategy_handler    = ata_scsi_error,
6406 -       .can_queue              = ATA_DEF_QUEUE,
6407 -       .this_id                = ATA_SHT_THIS_ID,
6408 -       .sg_tablesize           = LIBATA_MAX_PRD,
6409 -       .max_sectors            = ATA_MAX_SECTORS,
6410 -       .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
6411 -       .emulated               = ATA_SHT_EMULATED,
6412 -       .use_clustering         = ATA_SHT_USE_CLUSTERING,
6413 -       .proc_name              = DRV_NAME,
6414 -       .dma_boundary           = ATA_DMA_BOUNDARY,
6415 -       .slave_configure        = ata_scsi_slave_config,
6416 -       .bios_param             = ata_std_bios_param,
6417 -};
6418 -
6419 -static const struct ata_port_operations nv_ops = {
6420 -       .port_disable           = ata_port_disable,
6421 -       .tf_load                = ata_tf_load,
6422 -       .tf_read                = ata_tf_read,
6423 -       .exec_command           = ata_exec_command,
6424 -       .check_status           = ata_check_status,
6425 -       .dev_select             = ata_std_dev_select,
6426 -       .phy_reset              = sata_phy_reset,
6427 -       .bmdma_setup            = ata_bmdma_setup,
6428 -       .bmdma_start            = ata_bmdma_start,
6429 -       .bmdma_stop             = ata_bmdma_stop,
6430 -       .bmdma_status           = ata_bmdma_status,
6431 -       .qc_prep                = ata_qc_prep,
6432 -       .qc_issue               = ata_qc_issue_prot,
6433 -       .eng_timeout            = ata_eng_timeout,
6434 -       .irq_handler            = nv_interrupt,
6435 -       .irq_clear              = ata_bmdma_irq_clear,
6436 -       .scr_read               = nv_scr_read,
6437 -       .scr_write              = nv_scr_write,
6438 -       .port_start             = ata_port_start,
6439 -       .port_stop              = ata_port_stop,
6440 -       .host_stop              = nv_host_stop,
6441 -};
6442 -
6443 -/* FIXME: The hardware provides the necessary SATA PHY controls
6444 - * to support ATA_FLAG_SATA_RESET.  However, it is currently
6445 - * necessary to disable that flag, to solve misdetection problems.
6446 - * See http://bugme.osdl.org/show_bug.cgi?id=3352 for more info.
6447 - *
6448 - * This problem really needs to be investigated further.  But in the
6449 - * meantime, we avoid ATA_FLAG_SATA_RESET to get people working.
6450 - */
6451 -static struct ata_port_info nv_port_info = {
6452 -       .sht            = &nv_sht,
6453 -       .host_flags     = ATA_FLAG_SATA |
6454 -                         /* ATA_FLAG_SATA_RESET | */
6455 -                         ATA_FLAG_SRST |
6456 -                         ATA_FLAG_NO_LEGACY,
6457 -       .pio_mask       = NV_PIO_MASK,
6458 -       .mwdma_mask     = NV_MWDMA_MASK,
6459 -       .udma_mask      = NV_UDMA_MASK,
6460 -       .port_ops       = &nv_ops,
6461 -};
6462 -
6463 -MODULE_AUTHOR("NVIDIA");
6464 -MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
6465 -MODULE_LICENSE("GPL");
6466 -MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
6467 -MODULE_VERSION(DRV_VERSION);
6468 -
6469 -static irqreturn_t nv_interrupt (int irq, void *dev_instance,
6470 -                                struct pt_regs *regs)
6471 -{
6472 -       struct ata_host_set *host_set = dev_instance;
6473 -       struct nv_host *host = host_set->private_data;
6474 -       unsigned int i;
6475 -       unsigned int handled = 0;
6476 -       unsigned long flags;
6477 -
6478 -       spin_lock_irqsave(&host_set->lock, flags);
6479 -
6480 -       for (i = 0; i < host_set->n_ports; i++) {
6481 -               struct ata_port *ap;
6482 -
6483 -               ap = host_set->ports[i];
6484 -               if (ap &&
6485 -                   !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
6486 -                       struct ata_queued_cmd *qc;
6487 -
6488 -                       qc = ata_qc_from_tag(ap, ap->active_tag);
6489 -                       if (qc && (!(qc->tf.ctl & ATA_NIEN)))
6490 -                               handled += ata_host_intr(ap, qc);
6491 -                       else
6492 -                               // No request pending?  Clear interrupt status
6493 -                               // anyway, in case there's one pending.
6494 -                               ap->ops->check_status(ap);
6495 -               }
6496 -
6497 -       }
6498 -
6499 -       if (host->host_desc->check_hotplug)
6500 -               handled += host->host_desc->check_hotplug(host_set);
6501 -
6502 -       spin_unlock_irqrestore(&host_set->lock, flags);
6503 -
6504 -       return IRQ_RETVAL(handled);
6505 -}
6506 -
6507 -static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
6508 -{
6509 -       struct ata_host_set *host_set = ap->host_set;
6510 -       struct nv_host *host = host_set->private_data;
6511 -
6512 -       if (sc_reg > SCR_CONTROL)
6513 -               return 0xffffffffU;
6514 -
6515 -       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
6516 -               return readl((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
6517 -       else
6518 -               return inl(ap->ioaddr.scr_addr + (sc_reg * 4));
6519 -}
6520 -
6521 -static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
6522 -{
6523 -       struct ata_host_set *host_set = ap->host_set;
6524 -       struct nv_host *host = host_set->private_data;
6525 -
6526 -       if (sc_reg > SCR_CONTROL)
6527 -               return;
6528 -
6529 -       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
6530 -               writel(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
6531 -       else
6532 -               outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
6533 -}
6534 -
6535 -static void nv_host_stop (struct ata_host_set *host_set)
6536 -{
6537 -       struct nv_host *host = host_set->private_data;
6538 -       struct pci_dev *pdev = to_pci_dev(host_set->dev);
6539 -
6540 -       // Disable hotplug event interrupts.
6541 -       if (host->host_desc->disable_hotplug)
6542 -               host->host_desc->disable_hotplug(host_set);
6543 -
6544 -       kfree(host);
6545 -
6546 -       if (host_set->mmio_base)
6547 -               pci_iounmap(pdev, host_set->mmio_base);
6548 -}
6549 -
6550 -static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
6551 -{
6552 -       static int printed_version = 0;
6553 -       struct nv_host *host;
6554 -       struct ata_port_info *ppi;
6555 -       struct ata_probe_ent *probe_ent;
6556 -       int pci_dev_busy = 0;
6557 -       int rc;
6558 -       u32 bar;
6559 -
6560 -        // Make sure this is a SATA controller by counting the number of bars
6561 -        // (NVIDIA SATA controllers will always have six bars).  Otherwise,
6562 -        // it's an IDE controller and we ignore it.
6563 -       for (bar=0; bar<6; bar++)
6564 -               if (pci_resource_start(pdev, bar) == 0)
6565 -                       return -ENODEV;
6566 -
6567 -       if (!printed_version++)
6568 -               dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
6569 -
6570 -       rc = pci_enable_device(pdev);
6571 -       if (rc)
6572 -               goto err_out;
6573 -
6574 -       rc = pci_request_regions(pdev, DRV_NAME);
6575 -       if (rc) {
6576 -               pci_dev_busy = 1;
6577 -               goto err_out_disable;
6578 -       }
6579 -
6580 -       rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
6581 -       if (rc)
6582 -               goto err_out_regions;
6583 -       rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
6584 -       if (rc)
6585 -               goto err_out_regions;
6586 -
6587 -       rc = -ENOMEM;
6588 -
6589 -       ppi = &nv_port_info;
6590 -       probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
6591 -       if (!probe_ent)
6592 -               goto err_out_regions;
6593 -
6594 -       host = kmalloc(sizeof(struct nv_host), GFP_KERNEL);
6595 -       if (!host)
6596 -               goto err_out_free_ent;
6597 -
6598 -       memset(host, 0, sizeof(struct nv_host));
6599 -       host->host_desc = &nv_device_tbl[ent->driver_data];
6600 -
6601 -       probe_ent->private_data = host;
6602 -
6603 -       if (pci_resource_flags(pdev, 5) & IORESOURCE_MEM)
6604 -               host->host_flags |= NV_HOST_FLAGS_SCR_MMIO;
6605 -
6606 -       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) {
6607 -               unsigned long base;
6608 -
6609 -               probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
6610 -               if (probe_ent->mmio_base == NULL) {
6611 -                       rc = -EIO;
6612 -                       goto err_out_free_host;
6613 -               }
6614 -
6615 -               base = (unsigned long)probe_ent->mmio_base;
6616 -
6617 -               probe_ent->port[0].scr_addr =
6618 -                       base + NV_PORT0_SCR_REG_OFFSET;
6619 -               probe_ent->port[1].scr_addr =
6620 -                       base + NV_PORT1_SCR_REG_OFFSET;
6621 -       } else {
6622 -
6623 -               probe_ent->port[0].scr_addr =
6624 -                       pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET;
6625 -               probe_ent->port[1].scr_addr =
6626 -                       pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET;
6627 -       }
6628 -
6629 -       pci_set_master(pdev);
6630 -
6631 -       rc = ata_device_add(probe_ent);
6632 -       if (rc != NV_PORTS)
6633 -               goto err_out_iounmap;
6634 -
6635 -       // Enable hotplug event interrupts.
6636 -       if (host->host_desc->enable_hotplug)
6637 -               host->host_desc->enable_hotplug(probe_ent);
6638 -
6639 -       kfree(probe_ent);
6640 -
6641 -       return 0;
6642 -
6643 -err_out_iounmap:
6644 -       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
6645 -               pci_iounmap(pdev, probe_ent->mmio_base);
6646 -err_out_free_host:
6647 -       kfree(host);
6648 -err_out_free_ent:
6649 -       kfree(probe_ent);
6650 -err_out_regions:
6651 -       pci_release_regions(pdev);
6652 -err_out_disable:
6653 -       if (!pci_dev_busy)
6654 -               pci_disable_device(pdev);
6655 -err_out:
6656 -       return rc;
6657 -}
6658 -
6659 -static void nv_enable_hotplug(struct ata_probe_ent *probe_ent)
6660 -{
6661 -       u8 intr_mask;
6662 -
6663 -       outb(NV_INT_STATUS_HOTPLUG,
6664 -               probe_ent->port[0].scr_addr + NV_INT_STATUS);
6665 -
6666 -       intr_mask = inb(probe_ent->port[0].scr_addr + NV_INT_ENABLE);
6667 -       intr_mask |= NV_INT_ENABLE_HOTPLUG;
6668 -
6669 -       outb(intr_mask, probe_ent->port[0].scr_addr + NV_INT_ENABLE);
6670 -}
6671 -
6672 -static void nv_disable_hotplug(struct ata_host_set *host_set)
6673 -{
6674 -       u8 intr_mask;
6675 -
6676 -       intr_mask = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
6677 -
6678 -       intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
6679 -
6680 -       outb(intr_mask, host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
6681 -}
6682 -
6683 -static int nv_check_hotplug(struct ata_host_set *host_set)
6684 -{
6685 -       u8 intr_status;
6686 -
6687 -       intr_status = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
6688 -
6689 -       // Clear interrupt status.
6690 -       outb(0xff, host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
6691 -
6692 -       if (intr_status & NV_INT_STATUS_HOTPLUG) {
6693 -               if (intr_status & NV_INT_STATUS_PDEV_ADDED)
6694 -                       printk(KERN_WARNING "nv_sata: "
6695 -                               "Primary device added\n");
6696 -
6697 -               if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
6698 -                       printk(KERN_WARNING "nv_sata: "
6699 -                               "Primary device removed\n");
6700 -
6701 -               if (intr_status & NV_INT_STATUS_SDEV_ADDED)
6702 -                       printk(KERN_WARNING "nv_sata: "
6703 -                               "Secondary device added\n");
6704 -
6705 -               if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
6706 -                       printk(KERN_WARNING "nv_sata: "
6707 -                               "Secondary device removed\n");
6708 -
6709 -               return 1;
6710 -       }
6711 -
6712 -       return 0;
6713 -}
6714 -
6715 -static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent)
6716 -{
6717 -       struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
6718 -       u8 intr_mask;
6719 -       u8 regval;
6720 -
6721 -       pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
6722 -       regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
6723 -       pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
6724 -
6725 -       writeb(NV_INT_STATUS_HOTPLUG, probe_ent->mmio_base + NV_INT_STATUS_CK804);
6726 -
6727 -       intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_CK804);
6728 -       intr_mask |= NV_INT_ENABLE_HOTPLUG;
6729 -
6730 -       writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_CK804);
6731 -}
6732 -
6733 -static void nv_disable_hotplug_ck804(struct ata_host_set *host_set)
6734 -{
6735 -       struct pci_dev *pdev = to_pci_dev(host_set->dev);
6736 -       u8 intr_mask;
6737 -       u8 regval;
6738 -
6739 -       intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_CK804);
6740 -
6741 -       intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
6742 -
6743 -       writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_CK804);
6744 -
6745 -       pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
6746 -       regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
6747 -       pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
6748 -}
6749 -
6750 -static int nv_check_hotplug_ck804(struct ata_host_set *host_set)
6751 -{
6752 -       u8 intr_status;
6753 -
6754 -       intr_status = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
6755 -
6756 -       // Clear interrupt status.
6757 -       writeb(0xff, host_set->mmio_base + NV_INT_STATUS_CK804);
6758 -
6759 -       if (intr_status & NV_INT_STATUS_HOTPLUG) {
6760 -               if (intr_status & NV_INT_STATUS_PDEV_ADDED)
6761 -                       printk(KERN_WARNING "nv_sata: "
6762 -                               "Primary device added\n");
6763 -
6764 -               if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
6765 -                       printk(KERN_WARNING "nv_sata: "
6766 -                               "Primary device removed\n");
6767 -
6768 -               if (intr_status & NV_INT_STATUS_SDEV_ADDED)
6769 -                       printk(KERN_WARNING "nv_sata: "
6770 -                               "Secondary device added\n");
6771 -
6772 -               if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
6773 -                       printk(KERN_WARNING "nv_sata: "
6774 -                               "Secondary device removed\n");
6775 -
6776 -               return 1;
6777 -       }
6778 -
6779 -       return 0;
6780 -}
6781 -
6782 -static int __init nv_init(void)
6783 -{
6784 -       return pci_module_init(&nv_pci_driver);
6785 -}
6786 -
6787 -static void __exit nv_exit(void)
6788 -{
6789 -       pci_unregister_driver(&nv_pci_driver);
6790 -}
6791 -
6792 -module_init(nv_init);
6793 -module_exit(nv_exit);
6794 +/*\r
6795 + *  sata_nv.c - NVIDIA nForce SATA\r
6796 + *\r
6797 + *  Copyright 2004 NVIDIA Corp.  All rights reserved.\r
6798 + *  Copyright 2004 Andrew Chew\r
6799 + *\r
6800 + *  The contents of this file are subject to the Open\r
6801 + *  Software License version 1.1 that can be found at\r
6802 + *  http://www.opensource.org/licenses/osl-1.1.txt and is included herein\r
6803 + *  by reference.\r
6804 + *\r
6805 + *  Alternatively, the contents of this file may be used under the terms\r
6806 + *  of the GNU General Public License version 2 (the "GPL") as distributed\r
6807 + *  in the kernel source COPYING file, in which case the provisions of\r
6808 + *  the GPL are applicable instead of the above.  If you wish to allow\r
6809 + *  the use of your version of this file only under the terms of the\r
6810 + *  GPL and not to allow others to use your version of this file under\r
6811 + *  the OSL, indicate your decision by deleting the provisions above and\r
6812 + *  replace them with the notice and other provisions required by the GPL.\r
6813 + *  If you do not delete the provisions above, a recipient may use your\r
6814 + *  version of this file under either the OSL or the GPL.\r
6815 + *\r
6816 + *  0.11\r
6817 + *     - Added sgpio support\r
6818 + *\r
6819 + *  0.10\r
6820 + *     - Fixed spurious interrupts issue seen with the Maxtor 6H500F0 500GB\r
6821 + *       drive.  Also made the check_hotplug() callbacks return whether there\r
6822 + *       was a hotplug interrupt or not.  This was not the source of the\r
6823 + *       spurious interrupts, but is the right thing to do anyway.\r
6824 + *\r
6825 + *  0.09\r
6826 + *     - Fixed bug introduced by 0.08's MCP51 and MCP55 support.\r
6827 + *\r
6828 + *  0.08\r
6829 + *     - Added support for MCP51 and MCP55.\r
6830 + *\r
6831 + *  0.07\r
6832 + *     - Added support for RAID class code.\r
6833 + *\r
6834 + *  0.06\r
6835 + *     - Added generic SATA support by using a pci_device_id that filters on\r
6836 + *       the IDE storage class code.\r
6837 + *\r
6838 + *  0.03\r
6839 + *     - Fixed a bug where the hotplug handlers for non-CK804/MCP04 were using\r
6840 + *       mmio_base, which is only set for the CK804/MCP04 case.\r
6841 + *\r
6842 + *  0.02\r
6843 + *     - Added support for CK804 SATA controller.\r
6844 + *\r
6845 + *  0.01\r
6846 + *     - Initial revision.\r
6847 + */\r
6848 +\r
6849 +#include <linux/config.h>\r
6850 +#include <linux/version.h>\r
6851 +#include <linux/kernel.h>\r
6852 +#include <linux/module.h>\r
6853 +#include <linux/pci.h>\r
6854 +#include <linux/init.h>\r
6855 +#include <linux/blkdev.h>\r
6856 +#include <linux/delay.h>\r
6857 +#include <linux/interrupt.h>\r
6858 +#include "scsi.h"\r
6859 +#include <scsi/scsi_host.h>\r
6860 +#include <linux/libata.h>\r
6861 +\r
6862 +#define DRV_NAME                       "sata_nv"\r
6863 +#define DRV_VERSION                    "0.11-Driver Package V1.21"\r
6864 +\r
6865 +#define NV_PORTS                       2\r
6866 +#define NV_PIO_MASK                    0x1f\r
6867 +#define NV_MWDMA_MASK                  0x07\r
6868 +#define NV_UDMA_MASK                   0x7f\r
6869 +#define NV_PORT0_SCR_REG_OFFSET                0x00\r
6870 +#define NV_PORT1_SCR_REG_OFFSET                0x40\r
6871 +\r
6872 +#define NV_INT_STATUS                  0x10\r
6873 +#define NV_INT_STATUS_CK804            0x440\r
6874 +#define NV_INT_STATUS_MCP55            0x440\r
6875 +#define NV_INT_STATUS_PDEV_INT         0x01\r
6876 +#define NV_INT_STATUS_PDEV_PM          0x02\r
6877 +#define NV_INT_STATUS_PDEV_ADDED       0x04\r
6878 +#define NV_INT_STATUS_PDEV_REMOVED     0x08\r
6879 +#define NV_INT_STATUS_SDEV_INT         0x10\r
6880 +#define NV_INT_STATUS_SDEV_PM          0x20\r
6881 +#define NV_INT_STATUS_SDEV_ADDED       0x40\r
6882 +#define NV_INT_STATUS_SDEV_REMOVED     0x80\r
6883 +#define NV_INT_STATUS_PDEV_HOTPLUG     (NV_INT_STATUS_PDEV_ADDED | \\r
6884 +                                       NV_INT_STATUS_PDEV_REMOVED)\r
6885 +#define NV_INT_STATUS_SDEV_HOTPLUG     (NV_INT_STATUS_SDEV_ADDED | \\r
6886 +                                       NV_INT_STATUS_SDEV_REMOVED)\r
6887 +#define NV_INT_STATUS_HOTPLUG          (NV_INT_STATUS_PDEV_HOTPLUG | \\r
6888 +                                       NV_INT_STATUS_SDEV_HOTPLUG)\r
6889 +\r
6890 +#define NV_INT_ENABLE                  0x11\r
6891 +#define NV_INT_ENABLE_CK804            0x441\r
6892 +#define NV_INT_ENABLE_MCP55            0x444\r
6893 +#define NV_INT_ENABLE_PDEV_MASK                0x01\r
6894 +#define NV_INT_ENABLE_PDEV_PM          0x02\r
6895 +#define NV_INT_ENABLE_PDEV_ADDED       0x04\r
6896 +#define NV_INT_ENABLE_PDEV_REMOVED     0x08\r
6897 +#define NV_INT_ENABLE_SDEV_MASK                0x10\r
6898 +#define NV_INT_ENABLE_SDEV_PM          0x20\r
6899 +#define NV_INT_ENABLE_SDEV_ADDED       0x40\r
6900 +#define NV_INT_ENABLE_SDEV_REMOVED     0x80\r
6901 +#define NV_INT_ENABLE_PDEV_HOTPLUG     (NV_INT_ENABLE_PDEV_ADDED | \\r
6902 +                                       NV_INT_ENABLE_PDEV_REMOVED)\r
6903 +#define NV_INT_ENABLE_SDEV_HOTPLUG     (NV_INT_ENABLE_SDEV_ADDED | \\r
6904 +                                       NV_INT_ENABLE_SDEV_REMOVED)\r
6905 +#define NV_INT_ENABLE_HOTPLUG          (NV_INT_ENABLE_PDEV_HOTPLUG | \\r
6906 +                                       NV_INT_ENABLE_SDEV_HOTPLUG)\r
6907 +\r
6908 +#define NV_INT_CONFIG                  0x12\r
6909 +#define NV_INT_CONFIG_METHD            0x01 // 0 = INT, 1 = SMI\r
6910 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E\r
6911 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2        0x037F\r
6912 +\r
6913 +// For PCI config register 20\r
6914 +#define NV_MCP_SATA_CFG_20             0x50\r
6915 +#define NV_MCP_SATA_CFG_20_SATA_SPACE_EN       0x04\r
6916 +\r
6917 +\r
6918 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)\r
6919 +#define RHAS3U7\r
6920 +#endif\r
6921 +#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,16)\r
6922 +#define SLES10\r
6923 +#endif\r
6924 +\r
6925 +// SGPIO (Serial GPIO) LED control support\r
6926 +// SGPIO defines\r
6927 +// SGPIO state defines\r
6928 +#define NV_SGPIO_STATE_RESET           0\r
6929 +#define NV_SGPIO_STATE_OPERATIONAL     1\r
6930 +#define NV_SGPIO_STATE_ERROR           2\r
6931 +\r
6932 +// SGPIO command opcodes\r
6933 +#define NV_SGPIO_CMD_RESET             0\r
6934 +#define NV_SGPIO_CMD_READ_PARAMS       1\r
6935 +#define NV_SGPIO_CMD_READ_DATA         2\r
6936 +#define NV_SGPIO_CMD_WRITE_DATA                3\r
6937 +\r
6938 +// SGPIO command status defines\r
6939 +#define NV_SGPIO_CMD_OK                        0\r
6940 +#define NV_SGPIO_CMD_ACTIVE            1\r
6941 +#define NV_SGPIO_CMD_ERR               2\r
6942 +\r
6943 +#define NV_SGPIO_UPDATE_TICK           90\r
6944 +#define NV_SGPIO_MIN_UPDATE_DELTA      33\r
6945 +#define NV_CNTRLR_SHARE_INIT           2\r
6946 +#define NV_SGPIO_MAX_ACTIVITY_ON       20\r
6947 +#define NV_SGPIO_MIN_FORCE_OFF         5\r
6948 +#define NV_SGPIO_PCI_CSR_OFFSET                0x58\r
6949 +#define NV_SGPIO_PCI_CB_OFFSET         0x5C\r
6950 +#define NV_SGPIO_DFLT_CB_SIZE          256\r
6951 +#define NV_ON 1\r
6952 +#define NV_OFF 0\r
6953 +#ifndef bool\r
6954 +#define bool u8\r
6955 +#endif\r
6956 +\r
6957 +static inline unsigned int jiffies_to_msecs1(const unsigned long j)\r
6958 +{\r
6959 +#if HZ <= 1000 && !(1000 % HZ)\r
6960 +        return (1000 / HZ) * j;\r
6961 +#elif HZ > 1000 && !(HZ % 1000)\r
6962 +        return (j + (HZ / 1000) - 1)/(HZ / 1000);\r
6963 +#else\r
6964 +        return (j * 1000) / HZ;\r
6965 +#endif\r
6966 +}\r
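jiffies_to_msecs1() above re-implements the jiffies-to-milliseconds conversion so the SGPIO timer code does not depend on a kernel that exports jiffies_to_msecs(); the three branches keep the math exact whenever 1000 and HZ divide evenly. A standalone user-space check of the arithmetic, with the kernel's HZ constant passed in as a parameter so each branch can be exercised:

    #include <stdio.h>

    /* Mirrors the three branches of jiffies_to_msecs1() above
     * (user-space check, not kernel code). */
    static unsigned int to_msecs(unsigned long j, unsigned int hz)
    {
            if (hz <= 1000 && !(1000 % hz))
                    return (1000 / hz) * j;         /* HZ divides 1000: exact */
            else if (hz > 1000 && !(hz % 1000))
                    return (j + (hz / 1000) - 1) / (hz / 1000); /* round up */
            else
                    return (j * 1000) / hz;         /* general case */
    }

    int main(void)
    {
            /* NV_SGPIO_UPDATE_TICK (90) converted under three HZ settings: */
            printf("%u %u %u\n", to_msecs(90, 100),         /* 900 */
                   to_msecs(90, 1000),                      /* 90 */
                   to_msecs(90, 1024));                     /* 87 */
            return 0;
    }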
6967 +\r
6968 +#define BF_EXTRACT(v, off, bc) \\r
6969 +       ((((u8)(v)) >> (off)) & ((1 << (bc)) - 1))\r
6970 +\r
6971 +#define BF_INS(v, ins, off, bc)                                \\r
6972 +       (((v) & ~((((1 << (bc)) - 1)) << (off))) |      \\r
6973 +       (((u8)(ins)) << (off)))\r
6974 +\r
6975 +#define BF_EXTRACT_U32(v, off, bc)     \\r
6976 +       ((((u32)(v)) >> (off)) & ((1 << (bc)) - 1))\r
6977 +\r
6978 +#define BF_INS_U32(v, ins, off, bc)                    \\r
6979 +       (((v) & ~((((1 << (bc)) - 1)) << (off))) |      \\r
6980 +       (((u32)(ins)) << (off)))\r
6981 +\r
6982 +#define GET_SGPIO_STATUS(v)    BF_EXTRACT(v, 0, 2)\r
6983 +#define GET_CMD_STATUS(v)      BF_EXTRACT(v, 3, 2)\r
6984 +#define GET_CMD(v)             BF_EXTRACT(v, 5, 3)\r
6985 +#define SET_CMD(v, cmd)                BF_INS(v, cmd, 5, 3) \r
6986 +\r
6987 +#define GET_ENABLE(v)          BF_EXTRACT_U32(v, 23, 1)\r
6988 +#define SET_ENABLE(v)          BF_INS_U32(v, 1, 23, 1)\r
6989 +\r
6990 +// The activity field lives in a u8, so use the u8 bit-field helpers.\r
6991 +#define GET_ACTIVITY(v)                BF_EXTRACT(v, 5, 3)\r
6992 +#define SET_ACTIVITY(v, on_off)        BF_INS(v, on_off, 5, 3)\r
6993 +\r
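The BF_* helpers above treat a u8 (or a u32, in the _U32 variants) as a packed bit-field: extract bc bits starting at bit off, or splice a new value in after clearing the old field. Note that BF_INS() does not mask ins down to bc bits, so callers must pass values that fit the field or neighbouring bits get corrupted. A standalone demo with the u8 macros copied verbatim:

    #include <stdio.h>

    typedef unsigned char u8;

    /* The two u8 helpers, copied verbatim from the patch: */
    #define BF_EXTRACT(v, off, bc) \
            ((((u8)(v)) >> (off)) & ((1 << (bc)) - 1))
    #define BF_INS(v, ins, off, bc)                         \
            (((v) & ~((((1 << (bc)) - 1)) << (off))) |      \
            (((u8)(ins)) << (off)))

    #define GET_CMD(v)      BF_EXTRACT(v, 5, 3)
    #define SET_CMD(v, cmd) BF_INS(v, cmd, 5, 3)

    int main(void)
    {
            u8 csr = 0x1f;                  /* low five bits already in use */
            csr = SET_CMD(csr, 2);          /* 2 == NV_SGPIO_CMD_READ_DATA */
            printf("csr=0x%02x cmd=%u\n", csr, GET_CMD(csr)); /* 0x5f, 2 */
            return 0;
    }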
6994 +union nv_sgpio_nvcr \r
6995 +{\r
6996 +       struct {\r
6997 +               u8      init_cnt;\r
6998 +               u8      cb_size;\r
6999 +               u8      cbver;\r
7000 +               u8      rsvd;\r
7001 +       } bit;\r
7002 +       u32     all;\r
7003 +};\r
7004 +\r
7005 +union nv_sgpio_tx \r
7006 +{\r
7007 +       u8      tx_port[4];\r
7008 +       u32     all;\r
7009 +};\r
7010 +\r
7011 +struct nv_sgpio_cb \r
7012 +{\r
7013 +       u64                     scratch_space;\r
7014 +       union nv_sgpio_nvcr     nvcr;\r
7015 +       u32     cr0;\r
7016 +       u32                     rsvd[4];\r
7017 +       union nv_sgpio_tx       tx[2];\r
7018 +};\r
7019 +\r
7020 +struct nv_sgpio_host_share\r
7021 +{\r
7022 +       spinlock_t      *plock;\r
7023 +       unsigned long   *ptstamp;\r
7024 +};\r
7025 +\r
7026 +struct nv_sgpio_host_flags\r
7027 +{\r
7028 +       u8      sgpio_enabled:1;\r
7029 +       u8      need_update:1;\r
7030 +       u8      rsvd:6;\r
7031 +};\r
7032 +       \r
7033 +struct nv_host_sgpio\r
7034 +{\r
7035 +       struct nv_sgpio_host_flags      flags;\r
7036 +       u8                              *pcsr;\r
7037 +       struct nv_sgpio_cb              *pcb;   \r
7038 +       struct nv_sgpio_host_share      share;\r
7039 +       struct timer_list               sgpio_timer;\r
7040 +};\r
7041 +\r
7042 +struct nv_sgpio_port_flags\r
7043 +{\r
7044 +       u8      last_state:1;\r
7045 +       u8      recent_activity:1;\r
7046 +       u8      rsvd:6;\r
7047 +};\r
7048 +\r
7049 +struct nv_sgpio_led \r
7050 +{\r
7051 +       struct nv_sgpio_port_flags      flags;\r
7052 +       u8                              force_off;\r
7053 +       u8                              last_cons_active;\r
7054 +};\r
7055 +\r
7056 +struct nv_port_sgpio\r
7057 +{\r
7058 +       struct nv_sgpio_led     activity;\r
7059 +};\r
7060 +\r
7061 +static spinlock_t      nv_sgpio_lock;\r
7062 +static unsigned long   nv_sgpio_tstamp;\r
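struct nv_sgpio_cb above mirrors the in-memory control block whose address and size the driver picks up through the PCI config registers at NV_SGPIO_PCI_CB_OFFSET and NV_SGPIO_PCI_CSR_OFFSET. A standalone dump of the layout, assuming natural alignment, with the kernel types stubbed out for user space:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint8_t u8; typedef uint32_t u32; typedef uint64_t u64;

    union nv_sgpio_nvcr {
            struct { u8 init_cnt, cb_size, cbver, rsvd; } bit;
            u32 all;
    };
    union nv_sgpio_tx { u8 tx_port[4]; u32 all; };

    struct nv_sgpio_cb {
            u64                     scratch_space;
            union nv_sgpio_nvcr     nvcr;
            u32                     cr0;
            u32                     rsvd[4];
            union nv_sgpio_tx       tx[2];
    };

    int main(void)
    {
            printf("nvcr @ %zu, cr0 @ %zu, tx[0] @ %zu, total %zu bytes\n",
                   offsetof(struct nv_sgpio_cb, nvcr),
                   offsetof(struct nv_sgpio_cb, cr0),
                   offsetof(struct nv_sgpio_cb, tx),
                   sizeof(struct nv_sgpio_cb));     /* 8, 12, 32, 40 */
            return 0;
    }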
7063 +\r
7064 +static inline void nv_sgpio_set_csr(u8 csr, unsigned long pcsr)\r
7065 +{\r
7066 +       outb(csr, pcsr);\r
7067 +}\r
7068 +\r
7069 +static inline u8 nv_sgpio_get_csr(unsigned long pcsr)\r
7070 +{\r
7071 +       return inb(pcsr);\r
7072 +}\r
7073 +\r
7074 +static inline u8 nv_sgpio_get_func(struct ata_host_set *host_set)\r
7075 +{\r
7076 +       u8 devfn = (to_pci_dev(host_set->dev))->devfn;\r
7077 +       return (PCI_FUNC(devfn));\r
7078 +}\r
7079 +\r
7080 +static inline u8 nv_sgpio_tx_host_offset(struct ata_host_set *host_set)\r
7081 +{\r
7082 +       return (nv_sgpio_get_func(host_set)/NV_CNTRLR_SHARE_INIT);\r
7083 +}\r
7084 +\r
7085 +static inline u8 nv_sgpio_calc_tx_offset(u8 cntrlr, u8 channel)\r
7086 +{\r
7087 +       return (sizeof(union nv_sgpio_tx) - (NV_CNTRLR_SHARE_INIT *\r
7088 +               (cntrlr % NV_CNTRLR_SHARE_INIT)) - channel - 1);\r
7089 +}\r
7090 +\r
7091 +static inline u8 nv_sgpio_tx_port_offset(struct ata_port *ap)\r
7092 +{\r
7093 +       u8 cntrlr = nv_sgpio_get_func(ap->host_set);\r
7094 +       return (nv_sgpio_calc_tx_offset(cntrlr, ap->port_no));\r
7095 +}\r
7096 +\r
7097 +static inline bool nv_sgpio_capable(const struct pci_device_id *ent)\r
7098 +{\r
7099 +       if (ent->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2)\r
7100 +               return 1;\r
7101 +       else\r
7102 +               return 0;\r
7103 +}\r
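The offset helpers above pack the per-port LED bytes: with NV_CNTRLR_SHARE_INIT == 2, two controllers (adjacent PCI functions) share each 4-byte tx word, and bytes are handed out from the high end down. A standalone check of the arithmetic in nv_sgpio_calc_tx_offset():

    #include <stdio.h>

    #define NV_CNTRLR_SHARE_INIT    2
    #define TX_WORD_SIZE            4       /* sizeof(union nv_sgpio_tx) */

    static unsigned calc_tx_offset(unsigned cntrlr, unsigned channel)
    {
            return TX_WORD_SIZE - (NV_CNTRLR_SHARE_INIT *
                   (cntrlr % NV_CNTRLR_SHARE_INIT)) - channel - 1;
    }

    int main(void)
    {
            unsigned c, ch;

            for (c = 0; c < 2; c++)
                    for (ch = 0; ch < 2; ch++)      /* prints 3, 2, 1, 0 */
                            printf("cntrlr %u chan %u -> byte %u\n",
                                   c, ch, calc_tx_offset(c, ch));
            return 0;
    }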
7104 +\r
7105 +\r
7106 +\r
7107 +\r
7108 +\r
7109 +\r
7110 +static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);\r
7111 +static irqreturn_t nv_interrupt (int irq, void *dev_instance,\r
7112 +                                struct pt_regs *regs);\r
7113 +static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);\r
7114 +static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);\r
7115 +static void nv_host_stop (struct ata_host_set *host_set);\r
7116 +static int nv_port_start(struct ata_port *ap);\r
7117 +static void nv_port_stop(struct ata_port *ap);\r
7118 +static int nv_qc_issue(struct ata_queued_cmd *qc);\r
7119 +static void nv_enable_hotplug(struct ata_probe_ent *probe_ent);\r
7120 +static void nv_disable_hotplug(struct ata_host_set *host_set);\r
7121 +static void nv_check_hotplug(struct ata_host_set *host_set);\r
7122 +static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent);\r
7123 +static void nv_disable_hotplug_ck804(struct ata_host_set *host_set);\r
7124 +static void nv_check_hotplug_ck804(struct ata_host_set *host_set);\r
7125 +static void nv_enable_hotplug_mcp55(struct ata_probe_ent *probe_ent);\r
7126 +static void nv_disable_hotplug_mcp55(struct ata_host_set *host_set);\r
7127 +static void nv_check_hotplug_mcp55(struct ata_host_set *host_set);\r
7128 +enum nv_host_type\r
7129 +{\r
7130 +       GENERIC,\r
7131 +       NFORCE2,\r
7132 +       NFORCE3,\r
7133 +       CK804,\r
7134 +       MCP55\r
7135 +};\r
7136 +\r
7137 +static struct pci_device_id nv_pci_tbl[] = {\r
7138 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,\r
7139 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },\r
7140 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,\r
7141 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },\r
7142 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,\r
7143 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },\r
7144 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,\r
7145 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },\r
7146 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,\r
7147 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },\r
7148 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,\r
7149 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },\r
7150 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,\r
7151 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },\r
7152 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,\r
7153 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },\r
7154 +       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,\r
7155 +               PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },\r
7156 +       { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,\r
7157 +               PCI_ANY_ID, PCI_ANY_ID,\r
7158 +               PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },\r
7159 +       { 0, } /* terminate list */\r
7160 +};\r
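The final nv_pci_tbl entry above matches any NVIDIA device by class code rather than by device ID: the PCI core masks the 24-bit class/prog-if word with 0xffff00 and compares it against PCI_CLASS_STORAGE_IDE<<8, so any programming interface is accepted. A standalone sketch of that comparison (the prog-if value is illustrative):

    #include <stdio.h>

    #define PCI_CLASS_STORAGE_IDE   0x0101

    static int class_matches(unsigned dev_class, unsigned wanted, unsigned mask)
    {
            return (dev_class & mask) == wanted;
    }

    int main(void)
    {
            unsigned ide_any_progif = 0x01018a;     /* IDE class, some prog-if */

            printf("%d\n", class_matches(ide_any_progif,
                                         PCI_CLASS_STORAGE_IDE << 8,
                                         0xffff00));        /* 1: matches */
            return 0;
    }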
7161 +\r
7162 +#define NV_HOST_FLAGS_SCR_MMIO 0x00000001\r
7163 +\r
7164 +struct nv_host_desc\r
7165 +{\r
7166 +       enum nv_host_type       host_type;\r
7167 +       void                    (*enable_hotplug)(struct ata_probe_ent *probe_ent);\r
7168 +       void                    (*disable_hotplug)(struct ata_host_set *host_set);\r
7169 +       void                    (*check_hotplug)(struct ata_host_set *host_set);\r
7170 +\r
7171 +};\r
7172 +static struct nv_host_desc nv_device_tbl[] = {\r
7173 +       {\r
7174 +               .host_type      = GENERIC,\r
7175 +               .enable_hotplug = NULL,\r
7176 +               .disable_hotplug= NULL,\r
7177 +               .check_hotplug  = NULL,\r
7178 +       },\r
7179 +       {\r
7180 +               .host_type      = NFORCE2,\r
7181 +               .enable_hotplug = nv_enable_hotplug,\r
7182 +               .disable_hotplug= nv_disable_hotplug,\r
7183 +               .check_hotplug  = nv_check_hotplug,\r
7184 +       },\r
7185 +       {\r
7186 +               .host_type      = NFORCE3,\r
7187 +               .enable_hotplug = nv_enable_hotplug,\r
7188 +               .disable_hotplug= nv_disable_hotplug,\r
7189 +               .check_hotplug  = nv_check_hotplug,\r
7190 +       },\r
7191 +       {       .host_type      = CK804,\r
7192 +               .enable_hotplug = nv_enable_hotplug_ck804,\r
7193 +               .disable_hotplug= nv_disable_hotplug_ck804,\r
7194 +               .check_hotplug  = nv_check_hotplug_ck804,\r
7195 +       },\r
7196 +       {       .host_type      = MCP55,\r
7197 +               .enable_hotplug = nv_enable_hotplug_mcp55,\r
7198 +               .disable_hotplug= nv_disable_hotplug_mcp55,\r
7199 +               .check_hotplug  = nv_check_hotplug_mcp55,\r
7200 +       },\r
7201 +};\r
7202 +\r
7203 +\r
7204 +struct nv_host\r
7205 +{\r
7206 +       struct nv_host_desc     *host_desc;\r
7207 +       unsigned long           host_flags;\r
7208 +       struct nv_host_sgpio    host_sgpio;\r
7209 +       struct pci_dev          *pdev;\r
7210 +};\r
7211 +\r
7212 +struct nv_port\r
7213 +{\r
7214 +       struct nv_port_sgpio    port_sgpio;\r
7215 +};\r
7216 +\r
7217 +// SGPIO function prototypes\r
7218 +static void nv_sgpio_init(struct pci_dev *pdev, struct nv_host *phost);\r
7219 +static void nv_sgpio_reset(u8 *pcsr);\r
7220 +static void nv_sgpio_set_timer(struct timer_list *ptimer, \r
7221 +                               unsigned int timeout_msec);\r
7222 +static void nv_sgpio_timer_handler(unsigned long ptr);\r
7223 +static void nv_sgpio_host_cleanup(struct nv_host *host);\r
7224 +static bool nv_sgpio_update_led(struct nv_sgpio_led *led, bool *on_off);\r
7225 +static void nv_sgpio_clear_all_leds(struct ata_port *ap);\r
7226 +static bool nv_sgpio_send_cmd(struct nv_host *host, u8 cmd);\r
7227 +\r
7228 +\r
7229 +static struct pci_driver nv_pci_driver = {\r
7230 +       .name                   = DRV_NAME,\r
7231 +       .id_table               = nv_pci_tbl,\r
7232 +       .probe                  = nv_init_one,\r
7233 +       .remove                 = ata_pci_remove_one,\r
7234 +};\r
7235 +\r
7236 +\r
7237 +#ifdef SLES10\r
7238 +static struct scsi_host_template nv_sht = {\r
7239 +#else\r
7240 +static Scsi_Host_Template nv_sht = {\r
7241 +#endif\r
7242 +       .module                 = THIS_MODULE,\r
7243 +       .name                   = DRV_NAME,\r
7244 +#ifdef RHAS3U7\r
7245 +       .detect                 = ata_scsi_detect,\r
7246 +       .release                = ata_scsi_release,\r
7247 +#endif\r
7248 +       .ioctl                  = ata_scsi_ioctl,\r
7249 +       .queuecommand           = ata_scsi_queuecmd,\r
7250 +       .eh_strategy_handler    = ata_scsi_error,\r
7251 +       .can_queue              = ATA_DEF_QUEUE,\r
7252 +       .this_id                = ATA_SHT_THIS_ID,\r
7253 +       .sg_tablesize           = LIBATA_MAX_PRD,\r
7254 +       .max_sectors            = ATA_MAX_SECTORS,\r
7255 +       .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,\r
7256 +#ifdef RHAS3U7\r
7257 +       .use_new_eh_code        = ATA_SHT_NEW_EH_CODE,\r
7258 +#endif\r
7259 +       .emulated               = ATA_SHT_EMULATED,\r
7260 +       .use_clustering         = ATA_SHT_USE_CLUSTERING,\r
7261 +       .proc_name              = DRV_NAME,\r
7262 +#ifndef RHAS3U7\r
7263 +       .dma_boundary           = ATA_DMA_BOUNDARY,\r
7264 +       .slave_configure        = ata_scsi_slave_config,\r
7265 +#endif\r
7266 +       .bios_param             = ata_std_bios_param,\r
7267 +};\r
7268 +\r
7269 +static struct ata_port_operations nv_ops = {\r
7270 +       .port_disable           = ata_port_disable,\r
7271 +       .tf_load                = ata_tf_load,\r
7272 +       .tf_read                = ata_tf_read,\r
7273 +       .exec_command           = ata_exec_command,\r
7274 +       .check_status           = ata_check_status,\r
7275 +       .dev_select             = ata_std_dev_select,\r
7276 +       .phy_reset              = sata_phy_reset,\r
7277 +       .bmdma_setup            = ata_bmdma_setup,\r
7278 +       .bmdma_start            = ata_bmdma_start,\r
7279 +       .bmdma_stop             = ata_bmdma_stop,\r
7280 +       .bmdma_status           = ata_bmdma_status,\r
7281 +       .qc_prep                = ata_qc_prep,\r
7282 +       .qc_issue               = nv_qc_issue,\r
7283 +       .eng_timeout            = ata_eng_timeout,\r
7284 +       .irq_handler            = nv_interrupt,\r
7285 +       .irq_clear              = ata_bmdma_irq_clear,\r
7286 +       .scr_read               = nv_scr_read,\r
7287 +       .scr_write              = nv_scr_write,\r
7288 +       .port_start             = nv_port_start,\r
7289 +       .port_stop              = nv_port_stop,\r
7290 +       .host_stop              = nv_host_stop,\r
7291 +};\r
7292 +\r
7293 +/* FIXME: The hardware provides the necessary SATA PHY controls\r
7294 + * to support ATA_FLAG_SATA_RESET.  However, it is currently\r
7295 + * necessary to disable that flag, to solve misdetection problems.\r
7296 + * See http://bugme.osdl.org/show_bug.cgi?id=3352 for more info.\r
7297 + *\r
7298 + * This problem really needs to be investigated further.  But in the\r
7299 + * meantime, we avoid ATA_FLAG_SATA_RESET to get people working.\r
7300 + */\r
7301 +static struct ata_port_info nv_port_info = {\r
7302 +       .sht            = &nv_sht,\r
7303 +       .host_flags     = ATA_FLAG_SATA |\r
7304 +                         /* ATA_FLAG_SATA_RESET | */\r
7305 +                         ATA_FLAG_SRST |\r
7306 +                         ATA_FLAG_NO_LEGACY,\r
7307 +       .pio_mask       = NV_PIO_MASK,\r
7308 +       .mwdma_mask     = NV_MWDMA_MASK,\r
7309 +       .udma_mask      = NV_UDMA_MASK,\r
7310 +       .port_ops       = &nv_ops,\r
7311 +};\r
7312 +\r
7313 +MODULE_AUTHOR("NVIDIA");\r
7314 +MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");\r
7315 +MODULE_LICENSE("GPL");\r
7316 +MODULE_DEVICE_TABLE(pci, nv_pci_tbl);\r
7317 +MODULE_VERSION(DRV_VERSION);\r
7318 +\r
7319 +static irqreturn_t nv_interrupt (int irq, void *dev_instance,\r
7320 +                                struct pt_regs *regs)\r
7321 +{\r
7322 +       struct ata_host_set *host_set = dev_instance;\r
7323 +       struct nv_host *host = host_set->private_data;\r
7324 +       unsigned int i;\r
7325 +       unsigned int handled = 0;\r
7326 +       unsigned long flags;\r
7327 +\r
7328 +       spin_lock_irqsave(&host_set->lock, flags);\r
7329 +\r
7330 +       for (i = 0; i < host_set->n_ports; i++) {\r
7331 +               struct ata_port *ap;\r
7332 +\r
7333 +               ap = host_set->ports[i];\r
7334 +#ifdef ATA_FLAG_NOINTR\r
7335 +               if (ap &&\r
7336 +                   !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {\r
7337 +#else\r
7338 +               if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {\r
7339 +#endif\r
7340 +                       struct ata_queued_cmd *qc;\r
7341 +\r
7342 +                       qc = ata_qc_from_tag(ap, ap->active_tag);\r
7343 +                       if (qc && (!(qc->tf.ctl & ATA_NIEN)))\r
7344 +                               handled += ata_host_intr(ap, qc);\r
7345 +                       else\r
7346 +                               // No request pending?  Clear interrupt status\r
7347 +                               // anyway, in case there's one pending.\r
7348 +                               ap->ops->check_status(ap);\r
7349 +               }\r
7350 +\r
7351 +       }\r
7352 +\r
7353 +       if (host->host_desc->check_hotplug)\r
7354 +               host->host_desc->check_hotplug(host_set);\r
7355 +\r
7356 +       spin_unlock_irqrestore(&host_set->lock, flags);\r
7357 +\r
7358 +       return IRQ_RETVAL(handled);\r
7359 +}\r
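nv_interrupt() above sums the per-port results and converts the total with IRQ_RETVAL(); in kernels of this vintage that is essentially (handled != 0), i.e. IRQ_HANDLED when any port claimed the interrupt and IRQ_NONE otherwise, which is what lets the core detect spurious interrupts on shared lines. A standalone sketch mirroring those definitions (values illustrative of the 2.6-era headers):

    #include <stdio.h>

    #define IRQ_NONE        0
    #define IRQ_HANDLED     1
    #define IRQ_RETVAL(x)   ((x) ? IRQ_HANDLED : IRQ_NONE)

    int main(void)
    {
            unsigned int handled = 2;       /* e.g. both ports had work */

            printf("%d %d\n", IRQ_RETVAL(handled), IRQ_RETVAL(0)); /* 1 0 */
            return 0;
    }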
7360 +\r
7361 +static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)\r
7362 +{\r
7363 +       struct ata_host_set *host_set = ap->host_set;\r
7364 +       struct nv_host *host = host_set->private_data;\r
7365 +\r
7366 +       if (sc_reg > SCR_CONTROL)\r
7367 +               return 0xffffffffU;\r
7368 +\r
7369 +       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)\r
7370 +               return readl((void*)ap->ioaddr.scr_addr + (sc_reg * 4));\r
7371 +       else\r
7372 +               return inl(ap->ioaddr.scr_addr + (sc_reg * 4));\r
7373 +}\r
7374 +\r
7375 +static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)\r
7376 +{\r
7377 +       struct ata_host_set *host_set = ap->host_set;\r
7378 +       struct nv_host *host = host_set->private_data;\r
7379 +\r
7380 +       if (sc_reg > SCR_CONTROL)\r
7381 +               return;\r
7382 +\r
7383 +       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)\r
7384 +               writel(val, (void*)ap->ioaddr.scr_addr + (sc_reg * 4));\r
7385 +       else\r
7386 +               outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));\r
7387 +}\r
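nv_scr_read()/nv_scr_write() above compute register addresses as scr_addr + sc_reg * 4: the per-port SCR blocks sit 0x40 apart (NV_PORT0/1_SCR_REG_OFFSET) and each SCR register is 32 bits wide. A standalone check, assuming the 2.6-era libata indices SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2:

    #include <stdio.h>

    #define NV_PORT0_SCR_REG_OFFSET 0x00
    #define NV_PORT1_SCR_REG_OFFSET 0x40

    static unsigned long scr_addr(unsigned long base, int port, unsigned sc_reg)
    {
            unsigned long port_off = port ? NV_PORT1_SCR_REG_OFFSET
                                          : NV_PORT0_SCR_REG_OFFSET;
            return base + port_off + sc_reg * 4;
    }

    int main(void)
    {
            /* SCR_CONTROL (index 2) on both ports, for a base of 0x1000: */
            printf("0x%lx 0x%lx\n", scr_addr(0x1000, 0, 2),
                                    scr_addr(0x1000, 1, 2)); /* 0x1008 0x1048 */
            return 0;
    }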
7388 +\r
7389 +static void nv_host_stop (struct ata_host_set *host_set)\r
7390 +{\r
7391 +       struct nv_host *host = host_set->private_data;\r
7392 +\r
7393 +       // Disable hotplug event interrupts.\r
7394 +       if (host->host_desc->disable_hotplug)\r
7395 +               host->host_desc->disable_hotplug(host_set);\r
7396 +\r
7397 +       nv_sgpio_host_cleanup(host);\r
7398 +       kfree(host);\r
7399 +#ifdef RHAS3U7\r
7400 +\r
7401 +       ata_host_stop(host_set);\r
7402 +#endif\r
7403 +}\r
7404 +\r
7405 +static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)\r
7406 +{\r
7407 +       static int printed_version = 0;\r
7408 +       struct nv_host *host;\r
7409 +       struct ata_port_info *ppi;\r
7410 +       struct ata_probe_ent *probe_ent;\r
7411 +       int pci_dev_busy = 0;\r
7412 +       int rc;\r
7413 +       u32 bar;\r
7414 +\r
7415 +        // Make sure this is a SATA controller by counting the number of bars\r
7416 +        // (NVIDIA SATA controllers will always have six bars).  Otherwise,\r
7417 +        // it's an IDE controller and we ignore it.\r
7418 +       for (bar=0; bar<6; bar++)\r
7419 +               if (pci_resource_start(pdev, bar) == 0)\r
7420 +                       return -ENODEV;\r
7421 +\r
7422 +       if (!printed_version++)\r
7423 +               printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");\r
7424 +\r
7425 +       rc = pci_enable_device(pdev);\r
7426 +       if (rc)\r
7427 +               goto err_out;\r
7428 +\r
7429 +       rc = pci_request_regions(pdev, DRV_NAME);\r
7430 +       if (rc) {\r
7431 +               pci_dev_busy = 1;\r
7432 +               goto err_out_disable;\r
7433 +       }\r
7434 +\r
7435 +       rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);\r
7436 +       if (rc)\r
7437 +               goto err_out_regions;\r
7438 +#ifndef RHAS3U7\r
7439 +       rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);\r
7440 +       if (rc)\r
7441 +               goto err_out_regions;\r
7442 +#endif\r
7443 +       rc = -ENOMEM;\r
7444 +\r
7445 +       ppi = &nv_port_info;\r
7446 +\r
7447 +       probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);\r
7448 +\r
7449 +       if (!probe_ent)\r
7450 +               goto err_out_regions;\r
7451 +\r
7452 +       host = kmalloc(sizeof(struct nv_host), GFP_KERNEL);\r
7453 +       if (!host)\r
7454 +               goto err_out_free_ent;\r
7455 +\r
7456 +       memset(host, 0, sizeof(struct nv_host));\r
7457 +       host->host_desc = &nv_device_tbl[ent->driver_data];\r
7458 +\r
7459 +       probe_ent->private_data = host;\r
7460 +\r
7461 +       if (pci_resource_flags(pdev, 5) & IORESOURCE_MEM)\r
7462 +               host->host_flags |= NV_HOST_FLAGS_SCR_MMIO;\r
7463 +\r
7464 +       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) {\r
7465 +               unsigned long base;\r
7466 +\r
7467 +               probe_ent->mmio_base = ioremap(pci_resource_start(pdev, 5),\r
7468 +                               pci_resource_len(pdev, 5));\r
7469 +               if (probe_ent->mmio_base == NULL) {\r
7470 +                       rc = -EIO;\r
7471 +                       goto err_out_free_host;\r
7472 +               }\r
7473 +\r
7474 +               base = (unsigned long)probe_ent->mmio_base;\r
7475 +\r
7476 +               probe_ent->port[0].scr_addr =\r
7477 +                       base + NV_PORT0_SCR_REG_OFFSET;\r
7478 +               probe_ent->port[1].scr_addr =\r
7479 +                       base + NV_PORT1_SCR_REG_OFFSET;\r
7480 +       } else {\r
7481 +\r
7482 +               probe_ent->port[0].scr_addr =\r
7483 +                       pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET;\r
7484 +               probe_ent->port[1].scr_addr =\r
7485 +                       pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET;\r
7486 +       }\r
7487 +\r
7488 +       pci_set_master(pdev);\r
7489 +#ifdef RHAS3U7\r
7490 +       ata_add_to_probe_list(probe_ent);\r
7491 +       \r
7492 +       if (nv_sgpio_capable(ent))\r
7493 +               nv_sgpio_init(pdev, host);\r
7494 +       // Enable hotplug event interrupts.\r
7495 +       if (host->host_desc->enable_hotplug)\r
7496 +               host->host_desc->enable_hotplug(probe_ent);\r
7497 +\r
7498 +       return 0;\r
7499 +#else\r
7500 +       rc = ata_device_add(probe_ent);\r
7501 +       if (rc != NV_PORTS)\r
7502 +               goto err_out_iounmap;\r
7503 +       \r
7504 +       if (nv_sgpio_capable(ent))\r
7505 +               nv_sgpio_init(pdev, host);\r
7506 +       // Enable hotplug event interrupts.\r
7507 +       if (host->host_desc->enable_hotplug)\r
7508 +               host->host_desc->enable_hotplug(probe_ent);\r
7509 +\r
7510 +       kfree(probe_ent);\r
7511 +\r
7512 +       return 0;\r
7513 +\r
7514 +err_out_iounmap:\r
7515 +       if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)\r
7516 +               iounmap(probe_ent->mmio_base);\r
7517 +#endif\r
7518 +err_out_free_host:\r
7519 +       kfree(host);\r
7520 +err_out_free_ent:\r
7521 +       kfree(probe_ent);\r
7522 +err_out_regions:\r
7523 +       pci_release_regions(pdev);\r
7524 +err_out_disable:\r
7525 +       if (!pci_dev_busy)\r
7526 +               pci_disable_device(pdev);\r
7527 +err_out:\r
7528 +       return rc;\r
7529 +}\r
7530 +\r
7531 +\r
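+// Per-port setup: do the generic libata port start, then allocate the\r
+// private structure used to track SGPIO LED activity.\r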
7532 +static int nv_port_start(struct ata_port *ap)\r
7533 +{\r
7534 +       int stat;\r
7535 +       struct nv_port *port;\r
7536 +\r
7537 +       stat = ata_port_start(ap);\r
7538 +       if (stat) {\r
7539 +               return stat;\r
7540 +       }\r
7541 +\r
7542 +       port = kmalloc(sizeof(struct nv_port), GFP_KERNEL);\r
7543 +       if (!port) \r
7544 +               goto err_out_no_free;\r
7545 +\r
7546 +       memset(port, 0, sizeof(struct nv_port));\r
7547 +\r
7548 +       ap->private_data = port;\r
7549 +       return 0;\r
7550 +\r
7551 +err_out_no_free:\r
7552 +       return -ENOMEM;\r
7553 +}\r
7554 +\r
7555 +static void nv_port_stop(struct ata_port *ap)\r
7556 +{\r
7557 +       nv_sgpio_clear_all_leds(ap);\r
7558 +\r
7559 +       if (ap->private_data) {\r
7560 +               kfree(ap->private_data);\r
7561 +               ap->private_data = NULL;\r
7562 +       }\r
7563 +       ata_port_stop(ap);\r
7564 +}\r
7565 +\r
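+// Command issue hook: flag the port as recently active so the SGPIO\r
+// timer can blink its activity LED, then hand off to libata.\r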
7566 +static int nv_qc_issue(struct ata_queued_cmd *qc)\r
7567 +{\r
7568 +       struct nv_port *port = qc->ap->private_data;\r
7569 +\r
7570 +       if (port) \r
7571 +               port->port_sgpio.activity.flags.recent_activity = 1;\r
7572 +       return (ata_qc_issue_prot(qc));\r
7573 +}\r
7574 +\r
7575 +\r
7576 +\r
7577 +\r
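+// Hotplug plumbing for the older flavors that keep their interrupt\r
+// status/enable bytes behind the SCR I/O window: ack any pending\r
+// hotplug status, then unmask the hotplug interrupt.\r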
7578 +static void nv_enable_hotplug(struct ata_probe_ent *probe_ent)\r
7579 +{\r
7580 +       u8 intr_mask;\r
7581 +\r
7582 +       outb(NV_INT_STATUS_HOTPLUG,\r
7583 +               probe_ent->port[0].scr_addr + NV_INT_STATUS);\r
7584 +\r
7585 +       intr_mask = inb(probe_ent->port[0].scr_addr + NV_INT_ENABLE);\r
7586 +       intr_mask |= NV_INT_ENABLE_HOTPLUG;\r
7587 +\r
7588 +       outb(intr_mask, probe_ent->port[0].scr_addr + NV_INT_ENABLE);\r
7589 +}\r
7590 +\r
7591 +static void nv_disable_hotplug(struct ata_host_set *host_set)\r
7592 +{\r
7593 +       u8 intr_mask;\r
7594 +\r
7595 +       intr_mask = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);\r
7596 +\r
7597 +       intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);\r
7598 +\r
7599 +       outb(intr_mask, host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);\r
7600 +}\r
7601 +\r
7602 +static void nv_check_hotplug(struct ata_host_set *host_set)\r
7603 +{\r
7604 +       u8 intr_status;\r
7605 +\r
7606 +       intr_status = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);\r
7607 +\r
7608 +       // Clear interrupt status.\r
7609 +       outb(0xff, host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);\r
7610 +\r
7611 +       if (intr_status & NV_INT_STATUS_HOTPLUG) {\r
7612 +               if (intr_status & NV_INT_STATUS_PDEV_ADDED)\r
7613 +                       printk(KERN_WARNING "nv_sata: "\r
7614 +                               "Primary device added\n");\r
7615 +\r
7616 +               if (intr_status & NV_INT_STATUS_PDEV_REMOVED)\r
7617 +                       printk(KERN_WARNING "nv_sata: "\r
7618 +                               "Primary device removed\n");\r
7619 +\r
7620 +               if (intr_status & NV_INT_STATUS_SDEV_ADDED)\r
7621 +                       printk(KERN_WARNING "nv_sata: "\r
7622 +                               "Secondary device added\n");\r
7623 +\r
7624 +               if (intr_status & NV_INT_STATUS_SDEV_REMOVED)\r
7625 +                       printk(KERN_WARNING "nv_sata: "\r
7626 +                               "Secondary device removed\n");\r
7627 +       }\r
7628 +}\r
7629 +\r
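+// The CK804/MCP04 flavor keeps its hotplug registers in MMIO space;\r
+// access is gated by the SATA_SPACE_EN bit in the NV_MCP_SATA_CFG_20\r
+// config register, switched on here and off again on disable.\r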
7630 +static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent)\r
7631 +{\r
7632 +       struct pci_dev *pdev = to_pci_dev(probe_ent->dev);\r
7633 +       u8 intr_mask;\r
7634 +       u8 regval;\r
7635 +\r
7636 +       pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);\r
7637 +       regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;\r
7638 +       pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);\r
7639 +\r
7640 +       writeb(NV_INT_STATUS_HOTPLUG, probe_ent->mmio_base + NV_INT_STATUS_CK804);\r
7641 +\r
7642 +       intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_CK804);\r
7643 +       intr_mask |= NV_INT_ENABLE_HOTPLUG;\r
7644 +\r
7645 +       writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_CK804);\r
7646 +}\r
7647 +\r
7648 +static void nv_disable_hotplug_ck804(struct ata_host_set *host_set)\r
7649 +{\r
7650 +       struct pci_dev *pdev = to_pci_dev(host_set->dev);\r
7651 +       u8 intr_mask;\r
7652 +       u8 regval;\r
7653 +\r
7654 +       intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_CK804);\r
7655 +\r
7656 +       intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);\r
7657 +\r
7658 +       writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_CK804);\r
7659 +\r
7660 +       pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);\r
7661 +       regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;\r
7662 +       pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);\r
7663 +}\r
7664 +\r
7665 +static void nv_check_hotplug_ck804(struct ata_host_set *host_set)\r
7666 +{\r
7667 +       u8 intr_status;\r
7668 +\r
7669 +       intr_status = readb(host_set->mmio_base + NV_INT_STATUS_CK804);\r
7670 +\r
7671 +       // Clear interrupt status.\r
7672 +       writeb(0xff, host_set->mmio_base + NV_INT_STATUS_CK804);\r
7673 +\r
7674 +       if (intr_status & NV_INT_STATUS_HOTPLUG) {\r
7675 +               if (intr_status & NV_INT_STATUS_PDEV_ADDED)\r
7676 +                       printk(KERN_WARNING "nv_sata: "\r
7677 +                               "Primary device added\n");\r
7678 +\r
7679 +               if (intr_status & NV_INT_STATUS_PDEV_REMOVED)\r
7680 +                       printk(KERN_WARNING "nv_sata: "\r
7681 +                               "Primary device removed\n");\r
7682 +\r
7683 +               if (intr_status & NV_INT_STATUS_SDEV_ADDED)\r
7684 +                       printk(KERN_WARNING "nv_sata: "\r
7685 +                               "Secondary device added\n");\r
7686 +\r
7687 +               if (intr_status & NV_INT_STATUS_SDEV_REMOVED)\r
7688 +                       printk(KERN_WARNING "nv_sata: "\r
7689 +                               "Secondary device removed\n");\r
7690 +       }\r
7691 +}\r
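+\r
+// The MCP55 hotplug status/enable registers are banked: one byte for\r
+// the primary port and another at +2 for the secondary, so each ack or\r
+// mask operation below is done at both offsets; bits 0x04/0x08 report\r
+// device added/removed.\r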
7692 +static void nv_enable_hotplug_mcp55(struct ata_probe_ent *probe_ent)\r
7693 +{\r
7694 +       struct pci_dev *pdev = to_pci_dev(probe_ent->dev);\r
7695 +       u8 intr_mask;\r
7696 +       u8 regval;\r
7697 +\r
7698 +       pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);\r
7699 +       regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;\r
7700 +       pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);\r
7701 +\r
7702 +       writeb(0x0c, probe_ent->mmio_base + NV_INT_STATUS_MCP55);\r
7703 +       writeb(0x0c, probe_ent->mmio_base + NV_INT_STATUS_MCP55+2);\r
7704 +\r
7705 +       intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_MCP55);\r
7706 +       intr_mask |= 0x0c;\r
7707 +       writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_MCP55);\r
7708 +\r
7709 +       intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_MCP55+2);\r
7710 +       intr_mask |= 0x0c;\r
7711 +       writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_MCP55+2);\r
7712 +}\r
7713 +\r
7714 +static void nv_disable_hotplug_mcp55(struct ata_host_set *host_set)\r
7715 +{\r
7716 +       struct pci_dev *pdev = to_pci_dev(host_set->dev);\r
7717 +       u8 intr_mask;\r
7718 +       u8 regval;\r
7719 +\r
7720 +       intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_MCP55);\r
7721 +       intr_mask &= ~(0x0C);\r
7722 +       writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_MCP55);\r
7723 +       \r
7724 +       intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_MCP55+2);\r
7725 +       intr_mask &= ~(0x0C);\r
7726 +       writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_MCP55+2);\r
7727 +\r
7728 +       pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);\r
7729 +       regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;\r
7730 +       pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);\r
7731 +}\r
7732 +\r
7733 +static void nv_check_hotplug_mcp55(struct ata_host_set *host_set)\r
7734 +{\r
7735 +       u8 intr_status, intr_status1;\r
7736 +\r
7737 +       intr_status = readb(host_set->mmio_base + NV_INT_STATUS_MCP55);\r
7738 +       intr_status1 = readb(host_set->mmio_base + NV_INT_STATUS_MCP55+2);\r
7739 +\r
7740 +       // Clear interrupt status.\r
7741 +       writeb(0xff, host_set->mmio_base + NV_INT_STATUS_MCP55);\r
7742 +       writeb(0xff, host_set->mmio_base + NV_INT_STATUS_MCP55+2);\r
7743 +\r
7744 +       if ((intr_status & 0x0c) || (intr_status1 & 0x0c)) {\r
7745 +               if (intr_status & 0x04)\r
7746 +                       printk(KERN_WARNING "nv_sata: "\r
7747 +                               "Primary device added\n");\r
7748 +\r
7749 +               if (intr_status & 0x08)\r
7750 +                       printk(KERN_WARNING "nv_sata: "\r
7751 +                               "Primary device removed\n");\r
7752 +\r
7753 +               if (intr_status1 & 0x04)\r
7754 +                       printk(KERN_WARNING "nv_sata: "\r
7755 +                               "Secondary device added\n");\r
7756 +\r
7757 +               if (intr_status1 & 0x08)\r
7758 +                       printk(KERN_WARNING "nv_sata: "\r
7759 +                               "Secondary device removed\n");\r
7760 +       }\r
7761 +}\r
7762 +\r
7763 +\r
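+// Set up SGPIO (Serial GPIO) drive-activity LEDs.  The control/status\r
+// register offset and control-block address are read from PCI config\r
+// space and sanity-checked; the first controller function to get here\r
+// initializes the lock and timestamp shared through the control\r
+// block's scratch space, then a periodic timer keeps the LEDs updated.\r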
7764 +static void nv_sgpio_init(struct pci_dev *pdev, struct nv_host *phost)\r
7765 +{\r
7766 +       u16 csr_add;\r
7767 +       u32 cb_add, temp32;\r
7768 +       struct device *dev = pci_dev_to_dev(pdev);\r
7769 +       struct ata_host_set *host_set = dev_get_drvdata(dev);\r
7770 +       u8 pro = 0;\r
7771 +       pci_read_config_word(pdev, NV_SGPIO_PCI_CSR_OFFSET, &csr_add);\r
7772 +       pci_read_config_dword(pdev, NV_SGPIO_PCI_CB_OFFSET, &cb_add);\r
7773 +       pci_read_config_byte(pdev, 0xA4, &pro);\r
7774 +\r
7775 +       if (csr_add == 0 || cb_add == 0)\r
7776 +               return;\r
7777 +\r
7778 +\r
7779 +       if (!(pro & 0x40))\r
7780 +               return;\r
7781 +\r
7782 +       \r
7783 +       temp32 = csr_add;\r
7784 +       phost->host_sgpio.pcsr = (void *)temp32;\r
7785 +       phost->host_sgpio.pcb = phys_to_virt(cb_add);\r
7786 +\r
7787 +       if (phost->host_sgpio.pcb->nvcr.bit.init_cnt != 0x2 || phost->host_sgpio.pcb->nvcr.bit.cbver != 0x0)\r
7788 +               return;\r
7789 +\r
7790 +       if (temp32 <= 0x200 || temp32 >= 0xFFFE)\r
7791 +               return;\r
7792 +\r
7793 +\r
7794 +       if (cb_add <= 0x80000 || cb_add >= 0x9FC00)\r
7795 +               return;\r
7796 +\r
7797 +               \r
7798 +       if (phost->host_sgpio.pcb->scratch_space == 0) {\r
7799 +               spin_lock_init(&nv_sgpio_lock);\r
7800 +               phost->host_sgpio.share.plock = &nv_sgpio_lock;\r
7801 +               phost->host_sgpio.share.ptstamp = &nv_sgpio_tstamp;\r
7802 +               phost->host_sgpio.pcb->scratch_space = \r
7803 +                       (unsigned long)&phost->host_sgpio.share;\r
7804 +               spin_lock(phost->host_sgpio.share.plock);\r
7805 +               nv_sgpio_reset(phost->host_sgpio.pcsr);\r
7806 +               phost->host_sgpio.pcb->cr0 = \r
7807 +                       SET_ENABLE(phost->host_sgpio.pcb->cr0);\r
7808 +\r
7809 +               spin_unlock(phost->host_sgpio.share.plock);\r
7810 +       }\r
7811 +\r
7812 +       phost->host_sgpio.share =\r
7813 +               *(struct nv_sgpio_host_share *)(unsigned long)\r
7814 +               phost->host_sgpio.pcb->scratch_space;\r
7815 +       phost->host_sgpio.flags.sgpio_enabled = 1;\r
7816 +       phost->pdev = pdev;\r
7817 +       init_timer(&phost->host_sgpio.sgpio_timer);\r
7818 +       phost->host_sgpio.sgpio_timer.data = (unsigned long)phost;\r
7819 +       nv_sgpio_set_timer(&phost->host_sgpio.sgpio_timer, \r
7820 +                               NV_SGPIO_UPDATE_TICK);\r
7821 +}\r
7822 +\r
7823 +static void __nv_sgpio_timer_handler(unsigned long context);\r
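+// (Re)arm the one-shot SGPIO update timer.\r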
7824 +static void nv_sgpio_set_timer(struct timer_list *ptimer, unsigned int timeout_msec)\r
7825 +{\r
7826 +       if (!ptimer)\r
7827 +               return;\r
7828 +       ptimer->function = __nv_sgpio_timer_handler;\r
7829 +       ptimer->expires = msecs_to_jiffies(timeout_msec) + jiffies;\r
7830 +       add_timer(ptimer);\r
7831 +}\r
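+\r
+// Timer trampoline: while the host set is not yet registered (drvdata\r
+// still NULL during early probe), just re-arm and try again later.\r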
7832 +static void __nv_sgpio_timer_handler(unsigned long context)\r
7833 +{\r
7834 +       struct nv_host *phost = (struct nv_host*)context;\r
7835 +       struct device *dev = pci_dev_to_dev(phost->pdev);\r
7836 +       struct ata_host_set *host_set = dev_get_drvdata(dev);\r
7837 +       \r
7838 +       if (!host_set)\r
7839 +               nv_sgpio_set_timer(&phost->host_sgpio.sgpio_timer, \r
7840 +                               NV_SGPIO_UPDATE_TICK);\r
7841 +       else\r
7842 +               nv_sgpio_timer_handler((unsigned long)host_set);\r
7843 +       \r
7844 +}\r
7845 +\r
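+// Periodic SGPIO worker: sample each port's recent-activity flag, fold\r
+// the resulting LED on/off decisions into the shared transmit register\r
+// and fire a WRITE_DATA command whenever something changed.\r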
7846 +static void nv_sgpio_timer_handler(unsigned long context)\r
7847 +{\r
7848 +\r
7849 +       struct ata_host_set *host_set = (struct ata_host_set *)context;\r
7850 +       struct nv_host *host;\r
7851 +       u8 count, host_offset, port_offset;\r
7852 +       union nv_sgpio_tx tx;\r
7853 +       bool on_off;\r
7854 +       unsigned long mask = 0xFFFF;\r
7855 +       struct nv_port *port;\r
7856 +\r
7857 +       if (!host_set)\r
7858 +               goto err_out;\r
7859 +       else \r
7860 +               host = (struct nv_host *)host_set->private_data;\r
7861 +\r
7862 +       if (!host->host_sgpio.flags.sgpio_enabled)\r
7863 +               goto err_out;\r
7864 +\r
7865 +       host_offset = nv_sgpio_tx_host_offset(host_set);\r
7866 +\r
7867 +       spin_lock(host->host_sgpio.share.plock);\r
7868 +       tx = host->host_sgpio.pcb->tx[host_offset];\r
7869 +       spin_unlock(host->host_sgpio.share.plock);\r
7870 +\r
7871 +       for (count = 0; count < host_set->n_ports; count++) {\r
7872 +               struct ata_port *ap; \r
7873 +\r
7874 +               ap = host_set->ports[count];\r
7875 +        \r
7876 +               if (!ap || (ap->flags & ATA_FLAG_PORT_DISABLED))\r
7877 +                       continue;\r
7878 +\r
7879 +               port = (struct nv_port *)ap->private_data;\r
7880 +               if (!port)\r
7881 +                       continue;                       \r
7882 +                port_offset = nv_sgpio_tx_port_offset(ap);\r
7883 +               on_off = GET_ACTIVITY(tx.tx_port[port_offset]);\r
7884 +               if (nv_sgpio_update_led(&port->port_sgpio.activity, &on_off)) {\r
7885 +                       tx.tx_port[port_offset] = \r
7886 +                               SET_ACTIVITY(tx.tx_port[port_offset], on_off);\r
7887 +                       host->host_sgpio.flags.need_update = 1;\r
7888 +               }\r
7889 +       }\r
7890 +\r
7891 +\r
7892 +       if (host->host_sgpio.flags.need_update) {\r
7893 +               spin_lock(host->host_sgpio.share.plock);    \r
7894 +               if (nv_sgpio_get_func(host_set) \r
7895 +                       % NV_CNTRLR_SHARE_INIT == 0) {\r
7896 +                       host->host_sgpio.pcb->tx[host_offset].all &= mask;\r
7897 +                       mask = mask << 16;\r
7898 +                       tx.all &= mask;\r
7899 +               } else {\r
7900 +                       tx.all &= mask;\r
7901 +                       mask = mask << 16;\r
7902 +                       host->host_sgpio.pcb->tx[host_offset].all &= mask;\r
7903 +               }\r
7904 +               host->host_sgpio.pcb->tx[host_offset].all |= tx.all;\r
7905 +               spin_unlock(host->host_sgpio.share.plock);\r
7906 +\r
7907 +               if (nv_sgpio_send_cmd(host, NV_SGPIO_CMD_WRITE_DATA)) { \r
7908 +                       host->host_sgpio.flags.need_update = 0;\r
7909 +                       return;\r
7910 +               }\r
7911 +       } else {\r
7912 +               nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer, \r
7913 +                               NV_SGPIO_UPDATE_TICK);\r
7914 +       }\r
7915 +err_out:\r
7916 +       return;\r
7917 +}\r
7918 +\r
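+// Issue an SGPIO command, rate-limited through the shared timestamp so\r
+// controller functions sharing the SGPIO block do not flood it.\r
+// Returns 1 once the minimum update interval has elapsed (the caller\r
+// may clear its pending flag), 0 when the command was deferred.\r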
7919 +static bool nv_sgpio_send_cmd(struct nv_host *host, u8 cmd)\r
7920 +{\r
7921 +       u8 csr;\r
7922 +       unsigned long *ptstamp;\r
7923 +\r
7924 +       spin_lock(host->host_sgpio.share.plock);    \r
7925 +       ptstamp = host->host_sgpio.share.ptstamp;\r
7926 +       if (jiffies_to_msecs1(jiffies - *ptstamp) >= NV_SGPIO_MIN_UPDATE_DELTA) {\r
7927 +               csr =\r
7928 +                       nv_sgpio_get_csr((unsigned long)host->host_sgpio.pcsr);\r
7929 +               if ((GET_SGPIO_STATUS(csr) != NV_SGPIO_STATE_OPERATIONAL) ||\r
7930 +                       (GET_CMD_STATUS(csr) == NV_SGPIO_CMD_ACTIVE)) {\r
7931 +                       //nv_sgpio_reset(host->host_sgpio.pcsr);\r
7932 +               } else {\r
7933 +                       host->host_sgpio.pcb->cr0 = \r
7934 +                               SET_ENABLE(host->host_sgpio.pcb->cr0);\r
7935 +                       csr = 0;\r
7936 +                       csr = SET_CMD(csr, cmd);\r
7937 +                       nv_sgpio_set_csr(csr, \r
7938 +                               (unsigned long)host->host_sgpio.pcsr);\r
7939 +                       *ptstamp = jiffies;\r
7940 +               }\r
7941 +               spin_unlock(host->host_sgpio.share.plock);\r
7942 +               nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer, \r
7943 +                       NV_SGPIO_UPDATE_TICK);\r
7944 +               return 1;\r
7945 +       } else {\r
7946 +               spin_unlock(host->host_sgpio.share.plock);\r
7947 +               nv_sgpio_set_timer(&host->host_sgpio.sgpio_timer, \r
7948 +                               (NV_SGPIO_MIN_UPDATE_DELTA - \r
7949 +                               jiffies_to_msecs1(jiffies - *ptstamp)));\r
7950 +               return 0;\r
7951 +       }\r
7952 +}\r
7953 +\r
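+// Decide whether an activity LED needs to change state: toggle it on\r
+// fresh activity, and force it off for NV_SGPIO_MIN_FORCE_OFF ticks\r
+// after NV_SGPIO_MAX_ACTIVITY_ON consecutive ticks on, so that steady\r
+// I/O still shows up as blinking.\r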
7954 +static bool nv_sgpio_update_led(struct nv_sgpio_led *led, bool *on_off)\r
7955 +{\r
7956 +       bool need_update = 0;\r
7957 +\r
7958 +       if (led->force_off > 0) {\r
7959 +               led->force_off--;\r
7960 +       } else if (led->flags.recent_activity ^ led->flags.last_state) {\r
7961 +               *on_off = led->flags.recent_activity;\r
7962 +               led->flags.last_state = led->flags.recent_activity;\r
7963 +               need_update = 1;\r
7964 +       } else if ((led->flags.recent_activity & led->flags.last_state) &&\r
7965 +               (led->last_cons_active >= NV_SGPIO_MAX_ACTIVITY_ON)) {\r
7966 +               *on_off = NV_OFF;\r
7967 +               led->flags.last_state = NV_OFF;\r
7968 +               led->force_off = NV_SGPIO_MIN_FORCE_OFF;\r
7969 +               need_update = 1;\r
7970 +       }\r
7971 +\r
7972 +       if (*on_off) \r
7973 +               led->last_cons_active++;        \r
7974 +       else\r
7975 +               led->last_cons_active = 0;\r
7976 +\r
7977 +       led->flags.recent_activity = 0;\r
7978 +       return need_update;\r
7979 +}\r
7980 +\r
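+// Kick the SGPIO block out of the reset state if needed, then reload\r
+// its parameters.\r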
7981 +static void nv_sgpio_reset(u8  *pcsr)\r
7982 +{\r
7983 +       u8 csr;\r
7984 +\r
7985 +       csr = nv_sgpio_get_csr((unsigned long)pcsr);\r
7986 +       if (GET_SGPIO_STATUS(csr) == NV_SGPIO_STATE_RESET) {\r
7987 +               csr = 0;\r
7988 +               csr = SET_CMD(csr, NV_SGPIO_CMD_RESET);\r
7989 +               nv_sgpio_set_csr(csr, (unsigned long)pcsr);\r
7990 +       }\r
7991 +       csr = 0;\r
7992 +       csr = SET_CMD(csr, NV_SGPIO_CMD_READ_PARAMS);\r
7993 +       nv_sgpio_set_csr(csr, (unsigned long)pcsr);\r
7994 +}\r
7995 +\r
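+// Driver teardown: push out the final (cleared) LED state, stop the\r
+// update timer and drop the scratch-space handshake.\r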
7996 +static void nv_sgpio_host_cleanup(struct nv_host *host)\r
7997 +{\r
7998 +       u8 csr;\r
7999 +       if (!host)\r
8000 +               return;\r
8001 +\r
8002 +       if (host->host_sgpio.flags.sgpio_enabled) {\r
8003 +               spin_lock(host->host_sgpio.share.plock);\r
8004 +               host->host_sgpio.pcb->cr0 = \r
8005 +                       SET_ENABLE(host->host_sgpio.pcb->cr0);\r
8006 +               csr = 0;\r
8007 +               csr = SET_CMD(csr, NV_SGPIO_CMD_WRITE_DATA);\r
8008 +               nv_sgpio_set_csr(csr, \r
8009 +                       (unsigned long)host->host_sgpio.pcsr);\r
8010 +               spin_unlock(host->host_sgpio.share.plock);\r
8011 +       \r
8012 +               if (timer_pending(&host->host_sgpio.sgpio_timer))\r
8013 +                       del_timer(&host->host_sgpio.sgpio_timer);\r
8014 +               host->host_sgpio.flags.sgpio_enabled = 0;\r
8015 +               host->host_sgpio.pcb->scratch_space = 0;\r
8016 +       }\r
8017 +       \r
8018 +}\r
8019 +\r
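+// Zero this port's LEDs in the shared transmit register and flag the\r
+// host so the next timer tick pushes the change out to the hardware.\r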
8020 +static void nv_sgpio_clear_all_leds(struct ata_port *ap)\r
8021 +{\r
8022 +       struct nv_port *port = ap->private_data;\r
8023 +       struct nv_host *host;\r
8024 +       u8 host_offset, port_offset;\r
8025 +\r
8026 +       if (!port || !ap->host_set)\r
8027 +               return;\r
8028 +       if (!ap->host_set->private_data)\r
8029 +               return;\r
8030 +\r
8031 +       host = ap->host_set->private_data;\r
8032 +       if (!host->host_sgpio.flags.sgpio_enabled)\r
8033 +               return;\r
8034 +\r
8035 +       host_offset = nv_sgpio_tx_host_offset(ap->host_set);\r
8036 +       port_offset = nv_sgpio_tx_port_offset(ap);\r
8037 +\r
8038 +       spin_lock(host->host_sgpio.share.plock);\r
8039 +       host->host_sgpio.pcb->tx[host_offset].tx_port[port_offset] = 0;\r
8040 +       host->host_sgpio.flags.need_update = 1;\r
8041 +       spin_unlock(host->host_sgpio.share.plock);\r
8042 +}\r
8043 +\r
8044 +\r
8045 +\r
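+// Module entry points.  The RHAS3U7 (RHEL3) build must register the\r
+// SCSI host template by hand via scsi_register_module(); on later\r
+// kernels ata_device_add() takes care of SCSI registration.\r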
8046 +static int __init nv_init(void)\r
8047 +{\r
8048 +#ifdef RHAS3U7\r
8049 +       int rc;\r
8050 +       rc = pci_module_init(&nv_pci_driver);\r
8051 +       if (rc)\r
8052 +               return rc;\r
8053 +       \r
8054 +       rc = scsi_register_module(MODULE_SCSI_HA, &nv_sht);\r
8055 +       if (rc) {\r
8056 +               pci_unregister_driver(&nv_pci_driver);\r
8057 +               /* TODO: does scsi_register_module return errno val? */\r
8058 +               return -ENODEV;\r
8059 +       }\r
8060 +\r
8061 +       return 0;\r
8062 +#else\r
8063 +       return pci_module_init(&nv_pci_driver);\r
8064 +#endif\r
8065 +}\r
8066 +\r
8067 +static void __exit nv_exit(void)\r
8068 +{\r
8069 +#ifdef RHAS3U7\r
8070 +       scsi_unregister_module(MODULE_SCSI_HA, &nv_sht);\r
8071 +#endif\r
8072 +       pci_unregister_driver(&nv_pci_driver);\r
8073 +\r
8074 +}\r
8075 +\r
8076 +module_init(nv_init);\r
8077 +module_exit(nv_exit);\r