--- 2.6/drivers/net/Kconfig	2003-10-18 21:17:01.000000000 +0200
+++ build-2.6/drivers/net/Kconfig	2003-10-25 15:06:15.000000000 +0200
@@ -1283,6 +1283,21 @@
 	  <file:Documentation/networking/net-modules.txt>. The module will be
 	  called b44.
 
+config FORCEDETH
+	tristate "Reverse Engineered nForce Ethernet support (EXPERIMENTAL)"
+	depends on NET_PCI && PCI && EXPERIMENTAL
+	help
+	  If you have a network (Ethernet) controller of this type, say Y and
+	  read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  If you want to compile this as a module ( = code which can be
+	  inserted in and removed from the running kernel whenever you want),
+	  say M here and read <file:Documentation/modules.txt> as well as
+	  <file:Documentation/networking/net-modules.txt>. The module will be
+	  called forcedeth.
+
+
 config CS89x0
 	tristate "CS89x0 support"
 	depends on NET_PCI && ISA
--- 2.6/drivers/net/Makefile	2003-10-09 21:19:50.000000000 +0200
+++ build-2.6/drivers/net/Makefile	2003-10-25 15:06:15.000000000 +0200
@@ -95,6 +95,7 @@
 obj-$(CONFIG_NE3210) += ne3210.o 8390.o
 obj-$(CONFIG_NET_SB1250_MAC) += sb1250-mac.o
 obj-$(CONFIG_B44) += b44.o
+obj-$(CONFIG_FORCEDETH) += forcedeth.o
 
 obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
--- 2.6/drivers/net/forcedeth.c	1970-01-01 01:00:00.000000000 +0100
+++ build-2.6/drivers/net/forcedeth.c	2003-10-25 15:06:39.000000000 +0200
@@ -0,0 +1,1406 @@
+/*
+ * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
+ *
+ * Note: This driver is a cleanroom reimplementation based on reverse
+ * engineered documentation written by Carl-Daniel Hailfinger
+ * and Andrew de Quincey. It's neither supported nor endorsed
+ * by NVIDIA Corp. Use at your own risk.
+ *
+ * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
+ * trademarks of NVIDIA Corporation in the United States and other
+ * countries.
+ *
+ * Copyright (C) 2003 Manfred Spraul
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Changelog:
+ *	0.01: 05 Oct 2003: First release that compiles without warnings.
+ *	0.02: 05 Oct 2003: Fix bug for drain_tx: do not try to free NULL skbs.
+ *			   Check all PCI BARs for the register window.
+ *			   udelay added to mii_rw.
+ *	0.03: 06 Oct 2003: Initialize dev->irq.
+ *	0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
+ *	0.05: 09 Oct 2003: printk removed again, irq status printed in tx_timeout.
+ *	0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
+ *			   irq mask updated.
+ *	0.07: 14 Oct 2003: Further irq mask updates.
+ *	0.08: 20 Oct 2003: rx_desc.Length initialization added, alloc_rx refill
+ *			   added into irq handler, NULL check for drain_ring.
+ *	0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
+ *			   requested interrupt sources.
+ *	0.10: 20 Oct 2003: First cleanup for release.
+ *	0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
+ *			   MAC Address init fix, set_multicast cleanup.
+ *	0.12: 23 Oct 2003: Cleanups for release.
+ *	0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
+ *			   Set link speed correctly. Start rx before starting
+ *			   tx (start_rx sets the link speed).
+ *	0.14: 25 Oct 2003: Nic dependent irq mask.
+ *
+ * Known bugs:
+ * The irq handling is wrong - no tx done interrupts are generated.
+ * This means recovery from netif_stop_queue only happens in the hw timer
+ * interrupt (1/2 second), or if an rx packet arrives by chance.
+ */
+#define FORCEDETH_VERSION	"0.14"
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/random.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
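+/* debug output is currently compiled in unconditionally; flip the #if to 0 to silence it */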
+#if 1
+#define dprintk			printk
+#else
+#define dprintk(x...)		do { } while (0)
+#endif
+
+
+/*
+ * Hardware access:
+ */
+
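+/* per-chipset quirk flags, handed to the driver through pci_device_id.driver_data (see pci_tbl below) */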
+#define DEV_NEED_LASTPACKET1	0x0001
+#define DEV_IRQMASK_1		0x0002
+#define DEV_IRQMASK_2		0x0004
+
+enum {
+	NvRegIrqStatus = 0x000,
+#define NVREG_IRQSTAT_MIIEVENT	0x040
+#define NVREG_IRQSTAT_MASK	0x1ff
+	NvRegIrqMask = 0x004,
+#define NVREG_IRQ_UNKNOWN	0x0005
+#define NVREG_IRQ_RX		0x0002
+#define NVREG_IRQ_TX2		0x0010
+#define NVREG_IRQ_TIMER		0x0020
+#define NVREG_IRQ_LINK		0x0040
+#define NVREG_IRQ_TX1		0x0100
+#define NVREG_IRQMASK_WANTED_1	0x005f
+#define NVREG_IRQMASK_WANTED_2	0x0147
+
+	NvRegUnknownSetupReg6 = 0x008,
+#define NVREG_UNKSETUP6_VAL	3
+
+	NvRegPollingInterval = 0x00c,
+	NvRegMisc1 = 0x080,
+#define NVREG_MISC1_HD		0x02
+#define NVREG_MISC1_FORCE	0x3b0f3c
+
+	NvRegTransmitterControl = 0x084,
+#define NVREG_XMITCTL_START	0x01
+	NvRegTransmitterStatus = 0x088,
+#define NVREG_XMITSTAT_BUSY	0x01
+
+	NvRegPacketFilterFlags = 0x8c,
+#define NVREG_PFF_ALWAYS	0x7F0008
+#define NVREG_PFF_PROMISC	0x80
+#define NVREG_PFF_MYADDR	0x20
+
+	NvRegOffloadConfig = 0x90,
+#define NVREG_OFFLOAD_HOMEPHY	0x601
+#define NVREG_OFFLOAD_NORMAL	0x5ee
+	NvRegReceiverControl = 0x094,
+#define NVREG_RCVCTL_START	0x01
+	NvRegReceiverStatus = 0x98,
+#define NVREG_RCVSTAT_BUSY	0x01
+
+	NvRegRandomSeed = 0x9c,
+#define NVREG_RNDSEED_MASK	0x00ff
+#define NVREG_RNDSEED_FORCE	0x7f00
+
+	NvRegUnknownSetupReg1 = 0xA0,
+#define NVREG_UNKSETUP1_VAL	0x16070f
+	NvRegUnknownSetupReg2 = 0xA4,
+#define NVREG_UNKSETUP2_VAL	0x16
+	NvRegMacAddrA = 0xA8,
+	NvRegMacAddrB = 0xAC,
+	NvRegMulticastAddrA = 0xB0,
+#define NVREG_MCASTADDRA_FORCE	0x01
+	NvRegMulticastAddrB = 0xB4,
+	NvRegMulticastMaskA = 0xB8,
+	NvRegMulticastMaskB = 0xBC,
+
+	NvRegTxRingPhysAddr = 0x100,
+	NvRegRxRingPhysAddr = 0x104,
+	NvRegRingSizes = 0x108,
+#define NVREG_RINGSZ_TXSHIFT	0
+#define NVREG_RINGSZ_RXSHIFT	16
+	NvRegUnknownTransmitterReg = 0x10c,
+	NvRegLinkSpeed = 0x110,
+#define NVREG_LINKSPEED_FORCE	0x10000
+#define NVREG_LINKSPEED_10	10
+#define NVREG_LINKSPEED_100	100
+#define NVREG_LINKSPEED_1000	1000
+	NvRegUnknownSetupReg5 = 0x130,
+#define NVREG_UNKSETUP5_BIT31	(1<<31)
+	NvRegUnknownSetupReg3 = 0x134,
+#define NVREG_UNKSETUP3_VAL1	0x200010
+	NvRegTxRxControl = 0x144,
+#define NVREG_TXRXCTL_KICK	0x0001
+#define NVREG_TXRXCTL_BIT1	0x0002
+#define NVREG_TXRXCTL_BIT2	0x0004
+#define NVREG_TXRXCTL_IDLE	0x0008
+#define NVREG_TXRXCTL_RESET	0x0010
+	NvRegMIIStatus = 0x180,
+#define NVREG_MIISTAT_ERROR		0x0001
+#define NVREG_MIISTAT_LINKCHANGE	0x0008
+#define NVREG_MIISTAT_MASK		0x000f
+#define NVREG_MIISTAT_MASK2		0x000f
+	NvRegUnknownSetupReg4 = 0x184,
+#define NVREG_UNKSETUP4_VAL	8
+
+	NvRegAdapterControl = 0x188,
+#define NVREG_ADAPTCTL_START	0x02
+#define NVREG_ADAPTCTL_LINKUP	0x04
+#define NVREG_ADAPTCTL_PHYVALID	0x4000
+#define NVREG_ADAPTCTL_RUNNING	0x100000
+#define NVREG_ADAPTCTL_PHYSHIFT	24
+	NvRegMIISpeed = 0x18c,
+#define NVREG_MIISPEED_BIT8	(1<<8)
+#define NVREG_MIIDELAY	5
+	NvRegMIIControl = 0x190,
+#define NVREG_MIICTL_INUSE	0x10000
+#define NVREG_MIICTL_WRITE	0x08000
+#define NVREG_MIICTL_ADDRSHIFT	5
+	NvRegMIIData = 0x194,
+	NvRegWakeUpFlags = 0x200,
+#define NVREG_WAKEUPFLAGS_VAL		0x7770
+#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
+#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
+#define NVREG_WAKEUPFLAGS_D3SHIFT	12
+#define NVREG_WAKEUPFLAGS_D2SHIFT	8
+#define NVREG_WAKEUPFLAGS_D1SHIFT	4
+#define NVREG_WAKEUPFLAGS_D0SHIFT	0
+#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
+#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
+#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
+
+	NvRegPatternCRC = 0x204,
+	NvRegPatternMask = 0x208,
+	NvRegPowerCap = 0x268,
+#define NVREG_POWERCAP_D3SUPP	(1<<30)
+#define NVREG_POWERCAP_D2SUPP	(1<<26)
+#define NVREG_POWERCAP_D1SUPP	(1<<25)
+	NvRegPowerState = 0x26c,
+#define NVREG_POWERSTATE_POWEREDUP	0x8000
+#define NVREG_POWERSTATE_VALID		0x0100
+#define NVREG_POWERSTATE_MASK		0x0003
+#define NVREG_POWERSTATE_D0		0x0000
+#define NVREG_POWERSTATE_D1		0x0001
+#define NVREG_POWERSTATE_D2		0x0002
+#define NVREG_POWERSTATE_D3		0x0003
+};
+
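+/*
+ * rx/tx descriptor in the layout the (reverse engineered) nic expects:
+ * all fields little endian. The nic owns a descriptor as long as
+ * NV_TX_VALID resp. NV_RX_AVAIL is set in Flags.
+ */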
+struct ring_desc {
+	u32 PacketBuffer;
+	u16 Length;
+	u16 Flags;
+};
+
+#define NV_TX_LASTPACKET	(1<<0)
+#define NV_TX_RETRYERROR	(1<<3)
+#define NV_TX_LASTPACKET1	(1<<8)
+#define NV_TX_DEFERRED		(1<<10)
+#define NV_TX_CARRIERLOST	(1<<11)
+#define NV_TX_LATECOLLISION	(1<<12)
+#define NV_TX_UNDERFLOW		(1<<13)
+#define NV_TX_ERROR		(1<<14)
+#define NV_TX_VALID		(1<<15)
+
+#define NV_RX_DESCRIPTORVALID	(1<<0)
+#define NV_RX_MISSEDFRAME	(1<<1)
+#define NV_RX_SUBSTRACT1	(1<<3)
+#define NV_RX_ERROR1		(1<<7)
+#define NV_RX_ERROR2		(1<<8)
+#define NV_RX_ERROR3		(1<<9)
+#define NV_RX_ERROR4		(1<<10)
+#define NV_RX_CRCERR		(1<<11)
+#define NV_RX_OVERFLOW		(1<<12)
+#define NV_RX_FRAMINGERR	(1<<13)
+#define NV_RX_ERROR		(1<<14)
+#define NV_RX_AVAIL		(1<<15)
+
+/* Miscellaneous hardware related defines: */
+#define NV_PCI_REGSZ		0x270
+
+/* various timeout delays: all in usec */
+#define NV_TXRX_RESET_DELAY	4
+#define NV_TXSTOP_DELAY1	10
+#define NV_TXSTOP_DELAY1MAX	500000
+#define NV_TXSTOP_DELAY2	100
+#define NV_RXSTOP_DELAY1	10
+#define NV_RXSTOP_DELAY1MAX	500000
+#define NV_RXSTOP_DELAY2	100
+#define NV_SETUP5_DELAY		5
+#define NV_SETUP5_DELAYMAX	50000
+#define NV_POWERUP_DELAY	5
+#define NV_POWERUP_DELAYMAX	5000
+#define NV_MIIBUSY_DELAY	50
+#define NV_MIIPHY_DELAY		10
+#define NV_MIIPHY_DELAYMAX	10000
+
+#define NV_WAKEUPPATTERNS	5
+#define NV_WAKEUPMASKENTRIES	4
+
+/* General driver defaults */
+#define NV_WATCHDOG_TIMEO	(2*HZ)
+#define DEFAULT_MTU		1500	/* also maximum supported, at least for now */
+
+#define RX_RING		128
+#define TX_RING		16
+/* tx flow control thresholds: stop the queue at 10 packets in flight,
+ * wake it again at 5 (raised from 1 packet in 0.13, see changelog) */
+#define TX_LIMIT_STOP	10
+#define TX_LIMIT_START	5
+
+/* rx/tx mac addr + type + vlan + align + slack */
+#define RX_NIC_BUFSIZE		(DEFAULT_MTU + 64)
+/* even more slack */
+#define RX_ALLOC_BUFSIZE	(DEFAULT_MTU + 128)
+
+#define OOM_REFILL	(1+HZ/20)
+/*
+ * SMP locking:
+ * All hardware access under dev->priv->lock, except the performance
+ * critical parts:
+ * - rx is (pseudo-) lockless: it relies on the single-threading provided
+ *	by the arch code for interrupts.
+ * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
+ *	needs dev->priv->lock :-(
+ * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
+ */
+
+/* in dev: base, irq */
+struct fe_priv {
+	spinlock_t lock;
+
+	/* General data:
+	 * Locking: spin_lock(&np->lock); */
+	struct net_device_stats stats;
+	int in_shutdown;
+	u32 linkspeed;
+	int duplex;
+	int phyaddr;
+
+	/* General data: RO fields */
+	dma_addr_t ring_addr;
+	struct pci_dev *pci_dev;
+	u32 orig_mac[2];
+	u32 irqmask;
+
+	/* rx specific fields.
+	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
+	 */
+	struct ring_desc *rx_ring;
+	unsigned int cur_rx, refill_rx;
+	struct sk_buff *rx_skbuff[RX_RING];
+	dma_addr_t rx_dma[RX_RING];
+	unsigned int rx_buf_sz;
+	struct timer_list oom_kick;
+
+	/*
+	 * tx specific fields.
+	 */
+	struct ring_desc *tx_ring;
+	unsigned int next_tx, nic_tx;
+	struct sk_buff *tx_skbuff[TX_RING];
+	dma_addr_t tx_dma[TX_RING];
+	u16 tx_flags;
+};
+
+static inline struct fe_priv *get_nvpriv(struct net_device *dev)
+{
+	return (struct fe_priv *) dev->priv;
+}
+
+static inline u8 *get_hwbase(struct net_device *dev)
+{
+	return (u8 *) dev->base_addr;
+}
+
+static inline void pci_push(u8 *base)
+{
+	/* force out pending posted writes */
+	readl(base);
+}
+
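+/* reg_delay: poll a register until (value & mask) == target, or fail after delaymax usec */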
+static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
+				int delay, int delaymax, const char *msg)
+{
+	u8 *base = get_hwbase(dev);
+
+	pci_push(base);
+	do {
+		udelay(delay);
+		delaymax -= delay;
+		if (delaymax < 0) {
+			if (msg)
+				printk(msg);
+			return 1;
+		}
+	} while ((readl(base + offset) & mask) != target);
+	return 0;
+}
+
+#define MII_READ	(-1)
+/* mii_rw: read/write a register on the PHY.
+ *
+ * Caller must guarantee serialization
+ */
+static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
+{
+	u8 *base = get_hwbase(dev);
+	int was_running;
+	u32 reg;
+	int retval;
+
+	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+	was_running = 0;
+	reg = readl(base + NvRegAdapterControl);
+	if (reg & NVREG_ADAPTCTL_RUNNING) {
+		was_running = 1;
+		writel(reg & ~NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
+	}
+	reg = readl(base + NvRegMIIControl);
+	if (reg & NVREG_MIICTL_INUSE) {
+		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
+		udelay(NV_MIIBUSY_DELAY);
+	}
+
+	reg = NVREG_MIICTL_INUSE | (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
+	if (value != MII_READ) {
+		writel(value, base + NvRegMIIData);
+		reg |= NVREG_MIICTL_WRITE;
+	}
+	writel(reg, base + NvRegMIIControl);
+
+	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
+			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
+		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
+				dev->name, miireg, addr);
+		retval = -1;
+	} else if (value != MII_READ) {
+		/* it was a write operation - fewer failures are detectable */
+		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
+				dev->name, value, miireg, addr);
+		retval = 0;
+	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
+		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
+				dev->name, miireg, addr);
+		retval = -1;
+	} else {
+		/* FIXME: why is that required? */
+		udelay(50);
+		retval = readl(base + NvRegMIIData);
+		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
+				dev->name, miireg, addr, retval);
+	}
+	if (was_running) {
+		reg = readl(base + NvRegAdapterControl);
+		writel(reg | NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
+	}
+	return retval;
+}
+
+static void start_rx(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 *base = get_hwbase(dev);
+
+	dprintk(KERN_DEBUG "%s: start_rx\n", dev->name);
+	/* Already running? Stop it. */
+	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
+		writel(0, base + NvRegReceiverControl);
+		pci_push(base);
+	}
+	writel(np->linkspeed, base + NvRegLinkSpeed);
+	pci_push(base);
+	writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
+	pci_push(base);
+}
+
+static void stop_rx(struct net_device *dev)
+{
+	u8 *base = get_hwbase(dev);
+
+	dprintk(KERN_DEBUG "%s: stop_rx\n", dev->name);
+	writel(0, base + NvRegReceiverControl);
+	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
+			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
+			KERN_INFO "stop_rx: ReceiverStatus remained busy");
+
+	udelay(NV_RXSTOP_DELAY2);
+	writel(0, base + NvRegLinkSpeed);
+}
+
+static void start_tx(struct net_device *dev)
+{
+	u8 *base = get_hwbase(dev);
+
+	dprintk(KERN_DEBUG "%s: start_tx\n", dev->name);
+	writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
+	pci_push(base);
+}
+
+static void stop_tx(struct net_device *dev)
+{
+	u8 *base = get_hwbase(dev);
+
+	dprintk(KERN_DEBUG "%s: stop_tx\n", dev->name);
+	writel(0, base + NvRegTransmitterControl);
+	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
+			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
+			KERN_INFO "stop_tx: TransmitterStatus remained busy");
+
+	udelay(NV_TXSTOP_DELAY2);
+	writel(0, base + NvRegUnknownTransmitterReg);
+}
+
+static void txrx_reset(struct net_device *dev)
+{
+	u8 *base = get_hwbase(dev);
+
+	dprintk(KERN_DEBUG "%s: txrx_reset\n", dev->name);
+	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET, base + NvRegTxRxControl);
+	pci_push(base);
+	udelay(NV_TXRX_RESET_DELAY);
+	writel(NVREG_TXRXCTL_BIT2, base + NvRegTxRxControl);
+	pci_push(base);
+}
+
+/*
+ * get_stats: dev->get_stats function
+ * Get latest stats value from the nic.
+ * Called with read_lock(&dev_base_lock) held for read -
+ * only synchronized against unregister_netdevice.
+ */
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	/* It seems that the nic always generates interrupts and doesn't
+	 * accumulate errors internally. Thus the current values in np->stats
+	 * are already up to date.
+	 */
+	return &np->stats;
+}
+
+
+/*
+ * nic_ioctl: dev->do_ioctl function
+ * Called with rtnl_lock held.
+ */
+static int nic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	return -EOPNOTSUPP;
+}
+
+/*
+ * alloc_rx: fill the rx ring entries.
+ * Return 1 if the skb allocations failed and the rx engine is left
+ * without available descriptors.
+ */
+static int alloc_rx(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	unsigned int refill_rx = np->refill_rx;
+
+	while (np->cur_rx != refill_rx) {
+		int nr = refill_rx % RX_RING;
+		struct sk_buff *skb;
+
+		if (np->rx_skbuff[nr] == NULL) {
+
+			skb = dev_alloc_skb(RX_ALLOC_BUFSIZE);
+			if (!skb)
+				break;
+
+			skb->dev = dev;
+			np->rx_skbuff[nr] = skb;
+		} else {
+			skb = np->rx_skbuff[nr];
+		}
+		/* skb->len is still 0 here - map the full area the nic may write to */
+		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
+						RX_NIC_BUFSIZE, PCI_DMA_FROMDEVICE);
+		np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+		np->rx_ring[nr].Length = cpu_to_le16(RX_NIC_BUFSIZE);
+		wmb();
+		np->rx_ring[nr].Flags = cpu_to_le16(NV_RX_AVAIL);
+		dprintk(KERN_DEBUG "%s: alloc_rx: Packet %d marked as Available\n",
+				dev->name, refill_rx);
+		refill_rx++;
+	}
+	if (np->refill_rx != refill_rx) {
+		/* FIXME: made progress. Kick hardware */
+	}
+	np->refill_rx = refill_rx;
+	if (np->cur_rx - refill_rx == RX_RING)
+		return 1;
+	return 0;
+}
+
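+/*
+ * do_rx_refill: oom_kick timer handler, retries the rx ring refill.
+ * Per the SMP locking rules above rx runs lockless against the irq
+ * handler, so refilling from here must disable the nic irq instead of
+ * just taking np->lock around alloc_rx.
+ */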
+static void do_rx_refill(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = get_nvpriv(dev);
+
+	disable_irq(dev->irq);
+	if (alloc_rx(dev)) {
+		spin_lock(&np->lock);
+		if (!np->in_shutdown)
+			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+		spin_unlock(&np->lock);
+	}
+	enable_irq(dev->irq);
+}
+
+static int init_ring(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int i;
+
+	np->next_tx = np->nic_tx = 0;
+	for (i = 0; i < TX_RING; i++) {
+		np->tx_ring[i].Flags = 0;
+	}
+
+	np->cur_rx = RX_RING;
+	np->refill_rx = 0;
+	for (i = 0; i < RX_RING; i++) {
+		np->rx_ring[i].Flags = 0;
+	}
+	init_timer(&np->oom_kick);
+	np->oom_kick.data = (unsigned long) dev;
+	np->oom_kick.function = &do_rx_refill;	/* timer handler */
+
+	return alloc_rx(dev);
+}
+
+static void drain_tx(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int i;
+	for (i = 0; i < TX_RING; i++) {
+		np->tx_ring[i].Flags = 0;
+		if (np->tx_skbuff[i]) {
+			pci_unmap_single(np->pci_dev, np->tx_dma[i],
+					np->tx_skbuff[i]->len,
+					PCI_DMA_TODEVICE);
+			dev_kfree_skb(np->tx_skbuff[i]);
+			np->tx_skbuff[i] = NULL;
+			np->stats.tx_dropped++;
+		}
+	}
+}
+
+static void drain_ring(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int i;
+
+	drain_tx(dev);
+
+	for (i = 0; i < RX_RING; i++) {
+		np->rx_ring[i].Flags = 0;
+		wmb();
+		if (np->rx_skbuff[i]) {
+			/* unmap with the same length that alloc_rx mapped */
+			pci_unmap_single(np->pci_dev, np->rx_dma[i],
+					RX_NIC_BUFSIZE,
+					PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(np->rx_skbuff[i]);
+			np->rx_skbuff[i] = NULL;
+		}
+	}
+}
+
+/*
+ * start_xmit: dev->hard_start_xmit function
+ * Called with dev->xmit_lock held.
+ */
+static int start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int nr = np->next_tx % TX_RING;
+
+	np->tx_skbuff[nr] = skb;
+	np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
+					PCI_DMA_TODEVICE);
+
+	np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+	np->tx_ring[nr].Length = cpu_to_le16(skb->len);
+
+	spin_lock_irq(&np->lock);
+	wmb();
+	np->tx_ring[nr].Flags = np->tx_flags;
+	dprintk(KERN_DEBUG "%s: start_xmit: packet %d queued for transmission.\n",
+			dev->name, np->next_tx);
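+	/* debug aid (added in 0.11): hexdump the first 64 bytes of the frame.
+	 * Note that this dumps 64 bytes even when the frame is shorter. */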
+	{
+		int j;
+		for (j=0; j<64; j++) {
+			if ((j%16) == 0)
+				dprintk("\n%03x:", j);
+			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
+		}
+		dprintk("\n");
+	}
+
+	np->next_tx++;
+
+	dev->trans_start = jiffies;
+	if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP)
+		netif_stop_queue(dev);
+	spin_unlock_irq(&np->lock);
+	writel(NVREG_TXRXCTL_KICK, get_hwbase(dev) + NvRegTxRxControl);
+	return 0;
+}
+
+/*
+ * tx_done: check for completed packets, release the skbs.
+ *
+ * Caller must own np->lock.
+ */
+static void tx_done(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	while (np->nic_tx < np->next_tx) {
+		struct ring_desc *prd;
+		int i = np->nic_tx % TX_RING;
+
+		prd = &np->tx_ring[i];
+
+		dprintk(KERN_DEBUG "%s: tx_done: looking at packet %d, Flags 0x%x.\n",
+				dev->name, np->nic_tx, prd->Flags);
+		if (prd->Flags & cpu_to_le16(NV_TX_VALID))
+			break;
+		if (prd->Flags & cpu_to_le16(NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
+						NV_TX_UNDERFLOW|NV_TX_ERROR)) {
+			if (prd->Flags & cpu_to_le16(NV_TX_UNDERFLOW))
+				np->stats.tx_fifo_errors++;
+			if (prd->Flags & cpu_to_le16(NV_TX_CARRIERLOST))
+				np->stats.tx_carrier_errors++;
+			np->stats.tx_errors++;
+		} else {
+			np->stats.tx_packets++;
+			np->stats.tx_bytes += np->tx_skbuff[i]->len;
+		}
+		pci_unmap_single(np->pci_dev, np->tx_dma[i],
+				np->tx_skbuff[i]->len,
+				PCI_DMA_TODEVICE);
+		dev_kfree_skb_irq(np->tx_skbuff[i]);
+		np->tx_skbuff[i] = NULL;
+		np->nic_tx++;
+	}
+	if (np->next_tx - np->nic_tx < TX_LIMIT_START)
+		netif_wake_queue(dev);
+}
+
+/*
+ * tx_timeout: dev->tx_timeout function
+ * Called with dev->xmit_lock held.
+ */
+static void tx_timeout(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 *base = get_hwbase(dev);
+
+	dprintk(KERN_DEBUG "%s: Got tx_timeout.\n", dev->name);
+	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name,
+			readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
+
+	spin_lock_irq(&np->lock);
+
+	/* 1) stop tx engine */
+	stop_tx(dev);
+
+	/* 2) check that the packets were not sent already: */
+	tx_done(dev);
+
+	/* 3) if there are dead entries: clear everything */
+	if (np->next_tx != np->nic_tx) {
+		drain_tx(dev);
+		np->next_tx = np->nic_tx = 0;
+		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+		netif_wake_queue(dev);
+	}
+
+	/* 4) restart tx engine */
+	start_tx(dev);
+	spin_unlock_irq(&np->lock);
+}
+
+static void rx_process(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	for (;;) {
+		struct ring_desc *prd;
+		struct sk_buff *skb;
+		int len;
+		int i;
+		if (np->cur_rx - np->refill_rx >= RX_RING)
+			break;	/* ring empty - do not continue */
+
+		i = np->cur_rx % RX_RING;
+		prd = &np->rx_ring[i];
+		dprintk(KERN_DEBUG "%s: rx_process: looking at packet %d, Flags 0x%x.\n",
+				dev->name, np->cur_rx, prd->Flags);
+
+		if (prd->Flags & cpu_to_le16(NV_RX_AVAIL))
+			break;	/* still owned by hardware */
+
+		/* the packet is for us - immediately tear down the pci mapping, and
+		 * prefetch the first cacheline of the packet.
+		 */
+		pci_unmap_single(np->pci_dev, np->rx_dma[i],
+				RX_NIC_BUFSIZE,	/* the length alloc_rx mapped */
+				PCI_DMA_FROMDEVICE);
+		prefetch(np->rx_skbuff[i]->data);
+
+		{
+			int j;
+			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", prd->Flags);
+			for (j=0; j<64; j++) {
+				if ((j%16) == 0)
+					dprintk("\n%03x:", j);
+				dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
+			}
+			dprintk("\n");
+		}
+		/* look at what we actually got: */
+		if (!(prd->Flags & cpu_to_le16(NV_RX_DESCRIPTORVALID)))
+			goto next_pkt;
+
+		len = le16_to_cpu(prd->Length);
+
+		if (prd->Flags & cpu_to_le16(NV_RX_MISSEDFRAME)) {
+			np->stats.rx_missed_errors++;
+			np->stats.rx_errors++;
+			goto next_pkt;
+		}
+		if (prd->Flags & cpu_to_le16(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4)) {
+			np->stats.rx_errors++;
+			goto next_pkt;
+		}
+		if (prd->Flags & cpu_to_le16(NV_RX_CRCERR)) {
+			np->stats.rx_crc_errors++;
+			np->stats.rx_errors++;
+			goto next_pkt;
+		}
+		if (prd->Flags & cpu_to_le16(NV_RX_OVERFLOW)) {
+			np->stats.rx_over_errors++;
+			np->stats.rx_errors++;
+			goto next_pkt;
+		}
+		if (prd->Flags & cpu_to_le16(NV_RX_ERROR)) {
+			/* framing errors are soft errors, the rest is fatal. */
+			if (prd->Flags & cpu_to_le16(NV_RX_FRAMINGERR)) {
+				if (prd->Flags & cpu_to_le16(NV_RX_SUBSTRACT1)) {
+					len--;
+				}
+			} else {
+				np->stats.rx_errors++;
+				goto next_pkt;
+			}
+		}
+		/* got a valid packet - forward it to the network core */
+		skb = np->rx_skbuff[i];
+		np->rx_skbuff[i] = NULL;
+
+		skb_put(skb, len);
+		skb->protocol = eth_type_trans(skb, dev);
+		dprintk(KERN_DEBUG "%s: rx_process: packet %d with %d bytes, proto %d accepted.\n",
+				dev->name, np->cur_rx, len, skb->protocol);
+		netif_rx(skb);
+		dev->last_rx = jiffies;
+		np->stats.rx_packets++;
+		np->stats.rx_bytes += len;
+next_pkt:
+		np->cur_rx++;
+	}
+}
+
+/*
+ * change_mtu: dev->change_mtu function
+ * Called with dev_base_lock held for read.
+ */
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu > DEFAULT_MTU)
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/*
+ * set_multicast: dev->set_multicast_list function
+ * Called with dev->xmit_lock held.
+ */
+static void set_multicast(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 *base = get_hwbase(dev);
+	u32 addr[2];
+	u32 mask[2];
+	u32 pff;
+
+	memset(addr, 0, sizeof(addr));
+	memset(mask, 0, sizeof(mask));
+
+	if (dev->flags & IFF_PROMISC) {
+		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+		pff = NVREG_PFF_PROMISC;
+	} else {
+		pff = NVREG_PFF_MYADDR;
+
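+		/*
+		 * Reverse engineered guess at the filter semantics: the nic
+		 * appears to accept a packet when (dest & mask) matches addr.
+		 * alwaysOn collects the bits set in every list entry,
+		 * alwaysOff the bits clear in every entry; only bits on
+		 * which all entries agree survive into the mask.
+		 */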
+		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
+			u32 alwaysOff[2];
+			u32 alwaysOn[2];
+
+			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
+			if (dev->flags & IFF_ALLMULTI) {
+				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
+			} else {
+				struct dev_mc_list *walk;
+
+				walk = dev->mc_list;
+				while (walk != NULL) {
+					u32 a, b;
+					a = le32_to_cpu(*(u32 *) walk->dmi_addr);
+					b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
+					alwaysOn[0] &= a;
+					alwaysOff[0] &= ~a;
+					alwaysOn[1] &= b;
+					alwaysOff[1] &= ~b;
+					walk = walk->next;
+				}
+			}
+			addr[0] = alwaysOn[0];
+			addr[1] = alwaysOn[1];
+			mask[0] = alwaysOn[0] | alwaysOff[0];
+			mask[1] = alwaysOn[1] | alwaysOff[1];
+		}
+	}
+	addr[0] |= NVREG_MCASTADDRA_FORCE;
+	pff |= NVREG_PFF_ALWAYS;
+	spin_lock_irq(&np->lock);
+	stop_rx(dev);
+	writel(addr[0], base + NvRegMulticastAddrA);
+	writel(addr[1], base + NvRegMulticastAddrB);
+	writel(mask[0], base + NvRegMulticastMaskA);
+	writel(mask[1], base + NvRegMulticastMaskB);
+	writel(pff, base + NvRegPacketFilterFlags);
+	start_rx(dev);
+	spin_unlock_irq(&np->lock);
+}
+
+static int update_linkspeed(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int adv, lpa, newls, newdup;
+
+	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
+	dprintk(KERN_DEBUG "%s: update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
+			dev->name, adv, lpa);
+
+	/* FIXME: handle parallel detection properly, handle gigabit ethernet */
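+	/* keep only the modes both link partners advertise; fastest is checked first below */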
+	lpa = lpa & adv;
+	if (lpa & LPA_100FULL) {
+		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
+		newdup = 1;
+	} else if (lpa & LPA_100HALF) {
+		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
+		newdup = 0;
+	} else if (lpa & LPA_10FULL) {
+		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+		newdup = 1;
+	} else if (lpa & LPA_10HALF) {
+		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+		newdup = 0;
+	} else {
+		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
+		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+		newdup = 0;
+	}
+	if (np->duplex != newdup || np->linkspeed != newls) {
+		np->duplex = newdup;
+		np->linkspeed = newls;
+		return 1;
+	}
+	return 0;
+}
+
+static void link_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 *base = get_hwbase(dev);
+	u32 miistat;
+	int miival;
+
+	miistat = readl(base + NvRegMIIStatus);
+	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+	printk(KERN_DEBUG "%s: link change notification, status 0x%x.\n", dev->name, miistat);
+
+	miival = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+	if (miival & BMSR_ANEGCOMPLETE) {
+		update_linkspeed(dev);
+
+		if (netif_carrier_ok(dev)) {
+			stop_rx(dev);
+		} else {
+			netif_carrier_on(dev);
+			printk(KERN_INFO "%s: link up.\n", dev->name);
+		}
+		writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
+				base + NvRegMisc1);
+		start_rx(dev);
+	} else {
+		if (netif_carrier_ok(dev)) {
+			netif_carrier_off(dev);
+			printk(KERN_INFO "%s: link down.\n", dev->name);
+			stop_rx(dev);
+		}
+		writel(np->linkspeed, base + NvRegLinkSpeed);
+		pci_push(base);
+	}
+}
+
+static irqreturn_t nic_irq(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 *base = get_hwbase(dev);
+	u32 events;
+
+	dprintk(KERN_DEBUG "%s: nic_irq\n", dev->name);
+
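+	/* read and ack all events, loop until no interrupt source we handle is pending */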
+	for (;;) {
+		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+		pci_push(base);
+		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
+		if (!(events & np->irqmask))
+			break;
+
+		/* FIXME: only call the required processing functions */
+		if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX2)) {
+			spin_lock(&np->lock);
+			tx_done(dev);
+			spin_unlock(&np->lock);
+		}
+
+		if (events & NVREG_IRQ_RX) {
+			rx_process(dev);
+			if (alloc_rx(dev)) {
+				spin_lock(&np->lock);
+				if (!np->in_shutdown)
+					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+				spin_unlock(&np->lock);
+			}
+		}
+
+		if (events & (NVREG_IRQ_LINK)) {
+			spin_lock(&np->lock);
+			link_irq(dev);
+			spin_unlock(&np->lock);
+		}
+		if (events & (NVREG_IRQ_UNKNOWN)) {
+			printk(KERN_ERR "%s: received irq with unknown source 0x%x.\n",
+					dev->name, events);
+		}
+		/* FIXME: general errors, link change interrupts */
+	}
+	dprintk(KERN_DEBUG "%s: nic_irq completed\n", dev->name);
+
+	return IRQ_HANDLED;
+}
+
+static int open(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 *base = get_hwbase(dev);
+	int ret, oom, i;
+
+	dprintk(KERN_DEBUG "forcedeth: open\n");
+
+	/* 1) erase previous misconfiguration */
+	/* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
+	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
+	writel(0, base + NvRegMulticastAddrB);
+	writel(0, base + NvRegMulticastMaskA);
+	writel(0, base + NvRegMulticastMaskB);
+	writel(0, base + NvRegPacketFilterFlags);
+	writel(0, base + NvRegAdapterControl);
+	writel(0, base + NvRegLinkSpeed);
+	writel(0, base + NvRegUnknownTransmitterReg);
+	txrx_reset(dev);
+	writel(0, base + NvRegUnknownSetupReg6);
+
+	/* 2) initialize descriptor rings */
+	np->in_shutdown = 0;
+	oom = init_ring(dev);
+
+	/* 3) set mac address */
+	{
+		u32 mac[2];
+
+		mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+				(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
+		mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
+
+		writel(mac[0], base + NvRegMacAddrA);
+		writel(mac[1], base + NvRegMacAddrB);
+	}
+
+	/* 4) continue setup */
+	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+	np->duplex = 0;
+	writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
+	writel(0, base + NvRegTxRxControl);
+	pci_push(base);
+	writel(NVREG_TXRXCTL_BIT1, base + NvRegTxRxControl);
+	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
+			NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
+			KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
+	writel(0, base + NvRegUnknownSetupReg4);
+
+	/* 5) find a suitable PHY */
+	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
+	for (i = 1; i < 32; i++) {
+		int id1, id2;
+
+		id1 = mii_rw(dev, i, MII_PHYSID1, MII_READ);
+		if (id1 < 0)
+			continue;
+		id2 = mii_rw(dev, i, MII_PHYSID2, MII_READ);
+		if (id2 < 0)
+			continue;
+		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
+				dev->name, id1, id2, i);
+		np->phyaddr = i;
+
+		update_linkspeed(dev);
+
+		break;
+	}
+	if (i == 32) {
+		printk(KERN_INFO "%s: open: failing due to lack of suitable PHY.\n",
+				dev->name);
+		ret = -EINVAL;
+		goto out_drain;
+	}
+
+	/* 6) continue setup */
+	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
+			base + NvRegMisc1);
+	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
+	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
+	writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig);
+
+	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
+	get_random_bytes(&i, sizeof(i));
+	writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
+	writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
+	writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
+	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID,
+			base + NvRegAdapterControl);
+	writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
+	writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
+
+	/* 7) start packet processing */
+	writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
+	writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+	writel(((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+			base + NvRegRingSizes);
+
+	i = readl(base + NvRegPowerState);
+	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0) {
+		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
+	}
+	pci_push(base);
+	udelay(10);
+	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
+	writel(NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
+
+	writel(0, base + NvRegIrqMask);
+	pci_push(base);
+	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+	pci_push(base);
+	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
+	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+	pci_push(base);
+
+	ret = request_irq(dev->irq, &nic_irq, SA_SHIRQ, dev->name, dev);
+	if (ret)
+		goto out_drain;
+
+	writel(np->irqmask, base + NvRegIrqMask);
+	spin_lock_irq(&np->lock);
+	start_rx(dev);
+	set_multicast(dev);
+	start_tx(dev);
+	netif_start_queue(dev);
+	if (oom)
+		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+	if (!(mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ) & BMSR_ANEGCOMPLETE)) {
+		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
+		netif_carrier_off(dev);
+	}
+
+	spin_unlock_irq(&np->lock);
+
+	return 0;
+
+out_drain:
+	drain_ring(dev);
+	return ret;
+}
+
+static int close(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	spin_lock_irq(&np->lock);
+	np->in_shutdown = 1;
+	spin_unlock_irq(&np->lock);
+	synchronize_irq(dev->irq);
+
+	del_timer_sync(&np->oom_kick);
+
+	netif_stop_queue(dev);
+	spin_lock_irq(&np->lock);
+	stop_tx(dev);
+	stop_rx(dev);
+	spin_unlock_irq(&np->lock);
+
+	free_irq(dev->irq, dev);
+
+	drain_ring(dev);
+
+	/* FIXME: power down nic */
+
+	return 0;
+}
+
+static int __devinit probe_nic(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+	struct net_device *dev;
+	struct fe_priv *np;
+	unsigned long addr;
+	u8 *base;
+	int err, i;
+
+	/* check for NULL before dereferencing dev->priv */
+	dev = alloc_etherdev(sizeof(struct fe_priv));
+	err = -ENOMEM;
+	if (!dev)
+		goto out;
+	np = get_nvpriv(dev);
+
+	np->pci_dev = pci_dev;
+	spin_lock_init(&np->lock);
+	SET_MODULE_OWNER(dev);
+	SET_NETDEV_DEV(dev, &pci_dev->dev);
+
+	err = pci_enable_device(pci_dev);
+	if (err) {
+		printk(KERN_INFO "forcedeth: pci_enable_device failed: %d\n", err);
+		goto out_free;
+	}
+
+	pci_set_master(pci_dev);
+
+	err = pci_request_regions(pci_dev, dev->name);
+	if (err < 0)
+		goto out_disable;
+
+	err = -EINVAL;
+	addr = 0;
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		dprintk(KERN_DEBUG "forcedeth: resource %d start %p len %ld flags 0x%08lx.\n",
+				i, (void*)pci_resource_start(pci_dev, i),
+				pci_resource_len(pci_dev, i),
+				pci_resource_flags(pci_dev, i));
+		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
+				pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) {
+			addr = pci_resource_start(pci_dev, i);
+			break;
+		}
+	}
+	if (i == DEVICE_COUNT_RESOURCE) {
+		printk(KERN_INFO "forcedeth: Couldn't find register window.\n");
+		goto out_relreg;
+	}
+
+	err = -ENOMEM;
+	dev->base_addr = (unsigned long) ioremap(addr, NV_PCI_REGSZ);
+	if (!dev->base_addr)
+		goto out_relreg;	/* the regions were already requested above */
+	dev->irq = pci_dev->irq;
+	np->rx_ring = pci_alloc_consistent(pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
+						&np->ring_addr);
+	if (!np->rx_ring)
+		goto out_unmap;
+	np->tx_ring = &np->rx_ring[RX_RING];
+
+	dev->open = open;
+	dev->stop = close;
+	dev->hard_start_xmit = start_xmit;
+	dev->get_stats = get_stats;
+	dev->change_mtu = change_mtu;
+	dev->set_multicast_list = set_multicast;
+	dev->do_ioctl = nic_ioctl;
+	dev->tx_timeout = tx_timeout;
+	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
+
+	pci_set_drvdata(pci_dev, dev);
+
+	/* read the mac address */
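+	/* the nic stores the address misordered; remove_nic() writes the raw
+	 * register values back so a later probe sees the right address */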
+	base = get_hwbase(dev);
+	np->orig_mac[0] = readl(base + NvRegMacAddrA);
+	np->orig_mac[1] = readl(base + NvRegMacAddrB);
+
+	dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
+	dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
+	dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
+	dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
+	dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
+	dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
+
+	dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
+			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+	np->tx_flags = cpu_to_le16(NV_TX_LASTPACKET|NV_TX_VALID);
+	if (id->driver_data & DEV_NEED_LASTPACKET1)
+		np->tx_flags |= cpu_to_le16(NV_TX_LASTPACKET1);
+	if (id->driver_data & DEV_IRQMASK_1)
+		np->irqmask = NVREG_IRQMASK_WANTED_1;
+	if (id->driver_data & DEV_IRQMASK_2)
+		np->irqmask = NVREG_IRQMASK_WANTED_2;
+
+	/* register only after the device is fully set up */
+	err = register_netdev(dev);
+	if (err) {
+		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
+		goto out_freering;
+	}
+	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x\n",
+			dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device);
+
+	return 0;
+
+out_freering:
+	pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
+				np->rx_ring, np->ring_addr);
+out_unmap:
+	iounmap(get_hwbase(dev));
+out_relreg:
+	pci_release_regions(pci_dev);
+out_disable:
+	pci_disable_device(pci_dev);
+out_free:
+	kfree(dev);
+	pci_set_drvdata(pci_dev, NULL);
+out:
+	return err;
+}
+
+static void __devexit remove_nic(struct pci_dev *pci_dev)
+{
+	struct net_device *dev = pci_get_drvdata(pci_dev);
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 *base = get_hwbase(dev);
+
+	unregister_netdev(dev);
+
+	/* special op: write back the misordered MAC address - otherwise
+	 * the next probe_nic would see a wrong address.
+	 */
+	writel(np->orig_mac[0], base + NvRegMacAddrA);
+	writel(np->orig_mac[1], base + NvRegMacAddrB);
+
+	/* free all structures */
+	pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring, np->ring_addr);
+	iounmap(get_hwbase(dev));
+	pci_release_regions(pci_dev);
+	pci_disable_device(pci_dev);
+	kfree(dev);
+	pci_set_drvdata(pci_dev, NULL);
+}
+
+static struct pci_device_id pci_tbl[] = {
+	{	/* nForce Ethernet Controller */
+		.vendor = PCI_VENDOR_ID_NVIDIA,
+		.device = 0x01C3,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+		.driver_data = DEV_IRQMASK_1,
+	},
+	{	/* nForce2 Ethernet Controller */
+		.vendor = PCI_VENDOR_ID_NVIDIA,
+		.device = 0x0066,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2,
+	},
+	{	/* nForce3 Ethernet Controller */
+		.vendor = PCI_VENDOR_ID_NVIDIA,
+		.device = 0x00D6,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2,
+	},
+	{0,},
+};
+
+static struct pci_driver driver = {
+	.name = "forcedeth",
+	.id_table = pci_tbl,
+	.probe = probe_nic,
+	.remove = __devexit_p(remove_nic),
+};
+
+
+static int __init init_nic(void)
+{
+	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
+	return pci_module_init(&driver);
+}
+
+static void __exit exit_nic(void)
+{
+	pci_unregister_driver(&driver);
+}
+
+MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
+MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+module_init(init_nic);
+module_exit(exit_nic);