1 --- 2.6/drivers/net/Kconfig 2003-10-18 21:17:01.000000000 +0200
2 +++ build-2.6/drivers/net/Kconfig 2003-10-25 15:06:15.000000000 +0200
4 <file:Documentation/networking/net-modules.txt>. The module will be
8 + tristate "Reverse Engineered nForce Ethernet support (EXPERIMENTAL)"
9 + depends on NET_PCI && PCI && EXPERIMENTAL
11 + If you have a network (Ethernet) controller of this type, say Y and
12 + read the Ethernet-HOWTO, available from
13 + <http://www.tldp.org/docs.html#howto>.
15 + If you want to compile this as a module ( = code which can be
16 + inserted in and removed from the running kernel whenever you want),
17 + say M here and read <file:Documentation/modules.txt> as well as
18 + <file:Documentation/networking/net-modules.txt>. The module will be
23 tristate "CS89x0 support"
24 depends on NET_PCI && ISA
25 --- 2.6/drivers/net/Makefile 2003-10-09 21:19:50.000000000 +0200
26 +++ build-2.6/drivers/net/Makefile 2003-10-25 15:06:15.000000000 +0200
28 obj-$(CONFIG_NE3210) += ne3210.o 8390.o
29 obj-$(CONFIG_NET_SB1250_MAC) += sb1250-mac.o
30 obj-$(CONFIG_B44) += b44.o
31 +obj-$(CONFIG_FORCEDETH) += forcedeth.o
33 obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
34 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
35 --- 2.6/drivers/net/forcedeth.c 1970-01-01 01:00:00.000000000 +0100
36 +++ build-2.6/drivers/net/forcedeth.c 2003-10-25 15:06:39.000000000 +0200
39 + * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
41 + * Note: This driver is a cleanroom reimplementation based on reverse
42 + * engineered documentation written by Carl-Daniel Hailfinger
43 + * and Andrew de Quincey. It's neither supported nor endorsed
44 + * by NVIDIA Corp. Use at your own risk.
46 + * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
47 + * trademarks of NVIDIA Corporation in the United States and other
50 + * Copyright (C) 2003 Manfred Spraul
52 + * This program is free software; you can redistribute it and/or modify
53 + * it under the terms of the GNU General Public License as published by
54 + * the Free Software Foundation; either version 2 of the License, or
55 + * (at your option) any later version.
57 + * This program is distributed in the hope that it will be useful,
58 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
59 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
60 + * GNU General Public License for more details.
62 + * You should have received a copy of the GNU General Public License
63 + * along with this program; if not, write to the Free Software
64 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
67 + * 0.01: 05 Oct 2003: First release that compiles without warnings.
68 + * 0.02: 05 Oct 2003: Fix bug for drain_tx: do not try to free NULL skbs.
69 + * Check all PCI BARs for the register window.
70 + * udelay added to mii_rw.
71 + * 0.03: 06 Oct 2003: Initialize dev->irq.
72 + * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
73 + * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
74 + * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
76 + * 0.07: 14 Oct 2003: Further irq mask updates.
77 + * 0.08: 20 Oct 2003: rx_desc.Length initialization added, alloc_rx refill
78 + * added into irq handler, NULL check for drain_ring.
79 + * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
80 + * requested interrupt sources.
81 + * 0.10: 20 Oct 2003: First cleanup for release.
82 + * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
83 + * MAC Address init fix, set_multicast cleanup.
84 + * 0.12: 23 Oct 2003: Cleanups for release.
85 + * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
86 + *                     Set link speed correctly. Start rx before starting
87 + * tx (start_rx sets the link speed).
88 + * 0.14: 25 Oct 2003: Nic dependent irq mask.
91 + * The irq handling is wrong - no tx done interrupts are generated.
92 + * This means recovery from netif_stop_queue only happens in the hw timer
93 + * interrupt (1/2 second), or if an rx packet arrives by chance.
95 +#define FORCEDETH_VERSION "0.14"
97 +#include <linux/module.h>
98 +#include <linux/types.h>
99 +#include <linux/pci.h>
100 +#include <linux/netdevice.h>
101 +#include <linux/etherdevice.h>
102 +#include <linux/delay.h>
103 +#include <linux/spinlock.h>
104 +#include <linux/ethtool.h>
105 +#include <linux/timer.h>
106 +#include <linux/skbuff.h>
107 +#include <linux/mii.h>
108 +#include <linux/random.h>
111 +#include <asm/uaccess.h>
112 +#include <asm/system.h>
115 +#define dprintk printk
117 +#define dprintk(x...) do { } while (0)
125 +#define DEV_NEED_LASTPACKET1 0x0001
126 +#define DEV_IRQMASK_1 0x0002
127 +#define DEV_IRQMASK_2 0x0004
130 + NvRegIrqStatus = 0x000,
131 +#define NVREG_IRQSTAT_MIIEVENT 0x040
132 +#define NVREG_IRQSTAT_MASK 0x1ff
133 + NvRegIrqMask = 0x004,
134 +#define NVREG_IRQ_UNKNOWN 0x0005
135 +#define NVREG_IRQ_RX 0x0002
136 +#define NVREG_IRQ_TX2 0x0010
137 +#define NVREG_IRQ_TIMER 0x0020
138 +#define NVREG_IRQ_LINK 0x0040
139 +#define NVREG_IRQ_TX1 0x0100
140 +#define NVREG_IRQMASK_WANTED_1 0x005f
141 +#define NVREG_IRQMASK_WANTED_2 0x0147
143 + NvRegUnknownSetupReg6 = 0x008,
144 +#define NVREG_UNKSETUP6_VAL 3
146 + NvRegPollingInterval = 0x00c,
147 + NvRegMisc1 = 0x080,
148 +#define NVREG_MISC1_HD 0x02
149 +#define NVREG_MISC1_FORCE 0x3b0f3c
151 + NvRegTransmitterControl = 0x084,
152 +#define NVREG_XMITCTL_START 0x01
153 + NvRegTransmitterStatus = 0x088,
154 +#define NVREG_XMITSTAT_BUSY 0x01
156 + NvRegPacketFilterFlags = 0x8c,
157 +#define NVREG_PFF_ALWAYS 0x7F0008
158 +#define NVREG_PFF_PROMISC 0x80
159 +#define NVREG_PFF_MYADDR 0x20
161 + NvRegOffloadConfig = 0x90,
162 +#define NVREG_OFFLOAD_HOMEPHY 0x601
163 +#define NVREG_OFFLOAD_NORMAL 0x5ee
164 + NvRegReceiverControl = 0x094,
165 +#define NVREG_RCVCTL_START 0x01
166 + NvRegReceiverStatus = 0x98,
167 +#define NVREG_RCVSTAT_BUSY 0x01
169 + NvRegRandomSeed = 0x9c,
170 +#define NVREG_RNDSEED_MASK 0x00ff
171 +#define NVREG_RNDSEED_FORCE 0x7f00
173 + NvRegUnknownSetupReg1 = 0xA0,
174 +#define NVREG_UNKSETUP1_VAL 0x16070f
175 + NvRegUnknownSetupReg2 = 0xA4,
176 +#define NVREG_UNKSETUP2_VAL 0x16
177 + NvRegMacAddrA = 0xA8,
178 + NvRegMacAddrB = 0xAC,
179 + NvRegMulticastAddrA = 0xB0,
180 +#define NVREG_MCASTADDRA_FORCE 0x01
181 + NvRegMulticastAddrB = 0xB4,
182 + NvRegMulticastMaskA = 0xB8,
183 + NvRegMulticastMaskB = 0xBC,
185 + NvRegTxRingPhysAddr = 0x100,
186 + NvRegRxRingPhysAddr = 0x104,
187 + NvRegRingSizes = 0x108,
188 +#define NVREG_RINGSZ_TXSHIFT 0
189 +#define NVREG_RINGSZ_RXSHIFT 16
190 + NvRegUnknownTransmitterReg = 0x10c,
191 + NvRegLinkSpeed = 0x110,
192 +#define NVREG_LINKSPEED_FORCE 0x10000
193 +#define NVREG_LINKSPEED_10 10
194 +#define NVREG_LINKSPEED_100 100
195 +#define NVREG_LINKSPEED_1000 1000
196 + NvRegUnknownSetupReg5 = 0x130,
197 +#define NVREG_UNKSETUP5_BIT31 (1<<31)
198 + NvRegUnknownSetupReg3 = 0x134,
199 +#define NVREG_UNKSETUP3_VAL1 0x200010
200 + NvRegTxRxControl = 0x144,
201 +#define NVREG_TXRXCTL_KICK 0x0001
202 +#define NVREG_TXRXCTL_BIT1 0x0002
203 +#define NVREG_TXRXCTL_BIT2 0x0004
204 +#define NVREG_TXRXCTL_IDLE 0x0008
205 +#define NVREG_TXRXCTL_RESET 0x0010
206 + NvRegMIIStatus = 0x180,
207 +#define NVREG_MIISTAT_ERROR 0x0001
208 +#define NVREG_MIISTAT_LINKCHANGE 0x0008
209 +#define NVREG_MIISTAT_MASK 0x000f
210 +#define NVREG_MIISTAT_MASK2 0x000f
211 + NvRegUnknownSetupReg4 = 0x184,
212 +#define NVREG_UNKSETUP4_VAL 8
214 + NvRegAdapterControl = 0x188,
215 +#define NVREG_ADAPTCTL_START 0x02
216 +#define NVREG_ADAPTCTL_LINKUP 0x04
217 +#define NVREG_ADAPTCTL_PHYVALID 0x4000
218 +#define NVREG_ADAPTCTL_RUNNING 0x100000
219 +#define NVREG_ADAPTCTL_PHYSHIFT 24
220 + NvRegMIISpeed = 0x18c,
221 +#define NVREG_MIISPEED_BIT8 (1<<8)
222 +#define NVREG_MIIDELAY 5
223 + NvRegMIIControl = 0x190,
224 +#define NVREG_MIICTL_INUSE 0x10000
225 +#define NVREG_MIICTL_WRITE 0x08000
226 +#define NVREG_MIICTL_ADDRSHIFT 5
227 + NvRegMIIData = 0x194,
228 + NvRegWakeUpFlags = 0x200,
229 +#define NVREG_WAKEUPFLAGS_VAL 0x7770
230 +#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
231 +#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
232 +#define NVREG_WAKEUPFLAGS_D3SHIFT 12
233 +#define NVREG_WAKEUPFLAGS_D2SHIFT 8
234 +#define NVREG_WAKEUPFLAGS_D1SHIFT 4
235 +#define NVREG_WAKEUPFLAGS_D0SHIFT 0
236 +#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
237 +#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
238 +#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
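+/* The D0..D3 shifts above presumably select a per-power-state nibble of
+ * ACCEPT_* bits; NVREG_WAKEUPFLAGS_VAL (0x7770) would then arm all three
+ * wakeup sources for D1..D3 and none for D0. */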
240 + NvRegPatternCRC = 0x204,
241 + NvRegPatternMask = 0x208,
242 + NvRegPowerCap = 0x268,
243 +#define NVREG_POWERCAP_D3SUPP (1<<30)
244 +#define NVREG_POWERCAP_D2SUPP (1<<26)
245 +#define NVREG_POWERCAP_D1SUPP (1<<25)
246 + NvRegPowerState = 0x26c,
247 +#define NVREG_POWERSTATE_POWEREDUP 0x8000
248 +#define NVREG_POWERSTATE_VALID 0x0100
249 +#define NVREG_POWERSTATE_MASK 0x0003
250 +#define NVREG_POWERSTATE_D0 0x0000
251 +#define NVREG_POWERSTATE_D1 0x0001
252 +#define NVREG_POWERSTATE_D2 0x0002
253 +#define NVREG_POWERSTATE_D3 0x0003
262 +#define NV_TX_LASTPACKET (1<<0)
263 +#define NV_TX_RETRYERROR (1<<3)
264 +#define NV_TX_LASTPACKET1 (1<<8)
265 +#define NV_TX_DEFERRED (1<<10)
266 +#define NV_TX_CARRIERLOST (1<<11)
267 +#define NV_TX_LATECOLLISION (1<<12)
268 +#define NV_TX_UNDERFLOW (1<<13)
269 +#define NV_TX_ERROR (1<<14)
270 +#define NV_TX_VALID (1<<15)
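+/* Ownership of a tx descriptor seems to be signalled through NV_TX_VALID:
+ * start_xmit sets it to hand the descriptor to the nic, tx_done skips
+ * descriptors that still carry it, and the error bits above are reported
+ * in the same Flags word once the nic is done. */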
272 +#define NV_RX_DESCRIPTORVALID (1<<0)
273 +#define NV_RX_MISSEDFRAME (1<<1)
274 +#define NV_RX_SUBSTRACT1 (1<<3)
275 +#define NV_RX_ERROR1 (1<<7)
276 +#define NV_RX_ERROR2 (1<<8)
277 +#define NV_RX_ERROR3 (1<<9)
278 +#define NV_RX_ERROR4 (1<<10)
279 +#define NV_RX_CRCERR (1<<11)
280 +#define NV_RX_OVERFLOW (1<<12)
281 +#define NV_RX_FRAMINGERR (1<<13)
282 +#define NV_RX_ERROR (1<<14)
283 +#define NV_RX_AVAIL (1<<15)
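+/* The rx ring works the same way: alloc_rx sets NV_RX_AVAIL to give a
+ * buffer to the nic; once the nic clears it, NV_RX_DESCRIPTORVALID plus
+ * the error bits describe the received frame. */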
285 +/* Miscellaneous hardware related defines: */
286 +#define NV_PCI_REGSZ 0x270
288 +/* various timeout delays: all in usec */
289 +#define NV_TXRX_RESET_DELAY 4
290 +#define NV_TXSTOP_DELAY1 10
291 +#define NV_TXSTOP_DELAY1MAX 500000
292 +#define NV_TXSTOP_DELAY2 100
293 +#define NV_RXSTOP_DELAY1 10
294 +#define NV_RXSTOP_DELAY1MAX 500000
295 +#define NV_RXSTOP_DELAY2 100
296 +#define NV_SETUP5_DELAY 5
297 +#define NV_SETUP5_DELAYMAX 50000
298 +#define NV_POWERUP_DELAY 5
299 +#define NV_POWERUP_DELAYMAX 5000
300 +#define NV_MIIBUSY_DELAY 50
301 +#define NV_MIIPHY_DELAY 10
302 +#define NV_MIIPHY_DELAYMAX 10000
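+/* the delay/delaymax pairs are the poll interval and total timeout handed
+ * to reg_delay below; the remaining values are used as plain udelays. */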
304 +#define NV_WAKEUPPATTERNS 5
305 +#define NV_WAKEUPMASKENTRIES 4
307 +/* General driver defaults */
308 +#define NV_WATCHDOG_TIMEO (2*HZ)
309 +#define DEFAULT_MTU 1500 /* also maximum supported, at least for now */
313 +/* limits for concurrently outstanding tx packets */
314 +#define TX_LIMIT_STOP 10
315 +#define TX_LIMIT_START 5
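+/* start_xmit stops the queue once next_tx - nic_tx reaches TX_LIMIT_STOP;
+ * tx_done wakes it again when the backlog drops below TX_LIMIT_START. */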
317 +/* rx/tx mac addr + type + vlan + align + slack */
318 +#define RX_NIC_BUFSIZE (DEFAULT_MTU + 64)
319 +/* even more slack */
320 +#define RX_ALLOC_BUFSIZE (DEFAULT_MTU + 128)
322 +#define OOM_REFILL (1+HZ/20)
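+/* if alloc_rx runs out of memory, the oom_kick timer retries the ring
+ * refill OOM_REFILL jiffies (roughly 50 msec) later. */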
325 + * All hardware access under dev->priv->lock, except the performance
327 + * - rx is (pseudo-) lockless: it relies on the single-threading provided
328 + * by the arch code for interrupts.
329 + * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
330 + * needs dev->priv->lock :-(
331 + * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
334 +/* in dev: base, irq */
339 + * Locking: spin_lock(&np->lock); */
340 + struct net_device_stats stats;
346 + /* General data: RO fields */
347 + dma_addr_t ring_addr;
348 + struct pci_dev *pci_dev;
352 + /* rx specific fields.
353 +	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
355 + struct ring_desc *rx_ring;
356 + unsigned int cur_rx, refill_rx;
357 + struct sk_buff *rx_skbuff[RX_RING];
358 + dma_addr_t rx_dma[RX_RING];
359 + unsigned int rx_buf_sz;
360 + struct timer_list oom_kick;
363 + * tx specific fields.
365 + struct ring_desc *tx_ring;
366 + unsigned int next_tx, nic_tx;
367 + struct sk_buff *tx_skbuff[TX_RING];
368 + dma_addr_t tx_dma[TX_RING];
372 +static inline struct fe_priv *get_nvpriv(struct net_device *dev)
374 + return (struct fe_priv *) dev->priv;
377 +static inline u8 *get_hwbase(struct net_device *dev)
379 + return (u8 *) dev->base_addr;
382 +static inline void pci_push(u8 * base)
384 + /* force out pending posted writes */
388 +static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
389 + int delay, int delaymax, const char *msg)
391 + u8 *base = get_hwbase(dev);
397 + if (delaymax < 0) {
402 + } while ((readl(base + offset) & mask) != target);
406 +#define MII_READ (-1)
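+/* passing MII_READ as value turns mii_rw into a read, any other value
+ * is written to the specified PHY register. */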
407 +/* mii_rw: read/write a register on the PHY.
409 + * Caller must guarantee serialization
411 +static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
413 + u8 *base = get_hwbase(dev);
418 + writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
420 + reg = readl(base + NvRegAdapterControl);
421 + if (reg & NVREG_ADAPTCTL_RUNNING) {
423 + writel(reg & ~NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
425 + reg = readl(base + NvRegMIIControl);
426 + if (reg & NVREG_MIICTL_INUSE) {
427 + writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
428 + udelay(NV_MIIBUSY_DELAY);
431 + reg = NVREG_MIICTL_INUSE | (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
432 + if (value != MII_READ) {
433 + writel(value, base + NvRegMIIData);
434 + reg |= NVREG_MIICTL_WRITE;
436 + writel(reg, base + NvRegMIIControl);
438 + if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
439 + NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
440 + dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
441 + dev->name, miireg, addr);
443 + } else if (value != MII_READ) {
444 + /* it was a write operation - fewer failures are detectable */
445 + dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
446 + dev->name, value, miireg, addr);
448 + } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
449 + dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
450 + dev->name, miireg, addr);
453 + /* FIXME: why is that required? */
455 + retval = readl(base + NvRegMIIData);
456 + dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
457 + dev->name, miireg, addr, retval);
460 + reg = readl(base + NvRegAdapterControl);
461 + writel(reg | NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
466 +static void start_rx(struct net_device *dev)
468 + struct fe_priv *np = get_nvpriv(dev);
469 + u8 *base = get_hwbase(dev);
471 + dprintk(KERN_DEBUG "%s: start_rx\n", dev->name);
472 + /* Already running? Stop it. */
473 + if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
474 + writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
477 + writel(np->linkspeed, base + NvRegLinkSpeed);
479 + writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
483 +static void stop_rx(struct net_device *dev)
485 + u8 *base = get_hwbase(dev);
487 + dprintk(KERN_DEBUG "%s: stop_rx\n", dev->name);
488 + writel(0, base + NvRegReceiverControl);
489 + reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
490 + NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
491 + KERN_INFO "stop_rx: ReceiverStatus remained busy");
493 + udelay(NV_RXSTOP_DELAY2);
494 + writel(0, base + NvRegLinkSpeed);
497 +static void start_tx(struct net_device *dev)
499 + u8 *base = get_hwbase(dev);
501 + dprintk(KERN_DEBUG "%s: start_tx\n", dev->name);
502 + writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
506 +static void stop_tx(struct net_device *dev)
508 + u8 *base = get_hwbase(dev);
510 + dprintk(KERN_DEBUG "%s: stop_tx\n", dev->name);
511 + writel(0, base + NvRegTransmitterControl);
512 + reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
513 + NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
514 + KERN_INFO "stop_tx: TransmitterStatus remained busy");
516 + udelay(NV_TXSTOP_DELAY2);
517 + writel(0, base + NvRegUnknownTransmitterReg);
520 +static void txrx_reset(struct net_device *dev)
522 + u8 *base = get_hwbase(dev);
524 + dprintk(KERN_DEBUG "%s: txrx_reset\n", dev->name);
525 + writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET, base + NvRegTxRxControl);
527 + udelay(NV_TXRX_RESET_DELAY);
528 + writel(NVREG_TXRXCTL_BIT2, base + NvRegTxRxControl);
533 + * get_stats: dev->get_stats function
534 + * Get latest stats value from the nic.
535 + * Called with read_lock(&dev_base_lock) held for read -
536 + * only synchronized against unregister_netdevice.
538 +static struct net_device_stats *get_stats(struct net_device *dev)
540 + struct fe_priv *np = get_nvpriv(dev);
542 + /* It seems that the nic always generates interrupts and doesn't
543 + * accumulate errors internally. Thus the current values in np->stats
544 + * are already up to date.
551 + * nic_ioctl: dev->do_ioctl function
552 + * Called with rtnl_lock held.
554 +static int nic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
556 + return -EOPNOTSUPP;
560 + * alloc_rx: fill rx ring entries.
561 + * Return 1 if the allocations for the skbs failed and the
562 + * rx engine is without Available descriptors
564 +static int alloc_rx(struct net_device *dev)
566 + struct fe_priv *np = get_nvpriv(dev);
567 + unsigned int refill_rx = np->refill_rx;
569 + while (np->cur_rx != refill_rx) {
570 + int nr = refill_rx % RX_RING;
571 + struct sk_buff *skb;
573 + if (np->rx_skbuff[nr] == NULL) {
575 + skb = dev_alloc_skb(RX_ALLOC_BUFSIZE);
580 + np->rx_skbuff[nr] = skb;
582 + skb = np->rx_skbuff[nr];
584 + np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
585 + PCI_DMA_FROMDEVICE);
586 + np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
587 + np->rx_ring[nr].Length = cpu_to_le16(RX_NIC_BUFSIZE);
589 + np->rx_ring[nr].Flags = cpu_to_le16(NV_RX_AVAIL);
590 + dprintk(KERN_DEBUG "%s: alloc_rx: Packet %d marked as Available\n",
591 + dev->name, refill_rx);
594 + if (np->refill_rx != refill_rx) {
595 + /* FIXME: made progress. Kick hardware */
597 + np->refill_rx = refill_rx;
598 + if (np->cur_rx - refill_rx == RX_RING)
603 +static void do_rx_refill(unsigned long data)
605 + struct net_device *dev = (struct net_device *) data;
606 + struct fe_priv *np = get_nvpriv(dev);
608 + disable_irq(dev->irq);
609 + if (alloc_rx(dev)) {
610 + spin_lock(&np->lock);
611 + if (!np->in_shutdown)
612 + mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
613 + spin_unlock(&np->lock);
615 + enable_irq(dev->irq);
618 +static int init_ring(struct net_device *dev)
620 + struct fe_priv *np = get_nvpriv(dev);
623 + np->next_tx = np->nic_tx = 0;
624 + for (i = 0; i < TX_RING; i++) {
625 + np->tx_ring[i].Flags = 0;
628 + np->cur_rx = RX_RING;
630 + for (i = 0; i < RX_RING; i++) {
631 + np->rx_ring[i].Flags = 0;
633 + init_timer(&np->oom_kick);
634 + np->oom_kick.data = (unsigned long) dev;
635 + np->oom_kick.function = &do_rx_refill; /* timer handler */
637 + return alloc_rx(dev);
640 +static void drain_tx(struct net_device *dev)
642 + struct fe_priv *np = get_nvpriv(dev);
644 + for (i = 0; i < TX_RING; i++) {
645 + np->tx_ring[i].Flags = 0;
646 + if (np->tx_skbuff[i]) {
647 + pci_unmap_single(np->pci_dev, np->tx_dma[i],
648 + np->tx_skbuff[i]->len,
650 + dev_kfree_skb(np->tx_skbuff[i]);
651 + np->tx_skbuff[i] = NULL;
652 + np->stats.tx_dropped++;
656 +static void drain_ring(struct net_device *dev)
658 + struct fe_priv *np = get_nvpriv(dev);
663 + for (i = 0; i < RX_RING; i++) {
664 + np->rx_ring[i].Flags = 0;
666 + if (np->rx_skbuff[i]) {
667 + pci_unmap_single(np->pci_dev, np->rx_dma[i],
668 + np->rx_skbuff[i]->len,
669 + PCI_DMA_FROMDEVICE);
670 + dev_kfree_skb(np->rx_skbuff[i]);
671 + np->rx_skbuff[i] = NULL;
677 + * start_xmit: dev->hard_start_xmit function
678 + * Called with dev->xmit_lock held.
680 +static int start_xmit(struct sk_buff *skb, struct net_device *dev)
682 + struct fe_priv *np = get_nvpriv(dev);
683 + int nr = np->next_tx % TX_RING;
685 + np->tx_skbuff[nr] = skb;
686 + np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len,
689 + np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
690 + np->tx_ring[nr].Length = cpu_to_le16(skb->len);
692 + spin_lock_irq(&np->lock);
694 + np->tx_ring[nr].Flags = np->tx_flags;
695 +	dprintk(KERN_DEBUG "%s: start_xmit: packet %d queued for transmission.\n",
696 + dev->name, np->next_tx);
699 + for (j=0; j<64; j++) {
701 + dprintk("\n%03x:", j);
702 + dprintk(" %02x", ((unsigned char*)skb->data)[j]);
709 + dev->trans_start = jiffies;
710 + if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP)
711 + netif_stop_queue(dev);
712 + spin_unlock_irq(&np->lock);
713 + writel(NVREG_TXRXCTL_KICK, get_hwbase(dev) + NvRegTxRxControl);
718 + * tx_done: check for completed packets, release the skbs.
720 + * Caller must own np->lock.
722 +static void tx_done(struct net_device *dev)
724 + struct fe_priv *np = get_nvpriv(dev);
726 + while (np->nic_tx < np->next_tx) {
727 + struct ring_desc *prd;
728 + int i = np->nic_tx % TX_RING;
730 + prd = &np->tx_ring[i];
732 + dprintk(KERN_DEBUG "%s: tx_done: looking at packet %d, Flags 0x%x.\n",
733 + dev->name, np->nic_tx, prd->Flags);
734 + if (prd->Flags & cpu_to_le16(NV_TX_VALID))
736 + if (prd->Flags & cpu_to_le16(NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
737 + NV_TX_UNDERFLOW|NV_TX_ERROR)) {
738 + if (prd->Flags & cpu_to_le16(NV_TX_UNDERFLOW))
739 + np->stats.tx_fifo_errors++;
740 + if (prd->Flags & cpu_to_le16(NV_TX_CARRIERLOST))
741 + np->stats.tx_carrier_errors++;
742 + np->stats.tx_errors++;
744 + np->stats.tx_packets++;
745 + np->stats.tx_bytes += np->tx_skbuff[i]->len;
747 + pci_unmap_single(np->pci_dev, np->tx_dma[i],
748 + np->tx_skbuff[i]->len,
750 + dev_kfree_skb_irq(np->tx_skbuff[i]);
751 + np->tx_skbuff[i] = NULL;
754 + if (np->next_tx - np->nic_tx < TX_LIMIT_START)
755 + netif_wake_queue(dev);
758 + * tx_timeout: dev->tx_timeout function
759 + * Called with dev->xmit_lock held.
761 +static void tx_timeout(struct net_device *dev)
763 + struct fe_priv *np = get_nvpriv(dev);
764 + u8 *base = get_hwbase(dev);
766 + dprintk(KERN_DEBUG "%s: Got tx_timeout.\n", dev->name);
767 + dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name,
768 + readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
770 + spin_lock_irq(&np->lock);
772 + /* 1) stop tx engine */
775 + /* 2) check that the packets were not sent already: */
778 + /* 3) if there are dead entries: clear everything */
779 + if (np->next_tx != np->nic_tx) {
781 + np->next_tx = np->nic_tx = 0;
782 + writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
783 + netif_wake_queue(dev);
786 + /* 4) restart tx engine */
788 + spin_unlock_irq(&np->lock);
791 +static void rx_process(struct net_device *dev)
793 + struct fe_priv *np = get_nvpriv(dev);
796 + struct ring_desc *prd;
797 + struct sk_buff *skb;
800 + if (np->cur_rx - np->refill_rx >= RX_RING)
801 + break; /* ring empty - do not continue */
803 + i = np->cur_rx % RX_RING;
804 + prd = &np->rx_ring[i];
805 + dprintk(KERN_DEBUG "%s: rx_process: looking at packet %d, Flags 0x%x.\n",
806 + dev->name, np->cur_rx, prd->Flags);
808 + if (prd->Flags & cpu_to_le16(NV_RX_AVAIL))
809 + break; /* still owned by hardware, */
811 + /* the packet is for us - immediately tear down the pci mapping, and
812 + * prefetch the first cacheline of the packet.
814 + pci_unmap_single(np->pci_dev, np->rx_dma[i],
815 + np->rx_skbuff[i]->len,
816 + PCI_DMA_FROMDEVICE);
817 + prefetch(np->rx_skbuff[i]->data);
821 + dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",prd->Flags);
822 + for (j=0; j<64; j++) {
824 + dprintk("\n%03x:", j);
825 + dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
829 + /* look at what we actually got: */
830 + if (!(prd->Flags & cpu_to_le16(NV_RX_DESCRIPTORVALID)))
834 + len = le16_to_cpu(prd->Length);
836 + if (prd->Flags & cpu_to_le16(NV_RX_MISSEDFRAME)) {
837 + np->stats.rx_missed_errors++;
838 + np->stats.rx_errors++;
841 + if (prd->Flags & cpu_to_le16(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4)) {
842 + np->stats.rx_errors++;
845 + if (prd->Flags & cpu_to_le16(NV_RX_CRCERR)) {
846 + np->stats.rx_crc_errors++;
847 + np->stats.rx_errors++;
850 + if (prd->Flags & cpu_to_le16(NV_RX_OVERFLOW)) {
851 + np->stats.rx_over_errors++;
852 + np->stats.rx_errors++;
855 + if (prd->Flags & cpu_to_le16(NV_RX_ERROR)) {
856 + /* framing errors are soft errors, the rest is fatal. */
857 + if (prd->Flags & cpu_to_le16(NV_RX_FRAMINGERR)) {
858 + if (prd->Flags & cpu_to_le16(NV_RX_SUBSTRACT1)) {
862 + np->stats.rx_errors++;
866 + /* got a valid packet - forward it to the network core */
867 + skb = np->rx_skbuff[i];
868 + np->rx_skbuff[i] = NULL;
871 + skb->protocol = eth_type_trans(skb, dev);
872 + dprintk(KERN_DEBUG "%s: rx_process: packet %d with %d bytes, proto %d accepted.\n",
873 + dev->name, np->cur_rx, len, skb->protocol);
875 + dev->last_rx = jiffies;
876 + np->stats.rx_packets++;
877 + np->stats.rx_bytes += len;
884 + * change_mtu: dev->change_mtu function
885 + * Called with dev_base_lock held for read.
887 +static int change_mtu(struct net_device *dev, int new_mtu)
889 + if (new_mtu > DEFAULT_MTU)
891 + dev->mtu = new_mtu;
896 + * set_multicast: dev->set_multicast_list function
897 + * Called with dev->xmit_lock held.
899 +static void set_multicast(struct net_device *dev)
901 + struct fe_priv *np = get_nvpriv(dev);
902 + u8 *base = get_hwbase(dev);
907 + memset(addr, 0, sizeof(addr));
908 + memset(mask, 0, sizeof(mask));
910 + if (dev->flags & IFF_PROMISC) {
911 + printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
912 + pff = NVREG_PFF_PROMISC;
914 + pff = NVREG_PFF_MYADDR;
916 + if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
920 + alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
921 + if (dev->flags & IFF_ALLMULTI) {
922 + alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
924 + struct dev_mc_list *walk;
926 + walk = dev->mc_list;
927 + while (walk != NULL) {
929 + a = le32_to_cpu(*(u32 *) walk->dmi_addr);
930 + b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
932 + alwaysOff[0] &= ~a;
934 + alwaysOff[1] &= ~b;
938 + addr[0] = alwaysOn[0];
939 + addr[1] = alwaysOn[1];
940 + mask[0] = alwaysOn[0] | alwaysOff[0];
941 + mask[1] = alwaysOn[1] | alwaysOff[1];
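+ /* addr now holds the bit values shared by every list entry and mask
+  * marks the positions where they all agree; presumably the nic accepts
+  * frames whose destination matches addr on each bit set in mask. */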
944 + addr[0] |= NVREG_MCASTADDRA_FORCE;
945 + pff |= NVREG_PFF_ALWAYS;
946 + spin_lock_irq(&np->lock);
948 + writel(addr[0], base + NvRegMulticastAddrA);
949 + writel(addr[1], base + NvRegMulticastAddrB);
950 + writel(mask[0], base + NvRegMulticastMaskA);
951 + writel(mask[1], base + NvRegMulticastMaskB);
952 + writel(pff, base + NvRegPacketFilterFlags);
954 + spin_unlock_irq(&np->lock);
957 +static int update_linkspeed(struct net_device *dev)
959 + struct fe_priv *np = get_nvpriv(dev);
960 + int adv, lpa, newls, newdup;
962 + adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
963 + lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
964 + dprintk(KERN_DEBUG "%s: update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
965 + dev->name, adv, lpa);
967 + /* FIXME: handle parallel detection properly, handle gigabit ethernet */
969 + if (lpa & LPA_100FULL) {
970 + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
972 + } else if (lpa & LPA_100HALF) {
973 + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
975 + } else if (lpa & LPA_10FULL) {
976 + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
978 + } else if (lpa & LPA_10HALF) {
979 + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
982 + dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
983 + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
986 + if (np->duplex != newdup || np->linkspeed != newls) {
987 + np->duplex = newdup;
988 + np->linkspeed = newls;
994 +static void link_irq(struct net_device *dev)
996 + struct fe_priv *np = get_nvpriv(dev);
997 + u8 *base = get_hwbase(dev);
1001 + miistat = readl(base + NvRegMIIStatus);
1002 + writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1003 + printk(KERN_DEBUG "%s: link change notification, status 0x%x.\n", dev->name, miistat);
1005 + miival = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1006 + if (miival & BMSR_ANEGCOMPLETE) {
1007 + update_linkspeed(dev);
1009 + if (netif_carrier_ok(dev)) {
1012 + netif_carrier_on(dev);
1013 + printk(KERN_INFO "%s: link up.\n", dev->name);
1015 + writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
1016 + base + NvRegMisc1);
1019 + if (netif_carrier_ok(dev)) {
1020 + netif_carrier_off(dev);
1021 + printk(KERN_INFO "%s: link down.\n", dev->name);
1024 + writel(np->linkspeed, base + NvRegLinkSpeed);
1029 +static irqreturn_t nic_irq(int foo, void *data, struct pt_regs *regs)
1031 + struct net_device *dev = (struct net_device *) data;
1032 + struct fe_priv *np = get_nvpriv(dev);
1033 + u8 *base = get_hwbase(dev);
1036 + dprintk(KERN_DEBUG "%s: nic_irq\n", dev->name);
1039 + events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
1040 + writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
1042 + dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
1043 + if (!(events & np->irqmask))
1046 + /* FIXME: only call the required processing functions */
1047 + if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX2)) {
1048 + spin_lock(&np->lock);
1050 + spin_unlock(&np->lock);
1053 + if (events & NVREG_IRQ_RX) {
1055 + if (alloc_rx(dev)) {
1056 + spin_lock(&np->lock);
1057 + if (!np->in_shutdown)
1058 + mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1059 + spin_unlock(&np->lock);
1063 + if (events & NVREG_IRQ_LINK) {
1064 + spin_lock(&np->lock);
1066 + spin_unlock(&np->lock);
1068 + if (events & (NVREG_IRQ_UNKNOWN)) {
1069 +		printk(KERN_DEBUG "%s: received irq with unknown source 0x%x.\n", dev->name, events);
1071 + /* FIXME: general errors, link change interrupts */
1073 + dprintk(KERN_DEBUG "%s: nic_irq completed\n", dev->name);
1075 + return IRQ_HANDLED;
1078 +static int open(struct net_device *dev)
1080 + struct fe_priv *np = get_nvpriv(dev);
1081 + u8 *base = get_hwbase(dev);
1084 + dprintk(KERN_DEBUG "forcedeth: open\n");
1086 + /* 1) erase previous misconfiguration */
1087 + /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
1088 + writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
1089 + writel(0, base + NvRegMulticastAddrB);
1090 + writel(0, base + NvRegMulticastMaskA);
1091 + writel(0, base + NvRegMulticastMaskB);
1092 + writel(0, base + NvRegPacketFilterFlags);
1093 + writel(0, base + NvRegAdapterControl);
1094 + writel(0, base + NvRegLinkSpeed);
1095 + writel(0, base + NvRegUnknownTransmitterReg);
1097 + writel(0, base + NvRegUnknownSetupReg6);
1099 + /* 2) initialize descriptor rings */
1100 + np->in_shutdown = 0;
1101 + oom = init_ring(dev);
1103 + /* 3) set mac address */
1107 + mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
1108 + (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
1109 + mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
1111 + writel(mac[0], base + NvRegMacAddrA);
1112 + writel(mac[1], base + NvRegMacAddrB);
1115 + /* 4) continue setup */
1116 + np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1118 + writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
1119 + writel(0, base + NvRegTxRxControl);
1121 + writel(NVREG_TXRXCTL_BIT1, base + NvRegTxRxControl);
1122 + reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
1123 + NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
1124 + KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
1125 + writel(0, base + NvRegUnknownSetupReg4);
1127 + /* 5) Find a suitable PHY */
1128 + writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
1129 + for (i = 1; i < 32; i++) {
1132 + id1 = mii_rw(dev, i, MII_PHYSID1, MII_READ);
1135 + id2 = mii_rw(dev, i, MII_PHYSID2, MII_READ);
1138 + dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
1139 + dev->name, id1, id2, i);
1142 + update_linkspeed(dev);
1147 + printk(KERN_INFO "%s: open: failing due to lack of suitable PHY.\n",
1153 + /* 6) continue setup */
1154 + writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
1155 + base + NvRegMisc1);
1156 + writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
1157 + writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
1158 + writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig);
1160 + writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
1161 + get_random_bytes(&i, sizeof(i));
1162 + writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
1163 + writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
1164 + writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
1165 + writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
1166 + writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID,
1167 + base + NvRegAdapterControl);
1168 + writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
1169 + writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
1171 + /* 7) start packet processing */
1172 + writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
1173 + writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1174 + writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1175 + base + NvRegRingSizes);
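+ /* rx and tx descriptors share one consistent allocation: the tx ring
+  * starts RX_RING entries after ring_addr (see probe_nic). */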
1177 + i = readl(base + NvRegPowerState);
1178 + if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) {
1179 + writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
1183 + writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
1184 + writel(NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
1187 + writel(0, base + NvRegIrqMask);
1189 + writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
1191 + writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
1192 + writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
1195 + ret = request_irq(dev->irq, &nic_irq, SA_SHIRQ, dev->name, dev);
1199 + writel(np->irqmask, base + NvRegIrqMask);
1200 + spin_lock_irq(&np->lock);
1202 + set_multicast(dev);
1204 + netif_start_queue(dev);
1206 + mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1207 + if (!(mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ) & BMSR_ANEGCOMPLETE)) {
1208 +		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
1209 + netif_carrier_off(dev);
1212 + spin_unlock_irq(&np->lock);
1220 +static int close(struct net_device *dev)
1222 + struct fe_priv *np = get_nvpriv(dev);
1224 + spin_lock_irq(&np->lock);
1225 + np->in_shutdown = 1;
1226 + spin_unlock_irq(&np->lock);
1227 + synchronize_irq(dev->irq);
1229 + del_timer_sync(&np->oom_kick);
1231 + netif_stop_queue(dev);
1232 + spin_lock_irq(&np->lock);
1235 + spin_unlock_irq(&np->lock);
1237 + free_irq(dev->irq, dev);
1241 + /* FIXME: power down nic */
1246 +static int __devinit probe_nic(struct pci_dev *pci_dev, const struct pci_device_id *id)
1248 + struct net_device *dev;
1249 + struct fe_priv *np;
1250 + unsigned long addr;
1254 + dev = alloc_etherdev(sizeof(struct fe_priv));
1255 + np = get_nvpriv(dev);
1260 + np->pci_dev = pci_dev;
1261 + spin_lock_init(&np->lock);
1262 + SET_MODULE_OWNER(dev);
1263 + SET_NETDEV_DEV(dev, &pci_dev->dev);
1265 + err = pci_enable_device(pci_dev);
1267 + printk(KERN_INFO "forcedeth: pci_enable_dev failed: %d\n", err);
1271 + pci_set_master(pci_dev);
1273 + err = pci_request_regions(pci_dev, dev->name);
1279 + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1280 + dprintk(KERN_DEBUG "forcedeth: resource %d start %p len %ld flags 0x%08lx.\n",
1281 + i, (void*)pci_resource_start(pci_dev, i),
1282 + pci_resource_len(pci_dev, i),
1283 + pci_resource_flags(pci_dev, i));
1284 + if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
1285 + pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) {
1286 + addr = pci_resource_start(pci_dev, i);
1290 + if (i == DEVICE_COUNT_RESOURCE) {
1291 + printk(KERN_INFO "forcedeth: Couldn't find register window.\n");
1296 + dev->base_addr = (unsigned long) ioremap(addr, NV_PCI_REGSZ);
1297 + if (!dev->base_addr)
1299 + dev->irq = pci_dev->irq;
1300 + np->rx_ring = pci_alloc_consistent(pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
1304 + np->tx_ring = &np->rx_ring[RX_RING];
1306 + err = register_netdev(dev);
1308 + printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
1309 + goto out_freering;
1312 + printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x\n",
1313 + dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device);
1316 + dev->stop = close;
1317 + dev->hard_start_xmit = start_xmit;
1318 + dev->get_stats = get_stats;
1319 + dev->change_mtu = change_mtu;
1320 + dev->set_multicast_list = set_multicast;
1321 + dev->do_ioctl = nic_ioctl;
1322 + dev->tx_timeout = tx_timeout;
1323 + dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
1325 + pci_set_drvdata(pci_dev, dev);
1327 + /* read the mac address */
1328 + base = get_hwbase(dev);
1329 + np->orig_mac[0] = readl(base + NvRegMacAddrA);
1330 + np->orig_mac[1] = readl(base + NvRegMacAddrB);
1332 + dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
1333 + dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
1334 + dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
1335 + dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
1336 + dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
1337 + dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
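+ /* the nic seems to store the mac address in reversed byte order;
+  * remove_nic writes orig_mac back so that a later probe still finds
+  * a valid address. */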
1339 + dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
1340 + dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1341 + dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
1343 +	np->tx_flags = cpu_to_le16(NV_TX_LASTPACKET|NV_TX_VALID);
1344 + if (id->driver_data & DEV_NEED_LASTPACKET1)
1345 + np->tx_flags |= cpu_to_le16(NV_TX_LASTPACKET1);
1346 + if (id->driver_data & DEV_IRQMASK_1)
1347 + np->irqmask = NVREG_IRQMASK_WANTED_1;
1348 + if (id->driver_data & DEV_IRQMASK_2)
1349 + np->irqmask = NVREG_IRQMASK_WANTED_2;
1354 + pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
1355 + np->rx_ring, np->ring_addr);
1357 + iounmap(get_hwbase(dev));
1359 + pci_release_regions(pci_dev);
1361 + pci_disable_device(pci_dev);
1364 + pci_set_drvdata(pci_dev, NULL);
1369 +static void __devexit remove_nic(struct pci_dev *pci_dev)
1371 + struct net_device *dev = pci_get_drvdata(pci_dev);
1372 + struct fe_priv *np = get_nvpriv(dev);
1373 + u8 *base = get_hwbase(dev);
1375 + unregister_netdev(dev);
1377 + /* special op: write back the misordered MAC address - otherwise
1378 + * the next probe_nic would see a wrong address.
1380 + writel(np->orig_mac[0], base + NvRegMacAddrA);
1381 + writel(np->orig_mac[1], base + NvRegMacAddrB);
1383 + /* free all structures */
1384 + pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring, np->ring_addr);
1385 + iounmap(get_hwbase(dev));
1386 + pci_release_regions(pci_dev);
1387 + pci_disable_device(pci_dev);
1389 + pci_set_drvdata(pci_dev, NULL);
1392 +static struct pci_device_id pci_tbl[] = {
1393 + { /* nForce Ethernet Controller */
1394 + .vendor = PCI_VENDOR_ID_NVIDIA,
1396 + .subvendor = PCI_ANY_ID,
1397 + .subdevice = PCI_ANY_ID,
1398 + .driver_data = DEV_IRQMASK_1,
1400 + { /* nForce2 Ethernet Controller */
1401 + .vendor = PCI_VENDOR_ID_NVIDIA,
1403 + .subvendor = PCI_ANY_ID,
1404 + .subdevice = PCI_ANY_ID,
1405 + .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2,
1407 + { /* nForce3 Ethernet Controller */
1408 + .vendor = PCI_VENDOR_ID_NVIDIA,
1410 + .subvendor = PCI_ANY_ID,
1411 + .subdevice = PCI_ANY_ID,
1412 + .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2,
1417 +static struct pci_driver driver = {
1418 + .name = "forcedeth",
1419 + .id_table = pci_tbl,
1420 + .probe = probe_nic,
1421 + .remove = __devexit_p(remove_nic),
1425 +static int __init init_nic(void)
1427 + printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
1428 + return pci_module_init(&driver);
1431 +static void __exit exit_nic(void)
1433 + pci_unregister_driver(&driver);
1436 +MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
1437 +MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
1438 +MODULE_LICENSE("GPL");
1440 +MODULE_DEVICE_TABLE(pci, pci_tbl);
1442 +module_init(init_nic);
1443 +module_exit(exit_nic);