1 diff -u --recursive --new-file v2.4.12/linux/drivers/net/acenic.c linux/drivers/net/acenic.c
2 --- v2.4.12/linux/drivers/net/acenic.c Thu Oct 11 08:02:26 2001
3 +++ linux/drivers/net/acenic.c Fri Oct 12 15:35:53 2001
5 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8 -#if (BITS_PER_LONG == 64) || defined(CONFIG_HIGHMEM)
9 -#define ACE_64BIT_PTR 1
12 #ifndef SET_MODULE_OWNER
13 #define SET_MODULE_OWNER(dev) {do{} while(0);}
14 #define ACE_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
16 *dma_handle = virt_to_bus(virt_ptr);
20 #define pci_free_consistent(cookie, size, ptr, dma_ptr) kfree(ptr)
21 -#define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
22 -#define pci_unmap_single(cookie, address, size, dir)
23 +#define pci_map_page(cookie, page, off, size, dir) \
24 + virt_to_bus(page_address(page)+(off))
25 +#define pci_unmap_page(cookie, address, size, dir)
26 +#define pci_set_dma_mask(dev, mask) \
27 + (((u64)(mask) & 0xffffffff00000000) == 0 ? 0 : -EIO)
28 +#define pci_dma_supported(dev, mask) \
29 + (((u64)(mask) & 0xffffffff00000000) == 0 ? 1 : 0)
32 #if (LINUX_VERSION_CODE < 0x02032b)
34 #define ace_if_down(dev) {do{} while(0);}
37 -#ifndef pci_set_dma_mask
38 -#define pci_set_dma_mask(dev, mask) dev->dma_mask = mask;
41 #if (LINUX_VERSION_CODE >= 0x02031b)
43 #define ACE_PROBE_ARG void
46 dev->open = &ace_open;
47 dev->hard_start_xmit = &ace_start_xmit;
48 - dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;
49 + dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
51 static void ace_watchdog(struct net_device *dev);
52 dev->tx_timeout = &ace_watchdog;
57 + if (ap->pci_using_dac)
58 + dev->features |= NETIF_F_HIGHDMA;
65 mapping = ap->skb->rx_std_skbuff[i].mapping;
66 - pci_unmap_single(ap->pdev, mapping,
67 - ACE_STD_BUFSIZE - (2 + 16),
68 - PCI_DMA_FROMDEVICE);
69 + pci_unmap_page(ap->pdev, mapping,
70 + ACE_STD_BUFSIZE - (2 + 16),
71 + PCI_DMA_FROMDEVICE);
73 ap->rx_std_ring[i].size = 0;
74 ap->skb->rx_std_skbuff[i].skb = NULL;
78 mapping = ap->skb->rx_mini_skbuff[i].mapping;
79 - pci_unmap_single(ap->pdev, mapping,
80 - ACE_MINI_BUFSIZE - (2 + 16),
81 - PCI_DMA_FROMDEVICE);
82 + pci_unmap_page(ap->pdev, mapping,
83 + ACE_MINI_BUFSIZE - (2 + 16),
84 + PCI_DMA_FROMDEVICE);
86 ap->rx_mini_ring[i].size = 0;
87 ap->skb->rx_mini_skbuff[i].skb = NULL;
91 mapping = ap->skb->rx_jumbo_skbuff[i].mapping;
92 - pci_unmap_single(ap->pdev, mapping,
93 - ACE_JUMBO_BUFSIZE - (2 + 16),
94 - PCI_DMA_FROMDEVICE);
95 + pci_unmap_page(ap->pdev, mapping,
96 + ACE_JUMBO_BUFSIZE - (2 + 16),
97 + PCI_DMA_FROMDEVICE);
99 ap->rx_jumbo_ring[i].size = 0;
100 ap->skb->rx_jumbo_skbuff[i].skb = NULL;
101 @@ -1210,12 +1210,6 @@
105 - * Make sure to enable the 64 bit DMA mask if we're in a 64bit slot
107 - if (!(pci_state & PCI_32BIT))
108 - pci_set_dma_mask(ap->pdev, (dma_addr_t)~0ULL);
111 * Set the max DMA transfer size. Seems that for most systems
112 * the performance is better when no MAX parameter is
113 * set. However for systems enabling PCI write and invalidate,
114 @@ -1309,12 +1303,24 @@
118 + * Configure DMA attributes.
120 + if (!pci_set_dma_mask(ap->pdev, (u64) 0xffffffffffffffff)) {
121 + ap->pci_using_dac = 1;
122 + } else if (!pci_set_dma_mask(ap->pdev, (u64) 0xffffffff)) {
123 + ap->pci_using_dac = 0;
130 * Initialize the generic info block and the command+event rings
131 * and the control blocks for the transmit and receive rings
132 * as they need to be setup once and for all.
134 if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
140 @@ -1355,12 +1361,8 @@
141 ace_load_firmware(dev);
144 - tmp_ptr = (unsigned long) ap->info_dma;
145 -#ifdef ACE_64BIT_PTR
146 + tmp_ptr = (u64) ap->info_dma;
147 	writel(tmp_ptr >> 32, &regs->InfoPtrHi);
149 -	writel(0, &regs->InfoPtrHi);
151 	writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);
153 memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));
154 @@ -1796,9 +1798,12 @@
155 * Make sure IP header starts on a fresh cache line.
157 skb_reserve(skb, 2 + 16);
158 - mapping = pci_map_single(ap->pdev, skb->data,
159 - ACE_STD_BUFSIZE - (2 + 16),
160 - PCI_DMA_FROMDEVICE);
161 + mapping = pci_map_page(ap->pdev,
162 + virt_to_page(skb->data),
163 + ((unsigned long) skb->data &
165 + ACE_STD_BUFSIZE - (2 + 16),
166 + PCI_DMA_FROMDEVICE);
167 ap->skb->rx_std_skbuff[idx].skb = skb;
168 ap->skb->rx_std_skbuff[idx].mapping = mapping;
170 @@ -1860,9 +1865,12 @@
171 * Make sure the IP header ends up on a fresh cache line
173 skb_reserve(skb, 2 + 16);
174 - mapping = pci_map_single(ap->pdev, skb->data,
175 - ACE_MINI_BUFSIZE - (2 + 16),
176 - PCI_DMA_FROMDEVICE);
177 + mapping = pci_map_page(ap->pdev,
178 + virt_to_page(skb->data),
179 + ((unsigned long) skb->data &
181 + ACE_MINI_BUFSIZE - (2 + 16),
182 + PCI_DMA_FROMDEVICE);
183 ap->skb->rx_mini_skbuff[idx].skb = skb;
184 ap->skb->rx_mini_skbuff[idx].mapping = mapping;
186 @@ -1919,9 +1927,12 @@
187 * Make sure the IP header ends up on a fresh cache line
189 skb_reserve(skb, 2 + 16);
190 - mapping = pci_map_single(ap->pdev, skb->data,
191 - ACE_JUMBO_BUFSIZE - (2 + 16),
192 - PCI_DMA_FROMDEVICE);
193 + mapping = pci_map_page(ap->pdev,
194 + virt_to_page(skb->data),
195 + ((unsigned long) skb->data &
197 + ACE_JUMBO_BUFSIZE - (2 + 16),
198 + PCI_DMA_FROMDEVICE);
199 ap->skb->rx_jumbo_skbuff[idx].skb = skb;
200 ap->skb->rx_jumbo_skbuff[idx].mapping = mapping;
202 @@ -2129,8 +2140,8 @@
206 - pci_unmap_single(ap->pdev, rip->mapping, mapsize,
207 - PCI_DMA_FROMDEVICE);
208 + pci_unmap_page(ap->pdev, rip->mapping, mapsize,
209 + PCI_DMA_FROMDEVICE);
210 skb_put(skb, retdesc->size);
213 @@ -2198,8 +2209,8 @@
214 mapping = info->mapping;
217 - pci_unmap_single(ap->pdev, mapping, info->maplen,
219 + pci_unmap_page(ap->pdev, mapping, info->maplen,
224 @@ -2488,11 +2499,10 @@
227 memset(ap->tx_ring+i, 0, sizeof(struct tx_desc));
228 - pci_unmap_single(ap->pdev, mapping, info->maplen,
230 + pci_unmap_page(ap->pdev, mapping, info->maplen,
238 @@ -2512,75 +2522,35 @@
244 - * Following below should be (in more clean form!) in arch/ARCH/kernel/pci_*.
245 - * For now, let it stay here.
247 -#if defined(CONFIG_HIGHMEM) && MAX_SKB_FRAGS
249 -#if defined(CONFIG_X86)
250 -#define DMAADDR_OFFSET 0
251 -typedef unsigned long long dmaaddr_high_t;
252 -#elif defined(CONFIG_PPC)
253 -#define DMAADDR_OFFSET PCI_DRAM_OFFSET
254 -typedef unsigned long dmaaddr_high_t;
258 -static inline dmaaddr_high_t
259 -pci_map_single_high(struct pci_dev *hwdev, struct page *page,
260 - int offset, size_t size, int dir)
262 - dmaaddr_high_t phys;
264 - phys = (page-mem_map) * (dmaaddr_high_t) PAGE_SIZE + offset;
266 - return (phys + DMAADDR_OFFSET);
271 -typedef unsigned long dmaaddr_high_t;
273 -static inline dmaaddr_high_t
274 -pci_map_single_high(struct pci_dev *hwdev, struct page *page,
275 - int offset, size_t size, int dir)
277 - return pci_map_single(hwdev, page_address(page) + offset, size, dir);
283 -static inline dmaaddr_high_t
284 +static inline dma_addr_t
285 ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
286 struct sk_buff *tail, u32 idx)
289 struct tx_ring_info *info;
291 - addr = pci_map_single(ap->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
292 + addr = pci_map_page(ap->pdev,
293 + virt_to_page(skb->data),
294 + ((unsigned long) skb->data &
296 + skb->len, PCI_DMA_TODEVICE);
298 info = ap->skb->tx_skbuff + idx;
300 info->mapping = addr;
301 info->maplen = skb->len;
308 -ace_load_tx_bd(struct tx_desc *desc, dmaaddr_high_t addr, u32 flagsize)
309 +ace_load_tx_bd(struct tx_desc *desc, u64 addr, u32 flagsize)
312 flagsize &= ~BD_FLG_COAL_NOW;
315 -#ifdef ACE_64BIT_PTR
316 desc->addr.addrhi = addr >> 32;
318 desc->addr.addrlo = addr;
319 desc->flagsize = flagsize;
321 @@ -2642,16 +2612,16 @@
322 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
323 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
324 struct tx_ring_info *info;
325 - dmaaddr_high_t phys;
329 info = ap->skb->tx_skbuff + idx;
330 desc = ap->tx_ring + idx;
332 - phys = pci_map_single_high(ap->pdev, frag->page,
336 + phys = pci_map_page(ap->pdev, frag->page,
341 flagsize = (frag->size << 16);
342 if (skb->ip_summed == CHECKSUM_HW)
343 @@ -2673,7 +2643,6 @@
345 info->mapping = phys;
346 info->maplen = frag->size;
348 ace_load_tx_bd(desc, phys, flagsize);
351 @@ -2995,7 +2964,7 @@
354 tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
355 - min_t(u32, size, ACE_WINDOW_SIZE));
356 + min_t(u32, size, ACE_WINDOW_SIZE));
357 		tdest = (unsigned long)&regs->Window +
358 (dest & (ACE_WINDOW_SIZE - 1));
359 		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
360 @@ -3026,7 +2995,7 @@
363 tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
364 - min_t(u32, size, ACE_WINDOW_SIZE));
365 + min_t(u32, size, ACE_WINDOW_SIZE));
366 		tdest = (unsigned long)&regs->Window +
367 (dest & (ACE_WINDOW_SIZE - 1));
368 		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
369 diff -u --recursive --new-file v2.4.12/linux/drivers/net/acenic.h linux/drivers/net/acenic.h
370 --- v2.4.12/linux/drivers/net/acenic.h Thu Oct 11 08:02:26 2001
371 +++ linux/drivers/net/acenic.h Fri Oct 12 15:35:53 2001
381 u32 last_tx, last_std_rx, last_mini_rx;
383 struct net_device_stats stats;
388 @@ -705,31 +705,11 @@
390 static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr)
392 - unsigned long baddr = (unsigned long) addr;
393 -#ifdef ACE_64BIT_PTR
394 + u64 baddr = (u64) addr;
395 aa->addrlo = baddr & 0xffffffff;
396 aa->addrhi = baddr >> 32;
398 - /* Don't bother setting zero every time */
399 - aa->addrlo = baddr;
406 -static inline void *get_aceaddr(aceaddr *aa)
408 - unsigned long addr;
410 -#ifdef ACE_64BIT_PTR
411 - addr = (u64)aa->addrhi << 32 | aa->addrlo;
415 - return (void *)addr;
420 static inline void ace_set_txprd(struct ace_regs *regs,