/* acenic.h */
#ifndef _ACENIC_H_
#define _ACENIC_H_
#include <linux/interrupt.h>


/*
 * Generate a TX index update each time the TX ring is closed.
 * Normally this is not useful, because it results in more DMA (and IRQs
 * without TX_COAL_INTS_ONLY).
 */
#define USE_TX_COAL_NOW		0

/*
 * Addressing:
 *
 * The Tigon uses 64-bit host addresses, regardless of their actual
 * length, and it expects a big-endian format. For 32-bit systems the
 * upper 32 bits of the address are simply ignored (zero), however for
 * little-endian 64-bit systems (Alpha) this looks strange, with the
 * two parts of the address word being swapped.
 *
 * The addresses are split into two 32-bit words for all architectures,
 * as some of them are in PCI shared memory and it is necessary to use
 * readl/writel to access them.
 *
 * The addressing code is derived from Pete Wyckoff's work, but
 * modified to deal properly with readl/writel usage.
 */

struct ace_regs {
	u32	pad0[16];	/* PCI control registers */

	u32	HostCtrl;	/* 0x40 */

	u32	pad1[2];

	u32	MiscCfg;	/* 0x50 */

	u32	pad2[2];

	u32	pad3[2];	/* 0x60 */

	u32	pad4[12];	/* 0x70 */

	u32	DmaWriteState;	/* 0xa0 */
	u32	pad5[3];
	u32	DmaReadState;	/* 0xb0 */

	u32	pad6[26];

	u32	pad7[8];	/* 0x120 */

	u32	CpuCtrl;	/* 0x140 */

	u32	pad8[3];

	u32	SramAddr;	/* 0x154 */

	u32	pad9[49];

	u32	MacRxState;	/* 0x220 */

	u32	pad10[7];

	u32	CpuBCtrl;	/* 0x240 */

	u32	pad11[3];

	u32	SramBAddr;	/* 0x254 */

	u32	pad12[105];

	u32	pad13[32];	/* 0x400 */
	u32	Stats[32];

	u32	Mb0Hi;		/* 0x500 */

	u32	pad14[32];

	u32	MacAddrHi;	/* 0x600 */
	u32	MultiCastHi;	/* 0x610 */
	u32	DmaWriteCfg;	/* 0x620 */
	u32	TuneRxCoalTicks;	/* 0x630 */
	u32	TuneMaxRxDesc;	/* 0x640 */
	u32	TracePtr;	/* 0x650 */
	u32	IfMtu;		/* 0x660 */
	u32	pad16[4];	/* 0x670 */
	u32	RxRetCsm;	/* 0x680 */

	u32	pad17[31];

	u32	CmdRng[64];	/* 0x700 */
	u32	Window[0x200];
};


typedef struct {
	u32	addrhi;
	u32	addrlo;
} aceaddr;

#define ACE_WINDOW_SIZE		0x800

#define ACE_JUMBO_MTU		9000
#define ACE_STD_MTU		1500

#define ACE_TRACE_SIZE		0x8000

/*
 * Host control register bits.
 */

#define IN_INT			0x01
#define CLR_INT			0x02
#define HW_RESET		0x08
#define BYTE_SWAP		0x10
#define WORD_SWAP		0x20
#define MASK_INTS		0x40

/*
 * Local control register bits.
 */

#define EEPROM_DATA_IN		0x800000
#define EEPROM_DATA_OUT		0x400000
#define EEPROM_WRITE_ENABLE	0x200000
#define EEPROM_CLK_OUT		0x100000

#define EEPROM_BASE		0xa0000000

#define EEPROM_WRITE_SELECT	0xa0
#define EEPROM_READ_SELECT	0xa1

#define SRAM_BANK_512K		0x200

/*
 * udelay() values for when clocking the eeprom
 */
#define ACE_SHORT_DELAY		2
#define ACE_LONG_DELAY		4

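/*
 * Illustrative sketch (not part of the driver): how the EEPROM_* bits and
 * the udelay() values above could be combined to clock one bit out to the
 * serial EEPROM.  The local-control register that carries these bits is not
 * shown in this listing, so the 'local_ctrl' pointer below is an assumption;
 * udelay() comes from <linux/delay.h>.
 */
#if 0
static inline void example_eeprom_clock_bit(void __iomem *local_ctrl, int bit)
{
	u32 val = readl(local_ctrl) & ~(EEPROM_DATA_OUT | EEPROM_CLK_OUT);

	if (bit)
		val |= EEPROM_DATA_OUT;		/* present the data bit */
	writel(val, local_ctrl);
	udelay(ACE_SHORT_DELAY);
	writel(val | EEPROM_CLK_OUT, local_ctrl);	/* raise the clock */
	udelay(ACE_LONG_DELAY);
	writel(val, local_ctrl);			/* drop the clock */
	udelay(ACE_SHORT_DELAY);
}
#endif
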

/*
 * Misc Config bits
 */

#define SYNC_SRAM_TIMING	0x100000

/*
 * CPU state bits.
 */

#define CPU_RESET		0x01
#define CPU_TRACE		0x02
#define CPU_PROM_FAILED		0x10
#define CPU_HALT		0x00010000
#define CPU_HALTED		0xffff0000


/*
 * PCI State bits.
 */

#define DMA_READ_MAX_4		0x04
#define DMA_READ_MAX_16		0x08
#define DMA_READ_MAX_32		0x0c
#define DMA_READ_MAX_64		0x10
#define DMA_READ_MAX_128	0x14
#define DMA_READ_MAX_256	0x18
#define DMA_READ_MAX_1K		0x1c
#define DMA_WRITE_MAX_4		0x20
#define DMA_WRITE_MAX_16	0x40
#define DMA_WRITE_MAX_32	0x60
#define DMA_WRITE_MAX_64	0x80
#define DMA_WRITE_MAX_128	0xa0
#define DMA_WRITE_MAX_256	0xc0
#define DMA_WRITE_MAX_1K	0xe0
#define DMA_READ_WRITE_MASK	0xfc
#define MEM_READ_MULTIPLE	0x00020000
#define PCI_66MHZ		0x00080000
#define PCI_32BIT		0x00100000
#define DMA_WRITE_ALL_ALIGN	0x00800000
#define READ_CMD_MEM		0x06000000
#define WRITE_CMD_MEM		0x70000000


/*
 * Mode status
 */

#define ACE_BYTE_SWAP_BD	0x02
#define ACE_WORD_SWAP_BD	0x04		/* not actually used */
#define ACE_WARN		0x08
#define ACE_BYTE_SWAP_DMA	0x10
#define ACE_NO_JUMBO_FRAG	0x200
#define ACE_FATAL		0x40000000


/*
 * DMA config
 */

#define DMA_THRESH_1W		0x10
#define DMA_THRESH_2W		0x20
#define DMA_THRESH_4W		0x40
#define DMA_THRESH_8W		0x80
#define DMA_THRESH_16W		0x100
#define DMA_THRESH_32W		0x0	/* not described in doc, but exists. */

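/*
 * Illustrative sketch (not part of the driver): the DMA_THRESH_* values
 * above are written into the DMA configuration registers, for example
 * DmaWriteCfg in struct ace_regs.  The threshold chosen here is only an
 * example value.
 */
#if 0
static void example_set_dma_write_threshold(struct ace_regs __iomem *regs)
{
	writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
}
#endif
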

/*
 * Tuning parameters
 */

#define TICKS_PER_SEC		1000000

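/*
 * Illustrative sketch (not part of the driver): with TICKS_PER_SEC set to
 * 1000000, the Tune*Ticks registers are effectively programmed in
 * microseconds.  The helper below only shows the idea.
 */
#if 0
static void example_set_rx_coalescing(struct ace_regs __iomem *regs, u32 usecs)
{
	/* TICKS_PER_SEC == 1000000, so the tick granularity is 1 us */
	writel(usecs, &regs->TuneRxCoalTicks);
}
#endif
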

/*
 * Link bits
 */

#define LNK_PREF		0x00008000
#define LNK_10MB		0x00010000
#define LNK_100MB		0x00020000
#define LNK_1000MB		0x00040000
#define LNK_FULL_DUPLEX		0x00080000
#define LNK_HALF_DUPLEX		0x00100000
#define LNK_TX_FLOW_CTL_Y	0x00200000
#define LNK_NEG_ADVANCED	0x00400000
#define LNK_RX_FLOW_CTL_Y	0x00800000
#define LNK_NIC			0x01000000
#define LNK_JAM			0x02000000
#define LNK_JUMBO		0x04000000
#define LNK_ALTEON		0x08000000
#define LNK_NEG_FCTL		0x10000000
#define LNK_NEGOTIATE		0x20000000
#define LNK_ENABLE		0x40000000
#define LNK_UP			0x80000000

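/*
 * Illustrative sketch (not part of the driver): the LNK_* bits are OR'ed
 * together into a single link-configuration word.  The register it is
 * written to is not shown in this listing, and the combination below is
 * only an example of requesting autonegotiation of all speeds with RX
 * flow control.
 */
#if 0
static u32 example_link_word(void)
{
	return LNK_ENABLE | LNK_NEGOTIATE | LNK_10MB | LNK_100MB |
	       LNK_1000MB | LNK_FULL_DUPLEX | LNK_RX_FLOW_CTL_Y;
}
#endif
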

/*
 * Event definitions
 */

#define EVT_RING_ENTRIES	256
#define EVT_RING_SIZE	(EVT_RING_ENTRIES * sizeof(struct event))

struct event {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32	idx:12;
	u32	code:12;
	u32	evt:8;
#else
	u32	evt:8;
	u32	code:12;
	u32	idx:12;
#endif
};


/*
 * Events
 */

#define E_FW_RUNNING		0x01
#define E_STATS_UPDATED		0x04

#define E_STATS_UPDATE		0x04

#define E_LNK_STATE		0x06
#define E_C_LINK_UP		0x01
#define E_C_LINK_DOWN		0x02
#define E_C_LINK_10_100		0x03

#define E_ERROR			0x07
#define E_C_ERR_INVAL_CMD	0x01
#define E_C_ERR_UNIMP_CMD	0x02
#define E_C_ERR_BAD_CFG		0x03

#define E_MCAST_LIST		0x08
#define E_C_MCAST_ADDR_ADD	0x01
#define E_C_MCAST_ADDR_DEL	0x02

#define E_RESET_JUMBO_RNG	0x09

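/*
 * Illustrative sketch (not part of the driver): how an event produced by
 * the NIC might be consumed from the event ring.  It assumes an event ring
 * and a NIC-updated producer index like the evt_ring/evt_prd members of
 * struct ace_private below; consumer-index bookkeeping is simplified.
 */
#if 0
static void example_handle_events(struct ace_private *ap, u32 evtcsm)
{
	u32 evtprd = *ap->evt_prd;	/* producer index written by the NIC */

	while (evtcsm != evtprd) {
		struct event *ev = &ap->evt_ring[evtcsm];

		switch (ev->evt) {
		case E_LNK_STATE:
			/* ev->code is E_C_LINK_UP, E_C_LINK_DOWN, ... */
			break;
		case E_ERROR:
			/* ev->code is one of the E_C_ERR_* values */
			break;
		}
		evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
	}
}
#endif
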
/*
 * Commands
 */

#define CMD_RING_ENTRIES	64

struct cmd {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32	idx:12;
	u32	code:12;
	u32	evt:8;
#else
	u32	evt:8;
	u32	code:12;
	u32	idx:12;
#endif
};


#define C_HOST_STATE		0x01
#define C_C_STACK_UP		0x01
#define C_C_STACK_DOWN		0x02

#define C_FDR_FILTERING		0x02
#define C_C_FDR_FILT_ENABLE	0x01
#define C_C_FDR_FILT_DISABLE	0x02

#define C_SET_RX_PRD_IDX	0x03
#define C_UPDATE_STATS		0x04
#define C_RESET_JUMBO_RNG	0x05
#define C_ADD_MULTICAST_ADDR	0x08
#define C_DEL_MULTICAST_ADDR	0x09

#define C_SET_PROMISC_MODE	0x0a
#define C_C_PROMISC_ENABLE	0x01
#define C_C_PROMISC_DISABLE	0x02

#define C_LNK_NEGOTIATION	0x0b
#define C_C_NEGOTIATE_BOTH	0x00
#define C_C_NEGOTIATE_GIG	0x01
#define C_C_NEGOTIATE_10_100	0x02

#define C_SET_MAC_ADDR		0x0c
#define C_CLEAR_PROFILE		0x0d

#define C_SET_MULTICAST_MODE	0x0e
#define C_C_MCAST_ENABLE	0x01
#define C_C_MCAST_DISABLE	0x02

#define C_CLEAR_STATS		0x0f
#define C_SET_RX_JUMBO_PRD_IDX	0x10
#define C_REFRESH_STATS		0x11

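/*
 * Illustrative sketch (not part of the driver): commands are posted to the
 * NIC by writing the 32-bit struct cmd image into the CmdRng[] area of
 * struct ace_regs.  The command producer index register is not shown in
 * this listing, so the 'cmd_prd' parameter below is only a stand-in for it.
 */
#if 0
static u32 example_issue_cmd(struct ace_regs __iomem *regs,
			     struct cmd *cmd, u32 cmd_prd)
{
	/* the command is a single 32-bit word in the NIC's command ring */
	writel(*(u32 *)cmd, &regs->CmdRng[cmd_prd]);

	/* return the next producer index; the real driver would also write
	 * it back to the NIC's command producer register (not shown here) */
	return (cmd_prd + 1) % CMD_RING_ENTRIES;
}
#endif
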

/*
 * Descriptor flags
 */
#define BD_FLG_TCP_UDP_SUM	0x01
#define BD_FLG_IP_SUM		0x02
#define BD_FLG_END		0x04
#define BD_FLG_MORE		0x08
#define BD_FLG_JUMBO		0x10
#define BD_FLG_UCAST		0x20
#define BD_FLG_MCAST		0x40
#define BD_FLG_BCAST		0x60
#define BD_FLG_TYP_MASK		0x60
#define BD_FLG_IP_FRAG		0x80
#define BD_FLG_IP_FRAG_END	0x100
#define BD_FLG_VLAN_TAG		0x200
#define BD_FLG_FRAME_ERROR	0x400
#define BD_FLG_COAL_NOW		0x800
#define BD_FLG_MINI		0x1000


/*
 * Ring Control block flags
 */
#define RCB_FLG_TCP_UDP_SUM	0x01
#define RCB_FLG_IP_SUM		0x02
#define RCB_FLG_NO_PSEUDO_HDR	0x08
#define RCB_FLG_VLAN_ASSIST	0x10
#define RCB_FLG_COAL_INT_ONLY	0x20
#define RCB_FLG_TX_HOST_RING	0x40
#define RCB_FLG_IEEE_SNAP_SUM	0x80
#define RCB_FLG_EXT_RX_BD	0x100
#define RCB_FLG_RNG_DISABLE	0x200


/*
 * TX ring - maximum TX ring entries for the Tigon I is 128
 */
#define MAX_TX_RING_ENTRIES	256
#define TIGON_I_TX_RING_ENTRIES	128
#define TX_RING_SIZE		(MAX_TX_RING_ENTRIES * sizeof(struct tx_desc))
#define TX_RING_BASE		0x3800

struct tx_desc{
	aceaddr	addr;
	u32	flagsize;
#if 0
/*
 * This is in PCI shared mem and must be accessed with readl/writel
 * real layout is:
 */
#if __LITTLE_ENDIAN
	u16	flags;
	u16	size;
	u16	vlan;
	u16	reserved;
#else
	u16	size;
	u16	flags;
	u16	reserved;
	u16	vlan;
#endif
#endif
	u32	vlanres;
};


#define RX_STD_RING_ENTRIES	512
#define RX_STD_RING_SIZE	(RX_STD_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_JUMBO_RING_ENTRIES	256
#define RX_JUMBO_RING_SIZE	(RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_MINI_RING_ENTRIES	1024
#define RX_MINI_RING_SIZE	(RX_MINI_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_RETURN_RING_ENTRIES	2048
#define RX_RETURN_RING_SIZE	(RX_RETURN_RING_ENTRIES * \
				 sizeof(struct rx_desc))

struct rx_desc{
	aceaddr	addr;
#ifdef __LITTLE_ENDIAN
	u16	size;
	u16	idx;
#else
	u16	idx;
	u16	size;
#endif
#ifdef __LITTLE_ENDIAN
	u16	flags;
	u16	type;
#else
	u16	type;
	u16	flags;
#endif
#ifdef __LITTLE_ENDIAN
	u16	tcp_udp_csum;
	u16	ip_csum;
#else
	u16	ip_csum;
	u16	tcp_udp_csum;
#endif
#ifdef __LITTLE_ENDIAN
	u16	vlan;
	u16	err_flags;
#else
	u16	err_flags;
	u16	vlan;
#endif
};


/*
 * This struct is shared with the NIC firmware.
 */
struct ring_ctrl {
	aceaddr	rngptr;
#ifdef __LITTLE_ENDIAN
	u16	flags;
	u16	max_len;
#else
	u16	max_len;
	u16	flags;
#endif
};

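/*
 * Illustrative sketch (not part of the driver): a ring control block is
 * filled in by the host and read by the firmware.  The flags and buffer
 * length below are only example values; set_aceaddr() is defined later in
 * this file.
 */
#if 0
static void example_init_rx_ring_ctrl(struct ring_ctrl *rcb,
				      dma_addr_t ring_dma, u16 buf_len)
{
	set_aceaddr(&rcb->rngptr, ring_dma);	/* host address of the ring */
	rcb->max_len = buf_len;
	rcb->flags = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR;
}
#endif
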

/*
 * MAC statistics block; the individual counter fields are not shown in
 * this listing.
 */
struct ace_mac_stats {
	/* ... */
	u32 pad[3];
	/* ... */
};


struct ace_info {
	union {
		u32 stats[256];
	} s;
};


struct ring_info {
	struct sk_buff		*skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};


/*
 * Funny... As soon as we add maplen on alpha, it starts to work
 * much slower. Hmm... is it because the struct does not fit in one
 * cacheline? So, split tx_ring_info.
 */
struct tx_ring_info {
	struct sk_buff		*skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(maplen);
};


/*
 * struct ace_skb holding the rings of skb's. This is an awful lot of
 * pointers, but I don't see any other smart way to do this in an
 * efficient manner ;-(
 */
struct ace_skb
{
	struct tx_ring_info	tx_skbuff[MAX_TX_RING_ENTRIES];
	struct ring_info	rx_std_skbuff[RX_STD_RING_ENTRIES];
	struct ring_info	rx_mini_skbuff[RX_MINI_RING_ENTRIES];
	struct ring_info	rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES];
};



/*
 * Struct private for the AceNIC.
 *
 * Elements are grouped so variables used by the tx handling go
 * together, and will go into the same cache lines etc. in order to
 * avoid cache line contention between the rx and tx handling on SMP.
 *
 * Frequently accessed variables are put at the beginning of the
 * struct to help the compiler generate better/shorter code.
 */
struct ace_private
{
	struct ace_info		*info;
	struct ace_regs	__iomem	*regs;		/* register base */
	struct ace_skb		*skb;
	dma_addr_t		info_dma;	/* 32/64 bit */

	int			version, link;

	/*
	 * TX elements
	 */
	struct tx_desc		*tx_ring;
	u32			tx_prd;
	volatile u32		tx_ret_csm;

	/*
	 * RX elements
	 */
	unsigned long		std_refill_busy
				__attribute__ ((aligned (SMP_CACHE_BYTES)));

	struct event		*evt_ring;

	volatile u32		*evt_prd, *rx_ret_prd, *tx_csm;

	dma_addr_t		tx_ring_dma;	/* 32/64 bit */

	unsigned char		*trace_buf;
	struct pci_dev		*pdev;
	struct net_device	*next;
	volatile int		fw_running;
	const char		*name;
#ifdef INDEX_DEBUG
	spinlock_t		debug_lock
				__attribute__ ((aligned (SMP_CACHE_BYTES)));
	u32			last_tx, last_std_rx, last_mini_rx;
#endif
};


#define TX_RESERVED	MAX_SKB_FRAGS

static inline int tx_space (struct ace_private *ap, u32 csm, u32 prd)
{
	return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1);
}

#define tx_free(ap)		tx_space(ap, (ap)->tx_ret_csm, (ap)->tx_prd)
#define tx_ring_full(ap, csm, prd)	(tx_space(ap, csm, prd) <= TX_RESERVED)

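/*
 * Illustrative sketch (not part of the driver): typical use of the ring
 * accounting helpers above in a transmit path, stopping the queue when the
 * free space in the TX ring drops to the reserved threshold.
 */
#if 0
static void example_maybe_stop_queue(struct net_device *dev,
				     struct ace_private *ap)
{
	if (tx_ring_full(ap, ap->tx_ret_csm, ap->tx_prd))
		netif_stop_queue(dev);
}
#endif
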
static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr)
{
	u64 baddr = (u64) addr;

	aa->addrlo = baddr & 0xffffffff;
	aa->addrhi = baddr >> 32;
	wmb();
}

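/*
 * Illustrative sketch (not part of the driver): set_aceaddr() splits a DMA
 * address into the two 32-bit halves the NIC expects, for example when
 * pointing a TX descriptor at a mapped skb.  The mapping shown here is only
 * an example.
 */
#if 0
static void example_point_tx_desc_at_skb(struct ace_private *ap,
					 struct sk_buff *skb, u32 idx)
{
	dma_addr_t mapping = pci_map_single(ap->pdev, skb->data, skb->len,
					    PCI_DMA_TODEVICE);

	set_aceaddr(&ap->tx_ring[idx].addr, mapping);
}
#endif
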

static inline void ace_set_txprd(struct ace_regs __iomem *regs,
				 struct ace_private *ap, u32 value)
{
#ifdef INDEX_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&ap->debug_lock, flags);
	writel(value, &regs->TxPrd);
	if (value == ap->last_tx)
		printk(KERN_ERR "AceNIC RACE ALERT! writing identical value "
		       "to tx producer (%i)\n", value);
	ap->last_tx = value;
	spin_unlock_irqrestore(&ap->debug_lock, flags);
#else
	writel(value, &regs->TxPrd);
#endif
	wmb();
}


static inline void ace_mask_irq(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	if (ACE_IS_TIGON_I(ap))
		writel(1, &regs->MaskInt);
	else
		writel(readl(&regs->HostCtrl) | MASK_INTS, &regs->HostCtrl);

	ace_sync_irq(dev->irq);
}


static inline void ace_unmask_irq(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	if (ACE_IS_TIGON_I(ap))
		writel(0, &regs->MaskInt);
	else
		writel(readl(&regs->HostCtrl) & ~MASK_INTS, &regs->HostCtrl);
}


/*
 * Prototypes
 */
static int ace_init(struct net_device *dev);
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs);
static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs);
static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs);
static irqreturn_t ace_interrupt(int irq, void *dev_id);
static int ace_load_firmware(struct net_device *dev);
static int ace_open(struct net_device *dev);
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
				  struct net_device *dev);
static int ace_close(struct net_device *dev);
static void ace_tasklet(unsigned long dev);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
static int ace_set_mac_addr(struct net_device *dev, void *p);
static void ace_set_rxtx_parms(struct net_device *dev, int jumbo);
static int ace_allocate_descriptors(struct net_device *dev);
static void ace_free_descriptors(struct net_device *dev);
static void ace_init_cleanup(struct net_device *dev);
static struct net_device_stats *ace_get_stats(struct net_device *dev);
static int read_eeprom_byte(struct net_device *dev, unsigned long offset);

#endif /* _ACENIC_H_ */