ks8851_mll.c — Micrel KS8851-16MLL Ethernet driver, from the Linux kernel 3.7.1 source tree.
(Doxygen-generated source listing; page navigation text removed.)
19 /* Supports:
20  * KS8851 16bit MLL chip from Micrel Inc.
21  */
22 
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 
25 #include <linux/interrupt.h>
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/cache.h>
32 #include <linux/crc32.h>
33 #include <linux/mii.h>
34 #include <linux/platform_device.h>
35 #include <linux/delay.h>
36 #include <linux/slab.h>
37 #include <linux/ks8851_mll.h>
38 
39 #define DRV_NAME "ks8851_mll"
40 
41 static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
42 #define MAX_RECV_FRAMES 255
43 #define MAX_BUF_SIZE 2048
44 #define TX_BUF_SIZE 2000
45 #define RX_BUF_SIZE 2000
46 
47 #define KS_CCR 0x08
48 #define CCR_EEPROM (1 << 9)
49 #define CCR_SPI (1 << 8)
50 #define CCR_8BIT (1 << 7)
51 #define CCR_16BIT (1 << 6)
52 #define CCR_32BIT (1 << 5)
53 #define CCR_SHARED (1 << 4)
54 #define CCR_32PIN (1 << 0)
55 
56 /* MAC address registers */
57 #define KS_MARL 0x10
58 #define KS_MARM 0x12
59 #define KS_MARH 0x14
60 
61 #define KS_OBCR 0x20
62 #define OBCR_ODS_16MA (1 << 6)
63 
64 #define KS_EEPCR 0x22
65 #define EEPCR_EESA (1 << 4)
66 #define EEPCR_EESB (1 << 3)
67 #define EEPCR_EEDO (1 << 2)
68 #define EEPCR_EESCK (1 << 1)
69 #define EEPCR_EECS (1 << 0)
70 
71 #define KS_MBIR 0x24
72 #define MBIR_TXMBF (1 << 12)
73 #define MBIR_TXMBFA (1 << 11)
74 #define MBIR_RXMBF (1 << 4)
75 #define MBIR_RXMBFA (1 << 3)
76 
77 #define KS_GRR 0x26
78 #define GRR_QMU (1 << 1)
79 #define GRR_GSR (1 << 0)
80 
81 #define KS_WFCR 0x2A
82 #define WFCR_MPRXE (1 << 7)
83 #define WFCR_WF3E (1 << 3)
84 #define WFCR_WF2E (1 << 2)
85 #define WFCR_WF1E (1 << 1)
86 #define WFCR_WF0E (1 << 0)
87 
88 #define KS_WF0CRC0 0x30
89 #define KS_WF0CRC1 0x32
90 #define KS_WF0BM0 0x34
91 #define KS_WF0BM1 0x36
92 #define KS_WF0BM2 0x38
93 #define KS_WF0BM3 0x3A
94 
95 #define KS_WF1CRC0 0x40
96 #define KS_WF1CRC1 0x42
97 #define KS_WF1BM0 0x44
98 #define KS_WF1BM1 0x46
99 #define KS_WF1BM2 0x48
100 #define KS_WF1BM3 0x4A
101 
102 #define KS_WF2CRC0 0x50
103 #define KS_WF2CRC1 0x52
104 #define KS_WF2BM0 0x54
105 #define KS_WF2BM1 0x56
106 #define KS_WF2BM2 0x58
107 #define KS_WF2BM3 0x5A
108 
109 #define KS_WF3CRC0 0x60
110 #define KS_WF3CRC1 0x62
111 #define KS_WF3BM0 0x64
112 #define KS_WF3BM1 0x66
113 #define KS_WF3BM2 0x68
114 #define KS_WF3BM3 0x6A
115 
116 #define KS_TXCR 0x70
117 #define TXCR_TCGICMP (1 << 8)
118 #define TXCR_TCGUDP (1 << 7)
119 #define TXCR_TCGTCP (1 << 6)
120 #define TXCR_TCGIP (1 << 5)
121 #define TXCR_FTXQ (1 << 4)
122 #define TXCR_TXFCE (1 << 3)
123 #define TXCR_TXPE (1 << 2)
124 #define TXCR_TXCRC (1 << 1)
125 #define TXCR_TXE (1 << 0)
126 
127 #define KS_TXSR 0x72
128 #define TXSR_TXLC (1 << 13)
129 #define TXSR_TXMC (1 << 12)
130 #define TXSR_TXFID_MASK (0x3f << 0)
131 #define TXSR_TXFID_SHIFT (0)
132 #define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
133 
134 
135 #define KS_RXCR1 0x74
136 #define RXCR1_FRXQ (1 << 15)
137 #define RXCR1_RXUDPFCC (1 << 14)
138 #define RXCR1_RXTCPFCC (1 << 13)
139 #define RXCR1_RXIPFCC (1 << 12)
140 #define RXCR1_RXPAFMA (1 << 11)
141 #define RXCR1_RXFCE (1 << 10)
142 #define RXCR1_RXEFE (1 << 9)
143 #define RXCR1_RXMAFMA (1 << 8)
144 #define RXCR1_RXBE (1 << 7)
145 #define RXCR1_RXME (1 << 6)
146 #define RXCR1_RXUE (1 << 5)
147 #define RXCR1_RXAE (1 << 4)
148 #define RXCR1_RXINVF (1 << 1)
149 #define RXCR1_RXE (1 << 0)
150 #define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
151  RXCR1_RXMAFMA | RXCR1_RXPAFMA)
152 
153 #define KS_RXCR2 0x76
154 #define RXCR2_SRDBL_MASK (0x7 << 5)
155 #define RXCR2_SRDBL_SHIFT (5)
156 #define RXCR2_SRDBL_4B (0x0 << 5)
157 #define RXCR2_SRDBL_8B (0x1 << 5)
158 #define RXCR2_SRDBL_16B (0x2 << 5)
159 #define RXCR2_SRDBL_32B (0x3 << 5)
160 /* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
161 #define RXCR2_IUFFP (1 << 4)
162 #define RXCR2_RXIUFCEZ (1 << 3)
163 #define RXCR2_UDPLFE (1 << 2)
164 #define RXCR2_RXICMPFCC (1 << 1)
165 #define RXCR2_RXSAF (1 << 0)
166 
167 #define KS_TXMIR 0x78
168 
169 #define KS_RXFHSR 0x7C
170 #define RXFSHR_RXFV (1 << 15)
171 #define RXFSHR_RXICMPFCS (1 << 13)
172 #define RXFSHR_RXIPFCS (1 << 12)
173 #define RXFSHR_RXTCPFCS (1 << 11)
174 #define RXFSHR_RXUDPFCS (1 << 10)
175 #define RXFSHR_RXBF (1 << 7)
176 #define RXFSHR_RXMF (1 << 6)
177 #define RXFSHR_RXUF (1 << 5)
178 #define RXFSHR_RXMR (1 << 4)
179 #define RXFSHR_RXFT (1 << 3)
180 #define RXFSHR_RXFTL (1 << 2)
181 #define RXFSHR_RXRF (1 << 1)
182 #define RXFSHR_RXCE (1 << 0)
183 #define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
184  RXFSHR_RXFTL | RXFSHR_RXMR |\
185  RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
186  RXFSHR_RXTCPFCS)
187 #define KS_RXFHBCR 0x7E
188 #define RXFHBCR_CNT_MASK 0x0FFF
189 
190 #define KS_TXQCR 0x80
191 #define TXQCR_AETFE (1 << 2)
192 #define TXQCR_TXQMAM (1 << 1)
193 #define TXQCR_METFE (1 << 0)
194 
195 #define KS_RXQCR 0x82
196 #define RXQCR_RXDTTS (1 << 12)
197 #define RXQCR_RXDBCTS (1 << 11)
198 #define RXQCR_RXFCTS (1 << 10)
199 #define RXQCR_RXIPHTOE (1 << 9)
200 #define RXQCR_RXDTTE (1 << 7)
201 #define RXQCR_RXDBCTE (1 << 6)
202 #define RXQCR_RXFCTE (1 << 5)
203 #define RXQCR_ADRFE (1 << 4)
204 #define RXQCR_SDA (1 << 3)
205 #define RXQCR_RRXEF (1 << 0)
206 #define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
207 
208 #define KS_TXFDPR 0x84
209 #define TXFDPR_TXFPAI (1 << 14)
210 #define TXFDPR_TXFP_MASK (0x7ff << 0)
211 #define TXFDPR_TXFP_SHIFT (0)
212 
213 #define KS_RXFDPR 0x86
214 #define RXFDPR_RXFPAI (1 << 14)
215 
216 #define KS_RXDTTR 0x8C
217 #define KS_RXDBCTR 0x8E
218 
219 #define KS_IER 0x90
220 #define KS_ISR 0x92
221 #define IRQ_LCI (1 << 15)
222 #define IRQ_TXI (1 << 14)
223 #define IRQ_RXI (1 << 13)
224 #define IRQ_RXOI (1 << 11)
225 #define IRQ_TXPSI (1 << 9)
226 #define IRQ_RXPSI (1 << 8)
227 #define IRQ_TXSAI (1 << 6)
228 #define IRQ_RXWFDI (1 << 5)
229 #define IRQ_RXMPDI (1 << 4)
230 #define IRQ_LDI (1 << 3)
231 #define IRQ_EDI (1 << 2)
232 #define IRQ_SPIBEI (1 << 1)
233 #define IRQ_DEDI (1 << 0)
234 
235 #define KS_RXFCTR 0x9C
236 #define RXFCTR_THRESHOLD_MASK 0x00FF
237 
238 #define KS_RXFC 0x9D
239 #define RXFCTR_RXFC_MASK (0xff << 8)
240 #define RXFCTR_RXFC_SHIFT (8)
241 #define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
242 #define RXFCTR_RXFCT_MASK (0xff << 0)
243 #define RXFCTR_RXFCT_SHIFT (0)
244 
245 #define KS_TXNTFSR 0x9E
246 
247 #define KS_MAHTR0 0xA0
248 #define KS_MAHTR1 0xA2
249 #define KS_MAHTR2 0xA4
250 #define KS_MAHTR3 0xA6
251 
252 #define KS_FCLWR 0xB0
253 #define KS_FCHWR 0xB2
254 #define KS_FCOWR 0xB4
255 
256 #define KS_CIDER 0xC0
257 #define CIDER_ID 0x8870
258 #define CIDER_REV_MASK (0x7 << 1)
259 #define CIDER_REV_SHIFT (1)
260 #define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
261 
262 #define KS_CGCR 0xC6
263 #define KS_IACR 0xC8
264 #define IACR_RDEN (1 << 12)
265 #define IACR_TSEL_MASK (0x3 << 10)
266 #define IACR_TSEL_SHIFT (10)
267 #define IACR_TSEL_MIB (0x3 << 10)
268 #define IACR_ADDR_MASK (0x1f << 0)
269 #define IACR_ADDR_SHIFT (0)
270 
271 #define KS_IADLR 0xD0
272 #define KS_IAHDR 0xD2
273 
274 #define KS_PMECR 0xD4
275 #define PMECR_PME_DELAY (1 << 14)
276 #define PMECR_PME_POL (1 << 12)
277 #define PMECR_WOL_WAKEUP (1 << 11)
278 #define PMECR_WOL_MAGICPKT (1 << 10)
279 #define PMECR_WOL_LINKUP (1 << 9)
280 #define PMECR_WOL_ENERGY (1 << 8)
281 #define PMECR_AUTO_WAKE_EN (1 << 7)
282 #define PMECR_WAKEUP_NORMAL (1 << 6)
283 #define PMECR_WKEVT_MASK (0xf << 2)
284 #define PMECR_WKEVT_SHIFT (2)
285 #define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
286 #define PMECR_WKEVT_ENERGY (0x1 << 2)
287 #define PMECR_WKEVT_LINK (0x2 << 2)
288 #define PMECR_WKEVT_MAGICPKT (0x4 << 2)
289 #define PMECR_WKEVT_FRAME (0x8 << 2)
290 #define PMECR_PM_MASK (0x3 << 0)
291 #define PMECR_PM_SHIFT (0)
292 #define PMECR_PM_NORMAL (0x0 << 0)
293 #define PMECR_PM_ENERGY (0x1 << 0)
294 #define PMECR_PM_SOFTDOWN (0x2 << 0)
295 #define PMECR_PM_POWERSAVE (0x3 << 0)
296 
297 /* Standard MII PHY data */
298 #define KS_P1MBCR 0xE4
299 #define P1MBCR_FORCE_FDX (1 << 8)
300 
301 #define KS_P1MBSR 0xE6
302 #define P1MBSR_AN_COMPLETE (1 << 5)
303 #define P1MBSR_AN_CAPABLE (1 << 3)
304 #define P1MBSR_LINK_UP (1 << 2)
305 
306 #define KS_PHY1ILR 0xE8
307 #define KS_PHY1IHR 0xEA
308 #define KS_P1ANAR 0xEC
309 #define KS_P1ANLPR 0xEE
310 
311 #define KS_P1SCLMD 0xF4
312 #define P1SCLMD_LEDOFF (1 << 15)
313 #define P1SCLMD_TXIDS (1 << 14)
314 #define P1SCLMD_RESTARTAN (1 << 13)
315 #define P1SCLMD_DISAUTOMDIX (1 << 10)
316 #define P1SCLMD_FORCEMDIX (1 << 9)
317 #define P1SCLMD_AUTONEGEN (1 << 7)
318 #define P1SCLMD_FORCE100 (1 << 6)
319 #define P1SCLMD_FORCEFDX (1 << 5)
320 #define P1SCLMD_ADV_FLOW (1 << 4)
321 #define P1SCLMD_ADV_100BT_FDX (1 << 3)
322 #define P1SCLMD_ADV_100BT_HDX (1 << 2)
323 #define P1SCLMD_ADV_10BT_FDX (1 << 1)
324 #define P1SCLMD_ADV_10BT_HDX (1 << 0)
325 
326 #define KS_P1CR 0xF6
327 #define P1CR_HP_MDIX (1 << 15)
328 #define P1CR_REV_POL (1 << 13)
329 #define P1CR_OP_100M (1 << 10)
330 #define P1CR_OP_FDX (1 << 9)
331 #define P1CR_OP_MDI (1 << 7)
332 #define P1CR_AN_DONE (1 << 6)
333 #define P1CR_LINK_GOOD (1 << 5)
334 #define P1CR_PNTR_FLOW (1 << 4)
335 #define P1CR_PNTR_100BT_FDX (1 << 3)
336 #define P1CR_PNTR_100BT_HDX (1 << 2)
337 #define P1CR_PNTR_10BT_FDX (1 << 1)
338 #define P1CR_PNTR_10BT_HDX (1 << 0)
339 
340 /* TX Frame control */
341 
342 #define TXFR_TXIC (1 << 15)
343 #define TXFR_TXFID_MASK (0x3f << 0)
344 #define TXFR_TXFID_SHIFT (0)
345 
346 #define KS_P1SR 0xF8
347 #define P1SR_HP_MDIX (1 << 15)
348 #define P1SR_REV_POL (1 << 13)
349 #define P1SR_OP_100M (1 << 10)
350 #define P1SR_OP_FDX (1 << 9)
351 #define P1SR_OP_MDI (1 << 7)
352 #define P1SR_AN_DONE (1 << 6)
353 #define P1SR_LINK_GOOD (1 << 5)
354 #define P1SR_PNTR_FLOW (1 << 4)
355 #define P1SR_PNTR_100BT_FDX (1 << 3)
356 #define P1SR_PNTR_100BT_HDX (1 << 2)
357 #define P1SR_PNTR_10BT_FDX (1 << 1)
358 #define P1SR_PNTR_10BT_HDX (1 << 0)
359 
360 #define ENUM_BUS_NONE 0
361 #define ENUM_BUS_8BIT 1
362 #define ENUM_BUS_16BIT 2
363 #define ENUM_BUS_32BIT 3
364 
365 #define MAX_MCAST_LST 32
366 #define HW_MCAST_SIZE 8
367 
377 union ks_tx_hdr {
378  u8 txb[4];
380 };
381 
423 /* Receive multiplex framer header info */
425  u16 sts; /* Frame status */
426  u16 len; /* Byte count */
427 };
428 
429 struct ks_net {
434  struct mutex lock; /* spinlock to be interrupt safe */
436  struct mii_if_info mii;
442 
458 };
459 
460 static int msg_enable;
461 
462 #define BE3 0x8000 /* Byte Enable 3 */
463 #define BE2 0x4000 /* Byte Enable 2 */
464 #define BE1 0x2000 /* Byte Enable 1 */
465 #define BE0 0x1000 /* Byte Enable 0 */
466 
467 /* register read/write calls.
468  *
469  * All these calls issue transactions to access the chip's registers. They
470  * all require that the necessary lock is held to prevent accesses when the
471  * chip is busy transferring packet data (RX/TX FIFO accesses).
472  */
473 
481 static u8 ks_rdreg8(struct ks_net *ks, int offset)
482 {
483  u16 data;
484  u8 shift_bit = offset & 0x03;
485  u8 shift_data = (offset & 1) << 3;
486  ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
488  data = ioread16(ks->hw_addr);
489  return (u8)(data >> shift_data);
490 }
491 
500 static u16 ks_rdreg16(struct ks_net *ks, int offset)
501 {
502  ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
504  return ioread16(ks->hw_addr);
505 }
506 
514 static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
515 {
516  u8 shift_bit = (offset & 0x03);
517  u16 value_write = (u16)(value << ((offset & 1) << 3));
518  ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
520  iowrite16(value_write, ks->hw_addr);
521 }
522 
531 static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
532 {
533  ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
535  iowrite16(value, ks->hw_addr);
536 }
537 
545 static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
546 {
547  len >>= 1;
548  while (len--)
549  *wptr++ = (u16)ioread16(ks->hw_addr);
550 }
551 
559 static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
560 {
561  len >>= 1;
562  while (len--)
563  iowrite16(*wptr++, ks->hw_addr);
564 }
565 
/* Mask every chip interrupt source by clearing the IER. */
static void ks_disable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, 0x0000);
} /* ks_disable_int */
570 
/* Re-enable the interrupt sources cached in ks->rc_ier (set by ks_setup_int). */
static void ks_enable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, ks->rc_ier);
} /* ks_enable_int */
575 
581 static inline u16 ks_tx_fifo_space(struct ks_net *ks)
582 {
583  return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
584 }
585 
/**
 * ks_save_cmd_reg - preserve the command-register selection across the IRQ
 * @ks: the chip state
 *
 * NOTE(review): as shown here the function performs no save; the
 * statement snapshotting ks->cmd_reg_cache appears to have been lost
 * in this copy (ks_irq() relies on the save/restore pairing) —
 * confirm against the original driver source.
 */
static inline void ks_save_cmd_reg(struct ks_net *ks)
{
	/* ks8851 MLL has a bug to read back the command register.
	 * So rely on software to save the content of command register.
	 */
}
598 
/**
 * ks_restore_cmd_reg - re-select the register window saved at IRQ entry
 * @ks: the chip state
 *
 * NOTE(review): the body is empty in this copy; the write of the saved
 * command value back to the command window appears to have been lost —
 * confirm against the original driver source.
 */
static inline void ks_restore_cmd_reg(struct ks_net *ks)
{
}
610 
618 static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
619 {
620  unsigned pmecr;
621 
622  netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
623 
624  ks_rdreg16(ks, KS_GRR);
625  pmecr = ks_rdreg16(ks, KS_PMECR);
626  pmecr &= ~PMECR_PM_MASK;
627  pmecr |= pwrmode;
628 
629  ks_wrreg16(ks, KS_PMECR, pmecr);
630 }
631 
637 static void ks_read_config(struct ks_net *ks)
638 {
639  u16 reg_data = 0;
640 
641  /* Regardless of bus width, 8 bit read should always work.*/
642  reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
643  reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
644 
645  /* addr/data bus are multiplexed */
646  ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
647 
648  /* There are garbage data when reading data from QMU,
649  depending on bus-width.
650  */
651 
652  if (reg_data & CCR_8BIT) {
653  ks->bus_width = ENUM_BUS_8BIT;
654  ks->extra_byte = 1;
655  } else if (reg_data & CCR_16BIT) {
657  ks->extra_byte = 2;
658  } else {
660  ks->extra_byte = 4;
661  }
662 }
663 
/**
 * ks_soft_reset - issue one of the GRR soft-reset operations
 * @ks: the chip state
 * @op: GRR_QMU and/or GRR_GSR reset bit(s) to pulse
 *
 * Interrupts are masked first; the reset bit is asserted for 10ms and
 * the chip is given 1ms after release to settle.
 */
static void ks_soft_reset(struct ks_net *ks, unsigned op)
{
	/* Disable interrupt first */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_GRR, op);
	mdelay(10);	/* wait a short time to effect reset */
	ks_wrreg16(ks, KS_GRR, 0);
	mdelay(1);	/* wait for condition to clear */
}
686 
687 
688 void ks_enable_qmu(struct ks_net *ks)
689 {
690  u16 w;
691 
692  w = ks_rdreg16(ks, KS_TXCR);
693  /* Enables QMU Transmit (TXCR). */
694  ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
695 
696  /*
697  * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
698  * Enable
699  */
700 
701  w = ks_rdreg16(ks, KS_RXQCR);
702  ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
703 
704  /* Enables QMU Receive (RXCR1). */
705  w = ks_rdreg16(ks, KS_RXCR1);
706  ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
707  ks->enabled = true;
708 } /* ks_enable_qmu */
709 
710 static void ks_disable_qmu(struct ks_net *ks)
711 {
712  u16 w;
713 
714  w = ks_rdreg16(ks, KS_TXCR);
715 
716  /* Disables QMU Transmit (TXCR). */
717  w &= ~TXCR_TXE;
718  ks_wrreg16(ks, KS_TXCR, w);
719 
720  /* Disables QMU Receive (RXCR1). */
721  w = ks_rdreg16(ks, KS_RXCR1);
722  w &= ~RXCR1_RXE ;
723  ks_wrreg16(ks, KS_RXCR1, w);
724 
725  ks->enabled = false;
726 
727 } /* ks_disable_qmu */
728 
/**
 * ks_read_qmu - copy one received frame out of the QMU
 * @ks: the chip state
 * @buf: destination buffer
 * @len: frame byte count
 *
 * Switches the chip into pseudo-DMA (auto-increment) mode, skips the
 * bus-width dependent dummy prefix plus the 2-byte status and 2-byte
 * length words, copies the 4-byte-aligned frame data, then leaves
 * pseudo-DMA mode again.  The register-access order here is dictated
 * by the hardware and must not be changed.
 */
static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
{
	u32 r = ks->extra_byte & 0x1 ;	/* one odd dummy byte (8-bit bus only) */
	u32 w = ks->extra_byte - r;	/* remaining dummy bytes, word-sized */

	/* 1. set pseudo DMA mode */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);

	/* 2. read prepend data: dummy bytes + status word + length word */
	if (unlikely(r))
		ioread8(ks->hw_addr);
	ks_inblk(ks, buf, w + 2 + 2);

	/* 3. read pkt data */
	ks_inblk(ks, buf, ALIGN(len, 4));

	/* 4. reset pseudo DMA Mode */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
}
766 
/**
 * ks_rcv - drain received frames from the chip and push them upstream
 * @ks: the chip state
 * @netdev: network device
 *
 * Reads the pending frame count from RXFCTR, snapshots each frame's
 * status and byte count, then copies every valid frame into a fresh
 * skb and hands it to netif_rx().  Invalid, zero-length or over-sized
 * frames (and allocation failures) are released via RXQCR_RRXEF.
 * Runs in IRQ context (called from ks_irq), hence dev_kfree_skb_irq.
 */
static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
{
	u32 i;
	struct type_frame_head *frame_hdr = ks->frame_head_info;
	struct sk_buff *skb;

	/* the high byte of RXFCTR is the received-frame count */
	ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;

	/* read all header information */
	for (i = 0; i < ks->frame_cnt; i++) {
		/* Checking Received packet status */
		frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
		/* Get packet len from hardware */
		frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
		frame_hdr++;
	}

	frame_hdr = ks->frame_head_info;
	while (ks->frame_cnt--) {
		/* +16 leaves room for the 2-byte IP-alignment reserve */
		skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
		if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
			(frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
			skb_reserve(skb, 2);
			/* read data block including CRC 4 bytes */
			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
			skb_put(skb, frame_hdr->len);
			skb->protocol = eth_type_trans(skb, netdev);
			netif_rx(skb);
		} else {
			/* NOTE(review): this message also fires for invalid
			 * frames, not only allocation failure — confirm. */
			pr_err("%s: err:skb alloc\n", __func__);
			/* release the bad frame from the RXQ */
			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
			if (skb)
				dev_kfree_skb_irq(skb);
		}
		frame_hdr++;
	}
}
813 
821 static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
822 {
823  /* check the status of the link */
824  u32 link_up_status;
825  if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
826  netif_carrier_on(netdev);
827  link_up_status = true;
828  } else {
829  netif_carrier_off(netdev);
830  link_up_status = false;
831  }
832  netif_dbg(ks, link, ks->netdev,
833  "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
834 }
835 
/**
 * ks_irq - device interrupt handler
 * @irq: interrupt number
 * @pw: the net_device registered at request_irq() time
 *
 * Acknowledges the interrupt status, then services in order: received
 * frames (IRQ_RXI), link change (IRQ_LCI), transmit done (IRQ_TXI,
 * wakes the queue) and link-detect wakeup (IRQ_LDI).  The command
 * register window is saved on entry and restored on exit so that the
 * handler does not corrupt a register selection made in process
 * context.
 */
static irqreturn_t ks_irq(int irq, void *pw)
{
	struct net_device *netdev = pw;
	struct ks_net *ks = netdev_priv(netdev);
	u16 status;

	/* this should be the first in IRQ handler */
	ks_save_cmd_reg(ks);

	status = ks_rdreg16(ks, KS_ISR);
	if (unlikely(!status)) {
		/* no cause bits set: not our interrupt (shared line) */
		ks_restore_cmd_reg(ks);
		return IRQ_NONE;
	}

	/* acknowledge everything we are about to handle */
	ks_wrreg16(ks, KS_ISR, status);

	if (likely(status & IRQ_RXI))
		ks_rcv(ks, netdev);

	if (unlikely(status & IRQ_LCI))
		ks_update_link_status(netdev, ks);

	if (unlikely(status & IRQ_TXI))
		netif_wake_queue(netdev);

	if (unlikely(status & IRQ_LDI)) {

		/* clear the latched link-detect wake event */
		u16 pmecr = ks_rdreg16(ks, KS_PMECR);
		pmecr &= ~PMECR_WKEVT_MASK;
		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
	}

	/* this should be the last in IRQ handler */
	ks_restore_cmd_reg(ks);
	return IRQ_HANDLED;
}
884 
885 
/**
 * ks_net_open - open the network device
 * @netdev: network device
 *
 * Requests the (level-triggered, active-low) interrupt line, wakes the
 * chip from power-down, clears any stale interrupt status, unmasks
 * interrupts, starts the QMU and the transmit queue.
 *
 * Return: 0 on success, or the request_irq() error code.
 */
static int ks_net_open(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	int err;

#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW)
	/* lock the card, even if we may not actually do anything
	 * else at the moment.
	 */

	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);

	/* reset the HW */
	err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);

	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
		return err;
	}

	/* wake up powermode to normal mode */
	ks_set_powermode(ks, PMECR_PM_NORMAL);
	mdelay(1);	/* wait for normal mode to take effect */

	/* ack any stale status, then unmask and start the QMU */
	ks_wrreg16(ks, KS_ISR, 0xffff);
	ks_enable_int(ks);
	ks_enable_qmu(ks);
	netif_start_queue(ks->netdev);

	netif_dbg(ks, ifup, ks->netdev, "network device up\n");

	return 0;
}
926 
/**
 * ks_net_stop - close the network device
 * @netdev: network device
 *
 * Stops the transmit queue, masks and acknowledges all interrupts,
 * shuts down the QMU, drops the chip into soft power-down and frees
 * the IRQ — all under ks->lock to serialise against register users.
 */
static int ks_net_stop(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	netif_info(ks, ifdown, netdev, "shutting down\n");

	netif_stop_queue(netdev);

	mutex_lock(&ks->lock);

	/* turn off the IRQs and ack any outstanding */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* shutdown RX/TX QMU */
	ks_disable_qmu(ks);

	/* set powermode to soft power down to save power */
	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
	free_irq(netdev->irq, netdev);
	mutex_unlock(&ks->lock);
	return 0;
}
958 
959 
/**
 * ks_write_qmu - copy one frame into the TX QMU and start transmission
 * @ks: the chip state
 * @pdata: frame payload
 * @len: frame length in bytes
 *
 * Caller must hold the TX lock and have verified there is enough TX
 * FIFO space (see ks_start_xmit).  The numbered steps are a fixed
 * hardware sequence.
 */
static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
{
	/* start header at txb[0] to align txw entries */
	ks->txh.txw[0] = 0;
	ks->txh.txw[1] = cpu_to_le16(len);

	/* 1. set pseudo-DMA mode */
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
	/* 2. write status/length info */
	ks_outblk(ks, ks->txh.txw, 4);
	/* 3. write pkt data */
	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
	/* 4. reset pseudo-DMA mode */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
	/* 5. Enqueue Tx (move the pkt from TX buffer into TXQ) */
	ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
	/* 6. wait until TXQCR_METFE is auto-cleared
	 * NOTE(review): unbounded busy-wait; a wedged chip stalls the
	 * CPU here — consider a bounded poll.
	 */
	while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
		;
}
993 
/**
 * ks_start_xmit - transmit a packet
 * @skb: buffer to transmit
 * @netdev: network device
 *
 * Return: NETDEV_TX_OK when the frame was queued to the chip (the skb
 * is consumed), or NETDEV_TX_BUSY when the TX FIFO lacks space (the
 * stack keeps the skb and retries).
 */
static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int retv = NETDEV_TX_OK;
	struct ks_net *ks = netdev_priv(netdev);

	/* keep the IRQ handler away while we touch the QMU */
	disable_irq(netdev->irq);
	ks_disable_int(ks);
	spin_lock(&ks->statelock);

	/* Extra space are required:
	 * 4 byte for alignment, 4 for status/length, 4 for CRC
	 */

	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
		ks_write_qmu(ks, skb->data, skb->len);
		dev_kfree_skb(skb);
	} else
		/* NOTE(review): the queue is not stopped here, so the
		 * stack may re-submit repeatedly until IRQ_TXI frees
		 * space — confirm this is the intended back-pressure. */
		retv = NETDEV_TX_BUSY;
	spin_unlock(&ks->statelock);
	ks_enable_int(ks);
	enable_irq(netdev->irq);
	return retv;
}
1026 
1032 static void ks_start_rx(struct ks_net *ks)
1033 {
1034  u16 cntl;
1035 
1036  /* Enables QMU Receive (RXCR1). */
1037  cntl = ks_rdreg16(ks, KS_RXCR1);
1038  cntl |= RXCR1_RXE ;
1039  ks_wrreg16(ks, KS_RXCR1, cntl);
1040 } /* ks_start_rx */
1041 
1047 static void ks_stop_rx(struct ks_net *ks)
1048 {
1049  u16 cntl;
1050 
1051  /* Disables QMU Receive (RXCR1). */
1052  cntl = ks_rdreg16(ks, KS_RXCR1);
1053  cntl &= ~RXCR1_RXE ;
1054  ks_wrreg16(ks, KS_RXCR1, cntl);
1055 
1056 } /* ks_stop_rx */
1057 
1058 static unsigned long const ethernet_polynomial = 0x04c11db7U;
1059 
1060 static unsigned long ether_gen_crc(int length, u8 *data)
1061 {
1062  long crc = -1;
1063  while (--length >= 0) {
1064  u8 current_octet = *data++;
1065  int bit;
1066 
1067  for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
1068  crc = (crc << 1) ^
1069  ((crc < 0) ^ (current_octet & 1) ?
1070  ethernet_polynomial : 0);
1071  }
1072  }
1073  return (unsigned long)crc;
1074 } /* ether_gen_crc */
1075 
1081 static void ks_set_grpaddr(struct ks_net *ks)
1082 {
1083  u8 i;
1084  u32 index, position, value;
1085 
1086  memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
1087 
1088  for (i = 0; i < ks->mcast_lst_size; i++) {
1089  position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
1090  index = position >> 3;
1091  value = 1 << (position & 7);
1092  ks->mcast_bits[index] |= (u8)value;
1093  }
1094 
1095  for (i = 0; i < HW_MCAST_SIZE; i++) {
1096  if (i & 1) {
1097  ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
1098  (ks->mcast_bits[i] << 8) |
1099  ks->mcast_bits[i - 1]);
1100  }
1101  }
1102 } /* ks_set_grpaddr */
1103 
1111 static void ks_clear_mcast(struct ks_net *ks)
1112 {
1113  u16 i, mcast_size;
1114  for (i = 0; i < HW_MCAST_SIZE; i++)
1115  ks->mcast_bits[i] = 0;
1116 
1117  mcast_size = HW_MCAST_SIZE >> 2;
1118  for (i = 0; i < mcast_size; i++)
1119  ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
1120 }
1121 
1122 static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
1123 {
1124  u16 cntl;
1125  ks->promiscuous = promiscuous_mode;
1126  ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1127  cntl = ks_rdreg16(ks, KS_RXCR1);
1128 
1129  cntl &= ~RXCR1_FILTER_MASK;
1130  if (promiscuous_mode)
1131  /* Enable Promiscuous mode */
1132  cntl |= RXCR1_RXAE | RXCR1_RXINVF;
1133  else
1134  /* Disable Promiscuous mode (default normal mode) */
1135  cntl |= RXCR1_RXPAFMA;
1136 
1137  ks_wrreg16(ks, KS_RXCR1, cntl);
1138 
1139  if (ks->enabled)
1140  ks_start_rx(ks);
1141 
1142 } /* ks_set_promis */
1143 
1144 static void ks_set_mcast(struct ks_net *ks, u16 mcast)
1145 {
1146  u16 cntl;
1147 
1148  ks->all_mcast = mcast;
1149  ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1150  cntl = ks_rdreg16(ks, KS_RXCR1);
1151  cntl &= ~RXCR1_FILTER_MASK;
1152  if (mcast)
1153  /* Enable "Perfect with Multicast address passed mode" */
1154  cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1155  else
1160  cntl |= RXCR1_RXPAFMA;
1161 
1162  ks_wrreg16(ks, KS_RXCR1, cntl);
1163 
1164  if (ks->enabled)
1165  ks_start_rx(ks);
1166 } /* ks_set_mcast */
1167 
/**
 * ks_set_rx_mode - configure RX filtering from the interface flags
 * @netdev: network device
 *
 * Maps IFF_PROMISC / IFF_ALLMULTI onto the chip filter modes, then
 * loads the multicast list: up to MAX_MCAST_LST exact entries go into
 * the hash table via ks_set_grpaddr(); longer lists fall back to
 * accept-all-multicast.
 */
static void ks_set_rx_mode(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	struct netdev_hw_addr *ha;

	/* Turn on/off promiscuous mode. */
	if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
		ks_set_promis(ks,
			(u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
	/* Turn on/off all mcast mode. */
	else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
		ks_set_mcast(ks,
			(u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
	else
		ks_set_promis(ks, false);

	if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
		if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev) {
				if (i >= MAX_MCAST_LST)
					break;
				memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
			}
			ks->mcast_lst_size = (u8)i;
			ks_set_grpaddr(ks);
		} else {
			/* list too long for exact matching:
			 * accept all multicast instead */
			ks_set_mcast(ks, true);
		}
	} else {
		ks->mcast_lst_size = 0;
		ks_clear_mcast(ks);
	}
} /* ks_set_rx_mode */
1208 
1209 static void ks_set_mac(struct ks_net *ks, u8 *data)
1210 {
1211  u16 *pw = (u16 *)data;
1212  u16 w, u;
1213 
1214  ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1215 
1216  u = *pw++;
1217  w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1218  ks_wrreg16(ks, KS_MARH, w);
1219 
1220  u = *pw++;
1221  w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1222  ks_wrreg16(ks, KS_MARM, w);
1223 
1224  u = *pw;
1225  w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1226  ks_wrreg16(ks, KS_MARL, w);
1227 
1228  memcpy(ks->mac_addr, data, 6);
1229 
1230  if (ks->enabled)
1231  ks_start_rx(ks);
1232 }
1233 
1234 static int ks_set_mac_address(struct net_device *netdev, void *paddr)
1235 {
1236  struct ks_net *ks = netdev_priv(netdev);
1237  struct sockaddr *addr = paddr;
1238  u8 *da;
1239 
1240  netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
1241  memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1242 
1243  da = (u8 *)netdev->dev_addr;
1244 
1245  ks_set_mac(ks, da);
1246  return 0;
1247 }
1248 
1249 static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1250 {
1251  struct ks_net *ks = netdev_priv(netdev);
1252 
1253  if (!netif_running(netdev))
1254  return -EINVAL;
1255 
1256  return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
1257 }
1258 
/* net_device operations exposed by this driver */
static const struct net_device_ops ks_netdev_ops = {
	.ndo_open = ks_net_open,
	.ndo_stop = ks_net_stop,
	.ndo_do_ioctl = ks_net_ioctl,
	.ndo_start_xmit = ks_start_xmit,
	.ndo_set_mac_address = ks_set_mac_address,
	.ndo_set_rx_mode = ks_set_rx_mode,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
};
1269 
1270 /* ethtool support */
1271 
/* ethtool: report driver name, version and parent bus info */
static void ks_get_drvinfo(struct net_device *netdev,
			   struct ethtool_drvinfo *di)
{
	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
	strlcpy(di->version, "1.00", sizeof(di->version));
	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
		sizeof(di->bus_info));
}
1280 
1281 static u32 ks_get_msglevel(struct net_device *netdev)
1282 {
1283  struct ks_net *ks = netdev_priv(netdev);
1284  return ks->msg_enable;
1285 }
1286 
1287 static void ks_set_msglevel(struct net_device *netdev, u32 to)
1288 {
1289  struct ks_net *ks = netdev_priv(netdev);
1290  ks->msg_enable = to;
1291 }
1292 
1293 static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1294 {
1295  struct ks_net *ks = netdev_priv(netdev);
1296  return mii_ethtool_gset(&ks->mii, cmd);
1297 }
1298 
1299 static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1300 {
1301  struct ks_net *ks = netdev_priv(netdev);
1302  return mii_ethtool_sset(&ks->mii, cmd);
1303 }
1304 
1305 static u32 ks_get_link(struct net_device *netdev)
1306 {
1307  struct ks_net *ks = netdev_priv(netdev);
1308  return mii_link_ok(&ks->mii);
1309 }
1310 
1311 static int ks_nway_reset(struct net_device *netdev)
1312 {
1313  struct ks_net *ks = netdev_priv(netdev);
1314  return mii_nway_restart(&ks->mii);
1315 }
1316 
/* ethtool operations exposed by this driver */
static const struct ethtool_ops ks_ethtool_ops = {
	.get_drvinfo = ks_get_drvinfo,
	.get_msglevel = ks_get_msglevel,
	.set_msglevel = ks_set_msglevel,
	.get_settings = ks_get_settings,
	.set_settings = ks_set_settings,
	.get_link = ks_get_link,
	.nway_reset = ks_nway_reset,
};
1326 
1327 /* MII interface controls */
1328 
1337 static int ks_phy_reg(int reg)
1338 {
1339  switch (reg) {
1340  case MII_BMCR:
1341  return KS_P1MBCR;
1342  case MII_BMSR:
1343  return KS_P1MBSR;
1344  case MII_PHYSID1:
1345  return KS_PHY1ILR;
1346  case MII_PHYSID2:
1347  return KS_PHY1IHR;
1348  case MII_ADVERTISE:
1349  return KS_P1ANAR;
1350  case MII_LPA:
1351  return KS_P1ANLPR;
1352  }
1353 
1354  return 0x0;
1355 }
1356 
1372 static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1373 {
1374  struct ks_net *ks = netdev_priv(netdev);
1375  int ksreg;
1376  int result;
1377 
1378  ksreg = ks_phy_reg(reg);
1379  if (!ksreg)
1380  return 0x0; /* no error return allowed, so use zero */
1381 
1382  mutex_lock(&ks->lock);
1383  result = ks_rdreg16(ks, ksreg);
1384  mutex_unlock(&ks->lock);
1385 
1386  return result;
1387 }
1388 
1389 static void ks_phy_write(struct net_device *netdev,
1390  int phy, int reg, int value)
1391 {
1392  struct ks_net *ks = netdev_priv(netdev);
1393  int ksreg;
1394 
1395  ksreg = ks_phy_reg(reg);
1396  if (ksreg) {
1397  mutex_lock(&ks->lock);
1398  ks_wrreg16(ks, ksreg, value);
1399  mutex_unlock(&ks->lock);
1400  }
1401 }
1402 
1409 static int ks_read_selftest(struct ks_net *ks)
1410 {
1411  unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1412  int ret = 0;
1413  unsigned rd;
1414 
1415  rd = ks_rdreg16(ks, KS_MBIR);
1416 
1417  if ((rd & both_done) != both_done) {
1418  netdev_warn(ks->netdev, "Memory selftest not finished\n");
1419  return 0;
1420  }
1421 
1422  if (rd & MBIR_TXMBFA) {
1423  netdev_err(ks->netdev, "TX memory selftest fails\n");
1424  ret |= 1;
1425  }
1426 
1427  if (rd & MBIR_RXMBFA) {
1428  netdev_err(ks->netdev, "RX memory selftest fails\n");
1429  ret |= 2;
1430  }
1431 
1432  netdev_info(ks->netdev, "the selftest passes\n");
1433  return ret;
1434 }
1435 
1436 static void ks_setup(struct ks_net *ks)
1437 {
1438  u16 w;
1439 
1444  /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
1445  ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
1446 
1447  /* Setup Receive Frame Data Pointer Auto-Increment */
1448  ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
1449 
1450  /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
1451  ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
1452 
1453  /* Setup RxQ Command Control (RXQCR) */
1454  ks->rc_rxqcr = RXQCR_CMD_CNTL;
1455  ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
1456 
1463  w = ks_rdreg16(ks, KS_P1MBCR);
1464  w &= ~P1MBCR_FORCE_FDX;
1465  ks_wrreg16(ks, KS_P1MBCR, w);
1466 
1468  ks_wrreg16(ks, KS_TXCR, w);
1469 
1471 
1472  if (ks->promiscuous) /* bPromiscuous */
1473  w |= (RXCR1_RXAE | RXCR1_RXINVF);
1474  else if (ks->all_mcast) /* Multicast address passed mode */
1476  else /* Normal mode */
1477  w |= RXCR1_RXPAFMA;
1478 
1479  ks_wrreg16(ks, KS_RXCR1, w);
1480 } /*ks_setup */
1481 
1482 
1483 static void ks_setup_int(struct ks_net *ks)
1484 {
1485  ks->rc_ier = 0x00;
1486  /* Clear the interrupts status of the hardware. */
1487  ks_wrreg16(ks, KS_ISR, 0xffff);
1488 
1489  /* Enables the interrupts of the hardware. */
1490  ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
1491 } /* ks_setup_int */
1492 
1493 static int ks_hw_init(struct ks_net *ks)
1494 {
1495 #define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
1496  ks->promiscuous = 0;
1497  ks->all_mcast = 0;
1498  ks->mcast_lst_size = 0;
1499 
1500  ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
1501  if (!ks->frame_head_info)
1502  return false;
1503 
1504  ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
1505  return true;
1506 }
1507 
1508 
1509 static int __devinit ks8851_probe(struct platform_device *pdev)
1510 {
1511  int err = -ENOMEM;
1512  struct resource *io_d, *io_c;
1513  struct net_device *netdev;
1514  struct ks_net *ks;
1515  u16 id, data;
1517 
1518  io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1519  io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1520 
1521  if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
1522  goto err_mem_region;
1523 
1524  if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
1525  goto err_mem_region1;
1526 
1527  netdev = alloc_etherdev(sizeof(struct ks_net));
1528  if (!netdev)
1529  goto err_alloc_etherdev;
1530 
1531  SET_NETDEV_DEV(netdev, &pdev->dev);
1532 
1533  ks = netdev_priv(netdev);
1534  ks->netdev = netdev;
1535  ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
1536 
1537  if (!ks->hw_addr)
1538  goto err_ioremap;
1539 
1540  ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
1541  if (!ks->hw_addr_cmd)
1542  goto err_ioremap1;
1543 
1544  netdev->irq = platform_get_irq(pdev, 0);
1545 
1546  if ((int)netdev->irq < 0) {
1547  err = netdev->irq;
1548  goto err_get_irq;
1549  }
1550 
1551  ks->pdev = pdev;
1552 
1553  mutex_init(&ks->lock);
1554  spin_lock_init(&ks->statelock);
1555 
1556  netdev->netdev_ops = &ks_netdev_ops;
1557  netdev->ethtool_ops = &ks_ethtool_ops;
1558 
1559  /* setup mii state */
1560  ks->mii.dev = netdev;
1561  ks->mii.phy_id = 1,
1562  ks->mii.phy_id_mask = 1;
1563  ks->mii.reg_num_mask = 0xf;
1564  ks->mii.mdio_read = ks_phy_read;
1565  ks->mii.mdio_write = ks_phy_write;
1566 
1567  netdev_info(netdev, "message enable is %d\n", msg_enable);
1568  /* set the default message enable */
1569  ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1570  NETIF_MSG_PROBE |
1571  NETIF_MSG_LINK));
1572  ks_read_config(ks);
1573 
1574  /* simple check for a valid chip being connected to the bus */
1575  if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1576  netdev_err(netdev, "failed to read device ID\n");
1577  err = -ENODEV;
1578  goto err_register;
1579  }
1580 
1581  if (ks_read_selftest(ks)) {
1582  netdev_err(netdev, "failed to read device ID\n");
1583  err = -ENODEV;
1584  goto err_register;
1585  }
1586 
1587  err = register_netdev(netdev);
1588  if (err)
1589  goto err_register;
1590 
1591  platform_set_drvdata(pdev, netdev);
1592 
1593  ks_soft_reset(ks, GRR_GSR);
1594  ks_hw_init(ks);
1595  ks_disable_qmu(ks);
1596  ks_setup(ks);
1597  ks_setup_int(ks);
1598 
1599  data = ks_rdreg16(ks, KS_OBCR);
1600  ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1601 
1602  /* overwriting the default MAC address */
1603  pdata = pdev->dev.platform_data;
1604  if (!pdata) {
1605  netdev_err(netdev, "No platform data\n");
1606  err = -ENODEV;
1607  goto err_pdata;
1608  }
1609  memcpy(ks->mac_addr, pdata->mac_addr, 6);
1610  if (!is_valid_ether_addr(ks->mac_addr)) {
1611  /* Use random MAC address if none passed */
1612  eth_random_addr(ks->mac_addr);
1613  netdev_info(netdev, "Using random mac address\n");
1614  }
1615  netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
1616 
1617  memcpy(netdev->dev_addr, ks->mac_addr, 6);
1618 
1619  ks_set_mac(ks, netdev->dev_addr);
1620 
1621  id = ks_rdreg16(ks, KS_CIDER);
1622 
1623  netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1624  (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1625  return 0;
1626 
1627 err_pdata:
1628  unregister_netdev(netdev);
1629 err_register:
1630 err_get_irq:
1631  iounmap(ks->hw_addr_cmd);
1632 err_ioremap1:
1633  iounmap(ks->hw_addr);
1634 err_ioremap:
1635  free_netdev(netdev);
1636 err_alloc_etherdev:
1637  release_mem_region(io_c->start, resource_size(io_c));
1638 err_mem_region1:
1639  release_mem_region(io_d->start, resource_size(io_d));
1640 err_mem_region:
1641  return err;
1642 }
1643 
1644 static int __devexit ks8851_remove(struct platform_device *pdev)
1645 {
1646  struct net_device *netdev = platform_get_drvdata(pdev);
1647  struct ks_net *ks = netdev_priv(netdev);
1648  struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1649 
1650  kfree(ks->frame_head_info);
1651  unregister_netdev(netdev);
1652  iounmap(ks->hw_addr);
1653  free_netdev(netdev);
1654  release_mem_region(iomem->start, resource_size(iomem));
1655  platform_set_drvdata(pdev, NULL);
1656  return 0;
1657 
1658 }
1659 
/* Platform bus glue: matches devices named DRV_NAME ("ks8851_mll"). */
static struct platform_driver ks8851_platform_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
	.probe = ks8851_probe,
	.remove = __devexit_p(ks8851_remove),
};
1668 
1669 module_platform_driver(ks8851_platform_driver);
1670 
1671 MODULE_DESCRIPTION("KS8851 MLL Network driver");
1672 MODULE_AUTHOR("David Choi <[email protected]>");
1673 MODULE_LICENSE("GPL");
1675 MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1676