Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
hwmtm.c
Go to the documentation of this file.
1 /******************************************************************************
2  *
3  * (C)Copyright 1998,1999 SysKonnect,
4  * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
5  *
6  * See the file "skfddi.c" for further information.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * The information in this file is provided "AS IS" without warranty.
14  *
15  ******************************************************************************/
16 
17 #ifndef lint
18 static char const ID_sccs[] = "@(#)hwmtm.c 1.40 99/05/31 (C) SK" ;
19 #endif
20 
21 #define HWMTM
22 
23 #ifndef FDDI
24 #define FDDI
25 #endif
26 
27 #include "h/types.h"
28 #include "h/fddi.h"
29 #include "h/smc.h"
30 #include "h/supern_2.h"
31 #include "h/skfbiinc.h"
32 
33 /*
34  -------------------------------------------------------------
35  DOCUMENTATION
36  -------------------------------------------------------------
37  BEGIN_MANUAL_ENTRY(DOCUMENTATION)
38 
39  T B D
40 
41  END_MANUAL_ENTRY
42 */
43 /*
44  -------------------------------------------------------------
45  LOCAL VARIABLES:
46  -------------------------------------------------------------
47 */
48 #ifdef COMMON_MB_POOL
49 static SMbuf *mb_start = 0 ;
50 static SMbuf *mb_free = 0 ;
51 static int mb_init = FALSE ;
52 static int call_count = 0 ;
53 #endif
54 
55 /*
56  -------------------------------------------------------------
57  EXTERNAL VARIABLES:
58  -------------------------------------------------------------
59 */
60 
61 #ifdef DEBUG
62 #ifndef DEBUG_BRD
63 extern struct smt_debug debug ;
64 #endif
65 #endif
66 
67 #ifdef NDIS_OS2
68 extern u_char offDepth ;
69 extern u_char force_irq_pending ;
70 #endif
71 
72 /*
73  -------------------------------------------------------------
74  LOCAL FUNCTIONS:
75  -------------------------------------------------------------
76 */
77 
78 static void queue_llc_rx(struct s_smc *smc, SMbuf *mb);
79 static void smt_to_llc(struct s_smc *smc, SMbuf *mb);
80 static void init_txd_ring(struct s_smc *smc);
81 static void init_rxd_ring(struct s_smc *smc);
82 static void queue_txd_mb(struct s_smc *smc, SMbuf *mb);
83 static u_long init_descr_ring(struct s_smc *smc, union s_fp_descr volatile *start,
84  int count);
85 static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
86 static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
87 static SMbuf* get_llc_rx(struct s_smc *smc);
88 static SMbuf* get_txd_mb(struct s_smc *smc);
89 static void mac_drv_clear_txd(struct s_smc *smc);
90 
91 /*
92  -------------------------------------------------------------
93  EXTERNAL FUNCTIONS:
94  -------------------------------------------------------------
95 */
96 /* The external SMT functions are listed in cmtdef.h */
97 
98 extern void* mac_drv_get_space(struct s_smc *smc, unsigned int size);
99 extern void* mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size);
100 extern void mac_drv_fill_rxd(struct s_smc *smc);
101 extern void mac_drv_tx_complete(struct s_smc *smc,
102  volatile struct s_smt_fp_txd *txd);
103 extern void mac_drv_rx_complete(struct s_smc *smc,
104  volatile struct s_smt_fp_rxd *rxd,
105  int frag_count, int len);
106 extern void mac_drv_requeue_rxd(struct s_smc *smc,
107  volatile struct s_smt_fp_rxd *rxd,
108  int frag_count);
109 extern void mac_drv_clear_rxd(struct s_smc *smc,
110  volatile struct s_smt_fp_rxd *rxd, int frag_count);
111 
112 #ifdef USE_OS_CPY
113 extern void hwm_cpy_rxd2mb(void);
114 extern void hwm_cpy_txd2mb(void);
115 #endif
116 
117 #ifdef ALL_RX_COMPLETE
118 extern void mac_drv_all_receives_complete(void);
119 #endif
120 
121 extern u_long mac_drv_virt2phys(struct s_smc *smc, void *virt);
122 extern u_long dma_master(struct s_smc *smc, void *virt, int len, int flag);
123 
124 #ifdef NDIS_OS2
125 extern void post_proc(void);
126 #else
127 extern void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
128  int flag);
129 #endif
130 
131 extern int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
132  int la_len);
133 
134 /*
135  -------------------------------------------------------------
136  PUBLIC FUNCTIONS:
137  -------------------------------------------------------------
138 */
139 void process_receive(struct s_smc *smc);
140 void fddi_isr(struct s_smc *smc);
141 void smt_free_mbuf(struct s_smc *smc, SMbuf *mb);
142 void init_driver_fplus(struct s_smc *smc);
143 void mac_drv_rx_mode(struct s_smc *smc, int mode);
144 void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
145 void mac_drv_clear_tx_queue(struct s_smc *smc);
146 void mac_drv_clear_rx_queue(struct s_smc *smc);
147 void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
148  int frame_status);
149 void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
150  int frame_status);
151 
152 int mac_drv_init(struct s_smc *smc);
153 int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
154  int frame_status);
155 
157 
158 SMbuf* smt_get_mbuf(struct s_smc *smc);
159 
160 #ifdef DEBUG
161  void mac_drv_debug_lev(void);
162 #endif
163 
164 /*
165  -------------------------------------------------------------
166  MACROS:
167  -------------------------------------------------------------
168 */
169 #ifndef UNUSED
170 #ifdef lint
171 #define UNUSED(x) (x) = (x)
172 #else
173 #define UNUSED(x)
174 #endif
175 #endif
176 
177 #ifdef USE_CAN_ADDR
178 #define MA smc->hw.fddi_canon_addr.a
179 #define GROUP_ADDR_BIT 0x01
180 #else
181 #define MA smc->hw.fddi_home_addr.a
182 #define GROUP_ADDR_BIT 0x80
183 #endif
184 
185 #define RXD_TXD_COUNT (HWM_ASYNC_TXD_COUNT+HWM_SYNC_TXD_COUNT+\
186  SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT)
187 
188 #ifdef MB_OUTSIDE_SMC
189 #define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd) +\
190  MAX_MBUF*sizeof(SMbuf))
191 #define EXT_VIRT_MEM_2 ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
192 #else
193 #define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
194 #endif
195 
196  /*
197  * define critical read for 16 Bit drivers
198  */
199 #if defined(NDIS_OS2) || defined(ODI2)
200 #define CR_READ(var) ((var) & 0xffff0000 | ((var) & 0xffff))
201 #else
202 #define CR_READ(var) (__le32)(var)
203 #endif
204 
205 #define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
206  IS_MINTR1 | IS_MINTR2 | IS_MINTR3 | IS_R1_P | \
207  IS_R1_C | IS_XA_C | IS_XS_C)
208 
209 /*
210  -------------------------------------------------------------
211  INIT- AND SMT FUNCTIONS:
212  -------------------------------------------------------------
213 */
214 
215 
216 /*
217  * BEGIN_MANUAL_ENTRY(mac_drv_check_space)
218  * u_int mac_drv_check_space()
219  *
220  * function DOWNCALL (drvsr.c)
221  * This function calculates the needed non virtual
222  * memory for MBufs, RxD and TxD descriptors etc.
223  * needed by the driver.
224  *
225  * return u_int memory in bytes
226  *
227  * END_MANUAL_ENTRY
228  */
230 {
231 #ifdef MB_OUTSIDE_SMC
232 #ifdef COMMON_MB_POOL
233  call_count++ ;
234  if (call_count == 1) {
235  return EXT_VIRT_MEM;
236  }
237  else {
238  return EXT_VIRT_MEM_2;
239  }
240 #else
241  return EXT_VIRT_MEM;
242 #endif
243 #else
244  return 0;
245 #endif
246 }
247 
248 /*
249  * BEGIN_MANUAL_ENTRY(mac_drv_init)
250  * void mac_drv_init(smc)
251  *
252  * function DOWNCALL (drvsr.c)
253  * In this function the hardware module allocates it's
254  * memory.
255  * The operating system dependent module should call
256  * mac_drv_init once, after the adatper is detected.
257  * END_MANUAL_ENTRY
258  */
259 int mac_drv_init(struct s_smc *smc)
260 {
261  if (sizeof(struct s_smt_fp_rxd) % 16) {
263  }
264  if (sizeof(struct s_smt_fp_txd) % 16) {
266  }
267 
268  /*
269  * get the required memory for the RxDs and TxDs
270  */
271  if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *)
273  (RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) {
274  return 1; /* no space the hwm modul can't work */
275  }
276 
277  /*
278  * get the memory for the SMT MBufs
279  */
280 #ifndef MB_OUTSIDE_SMC
281  smc->os.hwm.mbuf_pool.mb_start=(SMbuf *)(&smc->os.hwm.mbuf_pool.mb[0]) ;
282 #else
283 #ifndef COMMON_MB_POOL
284  if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc,
285  MAX_MBUF*sizeof(SMbuf)))) {
286  return 1; /* no space the hwm modul can't work */
287  }
288 #else
289  if (!mb_start) {
290  if (!(mb_start = (SMbuf *) mac_drv_get_space(smc,
291  MAX_MBUF*sizeof(SMbuf)))) {
292  return 1; /* no space the hwm modul can't work */
293  }
294  }
295 #endif
296 #endif
297  return 0;
298 }
299 
300 /*
301  * BEGIN_MANUAL_ENTRY(init_driver_fplus)
302  * init_driver_fplus(smc)
303  *
304  * Sets hardware modul specific values for the mode register 2
305  * (e.g. the byte alignment for the received frames, the position of the
306  * least significant byte etc.)
307  * END_MANUAL_ENTRY
308  */
310 {
311  smc->hw.fp.mdr2init = FM_LSB | FM_BMMODE | FM_ENNPRQ | FM_ENHSRQ | 3 ;
312 
313 #ifdef PCI
314  smc->hw.fp.mdr2init |= FM_CHKPAR | FM_PARITY ;
315 #endif
316  smc->hw.fp.mdr3init = FM_MENRQAUNLCK | FM_MENRS ;
317 
318 #ifdef USE_CAN_ADDR
319  /* enable address bit swapping */
320  smc->hw.fp.frselreg_init = FM_ENXMTADSWAP | FM_ENRCVADSWAP ;
321 #endif
322 }
323 
324 static u_long init_descr_ring(struct s_smc *smc,
325  union s_fp_descr volatile *start,
326  int count)
327 {
328  int i ;
329  union s_fp_descr volatile *d1 ;
330  union s_fp_descr volatile *d2 ;
331  u_long phys ;
332 
333  DB_GEN("descr ring starts at = %x ",(void *)start,0,3) ;
334  for (i=count-1, d1=start; i ; i--) {
335  d2 = d1 ;
336  d1++ ; /* descr is owned by the host */
337  d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
338  d2->r.rxd_next = &d1->r ;
339  phys = mac_drv_virt2phys(smc,(void *)d1) ;
340  d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
341  }
342  DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ;
343  d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
344  d1->r.rxd_next = &start->r ;
345  phys = mac_drv_virt2phys(smc,(void *)start) ;
346  d1->r.rxd_nrdadr = cpu_to_le32(phys) ;
347 
348  for (i=count, d1=start; i ; i--) {
350  d1++;
351  }
352  return phys;
353 }
354 
355 static void init_txd_ring(struct s_smc *smc)
356 {
357  struct s_smt_fp_txd volatile *ds ;
358  struct s_smt_tx_queue *queue ;
359  u_long phys ;
360 
361  /*
362  * initialize the transmit descriptors
363  */
364  ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p +
365  SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ;
366  queue = smc->hw.fp.tx[QUEUE_A0] ;
367  DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ;
368  (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
370  phys = le32_to_cpu(ds->txd_ntdadr) ;
371  ds++ ;
372  queue->tx_curr_put = queue->tx_curr_get = ds ;
373  ds-- ;
374  queue->tx_free = HWM_ASYNC_TXD_COUNT ;
375  queue->tx_used = 0 ;
376  outpd(ADDR(B5_XA_DA),phys) ;
377 
378  ds = (struct s_smt_fp_txd volatile *) ((char *)ds +
379  HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ;
380  queue = smc->hw.fp.tx[QUEUE_S] ;
381  DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ;
382  (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
384  phys = le32_to_cpu(ds->txd_ntdadr) ;
385  ds++ ;
386  queue->tx_curr_put = queue->tx_curr_get = ds ;
387  queue->tx_free = HWM_SYNC_TXD_COUNT ;
388  queue->tx_used = 0 ;
389  outpd(ADDR(B5_XS_DA),phys) ;
390 }
391 
392 static void init_rxd_ring(struct s_smc *smc)
393 {
394  struct s_smt_fp_rxd volatile *ds ;
395  struct s_smt_rx_queue *queue ;
396  u_long phys ;
397 
398  /*
399  * initialize the receive descriptors
400  */
401  ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ;
402  queue = smc->hw.fp.rx[QUEUE_R1] ;
403  DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ;
404  (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
406  phys = le32_to_cpu(ds->rxd_nrdadr) ;
407  ds++ ;
408  queue->rx_curr_put = queue->rx_curr_get = ds ;
409  queue->rx_free = SMT_R1_RXD_COUNT ;
410  queue->rx_used = 0 ;
411  outpd(ADDR(B4_R1_DA),phys) ;
412 }
413 
414 /*
415  * BEGIN_MANUAL_ENTRY(init_fddi_driver)
416  * void init_fddi_driver(smc,mac_addr)
417  *
418  * initializes the driver and it's variables
419  *
420  * END_MANUAL_ENTRY
421  */
423 {
424  SMbuf *mb ;
425  int i ;
426 
427  init_board(smc,mac_addr) ;
428  (void)init_fplus(smc) ;
429 
430  /*
431  * initialize the SMbufs for the SMT
432  */
433 #ifndef COMMON_MB_POOL
434  mb = smc->os.hwm.mbuf_pool.mb_start ;
435  smc->os.hwm.mbuf_pool.mb_free = (SMbuf *)NULL ;
436  for (i = 0; i < MAX_MBUF; i++) {
437  mb->sm_use_count = 1 ;
438  smt_free_mbuf(smc,mb) ;
439  mb++ ;
440  }
441 #else
442  mb = mb_start ;
443  if (!mb_init) {
444  mb_free = 0 ;
445  for (i = 0; i < MAX_MBUF; i++) {
446  mb->sm_use_count = 1 ;
447  smt_free_mbuf(smc,mb) ;
448  mb++ ;
449  }
450  mb_init = TRUE ;
451  }
452 #endif
453 
454  /*
455  * initialize the other variables
456  */
457  smc->os.hwm.llc_rx_pipe = smc->os.hwm.llc_rx_tail = (SMbuf *)NULL ;
458  smc->os.hwm.txd_tx_pipe = smc->os.hwm.txd_tx_tail = NULL ;
459  smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = smc->os.hwm.pass_DB = 0 ;
460  smc->os.hwm.pass_llc_promisc = TRUE ;
461  smc->os.hwm.queued_rx_frames = smc->os.hwm.queued_txd_mb = 0 ;
462  smc->os.hwm.detec_count = 0 ;
463  smc->os.hwm.rx_break = 0 ;
464  smc->os.hwm.rx_len_error = 0 ;
465  smc->os.hwm.isr_flag = FALSE ;
466 
467  /*
468  * make sure that the start pointer is 16 byte aligned
469  */
470  i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ;
471  if (i != 16) {
472  DB_GEN("i = %d",i,0,3) ;
473  smc->os.hwm.descr_p = (union s_fp_descr volatile *)
474  ((char *)smc->os.hwm.descr_p+i) ;
475  }
476  DB_GEN("pt to descr area = %x",(void *)smc->os.hwm.descr_p,0,3) ;
477 
478  init_txd_ring(smc) ;
479  init_rxd_ring(smc) ;
480  mac_drv_fill_rxd(smc) ;
481 
482  init_plc(smc) ;
483 }
484 
485 
/*
 * Take one SMbuf from the free list for the SMT.
 * Returns NULL when the pool is exhausted.
 */
SMbuf *smt_get_mbuf(struct s_smc *smc)
{
	register SMbuf	*mb ;

	/* head of the free list: per-adapter pool or the common pool */
#ifndef	COMMON_MB_POOL
	mb = smc->os.hwm.mbuf_pool.mb_free ;
#else
	mb = mb_free ;
#endif
	if (mb) {
		/* unlink it from the free list */
#ifndef	COMMON_MB_POOL
		smc->os.hwm.mbuf_pool.mb_free = mb->sm_next ;
#else
		mb_free = mb->sm_next ;
#endif
		/* reset data offset and use count for the new owner */
		mb->sm_off = 8 ;
		mb->sm_use_count = 1 ;
	}
	DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ;
	return mb;	/* May be NULL */
}
507 
508 void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
509 {
510 
511  if (mb) {
512  mb->sm_use_count-- ;
513  DB_GEN("free_mbuf: sm_use_count = %d",mb->sm_use_count,0,3) ;
514  /*
515  * If the use_count is != zero the MBuf is queued
516  * more than once and must not queued into the
517  * free MBuf queue
518  */
519  if (!mb->sm_use_count) {
520  DB_GEN("free SMbuf: mb = %x",(void *)mb,0,3) ;
521 #ifndef COMMON_MB_POOL
522  mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ;
523  smc->os.hwm.mbuf_pool.mb_free = mb ;
524 #else
525  mb->sm_next = mb_free ;
526  mb_free = mb ;
527 #endif
528  }
529  }
530  else
532 }
533 
534 
535 /*
536  * BEGIN_MANUAL_ENTRY(mac_drv_repair_descr)
537  * void mac_drv_repair_descr(smc)
538  *
539  * function called from SMT (HWM / hwmtm.c)
540  * The BMU is idle when this function is called.
541  * Mac_drv_repair_descr sets up the physical address
542  * for all receive and transmit queues where the BMU
543  * should continue.
544  * It may be that the BMU was reseted during a fragmented
545  * transfer. In this case there are some fragments which will
546  * never completed by the BMU. The OWN bit of this fragments
547  * must be switched to be owned by the host.
548  *
549  * Give a start command to the receive BMU.
550  * Start the transmit BMUs if transmit frames pending.
551  *
552  * END_MANUAL_ENTRY
553  */
554 void mac_drv_repair_descr(struct s_smc *smc)
555 {
556  u_long phys ;
557 
558  if (smc->hw.hw_state != STOPPED) {
559  SK_BREAK() ;
561  return ;
562  }
563 
564  /*
565  * repair tx queues: don't start
566  */
567  phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_A0]) ;
568  outpd(ADDR(B5_XA_DA),phys) ;
569  if (smc->hw.fp.tx_q[QUEUE_A0].tx_used) {
570  outpd(ADDR(B0_XA_CSR),CSR_START) ;
571  }
572  phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_S]) ;
573  outpd(ADDR(B5_XS_DA),phys) ;
574  if (smc->hw.fp.tx_q[QUEUE_S].tx_used) {
575  outpd(ADDR(B0_XS_CSR),CSR_START) ;
576  }
577 
578  /*
579  * repair rx queues
580  */
581  phys = repair_rxd_ring(smc,smc->hw.fp.rx[QUEUE_R1]) ;
582  outpd(ADDR(B4_R1_DA),phys) ;
584 }
585 
586 static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
587 {
588  int i ;
589  int tx_used ;
590  u_long phys ;
591  u_long tbctrl ;
592  struct s_smt_fp_txd volatile *t ;
593 
594  SK_UNUSED(smc) ;
595 
596  t = queue->tx_curr_get ;
597  tx_used = queue->tx_used ;
598  for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
599  t = t->txd_next ;
600  }
601  phys = le32_to_cpu(t->txd_ntdadr) ;
602 
603  t = queue->tx_curr_get ;
604  while (tx_used) {
606  tbctrl = le32_to_cpu(t->txd_tbctrl) ;
607 
608  if (tbctrl & BMU_OWN) {
609  if (tbctrl & BMU_STF) {
610  break ; /* exit the loop */
611  }
612  else {
613  /*
614  * repair the descriptor
615  */
616  t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
617  }
618  }
619  phys = le32_to_cpu(t->txd_ntdadr) ;
621  t = t->txd_next ;
622  tx_used-- ;
623  }
624  return phys;
625 }
626 
627 /*
628  * Repairs the receive descriptor ring and returns the physical address
629  * where the BMU should continue working.
630  *
631  * o The physical address where the BMU was stopped has to be
632  * determined. This is the next RxD after rx_curr_get with an OWN
633  * bit set.
634  * o The BMU should start working at beginning of the next frame.
635  * RxDs with an OWN bit set but with a reset STF bit should be
636  * skipped and owned by the driver (OWN = 0).
637  */
638 static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
639 {
640  int i ;
641  int rx_used ;
642  u_long phys ;
643  u_long rbctrl ;
644  struct s_smt_fp_rxd volatile *r ;
645 
646  SK_UNUSED(smc) ;
647 
648  r = queue->rx_curr_get ;
649  rx_used = queue->rx_used ;
650  for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
651  r = r->rxd_next ;
652  }
653  phys = le32_to_cpu(r->rxd_nrdadr) ;
654 
655  r = queue->rx_curr_get ;
656  while (rx_used) {
658  rbctrl = le32_to_cpu(r->rxd_rbctrl) ;
659 
660  if (rbctrl & BMU_OWN) {
661  if (rbctrl & BMU_STF) {
662  break ; /* exit the loop */
663  }
664  else {
665  /*
666  * repair the descriptor
667  */
668  r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
669  }
670  }
671  phys = le32_to_cpu(r->rxd_nrdadr) ;
673  r = r->rxd_next ;
674  rx_used-- ;
675  }
676  return phys;
677 }
678 
679 
680 /*
681  -------------------------------------------------------------
682  INTERRUPT SERVICE ROUTINE:
683  -------------------------------------------------------------
684 */
685 
686 /*
687  * BEGIN_MANUAL_ENTRY(fddi_isr)
688  * void fddi_isr(smc)
689  *
690  * function DOWNCALL (drvsr.c)
691  * interrupt service routine, handles the interrupt requests
692  * generated by the FDDI adapter.
693  *
694  * NOTE: The operating system dependent module must guarantee that the
695  * interrupts of the adapter are disabled when it calls fddi_isr.
696  *
697  * About the USE_BREAK_ISR mechanism:
698  *
699  * The main requirement of this mechanism is to force a timer IRQ when
700  * leaving process_receive() with leave_isr set. process_receive() may
701  * be called at any time from anywhere!
702  * To be sure we don't miss such an event we set 'force_irq' by default.
703  * We have to force a Timer IRQ if 'smc->os.hwm.leave_isr' AND
704  * 'force_irq' are set. 'force_irq' may be reset if a receive complete
705  * IRQ is pending.
706  *
707  * END_MANUAL_ENTRY
708  */
709 void fddi_isr(struct s_smc *smc)
710 {
711  u_long is ; /* ISR source */
712  u_short stu, stl ;
713  SMbuf *mb ;
714 
715 #ifdef USE_BREAK_ISR
716  int force_irq ;
717 #endif
718 
719 #ifdef ODI2
720  if (smc->os.hwm.rx_break) {
721  mac_drv_fill_rxd(smc) ;
722  if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) {
723  smc->os.hwm.rx_break = 0 ;
724  process_receive(smc) ;
725  }
726  else {
727  smc->os.hwm.detec_count = 0 ;
728  smt_force_irq(smc) ;
729  }
730  }
731 #endif
732  smc->os.hwm.isr_flag = TRUE ;
733 
734 #ifdef USE_BREAK_ISR
735  force_irq = TRUE ;
736  if (smc->os.hwm.leave_isr) {
737  smc->os.hwm.leave_isr = FALSE ;
738  process_receive(smc) ;
739  }
740 #endif
741 
742  while ((is = GET_ISR() & ISR_MASK)) {
743  NDD_TRACE("CH0B",is,0,0) ;
744  DB_GEN("ISA = 0x%x",is,0,7) ;
745 
746  if (is & IMASK_SLOW) {
747  NDD_TRACE("CH1b",is,0,0) ;
748  if (is & IS_PLINT1) { /* PLC1 */
749  plc1_irq(smc) ;
750  }
751  if (is & IS_PLINT2) { /* PLC2 */
752  plc2_irq(smc) ;
753  }
754  if (is & IS_MINTR1) { /* FORMAC+ STU1(U/L) */
755  stu = inpw(FM_A(FM_ST1U)) ;
756  stl = inpw(FM_A(FM_ST1L)) ;
757  DB_GEN("Slow transmit complete",0,0,6) ;
758  mac1_irq(smc,stu,stl) ;
759  }
760  if (is & IS_MINTR2) { /* FORMAC+ STU2(U/L) */
761  stu= inpw(FM_A(FM_ST2U)) ;
762  stl= inpw(FM_A(FM_ST2L)) ;
763  DB_GEN("Slow receive complete",0,0,6) ;
764  DB_GEN("stl = %x : stu = %x",stl,stu,7) ;
765  mac2_irq(smc,stu,stl) ;
766  }
767  if (is & IS_MINTR3) { /* FORMAC+ STU3(U/L) */
768  stu= inpw(FM_A(FM_ST3U)) ;
769  stl= inpw(FM_A(FM_ST3L)) ;
770  DB_GEN("FORMAC Mode Register 3",0,0,6) ;
771  mac3_irq(smc,stu,stl) ;
772  }
773  if (is & IS_TIMINT) { /* Timer 82C54-2 */
774  timer_irq(smc) ;
775 #ifdef NDIS_OS2
776  force_irq_pending = 0 ;
777 #endif
778  /*
779  * out of RxD detection
780  */
781  if (++smc->os.hwm.detec_count > 4) {
782  /*
783  * check out of RxD condition
784  */
785  process_receive(smc) ;
786  }
787  }
788  if (is & IS_TOKEN) { /* Restricted Token Monitor */
789  rtm_irq(smc) ;
790  }
791  if (is & IS_R1_P) { /* Parity error rx queue 1 */
792  /* clear IRQ */
793  outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_P) ;
795  }
796  if (is & IS_R1_C) { /* Encoding error rx queue 1 */
797  /* clear IRQ */
798  outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_C) ;
800  }
801  if (is & IS_XA_C) { /* Encoding error async tx q */
802  /* clear IRQ */
803  outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_C) ;
805  }
806  if (is & IS_XS_C) { /* Encoding error sync tx q */
807  /* clear IRQ */
808  outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_C) ;
810  }
811  }
812 
813  /*
814  * Fast Tx complete Async/Sync Queue (BMU service)
815  */
816  if (is & (IS_XS_F|IS_XA_F)) {
817  DB_GEN("Fast tx complete queue",0,0,6) ;
818  /*
819  * clear IRQ, Note: no IRQ is lost, because
820  * we always service both queues
821  */
822  outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_F) ;
823  outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_F) ;
824  mac_drv_clear_txd(smc) ;
825  llc_restart_tx(smc) ;
826  }
827 
828  /*
829  * Fast Rx Complete (BMU service)
830  */
831  if (is & IS_R1_F) {
832  DB_GEN("Fast receive complete",0,0,6) ;
833  /* clear IRQ */
834 #ifndef USE_BREAK_ISR
835  outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
836  process_receive(smc) ;
837 #else
838  process_receive(smc) ;
839  if (smc->os.hwm.leave_isr) {
840  force_irq = FALSE ;
841  } else {
842  outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
843  process_receive(smc) ;
844  }
845 #endif
846  }
847 
848 #ifndef NDIS_OS2
849  while ((mb = get_llc_rx(smc))) {
850  smt_to_llc(smc,mb) ;
851  }
852 #else
853  if (offDepth)
854  post_proc() ;
855 
856  while (!offDepth && (mb = get_llc_rx(smc))) {
857  smt_to_llc(smc,mb) ;
858  }
859 
860  if (!offDepth && smc->os.hwm.rx_break) {
861  process_receive(smc) ;
862  }
863 #endif
864  if (smc->q.ev_get != smc->q.ev_put) {
865  NDD_TRACE("CH2a",0,0,0) ;
866  ev_dispatcher(smc) ;
867  }
868 #ifdef NDIS_OS2
869  post_proc() ;
870  if (offDepth) { /* leave fddi_isr because */
871  break ; /* indications not allowed */
872  }
873 #endif
874 #ifdef USE_BREAK_ISR
875  if (smc->os.hwm.leave_isr) {
876  break ; /* leave fddi_isr */
877  }
878 #endif
879 
880  /* NOTE: when the isr is left, no rx is pending */
881  } /* end of interrupt source polling loop */
882 
883 #ifdef USE_BREAK_ISR
884  if (smc->os.hwm.leave_isr && force_irq) {
885  smt_force_irq(smc) ;
886  }
887 #endif
888  smc->os.hwm.isr_flag = FALSE ;
889  NDD_TRACE("CH0E",0,0,0) ;
890 }
891 
892 
893 /*
894  -------------------------------------------------------------
895  RECEIVE FUNCTIONS:
896  -------------------------------------------------------------
897 */
898 
899 #ifndef NDIS_OS2
900 /*
901  * BEGIN_MANUAL_ENTRY(mac_drv_rx_mode)
902  * void mac_drv_rx_mode(smc,mode)
903  *
904  * function DOWNCALL (fplus.c)
905  * Corresponding to the parameter mode, the operating system
906  * dependent module can activate several receive modes.
907  *
908  * para mode = 1: RX_ENABLE_ALLMULTI enable all multicasts
909  * = 2: RX_DISABLE_ALLMULTI disable "enable all multicasts"
910  * = 3: RX_ENABLE_PROMISC enable promiscuous
911  * = 4: RX_DISABLE_PROMISC disable promiscuous
912  * = 5: RX_ENABLE_NSA enable rec. of all NSA frames
913  * (disabled after 'driver reset' & 'set station address')
914  * = 6: RX_DISABLE_NSA disable rec. of all NSA frames
915  *
916  * = 21: RX_ENABLE_PASS_SMT ( see description )
917  * = 22: RX_DISABLE_PASS_SMT ( " " )
918  * = 23: RX_ENABLE_PASS_NSA ( " " )
919  * = 24: RX_DISABLE_PASS_NSA ( " " )
920  * = 25: RX_ENABLE_PASS_DB ( " " )
921  * = 26: RX_DISABLE_PASS_DB ( " " )
922  * = 27: RX_DISABLE_PASS_ALL ( " " )
923  * = 28: RX_DISABLE_LLC_PROMISC ( " " )
924  * = 29: RX_ENABLE_LLC_PROMISC ( " " )
925  *
926  *
927  * RX_ENABLE_PASS_SMT / RX_DISABLE_PASS_SMT
928  *
929  * If the operating system dependent module activates the
930  * mode RX_ENABLE_PASS_SMT, the hardware module
931  * duplicates all SMT frames with the frame control
932  * FC_SMT_INFO and passes them to the LLC receive channel
933  * by calling mac_drv_rx_init.
934  * The SMT Frames which are sent by the local SMT and the NSA
935  * frames whose A- and C-Indicator is not set are also duplicated
936  * and passed.
937  * The receive mode RX_DISABLE_PASS_SMT disables the passing
938  * of SMT frames.
939  *
940  * RX_ENABLE_PASS_NSA / RX_DISABLE_PASS_NSA
941  *
942  * If the operating system dependent module activates the
943  * mode RX_ENABLE_PASS_NSA, the hardware module
944  * duplicates all NSA frames with frame control FC_SMT_NSA
945  * and a set A-Indicator and passed them to the LLC
946  * receive channel by calling mac_drv_rx_init.
947  * All NSA Frames which are sent by the local SMT
948  * are also duplicated and passed.
949  * The receive mode RX_DISABLE_PASS_NSA disables the passing
950  * of NSA frames with the A- or C-Indicator set.
951  *
952  * NOTE: For fear that the hardware module receives NSA frames with
953  * a reset A-Indicator, the operating system dependent module
954  * has to call mac_drv_rx_mode with the mode RX_ENABLE_NSA
955  * before activate the RX_ENABLE_PASS_NSA mode and after every
956  * 'driver reset' and 'set station address'.
957  *
958  * RX_ENABLE_PASS_DB / RX_DISABLE_PASS_DB
959  *
960  * If the operating system dependent module activates the
961  * mode RX_ENABLE_PASS_DB, direct BEACON frames
962  * (FC_BEACON frame control) are passed to the LLC receive
963  * channel by mac_drv_rx_init.
964  * The receive mode RX_DISABLE_PASS_DB disables the passing
965  * of direct BEACON frames.
966  *
967  * RX_DISABLE_PASS_ALL
968  *
969  * Disables all special receives modes. It is equal to
970  * call mac_drv_set_rx_mode successively with the
971  * parameters RX_DISABLE_NSA, RX_DISABLE_PASS_SMT,
972  * RX_DISABLE_PASS_NSA and RX_DISABLE_PASS_DB.
973  *
974  * RX_ENABLE_LLC_PROMISC
975  *
976  * (default) all received LLC frames and all SMT/NSA/DBEACON
977  * frames depending on the attitude of the flags
978  * PASS_SMT/PASS_NSA/PASS_DBEACON will be delivered to the
979  * LLC layer
980  *
981  * RX_DISABLE_LLC_PROMISC
982  *
983  * all received SMT/NSA/DBEACON frames depending on the
984  * attitude of the flags PASS_SMT/PASS_NSA/PASS_DBEACON
985  * will be delivered to the LLC layer.
986  * all received LLC frames with a directed address, Multicast
987  * or Broadcast address will be delivered to the LLC
988  * layer too.
989  *
990  * END_MANUAL_ENTRY
991  */
992 void mac_drv_rx_mode(struct s_smc *smc, int mode)
993 {
994  switch(mode) {
995  case RX_ENABLE_PASS_SMT:
996  smc->os.hwm.pass_SMT = TRUE ;
997  break ;
998  case RX_DISABLE_PASS_SMT:
999  smc->os.hwm.pass_SMT = FALSE ;
1000  break ;
1001  case RX_ENABLE_PASS_NSA:
1002  smc->os.hwm.pass_NSA = TRUE ;
1003  break ;
1004  case RX_DISABLE_PASS_NSA:
1005  smc->os.hwm.pass_NSA = FALSE ;
1006  break ;
1007  case RX_ENABLE_PASS_DB:
1008  smc->os.hwm.pass_DB = TRUE ;
1009  break ;
1010  case RX_DISABLE_PASS_DB:
1011  smc->os.hwm.pass_DB = FALSE ;
1012  break ;
1013  case RX_DISABLE_PASS_ALL:
1014  smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = FALSE ;
1015  smc->os.hwm.pass_DB = FALSE ;
1016  smc->os.hwm.pass_llc_promisc = TRUE ;
1018  break ;
1020  smc->os.hwm.pass_llc_promisc = FALSE ;
1021  break ;
1022  case RX_ENABLE_LLC_PROMISC:
1023  smc->os.hwm.pass_llc_promisc = TRUE ;
1024  break ;
1025  case RX_ENABLE_ALLMULTI:
1026  case RX_DISABLE_ALLMULTI:
1027  case RX_ENABLE_PROMISC:
1028  case RX_DISABLE_PROMISC:
1029  case RX_ENABLE_NSA:
1030  case RX_DISABLE_NSA:
1031  default:
1032  mac_set_rx_mode(smc,mode) ;
1033  break ;
1034  }
1035 }
1036 #endif /* ifndef NDIS_OS2 */
1037 
1038 /*
1039  * process receive queue
1040  */
1041 void process_receive(struct s_smc *smc)
1042 {
1043  int i ;
1044  int n ;
1045  int frag_count ; /* number of RxDs of the curr rx buf */
1046  int used_frags ; /* number of RxDs of the curr frame */
1047  struct s_smt_rx_queue *queue ; /* points to the queue ctl struct */
1048  struct s_smt_fp_rxd volatile *r ; /* rxd pointer */
1049  struct s_smt_fp_rxd volatile *rxd ; /* first rxd of rx frame */
1050  u_long rbctrl ; /* receive buffer control word */
1051  u_long rfsw ; /* receive frame status word */
1052  u_short rx_used ;
1053  u_char far *virt ;
1054  char far *data ;
1055  SMbuf *mb ;
1056  u_char fc ; /* Frame control */
1057  int len ; /* Frame length */
1058 
1059  smc->os.hwm.detec_count = 0 ;
1060  queue = smc->hw.fp.rx[QUEUE_R1] ;
1061  NDD_TRACE("RHxB",0,0,0) ;
1062  for ( ; ; ) {
1063  r = queue->rx_curr_get ;
1064  rx_used = queue->rx_used ;
1065  frag_count = 0 ;
1066 
1067 #ifdef USE_BREAK_ISR
1068  if (smc->os.hwm.leave_isr) {
1069  goto rx_end ;
1070  }
1071 #endif
1072 #ifdef NDIS_OS2
1073  if (offDepth) {
1074  smc->os.hwm.rx_break = 1 ;
1075  goto rx_end ;
1076  }
1077  smc->os.hwm.rx_break = 0 ;
1078 #endif
1079 #ifdef ODI2
1080  if (smc->os.hwm.rx_break) {
1081  goto rx_end ;
1082  }
1083 #endif
1084  n = 0 ;
1085  do {
1086  DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ;
1088  rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));
1089 
1090  if (rbctrl & BMU_OWN) {
1091  NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
1092  DB_RX("End of RxDs",0,0,4) ;
1093  goto rx_end ;
1094  }
1095  /*
1096  * out of RxD detection
1097  */
1098  if (!rx_used) {
1099  SK_BREAK() ;
1101  /* Either we don't have an RxD or all
1102  * RxDs are filled. Therefore it's allowed
1103  * for to set the STOPPED flag */
1104  smc->hw.hw_state = STOPPED ;
1105  mac_drv_clear_rx_queue(smc) ;
1106  smc->hw.hw_state = STARTED ;
1107  mac_drv_fill_rxd(smc) ;
1108  smc->os.hwm.detec_count = 0 ;
1109  goto rx_end ;
1110  }
1111  rfsw = le32_to_cpu(r->rxd_rfsw) ;
1112  if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
1113  /*
1114  * The BMU_STF bit is deleted, 1 frame is
1115  * placed into more than 1 rx buffer
1116  *
1117  * skip frame by setting the rx len to 0
1118  *
1119  * if fragment count == 0
1120  * The missing STF bit belongs to the
1121  * current frame, search for the
1122  * EOF bit to complete the frame
1123  * else
1124  * the fragment belongs to the next frame,
1125  * exit the loop and process the frame
1126  */
1127  SK_BREAK() ;
1128  rfsw = 0 ;
1129  if (frag_count) {
1130  break ;
1131  }
1132  }
1133  n += rbctrl & 0xffff ;
1134  r = r->rxd_next ;
1135  frag_count++ ;
1136  rx_used-- ;
1137  } while (!(rbctrl & BMU_EOF)) ;
1138  used_frags = frag_count ;
1139  DB_RX("EOF set in RxD, used_frags = %d ",used_frags,0,5) ;
1140 
1141  /* may be next 2 DRV_BUF_FLUSH() can be skipped, because */
1142  /* BMU_ST_BUF will not be changed by the ASIC */
1144  while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
1145  DB_RX("Check STF bit in %x",(void *)r,0,5) ;
1146  r = r->rxd_next ;
1148  frag_count++ ;
1149  rx_used-- ;
1150  }
1151  DB_RX("STF bit found",0,0,5) ;
1152 
1153  /*
1154  * The received frame is finished for the process receive
1155  */
1156  rxd = queue->rx_curr_get ;
1157  queue->rx_curr_get = r ;
1158  queue->rx_free += frag_count ;
1159  queue->rx_used = rx_used ;
1160 
1161  /*
1162  * ASIC Errata no. 7 (STF - Bit Bug)
1163  */
1164  rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;
1165 
1166  for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
1167  DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
1168  dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
1169  }
1170  smc->hw.fp.err_stats.err_valid++ ;
1171  smc->mib.m[MAC0].fddiMACCopied_Ct++ ;
1172 
1173  /* the length of the data including the FC */
1174  len = (rfsw & RD_LENGTH) - 4 ;
1175 
1176  DB_RX("frame length = %d",len,0,4) ;
1177  /*
1178  * check the frame_length and all error flags
1179  */
1180  if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
1181  if (rfsw & RD_S_MSRABT) {
1182  DB_RX("Frame aborted by the FORMAC",0,0,2) ;
1183  smc->hw.fp.err_stats.err_abort++ ;
1184  }
1185  /*
1186  * check frame status
1187  */
1188  if (rfsw & RD_S_SEAC2) {
1189  DB_RX("E-Indicator set",0,0,2) ;
1190  smc->hw.fp.err_stats.err_e_indicator++ ;
1191  }
1192  if (rfsw & RD_S_SFRMERR) {
1193  DB_RX("CRC error",0,0,2) ;
1194  smc->hw.fp.err_stats.err_crc++ ;
1195  }
1196  if (rfsw & RX_FS_IMPL) {
1197  DB_RX("Implementer frame",0,0,2) ;
1198  smc->hw.fp.err_stats.err_imp_frame++ ;
1199  }
1200  goto abort_frame ;
1201  }
1202  if (len > FDDI_RAW_MTU-4) {
1203  DB_RX("Frame too long error",0,0,2) ;
1204  smc->hw.fp.err_stats.err_too_long++ ;
1205  goto abort_frame ;
1206  }
1207  /*
1208  * SUPERNET 3 Bug: FORMAC delivers status words
 * of aborted frames to the BMU
1210  */
1211  if (len <= 4) {
1212  DB_RX("Frame length = 0",0,0,2) ;
1213  goto abort_frame ;
1214  }
1215 
1216  if (len != (n-4)) {
1217  DB_RX("BMU: rx len differs: [%d:%d]",len,n,4);
1218  smc->os.hwm.rx_len_error++ ;
1219  goto abort_frame ;
1220  }
1221 
1222  /*
1223  * Check SA == MA
1224  */
1225  virt = (u_char far *) rxd->rxd_virt ;
1226  DB_RX("FC = %x",*virt,0,2) ;
1227  if (virt[12] == MA[5] &&
1228  virt[11] == MA[4] &&
1229  virt[10] == MA[3] &&
1230  virt[9] == MA[2] &&
1231  virt[8] == MA[1] &&
1232  (virt[7] & ~GROUP_ADDR_BIT) == MA[0]) {
1233  goto abort_frame ;
1234  }
1235 
1236  /*
1237  * test if LLC frame
1238  */
1239  if (rfsw & RX_FS_LLC) {
1240  /*
1241  * if pass_llc_promisc is disable
1242  * if DA != Multicast or Broadcast or DA!=MA
1243  * abort the frame
1244  */
1245  if (!smc->os.hwm.pass_llc_promisc) {
1246  if(!(virt[1] & GROUP_ADDR_BIT)) {
1247  if (virt[6] != MA[5] ||
1248  virt[5] != MA[4] ||
1249  virt[4] != MA[3] ||
1250  virt[3] != MA[2] ||
1251  virt[2] != MA[1] ||
1252  virt[1] != MA[0]) {
1253  DB_RX("DA != MA and not multi- or broadcast",0,0,2) ;
1254  goto abort_frame ;
1255  }
1256  }
1257  }
1258 
1259  /*
1260  * LLC frame received
1261  */
1262  DB_RX("LLC - receive",0,0,4) ;
1263  mac_drv_rx_complete(smc,rxd,frag_count,len) ;
1264  }
1265  else {
1266  if (!(mb = smt_get_mbuf(smc))) {
1267  smc->hw.fp.err_stats.err_no_buf++ ;
1268  DB_RX("No SMbuf; receive terminated",0,0,4) ;
1269  goto abort_frame ;
1270  }
1271  data = smtod(mb,char *) - 1 ;
1272 
1273  /*
1274  * copy the frame into a SMT_MBuf
1275  */
1276 #ifdef USE_OS_CPY
1277  hwm_cpy_rxd2mb(rxd,data,len) ;
1278 #else
1279  for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
1280  n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
1281  DB_RX("cp SMT frame to mb: len = %d",n,0,6) ;
1282  memcpy(data,r->rxd_virt,n) ;
1283  data += n ;
1284  }
1285  data = smtod(mb,char *) - 1 ;
1286 #endif
1287  fc = *(char *)mb->sm_data = *data ;
1288  mb->sm_len = len - 1 ; /* len - fc */
1289  data++ ;
1290 
1291  /*
1292  * SMT frame received
1293  */
1294  switch(fc) {
1295  case FC_SMT_INFO :
1296  smc->hw.fp.err_stats.err_smt_frame++ ;
1297  DB_RX("SMT frame received ",0,0,5) ;
1298 
1299  if (smc->os.hwm.pass_SMT) {
1300  DB_RX("pass SMT frame ",0,0,5) ;
1301  mac_drv_rx_complete(smc, rxd,
1302  frag_count,len) ;
1303  }
1304  else {
1305  DB_RX("requeue RxD",0,0,5) ;
1306  mac_drv_requeue_rxd(smc,rxd,frag_count);
1307  }
1308 
1309  smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
1310  break ;
1311  case FC_SMT_NSA :
1312  smc->hw.fp.err_stats.err_smt_frame++ ;
1313  DB_RX("SMT frame received ",0,0,5) ;
1314 
1315  /* if pass_NSA set pass the NSA frame or */
1316  /* pass_SMT set and the A-Indicator */
1317  /* is not set, pass the NSA frame */
1318  if (smc->os.hwm.pass_NSA ||
1319  (smc->os.hwm.pass_SMT &&
1320  !(rfsw & A_INDIC))) {
1321  DB_RX("pass SMT frame ",0,0,5) ;
1322  mac_drv_rx_complete(smc, rxd,
1323  frag_count,len) ;
1324  }
1325  else {
1326  DB_RX("requeue RxD",0,0,5) ;
1327  mac_drv_requeue_rxd(smc,rxd,frag_count);
1328  }
1329 
1330  smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
1331  break ;
1332  case FC_BEACON :
1333  if (smc->os.hwm.pass_DB) {
1334  DB_RX("pass DB frame ",0,0,5) ;
1335  mac_drv_rx_complete(smc, rxd,
1336  frag_count,len) ;
1337  }
1338  else {
1339  DB_RX("requeue RxD",0,0,5) ;
1340  mac_drv_requeue_rxd(smc,rxd,frag_count);
1341  }
1342  smt_free_mbuf(smc,mb) ;
1343  break ;
1344  default :
1345  /*
1346  * unknown FC abord the frame
1347  */
1348  DB_RX("unknown FC error",0,0,2) ;
1349  smt_free_mbuf(smc,mb) ;
1350  DB_RX("requeue RxD",0,0,5) ;
1351  mac_drv_requeue_rxd(smc,rxd,frag_count) ;
1352  if ((fc & 0xf0) == FC_MAC)
1353  smc->hw.fp.err_stats.err_mac_frame++ ;
1354  else
1355  smc->hw.fp.err_stats.err_imp_frame++ ;
1356 
1357  break ;
1358  }
1359  }
1360 
1361  DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
1362  NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;
1363 
1364  continue ;
1365  /*--------------------------------------------------------------------*/
1366 abort_frame:
1367  DB_RX("requeue RxD",0,0,5) ;
1368  mac_drv_requeue_rxd(smc,rxd,frag_count) ;
1369 
1370  DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
1371  NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
1372  }
1373 rx_end:
1374 #ifdef ALL_RX_COMPLETE
1375  mac_drv_all_receives_complete(smc) ;
1376 #endif
1377  return ; /* lint bug: needs return detect end of function */
1378 }
1379 
1380 static void smt_to_llc(struct s_smc *smc, SMbuf *mb)
1381 {
1382  u_char fc ;
1383 
1384  DB_RX("send a queued frame to the llc layer",0,0,4) ;
1385  smc->os.hwm.r.len = mb->sm_len ;
1386  smc->os.hwm.r.mb_pos = smtod(mb,char *) ;
1387  fc = *smc->os.hwm.r.mb_pos ;
1388  (void)mac_drv_rx_init(smc,(int)mb->sm_len,(int)fc,
1389  smc->os.hwm.r.mb_pos,(int)mb->sm_len) ;
1390  smt_free_mbuf(smc,mb) ;
1391 }
1392 
1393 /*
1394  * BEGIN_MANUAL_ENTRY(hwm_rx_frag)
1395  * void hwm_rx_frag(smc,virt,phys,len,frame_status)
1396  *
1397  * function MACRO (hardware module, hwmtm.h)
1398  * This function calls dma_master for preparing the
1399  * system hardware for the DMA transfer and initializes
1400  * the current RxD with the length and the physical and
1401  * virtual address of the fragment. Furthermore, it sets the
1402  * STF and EOF bits depending on the frame status byte,
1403  * switches the OWN flag of the RxD, so that it is owned by the
1404  * adapter and issues an rx_start.
1405  *
1406  * para virt virtual pointer to the fragment
1407  * len the length of the fragment
1408  * frame_status status of the frame, see design description
1409  *
1410  * NOTE: It is possible to call this function with a fragment length
1411  * of zero.
1412  *
1413  * END_MANUAL_ENTRY
1414  */
/*
 * hwm_rx_frag - initialize the current RxD with one receive fragment
 *
 * Stores the virtual and physical buffer address in the current RxD of
 * receive queue R1, builds the receive buffer control word (STF/EOF are
 * derived from frame_status, OWN hands the RxD to the BMU) and updates
 * the queue bookkeeping.
 *
 * virt/phys:	 virtual / DMA address of the fragment buffer
 * len:		 fragment length in bytes (may be zero)
 * frame_status: FIRST_FRAG/LAST_FRAG flags, see design description
 *
 * NOTE(review): the extracted listing is missing two lines between the
 * rbctrl store and the bookkeeping below (presumably a buffer flush and
 * the BMU start) — confirm against the upstream source.
 */
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status)
{
	struct s_smt_fp_rxd volatile *r ;
	__le32  rbctrl;

	NDD_TRACE("RHfB",virt,len,frame_status) ;
	DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ;
	r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
	r->rxd_virt = virt ;
	r->rxd_rbadr = cpu_to_le32(phys) ;
	/* bits 26/27 carry STF/EOF, bit 21 enables the start-of-frame
	 * check; OWN passes the descriptor to the adapter */
	rbctrl = cpu_to_le32( (((__u32)frame_status &
		(FIRST_FRAG|LAST_FRAG))<<26) |
		(((u_long) frame_status & FIRST_FRAG) << 21) |
		BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
	r->rxd_rbctrl = rbctrl ;

	smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
	NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
}
1439 
1440 /*
 * BEGIN_MANUAL_ENTRY(mac_drv_clear_rx_queue)
1442  *
1443  * void mac_drv_clear_rx_queue(smc)
1444  * struct s_smc *smc ;
1445  *
1446  * function DOWNCALL (hardware module, hwmtm.c)
1447  * mac_drv_clear_rx_queue is called by the OS-specific module
1448  * after it has issued a card_stop.
1449  * In this case, the frames in the receive queue are obsolete and
1450  * should be removed. For removing mac_drv_clear_rx_queue
1451  * calls dma_master for each RxD and mac_drv_clear_rxd for each
1452  * receive buffer.
1453  *
1454  * NOTE: calling sequence card_stop:
1455  * CLI_FBI(), card_stop(),
1456  * mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(),
1457  *
1458  * NOTE: The caller is responsible that the BMUs are idle
1459  * when this function is called.
1460  *
1461  * END_MANUAL_ENTRY
1462  */
1464 {
1465  struct s_smt_fp_rxd volatile *r ;
1466  struct s_smt_fp_rxd volatile *next_rxd ;
1467  struct s_smt_rx_queue *queue ;
1468  int frag_count ;
1469  int i ;
1470 
1471  if (smc->hw.hw_state != STOPPED) {
1472  SK_BREAK() ;
1474  return ;
1475  }
1476 
1477  queue = smc->hw.fp.rx[QUEUE_R1] ;
1478  DB_RX("clear_rx_queue",0,0,5) ;
1479 
1480  /*
1481  * dma_complete and mac_drv_clear_rxd for all RxDs / receive buffers
1482  */
1483  r = queue->rx_curr_get ;
1484  while (queue->rx_used) {
1486  DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ;
1487  r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
1488  frag_count = 1 ;
1490  r = r->rxd_next ;
1492  while (r != queue->rx_curr_put &&
1493  !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
1494  DB_RX("Check STF bit in %x",(void *)r,0,5) ;
1495  r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
1497  r = r->rxd_next ;
1499  frag_count++ ;
1500  }
1501  DB_RX("STF bit found",0,0,5) ;
1502  next_rxd = r ;
1503 
1504  for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
1505  DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
1506  dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
1507  }
1508 
1509  DB_RX("mac_drv_clear_rxd: RxD %x frag_count %d ",
1510  (void *)queue->rx_curr_get,frag_count,5) ;
1511  mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;
1512 
1513  queue->rx_curr_get = next_rxd ;
1514  queue->rx_used -= frag_count ;
1515  queue->rx_free += frag_count ;
1516  }
1517 }
1518 
1519 
1520 /*
1521  -------------------------------------------------------------
1522  SEND FUNCTIONS:
1523  -------------------------------------------------------------
1524 */
1525 
1526 /*
1527  * BEGIN_MANUAL_ENTRY(hwm_tx_init)
1528  * int hwm_tx_init(smc,fc,frag_count,frame_len,frame_status)
1529  *
1530  * function DOWN_CALL (hardware module, hwmtm.c)
1531  * hwm_tx_init checks if the frame can be sent through the
1532  * corresponding send queue.
1533  *
1534  * para fc the frame control. To determine through which
1535  * send queue the frame should be transmitted.
1536  * 0x50 - 0x57: asynchronous LLC frame
1537  * 0xD0 - 0xD7: synchronous LLC frame
1538  * 0x41, 0x4F: SMT frame to the network
1539  * 0x42: SMT frame to the network and to the local SMT
1540  * 0x43: SMT frame to the local SMT
1541  * frag_count count of the fragments for this frame
1542  * frame_len length of the frame
1543  * frame_status status of the frame, the send queue bit is already
1544  * specified
1545  *
1546  * return frame_status
1547  *
1548  * END_MANUAL_ENTRY
1549  */
1550 int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
1551  int frame_status)
1552 {
1553  NDD_TRACE("THiB",fc,frag_count,frame_len) ;
1554  smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ;
1555  smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ;
1556  smc->os.hwm.tx_len = frame_len ;
1557  DB_TX("hwm_tx_init: fc = %x, len = %d",fc,frame_len,3) ;
1558  if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
1559  frame_status |= LAN_TX ;
1560  }
1561  else {
1562  switch (fc) {
1563  case FC_SMT_INFO :
1564  case FC_SMT_NSA :
1565  frame_status |= LAN_TX ;
1566  break ;
1567  case FC_SMT_LOC :
1568  frame_status |= LOC_TX ;
1569  break ;
1570  case FC_SMT_LAN_LOC :
1571  frame_status |= LAN_TX | LOC_TX ;
1572  break ;
1573  default :
1575  }
1576  }
1577  if (!smc->hw.mac_ring_is_up) {
1578  frame_status &= ~LAN_TX ;
1579  frame_status |= RING_DOWN ;
1580  DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
1581  }
1582  if (frag_count > smc->os.hwm.tx_p->tx_free) {
1583 #ifndef NDIS_OS2
1584  mac_drv_clear_txd(smc) ;
1585  if (frag_count > smc->os.hwm.tx_p->tx_free) {
1586  DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
1587  frame_status &= ~LAN_TX ;
1588  frame_status |= OUT_OF_TXD ;
1589  }
1590 #else
1591  DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
1592  frame_status &= ~LAN_TX ;
1593  frame_status |= OUT_OF_TXD ;
1594 #endif
1595  }
1596  DB_TX("frame_status = %x",frame_status,0,3) ;
1597  NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
1598  return frame_status;
1599 }
1600 
1601 /*
1602  * BEGIN_MANUAL_ENTRY(hwm_tx_frag)
1603  * void hwm_tx_frag(smc,virt,phys,len,frame_status)
1604  *
1605  * function DOWNCALL (hardware module, hwmtm.c)
1606  * If the frame should be sent to the LAN, this function calls
1607  * dma_master, fills the current TxD with the virtual and the
1608  * physical address, sets the STF and EOF bits dependent on
1609  * the frame status, and requests the BMU to start the
1610  * transmit.
1611  * If the frame should be sent to the local SMT, an SMT_MBuf
1612  * is allocated if the FIRST_FRAG bit is set in the frame_status.
1613  * The fragment of the frame is copied into the SMT MBuf.
1614  * The function smt_received_pack is called if the LAST_FRAG
1615  * bit is set in the frame_status word.
1616  *
1617  * para virt virtual pointer to the fragment
1618  * len the length of the fragment
1619  * frame_status status of the frame, see design description
1620  *
1621  * return nothing returned, no parameter is modified
1622  *
1623  * NOTE: It is possible to invoke this macro with a fragment length
1624  * of zero.
1625  *
1626  * END_MANUAL_ENTRY
1627  */
/*
 * hwm_tx_frag - queue one fragment of the current transmit frame
 *
 * For LAN_TX frames: fills the current TxD of the selected send queue
 * with the virtual and physical fragment address, encodes the
 * STF/EOF/IRQ bits from frame_status, hands the TxD to the BMU and
 * starts the transmit.
 * For LOC_TX frames: copies the fragment into an SMT MBuf (allocated
 * on FIRST_FRAG) and passes the complete frame to smt_received_pack()
 * on LAST_FRAG.
 *
 * virt/phys:	 virtual / DMA address of the fragment
 * len:		 fragment length in bytes (may be zero)
 * frame_status: LAN_TX/LOC_TX plus FIRST_FRAG/LAST_FRAG/EN_IRQ_EOF
 */
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	__le32	tbctrl ;

	queue = smc->os.hwm.tx_p ;

	NDD_TRACE("THfB",virt,len,frame_status) ;
	/* Bug fix: AF / May 31 1999 (#missing)
	 * snmpinfo problem reported by IBM is caused by invalid
	 * t-pointer (txd) if LAN_TX is not set but LOC_TX only.
	 * Set: t = queue->tx_curr_put here !
	 */
	t = queue->tx_curr_put ;

	DB_TX("hwm_tx_frag: len = %d, frame_status = %x ",len,frame_status,2) ;
	if (frame_status & LAN_TX) {
		/* '*t' is already defined */
		DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ;
		t->txd_virt = virt ;
		t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
		t->txd_tbadr = cpu_to_le32(phys) ;
		/* FIRST_FRAG/LAST_FRAG/EN_IRQ_EOF map to the STF/EOF/IRQ
		 * control bits; OWN hands the TxD to the adapter */
		tbctrl = cpu_to_le32((((__u32)frame_status &
			(FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
			BMU_OWN|BMU_CHECK |len) ;
		t->txd_tbctrl = tbctrl ;

#ifndef	AIX
		outpd(queue->tx_bmu_ctl,CSR_START) ;
#else	/* ifndef AIX */
		/* AIX: start the BMU of the addressed send queue directly */
		if (frame_status & QUEUE_A0) {
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
		}
		else {
			outpd(ADDR(B0_XS_CSR),CSR_START) ;
		}
#endif
		queue->tx_free-- ;
		queue->tx_used++ ;
		queue->tx_curr_put = t->txd_next ;
		if (frame_status & LAST_FRAG) {
			smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		}
	}
	if (frame_status & LOC_TX) {
		DB_TX("LOC_TX: ",0,0,3) ;
		if (frame_status & FIRST_FRAG) {
			if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) {
				smc->hw.fp.err_stats.err_no_buf++ ;
				DB_TX("No SMbuf; transmit terminated",0,0,4) ;
			}
			else {
				/* -1: reserve one byte in front of the data
				 * for the frame control (FC) byte */
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
#ifdef	USE_OS_CPY
#ifdef	PASS_1ST_TXD_2_TX_COMP
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif
#endif
			}
		}
		if (smc->os.hwm.tx_mb) {
#ifndef	USE_OS_CPY
			DB_TX("copy fragment into MBuf ",0,0,3) ;
			memcpy(smc->os.hwm.tx_data,virt,len) ;
			smc->os.hwm.tx_data += len ;
#endif
			if (frame_status & LAST_FRAG) {
#ifdef	USE_OS_CPY
#ifndef PASS_1ST_TXD_2_TX_COMP
				/*
				 * hwm_cpy_txd2mb(txd,data,len) copies 'len'
				 * bytes from the virtual pointer in 'rxd'
				 * to 'data'. The virtual pointer of the
				 * os-specific tx-buffer should be written
				 * in the LAST txd.
				 */
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif	/* nPASS_1ST_TXD_2_TX_COMP */
#endif	/* USE_OS_CPY */
				/* first MBuf data byte is the FC byte */
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
				*(char *)smc->os.hwm.tx_mb->sm_data =
					*smc->os.hwm.tx_data ;
				smc->os.hwm.tx_data++ ;
				smc->os.hwm.tx_mb->sm_len =
					smc->os.hwm.tx_len - 1 ;
				DB_TX("pass LLC frame to SMT ",0,0,3) ;
				smt_received_pack(smc,smc->os.hwm.tx_mb,
					RD_FS_LOCAL) ;
			}
		}
	}
	NDD_TRACE("THfE",t,queue->tx_free,0) ;
}
1729 
1730 
1731 /*
1732  * queues a receive for later send
1733  */
1734 static void queue_llc_rx(struct s_smc *smc, SMbuf *mb)
1735 {
1736  DB_GEN("queue_llc_rx: mb = %x",(void *)mb,0,4) ;
1737  smc->os.hwm.queued_rx_frames++ ;
1738  mb->sm_next = (SMbuf *)NULL ;
1739  if (smc->os.hwm.llc_rx_pipe == NULL) {
1740  smc->os.hwm.llc_rx_pipe = mb ;
1741  }
1742  else {
1743  smc->os.hwm.llc_rx_tail->sm_next = mb ;
1744  }
1745  smc->os.hwm.llc_rx_tail = mb ;
1746 
1747  /*
1748  * force an timer IRQ to receive the data
1749  */
1750  if (!smc->os.hwm.isr_flag) {
1751  smt_force_irq(smc) ;
1752  }
1753 }
1754 
1755 /*
1756  * get a SMbuf from the llc_rx_queue
1757  */
1758 static SMbuf *get_llc_rx(struct s_smc *smc)
1759 {
1760  SMbuf *mb ;
1761 
1762  if ((mb = smc->os.hwm.llc_rx_pipe)) {
1763  smc->os.hwm.queued_rx_frames-- ;
1764  smc->os.hwm.llc_rx_pipe = mb->sm_next ;
1765  }
1766  DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ;
1767  return mb;
1768 }
1769 
1770 /*
 * queues a transmit SMT MBuf for the time while the MBuf is
 * queued in the TxD ring
1773  */
1774 static void queue_txd_mb(struct s_smc *smc, SMbuf *mb)
1775 {
1776  DB_GEN("_rx: queue_txd_mb = %x",(void *)mb,0,4) ;
1777  smc->os.hwm.queued_txd_mb++ ;
1778  mb->sm_next = (SMbuf *)NULL ;
1779  if (smc->os.hwm.txd_tx_pipe == NULL) {
1780  smc->os.hwm.txd_tx_pipe = mb ;
1781  }
1782  else {
1783  smc->os.hwm.txd_tx_tail->sm_next = mb ;
1784  }
1785  smc->os.hwm.txd_tx_tail = mb ;
1786 }
1787 
1788 /*
1789  * get a SMbuf from the txd_tx_queue
1790  */
1791 static SMbuf *get_txd_mb(struct s_smc *smc)
1792 {
1793  SMbuf *mb ;
1794 
1795  if ((mb = smc->os.hwm.txd_tx_pipe)) {
1796  smc->os.hwm.queued_txd_mb-- ;
1797  smc->os.hwm.txd_tx_pipe = mb->sm_next ;
1798  }
1799  DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ;
1800  return mb;
1801 }
1802 
1803 /*
1804  * SMT Send function
1805  */
1806 void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
1807 {
1808  char far *data ;
1809  int len ;
1810  int n ;
1811  int i ;
1812  int frag_count ;
1813  int frame_status ;
1814  SK_LOC_DECL(char far,*virt[3]) ;
1815  int frag_len[3] ;
1816  struct s_smt_tx_queue *queue ;
1817  struct s_smt_fp_txd volatile *t ;
1818  u_long phys ;
1819  __le32 tbctrl;
1820 
1821  NDD_TRACE("THSB",mb,fc,0) ;
1822  DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ;
1823 
1824  mb->sm_off-- ; /* set to fc */
1825  mb->sm_len++ ; /* + fc */
1826  data = smtod(mb,char *) ;
1827  *data = fc ;
1828  if (fc == FC_SMT_LOC)
1829  *data = FC_SMT_INFO ;
1830 
1831  /*
1832  * determine the frag count and the virt addresses of the frags
1833  */
1834  frag_count = 0 ;
1835  len = mb->sm_len ;
1836  while (len) {
1837  n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ;
1838  if (n >= len) {
1839  n = len ;
1840  }
1841  DB_TX("frag: virt/len = 0x%x/%d ",(void *)data,n,5) ;
1842  virt[frag_count] = data ;
1843  frag_len[frag_count] = n ;
1844  frag_count++ ;
1845  len -= n ;
1846  data += n ;
1847  }
1848 
1849  /*
1850  * determine the frame status
1851  */
1852  queue = smc->hw.fp.tx[QUEUE_A0] ;
1853  if (fc == FC_BEACON || fc == FC_SMT_LOC) {
1854  frame_status = LOC_TX ;
1855  }
1856  else {
1857  frame_status = LAN_TX ;
1858  if ((smc->os.hwm.pass_NSA &&(fc == FC_SMT_NSA)) ||
1859  (smc->os.hwm.pass_SMT &&(fc == FC_SMT_INFO)))
1860  frame_status |= LOC_TX ;
1861  }
1862 
1863  if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
1864  frame_status &= ~LAN_TX;
1865  if (frame_status) {
1866  DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
1867  }
1868  else {
1869  DB_TX("Ring is down: terminate transmission",0,0,2) ;
1870  smt_free_mbuf(smc,mb) ;
1871  return ;
1872  }
1873  }
1874  DB_TX("frame_status = 0x%x ",frame_status,0,5) ;
1875 
1876  if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) {
1877  mb->sm_use_count = 2 ;
1878  }
1879 
1880  if (frame_status & LAN_TX) {
1881  t = queue->tx_curr_put ;
1882  frame_status |= FIRST_FRAG ;
1883  for (i = 0; i < frag_count; i++) {
1884  DB_TX("init TxD = 0x%x",(void *)t,0,5) ;
1885  if (i == frag_count-1) {
1886  frame_status |= LAST_FRAG ;
1888  (((__u32)(mb->sm_len-1)&3) << 27)) ;
1889  }
1890  t->txd_virt = virt[i] ;
1891  phys = dma_master(smc, (void far *)virt[i],
1892  frag_len[i], DMA_RD|SMT_BUF) ;
1893  t->txd_tbadr = cpu_to_le32(phys) ;
1894  tbctrl = cpu_to_le32((((__u32)frame_status &
1895  (FIRST_FRAG|LAST_FRAG)) << 26) |
1896  BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
1897  t->txd_tbctrl = tbctrl ;
1898 #ifndef AIX
1900  outpd(queue->tx_bmu_ctl,CSR_START) ;
1901 #else
1903  outpd(ADDR(B0_XA_CSR),CSR_START) ;
1904 #endif
1905  frame_status &= ~FIRST_FRAG ;
1906  queue->tx_curr_put = t = t->txd_next ;
1907  queue->tx_free-- ;
1908  queue->tx_used++ ;
1909  }
1910  smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
1911  queue_txd_mb(smc,mb) ;
1912  }
1913 
1914  if (frame_status & LOC_TX) {
1915  DB_TX("pass Mbuf to LLC queue",0,0,5) ;
1916  queue_llc_rx(smc,mb) ;
1917  }
1918 
1919  /*
1920  * We need to unqueue the free SMT_MBUFs here, because it may
1921  * be that the SMT want's to send more than 1 frame for one down call
1922  */
1923  mac_drv_clear_txd(smc) ;
1924  NDD_TRACE("THSE",t,queue->tx_free,frag_count) ;
1925 }
1926 
1927 /* BEGIN_MANUAL_ENTRY(mac_drv_clear_txd)
1928  * void mac_drv_clear_txd(smc)
1929  *
1930  * function DOWNCALL (hardware module, hwmtm.c)
1931  * mac_drv_clear_txd searches in both send queues for TxD's
1932  * which were finished by the adapter. It calls dma_complete
1933  * for each TxD. If the last fragment of an LLC frame is
1934  * reached, it calls mac_drv_tx_complete to release the
1935  * send buffer.
1936  *
1937  * return nothing
1938  *
1939  * END_MANUAL_ENTRY
1940  */
/*
 * mac_drv_clear_txd - release all TxDs already processed by the adapter
 *
 * Walks both send queues (sync and async).  For each completed frame
 * (descriptors whose OWN bit is back with the host, up to EOF) it
 * calls dma_complete for every TxD; SMT frames get their queued MBuf
 * freed, LLC frames are reported via mac_drv_tx_complete so the OS
 * can release the send buffer.
 */
static void mac_drv_clear_txd(struct s_smc *smc)
{
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t1 ;
	struct s_smt_fp_txd volatile *t2 = NULL ;
	SMbuf *mb ;
	u_long	tbctrl ;
	int i ;
	int frag_count ;
	int n ;

	NDD_TRACE("THcB",0,0,0) ;
	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t1 = queue->tx_curr_get ;
		DB_TX("clear_txd: QUEUE = %d (0=sync/1=async)",i,0,5) ;

		for ( ; ; ) {
			frag_count = 0 ;

			/* scan forward until the EOF TxD of the frame */
			do {
				DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ;
				tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));

				/* stop at a TxD still owned by the BMU or
				 * when the queue is empty */
				if (tbctrl & BMU_OWN || !queue->tx_used){
					DB_TX("End of TxDs queue %d",i,0,4) ;
					goto free_next_queue ;	/* next queue */
				}
				t1 = t1->txd_next ;
				frag_count++ ;
			} while (!(tbctrl & BMU_EOF)) ;

			/* finish the DMA mapping of every fragment */
			t1 = queue->tx_curr_get ;
			for (n = frag_count; n; n--) {
				tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
				dma_complete(smc,
					(union s_fp_descr volatile *) t1,
					(int) (DMA_RD |
					((tbctrl & BMU_SMT_TX) >> 18))) ;
				t2 = t1 ;
				t1 = t1->txd_next ;
			}

			/* tbctrl now holds the control word of the last
			 * (EOF) TxD of the frame */
			if (tbctrl & BMU_SMT_TX) {
				mb = get_txd_mb(smc) ;
				smt_free_mbuf(smc,mb) ;
			}
			else {
#ifndef PASS_1ST_TXD_2_TX_COMP
				DB_TX("mac_drv_tx_comp for TxD 0x%x",t2,0,4) ;
				mac_drv_tx_complete(smc,t2) ;
#else
				DB_TX("mac_drv_tx_comp for TxD 0x%x",
					queue->tx_curr_get,0,4) ;
				mac_drv_tx_complete(smc,queue->tx_curr_get) ;
#endif
			}
			queue->tx_curr_get = t1 ;
			queue->tx_free += frag_count ;
			queue->tx_used -= frag_count ;
		}
free_next_queue: ;
	}
	NDD_TRACE("THcE",0,0,0) ;
}
2007 
2008 /*
 * BEGIN_MANUAL_ENTRY(mac_drv_clear_tx_queue)
2010  *
2011  * void mac_drv_clear_tx_queue(smc)
2012  * struct s_smc *smc ;
2013  *
2014  * function DOWNCALL (hardware module, hwmtm.c)
2015  * mac_drv_clear_tx_queue is called from the SMT when
2016  * the RMT state machine has entered the ISOLATE state.
2017  * This function is also called by the os-specific module
2018  * after it has called the function card_stop().
2019  * In this case, the frames in the send queues are obsolete and
2020  * should be removed.
2021  *
2022  * note calling sequence:
2023  * CLI_FBI(), card_stop(),
2024  * mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(),
2025  *
2026  * NOTE: The caller is responsible that the BMUs are idle
2027  * when this function is called.
2028  *
2029  * END_MANUAL_ENTRY
2030  */
2032 {
2033  struct s_smt_fp_txd volatile *t ;
2034  struct s_smt_tx_queue *queue ;
2035  int tx_used ;
2036  int i ;
2037 
2038  if (smc->hw.hw_state != STOPPED) {
2039  SK_BREAK() ;
2041  return ;
2042  }
2043 
2044  for (i = QUEUE_S; i <= QUEUE_A0; i++) {
2045  queue = smc->hw.fp.tx[i] ;
2046  DB_TX("clear_tx_queue: QUEUE = %d (0=sync/1=async)",i,0,5) ;
2047 
2048  /*
2049  * switch the OWN bit of all pending frames to the host
2050  */
2051  t = queue->tx_curr_get ;
2052  tx_used = queue->tx_used ;
2053  while (tx_used) {
2055  DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ;
2056  t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
2058  t = t->txd_next ;
2059  tx_used-- ;
2060  }
2061  }
2062 
2063  /*
2064  * release all TxD's for both send queues
2065  */
2066  mac_drv_clear_txd(smc) ;
2067 
2068  for (i = QUEUE_S; i <= QUEUE_A0; i++) {
2069  queue = smc->hw.fp.tx[i] ;
2070  t = queue->tx_curr_get ;
2071 
2072  /*
2073  * write the phys pointer of the NEXT descriptor into the
2074  * BMU's current address descriptor pointer and set
2075  * tx_curr_get and tx_curr_put to this position
2076  */
2077  if (i == QUEUE_S) {
2078  outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
2079  }
2080  else {
2081  outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
2082  }
2083 
2084  queue->tx_curr_put = queue->tx_curr_get->txd_next ;
2085  queue->tx_curr_get = queue->tx_curr_put ;
2086  }
2087 }
2088 
2089 
2090 /*
2091  -------------------------------------------------------------
2092  TEST FUNCTIONS:
2093  -------------------------------------------------------------
2094 */
2095 
2096 #ifdef DEBUG
2097 /*
2098  * BEGIN_MANUAL_ENTRY(mac_drv_debug_lev)
2099  * void mac_drv_debug_lev(smc,flag,lev)
2100  *
2101  * function DOWNCALL (drvsr.c)
2102  * To get a special debug info the user can assign a debug level
2103  * to any debug flag.
2104  *
2105  * para flag debug flag, possible values are:
2106  * = 0: reset all debug flags (the defined level is
2107  * ignored)
2108  * = 1: debug.d_smtf
2109  * = 2: debug.d_smt
2110  * = 3: debug.d_ecm
2111  * = 4: debug.d_rmt
2112  * = 5: debug.d_cfm
2113  * = 6: debug.d_pcm
2114  *
2115  * = 10: debug.d_os.hwm_rx (hardware module receive path)
2116  * = 11: debug.d_os.hwm_tx(hardware module transmit path)
2117  * = 12: debug.d_os.hwm_gen(hardware module general flag)
2118  *
2119  * lev debug level
2120  *
2121  * END_MANUAL_ENTRY
2122  */
2123 void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev)
2124 {
2125  switch(flag) {
2126  case (int)NULL:
2127  DB_P.d_smtf = DB_P.d_smt = DB_P.d_ecm = DB_P.d_rmt = 0 ;
2128  DB_P.d_cfm = 0 ;
2129  DB_P.d_os.hwm_rx = DB_P.d_os.hwm_tx = DB_P.d_os.hwm_gen = 0 ;
2130 #ifdef SBA
2131  DB_P.d_sba = 0 ;
2132 #endif
2133 #ifdef ESS
2134  DB_P.d_ess = 0 ;
2135 #endif
2136  break ;
2137  case DEBUG_SMTF:
2138  DB_P.d_smtf = lev ;
2139  break ;
2140  case DEBUG_SMT:
2141  DB_P.d_smt = lev ;
2142  break ;
2143  case DEBUG_ECM:
2144  DB_P.d_ecm = lev ;
2145  break ;
2146  case DEBUG_RMT:
2147  DB_P.d_rmt = lev ;
2148  break ;
2149  case DEBUG_CFM:
2150  DB_P.d_cfm = lev ;
2151  break ;
2152  case DEBUG_PCM:
2153  DB_P.d_pcm = lev ;
2154  break ;
2155  case DEBUG_SBA:
2156 #ifdef SBA
2157  DB_P.d_sba = lev ;
2158 #endif
2159  break ;
2160  case DEBUG_ESS:
2161 #ifdef ESS
2162  DB_P.d_ess = lev ;
2163 #endif
2164  break ;
2165  case DB_HWM_RX:
2166  DB_P.d_os.hwm_rx = lev ;
2167  break ;
2168  case DB_HWM_TX:
2169  DB_P.d_os.hwm_tx = lev ;
2170  break ;
2171  case DB_HWM_GEN:
2172  DB_P.d_os.hwm_gen = lev ;
2173  break ;
2174  default:
2175  break ;
2176  }
2177 }
2178 #endif