/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <[email protected]>
	eric kinzie <[email protected]>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from [email protected])

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static bool disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static bool irq_coalesce = 1;
static bool sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
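
/*
 * Illustrative sketch (not part of the original source): read_prom_byte(),
 * defined later in this file, bit-bangs the serial EEPROM by writing these
 * table entries one at a time into the serial control register, roughly:
 *
 *	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
 *		he_writel(he_dev, val | readtab[i], HOST_CNTL);
 *		udelay(EEPROM_DELAY);
 *	}
 *
 * Each LOW/HIGH pair forms one clock; entries carrying SI_HIGH drive a 1
 * on the serial input, so readtab shifts out the READ opcode 0000 0011b,
 * after which clocktab supplies the clocks used to shift data in or out.
 */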

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
							unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)

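/*
 * Usage sketch (illustrative): the TSR/RSR helpers below all expand to
 * the two primitives above, e.g.
 *
 *	he_readl_tsr0(dev, cid)
 *	  == he_readl_internal(dev, CONFIG_TSRA | (cid << 3) | 0, CON_CTL_TCM)
 *
 * which posts the address through CON_CTL, spins on CON_CTL_BUSY, and
 * then returns the data latched in CON_DAT.
 */
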
/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)

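/*
 * Worked example (illustrative): with vcibits = 10, vpi 1 / vci 32 maps to
 * cid = ((1 << 10) | 32) & 0x1fff = 0x420.  __find_vcc() below performs the
 * inverse split: vpi = cid >> vcibits, vci = cid & ((1 << vcibits) - 1).
 */
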
/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev),
			 GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
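
/*
 * Worked example (illustrative): rate = 100 cps.  100 << 9 = 51200 is
 * normalized by 6 right shifts to 800 (<= 0x3ff), so exp = 6 and the
 * stored mantissa is 800 & 0x1ff = 288.  Decoding with the formula used
 * in he_init_cs_block_rcm() recovers (1 << 6) * (512 + 288) >> 9 = 100.
 */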

static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
	       CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}
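
/*
 * Illustrative note (not in the original source): the TPDRQ is a
 * power-of-two ring in which head == tail means empty, so both software
 * pointers start at tpdrq_base while the hardware tail (TPDRQ_T) is
 * written as 0 and TPDRQ_S is programmed with CONFIG_TPDRQ_SIZE - 1,
 * presumably serving as the ring's wrap mask.
 */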

static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

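	/*
	 * Illustrative numbers (assuming an OC-3 link_rate of ATM_OC3_PCR =
	 * 353207 cps): delta = 353207 / 32 = 11037, so row 0 runs from
	 * 353207 down to 353207 - 15 * 11037 = 187652 cps.  Rows 1..14 each
	 * halve the row above, and row 15 is a quarter of row 14, so the
	 * grid bottoms out at a few cps; the mapping loop below never asks
	 * for less than the 10 cps minimum payload rate.
	 */
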
	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to _divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				      CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
				     * sizeof(unsigned long), GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}
	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);

	/* rbpl_virt 64-bit pointers */
	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
				    * sizeof(struct he_buff *), GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
		  G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
		  G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
		  RBP_THRESH(CONFIG_RBPL_THRESH) |
		  RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
		  RBP_INT_ENB,
		  G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		  RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
		  G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
			  G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
			  G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
			    sizeof(struct he_rbrq), he_dev->rbrq_base,
			    he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
			    sizeof(struct he_rbp), he_dev->rbpl_base,
			    he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	pci_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	kfree(he_dev->rbpl_table);

	return -ENOMEM;
}

static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
		(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		  IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
		  IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
		he_dev->prod_id,
		he_dev->media & 0x40 ? "SM" : "MM",
		dev->esi[0],
		dev->esi[1],
		dev->esi[2],
		dev->esi[3],
		dev->esi[4],
		dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianess */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
			QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *	     HE155			  HE622
	 *
	 *	 0 ____________1023 bytes	 0 _______________________2047 bytes
	 *	  |            |		  |                   |   |
	 *	  |  utility   |		  |        rx0        |   |
	 *	 5|____________|	       255|___________________| u |
	 *	 6|            |	       256|                   | t |
	 *	  |            |		  |                   | i |
	 *	  |    rx0     |	   row	  |        tx         | l |
	 *	  |            |		  |                   | i |
	 *	  |            |	       767|___________________| t |
	 *      517|____________|	       768|                   | y |
	 *  row 518|            |		  |        rx1        |   |
	 *	  |            |	      1023|___________________|___|
	 *	  |            |
	 *	  |    tx      |
	 *	  |            |
	 *	  |            |
	 *     1535|____________|
	 *     1536|            |
	 *	  |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		  SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		  RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		  (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		  (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
		  LBARB);

	he_writel(he_dev, BANK_ON |
		  (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
		  SDRAMCON);

	he_writel(he_dev,
		  (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
		  RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		  (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
		  TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		  (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		  (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		  RX_VALVP(he_dev->vpibits) |
		  RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		  (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		  TX_VCI_MASK(he_dev->vcibits) |
		  LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		  (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
		  RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *		  tx memory
	 *	  0x0 ___________________
	 *	     |                   |
	 *	     |                   |
	 *	     |       TSRa        |
	 *	     |                   |
	 *	     |                   |
	 *     0x8000|___________________|
	 *	     |                   |
	 *	     |       TSRb        |
	 *     0xc000|___________________|
	 *	     |                   |
	 *	     |       TSRc        |
	 *     0xe000|___________________|
	 *	     |       TSRd        |
	 *     0xf000|___________________|
	 *	     |       tmABR       |
	 *    0x10000|___________________|
	 *	     |                   |
	 *	     |       tmTPD       |
	 *	     |___________________|
	 *	     |                   |
	 *			....
	 *    0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *	  0x0 ___________________
	 *	     |                   |
	 *	     |                   |
	 *	     |       RSRa        |
	 *	     |                   |
	 *	     |                   |
	 *     0x8000|___________________|
	 *	     |                   |
	 *	     |       rx0/1       |
	 *	     |        LBM        |   link lists of local
	 *	     |         tx        |   buffer memory
	 *	     |                   |
	 *     0xd000|___________________|
	 *	     |                   |
	 *	     |       rmABR       |
	 *     0xe000|___________________|
	 *	     |                   |
	 *	     |       RSRb        |
	 *	     |___________________|
	 *	     |                   |
	 *			....
	 *     0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
					   sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
			  G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
			  G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
			  G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
			  G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	struct he_buff *heb, *next;
	struct pci_dev *pci_dev;
	u32 gen_cntl_0, reg;
	u16 command;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
					he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	kfree(he_dev->rbpl_virt);
	kfree(he_dev->rbpl_table);

	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
					he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
					he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
					he_dev->tpdrq_base, he_dev->tpdrq_phys);

	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t mapping;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(mapping);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
}

#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len)-1]))

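/*
 * For reference (standard AAL5 CPCS-PDU trailer, the last 8 bytes of the
 * reassembled buffer): UU(1) CPI(1) Length(2) CRC-32(4).  AAL5_LEN()
 * therefore reads the big-endian length field at [len-6]/[len-5], and
 * TCP_CKSUM() reads the low 16 bits of the CRC word at [len-2]/[len-1],
 * where the adapter can return a TCP checksum (RSR0_TCP_CKSUM).
 */
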
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	unsigned cid, lastcid = -1;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_buff *heb, *next;
	int i;
	int pdus_assembled = 0;
	int updated = 0;

	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
		heb = he_dev->rbpl_virt[i];

		cid = RBRQ_CID(he_dev->rbrq_head);
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
				clear_bit(i, he_dev->rbpl_table);
				list_del(&heb->entry);
				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			}

			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		clear_bit(i, he_dev->rbpl_table);
		list_move_tail(&heb->entry, &he_vcc->buffers);
		he_vcc->pdu_len += heb->len;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		list_for_each_entry(heb, &he_vcc->buffers, entry)
			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);

		switch (vcc->qos.aal) {
		case ATM_AAL0:
			/* 2.10.1.5 raw cell receive */
			skb->len = ATM_AAL0_SDU;
			skb_set_tail_pointer(skb, skb->len);
			break;
		case ATM_AAL5:
			/* 2.10.1.2 aal5 receive */

			skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
			skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
			if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
				skb->ip_summed = CHECKSUM_COMPLETE;
				skb->csum = TCP_CKSUM(skb->data,
							he_vcc->pdu_len);
			}
#endif
			break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
		INIT_LIST_HEAD(&he_vcc->buffers);
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(he_dev->rbrq_head + 1));

	}

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
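
/*
 * Illustrative note (not in the original source): the status queues are
 * power-of-two sized and naturally aligned, so
 *	base | MASK(head + 1)
 * advances a ring pointer with wraparound -- the mask extracts the wrapped
 * offset and the OR with the base address re-seats it, avoiding a modulo
 * in the fast path.  The same idiom appears in he_service_tbrq() and
 * he_service_rbpl() below.
 */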

static void
he_service_tbrq(struct he_dev *he_dev, int group)
{
	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
				((unsigned long)he_dev->tbrq_base |
					he_dev->hsp->group[group].tbrq_tail);
	struct he_tpd *tpd;
	int slot, updated = 0;
	struct he_tpd *__tpd;

	/* 2.1.6 transmit buffer return queue */

	while (he_dev->tbrq_head != tbrq_tail) {
		++updated;

		HPRINTK("tbrq%d 0x%x%s%s\n",
			group,
			TBRQ_TPD(he_dev->tbrq_head),
			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
		tpd = NULL;
		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
				tpd = __tpd;
				list_del(&__tpd->entry);
				break;
			}
		}

		if (tpd == NULL) {
			hprintk("unable to locate tpd for dma buffer %x\n",
						TBRQ_TPD(he_dev->tbrq_head));
			goto next_tbrq_entry;
		}

		if (TBRQ_EOS(he_dev->tbrq_head)) {
			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
			if (tpd->vcc)
				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);

			goto next_tbrq_entry;
		}

		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
			if (tpd->iovec[slot].addr)
				pci_unmap_single(he_dev->pci_dev,
					tpd->iovec[slot].addr,
					tpd->iovec[slot].len & TPD_LEN_MASK,
							PCI_DMA_TODEVICE);
			if (tpd->iovec[slot].len & TPD_LST)
				break;

		}

		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
			if (tpd->vcc && tpd->vcc->pop)
				tpd->vcc->pop(tpd->vcc, tpd->skb);
			else
				dev_kfree_skb_any(tpd->skb);
		}

next_tbrq_entry:
		if (tpd)
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
		he_dev->tbrq_head = (struct he_tbrq *)
				((unsigned long) he_dev->tbrq_base |
					TBRQ_MASK(he_dev->tbrq_head + 1));
	}

	if (updated) {
		if (updated > he_dev->tbrq_peak)
			he_dev->tbrq_peak = updated;

		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
						G0_TBRQ_H + (group * 16));
	}
}
1898 
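 /*
  * he_service_rbpl below replenishes the (group 0) large receive buffer
  * pool: it advances the software tail toward the hardware head (table
  * 3.42 forbids tail == head), claims a free slot in rbpl_table, hangs
  * a freshly dma-mapped he_buff off it, and finally writes the new tail
  * back so the adapter can dma into the fresh buffers.
  */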
1899 static void
1900 he_service_rbpl(struct he_dev *he_dev, int group)
1901 {
1902  struct he_rbp *new_tail;
1903  struct he_rbp *rbpl_head;
1904  struct he_buff *heb;
1905  dma_addr_t mapping;
1906  int i;
1907  int moved = 0;
1908 
1909  rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1910  RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1911 
1912  for (;;) {
1913  new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1914  RBPL_MASK(he_dev->rbpl_tail+1));
1915 
1916  /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1917  if (new_tail == rbpl_head)
1918  break;
1919 
1920  i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1921  if (i > (RBPL_TABLE_SIZE - 1)) {
1922  i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, 0);
1923  if (i > (RBPL_TABLE_SIZE - 1))
1924  break;
1925  }
1926  he_dev->rbpl_hint = i + 1;
1927 
1928  heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1929  if (!heb)
1930  break;
1931  heb->mapping = mapping;
1932  list_add(&heb->entry, &he_dev->rbpl_outstanding);
1933  he_dev->rbpl_virt[i] = heb;
1934  set_bit(i, he_dev->rbpl_table);
1935  new_tail->idx = i << RBP_IDX_OFFSET;
1936  new_tail->phys = mapping + offsetof(struct he_buff, data);
1937 
1938  he_dev->rbpl_tail = new_tail;
1939  ++moved;
1940  }
1941 
1942  if (moved)
1943  he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1944 }
1945 
1946 static void
1947 he_tasklet(unsigned long data)
1948 {
1949  unsigned long flags;
1950  struct he_dev *he_dev = (struct he_dev *) data;
1951  int group, type;
1952  int updated = 0;
1953 
1954  HPRINTK("tasklet (0x%lx)\n", data);
1955  spin_lock_irqsave(&he_dev->global_lock, flags);
1956 
1957  while (he_dev->irq_head != he_dev->irq_tail) {
1958  ++updated;
1959 
1960  type = ITYPE_TYPE(he_dev->irq_head->isw);
1961  group = ITYPE_GROUP(he_dev->irq_head->isw);
1962 
1963  switch (type) {
1964  case ITYPE_RBRQ_THRESH:
1965  HPRINTK("rbrq%d threshold\n", group);
1966  /* fall through */
1967  case ITYPE_RBRQ_TIMER:
1968  if (he_service_rbrq(he_dev, group))
1969  he_service_rbpl(he_dev, group);
1970  break;
1971  case ITYPE_TBRQ_THRESH:
1972  HPRINTK("tbrq%d threshold\n", group);
1973  /* fall through */
1974  case ITYPE_TPD_COMPLETE:
1975  he_service_tbrq(he_dev, group);
1976  break;
1977  case ITYPE_RBPL_THRESH:
1978  he_service_rbpl(he_dev, group);
1979  break;
1980  case ITYPE_RBPS_THRESH:
1981  /* shouldn't happen unless small buffers enabled */
1982  break;
1983  case ITYPE_PHY:
1984  HPRINTK("phy interrupt\n");
1985 #ifdef CONFIG_ATM_HE_USE_SUNI
1986  spin_unlock_irqrestore(&he_dev->global_lock, flags);
1987  if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1988  he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1989  spin_lock_irqsave(&he_dev->global_lock, flags);
1990 #endif
1991  break;
1992  case ITYPE_OTHER:
1993  switch (type|group) {
1994  case ITYPE_PARITY:
1995  hprintk("parity error\n");
1996  break;
1997  case ITYPE_ABORT:
1998  hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1999  break;
2000  }
2001  break;
2002  case ITYPE_TYPE(ITYPE_INVALID):
2003  /* see 8.1.1 -- check all queues */
2004 
2005  HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2006 
2007  he_service_rbrq(he_dev, 0);
2008  he_service_rbpl(he_dev, 0);
2009  he_service_tbrq(he_dev, 0);
2010  break;
2011  default:
2012  hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2013  }
2014 
2015  he_dev->irq_head->isw = ITYPE_INVALID;
2016 
2017  he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2018  }
2019 
2020  if (updated) {
2021  if (updated > he_dev->irq_peak)
2022  he_dev->irq_peak = updated;
2023 
2024  he_writel(he_dev,
2025  IRQ_SIZE(CONFIG_IRQ_SIZE) |
2026  IRQ_THRESH(CONFIG_IRQ_THRESH) |
2027  IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2028  (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2029  }
2030  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2031 }
2032 
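 /*
  * he_irq_handler is the minimal top half: it latches the adapter's
  * irq queue tail, acks the interrupt, and defers all queue service
  * (rbrq/tbrq/rbpl) to he_tasklet() above.
  */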
2033 static irqreturn_t
2034 he_irq_handler(int irq, void *dev_id)
2035 {
2036  unsigned long flags;
2037  struct he_dev *he_dev = (struct he_dev *) dev_id;
2038  int handled = 0;
2039 
2040  if (he_dev == NULL)
2041  return IRQ_NONE;
2042 
2043  spin_lock_irqsave(&he_dev->global_lock, flags);
2044 
2045  he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2046  (*he_dev->irq_tailoffset << 2));
2047 
2048  if (he_dev->irq_tail == he_dev->irq_head) {
2049  HPRINTK("tailoffset not updated?\n");
2050  he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2051  ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2052  (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2053  }
2054 
2055 #ifdef DEBUG
2056  if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2057  hprintk("spurious (or shared) interrupt?\n");
2058 #endif
2059 
2060  if (he_dev->irq_head != he_dev->irq_tail) {
2061  handled = 1;
2062  tasklet_schedule(&he_dev->tasklet);
2063  he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2064  (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2065  }
2066  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2067  return IRQ_RETVAL(handled);
2068 
2069 }
2070 
2071 static __inline__ void
2072 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2073 {
2074  struct he_tpdrq *new_tail;
2075 
2076  HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2077  tpd, cid, he_dev->tpdrq_tail);
2078 
2079  /* new_tail = he_dev->tpdrq_tail; */
2080  new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2081  TPDRQ_MASK(he_dev->tpdrq_tail+1));
2082 
2083  /*
2084  * check to see if we are about to set the tail == head
2085  * if true, update the head pointer from the adapter
2086  * to see if this is really the case (reading the queue
2087  * head for every enqueue would be unnecessarily slow)
2088  */
2089 
2090  if (new_tail == he_dev->tpdrq_head) {
2091  he_dev->tpdrq_head = (struct he_tpdrq *)
2092  (((unsigned long)he_dev->tpdrq_base) |
2093  TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2094 
2095  if (new_tail == he_dev->tpdrq_head) {
2096  int slot;
2097 
2098  hprintk("tpdrq full (cid 0x%x)\n", cid);
2099  /*
2100  * FIXME
2101  * push tpd onto a transmit backlog queue
2102  * after service_tbrq, service the backlog
2103  * for now, we just drop the pdu
2104  */
2105  for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2106  if (tpd->iovec[slot].addr)
2107  pci_unmap_single(he_dev->pci_dev,
2108  tpd->iovec[slot].addr,
2109  tpd->iovec[slot].len & TPD_LEN_MASK,
2110  PCI_DMA_TODEVICE);
2111  }
2112  if (tpd->skb) {
2113  if (tpd->vcc->pop)
2114  tpd->vcc->pop(tpd->vcc, tpd->skb);
2115  else
2116  dev_kfree_skb_any(tpd->skb);
2117  atomic_inc(&tpd->vcc->stats->tx_err);
2118  }
2119  pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2120  return;
2121  }
2122  }
2123 
2124  /* 2.1.5 transmit packet descriptor ready queue */
2125  list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2126  he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2127  he_dev->tpdrq_tail->cid = cid;
2128  wmb();
2129 
2130  he_dev->tpdrq_tail = new_tail;
2131 
2132  he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2133  (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
2134 }
2135 
2136 static int
2137 he_open(struct atm_vcc *vcc)
2138 {
2139  unsigned long flags;
2140  struct he_dev *he_dev = HE_DEV(vcc->dev);
2141  struct he_vcc *he_vcc;
2142  int err = 0;
2143  unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2144  short vpi = vcc->vpi;
2145  int vci = vcc->vci;
2146 
2147  if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2148  return 0;
2149 
2150  HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2151 
2152  set_bit(ATM_VF_ADDR, &vcc->flags);
2153 
2154  cid = he_mkcid(he_dev, vpi, vci);
2155 
2156  he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2157  if (he_vcc == NULL) {
2158  hprintk("unable to allocate he_vcc during open\n");
2159  return -ENOMEM;
2160  }
2161 
2162  INIT_LIST_HEAD(&he_vcc->buffers);
2163  he_vcc->pdu_len = 0;
2164  he_vcc->rc_index = -1;
2165 
2166  init_waitqueue_head(&he_vcc->rx_waitq);
2167  init_waitqueue_head(&he_vcc->tx_waitq);
2168 
2169  vcc->dev_data = he_vcc;
2170 
2171  if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2172  int pcr_goal;
2173 
2174  pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2175  if (pcr_goal == 0)
2176  pcr_goal = he_dev->atm_dev->link_rate;
2177  if (pcr_goal < 0) /* means round down, technically */
2178  pcr_goal = -pcr_goal;
2179 
2180  HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2181 
2182  switch (vcc->qos.aal) {
2183  case ATM_AAL5:
2184  tsr0_aal = TSR0_AAL5;
2185  tsr4 = TSR4_AAL5;
2186  break;
2187  case ATM_AAL0:
2188  tsr0_aal = TSR0_AAL0_SDU;
2189  tsr4 = TSR4_AAL0_SDU;
2190  break;
2191  default:
2192  err = -EINVAL;
2193  goto open_failed;
2194  }
2195 
2196  spin_lock_irqsave(&he_dev->global_lock, flags);
2197  tsr0 = he_readl_tsr0(he_dev, cid);
2198  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2199 
2200  if (TSR0_CONN_STATE(tsr0) != 0) {
2201  hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2202  err = -EBUSY;
2203  goto open_failed;
2204  }
2205 
2206  switch (vcc->qos.txtp.traffic_class) {
2207  case ATM_UBR:
2208  /* 2.3.3.1 open connection ubr */
2209 
2210  tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2211  TSR0_USE_WMIN | TSR0_UPDATE_GER;
2212  break;
2213 
2214  case ATM_CBR:
2215  /* 2.3.3.2 open connection cbr */
2216 
2217  /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2218  if ((he_dev->total_bw + pcr_goal)
2219  > (he_dev->atm_dev->link_rate * 9 / 10))
2220  {
2221  err = -EBUSY;
2222  goto open_failed;
2223  }
2224 
2225  spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2226 
2227  /* find an unused cs_stper register */
2228  for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2229  if (he_dev->cs_stper[reg].inuse == 0 ||
2230  he_dev->cs_stper[reg].pcr == pcr_goal)
2231  break;
2232 
2233  if (reg == HE_NUM_CS_STPER) {
2234  err = -EBUSY;
2235  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2236  goto open_failed;
2237  }
2238 
2239  he_dev->total_bw += pcr_goal;
2240 
2241  he_vcc->rc_index = reg;
2242  ++he_dev->cs_stper[reg].inuse;
2243  he_dev->cs_stper[reg].pcr = pcr_goal;
2244 
2245  clock = he_is622(he_dev) ? 66667000 : 50000000;
2246  period = clock / pcr_goal;
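 /* e.g. on a 155 Mb/s card (50 MHz cell clock) a pcr_goal of 100000
  * cells/sec gives period = 500 ticks between cells; rate_to_atmf()
  * of half that period is what gets programmed into CS_STPER below */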
2247 
2248  HPRINTK("rc_index = %d period = %d\n",
2249  reg, period);
2250 
2251  he_writel_mbox(he_dev, rate_to_atmf(period/2),
2252  CS_STPER0 + reg);
2253  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2254 
2255  tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2256  TSR0_RC_INDEX(reg);
2257 
2258  break;
2259  default:
2260  err = -EINVAL;
2261  goto open_failed;
2262  }
2263 
2264  spin_lock_irqsave(&he_dev->global_lock, flags);
2265 
2266  he_writel_tsr0(he_dev, tsr0, cid);
2267  he_writel_tsr4(he_dev, tsr4 | 1, cid);
2268  he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2269  TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2270  he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2271  he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2272 
2273  he_writel_tsr3(he_dev, 0x0, cid);
2274  he_writel_tsr5(he_dev, 0x0, cid);
2275  he_writel_tsr6(he_dev, 0x0, cid);
2276  he_writel_tsr7(he_dev, 0x0, cid);
2277  he_writel_tsr8(he_dev, 0x0, cid);
2278  he_writel_tsr10(he_dev, 0x0, cid);
2279  he_writel_tsr11(he_dev, 0x0, cid);
2280  he_writel_tsr12(he_dev, 0x0, cid);
2281  he_writel_tsr13(he_dev, 0x0, cid);
2282  he_writel_tsr14(he_dev, 0x0, cid);
2283  (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2284  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2285  }
2286 
2287  if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2288  unsigned aal;
2289 
2290  HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2291  &HE_VCC(vcc)->rx_waitq);
2292 
2293  switch (vcc->qos.aal) {
2294  case ATM_AAL5:
2295  aal = RSR0_AAL5;
2296  break;
2297  case ATM_AAL0:
2298  aal = RSR0_RAWCELL;
2299  break;
2300  default:
2301  err = -EINVAL;
2302  goto open_failed;
2303  }
2304 
2305  spin_lock_irqsave(&he_dev->global_lock, flags);
2306 
2307  rsr0 = he_readl_rsr0(he_dev, cid);
2308  if (rsr0 & RSR0_OPEN_CONN) {
2309  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2310 
2311  hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2312  err = -EBUSY;
2313  goto open_failed;
2314  }
2315 
2316  rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2317  rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2318  rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2319  (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2320 
2321 #ifdef USE_CHECKSUM_HW
2322  if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2323  rsr0 |= RSR0_TCP_CKSUM;
2324 #endif
2325 
2326  he_writel_rsr4(he_dev, rsr4, cid);
2327  he_writel_rsr1(he_dev, rsr1, cid);
2328  /* 5.1.11 last parameter initialized should be
2329  the open/closed indication in rsr0 */
2330  he_writel_rsr0(he_dev,
2331  rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2332  (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2333 
2334  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2335  }
2336 
2337 open_failed:
2338 
2339  if (err) {
2340  kfree(he_vcc);
2341  clear_bit(ATM_VF_ADDR, &vcc->flags);
2342  }
2343  else
2344  set_bit(ATM_VF_READY, &vcc->flags);
2345 
2346  return err;
2347 }
2348 
2349 static void
2350 he_close(struct atm_vcc *vcc)
2351 {
2352  unsigned long flags;
2353  DECLARE_WAITQUEUE(wait, current);
2354  struct he_dev *he_dev = HE_DEV(vcc->dev);
2355  struct he_tpd *tpd;
2356  unsigned cid;
2357  struct he_vcc *he_vcc = HE_VCC(vcc);
2358 #define MAX_RETRY 30
2359  int retry = 0, sleep = 1, tx_inuse;
2360 
2361  HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2362 
2363  clear_bit(ATM_VF_READY, &vcc->flags);
2364  cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2365 
2366  if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2367  int timeout;
2368 
2369  HPRINTK("close rx cid 0x%x\n", cid);
2370 
2371  /* 2.7.2.2 close receive operation */
2372 
2373  /* wait for previous close (if any) to finish */
2374 
2375  spin_lock_irqsave(&he_dev->global_lock, flags);
2376  while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2377  HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2378  udelay(250);
2379  }
2380 
2381  set_current_state(TASK_UNINTERRUPTIBLE);
2382  add_wait_queue(&he_vcc->rx_waitq, &wait);
2383 
2384  he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2385  (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2386  he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2387  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2388 
2389  timeout = schedule_timeout(30*HZ);
2390 
2391  remove_wait_queue(&he_vcc->rx_waitq, &wait);
2392  set_current_state(TASK_RUNNING);
2393 
2394  if (timeout == 0)
2395  hprintk("close rx timeout cid 0x%x\n", cid);
2396 
2397  HPRINTK("close rx cid 0x%x complete\n", cid);
2398 
2399  }
2400 
2401  if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2402  volatile unsigned tsr4, tsr0;
2403  int timeout;
2404 
2405  HPRINTK("close tx cid 0x%x\n", cid);
2406 
2407  /* 2.1.2
2408  *
2409  * ... the host must first stop queueing packets to the TPDRQ
2410  * on the connection to be closed, then wait for all outstanding
2411  * packets to be transmitted and their buffers returned to the
2412  * TBRQ. When the last packet on the connection arrives in the
2413  * TBRQ, the host issues the close command to the adapter.
2414  */
2415 
2416  while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2417  (retry < MAX_RETRY)) {
2418  msleep(sleep);
2419  if (sleep < 250)
2420  sleep = sleep * 2;
2421 
2422  ++retry;
2423  }
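 /* the loop above is an exponential backoff -- 1, 2, 4, ... ms, capped
  * at 256 ms -- so the wait for in-flight tx buffers tops out just
  * under six seconds over MAX_RETRY (30) iterations */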
2424 
2425  if (tx_inuse > 1)
2426  hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2427 
2428  /* 2.3.1.1 generic close operations with flush */
2429 
2430  spin_lock_irqsave(&he_dev->global_lock, flags);
2431  he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2432  /* also clears TSR4_SESSION_ENDED */
2433 
2434  switch (vcc->qos.txtp.traffic_class) {
2435  case ATM_UBR:
2436  he_writel_tsr1(he_dev,
2437  TSR1_MCR(rate_to_atmf(200000))
2438  | TSR1_PCR(0), cid);
2439  break;
2440  case ATM_CBR:
2441  he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2442  break;
2443  }
2444  (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2445 
2446  tpd = __alloc_tpd(he_dev);
2447  if (tpd == NULL) {
2448  hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2449  goto close_tx_incomplete;
2450  }
2451  tpd->status |= TPD_EOS | TPD_INT;
2452  tpd->skb = NULL;
2453  tpd->vcc = vcc;
2454  wmb();
2455 
2456  set_current_state(TASK_UNINTERRUPTIBLE);
2457  add_wait_queue(&he_vcc->tx_waitq, &wait);
2458  __enqueue_tpd(he_dev, tpd, cid);
2459  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2460 
2461  timeout = schedule_timeout(30*HZ);
2462 
2463  remove_wait_queue(&he_vcc->tx_waitq, &wait);
2464  set_current_state(TASK_RUNNING);
2465 
2466  spin_lock_irqsave(&he_dev->global_lock, flags);
2467 
2468  if (timeout == 0) {
2469  hprintk("close tx timeout cid 0x%x\n", cid);
2470  goto close_tx_incomplete;
2471  }
2472 
2473  while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2474  HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2475  udelay(250);
2476  }
2477 
2478  while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2479  HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2480  udelay(250);
2481  }
2482 
2483 close_tx_incomplete:
2484 
2485  if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2486  int reg = he_vcc->rc_index;
2487 
2488  HPRINTK("cs_stper reg = %d\n", reg);
2489 
2490  if (he_dev->cs_stper[reg].inuse == 0)
2491  hprintk("cs_stper[%d].inuse = 0!\n", reg);
2492  else
2493  --he_dev->cs_stper[reg].inuse;
2494 
2495  he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2496  }
2497  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2498 
2499  HPRINTK("close tx cid 0x%x complete\n", cid);
2500  }
2501 
2502  kfree(he_vcc);
2503 
2504  clear_bit(ATM_VF_ADDR, &vcc->flags);
2505 }
2506 
2507 static int
2508 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2509 {
2510  unsigned long flags;
2511  struct he_dev *he_dev = HE_DEV(vcc->dev);
2512  unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2513  struct he_tpd *tpd;
2514 #ifdef USE_SCATTERGATHER
2515  int i, slot = 0;
2516 #endif
2517 
2518 #define HE_TPD_BUFSIZE 0xffff
2519 
2520  HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2521 
2522  if ((skb->len > HE_TPD_BUFSIZE) ||
2523  ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2524  hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2525  if (vcc->pop)
2526  vcc->pop(vcc, skb);
2527  else
2528  dev_kfree_skb_any(skb);
2529  atomic_inc(&vcc->stats->tx_err);
2530  return -EINVAL;
2531  }
2532 
2533 #ifndef USE_SCATTERGATHER
2534  if (skb_shinfo(skb)->nr_frags) {
2535  hprintk("no scatter/gather support\n");
2536  if (vcc->pop)
2537  vcc->pop(vcc, skb);
2538  else
2539  dev_kfree_skb_any(skb);
2540  atomic_inc(&vcc->stats->tx_err);
2541  return -EINVAL;
2542  }
2543 #endif
2544  spin_lock_irqsave(&he_dev->global_lock, flags);
2545 
2546  tpd = __alloc_tpd(he_dev);
2547  if (tpd == NULL) {
2548  if (vcc->pop)
2549  vcc->pop(vcc, skb);
2550  else
2551  dev_kfree_skb_any(skb);
2552  atomic_inc(&vcc->stats->tx_err);
2553  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2554  return -ENOMEM;
2555  }
2556 
2557  if (vcc->qos.aal == ATM_AAL5)
2558  tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2559  else {
2560  char *pti_clp = (void *) (skb->data + 3);
2561  int clp, pti;
2562 
2563  pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2564  clp = (*pti_clp & ATM_HDR_CLP);
2565  tpd->status |= TPD_CELLTYPE(pti);
2566  if (clp)
2567  tpd->status |= TPD_CLP;
2568 
2569  skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2570  }
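 /* for aal0, the skb holds a whole 52-byte cell (4-byte header minus
  * hec, plus 48-byte payload): pti/clp were parsed from header byte 3
  * above, and skb_pull() strips the header so only the payload is
  * handed to the adapter */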
2571 
2572 #ifdef USE_SCATTERGATHER
2573  tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2574  skb_headlen(skb), PCI_DMA_TODEVICE);
2575  tpd->iovec[slot].len = skb_headlen(skb);
2576  ++slot;
2577 
2578  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2579  skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2580 
2581  if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2582  tpd->vcc = vcc;
2583  tpd->skb = NULL; /* not the last fragment
2584  so don't ->push() yet */
2585  wmb();
2586 
2587  __enqueue_tpd(he_dev, tpd, cid);
2588  tpd = __alloc_tpd(he_dev);
2589  if (tpd == NULL) {
2590  if (vcc->pop)
2591  vcc->pop(vcc, skb);
2592  else
2593  dev_kfree_skb_any(skb);
2594  atomic_inc(&vcc->stats->tx_err);
2595  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2596  return -ENOMEM;
2597  }
2598  tpd->status |= TPD_USERCELL;
2599  slot = 0;
2600  }
2601 
2602  tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2603  (void *) page_address(frag->page) + frag->page_offset,
2604  frag->size, PCI_DMA_TODEVICE);
2605  tpd->iovec[slot].len = frag->size;
2606  ++slot;
2607 
2608  }
2609 
2610  tpd->iovec[slot - 1].len |= TPD_LST;
2611 #else
2612  tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2613  tpd->length0 = skb->len | TPD_LST;
2614 #endif
2615  tpd->status |= TPD_INT;
2616 
2617  tpd->vcc = vcc;
2618  tpd->skb = skb;
2619  wmb();
2620  ATM_SKB(skb)->vcc = vcc;
2621 
2622  __enqueue_tpd(he_dev, tpd, cid);
2623  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2624 
2625  atomic_inc(&vcc->stats->tx);
2626 
2627  return 0;
2628 }
2629 
2630 static int
2631 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2632 {
2633  unsigned long flags;
2634  struct he_dev *he_dev = HE_DEV(atm_dev);
2635  struct he_ioctl_reg reg;
2636  int err = 0;
2637 
2638  switch (cmd) {
2639  case HE_GET_REG:
2640  if (!capable(CAP_NET_ADMIN))
2641  return -EPERM;
2642 
2643  if (copy_from_user(&reg, arg,
2644  sizeof(struct he_ioctl_reg)))
2645  return -EFAULT;
2646 
2647  spin_lock_irqsave(&he_dev->global_lock, flags);
2648  switch (reg.type) {
2649  case HE_REGTYPE_PCI:
2650  if (reg.addr >= HE_REGMAP_SIZE) {
2651  err = -EINVAL;
2652  break;
2653  }
2654 
2655  reg.val = he_readl(he_dev, reg.addr);
2656  break;
2657  case HE_REGTYPE_RCM:
2658  reg.val =
2659  he_readl_rcm(he_dev, reg.addr);
2660  break;
2661  case HE_REGTYPE_TCM:
2662  reg.val =
2663  he_readl_tcm(he_dev, reg.addr);
2664  break;
2665  case HE_REGTYPE_MBOX:
2666  reg.val =
2667  he_readl_mbox(he_dev, reg.addr);
2668  break;
2669  default:
2670  err = -EINVAL;
2671  break;
2672  }
2673  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2674  if (err == 0)
2675  if (copy_to_user(arg, &reg,
2676  sizeof(struct he_ioctl_reg)))
2677  return -EFAULT;
2678  break;
2679  default:
2680 #ifdef CONFIG_ATM_HE_USE_SUNI
2681  if (atm_dev->phy && atm_dev->phy->ioctl)
2682  err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2683 #else /* CONFIG_ATM_HE_USE_SUNI */
2684  err = -EINVAL;
2685 #endif /* CONFIG_ATM_HE_USE_SUNI */
2686  break;
2687  }
2688 
2689  return err;
2690 }
2691 
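 /*
  * userspace sketch (not part of this driver) of reading a register
  * through HE_GET_REG; assumes a PVC socket, the atm interface number
  * in 'itf', and CAP_NET_ADMIN, with error handling omitted:
  *
  *	struct he_ioctl_reg reg = { .addr = 0x0, .type = HE_REGTYPE_PCI };
  *	struct atmif_sioc sioc = { .number = itf, .arg = &reg,
  *				   .length = sizeof(reg) };
  *	int s = socket(PF_ATMPVC, SOCK_DGRAM, 0);
  *	if (ioctl(s, HE_GET_REG, &sioc) == 0)
  *		printf("pci reg 0x%x = 0x%x\n", reg.addr, reg.val);
  */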
2692 static void
2693 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2694 {
2695  unsigned long flags;
2696  struct he_dev *he_dev = HE_DEV(atm_dev);
2697 
2698  HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2699 
2700  spin_lock_irqsave(&he_dev->global_lock, flags);
2701  he_writel(he_dev, val, FRAMER + (addr*4));
2702  (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2703  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2704 }
2705 
2706 
2707 static unsigned char
2708 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2709 {
2710  unsigned long flags;
2711  struct he_dev *he_dev = HE_DEV(atm_dev);
2712  unsigned reg;
2713 
2714  spin_lock_irqsave(&he_dev->global_lock, flags);
2715  reg = he_readl(he_dev, FRAMER + (addr*4));
2716  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2717 
2718  HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
2719  return reg;
2720 }
2721 
2722 static int
2723 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2724 {
2725  unsigned long flags;
2726  struct he_dev *he_dev = HE_DEV(dev);
2727  int left, i;
2728 #ifdef notdef
2729  struct he_rbrq *rbrq_tail;
2730  struct he_tpdrq *tpdrq_head;
2731  int rbpl_head, rbpl_tail, inuse;
2732 #endif
2733  static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2734 
2735 
2736  left = *pos;
2737  if (!left--)
2738  return sprintf(page, "ATM he driver\n");
2739 
2740  if (!left--)
2741  return sprintf(page, "%s%s\n\n",
2742  he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2743 
2744  if (!left--)
2745  return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2746 
2747  spin_lock_irqsave(&he_dev->global_lock, flags);
2748  mcc += he_readl(he_dev, MCC);
2749  oec += he_readl(he_dev, OEC);
2750  dcc += he_readl(he_dev, DCC);
2751  cec += he_readl(he_dev, CEC);
2752  spin_unlock_irqrestore(&he_dev->global_lock, flags);
2753 
2754  if (!left--)
2755  return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2756  mcc, oec, dcc, cec);
2757 
2758  if (!left--)
2759  return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2760  CONFIG_IRQ_SIZE, he_dev->irq_peak);
2761 
2762  if (!left--)
2763  return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2764  CONFIG_TPDRQ_SIZE);
2765 
2766  if (!left--)
2767  return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2768  CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2769 
2770  if (!left--)
2771  return sprintf(page, "tbrq_size = %d peak = %d\n",
2772  CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2773 
2774 
2775 #ifdef notdef
2776  rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2777  rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2778 
2779  inuse = rbpl_head - rbpl_tail;
2780  if (inuse < 0)
2781  inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2782  inuse /= sizeof(struct he_rbp);
2783 
2784  if (!left--)
2785  return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2786  CONFIG_RBPL_SIZE, inuse);
2787 #endif
2788 
2789  if (!left--)
2790  return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2791 
2792  for (i = 0; i < HE_NUM_CS_STPER; ++i)
2793  if (!left--)
2794  return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2795  he_dev->cs_stper[i].pcr,
2796  he_dev->cs_stper[i].inuse);
2797 
2798  if (!left--)
2799  return sprintf(page, "total bw (cbr): %d (limit %d)\n",
2800  he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2801 
2802  return 0;
2803 }
2804 
2805 /* eeprom routines -- see 4.7 */
2806 
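 /* the serial eeprom is read by bit-banging HOST_CNTL: readtab[] holds
  * the chip-select/clock sequence for the READ opcode, clocktab[] the
  * per-bit clock toggles, and each data bit is sampled from ID_DOUT
  * between toggles */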
2807 static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2808 {
2809  u32 val = 0, tmp_read = 0;
2810  int i, j = 0;
2811  u8 byte_read = 0;
2812 
2813  val = readl(he_dev->membase + HOST_CNTL);
2814  val &= 0xFFFFE0FF;
2815 
2816  /* Turn on write enable */
2817  val |= 0x800;
2818  he_writel(he_dev, val, HOST_CNTL);
2819 
2820  /* Send READ instruction */
2821  for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2822  he_writel(he_dev, val | readtab[i], HOST_CNTL);
2823  udelay(EEPROM_DELAY);
2824  }
2825 
2826  /* Next, we need to send the byte address to read from */
2827  for (i = 7; i >= 0; i--) {
2828  he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2829  udelay(EEPROM_DELAY);
2830  he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2831  udelay(EEPROM_DELAY);
2832  }
2833 
2834  j = 0;
2835 
2836  val &= 0xFFFFF7FF; /* Turn off write enable */
2837  he_writel(he_dev, val, HOST_CNTL);
2838 
2839  /* Now, we can read data from the EEPROM by clocking it in */
2840  for (i = 7; i >= 0; i--) {
2841  he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2842  udelay(EEPROM_DELAY);
2843  tmp_read = he_readl(he_dev, HOST_CNTL);
2844  byte_read |= (unsigned char)
2845  ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2846  he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2847  udelay(EEPROM_DELAY);
2848  }
2849 
2850  he_writel(he_dev, val | ID_CS, HOST_CNTL);
2851  udelay(EEPROM_DELAY);
2852 
2853  return byte_read;
2854 }
2855 
2856 MODULE_LICENSE("GPL");
2857 MODULE_AUTHOR("chas williams <[email protected]>");
2858 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2859 module_param(disable64, bool, 0);
2860 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2861 module_param(nvpibits, short, 0);
2862  MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2863 module_param(nvcibits, short, 0);
2864  MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2865 module_param(rx_skb_reserve, short, 0);
2866 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2867 module_param(irq_coalesce, bool, 0);
2868 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2869 module_param(sdh, bool, 0);
2870 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2871 
2872 static struct pci_device_id he_pci_tbl[] = {
2873  { PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2874  { 0, }
2875 };
2876 
2877 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2878 
2879 static struct pci_driver he_driver = {
2880  .name = "he",
2881  .probe = he_init_one,
2882  .remove = __devexit_p(he_remove_one),
2883  .id_table = he_pci_tbl,
2884 };
2885 
2886 static int __init he_init(void)
2887 {
2888  return pci_register_driver(&he_driver);
2889 }
2890 
2891 static void __exit he_cleanup(void)
2892 {
2893  pci_unregister_driver(&he_driver);
2894 }
2895 
2896 module_init(he_init);
2897 module_exit(he_cleanup);