#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
#include <asm/string.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/pgtable.h>
#if defined(CONFIG_ATM_FORE200E_USE_TASKLET)
#define FORE200E_USE_TASKLET
#endif

#define FORE200E_BSQ_DEBUG

#define FORE200E_52BYTE_AAL0_SDU
#define FORE200E_VERSION "0.3e"

#define FORE200E         "fore200e: "

#define CONFIG_ATM_FORE200E_DEBUG 1

#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
#define DPRINTK(level, format, args...)  do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
						  printk(FORE200E format, ##args); } while (0)
#else
#define DPRINTK(level, format, args...)  do {} while (0)
#endif
#define FORE200E_ALIGN(addr, alignment) \
	((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))

#define FORE200E_DMA_INDEX(dma_addr, type, index)  ((dma_addr) + (index) * sizeof(type))

#define FORE200E_INDEX(virt_addr, type, index)     (&((type *)(virt_addr))[ index ])

#define FORE200E_NEXT_ENTRY(index, modulo)         (index = ((index) + 1) % (modulo))
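/*
 * Queue bookkeeping helpers: FORE200E_INDEX() and FORE200E_DMA_INDEX() return
 * the virtual and bus address of the i-th fixed-size element inside a DMA
 * chunk, and FORE200E_NEXT_ENTRY() advances a ring index modulo the queue
 * length.  Illustrative use, in the style of the queue init code further down:
 *
 *   entry->status = FORE200E_INDEX(rxq->status.align_addr, enum status, i);
 *   FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
 */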
#if 1
#define ASSERT(expr)     if (!(expr)) { \
			     printk(FORE200E "assertion failed! %s[%d]: %s\n", \
				    __func__, __LINE__, #expr); \
			     panic(FORE200E "%s", __func__); \
			 }
#else
#define ASSERT(expr)     do {} while (0)
#endif
static const struct fore200e_bus fore200e_bus[];

MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
	{ BUFFER_S1_NBR, BUFFER_L1_NBR },
	{ BUFFER_S2_NBR, BUFFER_L2_NBR }
};

static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
	{ BUFFER_S1_SIZE, BUFFER_L1_SIZE },
	{ BUFFER_S2_SIZE, BUFFER_L2_SIZE }
};
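/*
 * Receive buffers come in two schemes (BUFFER_SCHEME_ONE/TWO) and two
 * magnitudes (small/large): the two tables above give, per (scheme, magn)
 * pair, how many buffers to allocate and how big each one is.  The same
 * [scheme][magn] indexing is used for fore200e->host_bsq[][] throughout.
 */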
#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
#endif
static int
fore200e_fore2atm_aal(enum fore200e_aal aal)
{
	switch (aal) {
	case FORE200E_AAL0:  return ATM_AAL0;
	case FORE200E_AAL5:  return ATM_AAL5;
	}

	return -EINVAL;
}


static enum fore200e_aal
fore200e_atm2fore_aal(int aal)
{
	switch (aal) {
	case ATM_AAL0:  return FORE200E_AAL0;
	case ATM_AAL5:  return FORE200E_AAL5;
	}

	return -EINVAL;
}
fore200e_irq_itoa(int irq)
	if (alignment <= sizeof(int))

	chunk->align_size = size;

	if (chunk->alloc_addr == NULL)

	chunk->align_addr = chunk->alloc_addr + offset;

	chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
static void
fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
	fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);

	kfree(chunk->alloc_addr);
}
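/*
 * fore200e_chunk_alloc()/fore200e_chunk_free() bracket the life cycle of a
 * streaming DMA buffer: the alloc path obtains and aligns the memory and maps
 * it with bus->dma_map(), the free path above unmaps it and releases the
 * original allocation.
 */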
fore200e_spin(int msecs)
fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)

	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)

	if ((ok = (fore200e->bus->read(addr) == val)))

	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
	       fore200e->bus->read(addr), val);
fore200e_free_rx_buf(struct fore200e* fore200e)

	int scheme, magn, nbr;

	for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
		for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

			if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {

				for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {

					struct chunk* data = &buffer[ nbr ].data;

					if (data->alloc_addr != NULL)
						fore200e_chunk_free(fore200e, data);
fore200e_uninit_bs_queue(struct fore200e* fore200e)

	for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
		for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

			struct chunk* status    = &fore200e->host_bsq[ scheme ][ magn ].status;
			struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;

			if (status->alloc_addr)
				fore200e->bus->dma_chunk_free(fore200e, status);

			if (rbd_block->alloc_addr)
				fore200e->bus->dma_chunk_free(fore200e, rbd_block);
fore200e_reset(struct fore200e* fore200e, int diag)

	fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;

	fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);

	fore200e->bus->reset(fore200e);

	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);

	fore200e->state = FORE200E_STATE_RESET;
344 fore200e_shutdown(
struct fore200e* fore200e)
347 fore200e->name, fore200e->phys_base,
348 fore200e_irq_itoa(fore200e->irq));
350 if (fore200e->state > FORE200E_STATE_RESET) {
352 fore200e_reset(fore200e, 0);
	switch(fore200e->state) {

	case FORE200E_STATE_COMPLETE:
		kfree(fore200e->stats);

	case FORE200E_STATE_IRQ:
		free_irq(fore200e->irq, fore200e->atm_dev);

	case FORE200E_STATE_ALLOC_BUF:
		fore200e_free_rx_buf(fore200e);

	case FORE200E_STATE_INIT_BSQ:
		fore200e_uninit_bs_queue(fore200e);

	case FORE200E_STATE_INIT_RXQ:
		fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
		fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);

	case FORE200E_STATE_INIT_TXQ:
		fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
		fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);

	case FORE200E_STATE_INIT_CMDQ:
		fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);

	case FORE200E_STATE_INITIALIZE:

	case FORE200E_STATE_START_FW:

	case FORE200E_STATE_RESET:

	case FORE200E_STATE_MAP:
		fore200e->bus->unmap(fore200e);

	case FORE200E_STATE_CONFIGURE:

	case FORE200E_STATE_REGISTER:

	case FORE200E_STATE_BLANK:
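	/*
	 * Note: the cases above deliberately fall through.  fore200e->state
	 * records how far fore200e_init() got, so starting the switch at the
	 * current state and falling through undoes every completed init step
	 * in reverse order.
	 */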
417 static void fore200e_pca_write(
u32 val,
volatile u32 __iomem *addr)
426 fore200e_pca_dma_map(
struct fore200e* fore200e,
void* virt_addr,
int size,
int direction)
428 u32 dma_addr = pci_map_single((
struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
430 DPRINTK(3,
"PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
431 virt_addr, size, direction, dma_addr);
438 fore200e_pca_dma_unmap(
struct fore200e* fore200e,
u32 dma_addr,
int size,
int direction)
440 DPRINTK(3,
"PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
441 dma_addr, size, direction);
443 pci_unmap_single((
struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
448 fore200e_pca_dma_sync_for_cpu(
struct fore200e* fore200e,
u32 dma_addr,
int size,
int direction)
450 DPRINTK(3,
"PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
452 pci_dma_sync_single_for_cpu((
struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
456 fore200e_pca_dma_sync_for_device(
struct fore200e* fore200e,
u32 dma_addr,
int size,
int direction)
458 DPRINTK(3,
"PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
460 pci_dma_sync_single_for_device((
struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
468 fore200e_pca_dma_chunk_alloc(
struct fore200e* fore200e,
struct chunk* chunk,
469 int size,
int nbr,
int alignment)
472 chunk->alloc_size = size * nbr;
477 if ((chunk->alloc_addr ==
NULL) || (chunk->dma_addr == 0))
480 chunk->align_addr = chunk->alloc_addr;
489 fore200e_pca_dma_chunk_free(
struct fore200e* fore200e,
struct chunk* chunk)
499 fore200e_pca_irq_check(
struct fore200e* fore200e)
502 int irq_posted =
readl(fore200e->regs.pca.psr);
504 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
505 if (irq_posted && (
readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
506 DPRINTK(2,
"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
515 fore200e_pca_irq_ack(
struct fore200e* fore200e)
517 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
fore200e_pca_reset(struct fore200e* fore200e)

	writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);

	writel(0, fore200e->regs.pca.hcr);
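/*
 * Writing PCA200E_HCR_RESET to the host control register asserts the board
 * reset bit; the following write of 0 releases it so the adapter can rerun
 * its self-test (fore200e_reset() then polls bstat for BSTAT_SELFTEST_OK).
 */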
531 fore200e_pca_map(
struct fore200e* fore200e)
533 DPRINTK(2,
"device %s being mapped in memory\n", fore200e->name);
535 fore200e->virt_base =
ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
537 if (fore200e->virt_base ==
NULL) {
542 DPRINTK(1,
"device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
545 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
546 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
547 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
549 fore200e->state = FORE200E_STATE_MAP;
555 fore200e_pca_unmap(
struct fore200e* fore200e)
557 DPRINTK(2,
"device %s being unmapped from memory\n", fore200e->name);
559 if (fore200e->virt_base !=
NULL)
565 fore200e_pca_configure(
struct fore200e* fore200e)
570 DPRINTK(2,
"device %s being configured\n", fore200e->name);
572 if ((pci_dev->
irq == 0) || (pci_dev->
irq == 0xFF)) {
573 printk(
FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
577 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
579 master_ctrl = master_ctrl
580 #if defined(__BIG_ENDIAN)
582 | PCA200E_CTRL_CONVERT_ENDIAN
585 | PCA200E_CTRL_DIS_CACHE_RD
586 | PCA200E_CTRL_DIS_WRT_INVAL
587 | PCA200E_CTRL_ENA_CONT_REQ_MODE
588 | PCA200E_CTRL_2_CACHE_WRT_INVAL
590 | PCA200E_CTRL_LARGE_PCI_BURSTS;
592 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
600 fore200e->state = FORE200E_STATE_CONFIGURE;
606 fore200e_pca_prom_read(
struct fore200e* fore200e,
struct prom_data* prom)
608 struct host_cmdq* cmdq = &fore200e->host_cmdq;
609 struct host_cmdq_entry*
entry = &cmdq->host_entry[ cmdq->head ];
610 struct prom_opcode
opcode;
616 opcode.opcode = OPCODE_GET_PROM;
619 prom_dma = fore200e->bus->dma_map(fore200e, prom,
sizeof(
struct prom_data),
DMA_FROM_DEVICE);
621 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
625 fore200e->bus->write(*(
u32*)&
opcode, (
u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
627 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
631 fore200e->bus->dma_unmap(fore200e, prom_dma,
sizeof(
struct prom_data),
DMA_FROM_DEVICE);
634 printk(
FORE200E "unable to get PROM data from device %s\n", fore200e->name);
#if defined(__BIG_ENDIAN)

#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))

	swap_here(&prom->mac_addr[0]);
	swap_here(&prom->mac_addr[4]);
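	/*
	 * On big-endian hosts the PROM data appears byte-swapped within each
	 * 32-bit word, so the two words holding the MAC address are swapped
	 * back with swab32() above before fore200e_get_esi() extracts the ESI
	 * bytes from prom->mac_addr[].
	 */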
652 fore200e_pca_proc_read(
struct fore200e* fore200e,
char *
page)
654 struct pci_dev* pci_dev = (
struct pci_dev*)fore200e->bus_dev;
656 return sprintf(page,
" PCI bus/slot/function:\t%d/%d/%d\n",
670 static void fore200e_sba_write(
u32 val,
volatile u32 __iomem *addr)
675 static u32 fore200e_sba_dma_map(
struct fore200e *fore200e,
void* virt_addr,
int size,
int direction)
682 DPRINTK(3,
"SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
683 virt_addr, size, direction, dma_addr);
688 static void fore200e_sba_dma_unmap(
struct fore200e *fore200e,
u32 dma_addr,
int size,
int direction)
692 DPRINTK(3,
"SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
693 dma_addr, size, direction);
698 static void fore200e_sba_dma_sync_for_cpu(
struct fore200e *fore200e,
u32 dma_addr,
int size,
int direction)
702 DPRINTK(3,
"SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
707 static void fore200e_sba_dma_sync_for_device(
struct fore200e *fore200e,
u32 dma_addr,
int size,
int direction)
711 DPRINTK(3,
"SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
719 static int fore200e_sba_dma_chunk_alloc(
struct fore200e *fore200e,
struct chunk *chunk,
720 int size,
int nbr,
int alignment)
724 chunk->alloc_size = chunk->align_size = size * nbr;
730 if ((chunk->alloc_addr ==
NULL) || (chunk->dma_addr == 0))
733 chunk->align_addr = chunk->alloc_addr;
739 static void fore200e_sba_dma_chunk_free(
struct fore200e *fore200e,
struct chunk *chunk)
744 chunk->alloc_addr, chunk->dma_addr);
747 static void fore200e_sba_irq_enable(
struct fore200e *fore200e)
749 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
750 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
753 static int fore200e_sba_irq_check(
struct fore200e *fore200e)
755 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
758 static void fore200e_sba_irq_ack(
struct fore200e *fore200e)
760 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
761 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
764 static void fore200e_sba_reset(
struct fore200e *fore200e)
766 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
768 fore200e->bus->write(0, fore200e->regs.sba.hcr);
771 static int __init fore200e_sba_map(
struct fore200e *fore200e)
777 fore200e->regs.sba.hcr =
of_ioremap(&op->
resource[0], 0, SBA200E_HCR_LENGTH,
"SBA HCR");
778 fore200e->regs.sba.bsr =
of_ioremap(&op->
resource[1], 0, SBA200E_BSR_LENGTH,
"SBA BSR");
779 fore200e->regs.sba.isr =
of_ioremap(&op->
resource[2], 0, SBA200E_ISR_LENGTH,
"SBA ISR");
782 if (!fore200e->virt_base) {
783 printk(
FORE200E "unable to map RAM of device %s\n", fore200e->name);
787 DPRINTK(1,
"device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
789 fore200e->bus->write(0x02, fore200e->regs.sba.isr);
794 if (sbus_can_dma_64bit())
797 fore200e->state = FORE200E_STATE_MAP;
801 static void fore200e_sba_unmap(
struct fore200e *fore200e)
811 static int __init fore200e_sba_configure(
struct fore200e *fore200e)
813 fore200e->state = FORE200E_STATE_CONFIGURE;
817 static int __init fore200e_sba_prom_read(
struct fore200e *fore200e,
struct prom_data *prom)
826 memcpy(&prom->mac_addr[4], prop, 4);
831 memcpy(&prom->mac_addr[2], prop, 4);
841 static int fore200e_sba_proc_read(
struct fore200e *fore200e,
char *page)
848 return sprintf(page,
" SBUS slot/device:\t\t%d/'%s'\n",
849 (regs ? regs->
which_io : 0), op->
dev.of_node->name);
855 fore200e_tx_irq(
struct fore200e* fore200e)
857 struct host_txq* txq = &fore200e->host_txq;
858 struct host_txq_entry*
entry;
860 struct fore200e_vc_map*
vc_map;
862 if (fore200e->host_txq.txing == 0)
867 entry = &txq->host_entry[ txq->tail ];
869 if ((*entry->status & STATUS_COMPLETE) == 0) {
873 DPRINTK(3,
"TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
874 entry, txq->tail, entry->vc_map, entry->skb);
880 fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
883 vc_map = entry->vc_map;
886 if ((vc_map->vcc ==
NULL) ||
889 DPRINTK(1,
"no ready vcc found for PDU sent on device %d\n",
890 fore200e->atm_dev->number);
898 if (vc_map->incarn != entry->incarn) {
912 DPRINTK(1,
"vcc closed-then-re-opened; dropping PDU sent on device %d\n",
913 fore200e->atm_dev->number);
923 vcc->
pop(vcc, entry->skb);
930 if (
atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
944 fore200e->host_txq.txing--;
951 #ifdef FORE200E_BSQ_DEBUG
952 int bsq_audit(
int where,
struct host_bsq* bsq,
int scheme,
int magn)
957 buffer = bsq->freebuf;
960 if (buffer->supplied) {
961 printk(
FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
962 where, scheme, magn, buffer->index);
965 if (buffer->magn != magn) {
966 printk(
FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
967 where, scheme, magn, buffer->index, buffer->magn);
970 if (buffer->scheme != scheme) {
971 printk(
FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
972 where, scheme, magn, buffer->index, buffer->scheme);
975 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
976 printk(
FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
977 where, scheme, magn, buffer->index);
981 buffer = buffer->
next;
984 if (count != bsq->freebuf_count) {
985 printk(
FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
986 where, scheme, magn, count, bsq->freebuf_count);
994 fore200e_supply(
struct fore200e* fore200e)
998 struct host_bsq* bsq;
999 struct host_bsq_entry*
entry;
1002 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1003 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1005 bsq = &fore200e->host_bsq[ scheme ][ magn ];
1007 #ifdef FORE200E_BSQ_DEBUG
1008 bsq_audit(1, bsq, scheme, magn);
1010 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1012 DPRINTK(2,
"supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1013 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1015 entry = &bsq->host_entry[ bsq->head ];
1017 for (i = 0; i < RBD_BLK_SIZE; i++) {
1020 buffer = bsq->freebuf;
1022 printk(
FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1023 scheme, magn, bsq->freebuf_count);
1026 bsq->freebuf = buffer->
next;
1028 #ifdef FORE200E_BSQ_DEBUG
1029 if (buffer->supplied)
1031 scheme, magn, buffer->index);
1032 buffer->supplied = 1;
				entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
				entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);

			bsq->freebuf_count -= RBD_BLK_SIZE;

			fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
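			/*
			 * Buffers are handed to the firmware RBD_BLK_SIZE at a
			 * time: each receive buffer descriptor carries the DMA
			 * address of the buffer data plus an opaque handle
			 * (FORE200E_BUF2HDL) that fore200e_collect_rpd() later
			 * turns back into a struct buffer* via FORE200E_HDL2BUF.
			 */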
1052 fore200e_push_rpd(
struct fore200e* fore200e,
struct atm_vcc* vcc,
struct rpd* rpd)
1056 struct fore200e_vcc* fore200e_vcc;
1058 #ifdef FORE200E_52BYTE_AAL0_SDU
1059 u32 cell_header = 0;
1064 fore200e_vcc = FORE200E_VCC(vcc);
1067 #ifdef FORE200E_52BYTE_AAL0_SDU
1074 rpd->atm_header.clp;
1080 for (i = 0; i < rpd->nseg; i++)
1081 pdu_len += rpd->rsd[ i ].length;
1085 DPRINTK(2,
"unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1091 __net_timestamp(skb);
1093 #ifdef FORE200E_52BYTE_AAL0_SDU
1100 for (i = 0; i < rpd->nseg; i++) {
1103 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1106 fore200e->bus->dma_sync_for_cpu(fore200e, buffer->
data.dma_addr, rpd->rsd[ i ].length,
DMA_FROM_DEVICE);
1108 memcpy(
skb_put(skb, rpd->rsd[ i ].length), buffer->
data.align_addr, rpd->rsd[ i ].length);
1111 fore200e->bus->dma_sync_for_device(fore200e, buffer->
data.dma_addr, rpd->rsd[ i ].length,
DMA_FROM_DEVICE);
1116 if (pdu_len < fore200e_vcc->rx_min_pdu)
1117 fore200e_vcc->rx_min_pdu = pdu_len;
1118 if (pdu_len > fore200e_vcc->rx_max_pdu)
1119 fore200e_vcc->rx_max_pdu = pdu_len;
1120 fore200e_vcc->rx_pdu++;
1125 DPRINTK(2,
"receive buffers saturated for %d.%d.%d - PDU dropped\n",
1136 vcc->
push(vcc, skb);
1146 fore200e_collect_rpd(
struct fore200e* fore200e,
struct rpd* rpd)
1148 struct host_bsq* bsq;
1152 for (i = 0; i < rpd->nseg; i++) {
1155 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1157 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1159 #ifdef FORE200E_BSQ_DEBUG
1160 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1162 if (buffer->supplied == 0)
1164 buffer->scheme, buffer->magn, buffer->index);
1165 buffer->supplied = 0;
		buffer->next = bsq->freebuf;

		bsq->freebuf_count++;
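		/*
		 * Each buffer referenced by the RPD goes back onto the free
		 * list of its (scheme, magn) supply queue; fore200e_supply()
		 * will hand it to the firmware again once a full block of
		 * RBD_BLK_SIZE free buffers is available.
		 */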
1179 fore200e_rx_irq(
struct fore200e* fore200e)
1181 struct host_rxq* rxq = &fore200e->host_rxq;
1182 struct host_rxq_entry*
entry;
1184 struct fore200e_vc_map*
vc_map;
1188 entry = &rxq->host_entry[ rxq->head ];
1191 if ((*entry->status & STATUS_COMPLETE) == 0)
1194 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1196 if ((vc_map->vcc ==
NULL) ||
1199 DPRINTK(1,
"no ready VC found for PDU received on %d.%d.%d\n",
1200 fore200e->atm_dev->number,
1201 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1209 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1212 DPRINTK(2,
"damaged PDU on %d.%d.%d\n",
1213 fore200e->atm_dev->number,
1214 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1221 fore200e_collect_rpd(fore200e, entry->rpd);
1224 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1227 fore200e_supply(fore200e);
1232 #ifndef FORE200E_USE_TASKLET
1234 fore200e_irq(
struct fore200e* fore200e)
1236 unsigned long flags;
1239 fore200e_rx_irq(fore200e);
1240 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1243 fore200e_tx_irq(fore200e);
1244 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1250 fore200e_interrupt(
int irq,
void*
dev)
1252 struct fore200e* fore200e = FORE200E_DEV((
struct atm_dev*)dev);
1254 if (fore200e->bus->irq_check(fore200e) == 0) {
1256 DPRINTK(3,
"interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1259 DPRINTK(3,
"interrupt triggered by device %d\n", fore200e->atm_dev->number);
#ifdef FORE200E_USE_TASKLET
	tasklet_schedule(&fore200e->tx_tasklet);
	tasklet_schedule(&fore200e->rx_tasklet);
#else
	fore200e_irq(fore200e);
#endif

	fore200e->bus->irq_ack(fore200e);
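	/*
	 * Interrupt handling in short: irq_check() tells whether this
	 * (possibly shared) interrupt really came from the adapter; if so,
	 * the rx/tx completion work runs either inline via fore200e_irq() or
	 * deferred to the two tasklets when FORE200E_USE_TASKLET is set, and
	 * the interrupt is finally acknowledged with irq_ack().
	 */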
1273 #ifdef FORE200E_USE_TASKLET
1275 fore200e_tx_tasklet(
unsigned long data)
1277 struct fore200e* fore200e = (
struct fore200e*) data;
1278 unsigned long flags;
1280 DPRINTK(3,
"tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1283 fore200e_tx_irq(fore200e);
1284 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1289 fore200e_rx_tasklet(
unsigned long data)
1291 struct fore200e* fore200e = (
struct fore200e*) data;
1292 unsigned long flags;
1294 DPRINTK(3,
"rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1297 fore200e_rx_irq((
struct fore200e*) data);
1298 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1304 fore200e_select_scheme(
struct atm_vcc* vcc)
1307 int scheme = vcc->
vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1309 DPRINTK(1,
"VC %d.%d.%d uses buffer scheme %d\n",
1317 fore200e_activate_vcin(
struct fore200e* fore200e,
int activate,
struct atm_vcc* vcc,
int mtu)
1319 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1320 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1321 struct activate_opcode activ_opcode;
1322 struct deactivate_opcode deactiv_opcode;
1325 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->
qos.aal);
1330 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1332 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1333 activ_opcode.aal = aal;
1334 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1335 activ_opcode.pad = 0;
1338 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1339 deactiv_opcode.pad = 0;
1342 vpvc.vci = vcc->
vci;
1343 vpvc.vpi = vcc->
vpi;
1349 #ifdef FORE200E_52BYTE_AAL0_SDU
1353 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
1354 fore200e->bus->write(*(
u32*)&vpvc, (
u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1355 fore200e->bus->write(*(
u32*)&activ_opcode, (
u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1358 fore200e->bus->write(*(
u32*)&vpvc, (
u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1359 fore200e->bus->write(*(
u32*)&deactiv_opcode, (
u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1362 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1368 activate ?
"open" :
"close", vcc->
itf, vcc->
vpi, vcc->
vci);
1373 activate ?
"open" :
"clos");
#define FORE200E_MAX_BACK2BACK_CELLS 255

fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)

	rate->data_cells = rate->idle_cells = 0;
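	/*
	 * Rate control for CBR connections tells the firmware, per TPD, how
	 * many data cells may go out back-to-back (rate.data_cells) before
	 * idle cells are inserted (rate.idle_cells); setting both to zero, as
	 * above, disables traffic shaping.  The computed pair is copied into
	 * each transmit descriptor in fore200e_send().
	 */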
1398 fore200e_open(
struct atm_vcc *vcc)
1400 struct fore200e* fore200e = FORE200E_DEV(vcc->
dev);
1401 struct fore200e_vcc* fore200e_vcc;
1402 struct fore200e_vc_map*
vc_map;
1403 unsigned long flags;
1405 short vpi = vcc->
vpi;
1407 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1408 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1412 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1415 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1418 fore200e->atm_dev->number, vpi, vci);
1425 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1427 fore200e_vcc = kzalloc(
sizeof(
struct fore200e_vcc),
GFP_ATOMIC);
1428 if (fore200e_vcc ==
NULL) {
1433 DPRINTK(2,
"opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1434 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1435 vcc->
itf, vcc->
vpi, vcc->
vci, fore200e_atm2fore_aal(vcc->
qos.aal),
1436 fore200e_traffic_class[ vcc->
qos.txtp.traffic_class ],
1437 vcc->
qos.txtp.min_pcr, vcc->
qos.txtp.max_pcr, vcc->
qos.txtp.max_cdv, vcc->
qos.txtp.max_sdu,
1438 fore200e_traffic_class[ vcc->
qos.rxtp.traffic_class ],
1439 vcc->
qos.rxtp.min_pcr, vcc->
qos.rxtp.max_pcr, vcc->
qos.rxtp.max_cdv, vcc->
qos.rxtp.max_sdu);
1442 if ((vcc->
qos.txtp.traffic_class ==
ATM_CBR) && (vcc->
qos.txtp.max_pcr > 0)) {
1445 if (fore200e->available_cell_rate < vcc->
qos.txtp.max_pcr) {
1448 kfree(fore200e_vcc);
1454 fore200e->available_cell_rate -= vcc->
qos.txtp.max_pcr;
1458 vcc->
itf = vcc->
dev->number;
1465 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->
qos.rxtp.max_sdu) < 0) {
1474 fore200e->available_cell_rate += vcc->
qos.txtp.max_pcr;
1476 kfree(fore200e_vcc);
1481 if ((vcc->
qos.txtp.traffic_class ==
ATM_CBR) && (vcc->
qos.txtp.max_pcr > 0)) {
1483 fore200e_rate_ctrl(&vcc->
qos, &fore200e_vcc->rate);
1486 DPRINTK(3,
"tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1487 vcc->
itf, vcc->
vpi, vcc->
vci, fore200e_atm2fore_aal(vcc->
qos.aal),
1488 vcc->
qos.txtp.max_pcr, vcc->
qos.rxtp.max_pcr,
1489 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1492 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1493 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1494 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1497 vc_map->incarn = ++fore200e->incarn_count;
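	/*
	 * Each (re)open of a VC gets a fresh incarnation number.  Transmit
	 * completions carry the incarnation of the VC they were queued on, so
	 * fore200e_tx_irq() can detect and drop completions that belong to a
	 * VC which was closed and re-opened in the meantime.
	 */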
1507 fore200e_close(
struct atm_vcc* vcc)
1509 struct fore200e* fore200e = FORE200E_DEV(vcc->
dev);
1510 struct fore200e_vcc* fore200e_vcc;
1511 struct fore200e_vc_map*
vc_map;
1512 unsigned long flags;
1515 ASSERT((vcc->
vpi >= 0) && (vcc->
vpi < 1<<FORE200E_VPI_BITS));
1516 ASSERT((vcc->
vci >= 0) && (vcc->
vci < 1<<FORE200E_VCI_BITS));
1518 DPRINTK(2,
"closing %d.%d.%d:%d\n", vcc->
itf, vcc->
vpi, vcc->
vci, fore200e_atm2fore_aal(vcc->
qos.aal));
1522 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1526 vc_map = FORE200E_VC_MAP(fore200e, vcc->
vpi, vcc->
vci);
1533 fore200e_vcc = FORE200E_VCC(vcc);
1536 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1539 if ((vcc->
qos.txtp.traffic_class ==
ATM_CBR) && (vcc->
qos.txtp.max_pcr > 0)) {
1542 fore200e->available_cell_rate += vcc->
qos.txtp.max_pcr;
1552 kfree(fore200e_vcc);
1559 struct fore200e* fore200e = FORE200E_DEV(vcc->
dev);
1560 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1561 struct fore200e_vc_map*
vc_map;
1562 struct host_txq* txq = &fore200e->host_txq;
1563 struct host_txq_entry*
entry;
1565 struct tpd_haddr tpd_haddr;
1566 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1568 int tx_len = skb->
len;
1572 unsigned char*
data;
1573 unsigned long flags;
	DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1586 #ifdef FORE200E_52BYTE_AAL0_SDU
1588 cell_header = (
u32*) skb->
data;
1589 skb_data = skb->
data + 4;
1590 skb_len = tx_len = skb->
len - 4;
1592 DPRINTK(3,
"user-supplied cell header = 0x%08x\n", *cell_header);
1597 skb_data = skb->
data;
1601 if (((
unsigned long)skb_data) & 0x3) {
1603 DPRINTK(2,
"misaligned tx PDU on device %s\n", fore200e->name);
1611 DPRINTK(2,
"incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1628 memcpy(data, skb_data, skb_len);
1629 if (skb_len < tx_len)
1630 memset(data + skb_len, 0x00, tx_len - skb_len);
1636 vc_map = FORE200E_VC_MAP(fore200e, vcc->
vpi, vcc->
vci);
1637 ASSERT(vc_map->vcc == vcc);
1643 entry = &txq->host_entry[ txq->head ];
1645 if ((*entry->status !=
STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1648 fore200e_tx_irq(fore200e);
1652 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1663 DPRINTK(2,
"tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1664 fore200e->name, fore200e->cp_queues->heartbeat);
1679 entry->incarn = vc_map->incarn;
1682 entry->data = tx_copy ? data :
NULL;
1685 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len,
DMA_TO_DEVICE);
1686 tpd->tsd[ 0 ].length = tx_len;
1695 DPRINTK(3,
"tx on %d.%d.%d:%d, len = %u (%u)\n",
1696 vcc->
itf, vcc->
vpi, vcc->
vci, fore200e_atm2fore_aal(vcc->
qos.aal),
1697 tpd->tsd[0].length, skb_len);
1699 if (skb_len < fore200e_vcc->tx_min_pdu)
1700 fore200e_vcc->tx_min_pdu = skb_len;
1701 if (skb_len > fore200e_vcc->tx_max_pdu)
1702 fore200e_vcc->tx_max_pdu = skb_len;
1703 fore200e_vcc->tx_pdu++;
1706 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1707 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1710 tpd->atm_header.clp = (*cell_header &
ATM_HDR_CLP);
1718 tpd->atm_header.clp = 0;
1719 tpd->atm_header.plt = 0;
1720 tpd->atm_header.vci = vcc->
vci;
1721 tpd->atm_header.vpi = vcc->
vpi;
1722 tpd->atm_header.gfc = 0;
1725 tpd->spec.length = tx_len;
1727 tpd->spec.aal = fore200e_atm2fore_aal(vcc->
qos.aal);
	tpd_haddr.size  = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT);

	tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT;

	fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
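	/*
	 * Kicking the transmit: the TPD's bus address (shifted right by
	 * TPD_HADDR_SHIFT) and its size in TPD_HADDR_SHIFT units are packed
	 * into one 32-bit word and written to the adapter's tx queue entry.
	 */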
1737 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1744 fore200e_getstats(
struct fore200e* fore200e)
1746 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1747 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1748 struct stats_opcode
opcode;
1752 if (fore200e->stats ==
NULL) {
1754 if (fore200e->stats ==
NULL)
1758 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1763 opcode.opcode = OPCODE_GET_STATS;
1766 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1770 fore200e->bus->write(*(
u32*)&
opcode, (
u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1772 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1779 printk(
FORE200E "unable to get statistics from device %s\n", fore200e->name);
1788 fore200e_getsockopt(
struct atm_vcc* vcc,
int level,
int optname,
void __user *optval,
int optlen)
1792 DPRINTK(2,
"getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1793 vcc->
itf, vcc->
vpi, vcc->
vci, level, optname, optval, optlen);
1800 fore200e_setsockopt(
struct atm_vcc* vcc,
int level,
int optname,
void __user *optval,
unsigned int optlen)
1804 DPRINTK(2,
"setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1805 vcc->
itf, vcc->
vpi, vcc->
vci, level, optname, optval, optlen);
1813 fore200e_get_oc3(
struct fore200e* fore200e,
struct oc3_regs* regs)
1815 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1816 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1817 struct oc3_opcode
opcode;
1819 u32 oc3_regs_dma_addr;
1821 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs,
sizeof(
struct oc3_regs),
DMA_FROM_DEVICE);
1825 opcode.opcode = OPCODE_GET_OC3;
1830 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1834 fore200e->bus->write(*(
u32*)&
opcode, (
u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1836 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1840 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr,
sizeof(
struct oc3_regs),
DMA_FROM_DEVICE);
1843 printk(
FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1855 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1856 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1857 struct oc3_opcode
opcode;
1860 DPRINTK(2,
"set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1864 opcode.opcode = OPCODE_SET_OC3;
1869 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1873 fore200e->bus->write(*(
u32*)&
opcode, (
u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1875 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1880 printk(
FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1889 fore200e_setloop(
struct fore200e* fore200e,
int loop_mode)
1891 u32 mct_value, mct_mask;
1897 switch (loop_mode) {
1916 error = fore200e_set_oc3(fore200e,
SUNI_MCT, mct_value, mct_mask);
1918 fore200e->loop_mode = loop_mode;
1925 fore200e_fetch_stats(
struct fore200e* fore200e,
struct sonet_stats __user *
arg)
1929 if (fore200e_getstats(fore200e) < 0)
1932 tmp.section_bip =
be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1933 tmp.line_bip =
be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1934 tmp.path_bip =
be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1935 tmp.line_febe =
be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1936 tmp.path_febe =
be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1937 tmp.corr_hcs =
be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1938 tmp.uncorr_hcs =
be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1939 tmp.tx_cells =
be32_to_cpu(fore200e->stats->aal0.cells_transmitted) +
1940 be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1941 be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1942 tmp.rx_cells =
be32_to_cpu(fore200e->stats->aal0.cells_received) +
1943 be32_to_cpu(fore200e->stats->aal34.cells_received) +
1944 be32_to_cpu(fore200e->stats->aal5.cells_received);
1954 fore200e_ioctl(
struct atm_dev* dev,
unsigned int cmd,
void __user * arg)
1956 struct fore200e* fore200e = FORE200E_DEV(dev);
1958 DPRINTK(2,
"ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (
unsigned long)arg);
1963 return fore200e_fetch_stats(fore200e, (
struct sonet_stats __user *)arg);
1969 return fore200e_setloop(fore200e, (
int)(
unsigned long)arg);
1972 return put_user(fore200e->loop_mode, (
int __user *)arg) ? -
EFAULT : 0;
1983 fore200e_change_qos(
struct atm_vcc* vcc,
struct atm_qos* qos,
int flags)
1985 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1986 struct fore200e* fore200e = FORE200E_DEV(vcc->
dev);
	DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
1993 DPRINTK(2,
"change_qos %d.%d.%d, "
1994 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1995 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
1996 "available_cell_rate = %u",
1998 fore200e_traffic_class[ qos->
txtp.traffic_class ],
1999 qos->
txtp.min_pcr, qos->
txtp.max_pcr, qos->
txtp.max_cdv, qos->
txtp.max_sdu,
2000 fore200e_traffic_class[ qos->rxtp.traffic_class ],
2001 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2002 flags, fore200e->available_cell_rate);
2004 if ((qos->
txtp.traffic_class ==
ATM_CBR) && (qos->
txtp.max_pcr > 0)) {
2007 if (fore200e->available_cell_rate + vcc->
qos.txtp.max_pcr < qos->
txtp.max_pcr) {
2012 fore200e->available_cell_rate += vcc->
qos.txtp.max_pcr;
2013 fore200e->available_cell_rate -= qos->
txtp.max_pcr;
2020 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2032 fore200e_irq_request(
struct fore200e* fore200e)
2034 if (
request_irq(fore200e->irq, fore200e_interrupt,
IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
2037 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2042 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2044 #ifdef FORE200E_USE_TASKLET
2045 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (
unsigned long)fore200e);
2046 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (
unsigned long)fore200e);
2049 fore200e->state = FORE200E_STATE_IRQ;
2055 fore200e_get_esi(
struct fore200e* fore200e)
2057 struct prom_data* prom = kzalloc(
sizeof(
struct prom_data),
GFP_KERNEL |
GFP_DMA);
2063 ok = fore200e->bus->prom_read(fore200e, prom);
2071 (prom->hw_revision & 0xFF) +
'@',
2072 prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
2074 for (i = 0; i <
ESI_LEN; i++) {
2075 fore200e->esi[
i ] = fore200e->atm_dev->esi[
i ] = prom->mac_addr[ i + 2 ];
2085 fore200e_alloc_rx_buf(
struct fore200e* fore200e)
2087 int scheme, magn, nbr,
size,
i;
2089 struct host_bsq* bsq;
2092 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2093 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2095 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2097 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2098 size = fore200e_rx_buf_size[ scheme ][ magn ];
2100 DPRINTK(2,
"rx buffers %d / %d are being allocated\n", scheme, magn);
2103 buffer = bsq->buffer = kzalloc(nbr *
sizeof(
struct buffer),
GFP_KERNEL);
2108 bsq->freebuf =
NULL;
2110 for (i = 0; i < nbr; i++) {
2112 buffer[
i ].scheme = scheme;
2113 buffer[
i ].magn = magn;
2114 #ifdef FORE200E_BSQ_DEBUG
2115 buffer[
i ].index =
i;
2116 buffer[
i ].supplied = 0;
2120 if (fore200e_chunk_alloc(fore200e,
2121 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2125 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2132 buffer[
i ].
next = bsq->freebuf;
2133 bsq->freebuf = &buffer[
i ];
2136 bsq->freebuf_count = nbr;
2138 #ifdef FORE200E_BSQ_DEBUG
2139 bsq_audit(3, bsq, scheme, magn);
2144 fore200e->state = FORE200E_STATE_ALLOC_BUF;
2150 fore200e_init_bs_queue(
struct fore200e* fore200e)
2152 int scheme, magn,
i;
2154 struct host_bsq* bsq;
2155 struct cp_bsq_entry
__iomem * cp_entry;
2157 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2158 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2160 DPRINTK(2,
"buffer supply queue %d / %d is being initialized\n", scheme, magn);
2162 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2165 if (fore200e->bus->dma_chunk_alloc(fore200e,
2167 sizeof(
enum status),
2169 fore200e->bus->status_alignment) < 0) {
2174 if (fore200e->bus->dma_chunk_alloc(fore200e,
2176 sizeof(
struct rbd_block),
2178 fore200e->bus->descr_alignment) < 0) {
2180 fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2185 cp_entry = fore200e->virt_base +
2186 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2189 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2191 bsq->host_entry[
i ].status =
2193 bsq->host_entry[
i ].rbd_block =
2195 bsq->host_entry[
i ].rbd_block_dma =
2197 bsq->host_entry[
i ].cp_entry = &cp_entry[
i ];
2202 &cp_entry[ i ].status_haddr);
2207 fore200e->state = FORE200E_STATE_INIT_BSQ;
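	/*
	 * All four queue-init routines (bs, rx, tx, cmd) follow the same
	 * pattern: allocate a DMA chunk for the per-entry status words (and,
	 * where needed, one for the descriptors), point each host entry at
	 * its slot within those chunks, and publish the corresponding DMA
	 * addresses to the adapter through the cp_entry registers.
	 */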
2213 fore200e_init_rx_queue(
struct fore200e* fore200e)
2215 struct host_rxq* rxq = &fore200e->host_rxq;
2216 struct cp_rxq_entry
__iomem * cp_entry;
2219 DPRINTK(2,
"receive queue is being initialized\n");
2222 if (fore200e->bus->dma_chunk_alloc(fore200e,
2224 sizeof(
enum status),
2226 fore200e->bus->status_alignment) < 0) {
2231 if (fore200e->bus->dma_chunk_alloc(fore200e,
2235 fore200e->bus->descr_alignment) < 0) {
2237 fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2242 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2245 for (i=0; i < QUEUE_SIZE_RX; i++) {
2247 rxq->host_entry[
i ].status =
2249 rxq->host_entry[
i ].rpd =
2251 rxq->host_entry[
i ].rpd_dma =
2253 rxq->host_entry[
i ].cp_entry = &cp_entry[
i ];
2258 &cp_entry[ i ].status_haddr);
2261 &cp_entry[ i ].rpd_haddr);
2267 fore200e->state = FORE200E_STATE_INIT_RXQ;
2273 fore200e_init_tx_queue(
struct fore200e* fore200e)
2275 struct host_txq* txq = &fore200e->host_txq;
2276 struct cp_txq_entry
__iomem * cp_entry;
2279 DPRINTK(2,
"transmit queue is being initialized\n");
2282 if (fore200e->bus->dma_chunk_alloc(fore200e,
2284 sizeof(
enum status),
2286 fore200e->bus->status_alignment) < 0) {
2291 if (fore200e->bus->dma_chunk_alloc(fore200e,
2295 fore200e->bus->descr_alignment) < 0) {
2297 fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2302 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2305 for (i=0; i < QUEUE_SIZE_TX; i++) {
2307 txq->host_entry[
i ].status =
2309 txq->host_entry[
i ].tpd =
2311 txq->host_entry[
i ].tpd_dma =
2313 txq->host_entry[
i ].cp_entry = &cp_entry[
i ];
2318 &cp_entry[ i ].status_haddr);
2330 fore200e->state = FORE200E_STATE_INIT_TXQ;
2336 fore200e_init_cmd_queue(
struct fore200e* fore200e)
2338 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2339 struct cp_cmdq_entry
__iomem * cp_entry;
2342 DPRINTK(2,
"command queue is being initialized\n");
2345 if (fore200e->bus->dma_chunk_alloc(fore200e,
2347 sizeof(
enum status),
2349 fore200e->bus->status_alignment) < 0) {
2354 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2357 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2359 cmdq->host_entry[
i ].status =
2361 cmdq->host_entry[
i ].cp_entry = &cp_entry[
i ];
2366 &cp_entry[ i ].status_haddr);
2372 fore200e->state = FORE200E_STATE_INIT_CMDQ;
2378 fore200e_param_bs_queue(
struct fore200e* fore200e,
2379 enum buffer_scheme scheme,
enum buffer_magn magn,
2380 int queue_length,
int pool_size,
int supply_blksize)
2382 struct bs_spec
__iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2384 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2385 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2386 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2387 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2392 fore200e_initialize(
struct fore200e* fore200e)
2394 struct cp_queues
__iomem * cpq;
2395 int ok, scheme, magn;
2397 DPRINTK(2,
"device %s being initialized\n", fore200e->name);
2402 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2405 fore200e->bus->write(1, &cpq->imask);
2407 if (fore200e->bus->irq_enable)
2408 fore200e->bus->irq_enable(fore200e);
2410 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2412 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2413 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2414 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2416 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2417 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
2419 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2420 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2421 fore200e_param_bs_queue(fore200e, scheme, magn,
2423 fore200e_rx_buf_nbr[ scheme ][ magn ],
2428 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2430 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2432 printk(
FORE200E "device %s initialization failed\n", fore200e->name);
2438 fore200e->state = FORE200E_STATE_INITIALIZE;
2444 fore200e_monitor_putc(
struct fore200e* fore200e,
char c)
2446 struct cp_monitor
__iomem * monitor = fore200e->cp_monitor;
2451 fore200e->bus->write(((
u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2456 fore200e_monitor_getc(
struct fore200e* fore200e)
2458 struct cp_monitor
__iomem * monitor = fore200e->cp_monitor;
2464 c = (
int) fore200e->bus->read(&monitor->soft_uart.recv);
2466 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2468 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2481 fore200e_monitor_puts(
struct fore200e* fore200e,
char*
str)
2486 while (fore200e_monitor_getc(fore200e) >= 0);
2488 fore200e_monitor_putc(fore200e, *str++);
2491 while (fore200e_monitor_getc(fore200e) >= 0);
#ifdef __LITTLE_ENDIAN
#define FW_EXT ".bin"
#else
#define FW_EXT "_ecd.bin2"
#endif
2501 fore200e_load_and_start_fw(
struct fore200e* fore200e)
2512 if (
strcmp(fore200e->bus->model_name,
"PCA-200E") == 0)
2513 device = &((
struct pci_dev *) fore200e->bus_dev)->dev;
2515 else if (
strcmp(fore200e->bus->model_name,
"SBA-200E") == 0)
2523 printk(
FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
2528 fw_size = firmware->
size /
sizeof(
u32);
2529 fw_header = (
struct fw_header *) firmware->
data;
2530 load_addr = fore200e->virt_base +
le32_to_cpu(fw_header->load_offset);
2532 DPRINTK(2,
"device %s firmware being loaded at 0x%p (%d words)\n",
2533 fore200e->name, load_addr, fw_size);
2536 printk(
FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2540 for (; fw_size--; fw_data++, load_addr++)
2541 fore200e->bus->write(
le32_to_cpu(*fw_data), load_addr);
2543 DPRINTK(2,
"device %s firmware being started\n", fore200e->name);
2545 #if defined(__sparc_v9__)
2551 fore200e_monitor_puts(fore200e, buf);
2553 if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2554 printk(
FORE200E "device %s firmware didn't start\n", fore200e->name);
2560 fore200e->state = FORE200E_STATE_START_FW;
2570 fore200e_register(
struct fore200e* fore200e,
struct device *parent)
2574 DPRINTK(2,
"device %s being registered\n", fore200e->name);
2576 atm_dev =
atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
2578 if (atm_dev ==
NULL) {
2579 printk(
FORE200E "unable to register device %s\n", fore200e->name);
2584 fore200e->atm_dev = atm_dev;
2586 atm_dev->
ci_range.vpi_bits = FORE200E_VPI_BITS;
2587 atm_dev->
ci_range.vci_bits = FORE200E_VCI_BITS;
2591 fore200e->state = FORE200E_STATE_REGISTER;
2597 fore200e_init(
struct fore200e* fore200e,
struct device *parent)
2599 if (fore200e_register(fore200e, parent) < 0)
2602 if (fore200e->bus->configure(fore200e) < 0)
2605 if (fore200e->bus->map(fore200e) < 0)
2608 if (fore200e_reset(fore200e, 1) < 0)
2611 if (fore200e_load_and_start_fw(fore200e) < 0)
2614 if (fore200e_initialize(fore200e) < 0)
2617 if (fore200e_init_cmd_queue(fore200e) < 0)
2620 if (fore200e_init_tx_queue(fore200e) < 0)
2623 if (fore200e_init_rx_queue(fore200e) < 0)
2626 if (fore200e_init_bs_queue(fore200e) < 0)
2629 if (fore200e_alloc_rx_buf(fore200e) < 0)
2632 if (fore200e_get_esi(fore200e) < 0)
2635 if (fore200e_irq_request(fore200e) < 0)
2638 fore200e_supply(fore200e);
2641 fore200e->state = FORE200E_STATE_COMPLETE;
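	/*
	 * fore200e_init() advances fore200e->state after every successful
	 * step above, which is exactly what lets fore200e_shutdown() unwind
	 * only the steps that actually completed.
	 */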
2650 const struct fore200e_bus *
bus;
2651 struct fore200e *fore200e;
2652 static int index = 0;
2660 fore200e = kzalloc(
sizeof(
struct fore200e),
GFP_KERNEL);
2664 fore200e->bus =
bus;
2665 fore200e->bus_dev =
op;
2666 fore200e->irq = op->
archdata.irqs[0];
2667 fore200e->phys_base = op->
resource[0].start;
2669 sprintf(fore200e->name,
"%s-%d", bus->model_name, index);
2671 err = fore200e_init(fore200e, &op->
dev);
2673 fore200e_shutdown(fore200e);
2688 fore200e_shutdown(fore200e);
2694 static const struct of_device_id fore200e_sba_match[] = {
2696 .
name = SBA200E_PROM_NAME,
2697 .data = (
void *) &fore200e_bus[1],
2705 .name =
"fore_200e",
2707 .of_match_table = fore200e_sba_match,
2709 .probe = fore200e_sba_probe,
2716 fore200e_pca_detect(
struct pci_dev *pci_dev,
const struct pci_device_id *pci_ent)
2718 const struct fore200e_bus* bus = (
struct fore200e_bus*) pci_ent->
driver_data;
2719 struct fore200e* fore200e;
2721 static int index = 0;
2728 fore200e = kzalloc(
sizeof(
struct fore200e),
GFP_KERNEL);
2729 if (fore200e ==
NULL) {
2734 fore200e->bus =
bus;
2736 fore200e->irq = pci_dev->
irq;
2739 sprintf(fore200e->name,
"%s-%d", bus->model_name, index - 1);
2744 fore200e->bus->model_name,
2745 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2747 sprintf(fore200e->name,
"%s-%d", bus->model_name, index);
2749 err = fore200e_init(fore200e, &pci_dev->
dev);
2751 fore200e_shutdown(fore200e);
2756 pci_set_drvdata(pci_dev, fore200e);
2769 static void __devexit fore200e_pca_remove_one(
struct pci_dev *pci_dev)
2771 struct fore200e *fore200e;
2773 fore200e = pci_get_drvdata(pci_dev);
2775 fore200e_shutdown(fore200e);
2783 0, 0, (
unsigned long) &fore200e_bus[0] },
2789 static struct pci_driver fore200e_pca_driver = {
2790 .
name =
"fore_200e",
2791 .probe = fore200e_pca_detect,
2793 .id_table = fore200e_pca_tbl,
2797 static int __init fore200e_module_init(
void)
2810 err = pci_register_driver(&fore200e_pca_driver);
2821 static void __exit fore200e_module_cleanup(
void)
2832 fore200e_proc_read(
struct atm_dev *dev, loff_t*
pos,
char* page)
2834 struct fore200e* fore200e = FORE200E_DEV(dev);
2835 struct fore200e_vcc* fore200e_vcc;
2838 unsigned long flags;
2842 if (fore200e_getstats(fore200e) < 0)
2847 " internal name:\t\t%s\n", fore200e->name);
2850 if (fore200e->bus->proc_read)
2851 len += fore200e->bus->proc_read(fore200e, page + len);
2854 " interrupt line:\t\t%s\n"
2855 " physical base address:\t0x%p\n"
2856 " virtual base address:\t0x%p\n"
2857 " factory address (ESI):\t%pM\n"
2858 " board serial number:\t\t%d\n\n",
2859 fore200e_irq_itoa(fore200e->irq),
2860 (
void*)fore200e->phys_base,
2861 fore200e->virt_base,
2863 fore200e->esi[4] * 256 + fore200e->esi[5]);
2870 " free small bufs, scheme 1:\t%d\n"
2871 " free large bufs, scheme 1:\t%d\n"
2872 " free small bufs, scheme 2:\t%d\n"
2873 " free large bufs, scheme 2:\t%d\n",
2874 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2875 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2876 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2877 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2880 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2883 " cell processor:\n"
2884 " heartbeat state:\t\t");
2886 if (hb >> 16 != 0xDEAD)
2887 len +=
sprintf(page + len,
"0x%08x\n", hb);
2889 len +=
sprintf(page + len,
"*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2895 static const char* media_name[] = {
2896 "unshielded twisted pair",
2897 "multimode optical fiber ST",
2898 "multimode optical fiber SC",
2899 "single-mode optical fiber ST",
2900 "single-mode optical fiber SC",
2904 static const char* oc3_mode[] = {
2906 "diagnostic loopback",
2911 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2912 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2913 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2914 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2917 if (media_index > 4)
2920 switch (fore200e->loop_mode) {
2927 default: oc3_index = 3;
2931 " firmware release:\t\t%d.%d.%d\n"
2932 " monitor release:\t\t%d.%d\n"
2933 " media type:\t\t\t%s\n"
2934 " OC-3 revision:\t\t0x%x\n"
2935 " OC-3 mode:\t\t\t%s",
2936 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2937 mon960_release >> 16, mon960_release << 16 >> 16,
2938 media_name[ media_index ],
2940 oc3_mode[ oc3_index ]);
2944 struct cp_monitor
__iomem * cp_monitor = fore200e->cp_monitor;
2949 " version number:\t\t%d\n"
2950 " boot status word:\t\t0x%08x\n",
2951 fore200e->bus->read(&cp_monitor->mon_version),
2952 fore200e->bus->read(&cp_monitor->bstat));
2958 " device statistics:\n"
2960 " crc_header_errors:\t\t%10u\n"
2961 " framing_errors:\t\t%10u\n",
2962 be32_to_cpu(fore200e->stats->phy.crc_header_errors),
2963 be32_to_cpu(fore200e->stats->phy.framing_errors));
2968 " section_bip8_errors:\t%10u\n"
2969 " path_bip8_errors:\t\t%10u\n"
2970 " line_bip24_errors:\t\t%10u\n"
2971 " line_febe_errors:\t\t%10u\n"
2972 " path_febe_errors:\t\t%10u\n"
2973 " corr_hcs_errors:\t\t%10u\n"
2974 " ucorr_hcs_errors:\t\t%10u\n",
2975 be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
2976 be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
2977 be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
2978 be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
2979 be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
2980 be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
2981 be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
2985 " ATM:\t\t\t\t cells\n"
2988 " vpi out of range:\t\t%10u\n"
2989 " vpi no conn:\t\t%10u\n"
2990 " vci out of range:\t\t%10u\n"
2991 " vci no conn:\t\t%10u\n",
2992 be32_to_cpu(fore200e->stats->atm.cells_transmitted),
3001 " AAL0:\t\t\t cells\n"
3004 " dropped:\t\t\t%10u\n",
3005 be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
3006 be32_to_cpu(fore200e->stats->aal0.cells_received),
3007 be32_to_cpu(fore200e->stats->aal0.cells_dropped));
3012 " SAR sublayer:\t\t cells\n"
3015 " dropped:\t\t\t%10u\n"
3016 " CRC errors:\t\t%10u\n"
3017 " protocol errors:\t\t%10u\n\n"
3018 " CS sublayer:\t\t PDUs\n"
3021 " dropped:\t\t\t%10u\n"
3022 " protocol errors:\t\t%10u\n",
3023 be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
3024 be32_to_cpu(fore200e->stats->aal34.cells_received),
3025 be32_to_cpu(fore200e->stats->aal34.cells_dropped),
3026 be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
3027 be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
3028 be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
3029 be32_to_cpu(fore200e->stats->aal34.cspdus_received),
3030 be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
3031 be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
3036 " SAR sublayer:\t\t cells\n"
3039 " dropped:\t\t\t%10u\n"
3040 " congestions:\t\t%10u\n\n"
3041 " CS sublayer:\t\t PDUs\n"
3044 " dropped:\t\t\t%10u\n"
3045 " CRC errors:\t\t%10u\n"
3046 " protocol errors:\t\t%10u\n",
3047 be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
3048 be32_to_cpu(fore200e->stats->aal5.cells_received),
3050 be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
3051 be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
3052 be32_to_cpu(fore200e->stats->aal5.cspdus_received),
3053 be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
3054 be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
3055 be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
3059 " AUX:\t\t allocation failures\n"
3060 " small b1:\t\t\t%10u\n"
3061 " large b1:\t\t\t%10u\n"
3062 " small b2:\t\t\t%10u\n"
3063 " large b2:\t\t\t%10u\n"
3064 " RX PDUs:\t\t\t%10u\n"
3065 " TX PDUs:\t\t\t%10lu\n",
3066 be32_to_cpu(fore200e->stats->aux.small_b1_failed),
3067 be32_to_cpu(fore200e->stats->aux.large_b1_failed),
3068 be32_to_cpu(fore200e->stats->aux.small_b2_failed),
3069 be32_to_cpu(fore200e->stats->aux.large_b2_failed),
3070 be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
3075 " receive carrier:\t\t\t%s\n",
3076 fore200e->stats->aux.receive_carrier ?
"ON" :
"OFF!");
3080 " VCCs:\n address VPI VCI AAL "
3081 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
3084 for (i = 0; i < NBR_CONNECT; i++) {
3086 vcc = fore200e->vc_map[
i].vcc;
3095 fore200e_vcc = FORE200E_VCC(vcc);
3099 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
3100 (
u32)(
unsigned long)vcc,
3101 vcc->
vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3102 fore200e_vcc->tx_pdu,
3103 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3104 fore200e_vcc->tx_max_pdu,
3105 fore200e_vcc->rx_pdu,
3106 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3107 fore200e_vcc->rx_max_pdu);
3109 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3113 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3123 static const struct atmdev_ops fore200e_ops =
3125 .open = fore200e_open,
3126 .close = fore200e_close,
3127 .ioctl = fore200e_ioctl,
3128 .getsockopt = fore200e_getsockopt,
3129 .setsockopt = fore200e_setsockopt,
3130 .send = fore200e_send,
3131 .change_qos = fore200e_change_qos,
3132 .proc_read = fore200e_proc_read,
3137 static const struct fore200e_bus fore200e_bus[] = {
3139 {
"PCA-200E",
"pca200e", 32, 4, 32,
3142 fore200e_pca_dma_map,
3143 fore200e_pca_dma_unmap,
3144 fore200e_pca_dma_sync_for_cpu,
3145 fore200e_pca_dma_sync_for_device,
3146 fore200e_pca_dma_chunk_alloc,
3147 fore200e_pca_dma_chunk_free,
3148 fore200e_pca_configure,
3151 fore200e_pca_prom_read,
3154 fore200e_pca_irq_check,
3155 fore200e_pca_irq_ack,
3156 fore200e_pca_proc_read,
3160 {
"SBA-200E",
"sba200e", 32, 64, 32,
3163 fore200e_sba_dma_map,
3164 fore200e_sba_dma_unmap,
3165 fore200e_sba_dma_sync_for_cpu,
3166 fore200e_sba_dma_sync_for_device,
3167 fore200e_sba_dma_chunk_alloc,
3168 fore200e_sba_dma_chunk_free,
3169 fore200e_sba_configure,
3172 fore200e_sba_prom_read,
3174 fore200e_sba_irq_enable,
3175 fore200e_sba_irq_check,
3176 fore200e_sba_irq_ack,
3177 fore200e_sba_proc_read,
3185 #ifdef __LITTLE_ENDIAN__