17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 #include <linux/slab.h>
21 #include <linux/pci.h>
/* Register-offset helpers: byte offset of a dma64regs field relative to the
 * tx/rx register base of a DMA engine instance.  Arguments are fully
 * parenthesized for safe macro expansion.
 */
#define DMA64REGOFFS(field)		offsetof(struct dma64regs, field)
#define DMA64TXREGOFFS(di, field)	((di)->d64txregbase + DMA64REGOFFS(field))
#define DMA64RXREGOFFS(di, field)	((di)->d64rxregbase + DMA64REGOFFS(field))

/* 64-bit DMA descriptor rings: alignment and maximum ring size (8 KiB) */
#define D64RINGALIGN_BITS	13
#define D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
#define D64RINGALIGN		(1 << D64RINGALIGN_BITS)

/* maximum number of descriptors in one maximally-sized ring */
#define D64MAXDD	(D64MAXRINGSZ / sizeof(struct dma64desc))

/* transmit channel control */
#define D64_XC_XE		0x00000001	/* transmit enable */
#define D64_XC_SE		0x00000002	/* transmit suspend request */
#define D64_XC_LE		0x00000004	/* loopback enable */
#define D64_XC_FL		0x00000010	/* flush request */
#define D64_XC_PD		0x00000800	/* parity check disable */
#define D64_XC_AE		0x00030000	/* address extension bits */
#define D64_XC_AE_SHIFT		16

/* transmit descriptor table pointer */
#define D64_XP_LD_MASK		0x00000fff	/* last valid descriptor */

/* transmit channel status */
#define D64_XS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define D64_XS0_XS_MASK		0xf0000000	/* transmit state */
#define D64_XS0_XS_SHIFT	28
#define D64_XS0_XS_DISABLED	0x00000000	/* disabled */
#define D64_XS0_XS_ACTIVE	0x10000000	/* active */
#define D64_XS0_XS_IDLE		0x20000000	/* idle wait */
#define D64_XS0_XS_STOPPED	0x30000000	/* stopped */
#define D64_XS0_XS_SUSP		0x40000000	/* suspend pending */

#define D64_XS1_AD_MASK		0x00001fff	/* active descriptor */
#define D64_XS1_XE_MASK		0xf0000000	/* transmit errors */
#define D64_XS1_XE_SHIFT	28
#define D64_XS1_XE_NOERR	0x00000000	/* no error */
#define D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
#define D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
#define D64_XS1_XE_DTE		0x30000000	/* data transfer error */
#define D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
#define D64_XS1_XE_COREE	0x50000000	/* core error */

/* receive channel control */
#define D64_RC_RE		0x00000001	/* receive enable */
#define D64_RC_RO_MASK		0x000000fe	/* receive frame offset */
#define D64_RC_RO_SHIFT		1
#define D64_RC_FM		0x00000100	/* direct fifo receive (pio) mode */
#define D64_RC_SH		0x00000200	/* separate rx header descriptor enable */
#define D64_RC_OC		0x00000400	/* overflow continue */
#define D64_RC_PD		0x00000800	/* parity check disable */
#define D64_RC_AE		0x00030000	/* address extension bits */
#define D64_RC_AE_SHIFT		16

/* flags for the dma controller (dma.dmactrlflags) */
#define DMA_CTRL_PEN		(1 << 0)	/* parity enable */
#define DMA_CTRL_ROC		(1 << 1)	/* rx overflow continue */
#define DMA_CTRL_RXMULTI	(1 << 2)	/* allow rx scatter to multiple descriptors */
#define DMA_CTRL_UNFRAMED	(1 << 3)	/* unframed rx/tx data */

/* receive descriptor table pointer */
#define D64_RP_LD_MASK		0x00000fff	/* last valid descriptor */

/* receive channel status */
#define D64_RS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define D64_RS0_RS_MASK		0xf0000000	/* receive state */
#define D64_RS0_RS_SHIFT	28
#define D64_RS0_RS_DISABLED	0x00000000	/* disabled */
#define D64_RS0_RS_ACTIVE	0x10000000	/* active */
#define D64_RS0_RS_IDLE		0x20000000	/* idle wait */
#define D64_RS0_RS_STOPPED	0x30000000	/* stopped */
#define D64_RS0_RS_SUSP		0x40000000	/* suspend pending */

#define D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
#define D64_RS1_RE_MASK		0xf0000000	/* receive errors */
#define D64_RS1_RE_SHIFT	28
#define D64_RS1_RE_NOERR	0x00000000	/* no error */
#define D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
#define D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
#define D64_RS1_RE_DTE		0x30000000	/* data transfer error */
#define D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
#define D64_RS1_RE_COREE	0x50000000	/* core error */

/* fifoaddr */
#define D64_FA_OFF_MASK		0xffff	/* offset */
#define D64_FA_SEL_MASK		0xf0000	/* select */
#define D64_FA_SEL_SHIFT	16
#define D64_FA_SEL_XDD		0x00000	/* transmit dma data */
#define D64_FA_SEL_XDP		0x10000	/* transmit dma pointers */
#define D64_FA_SEL_RDD		0x40000	/* receive dma data */
#define D64_FA_SEL_RDP		0x50000	/* receive dma pointers */
#define D64_FA_SEL_XFD		0x80000	/* transmit fifo data */
#define D64_FA_SEL_XFP		0x90000	/* transmit fifo pointers */
#define D64_FA_SEL_RFD		0xc0000	/* receive fifo data */
#define D64_FA_SEL_RFP		0xd0000	/* receive fifo pointers */
#define D64_FA_SEL_RSD		0xe0000	/* receive frame status data */
#define D64_FA_SEL_RSP		0xf0000	/* receive frame status pointers */

/* descriptor control flags 1 */
#define D64_CTRL_COREFLAGS	0x0ff00000	/* core specific flags */
#define D64_CTRL1_EOT		((u32)1 << 28)	/* end of descriptor table */
#define D64_CTRL1_IOC		((u32)1 << 29)	/* interrupt on completion */
#define D64_CTRL1_EOF		((u32)1 << 30)	/* end of frame */
#define D64_CTRL1_SOF		((u32)1 << 31)	/* start of frame */

/* descriptor control flags 2 */
#define D64_CTRL2_BC_MASK	0x00007fff	/* buffer byte count mask */
#define D64_CTRL2_AE		0x00030000	/* address extension bits */
#define D64_CTRL2_AE_SHIFT	16
#define D64_CTRL2_PARITY	0x00040000	/* parity bit */

/* control flags in the range [27:20] */
#define D64_CTRL_CORE_MASK	0x0ff00000

/* receive frame status */
#define D64_RX_FRM_STS_LEN	0x0000ffff	/* frame length mask */
#define D64_RX_FRM_STS_OVFL	0x00800000	/* RxOverFlow */
#define D64_RX_FRM_STS_DSCRCNT	0x0f000000	/* no. of descriptors used - 1 */
#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/* core-dependent data type */

/* Extra headroom reserved at the front of rx buffers.
 * NOTE(review): exact consumer not visible in this chunk; appears to be the
 * default headroom applied alongside dma_attach()'s rxextheadroom -- confirm.
 */
#define BCMEXTRAHDROOM 172
/*
 * Debug/trace logging.  With DEBUG the macros honour the per-instance
 * message-level bitmask (*di->msg_level: bit 0 enables errors, bit 1 enables
 * trace); without DEBUG they compile away through no_printk() while still
 * type-checking their format arguments.  DMA_NONE is always compiled out.
 *
 * NOTE(review): the #if/#else/#endif guards and do { } while (0) wrappers
 * were lost in extraction (only the two conflicting macro bodies remain
 * visible); structure reconstructed from the trailing-backslash bodies --
 * confirm against the original file.
 */
#if defined(DEBUG)
#define DMA_ERROR(fmt, ...)						\
do {									\
	if (*di->msg_level & 1)						\
		pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);		\
} while (0)
#define DMA_TRACE(fmt, ...)						\
do {									\
	if (*di->msg_level & 2)						\
		pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);		\
} while (0)
#else
#define DMA_ERROR(fmt, ...)			\
	no_printk(fmt, ##__VA_ARGS__)
#define DMA_TRACE(fmt, ...)			\
	no_printk(fmt, ##__VA_ARGS__)
#endif				/* defined(DEBUG) */

#define DMA_NONE(fmt, ...)			\
	no_printk(fmt, ##__VA_ARGS__)
/* Convert between byte offsets and descriptor-table indexes */
#define B2I(bytes, type)	((bytes) / sizeof(type))
#define I2B(index, type)	((index) * sizeof(type))

/* High PCI address bits that cannot be programmed directly into a
 * descriptor; they are masked off (see the "pa &= ~PCI32ADDR_HIGH" call
 * sites) and routed through the DMA address-extension (AE) fields. */
#define PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT	30	/* address[31:30] */

#define PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define PCI64ADDR_HIGH_SHIFT	31	/* address[63] */
305 static uint dma_msg_level;
311 u32 par_data = *(
u32 *)&data;
313 par_data ^= par_data >> 16;
314 par_data ^= par_data >> 8;
315 par_data ^= par_data >> 4;
316 par_data ^= par_data >> 2;
317 par_data ^= par_data >> 1;
336 return xxd(x, di->
ntxd);
341 return xxd(x, di->
nrxd);
346 return txd(di, i + 1);
351 return txd(di, i - 1);
356 return txd(di, i + 1);
378 dmactrlflags = di->
dma.dmactrlflags;
379 dmactrlflags &= ~mask;
380 dmactrlflags |=
flags;
398 dmactrlflags &= ~DMA_CTRL_PEN;
401 di->
dma.dmactrlflags = dmactrlflags;
406 static bool _dma64_addrext(
struct dma_info *di,
uint ctrl_offset)
410 w = bcma_read32(di->
core, ctrl_offset);
419 static bool _dma_isaddrext(
struct dma_info *di)
426 DMA_ERROR(
"%s: DMA64 tx doesn't have AE set\n",
431 DMA_ERROR(
"%s: DMA64 rx doesn't have AE set\n",
439 static bool _dma_descriptor_align(
struct dma_info *di)
476 u8 dma_align_sizetobits(
uint size)
490 static void *dma_ringalloc(
struct dma_info *di,
u32 boundary,
uint size,
496 u32 alignbytes = 1 << *alignbits;
498 va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);
503 desc_strtaddr = (
u32)
roundup((
unsigned long)
va, alignbytes);
504 if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
506 *alignbits = dma_align_sizetobits(size);
508 va = dma_alloc_consistent(di, size, *alignbits,
525 size = (direction ==
DMA_TX) ? (di->
ntxd * ddlen) : (di->
nrxd * ddlen);
527 align = (1 << align_bits);
529 if (direction ==
DMA_TX) {
533 DMA_ERROR(
"%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
537 align = (1 << align_bits);
547 DMA_ERROR(
"%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
551 align = (1 << align_bits);
562 static bool _dma_alloc(
struct dma_info *di,
uint direction)
564 return dma64_alloc(di, direction);
570 uint rxbufsize,
int rxextheadroom,
583 di->
msg_level = msg_level ? msg_level : &dma_msg_level;
601 DMA_TRACE(
"%s: %s flags 0x%x ntxd %d nrxd %d "
602 "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
603 "txregbase %u rxregbase %u\n", name,
"DMA64",
604 di->
dma.dmactrlflags, ntxd, nrxd, rxbufsize,
605 rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
648 && ((rev > 0) && (rev <= 2)))
651 ((rev == 0) || (rev == 1)))
654 di->
addrext = _dma_isaddrext(di);
667 DMA_NONE(
"DMA descriptor align_needed %d, align %d\n",
672 size = ntxd *
sizeof(
void *);
680 size = nrxd *
sizeof(
void *);
691 if (!_dma_alloc(di,
DMA_TX))
700 if (!_dma_alloc(di,
DMA_RX))
706 DMA_ERROR(
"%s: txdpa 0x%x: addrext not supported\n",
711 DMA_ERROR(
"%s: rxdpa 0x%x: addrext not supported\n",
717 DMA_TRACE(
"ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
746 pa &= ~PCI32ADDR_HIGH;
754 if (di->
dma.dmactrlflags & DMA_CTRL_PEN) {
755 if (dma64_dd_parity(&ddring[outidx]))
756 ddring[outidx].
ctrl2 =
799 || !(pa & PCI32ADDR_HIGH)) {
800 if (direction ==
DMA_TX) {
817 pa &= ~PCI32ADDR_HIGH;
819 if (direction ==
DMA_TX) {
837 static void _dma_rxenable(
struct dma_info *di)
839 uint dmactrlflags = di->
dma.dmactrlflags;
848 if ((dmactrlflags & DMA_CTRL_PEN) == 0)
884 static struct sk_buff *dma64_getnextrxp(
struct dma_info *di,
bool forceall)
902 if (!forceall && (i == curr))
917 di->
rxin = nextrxd(di, i);
922 static struct sk_buff *_dma_getnextrxp(
struct dma_info *di,
bool forceall)
927 return dma64_getnextrxp(di, forceall);
950 skb_queue_head_init(&dma_frames);
952 p = _dma_getnextrxp(di,
false);
958 dma_spin_for_len(len, p);
962 __skb_trim(p, pkt_len);
968 while ((resid > 0) && (p = _dma_getnextrxp(di,
false))) {
970 __skb_trim(p, pkt_len);
984 DMA_ERROR(
"rxin %d rxout %d, hw_curr %d\n",
992 skb_queue_walk_safe(&dma_frames, p, next) {
1002 skb_queue_splice_tail(&dma_frames, skb_list);
1006 static bool dma64_rxidle(
struct dma_info *di)
1013 return ((bcma_read32(di->
core,
1034 uint extra_offset = 0;
1048 n = di->
nrxpost - nrxdactive(di, rxin, rxout);
1055 for (i = 0; i <
n; i++) {
1064 if (i == 0 && dma64_rxidle(di)) {
1088 if (rxout == (di->
nrxd - 1))
1091 dma64_dd_upd(di, di->
rxd64, pa, rxout, &flags,
1093 rxout = nextrxd(di, rxout);
1112 while ((p = _dma_getnextrxp(di,
true)))
1129 if (!
strcmp(name,
"&txavail"))
1130 return (
unsigned long)&(di->
dma.txavail);
1147 di->
dma.txavail = di->
ntxd - 1;
1158 if ((di->
dma.dmactrlflags & DMA_CTRL_PEN) == 0)
1197 return (di->
ntxd == 0) ||
1198 ((bcma_read32(di->
core,
1276 unsigned char *
data;
1297 if (nexttxd(di, txout) == di->
txin)
1309 if (txout == (di->
ntxd - 1))
1312 dma64_dd_upd(di, di->
txd64, pa, txout, &flags, len);
1314 txout = nexttxd(di, txout);
1317 di->
txp[prevtxd(di, txout)] =
p;
1335 di->
dma.txavail = 0;
1383 (active_desc - di->
xmtptrbase) & D64_XS0_CD_MASK;
1385 if (end != active_desc)
1386 end = prevtxd(di, active_desc);
1390 if ((start == 0) && (end > di->
txout))
1393 for (i = start; i != end && !txp; i = nexttxd(di, i)) {
1420 DMA_NONE(
"bogus curr: start %d end %d txout %d\n",
1421 start, end, di->
txout);
1432 (
void *pkt,
void *arg_a),
void *arg_a)
1444 (callback_fnc)(tx_info, arg_a);