#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include "i2400m.h"


#define D_SUBMODULE rx
#include "debug-levels.h"
static int i2400m_rx_reorder_disabled;	/* 0 (rx reorder enabled) by default */
module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
MODULE_PARM_DESC(rx_reorder_disabled, "If true, RX reordering will be disabled.");
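/*
 * Editor's note: with the parameter wired up as above, reordering can
 * be turned off at load time with something like
 *
 *      modprobe i2400m rx_reorder_disabled=1
 *
 * The value is latched into i2400m->rx_reorder once, in
 * i2400m_rx_setup(), so flipping it later does not affect devices that
 * have already been probed.
 */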
void i2400m_report_hook_work(struct work_struct *ws)
{
        struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws);
        struct device *dev = i2400m_dev(i2400m);
        struct i2400m_report_hook_args *args, *args_next;
        LIST_HEAD(list);
        unsigned long flags;

        while (1) {
                spin_lock_irqsave(&i2400m->rx_lock, flags);
                list_splice_init(&i2400m->rx_reports, &list);
                spin_unlock_irqrestore(&i2400m->rx_lock, flags);
                if (list_empty(&list))
                        break;
                d_printf(1, dev, "processing queued reports\n");
                list_for_each_entry_safe(args, args_next, &list, list_node) {
                        d_printf(2, dev, "processing queued report %p\n", args);
                        i2400m_report_hook(i2400m, args->l3l4_hdr, args->size);
                        kfree_skb(args->skb_rx);
                        list_del(&args->list_node);
                        kfree(args);
                }
        }
}
static
void i2400m_report_hook_flush(struct i2400m *i2400m)
{
        struct device *dev = i2400m_dev(i2400m);
        struct i2400m_report_hook_args *args, *args_next;
        LIST_HEAD(list);
        unsigned long flags;

        d_printf(1, dev, "flushing queued reports\n");
        spin_lock_irqsave(&i2400m->rx_lock, flags);
        list_splice_init(&i2400m->rx_reports, &list);
        spin_unlock_irqrestore(&i2400m->rx_lock, flags);
        list_for_each_entry_safe(args, args_next, &list, list_node) {
                d_printf(2, dev, "flushing queued report %p\n", args);
                kfree_skb(args->skb_rx);
                list_del(&args->list_node);
                kfree(args);
        }
}
static
void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx,
                              const void *l3l4_hdr, size_t size)
{
        struct device *dev = i2400m_dev(i2400m);
        unsigned long flags;
        struct i2400m_report_hook_args *args;

        args = kzalloc(sizeof(*args), GFP_NOIO);
        if (args) {
                args->skb_rx = skb_get(skb_rx);
                args->l3l4_hdr = l3l4_hdr;
                args->size = size;
                spin_lock_irqsave(&i2400m->rx_lock, flags);
                list_add_tail(&args->list_node, &i2400m->rx_reports);
                spin_unlock_irqrestore(&i2400m->rx_lock, flags);
                d_printf(2, dev, "queued report %p\n", args);
                if (likely(i2400m->ready))      /* only send if up */
                        queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
        } else {
                if (printk_ratelimit())
                        dev_err(dev, "%s:%u: Can't allocate %zu B\n",
                                __func__, __LINE__, sizeof(*args));
        }
}
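/*
 * Editor's note: reports are queued here and handled later from
 * i2400m->work_queue instead of being processed inline, since this
 * runs in the RX completion path; the skb_get() above keeps the
 * backing skb alive until the worker's kfree_skb() in
 * i2400m_report_hook_work(). GFP_NOIO is used because the allocation
 * can happen while servicing device I/O.
 */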
static
void i2400m_rx_ctl_ack(struct i2400m *i2400m,
                       const void *payload, size_t size)
{
        struct device *dev = i2400m_dev(i2400m);
        struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
        unsigned long flags;
        struct sk_buff *ack_skb;

        /* Anyone waiting for an answer? */
        spin_lock_irqsave(&i2400m->rx_lock, flags);
        if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
                dev_err(dev, "Huh? reply to command with no waiters\n");
                goto error_no_waiter;
        }
        spin_unlock_irqrestore(&i2400m->rx_lock, flags);

        ack_skb = wimax_msg_alloc(wimax_dev, NULL, payload, size, GFP_KERNEL);

        /* Check the waiter didn't time out waiting for the answer... */
        spin_lock_irqsave(&i2400m->rx_lock, flags);
        if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
                d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
                goto error_waiter_cancelled;
        }
        if (IS_ERR(ack_skb))
                dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
        i2400m->ack_skb = ack_skb;
        spin_unlock_irqrestore(&i2400m->rx_lock, flags);
        complete(&i2400m->msg_completion);
        return;

error_waiter_cancelled:
        if (!IS_ERR(ack_skb))
                kfree_skb(ack_skb);
error_no_waiter:
        spin_unlock_irqrestore(&i2400m->rx_lock, flags);
}
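/*
 * Editor's note (sketch of the waiter protocol as understood from this
 * file): i2400m->ack_skb doubles as state -- ERR_PTR(-EINPROGRESS)
 * means a caller in i2400m_msg_to_dev() is blocked on msg_completion,
 * anything else means nobody is waiting. That is why both checks above
 * test against ERR_PTR(-EINPROGRESS) under rx_lock before the answer
 * skb is handed over and the completion is fired.
 */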
static
void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
                   const void *payload, size_t size)
{
        int result;
        struct device *dev = i2400m_dev(i2400m);
        const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
        unsigned msg_type;

        result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
        if (result < 0) {
                dev_err(dev, "HW BUG? device sent a bad message: %d\n",
                        result);
                goto error_check;
        }
        msg_type = le16_to_cpu(l3l4_hdr->type);
        d_printf(1, dev, "%s 0x%04x: %zu bytes\n",
                 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
                 msg_type, size);
        d_dump(2, dev, l3l4_hdr, size);
        if (msg_type & I2400M_MT_REPORT_MASK) {
                i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size);
                result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
                                   GFP_KERNEL);
                if (result < 0)
                        dev_err(dev, "error sending report to userspace: %d\n",
                                result);
        } else          /* an ack to a CMD, GET or SET */
                i2400m_rx_ctl_ack(i2400m, payload, size);
error_check:
        return;
}
static
void i2400m_rx_trace(struct i2400m *i2400m,
                     const void *payload, size_t size)
{
        int result;
        struct device *dev = i2400m_dev(i2400m);
        struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
        const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
        unsigned msg_type;

        result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
        if (result < 0) {
                dev_err(dev, "HW BUG? device sent a bad trace message: %d\n",
                        result);
                goto error_check;
        }
        msg_type = le16_to_cpu(l3l4_hdr->type);
        d_printf(1, dev, "Trace %s 0x%04x: %zu bytes\n",
                 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
                 msg_type, size);
        d_dump(2, dev, l3l4_hdr, size);
        result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
        if (result < 0)
                dev_err(dev, "error sending trace to userspace: %d\n",
                        result);
error_check:
        return;
}
static
void __i2400m_roq_init(struct i2400m_roq *roq)
{
        roq->ws = 0;
        skb_queue_head_init(&roq->queue);
}
static
unsigned __i2400m_roq_index(struct i2400m *i2400m, struct i2400m_roq *roq)
{
        return ((unsigned long) roq - (unsigned long) i2400m->rx_roq)
                / sizeof(*roq);
}
static
unsigned __i2400m_roq_nsn(struct i2400m_roq *roq, unsigned sn)
{
        int r;
        r = ((int) sn - (int) roq->ws) % 2048;
        if (r < 0)
                r += 2048;
        return r;
}
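/*
 * Editor's note, a worked example of the normalization: with window
 * start ws = 2040 and an incoming sn = 8, ((int) 8 - (int) 2040) %
 * 2048 is -2032 under C's truncating division, and adding 2048 yields
 * nsn = 16: the packet sits 16 slots past the window start.
 * Conversely, ws = 10 and sn = 5 yield nsn = 2043, far outside the
 * 0..1023 range the callers accept, so stale sequence numbers are
 * rejected.
 */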
static
void i2400m_roq_log_entry_print(struct i2400m *i2400m, unsigned index,
                                unsigned e_index,
                                struct i2400m_roq_log_entry *e)
{
        struct device *dev = i2400m_dev(i2400m);

        switch (e->type) {
        case I2400M_RO_TYPE_RESET:
                dev_err(dev, "q#%d reset ws %u cnt %u sn %u/%u"
                        " - new nws %u\n",
                        index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
                break;
        case I2400M_RO_TYPE_PACKET:
                dev_err(dev, "q#%d queue ws %u cnt %u sn %u/%u\n",
                        index, e->ws, e->count, e->sn, e->nsn);
                break;
        case I2400M_RO_TYPE_WS:
                dev_err(dev, "q#%d update_ws ws %u cnt %u sn %u/%u"
                        " - new nws %u\n",
                        index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
                break;
        case I2400M_RO_TYPE_PACKET_WS:
                dev_err(dev, "q#%d queue_update_ws ws %u cnt %u sn %u/%u"
                        " - new nws %u\n",
                        index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
                break;
        default:
                dev_err(dev, "q#%d BUG? entry %u - unknown type %u\n",
                        index, e_index, e->type);
        }
}
static
void i2400m_roq_log_add(struct i2400m *i2400m,
                        struct i2400m_roq *roq, enum i2400m_ro_type type,
                        unsigned ws, unsigned count, unsigned sn,
                        unsigned nsn, unsigned new_ws)
{
        struct i2400m_roq_log_entry *e;
        unsigned cnt_idx;
        int index = __i2400m_roq_index(i2400m, roq);

        /* If we run out of space, we eat from the end */
        if (roq->log->in - roq->log->out == I2400M_ROQ_LOG_LENGTH)
                roq->log->out++;
        cnt_idx = roq->log->in++ % I2400M_ROQ_LOG_LENGTH;
        e = &roq->log->entry[cnt_idx];

        e->type = type;
        e->ws = ws;
        e->count = count;
        e->sn = sn;
        e->nsn = nsn;
        e->new_ws = new_ws;

        if (d_test(1))
                i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
}
static
void i2400m_roq_log_dump(struct i2400m *i2400m, struct i2400m_roq *roq)
{
        unsigned cnt, cnt_idx;
        struct i2400m_roq_log_entry *e;
        int index = __i2400m_roq_index(i2400m, roq);

        BUG_ON(roq->log->out > roq->log->in);
        for (cnt = roq->log->out; cnt < roq->log->in; cnt++) {
                cnt_idx = cnt % I2400M_ROQ_LOG_LENGTH;
                e = &roq->log->entry[cnt_idx];
                i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
                memset(e, 0, sizeof(*e));
        }
        roq->log->in = roq->log->out = 0;
}
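/*
 * Editor's note: in/out are free-running counters; only their value
 * modulo the log length indexes entry[]. For a log length of 32, say,
 * in = 40 and out = 8 means the FIFO is full with 32 entries sitting
 * in slots 8..31 followed by 0..7, which is exactly the case the
 * "eat from the end" branch in i2400m_roq_log_add() handles.
 */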
static
void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
                        struct sk_buff *skb, unsigned sn, unsigned nsn)
{
        struct device *dev = i2400m_dev(i2400m);
        struct sk_buff *skb_itr;
        struct i2400m_roq_data *roq_data, *roq_data_itr;
        unsigned nsn_itr;

        d_fnstart(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %u)\n",
                  i2400m, roq, skb, sn, nsn);

        roq_data = (struct i2400m_roq_data *) &skb->cb;
        BUILD_BUG_ON(sizeof(*roq_data) > sizeof(skb->cb));
        roq_data->sn = sn;
        d_printf(3, dev, "ERX: roq %p [ws %u] nsn %d sn %u\n",
                 roq, roq->ws, nsn, roq_data->sn);

        /* Queues will be empty on not-so-bad environments, so try
         * that first */
        if (skb_queue_empty(&roq->queue)) {
                d_printf(2, dev, "ERX: roq %p - first one\n", roq);
                __skb_queue_head(&roq->queue, skb);
                goto out;
        }
        /* Now try append, as most of the operations will be that */
        skb_itr = skb_peek_tail(&roq->queue);
        roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
        nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
        if (nsn >= nsn_itr) {
                d_printf(2, dev, "ERX: roq %p - appended after %p (nsn %d sn %u)\n",
                         roq, skb_itr, nsn_itr, roq_data_itr->sn);
                __skb_queue_tail(&roq->queue, skb);
                goto out;
        }
        /* Sorted insertion: queue before the first entry with a
         * higher normalized sequence number */
        skb_queue_walk(&roq->queue, skb_itr) {
                roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
                nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
                if (nsn_itr > nsn) {
                        d_printf(2, dev, "ERX: roq %p - queued before %p "
                                 "(nsn %d sn %u)\n", roq, skb_itr, nsn_itr,
                                 roq_data_itr->sn);
                        __skb_queue_before(&roq->queue, skb_itr, skb);
                        goto out;
                }
        }
        /* Getting here is a bug; dump state to help diagnose */
        dev_err(dev, "SW BUG? failed to insert packet\n");
        dev_err(dev, "ERX: roq %p [ws %u] skb %p nsn %d sn %u\n",
                roq, roq->ws, skb, nsn, roq_data->sn);
        skb_queue_walk(&roq->queue, skb_itr) {
                roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
                nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
                dev_err(dev, "ERX: roq %p skb_itr %p nsn %d sn %u\n",
                        roq, skb_itr, nsn_itr, roq_data_itr->sn);
        }
out:
        d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
                i2400m, roq, skb, sn, nsn);
}
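/*
 * Editor's note on the three insertion paths above: for a queue
 * holding packets with normalized sequence numbers 2, 5 and 9, an
 * arriving nsn 7 misses both the empty-queue and append fast paths and
 * is walked in just before the nsn-9 entry, keeping the queue sorted;
 * an arriving nsn equal to the tail's takes the append path (nsn >=
 * nsn_itr), so equal sequence numbers preserve arrival order.
 */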
static
unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
                                unsigned sn)
{
        struct device *dev = i2400m_dev(i2400m);
        struct sk_buff *skb_itr, *tmp_itr;
        struct i2400m_roq_data *roq_data_itr;
        unsigned new_nws, nsn_itr;

        new_nws = __i2400m_roq_nsn(roq, sn);
        /* All queued packets with an nsn below the new window start
         * are in order now; release them to the network stack */
        skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
                roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
                nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
                if (nsn_itr < new_nws) {
                        d_printf(2, dev, "ERX: roq %p - release skb %p "
                                 "(nsn %u/%u new nws %u)\n",
                                 roq, skb_itr, nsn_itr, roq_data_itr->sn,
                                 new_nws);
                        __skb_unlink(skb_itr, &roq->queue);
                        i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
                } else
                        break;  /* the queue is sorted; the rest are newer */
        }
        roq->ws = sn;
        return new_nws;
}
static
void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
{
        struct device *dev = i2400m_dev(i2400m);
        struct sk_buff *skb_itr, *tmp_itr;
        struct i2400m_roq_data *roq_data_itr;

        d_fnstart(2, dev, "(i2400m %p roq %p)\n", i2400m, roq);
        i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_RESET,
                           roq->ws, skb_queue_len(&roq->queue),
                           ~0, ~0, 0);
        skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
                roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
                d_printf(2, dev, "ERX: roq %p - release skb %p (sn %u)\n",
                         roq, skb_itr, roq_data_itr->sn);
                __skb_unlink(skb_itr, &roq->queue);
                i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
        }
        roq->ws = 0;
        d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
}
static
void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
                      struct sk_buff *skb, unsigned lbn)
{
        struct device *dev = i2400m_dev(i2400m);
        unsigned nsn, len;

        d_fnstart(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
                  i2400m, roq, skb, lbn);
        len = skb_queue_len(&roq->queue);
        nsn = __i2400m_roq_nsn(roq, lbn);
        if (unlikely(nsn >= 1024)) {
                dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n",
                        nsn, lbn, roq->ws);
                i2400m_roq_log_dump(i2400m, roq);
                i2400m_reset(i2400m, I2400M_RT_WARM);
        } else {
                __i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
                i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET,
                                   roq->ws, len, lbn, nsn, ~0);
        }
        d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
                i2400m, roq, skb, lbn);
}
static
void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
                          unsigned sn)
{
        struct device *dev = i2400m_dev(i2400m);
        unsigned old_ws, nsn, len;

        d_fnstart(2, dev, "(i2400m %p roq %p sn %u)\n", i2400m, roq, sn);
        old_ws = roq->ws;
        len = skb_queue_len(&roq->queue);
        nsn = __i2400m_roq_update_ws(i2400m, roq, sn);
        i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
                           old_ws, len, sn, nsn, roq->ws);
        d_fnend(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
}
static
void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
                                struct sk_buff *skb, unsigned sn)
{
        struct device *dev = i2400m_dev(i2400m);
        unsigned nsn, old_ws, len;

        d_fnstart(2, dev, "(i2400m %p roq %p skb %p sn %u)\n",
                  i2400m, roq, skb, sn);
        len = skb_queue_len(&roq->queue);
        nsn = __i2400m_roq_nsn(roq, sn);
        old_ws = roq->ws;
        /* If the queue is empty, don't bother queueing just to
         * immediately release; deliver the skb directly */
        if (len == 0) {
                struct i2400m_roq_data *roq_data;
                roq_data = (struct i2400m_roq_data *) &skb->cb;
                i2400m_net_erx(i2400m, skb, roq_data->cs);
        } else
                __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
        __i2400m_roq_update_ws(i2400m, roq, sn + 1);
        i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
                           old_ws, len, sn, nsn, roq->ws);
        d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
                i2400m, roq, skb, sn);
}
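/*
 * Editor's note: the window start moves to sn + 1 because the packet
 * carrying the queue_update_ws indication is itself in order once
 * everything below sn has been released; e.g. with ws = 5 and sn = 5
 * on an empty queue, the skb is delivered directly and the window
 * simply advances to 6.
 */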
static void i2400m_rx_roq_destroy(struct kref *ref)
{
        unsigned itr;
        struct i2400m *i2400m
                        = container_of(ref, struct i2400m, rx_roq_refcount);
        for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
                __skb_queue_purge(&i2400m->rx_roq[itr].queue);
        kfree(i2400m->rx_roq[0].log);
        kfree(i2400m->rx_roq);
        i2400m->rx_roq = NULL;
}
static
void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
                     unsigned single_last, const void *payload, size_t size)
{
        struct device *dev = i2400m_dev(i2400m);
        const struct i2400m_pl_edata_hdr *hdr = payload;
        struct net_device *net_dev = i2400m->wimax_dev.net_dev;
        struct sk_buff *skb;
        enum i2400m_cs cs;
        u32 reorder;
        unsigned ro_needed, ro_type, ro_cin, ro_sn;
        struct i2400m_roq *roq;
        struct i2400m_roq_data *roq_data;
        unsigned long flags;

        d_fnstart(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
                  "size %zu)\n", i2400m, skb_rx, single_last, payload, size);
        if (size < sizeof(*hdr)) {
                dev_err(dev, "ERX: HW BUG? message with short header (%zu "
                        "vs %zu bytes expected)\n", size, sizeof(*hdr));
                goto error;
        }
        if (single_last) {
                skb = skb_get(skb_rx);
                d_printf(3, dev, "ERX: skb %p reusing\n", skb);
        } else {
                skb = skb_clone(skb_rx, GFP_KERNEL);
                if (skb == NULL) {
                        dev_err(dev, "ERX: no memory to clone skb\n");
                        net_dev->stats.rx_dropped++;
                        goto error_skb_clone;
                }
                d_printf(3, dev, "ERX: skb %p cloned from %p\n", skb, skb_rx);
        }
        /* Pull and trim so the skb points at the beginning of the IP
         * packet; the netdev side prepends the ethernet header */
        skb_pull(skb, payload + sizeof(*hdr) - (void *) skb->data);
        skb_trim(skb, (void *) skb_end_pointer(skb) - payload - sizeof(*hdr));

        reorder = le32_to_cpu(hdr->reorder);
        ro_needed = reorder & I2400M_RO_NEEDED;
        cs = hdr->cs;
        if (ro_needed) {
                ro_type = (reorder >> I2400M_RO_TYPE_SHIFT) & I2400M_RO_TYPE;
                ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
                ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;

                spin_lock_irqsave(&i2400m->rx_lock, flags);
                if (i2400m->rx_roq == NULL) {
                        kfree_skb(skb); /* rx_roq is being destroyed */
                        spin_unlock_irqrestore(&i2400m->rx_lock, flags);
                        goto error;
                }
                roq = &i2400m->rx_roq[ro_cin];
                kref_get(&i2400m->rx_roq_refcount);
                spin_unlock_irqrestore(&i2400m->rx_lock, flags);

                roq_data = (struct i2400m_roq_data *) &skb->cb;
                roq_data->sn = ro_sn;
                roq_data->cs = cs;
                d_printf(2, dev, "ERX: reorder needed: "
                         "type %u cin %u [ws %u] sn %u/%u len %zuB\n",
                         ro_type, ro_cin, roq->ws, ro_sn,
                         __i2400m_roq_nsn(roq, ro_sn), size);
                d_dump(2, dev, payload, size);
                switch (ro_type) {
                case I2400M_RO_TYPE_RESET:
                        i2400m_roq_reset(i2400m, roq);
                        kfree_skb(skb); /* no data here */
                        break;
                case I2400M_RO_TYPE_PACKET:
                        i2400m_roq_queue(i2400m, roq, skb, ro_sn);
                        break;
                case I2400M_RO_TYPE_WS:
                        i2400m_roq_update_ws(i2400m, roq, ro_sn);
                        kfree_skb(skb); /* no data here */
                        break;
                case I2400M_RO_TYPE_PACKET_WS:
                        i2400m_roq_queue_update_ws(i2400m, roq, skb, ro_sn);
                        break;
                default:
                        dev_err(dev, "HW BUG? unknown reorder type %u\n",
                                ro_type);
                }
                spin_lock_irqsave(&i2400m->rx_lock, flags);
                kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
                spin_unlock_irqrestore(&i2400m->rx_lock, flags);
        } else
                i2400m_net_erx(i2400m, skb, cs);
error_skb_clone:
error:
        d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
                "size %zu) = void\n", i2400m, skb_rx, single_last, payload, size);
}
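/*
 * Editor's note: 'reorder' is a little-endian dword that packs the
 * RO_NEEDED flag together with the type, cin (queue index) and sn
 * fields extracted by the I2400M_RO_* masks and shifts from i2400m.h.
 * For instance, a dword decoding to type I2400M_RO_TYPE_PACKET_WS with
 * cin 3 selects rx_roq[3], queues the skb there and advances that
 * queue's window start in one operation.
 */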
static
void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx,
                       unsigned single_last, const struct i2400m_pld *pld,
                       const void *payload)
{
        struct device *dev = i2400m_dev(i2400m);
        size_t pl_size = i2400m_pld_size(pld);
        enum i2400m_pt pl_type = i2400m_pld_type(pld);

        d_printf(7, dev, "RX: received payload type %u, %zu bytes\n",
                 pl_type, pl_size);
        d_dump(8, dev, payload, pl_size);
        switch (pl_type) {
        case I2400M_PT_DATA:
                d_printf(3, dev, "RX: data payload %zu bytes\n", pl_size);
                i2400m_net_rx(i2400m, skb_rx, single_last, payload, pl_size);
                break;
        case I2400M_PT_CTRL:
                i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size);
                break;
        case I2400M_PT_TRACE:
                i2400m_rx_trace(i2400m, payload, pl_size);
                break;
        case I2400M_PT_EDATA:
                d_printf(3, dev, "ERX: data payload %zu bytes\n", pl_size);
                i2400m_rx_edata(i2400m, skb_rx, single_last, payload, pl_size);
                break;
        default:        /* anything else shouldn't come to the host */
                if (printk_ratelimit())
                        dev_err(dev, "RX: HW BUG? unexpected payload type %u\n",
                                pl_type);
        }
}
static
int i2400m_rx_msg_hdr_check(struct i2400m *i2400m,
                            const struct i2400m_msg_hdr *msg_hdr,
                            size_t buf_size)
{
        int result = -EIO;
        struct device *dev = i2400m_dev(i2400m);
        if (buf_size < sizeof(*msg_hdr)) {
                dev_err(dev, "RX: HW BUG? message with short header (%zu "
                        "vs %zu bytes expected)\n", buf_size, sizeof(*msg_hdr));
                goto error;
        }
        if (msg_hdr->barker != cpu_to_le32(I2400M_D2H_MSG_BARKER)) {
                dev_err(dev, "RX: HW BUG? message received with unknown "
                        "barker 0x%08x (buf_size %zu bytes)\n",
                        le32_to_cpu(msg_hdr->barker), buf_size);
                goto error;
        }
        if (msg_hdr->num_pls == 0) {
                dev_err(dev, "RX: HW BUG? zero payload packets in message\n");
                goto error;
        }
        if (le16_to_cpu(msg_hdr->num_pls) > I2400M_MAX_PLS_IN_MSG) {
                dev_err(dev, "RX: HW BUG? message contains more payload "
                        "than maximum; ignoring.\n");
                goto error;
        }
        result = 0;
error:
        return result;
}
static
int i2400m_rx_pl_descr_check(struct i2400m *i2400m,
                             const struct i2400m_pld *pld,
                             size_t pl_itr, size_t buf_size)
{
        int result = -EIO;
        struct device *dev = i2400m_dev(i2400m);
        size_t pl_size = i2400m_pld_size(pld);
        enum i2400m_pt pl_type = i2400m_pld_type(pld);

        if (pl_size > i2400m->bus_pl_size_max) {
                dev_err(dev, "RX: HW BUG? payload @%zu: size %zu is "
                        "bigger than maximum %zu; ignoring message\n",
                        pl_itr, pl_size, i2400m->bus_pl_size_max);
                goto error;
        }
        if (pl_itr + pl_size > buf_size) {      /* enough data? */
                dev_err(dev, "RX: HW BUG? payload @%zu: size %zu "
                        "goes beyond the received buffer "
                        "size (%zu bytes); ignoring message\n",
                        pl_itr, pl_size, buf_size);
                goto error;
        }
        if (pl_type >= I2400M_PT_ILLEGAL) {
                dev_err(dev, "RX: HW BUG? illegal payload type %u; "
                        "ignoring message\n", pl_type);
                goto error;
        }
        result = 0;
error:
        return result;
}
int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
{
        int i, result;
        struct device *dev = i2400m_dev(i2400m);
        const struct i2400m_msg_hdr *msg_hdr;
        size_t pl_itr, pl_size;
        unsigned long flags;
        unsigned num_pls, single_last, skb_len;

        skb_len = skb->len;
        d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
                  i2400m, skb, skb_len);
        msg_hdr = (void *) skb->data;
        result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
        if (result < 0)
                goto error_msg_hdr_check;
        result = -EIO;
        num_pls = le16_to_cpu(msg_hdr->num_pls);
        /* Check payload descriptor(s) */
        pl_itr = sizeof(*msg_hdr) +
                num_pls * sizeof(msg_hdr->pld[0]);
        pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
        if (pl_itr > skb_len) { /* got all the payload descriptors? */
                dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
                        "%u payload descriptors (%zu each, total %zu)\n",
                        skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
                goto error_pl_descr_short;
        }
        /* Walk each payload: check it, then act on it */
        for (i = 0; i < num_pls; i++) {
                pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
                result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
                                                  pl_itr, skb_len);
                if (result < 0)
                        goto error_pl_descr_check;
                single_last = num_pls == 1 || i == num_pls - 1;
                i2400m_rx_payload(i2400m, skb, single_last, &msg_hdr->pld[i],
                                  skb->data + pl_itr);
                pl_itr += ALIGN(pl_size, I2400M_PL_ALIGN);
                cond_resched();         /* don't monopolize */
        }
        kfree_skb(skb);
        /* Update device statistics */
        spin_lock_irqsave(&i2400m->rx_lock, flags);
        i2400m->rx_pl_num += i;
        if (i > i2400m->rx_pl_max)
                i2400m->rx_pl_max = i;
        if (i < i2400m->rx_pl_min)
                i2400m->rx_pl_min = i;
        i2400m->rx_num++;
        i2400m->rx_size_acc += skb_len;
        if (skb_len < i2400m->rx_size_min)
                i2400m->rx_size_min = skb_len;
        if (skb_len > i2400m->rx_size_max)
                i2400m->rx_size_max = skb_len;
        spin_unlock_irqrestore(&i2400m->rx_lock, flags);
error_pl_descr_check:
error_pl_descr_short:
error_msg_hdr_check:
        d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
                i2400m, skb, skb_len, result);
        return result;
}
EXPORT_SYMBOL_GPL(i2400m_rx);
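/*
 * Editor's sketch of the message layout walked above (sizes
 * illustrative, not to scale): for num_pls = 2, the header and payload
 * descriptors come first, the whole prefix is padded to
 * I2400M_PL_ALIGN, and then each payload follows, itself padded as
 * pl_itr advances:
 *
 *      | msg_hdr | pld[0] | pld[1] | pad | payload 0 | pad | payload 1 |
 */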
void i2400m_unknown_barker(struct i2400m *i2400m,
                           const void *buf, size_t size)
{
        struct device *dev = i2400m_dev(i2400m);
        char prefix[64];
        const __le32 *barker = buf;

        dev_err(dev, "RX: HW BUG? unknown barker %08x, "
                "dropping %zu bytes\n", le32_to_cpu(*barker), size);
        snprintf(prefix, sizeof(prefix), "%s %s: ",
                 dev_driver_string(dev), dev_name(dev));
        if (size > 64) {
                print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
                               8, 4, buf, 64, 0);
                printk(KERN_ERR "%s... (only first 64 bytes "
                       "dumped)\n", prefix);
        } else
                print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
                               8, 4, buf, size, 0);
}
EXPORT_SYMBOL(i2400m_unknown_barker);
int i2400m_rx_setup(struct i2400m *i2400m)
{
        int result = 0;
        struct device *dev = i2400m_dev(i2400m);

        i2400m->rx_reorder = i2400m_rx_reorder_disabled ? 0 : 1;
        if (i2400m->rx_reorder) {
                unsigned itr;
                size_t size;
                struct i2400m_roq_log *rd;

                result = -ENOMEM;

                size = sizeof(i2400m->rx_roq[0]) * (I2400M_RO_CIN + 1);
                i2400m->rx_roq = kzalloc(size, GFP_KERNEL);
                if (i2400m->rx_roq == NULL) {
                        dev_err(dev, "RX: cannot allocate %zu bytes for "
                                "reorder queues\n", size);
                        goto error_roq_alloc;
                }

                size = sizeof(*i2400m->rx_roq[0].log) * (I2400M_RO_CIN + 1);
                rd = kzalloc(size, GFP_KERNEL);
                if (rd == NULL) {
                        dev_err(dev, "RX: cannot allocate %zu bytes for "
                                "reorder queues log areas\n", size);
                        goto error_roq_log_alloc;
                }

                for (itr = 0; itr < I2400M_RO_CIN + 1; itr++) {
                        __i2400m_roq_init(&i2400m->rx_roq[itr]);
                        i2400m->rx_roq[itr].log = &rd[itr];
                }
                kref_init(&i2400m->rx_roq_refcount);
        }
        return 0;

error_roq_log_alloc:
        kfree(i2400m->rx_roq);
error_roq_alloc:
        return result;
}
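/*
 * Editor's note: one reorder queue (plus its log) is allocated per
 * connection index, I2400M_RO_CIN + 1 in total, so the cin field
 * decoded from an extended-data header in i2400m_rx_edata() can index
 * rx_roq[] directly; the I2400M_RO_CIN mask bounds it to the array.
 */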
void i2400m_rx_release(struct i2400m *i2400m)
{
        unsigned long flags;

        if (i2400m->rx_reorder) {
                spin_lock_irqsave(&i2400m->rx_lock, flags);
                kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
                spin_unlock_irqrestore(&i2400m->rx_lock, flags);
        }
        /* at this point, nothing can be received... */
        i2400m_report_hook_flush(i2400m);
}