27 #include <linux/module.h>
28 #include <linux/kernel.h>
31 #include <linux/netdevice.h>
35 #include <linux/string.h>
36 #include <linux/prefetch.h>
/*
 * NOTE(review): elided fragment — interior lines of this function are
 * missing from the extraction; only the signature and the
 * napi_schedule() call are visible.
 *
 * Schedules NAPI polling on the current CPU's per-CPU NAPI context.
 * Presumably `cpu` is obtained via smp_processor_id() in the elided
 * body, and the unused `void *_` parameter exists to match an IPI /
 * smp_call_function callback signature — TODO confirm against the
 * full source.
 */
83 static void cvm_oct_enable_napi(
void *
_)
86 napi_schedule(&cvm_oct_napi[cpu].
napi);
/*
 * NOTE(review): elided fragment — the code between the atomic
 * decrement and the panic() call is missing from this extraction.
 *
 * Enables NAPI RX processing on one additional CPU.  A core is
 * reserved by atomically decrementing core_state.available_cores
 * (atomic_sub_if_positive returns a negative result when no core is
 * available, presumably causing an early return in the elided body —
 * TODO confirm).  The panic("Can't enable NAPI.") path fires when
 * scheduling NAPI on the chosen CPU fails; the exact failing call is
 * not visible here.
 */
89 static void cvm_oct_enable_one_cpu(
void)
95 v = atomic_sub_if_positive(1, &
core_state.available_cores);
105 panic(
"Can't enable NAPI.");
/*
 * NOTE(review): elided fragment — the accounting logic between the
 * signature and the cvm_oct_enable_napi(NULL) call is missing.
 *
 * Called when a CPU's NAPI poll found no more work.  The visible tail
 * re-arms NAPI on the current CPU via cvm_oct_enable_napi(NULL);
 * presumably the elided body decides (based on core_state counters)
 * whether this CPU should stay in the polling set or return its core —
 * TODO confirm against the full source.
 */
111 static void cvm_oct_no_more_work(
void)
138 cvm_oct_enable_napi(
NULL);
/*
 * NOTE(review): elided fragment — the embedded line numbers jump
 * (151→160, 182→193, 204→220), so most of this function's body is
 * missing from the extraction.  Comments below describe only what the
 * visible lines establish.
 *
 * Checks a received work-queue entry (cvmx_wqe_t) for hardware RX
 * errors and attempts to recover usable frames.  Visible cases:
 *  - err_code == 10 with len <= 64: a short/partial frame case
 *    (handling elided).
 *  - err_code == 5 or 7: appears to be a preamble-detection error
 *    workaround — when the GMX port's preamble checker is disabled
 *    (gmxx_rxx_frm_ctl.s.pre_chk == 0), the packet bytes are scanned
 *    nibble-by-nibble and packet_ptr.s.addr is advanced past what is
 *    presumably a stale preamble; the 0x5/0xd nibble tests and the
 *    4-bit shift/recombine loop suggest the payload may also be
 *    realigned by half a byte — TODO confirm semantics against the
 *    full source and the Octeon GMX hardware documentation.
 * The final visible line logs work->ipprt and the err_code, presumably
 * for the unrecoverable-error path.  Return value convention is not
 * visible here.
 */
149 static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *
work)
151 if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
160 && ((work->word2.snoip.err_code == 5)
161 || (work->word2.snoip.err_code == 7))) {
174 gmxx_rxx_frm_ctl.
u64 =
175 cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index,
interface));
176 if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
179 cvmx_phys_to_ptr(work->packet_ptr.s.addr);
182 while (i < work->len - 1) {
193 work->packet_ptr.s.addr += i + 1;
195 }
else if ((*ptr & 0xf) == 0xd) {
199 work->packet_ptr.s.addr +=
i;
201 for (i = 0; i < work->len; i++) {
203 ((*ptr & 0xf0) >> 4) |
204 ((*(ptr + 1) & 0xf) << 4);
220 work->ipprt, work->word2.snoip.err_code);
/*
 * NOTE(review): elided fragment — the enclosing function's signature is
 * missing from this extraction (presumably the NAPI poll callback,
 * cvm_oct_napi_poll(struct napi_struct *napi, int budget) — TODO
 * confirm).  The embedded numbering (237…485) shows large gaps; only
 * scattered statements of the RX loop are visible.
 *
 * Visible structure of the RX poll loop:
 *  - Requests work from the POW/SSO (did_work_request tracks an
 *    outstanding asynchronous work request).
 *  - Loops while rx_count < budget, fetching one work entry per pass.
 *  - Recovers the sk_buff pointer stashed just before the packet
 *    buffer (cvm_oct_get_buffer_ptr(...) - sizeof(void *)).
 *  - If the input-queue backlog exceeds budget * cores_in_use and this
 *    is a real NAPI context (napi != NULL), another CPU is recruited
 *    via cvm_oct_enable_one_cpu().
 *  - RX errors are screened through cvm_oct_check_rcv_error().
 *  - Zero-copy path: skb->data/len/tail are pointed at the hardware
 *    buffer (packet_not_copied = 1); otherwise a fresh skb is
 *    allocated with dev_alloc_skb(work->len) and the packet is copied,
 *    either from the inline work->packet_data (word2.s.bufs == 0,
 *    with an offset that depends on is_v6 — presumably to skip the
 *    hardware pad before the IP header, TODO confirm) or by walking
 *    the chained buffer segments via cvmx_buf_ptr back-pointers
 *    (the addr/back arithmetic recomputes each segment's start on its
 *    128-byte boundary).
 *  - Checksum offload: packets with not_IP/IP_exc/L4_error or
 *    non-TCP/UDP presumably fall back to CHECKSUM_NONE (elided).
 *  - On exit, the RX pool is refilled (cvm_oct_rx_refill_pool(0)) and,
 *    if the budget was not exhausted, cvm_oct_no_more_work() completes
 *    the NAPI cycle.
 */
237 const int coreid = cvmx_get_core_num();
241 int did_work_request = 0;
242 int packet_not_copied;
260 did_work_request = 1;
263 while (rx_count < budget) {
275 did_work_request = 0;
284 pskb = (
struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
sizeof(
void *));
289 did_work_request = 1;
302 backlog = counts.s.iq_cnt + counts.s.ds_cnt;
303 if (backlog > budget * cores_in_use && napi !=
NULL)
304 cvm_oct_enable_one_cpu();
316 if (
unlikely(work->word2.snoip.rcv_error)) {
317 if (cvm_oct_check_rcv_error(work))
327 skb->
data = skb->
head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->
head);
329 skb->
len = work->len;
330 skb_set_tail_pointer(skb, skb->
len);
331 packet_not_copied = 1;
337 skb = dev_alloc_skb(work->len);
340 "skbuff, packet dropped\n",
350 if (
unlikely(work->word2.s.bufs == 0)) {
351 uint8_t *ptr = work->packet_data;
353 if (
likely(!work->word2.s.not_IP)) {
358 if (work->word2.s.is_v6)
366 int segments = work->word2.s.bufs;
372 *(
union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.
s.addr - 8);
384 (segment_ptr.
s.addr - (((segment_ptr.
s.addr >> 7) - segment_ptr.
s.back) << 7));
389 if (segment_size > len)
393 cvmx_phys_to_ptr(segment_ptr.
s.addr),
396 segment_ptr = next_ptr;
399 packet_not_copied = 0;
415 if (
unlikely(work->word2.s.not_IP || work->word2.s.IP_exc ||
416 work->word2.s.L4_error || !work->word2.s.tcp_or_udp))
480 cvm_oct_rx_refill_pool(0);
482 if (rx_count < budget && napi !=
NULL) {
485 cvm_oct_no_more_work();
/*
 * NOTE(review): elided fragment that also runs past the end of this
 * chunk — two definitions are interleaved here and both are
 * incomplete.
 *
 * 1) Under CONFIG_NET_POLL_CONTROLLER: a netpoll hook (presumably
 *    cvm_oct_poll_controller — TODO confirm) that drives the RX path
 *    synchronously via cvm_oct_napi_poll(NULL, 16); the NULL napi and
 *    fixed budget of 16 match the napi != NULL guards seen in the
 *    poll loop above.
 *
 * 2) RX initialization (presumably cvm_oct_rx_initialize): panics if
 *    no net_devices were allocated, enables each per-CPU NAPI
 *    instance, requests the Ethernet IRQ (panicking on failure),
 *    programs the POW interrupt threshold (int_thr.s.tc_thr = 1, i.e.
 *    interrupt on a single queued work entry — presumably to minimize
 *    RX latency, TODO confirm), and finally brings the first CPU into
 *    the polling set via cvm_oct_enable_one_cpu().
 */
490 #ifdef CONFIG_NET_POLL_CONTROLLER
499 cvm_oct_napi_poll(
NULL, 16);
517 if (
NULL == dev_for_napi)
518 panic(
"No net_devices were allocated.");
530 napi_enable(&cvm_oct_napi[i].napi);
537 panic(
"Could not acquire Ethernet IRQ %d\n",
544 int_thr.
s.tc_thr = 1;
554 cvm_oct_enable_one_cpu();