Linux Kernel 3.7.1
rx.c
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
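
/* Illustrative note (not in the original source): with 4 KiB pages,
 * EFX_RX_HALF_PAGE is 2048 bytes minus sizeof(struct efx_rx_page_state),
 * i.e. the largest buffer length for which two buffers can share one
 * page, with the shared page state kept at the start of the page.
 */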

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When GRO is in use then the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for GRO is
 *     less than the performance hit of using page-based allocation for
 *     non-GRO traffic, so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
 *                      RX_ALLOC_FACTOR_SKB)
 *
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
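
/* Worked example of the hysteresis above (illustrative, not in the
 * original source): starting from rx_alloc_level == 0, a channel must
 * complete ~0x2000 GRO'd packets (at +RX_ALLOC_FACTOR_GRO each) before
 * the level exceeds RX_ALLOC_LEVEL_GRO and page-based allocation is
 * selected. Each non-GRO packet subtracts 2, so traffic must contain
 * more than two GRO'd packets per non-GRO packet for the level to rise
 * at all, and the clamp at RX_ALLOC_LEVEL_MAX keeps the level within
 * 0x1000 of the threshold so the strategy can switch back quickly.
 */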

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

/* Offset of ethernet header within page */
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
					     struct efx_rx_buffer *buf)
{
	/* Offset is always within one page, so we don't need to consider
	 * the page order.
	 */
	return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
		efx->type->rx_buffer_hash_size;
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}

static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
	if (buf->flags & EFX_RX_BUF_PAGE)
		return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
	else
		return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
	/* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(eh - 4));
#else
	const u8 *data = eh - 4;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}
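
/* Descriptive note: both branches above decode the same 32-bit
 * little-endian flow hash that the NIC places immediately before the
 * Ethernet header; the byte-by-byte form is used where a 4-byte load at
 * (eh - 4) might be unaligned and the platform lacks efficient
 * unaligned access.
 */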

/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Return a negative error code or 0
 * on success. May fail having only inserted fewer than EFX_RX_BATCH
 * buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	struct sk_buff *skb;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(!skb))
			return -ENOMEM;

		/* Adjust the SKB for padding */
		skb_reserve(skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->flags = 0;

		rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
						  skb->data, rx_buf->len,
						  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
					       rx_buf->dma_addr))) {
			dev_kfree_skb_any(skb);
			rx_buf->u.skb = NULL;
			return -EIO;
		}

		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}

/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for
 * DMA, and populates struct efx_rx_buffers for each one. Return a
 * negative error code or 0 on success. If a single page can be split
 * between two buffers, then the page will either be inserted fully, or
 * not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	void *page_addr;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
					efx_rx_buf_size(efx),
					DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		page_addr = page_address(page);
		state = page_addr;
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->u.page = page;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->flags = EFX_RX_BUF_PAGE;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}
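
/* Layout sketch for the split-page path above (illustrative, assuming
 * 4 KiB pages and rx_buffer_len <= EFX_RX_HALF_PAGE): each page holds a
 * struct efx_rx_page_state at offset 0, buffer 0 at
 * sizeof(state) + EFX_PAGE_IP_ALIGN, and buffer 1 at the same offset
 * plus PAGE_SIZE/2. state->refcnt tracks how many of the two buffers
 * still use the DMA mapping, while get_page() takes the extra kernel
 * reference needed for the second half.
 */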

static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
		struct efx_rx_page_state *state;

		state = page_address(rx_buf->u.page);
		if (--state->refcnt == 0) {
			dma_unmap_page(&efx->pci_dev->dev,
				       state->dma_addr,
				       efx_rx_buf_size(efx),
				       DMA_FROM_DEVICE);
		}
	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
				 rx_buf->len, DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
{
	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
		__free_pages(rx_buf->u.page, efx->rx_buffer_order);
		rx_buf->u.page = NULL;
	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
		dev_kfree_skb_any(rx_buf->u.skb);
		rx_buf->u.skb = NULL;
	}
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_page_state *state = page_address(rx_buf->u.page);
	struct efx_rx_buffer *new_buf;
	unsigned fill_level, index;

	/* +1 because efx_rx_packet() incremented removed_count. +1 because
	 * we'd like to insert an additional descriptor whilst leaving
	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
	if (unlikely(fill_level > rx_queue->max_fill)) {
		/* We could place "state" on a list, and drain the list in
		 * efx_fast_push_rx_descriptors(). For now, this will do. */
		return;
	}

	++state->refcnt;
	get_page(rx_buf->u.page);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);
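	/* XOR-ing with PAGE_SIZE/2 flips the half-page bit of the DMA
	 * address, so the new descriptor points at whichever half of the
	 * page this buffer does not occupy. */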
	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
	new_buf->u.page = rx_buf->u.page;
	new_buf->len = rx_buf->len;
	new_buf->flags = EFX_RX_BUF_PAGE;
	++rx_queue->added_count;
}

/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_rx_buffer *new_buf;
	unsigned index;

	rx_buf->flags &= EFX_RX_BUF_PAGE;

	if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
	    efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
	    page_count(rx_buf->u.page) == 1)
		efx_resurrect_rx_buffer(rx_queue, rx_buf);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);

	memcpy(new_buf, rx_buf, sizeof(*new_buf));
	rx_buf->u.page = NULL;
	++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d using %s allocation\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill,
		   channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		if (channel->rx_alloc_push_pages)
			rc = efx_init_rx_buffers_page(rx_queue);
		else
			rc = efx_init_rx_buffers_skb(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
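
	/* For example (illustrative): with space == 20 the loop above runs
	 * twice, pushing 2 * EFX_RX_BATCH == 16 buffers and leaving the
	 * remaining 4 descriptors for a later refill pass. */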

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the meta
		 * data at the end of the skb will be trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO. GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void efx_rx_packet_gro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      const u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	if (rx_buf->flags & EFX_RX_BUF_PAGE) {
		struct efx_nic *efx = channel->efx;
		struct page *page = rx_buf->u.page;
		struct sk_buff *skb;

		rx_buf->u.page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		skb_fill_page_desc(skb, 0, page,
				   efx_rx_buf_offset(efx, rx_buf), rx_buf->len);

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

		skb_record_rx_queue(skb, channel->rx_queue.core_index);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->u.skb;

		EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
		rx_buf->u.skb = NULL;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		gro_result = napi_gro_receive(napi, skb);
	}

	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
		channel->irq_mod_score += 2;
	}
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x at %llx+%x %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (unsigned long long)rx_buf->dma_addr, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		if (unlikely(leak_packet))
			channel->n_skbuff_leaks++;
		else
			efx_recycle_rx_buffer(channel, rx_buf);

		/* Don't hold off the previous receive */
		rx_buf = NULL;
		goto out;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_eh(efx, rx_buf));

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
	if (channel->rx_pkt)
		__efx_rx_packet(channel, channel->rx_pkt);
	channel->rx_pkt = rx_buf;
}
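
/* Note on the pipelining above (descriptive, added for clarity):
 * channel->rx_pkt holds the packet that has just arrived while its
 * headers are prefetched; it is actually processed when the next event
 * for this channel is handled, giving the prefetch time to complete.
 * Any packet still held at the end of a NAPI poll is assumed to be
 * flushed by the channel's event-processing loop.
 */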

static void efx_rx_deliver(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf)
{
	struct sk_buff *skb;

	/* We now own the SKB */
	skb = rx_buf->u.skb;
	rx_buf->u.skb = NULL;

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);

	/* Record the rx_queue */
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Pass the packet up */
	if (channel->type->receive_skb)
		channel->type->receive_skb(channel, skb);
	else
		netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}

/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	u8 *eh = efx_rx_buf_eh(efx, rx_buf);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
		struct sk_buff *skb = rx_buf->u.skb;

		prefetch(skb_shinfo(skb));

		skb_reserve(skb, efx->type->rx_buffer_hash_size);
		skb_put(skb, rx_buf->len);

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		skb->protocol = eth_type_trans(skb, efx->net_dev);

		skb_record_rx_queue(skb, channel->rx_queue.core_index);
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
	    !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, eh);
	else
		efx_rx_deliver(channel, rx_buf);
}

void efx_rx_strategy(struct efx_channel *channel)
{
	enum efx_rx_alloc_method method = rx_alloc_method;

	if (channel->type->receive_skb) {
		channel->rx_alloc_push_pages = false;
		return;
	}

	/* Only makes sense to use page based allocation if GRO is enabled */
	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
		method = RX_ALLOC_METHOD_SKB;
	} else if (method == RX_ALLOC_METHOD_AUTO) {
		/* Constrain the rx_alloc_level */
		if (channel->rx_alloc_level < 0)
			channel->rx_alloc_level = 0;
		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* Decide on the allocation method */
		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
	}

	/* Push the option */
	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}
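
/* Worked example (illustrative): with efx->rxq_entries == 1000, the
 * ring above is rounded up to entries == 1024, so ptr_mask == 0x3ff and
 * "added_count & ptr_mask" wraps descriptor indices without a modulo
 * operation.
 */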

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger = max_fill - EFX_RX_BATCH;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;

	/* Set up RX descriptor ring */
	rx_queue->enabled = true;
	efx_nic_init_rx(rx_queue);
}
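
/* Worked example (illustrative): with rxq_entries == 512,
 * max_fill == 512 - EFX_RXD_HEAD_ROOM == 510 and
 * max_trigger == 510 - EFX_RX_BATCH == 502. The default
 * rx_refill_threshold of 0 therefore refills whenever the fill level
 * drops below 502, while e.g. rx_refill_threshold == 90 would give
 * trigger == 510 * 90 / 100 == 459.
 */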

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* A flush failure might have left rx_queue->enabled */
	rx_queue->enabled = false;

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers; NB start at index 0, not the current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= rx_queue->ptr_mask; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");
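
/* Usage sketch (illustrative, assuming the standard "sfc" module name):
 *
 *   modprobe sfc rx_alloc_method=1 rx_refill_threshold=90
 *
 * rx_alloc_method is registered with mode 0644, so it can also be
 * changed at runtime via /sys/module/sfc/parameters/rx_alloc_method;
 * rx_refill_threshold is 0444 and must be set at module load time.
 */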