selftest.c (Linux 3.7.1)
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* IRQ latency can be enormous because:
 * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
 *   slow serial console or an old IDE driver doing error recovery
 * - The PREEMPT_RT patches mostly deal with this, but also allow a
 *   tasklet or normal task to be given higher priority than our IRQ
 *   threads
 * Try to avoid blaming the hardware for this.
 */
#define IRQ_TIMEOUT HZ

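/* Editor's note: IRQ_TIMEOUT is in jiffies, so HZ means roughly one second.
 * The interrupt and event-queue tests below poll for this long with an
 * exponential backoff: the wait starts at one jiffy and doubles each pass.
 */
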
/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct efx_loopback_payload {
        struct ethhdr header;
        struct iphdr ip;
        struct udphdr udp;
        __be16 iteration;
        const char msg[64];
} __packed;
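
/* Size check, for reference: with __packed there is no padding, so the
 * on-wire frame is 14 bytes of Ethernet header + 20 bytes of IPv4 header
 * (ihl = 5) + 8 bytes of UDP header + a 2-byte iteration counter + the
 * 64-byte msg, i.e. sizeof(struct efx_loopback_payload) == 108.
 */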

/* Loopback test source MAC address */
static const unsigned char payload_source[ETH_ALEN] = {
        0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

static const char payload_msg[] =
        "Hello world! This is an Efx loopback test in progress!";

/* Interrupt mode names */
static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *const efx_interrupt_mode_names[] = {
        [EFX_INT_MODE_MSIX] = "MSI-X",
        [EFX_INT_MODE_MSI] = "MSI",
        [EFX_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
        STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)

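/* For reference, STRING_TABLE_LOOKUP() lives in net_driver.h and looks
 * roughly like the sketch below (an illustration, not the verbatim macro).
 * The token pasting is why both efx_interrupt_mode_max and
 * efx_interrupt_mode_names must be defined with exactly those names:
 *
 *      #define STRING_TABLE_LOOKUP(val, member) \
 *              ((val) < member ## _max ? member ## _names[val] : "(invalid)")
 */
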
/**
 * struct efx_loopback_state - persistent state during a loopback selftest
 * @flush:              Drop all packets in efx_loopback_rx_packet
 * @packet_count:       Number of packets being used in this test
 * @skbs:               An array of skbs transmitted
 * @offload_csum:       Checksums are being offloaded
 * @rx_good:            RX good packet count
 * @rx_bad:             RX bad packet count
 * @payload:            Payload used in tests
 */
struct efx_loopback_state {
        bool flush;
        int packet_count;
        struct sk_buff **skbs;
        bool offload_csum;
        atomic_t rx_good;
        atomic_t rx_bad;
        struct efx_loopback_payload payload;
};

/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000

/**************************************************************************
 *
 * MII, NVRAM and register tests
 *
 **************************************************************************/

static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
        int rc = 0;

        if (efx->phy_op->test_alive) {
                rc = efx->phy_op->test_alive(efx);
                tests->phy_alive = rc ? -1 : 1;
        }

        return rc;
}

static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
{
        int rc = 0;

        if (efx->type->test_nvram) {
                rc = efx->type->test_nvram(efx);
                tests->nvram = rc ? -1 : 1;
        }

        return rc;
}
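
/* Result convention used throughout struct efx_self_tests, as in the
 * rc ? -1 : 1 expressions above: a field is left 0 if the test was not
 * run, and set to 1 on success or -1 on failure.
 */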

/**************************************************************************
 *
 * Interrupt and event queue testing
 *
 **************************************************************************/

/* Test generation and receipt of interrupts */
static int efx_test_interrupts(struct efx_nic *efx,
                               struct efx_self_tests *tests)
{
        unsigned long timeout, wait;
        int cpu;

        netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
        tests->interrupt = -1;

        efx_nic_irq_test_start(efx);
        timeout = jiffies + IRQ_TIMEOUT;
        wait = 1;

        /* Wait for arrival of test interrupt. */
        netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
        do {
                schedule_timeout_uninterruptible(wait);
                cpu = efx_nic_irq_test_irq_cpu(efx);
                if (cpu >= 0)
                        goto success;
                wait *= 2;
        } while (time_before(jiffies, timeout));

        netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
        return -ETIMEDOUT;

 success:
        netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
                  INT_MODE(efx), cpu);
        tests->interrupt = 1;
        return 0;
}

/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_nic *efx,
                               struct efx_self_tests *tests)
{
        struct efx_channel *channel;
        unsigned int read_ptr[EFX_MAX_CHANNELS];
        unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
        unsigned long timeout, wait;

        BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);

        efx_for_each_channel(channel, efx) {
                read_ptr[channel->channel] = channel->eventq_read_ptr;
                set_bit(channel->channel, &dma_pend);
                set_bit(channel->channel, &int_pend);
                efx_nic_event_test_start(channel);
        }

        timeout = jiffies + IRQ_TIMEOUT;
        wait = 1;

        /* Wait for arrival of interrupts. NAPI processing may or may
         * not complete in time, but we can cope in any case.
         */
        do {
                schedule_timeout_uninterruptible(wait);

                efx_for_each_channel(channel, efx) {
                        napi_disable(&channel->napi_str);
                        if (channel->eventq_read_ptr !=
                            read_ptr[channel->channel]) {
                                set_bit(channel->channel, &napi_ran);
                                clear_bit(channel->channel, &dma_pend);
                                clear_bit(channel->channel, &int_pend);
                        } else {
                                if (efx_nic_event_present(channel))
                                        clear_bit(channel->channel, &dma_pend);
                                if (efx_nic_event_test_irq_cpu(channel) >= 0)
                                        clear_bit(channel->channel, &int_pend);
                        }
                        napi_enable(&channel->napi_str);
                        efx_nic_eventq_read_ack(channel);
                }

                wait *= 2;
        } while ((dma_pend || int_pend) && time_before(jiffies, timeout));

        efx_for_each_channel(channel, efx) {
                bool dma_seen = !test_bit(channel->channel, &dma_pend);
                bool int_seen = !test_bit(channel->channel, &int_pend);

                tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
                tests->eventq_int[channel->channel] = int_seen ? 1 : -1;

                if (dma_seen && int_seen) {
                        netif_dbg(efx, drv, efx->net_dev,
                                  "channel %d event queue passed (with%s NAPI)\n",
                                  channel->channel,
                                  test_bit(channel->channel, &napi_ran) ?
                                  "" : "out");
                } else {
                        /* Report failure and whether either interrupt or DMA
                         * worked
                         */
                        netif_err(efx, drv, efx->net_dev,
                                  "channel %d timed out waiting for event queue\n",
                                  channel->channel);
                        if (int_seen)
                                netif_err(efx, drv, efx->net_dev,
                                          "channel %d saw interrupt "
                                          "during event queue test\n",
                                          channel->channel);
                        if (dma_seen)
                                netif_err(efx, drv, efx->net_dev,
                                          "channel %d event was generated, but "
                                          "failed to trigger an interrupt\n",
                                          channel->channel);
                }
        }

        return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}
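
/* A note on the bookkeeping above: napi_ran, dma_pend and int_pend are
 * single unsigned longs used as per-channel bitmaps via set_bit(),
 * clear_bit() and test_bit(). That is only safe because the BUILD_BUG_ON()
 * guarantees EFX_MAX_CHANNELS fits in one machine word.
 */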

static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
                        unsigned flags)
{
        int rc;

        if (!efx->phy_op->run_tests)
                return 0;

        mutex_lock(&efx->mac_lock);
        rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
        mutex_unlock(&efx->mac_lock);
        return rc;
}

/**************************************************************************
 *
 * Loopback testing
 * NB Only one loopback test can be executing concurrently.
 *
 **************************************************************************/

/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void efx_loopback_rx_packet(struct efx_nic *efx,
                            const char *buf_ptr, int pkt_len)
{
        struct efx_loopback_state *state = efx->loopback_selftest;
        struct efx_loopback_payload *received;
        struct efx_loopback_payload *payload;

        BUG_ON(!buf_ptr);

        /* If we are just flushing, then drop the packet */
        if ((state == NULL) || state->flush)
                return;

        payload = &state->payload;

        received = (struct efx_loopback_payload *) buf_ptr;
        received->ip.saddr = payload->ip.saddr;
        if (state->offload_csum)
                received->ip.check = payload->ip.check;

        /* Check that header exists */
        if (pkt_len < sizeof(received->header)) {
                netif_err(efx, drv, efx->net_dev,
                          "saw runt RX packet (length %d) in %s loopback "
                          "test\n", pkt_len, LOOPBACK_MODE(efx));
                goto err;
        }

        /* Check that the Ethernet header matches */
        if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
                netif_err(efx, drv, efx->net_dev,
                          "saw non-loopback RX packet in %s loopback test\n",
                          LOOPBACK_MODE(efx));
                goto err;
        }

        /* Check packet length */
        if (pkt_len != sizeof(*payload)) {
                netif_err(efx, drv, efx->net_dev,
                          "saw incorrect RX packet length %d (wanted %d) in "
                          "%s loopback test\n", pkt_len, (int)sizeof(*payload),
                          LOOPBACK_MODE(efx));
                goto err;
        }

        /* Check that the IP header matches */
        if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
                netif_err(efx, drv, efx->net_dev,
                          "saw corrupted IP header in %s loopback test\n",
                          LOOPBACK_MODE(efx));
                goto err;
        }

        /* Check that msg and padding match */
        if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
                netif_err(efx, drv, efx->net_dev,
                          "saw corrupted RX packet in %s loopback test\n",
                          LOOPBACK_MODE(efx));
                goto err;
        }

        /* Check that iteration matches */
        if (received->iteration != payload->iteration) {
                netif_err(efx, drv, efx->net_dev,
                          "saw RX packet from iteration %d (wanted %d) in "
                          "%s loopback test\n", ntohs(received->iteration),
                          ntohs(payload->iteration), LOOPBACK_MODE(efx));
                goto err;
        }

        /* Increase correct RX count */
        netif_vdbg(efx, drv, efx->net_dev,
                   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));

        atomic_inc(&state->rx_good);
        return;

 err:
#ifdef DEBUG
        if (atomic_read(&state->rx_bad) == 0) {
                netif_err(efx, drv, efx->net_dev, "received packet:\n");
                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
                               buf_ptr, pkt_len, 0);
                netif_err(efx, drv, efx->net_dev, "expected packet:\n");
                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
                               &state->payload, sizeof(state->payload), 0);
        }
#endif
        atomic_inc(&state->rx_bad);
}

/* Initialise an efx_selftest_state for a new iteration */
static void efx_iterate_state(struct efx_nic *efx)
{
        struct efx_loopback_state *state = efx->loopback_selftest;
        struct net_device *net_dev = efx->net_dev;
        struct efx_loopback_payload *payload = &state->payload;

        /* Initialise the layer II header */
        memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
        memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
        payload->header.h_proto = htons(ETH_P_IP);

        /* saddr set later and used as incrementing count */
        payload->ip.daddr = htonl(INADDR_LOOPBACK);
        payload->ip.ihl = 5;
        payload->ip.check = htons(0xdead);
        payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
        payload->ip.version = IPVERSION;
        payload->ip.protocol = IPPROTO_UDP;

        /* Initialise udp header */
        payload->udp.source = 0;
        payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
                                 sizeof(struct iphdr));
        payload->udp.check = 0; /* checksum ignored */

        /* Fill out payload */
        payload->iteration = htons(ntohs(payload->iteration) + 1);
        memcpy(&payload->msg, payload_msg, sizeof(payload_msg));

        /* Fill out remaining state members */
        atomic_set(&state->rx_good, 0);
        atomic_set(&state->rx_bad, 0);
        smp_wmb();
}
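
/* The bogus 0xdead IP checksum is deliberate: when a packet goes out on a
 * TX queue with checksum offload enabled, the hardware replaces it with a
 * correct checksum. efx_loopback_rx_packet() compensates by copying the
 * expected value over the received one (state->offload_csum) before
 * comparing IP headers.
 */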

static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        struct efx_loopback_state *state = efx->loopback_selftest;
        struct efx_loopback_payload *payload;
        struct sk_buff *skb;
        int i;
        netdev_tx_t rc;

        /* Transmit N copies of buffer */
        for (i = 0; i < state->packet_count; i++) {
                /* Allocate an skb, holding an extra reference for
                 * transmit completion counting */
                skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
                if (!skb)
                        return -ENOMEM;
                state->skbs[i] = skb;
                skb_get(skb);

                /* Copy the payload in, incrementing the source address to
                 * exercise the RSS vectors */
                payload = ((struct efx_loopback_payload *)
                           skb_put(skb, sizeof(state->payload)));
                memcpy(payload, &state->payload, sizeof(state->payload));
                payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

                /* Ensure everything we've written is visible to the
                 * interrupt handler. */
                smp_wmb();

                netif_tx_lock_bh(efx->net_dev);
                rc = efx_enqueue_skb(tx_queue, skb);
                netif_tx_unlock_bh(efx->net_dev);

                if (rc != NETDEV_TX_OK) {
                        netif_err(efx, drv, efx->net_dev,
                                  "TX queue %d could not transmit packet %d of "
                                  "%d in %s loopback test\n", tx_queue->queue,
                                  i + 1, state->packet_count,
                                  LOOPBACK_MODE(efx));

                        /* Defer cleaning up the other skbs for the caller */
                        kfree_skb(skb);
                        return -EPIPE;
                }
        }

        return 0;
}
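
/* Reference-counting sketch: skb_get() above holds a second reference on
 * every skb, so a TX completion only drops the refcount back to one
 * instead of freeing the buffer. efx_end_loopback() then uses
 * !skb_shared(), i.e. refcount == 1, to count which packets completed.
 */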

static int efx_poll_loopback(struct efx_nic *efx)
{
        struct efx_loopback_state *state = efx->loopback_selftest;
        struct efx_channel *channel;

        /* NAPI polling is not enabled, so process channels
         * synchronously */
        efx_for_each_channel(channel, efx) {
                if (channel->work_pending)
                        efx_process_channel_now(channel);
        }
        return atomic_read(&state->rx_good) == state->packet_count;
}

static int efx_end_loopback(struct efx_tx_queue *tx_queue,
                            struct efx_loopback_self_tests *lb_tests)
{
        struct efx_nic *efx = tx_queue->efx;
        struct efx_loopback_state *state = efx->loopback_selftest;
        struct sk_buff *skb;
        int tx_done = 0, rx_good, rx_bad;
        int i, rc = 0;

        netif_tx_lock_bh(efx->net_dev);

        /* Count the number of TX completions, and decrement the refcnt. Any
         * skbs not already completed will be freed when the queue is flushed */
        for (i = 0; i < state->packet_count; i++) {
                skb = state->skbs[i];
                if (skb && !skb_shared(skb))
                        ++tx_done;
                dev_kfree_skb(skb);
        }

        netif_tx_unlock_bh(efx->net_dev);

        /* Check TX completion and received packet counts */
        rx_good = atomic_read(&state->rx_good);
        rx_bad = atomic_read(&state->rx_bad);
        if (tx_done != state->packet_count) {
                /* Don't free the skbs; they will be picked up on TX
                 * overflow or channel teardown.
                 */
                netif_err(efx, drv, efx->net_dev,
                          "TX queue %d saw only %d out of an expected %d "
                          "TX completion events in %s loopback test\n",
                          tx_queue->queue, tx_done, state->packet_count,
                          LOOPBACK_MODE(efx));
                rc = -ETIMEDOUT;
                /* Allow to fall through so we see the RX errors as well */
        }

        /* We may always be up to a flush away from our desired packet total */
        if (rx_good != state->packet_count) {
                netif_dbg(efx, drv, efx->net_dev,
                          "TX queue %d saw only %d out of an expected %d "
                          "received packets in %s loopback test\n",
                          tx_queue->queue, rx_good, state->packet_count,
                          LOOPBACK_MODE(efx));
                rc = -ETIMEDOUT;
                /* Fall through */
        }

        /* Update loopback test structure */
        lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
        lb_tests->tx_done[tx_queue->queue] += tx_done;
        lb_tests->rx_good += rx_good;
        lb_tests->rx_bad += rx_bad;

        return rc;
}

static int
efx_test_loopback(struct efx_tx_queue *tx_queue,
                  struct efx_loopback_self_tests *lb_tests)
{
        struct efx_nic *efx = tx_queue->efx;
        struct efx_loopback_state *state = efx->loopback_selftest;
        int i, begin_rc, end_rc;

        for (i = 0; i < 3; i++) {
                /* Determine how many packets to send */
                state->packet_count = efx->txq_entries / 3;
                state->packet_count = min(1 << (i << 2), state->packet_count);
                state->skbs = kcalloc(state->packet_count,
                                      sizeof(state->skbs[0]), GFP_KERNEL);
                if (!state->skbs)
                        return -ENOMEM;
                state->flush = false;

                netif_dbg(efx, drv, efx->net_dev,
                          "TX queue %d testing %s loopback with %d packets\n",
                          tx_queue->queue, LOOPBACK_MODE(efx),
                          state->packet_count);

                efx_iterate_state(efx);
                begin_rc = efx_begin_loopback(tx_queue);

                /* This will normally complete very quickly, but be
                 * prepared to wait much longer. */
                msleep(1);
                if (!efx_poll_loopback(efx)) {
                        msleep(LOOPBACK_TIMEOUT_MS);
                        efx_poll_loopback(efx);
                }

                end_rc = efx_end_loopback(tx_queue, lb_tests);
                kfree(state->skbs);

                if (begin_rc || end_rc) {
                        /* Wait a while to ensure there are no packets
                         * floating around after a failure. */
                        schedule_timeout_uninterruptible(HZ / 10);
                        return begin_rc ? begin_rc : end_rc;
                }
        }

        netif_dbg(efx, drv, efx->net_dev,
                  "TX queue %d passed %s loopback test with a burst length "
                  "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
                  state->packet_count);

        return 0;
}
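
/* Burst-size arithmetic for the loop above: min(1 << (i << 2),
 * txq_entries / 3) gives bursts of 1, 16 and 256 packets for i = 0, 1, 2
 * (1 << 0, 1 << 4, 1 << 8), capped at a third of the TX ring so the
 * queue cannot be overfilled.
 */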

/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
 * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
 * to delay and retry. Therefore, it's safer to just poll directly. Wait
 * for link up and any faults to dissipate. */
static int efx_wait_for_link(struct efx_nic *efx)
{
        struct efx_link_state *link_state = &efx->link_state;
        int count, link_up_count = 0;
        bool link_up;

        for (count = 0; count < 40; count++) {
                schedule_timeout_uninterruptible(HZ / 10);

                if (efx->type->monitor != NULL) {
                        mutex_lock(&efx->mac_lock);
                        efx->type->monitor(efx);
                        mutex_unlock(&efx->mac_lock);
                } else {
                        struct efx_channel *channel = efx_get_channel(efx, 0);
                        if (channel->work_pending)
                                efx_process_channel_now(channel);
                }

                mutex_lock(&efx->mac_lock);
                link_up = link_state->up;
                if (link_up)
                        link_up = !efx->type->check_mac_fault(efx);
                mutex_unlock(&efx->mac_lock);

                if (link_up) {
                        if (++link_up_count == 2)
                                return 0;
                } else {
                        link_up_count = 0;
                }
        }

        return -ETIMEDOUT;
}
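
/* Timing note: 40 polls at HZ / 10 (100 ms) gives a link-up timeout of
 * roughly four seconds, and two consecutive fault-free samples are
 * required before the link is trusted.
 */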

static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
                              unsigned int loopback_modes)
{
        enum efx_loopback_mode mode;
        struct efx_loopback_state *state;
        struct efx_channel *channel =
                efx_get_channel(efx, efx->tx_channel_offset);
        struct efx_tx_queue *tx_queue;
        int rc = 0;

        /* Set the port loopback_selftest member. From this point on
         * all received packets will be dropped. Mark the state as
         * "flushing" so all inflight packets are dropped */
        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (state == NULL)
                return -ENOMEM;
        BUILD_BUG_ON(EFX_LOOPBACK_MAX > LOOPBACK_TEST_MAX);
        state->flush = true;
        efx->loopback_selftest = state;

        /* Test all supported loopback modes */
        for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
                if (!(loopback_modes & (1 << mode)))
                        continue;

                /* Move the port into the specified loopback mode. */
                state->flush = true;
                mutex_lock(&efx->mac_lock);
                efx->loopback_mode = mode;
                rc = __efx_reconfigure_port(efx);
                mutex_unlock(&efx->mac_lock);
                if (rc) {
                        netif_err(efx, drv, efx->net_dev,
                                  "unable to move into %s loopback\n",
                                  LOOPBACK_MODE(efx));
                        goto out;
                }

                rc = efx_wait_for_link(efx);
                if (rc) {
                        netif_err(efx, drv, efx->net_dev,
                                  "loopback %s never came up\n",
                                  LOOPBACK_MODE(efx));
                        goto out;
                }

                /* Test all enabled types of TX queue */
                efx_for_each_channel_tx_queue(tx_queue, channel) {
                        state->offload_csum = (tx_queue->queue &
                                               EFX_TXQ_TYPE_OFFLOAD);
                        rc = efx_test_loopback(tx_queue,
                                               &tests->loopback[mode]);
                        if (rc)
                                goto out;
                }
        }

 out:
        /* Remove the flush. The caller will remove the loopback setting */
        state->flush = true;
        efx->loopback_selftest = NULL;
        wmb();
        kfree(state);

        return rc;
}
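
/* loopback_modes is a bitmask indexed by enum efx_loopback_mode, hence
 * the loopback_modes & (1 << mode) test above; for example, a NIC that
 * only advertises internal MAC loopback would have a single bit set and
 * every other mode would be skipped.
 */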

/**************************************************************************
 *
 * Entry point
 *
 *************************************************************************/

int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
                 unsigned flags)
{
        enum efx_loopback_mode loopback_mode = efx->loopback_mode;
        int phy_mode = efx->phy_mode;
        int rc_test = 0, rc_reset, rc;

        efx_selftest_async_cancel(efx);

        /* Online (i.e. non-disruptive) testing
         * This checks interrupt generation, event delivery and PHY presence. */

        rc = efx_test_phy_alive(efx, tests);
        if (rc && !rc_test)
                rc_test = rc;

        rc = efx_test_nvram(efx, tests);
        if (rc && !rc_test)
                rc_test = rc;

        rc = efx_test_interrupts(efx, tests);
        if (rc && !rc_test)
                rc_test = rc;

        rc = efx_test_eventq_irq(efx, tests);
        if (rc && !rc_test)
                rc_test = rc;

        if (rc_test)
                return rc_test;

        if (!(flags & ETH_TEST_FL_OFFLINE))
                return efx_test_phy(efx, tests, flags);

        /* Offline (i.e. disruptive) testing
         * This checks MAC and PHY loopback on the specified port. */

        /* Detach the device so the kernel doesn't transmit during the
         * loopback test and the watchdog timeout doesn't fire.
         */
        netif_device_detach(efx->net_dev);

        if (efx->type->test_chip) {
                rc_reset = efx->type->test_chip(efx, tests);
                if (rc_reset) {
                        netif_err(efx, hw, efx->net_dev,
                                  "Unable to recover from chip test\n");
                        efx_schedule_reset(efx, RESET_TYPE_DISABLE);
                        return rc_reset;
                }

                if ((tests->registers < 0) && !rc_test)
                        rc_test = -EIO;
        }

        /* Ensure that the PHY is powered and out of loopback
         * for the bist and loopback tests */
        mutex_lock(&efx->mac_lock);
        efx->phy_mode &= ~PHY_MODE_LOW_POWER;
        efx->loopback_mode = LOOPBACK_NONE;
        __efx_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);

        rc = efx_test_phy(efx, tests, flags);
        if (rc && !rc_test)
                rc_test = rc;

        rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
        if (rc && !rc_test)
                rc_test = rc;

        /* restore the PHY to the previous state */
        mutex_lock(&efx->mac_lock);
        efx->phy_mode = phy_mode;
        efx->loopback_mode = loopback_mode;
        __efx_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);

        netif_device_attach(efx->net_dev);

        return rc_test;
}
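
/* How this entry point is reached in practice (a sketch; the exact glue
 * lives in the driver's ethtool code, not shown here): the ethtool
 * self-test ioctl supplies ETH_TEST_FL_OFFLINE for a disruptive test and
 * omits it for the online subset, e.g. from userspace:
 *
 *      # ethtool -t eth0 online        (non-disruptive tests only)
 *      # ethtool -t eth0 offline       (full test, link is disturbed)
 */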

void efx_selftest_async_start(struct efx_nic *efx)
{
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                efx_nic_event_test_start(channel);
        schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
}

void efx_selftest_async_cancel(struct efx_nic *efx)
{
        cancel_delayed_work_sync(&efx->selftest_work);
}

void efx_selftest_async_work(struct work_struct *data)
{
        struct efx_nic *efx = container_of(data, struct efx_nic,
                                           selftest_work.work);
        struct efx_channel *channel;
        int cpu;

        efx_for_each_channel(channel, efx) {
                cpu = efx_nic_event_test_irq_cpu(channel);
                if (cpu < 0)
                        netif_err(efx, ifup, efx->net_dev,
                                  "channel %d failed to trigger an interrupt\n",
                                  channel->channel);
                else
                        netif_dbg(efx, ifup, efx->net_dev,
                                  "channel %d triggered interrupt on CPU %d\n",
                                  channel->channel, cpu);
        }
}
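
/* The three functions above implement a fire-and-forget event/IRQ test:
 * efx_selftest_async_start() arms a test event on every channel and
 * schedules selftest_work to run IRQ_TIMEOUT later; the work item then
 * just logs, per channel, whether the interrupt arrived. Judging by the
 * "ifup" message class, this is kicked off when the interface is brought
 * up.
 */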