Linux Kernel 3.7.1
mcdi.c
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2008-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/delay.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
#include "regs.h"
#include "mcdi_pcol.h"
#include "phy.h"

/**************************************************************************
 *
 * Management-Controller-to-Driver Interface
 *
 **************************************************************************
 */

#define MCDI_RPC_TIMEOUT 10 /* seconds */

#define MCDI_PDU(efx) \
        (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
#define MCDI_DOORBELL(efx) \
        (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
#define MCDI_STATUS(efx) \
        (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 10ms for the status word to be set. */
#define MCDI_STATUS_DELAY_US 100
#define MCDI_STATUS_DELAY_COUNT 100
#define MCDI_STATUS_SLEEP_MS \
        (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
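
/* Editor's note (not in the original source): with the values above this
 * works out to 100 us * 100 / 1000 = 10 ms, matching the 10 ms wait
 * described in the comment. */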

#define SEQ_MASK \
        EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))

static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
        struct siena_nic_data *nic_data;
        EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
        nic_data = efx->nic_data;
        return &nic_data->mcdi;
}

void efx_mcdi_init(struct efx_nic *efx)
{
        struct efx_mcdi_iface *mcdi;

        if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
                return;

        mcdi = efx_mcdi(efx);
        init_waitqueue_head(&mcdi->wq);
        spin_lock_init(&mcdi->iface_lock);
        atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
        mcdi->mode = MCDI_MODE_POLL;

        (void) efx_mcdi_poll_reboot(efx);
}

static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
                            const u8 *inbuf, size_t inlen)
{
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
        unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
        unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
        unsigned int i;
        efx_dword_t hdr;
        u32 xflags, seqno;

        BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
        BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN);

        seqno = mcdi->seqno & SEQ_MASK;
        xflags = 0;
        if (mcdi->mode == MCDI_MODE_EVENTS)
                xflags |= MCDI_HEADER_XFLAGS_EVREQ;

        EFX_POPULATE_DWORD_6(hdr,
                             MCDI_HEADER_RESPONSE, 0,
                             MCDI_HEADER_RESYNC, 1,
                             MCDI_HEADER_CODE, cmd,
                             MCDI_HEADER_DATALEN, inlen,
                             MCDI_HEADER_SEQ, seqno,
                             MCDI_HEADER_XFLAGS, xflags);

        efx_writed(efx, &hdr, pdu);

        for (i = 0; i < inlen; i += 4)
                _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);

        /* Ensure the header and payload are written out before ringing
         * the doorbell */
        wmb();

        /* ring the doorbell with a distinctive value */
        _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
}
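
/* Editor's note (added, not in the original source): the shared-memory
 * layout produced by efx_mcdi_copyin() above is
 *
 *   pdu + 0:  header dword (RESPONSE, RESYNC, CODE, DATALEN, SEQ, XFLAGS)
 *   pdu + 4:  request payload, copied one 32-bit dword at a time
 *   doorbell: the magic value 0x45789abc, written last (after the wmb())
 *             so the MC never sees a doorbell for an incomplete request.
 */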

static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
{
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
        unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
        int i;

        BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
        BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN);

        for (i = 0; i < outlen; i += 4)
                *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
}

static int efx_mcdi_poll(struct efx_nic *efx)
{
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
        unsigned int time, finish;
        unsigned int respseq, respcmd, error;
        unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
        unsigned int rc, spins;
        efx_dword_t reg;

        /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
        rc = -efx_mcdi_poll_reboot(efx);
        if (rc)
                goto out;

        /* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
         * because generally mcdi responses are fast. After that, back off
         * and poll once a jiffy (approximately)
         */
        spins = TICK_USEC;
        finish = get_seconds() + MCDI_RPC_TIMEOUT;

        while (1) {
                if (spins != 0) {
                        --spins;
                        udelay(1);
                } else {
                        schedule_timeout_uninterruptible(1);
                }

                time = get_seconds();

                rmb();
                efx_readd(efx, &reg, pdu);

                /* All 1's indicates that shared memory is in reset (and is
                 * not a valid header). Wait for it to come out reset before
                 * completing the command */
                if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
                    EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
                        break;

                if (time >= finish)
                        return -ETIMEDOUT;
        }

        mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
        respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
        respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
        error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);

        if (error && mcdi->resplen == 0) {
                netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
                rc = EIO;
        } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
                netif_err(efx, hw, efx->net_dev,
                          "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
                          respseq, mcdi->seqno);
                rc = EIO;
        } else if (error) {
                efx_readd(efx, &reg, pdu + 4);
                switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
#define TRANSLATE_ERROR(name) \
                case MC_CMD_ERR_ ## name: \
                        rc = name; \
                        break
                        TRANSLATE_ERROR(ENOENT);
                        TRANSLATE_ERROR(EINTR);
                        TRANSLATE_ERROR(EACCES);
                        TRANSLATE_ERROR(EBUSY);
                        TRANSLATE_ERROR(EINVAL);
                        TRANSLATE_ERROR(EDEADLK);
                        TRANSLATE_ERROR(ENOSYS);
                        TRANSLATE_ERROR(ETIME);
#undef TRANSLATE_ERROR
                default:
                        rc = EIO;
                        break;
                }
        } else
                rc = 0;

out:
        mcdi->resprc = rc;
        if (rc)
                mcdi->resplen = 0;

        /* Return rc=0 like wait_event_timeout() */
        return 0;
}
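
/* Editor's note (added): the fast-poll phase above runs TICK_USEC
 * iterations of udelay(1) -- about 10,000 iterations, i.e. roughly 10 ms,
 * with the canonical USER_HZ of 100 -- before backing off to sleeping one
 * jiffy per poll until MCDI_RPC_TIMEOUT expires. */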

/* Test and clear MC-rebooted flag for this port/function */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
        unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
        efx_dword_t reg;
        uint32_t value;

        if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
                return false;

        efx_readd(efx, &reg, addr);
        value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);

        if (value == 0)
                return 0;

        EFX_ZERO_DWORD(reg);
        efx_writed(efx, &reg, addr);

        if (value == MC_STATUS_DWORD_ASSERT)
                return -EINTR;
        else
                return -EIO;
}

static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
{
        /* Wait until the interface becomes QUIESCENT and we win the race
         * to mark it RUNNING. */
        wait_event(mcdi->wq,
                   atomic_cmpxchg(&mcdi->state,
                                  MCDI_STATE_QUIESCENT,
                                  MCDI_STATE_RUNNING)
                   == MCDI_STATE_QUIESCENT);
}

static int efx_mcdi_await_completion(struct efx_nic *efx)
{
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

        if (wait_event_timeout(
                    mcdi->wq,
                    atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
                    msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
                return -ETIMEDOUT;

        /* Check if efx_mcdi_set_mode() switched us back to polled completions.
         * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
         * completed the request first, then we'll just end up completing the
         * request again, which is safe.
         *
         * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
         * wait_event_timeout() implicitly provides.
         */
        if (mcdi->mode == MCDI_MODE_POLL)
                return efx_mcdi_poll(efx);

        return 0;
}

static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
{
        /* If the interface is RUNNING, then move to COMPLETED and wake any
         * waiters. If the interface isn't in RUNNING then we've received a
         * duplicate completion after we've already transitioned back to
         * QUIESCENT. [A subsequent invocation would increment seqno, so would
         * have failed the seqno check].
         */
        if (atomic_cmpxchg(&mcdi->state,
                           MCDI_STATE_RUNNING,
                           MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
                wake_up(&mcdi->wq);
                return true;
        }

        return false;
}

static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
        atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
        wake_up(&mcdi->wq);
}
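
/* Editor's summary (added): the three helpers above implement the MCDI
 * state machine kept in mcdi->state:
 *
 *   QUIESCENT --efx_mcdi_acquire()--> RUNNING    (callers may sleep here)
 *   RUNNING   --efx_mcdi_complete()-> COMPLETED  (wakes the waiter)
 *   any state --efx_mcdi_release()--> QUIESCENT  (next request may start)
 */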

static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
                            unsigned int datalen, unsigned int errno)
{
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
        bool wake = false;

        spin_lock(&mcdi->iface_lock);

        if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
                if (mcdi->credits)
                        /* The request has been cancelled */
                        --mcdi->credits;
                else
                        netif_err(efx, hw, efx->net_dev,
                                  "MC response mismatch tx seq 0x%x rx "
                                  "seq 0x%x\n", seqno, mcdi->seqno);
        } else {
                mcdi->resprc = errno;
                mcdi->resplen = datalen;

                wake = true;
        }

        spin_unlock(&mcdi->iface_lock);

        if (wake)
                efx_mcdi_complete(mcdi);
}

int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
                 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
                 size_t *outlen_actual)
{
        efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
        return efx_mcdi_rpc_finish(efx, cmd, inlen,
                                   outbuf, outlen, outlen_actual);
}
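
/* Editor's example (added, not part of the original file): a minimal
 * caller of efx_mcdi_rpc(), modelled on efx_mcdi_print_fwver() below.
 * The helper name is hypothetical; the command and buffer macros come
 * from mcdi_pcol.h, and GET_VERSION_OUT_FIRMWARE is assumed to be
 * defined there.
 */
static int efx_mcdi_example_get_version(struct efx_nic *efx, u32 *fw_version)
{
        u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)];
        size_t outlen;
        int rc;

        /* NULL/0 input buffer: MC_CMD_GET_VERSION takes no parameters */
        rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;      /* already a negative errno */
        if (outlen < MC_CMD_GET_VERSION_OUT_LEN)
                return -EIO;    /* response shorter than expected */

        *fw_version = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
        return 0;
}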

void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
                        size_t inlen)
{
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

        BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);

        efx_mcdi_acquire(mcdi);

        /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
        spin_lock_bh(&mcdi->iface_lock);
        ++mcdi->seqno;
        spin_unlock_bh(&mcdi->iface_lock);

        efx_mcdi_copyin(efx, cmd, inbuf, inlen);
}

int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
                        u8 *outbuf, size_t outlen, size_t *outlen_actual)
{
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
        int rc;

        BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);

        if (mcdi->mode == MCDI_MODE_POLL)
                rc = efx_mcdi_poll(efx);
        else
                rc = efx_mcdi_await_completion(efx);

        if (rc != 0) {
                /* Close the race with efx_mcdi_ev_cpl() executing just too late
                 * and completing a request we've just cancelled, by ensuring
                 * that the seqno check therein fails.
                 */
                spin_lock_bh(&mcdi->iface_lock);
                ++mcdi->seqno;
                ++mcdi->credits;
                spin_unlock_bh(&mcdi->iface_lock);

                netif_err(efx, hw, efx->net_dev,
                          "MC command 0x%x inlen %d mode %d timed out\n",
                          cmd, (int)inlen, mcdi->mode);
        } else {
                size_t resplen;

                /* At the very least we need a memory barrier here to ensure
                 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
                 * a spurious efx_mcdi_ev_cpl() running concurrently by
                 * acquiring the iface_lock. */
                spin_lock_bh(&mcdi->iface_lock);
                rc = -mcdi->resprc;
                resplen = mcdi->resplen;
                spin_unlock_bh(&mcdi->iface_lock);

                if (rc == 0) {
                        efx_mcdi_copyout(efx, outbuf,
                                         min(outlen, mcdi->resplen + 3) & ~0x3);
                        if (outlen_actual != NULL)
                                *outlen_actual = resplen;
                } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
                        ; /* Don't reset if MC_CMD_REBOOT returns EIO */
                else if (rc == -EIO || rc == -EINTR) {
                        netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
                                  -rc);
                        efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
                } else
                        netif_dbg(efx, hw, efx->net_dev,
                                  "MC command 0x%x inlen %d failed rc=%d\n",
                                  cmd, (int)inlen, -rc);

                if (rc == -EIO || rc == -EINTR) {
                        msleep(MCDI_STATUS_SLEEP_MS);
                        efx_mcdi_poll_reboot(efx);
                }
        }

        efx_mcdi_release(mcdi);
        return rc;
}

void efx_mcdi_mode_poll(struct efx_nic *efx)
{
        struct efx_mcdi_iface *mcdi;

        if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
                return;

        mcdi = efx_mcdi(efx);
        if (mcdi->mode == MCDI_MODE_POLL)
                return;

        /* We can switch from event completion to polled completion, because
         * mcdi requests are always completed in shared memory. We do this by
         * switching the mode to POLL'd then completing the request.
         * efx_mcdi_await_completion() will then call efx_mcdi_poll().
         *
         * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
         * which efx_mcdi_complete() provides for us.
         */
        mcdi->mode = MCDI_MODE_POLL;

        efx_mcdi_complete(mcdi);
}

void efx_mcdi_mode_event(struct efx_nic *efx)
{
        struct efx_mcdi_iface *mcdi;

        if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
                return;

        mcdi = efx_mcdi(efx);

        if (mcdi->mode == MCDI_MODE_EVENTS)
                return;

        /* We can't switch from polled to event completion in the middle of a
         * request, because the completion method is specified in the request.
         * So acquire the interface to serialise the requestors. We don't need
         * to acquire the iface_lock to change the mode here, but we do need a
         * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
         * efx_mcdi_acquire() provides.
         */
        efx_mcdi_acquire(mcdi);
        mcdi->mode = MCDI_MODE_EVENTS;
        efx_mcdi_release(mcdi);
}
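
/* Editor's note (added): the interface starts out in MCDI_MODE_POLL (set
 * by efx_mcdi_init() above), since event queues do not exist early in
 * driver start-up; the driver is expected to call efx_mcdi_mode_event()
 * once its event queues are running, and to drop back to polling around
 * resets. */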

static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

        /* If there is an outstanding MCDI request, it has been terminated
         * either by a BADASSERT or REBOOT event. If the mcdi interface is
         * in polled mode, then do nothing because the MC reboot handler will
         * set the header correctly. However, if the mcdi interface is waiting
         * for a CMDDONE event it won't receive it [and since all MCDI events
         * are sent to the same queue, we can't be racing with
         * efx_mcdi_ev_cpl()]
         *
         * There's a race here with efx_mcdi_rpc(), because we might receive
         * a REBOOT event *before* the request has been copied out. In polled
         * mode (during startup) this is irrelevant, because efx_mcdi_complete()
         * is ignored. In event mode, this condition is just an edge-case of
         * receiving a REBOOT event after posting the MCDI request. Did the mc
         * reboot before or after the copyout? The best we can do always is
         * just return failure.
         */
        spin_lock(&mcdi->iface_lock);
        if (efx_mcdi_complete(mcdi)) {
                if (mcdi->mode == MCDI_MODE_EVENTS) {
                        mcdi->resprc = rc;
                        mcdi->resplen = 0;
                        ++mcdi->credits;
                }
        } else {
                int count;

                /* Nobody was waiting for an MCDI request, so trigger a reset */
                efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

                /* Consume the status word since efx_mcdi_rpc_finish() won't */
                for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
                        if (efx_mcdi_poll_reboot(efx))
                                break;
                        udelay(MCDI_STATUS_DELAY_US);
                }
        }

        spin_unlock(&mcdi->iface_lock);
}

static unsigned int efx_mcdi_event_link_speed[] = {
        [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
        [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
        [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
};


static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
{
        u32 flags, fcntl, speed, lpa;

        speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
        EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
        speed = efx_mcdi_event_link_speed[speed];

        flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
        fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
        lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);

        /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
         * which is only run after flushing the event queues. Therefore, it
         * is safe to modify the link state outside of the mac_lock here.
         */
        efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);

        efx_mcdi_phy_check_fcntl(efx, lpa);

        efx_link_status_changed(efx);
}

/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
                            efx_qword_t *event)
{
        struct efx_nic *efx = channel->efx;
        int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
        u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

        switch (code) {
        case MCDI_EVENT_CODE_BADSSERT:
                netif_err(efx, hw, efx->net_dev,
                          "MC watchdog or assertion failure at 0x%x\n", data);
                efx_mcdi_ev_death(efx, EINTR);
                break;

        case MCDI_EVENT_CODE_PMNOTICE:
                netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
                break;

        case MCDI_EVENT_CODE_CMDDONE:
                efx_mcdi_ev_cpl(efx,
                                MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
                                MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
                                MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
                break;

        case MCDI_EVENT_CODE_LINKCHANGE:
                efx_mcdi_process_link_change(efx, event);
                break;
        case MCDI_EVENT_CODE_SENSOREVT:
                efx_mcdi_sensor_event(efx, event);
                break;
        case MCDI_EVENT_CODE_SCHEDERR:
                netif_info(efx, hw, efx->net_dev,
                           "MC Scheduler error address=0x%x\n", data);
                break;
        case MCDI_EVENT_CODE_REBOOT:
                netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
                efx_mcdi_ev_death(efx, EIO);
                break;
        case MCDI_EVENT_CODE_MAC_STATS_DMA:
                /* MAC stats are gathered lazily. We can ignore this. */
                break;
        case MCDI_EVENT_CODE_FLR:
                efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
                break;
        case MCDI_EVENT_CODE_PTP_RX:
        case MCDI_EVENT_CODE_PTP_FAULT:
        case MCDI_EVENT_CODE_PTP_PPS:
                efx_ptp_event(efx, event);
                break;

        default:
                netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
                          code);
        }
}

/**************************************************************************
 *
 * Specific request functions
 *
 **************************************************************************
 */

void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
        u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)];
        size_t outlength;
        const __le16 *ver_words;
        int rc;

        BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
                          outbuf, sizeof(outbuf), &outlength);
        if (rc)
                goto fail;

        if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
                rc = -EIO;
                goto fail;
        }

        ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
        snprintf(buf, len, "%u.%u.%u.%u",
                 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
                 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
        return;

fail:
        netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        buf[0] = 0;
}
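
/* Editor's note (added): the "%u.%u.%u.%u" string built above is what the
 * driver reports as its firmware version, e.g. in ethtool -i output. */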

int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
                        bool *was_attached)
{
        u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN];
        u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN];
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
                       driver_operating ? 1 : 0);
        MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);

        rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                goto fail;
        if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
                rc = -EIO;
                goto fail;
        }

        if (was_attached != NULL)
                *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
        return 0;

fail:
        netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}

int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
                           u16 *fw_subtype_list, u32 *capabilities)
{
        u8 outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX];
        size_t outlen, offset, i;
        int port_num = efx_port_num(efx);
        int rc;

        BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                goto fail;

        if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
                rc = -EIO;
                goto fail;
        }

        offset = (port_num)
                ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
                : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
        if (mac_address)
                memcpy(mac_address, outbuf + offset, ETH_ALEN);
        if (fw_subtype_list) {
                /* Byte-swap and truncate or zero-pad as necessary */
                offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
                for (i = 0;
                     i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM;
                     i++) {
                        fw_subtype_list[i] =
                                (offset + 2 <= outlen) ?
                                le16_to_cpup((__le16 *)(outbuf + offset)) : 0;
                        offset += 2;
                }
        }
        if (capabilities) {
                if (port_num)
                        *capabilities = MCDI_DWORD(outbuf,
                                        GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
                else
                        *capabilities = MCDI_DWORD(outbuf,
                                        GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
        }

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
                  __func__, rc, (int)outlen);

        return rc;
}

int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
        u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN];
        u32 dest = 0;
        int rc;

        if (uart)
                dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
        if (evq)
                dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;

        MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
        MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);

        BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
                          NULL, 0, NULL);
        if (rc)
                goto fail;

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}

int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
        u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
        size_t outlen;
        int rc;

        BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                goto fail;
        if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
                rc = -EIO;
                goto fail;
        }

        *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
                  __func__, rc);
        return rc;
}

int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
                        size_t *size_out, size_t *erase_size_out,
                        bool *protected_out)
{
        u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN];
        u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN];
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                goto fail;
        if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
                rc = -EIO;
                goto fail;
        }

        *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
        *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
        *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
                            (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}

int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
        u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
        int rc;

        MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);

        BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
                          NULL, 0, NULL);
        if (rc)
                goto fail;

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}

int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
                        loff_t offset, u8 *buffer, size_t length)
{
        u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
        u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
        MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
        MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);

        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                goto fail;

        memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}

int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
                         loff_t offset, const u8 *buffer, size_t length)
{
        u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
        int rc;

        MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
        MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
        MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
        memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

        BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
                          ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
                          NULL, 0, NULL);
        if (rc)
                goto fail;

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}

int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
                         loff_t offset, size_t length)
{
        u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
        int rc;

        MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
        MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
        MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);

        BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
                          NULL, 0, NULL);
        if (rc)
                goto fail;

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}

int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
        u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
        int rc;

        MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);

        BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
                          NULL, 0, NULL);
        if (rc)
                goto fail;

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}
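
/* Editor's example (added, not part of the original file): the expected
 * calling sequence for the NVRAM helpers above. The function name is
 * hypothetical; this sketch assumes len fits in one write (callers must
 * otherwise chunk to EFX_MCDI_NVRAM_LEN_MAX) and is erase-aligned.
 */
static int efx_mcdi_example_nvram_rewrite(struct efx_nic *efx,
                                          unsigned int type,
                                          const u8 *data, size_t len)
{
        int rc, rc2;

        rc = efx_mcdi_nvram_update_start(efx, type);
        if (rc)
                return rc;

        rc = efx_mcdi_nvram_erase(efx, type, 0, len);
        if (rc == 0)
                rc = efx_mcdi_nvram_write(efx, type, 0, data, len);

        /* Always pair update_start with update_finish, even on failure */
        rc2 = efx_mcdi_nvram_update_finish(efx, type);
        return rc ? rc : rc2;
}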

static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
        u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
        u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
        int rc;

        MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);

        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), NULL);
        if (rc)
                return rc;

        switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
        case MC_CMD_NVRAM_TEST_PASS:
        case MC_CMD_NVRAM_TEST_NOTSUPP:
                return 0;
        default:
                return -EIO;
        }
}

int efx_mcdi_nvram_test_all(struct efx_nic *efx)
{
        u32 nvram_types;
        unsigned int type;
        int rc;

        rc = efx_mcdi_nvram_types(efx, &nvram_types);
        if (rc)
                goto fail1;

        type = 0;
        while (nvram_types != 0) {
                if (nvram_types & 1) {
                        rc = efx_mcdi_nvram_test(efx, type);
                        if (rc)
                                goto fail2;
                }
                type++;
                nvram_types >>= 1;
        }

        return 0;

fail2:
        netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
                  __func__, type);
fail1:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}

static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
        u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
        u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
        unsigned int flags, index, ofst;
        const char *reason;
        size_t outlen;
        int retry;
        int rc;

        /* Attempt to read any stored assertion state before we reboot
         * the mcfw out of the assertion handler. Retry twice, once
         * because a boot-time assertion might cause this command to fail
         * with EINTR. And once again because GET_ASSERTS can race with
         * MC_CMD_REBOOT running on the other port. */
        retry = 2;
        do {
                MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
                rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
                                  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
                                  outbuf, sizeof(outbuf), &outlen);
        } while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

        if (rc)
                return rc;
        if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
                return -EIO;

        /* Print out any recorded assertion state */
        flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
        if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
                return 0;

        reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
                ? "system-level assertion"
                : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
                ? "thread-level assertion"
                : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
                ? "watchdog reset"
                : "unknown assertion";
        netif_err(efx, hw, efx->net_dev,
                  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
                  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
                  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

        /* Print out the registers */
        ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
        for (index = 1; index < 32; index++) {
                netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index,
                          MCDI_DWORD2(outbuf, ofst));
                ofst += sizeof(efx_dword_t);
        }

        return 0;
}

static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
        u8 inbuf[MC_CMD_REBOOT_IN_LEN];

        /* If the MC is running debug firmware, it might now be
         * waiting for a debugger to attach, but we just want it to
         * reboot. We set a flag that makes the command a no-op if it
         * has already done so. We don't know what return code to
         * expect (0 or -EIO), so ignore it.
         */
        BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
        MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
                       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
        (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
                            NULL, 0, NULL);
}

int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
        int rc;

        rc = efx_mcdi_read_assertion(efx);
        if (rc)
                return rc;

        efx_mcdi_exit_assertion(efx);

        return 0;
}

void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
        u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
        int rc;

        BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
        BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
        BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

        BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

        MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

        rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
                          NULL, 0, NULL);
        if (rc)
                netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
                          __func__, rc);
}

static int efx_mcdi_reset_port(struct efx_nic *efx)
{
        int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
        if (rc)
                netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
                          __func__, rc);
        return rc;
}

int efx_mcdi_reset_mc(struct efx_nic *efx)
{
        u8 inbuf[MC_CMD_REBOOT_IN_LEN];
        int rc;

        BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
        MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
        rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
                          NULL, 0, NULL);
        /* White is black, and up is down */
        if (rc == -EIO)
                return 0;
        if (rc == 0)
                rc = -EIO;
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}
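
/* Editor's note (added): the inverted convention above ("White is black")
 * is deliberate: a successful reboot kills the MCDI transport mid-command,
 * so -EIO is the expected sign of success, while rc == 0 means the MC did
 * not actually reboot. */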

static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
                                   const u8 *mac, int *id_out)
{
        u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
        u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
        MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
                       MC_CMD_FILTER_MODE_SIMPLE);
        memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);

        rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                goto fail;

        if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
                rc = -EIO;
                goto fail;
        }

        *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);

        return 0;

fail:
        *id_out = -1;
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;

}


int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
        return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}


int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
        u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN];
        size_t outlen;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                goto fail;

        if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
                rc = -EIO;
                goto fail;
        }

        *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);

        return 0;

fail:
        *id_out = -1;
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}


int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
        u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
        int rc;

        MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);

        rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
                          NULL, 0, NULL);
        if (rc)
                goto fail;

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}

int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
        struct efx_channel *channel;
        struct efx_rx_queue *rx_queue;
        __le32 *qid;
        int rc, count;

        BUILD_BUG_ON(EFX_MAX_CHANNELS >
                     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

        qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
        if (qid == NULL)
                return -ENOMEM;

        count = 0;
        efx_for_each_channel(channel, efx) {
                efx_for_each_channel_rx_queue(rx_queue, channel) {
                        if (rx_queue->flush_pending) {
                                rx_queue->flush_pending = false;
                                atomic_dec(&efx->rxq_flush_pending);
                                qid[count++] = cpu_to_le32(
                                        efx_rx_queue_index(rx_queue));
                        }
                }
        }

        rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid,
                          count * sizeof(*qid), NULL, 0, NULL);
        WARN_ON(rc > 0);

        kfree(qid);

        return rc;
}

int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
        if (rc)
                goto fail;

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}