bfa_ioc.c
1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 
19 #include "bfa_ioc.h"
20 #include "bfi_reg.h"
21 #include "bfa_defs.h"
22 
23 /* IOC local definitions */
24 
25 /* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
26 
27 #define bfa_ioc_firmware_lock(__ioc) \
28  ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
29 #define bfa_ioc_firmware_unlock(__ioc) \
30  ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
31 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
32 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
33 #define bfa_ioc_notify_fail(__ioc) \
34  ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
35 #define bfa_ioc_sync_start(__ioc) \
36  ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
37 #define bfa_ioc_sync_join(__ioc) \
38  ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
39 #define bfa_ioc_sync_leave(__ioc) \
40  ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
41 #define bfa_ioc_sync_ack(__ioc) \
42  ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
43 #define bfa_ioc_sync_complete(__ioc) \
44  ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
45 
46 #define bfa_ioc_mbox_cmd_pending(__ioc) \
47  (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
48  readl((__ioc)->ioc_regs.hfn_mbox_cmd))
49 
50 static bool bfa_nw_auto_recover = true;
51 
52 /*
53  * forward declarations
54  */
55 static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
56 static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
57 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
58 static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
59 static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
60 static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
61 static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
62 static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
63 static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
64 static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
65 static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
66 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
67 static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
68 static void bfa_ioc_recover(struct bfa_ioc *ioc);
69 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
70 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
71 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
72 static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
73 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
74 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
75 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
76 static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
77 static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
78 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
79 static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
80  u32 boot_param);
81 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
82 static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
83  char *serial_num);
84 static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
85  char *fw_ver);
86 static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
87  char *chip_rev);
88 static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
89  char *optrom_ver);
90 static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
91  char *manufacturer);
92 static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
93 static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
94 
95 /* IOC state machine definitions/declarations */
96 enum ioc_event {
97  IOC_E_RESET = 1, /*!< IOC reset request */
98  IOC_E_ENABLE = 2, /*!< IOC enable request */
99  IOC_E_DISABLE = 3, /*!< IOC disable request */
100  IOC_E_DETACH = 4, /*!< driver detach cleanup */
101  IOC_E_ENABLED = 5, /*!< f/w enabled */
102  IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
103  IOC_E_DISABLED = 7, /*!< f/w disabled */
104  IOC_E_PFFAILED = 8, /*!< failure notice by iocpf sm */
105  IOC_E_HBFAIL = 9, /*!< heartbeat failure */
106  IOC_E_HWERROR = 10, /*!< hardware error interrupt */
107  IOC_E_TIMEOUT = 11, /*!< timeout */
108  IOC_E_HWFAILED = 12, /*!< PCI mapping failure notice */
109 };
110 
111 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
112 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
113 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
114 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
115 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
116 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
117 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
118 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
119 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
120 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
121 
122 static struct bfa_sm_table ioc_sm_table[] = {
123  {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
124  {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
125  {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
126  {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
127  {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
128  {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
129  {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
130  {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
131  {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
132  {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
133 };
134 
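/*
 * Editorial note (added, not in the original source): bfa_fsm_state_decl()
 * declares an entry function and an event handler for each state, and the
 * bfa_sm_table entries above pair each handler with its externally visible
 * state enum.  A minimal sketch of the lookup, mirroring how
 * bfa_ioc_get_state() later in this file uses it:
 */
#if 0
static enum bfa_ioc_state
example_ioc_state(struct bfa_ioc *ioc)
{
	/* scan ioc_sm_table[] for the handler currently installed in ioc->fsm */
	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}
#endif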
135 /*
136  * Forward declarations for iocpf state machine
137  */
138 static void bfa_iocpf_enable(struct bfa_ioc *ioc);
139 static void bfa_iocpf_disable(struct bfa_ioc *ioc);
140 static void bfa_iocpf_fail(struct bfa_ioc *ioc);
141 static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
142 static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
143 static void bfa_iocpf_stop(struct bfa_ioc *ioc);
144 
145 /* IOCPF state machine events */
146 enum iocpf_event {
147  IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
148  IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
149  IOCPF_E_STOP = 3, /*!< stop on driver detach */
150  IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
151  IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */
152  IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */
153  IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */
154  IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */
155  IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */
156  IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
157  IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */
158  IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */
159 };
160 
161 /* IOCPF states */
162 enum bfa_iocpf_state {
163  BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
164  BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
165  BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */
166  BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */
167  BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */
168  BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */
169  BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */
170  BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */
171  BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */
172 };
173 
174 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
175 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
176 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
177 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
178 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
179 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
180 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
181 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
182  enum iocpf_event);
183 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
184 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
185 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
186 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
187 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
188  enum iocpf_event);
189 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
190 
191 static struct bfa_sm_table iocpf_sm_table[] = {
192  {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
193  {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
194  {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
195  {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
196  {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
197  {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
198  {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
199  {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
200  {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
201  {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
202  {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
203  {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
204  {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
205  {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
206 };
207 
208 /* IOC State Machine */
209 
210 /* Beginning state. IOC uninit state. */
211 static void
212 bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
213 {
214 }
215 
216 /* IOC is in uninit state. */
217 static void
218 bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
219 {
220  switch (event) {
221  case IOC_E_RESET:
222  bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
223  break;
224 
225  default:
226  bfa_sm_fault(event);
227  }
228 }
229 
230 /* Reset entry actions -- initialize state machine */
231 static void
232 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
233 {
234  bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
235 }
236 
237 /* IOC is in reset state. */
238 static void
239 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
240 {
241  switch (event) {
242  case IOC_E_ENABLE:
243  bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
244  break;
245 
246  case IOC_E_DISABLE:
247  bfa_ioc_disable_comp(ioc);
248  break;
249 
250  case IOC_E_DETACH:
251  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
252  break;
253 
254  default:
255  bfa_sm_fault(event);
256  }
257 }
258 
259 static void
260 bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
261 {
262  bfa_iocpf_enable(ioc);
263 }
264 
265 /* Host IOC function is being enabled, awaiting response from firmware.
266  * Semaphore is acquired.
267  */
268 static void
269 bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
270 {
271  switch (event) {
272  case IOC_E_ENABLED:
273  bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
274  break;
275 
276  case IOC_E_PFFAILED:
277  /* !!! fall through !!! */
278  case IOC_E_HWERROR:
279  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
280  bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
281  if (event != IOC_E_PFFAILED)
282  bfa_iocpf_initfail(ioc);
283  break;
284 
285  case IOC_E_HWFAILED:
286  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
287  bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
288  break;
289 
290  case IOC_E_DISABLE:
291  bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
292  break;
293 
294  case IOC_E_DETACH:
295  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
296  bfa_iocpf_stop(ioc);
297  break;
298 
299  case IOC_E_ENABLE:
300  break;
301 
302  default:
303  bfa_sm_fault(event);
304  }
305 }
306 
307 /* IOC enabled -- request adapter attributes from firmware; start response timer. */
308 static void
309 bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
310 {
311  mod_timer(&ioc->ioc_timer, jiffies +
312  msecs_to_jiffies(BFA_IOC_TOV));
313  bfa_ioc_send_getattr(ioc);
314 }
315 
316 /* IOC configuration in progress. Timer is active. */
317 static void
318 bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
319 {
320  switch (event) {
321  case IOC_E_FWRSP_GETATTR:
322  del_timer(&ioc->ioc_timer);
323  bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
324  break;
325 
326  case IOC_E_PFFAILED:
327  case IOC_E_HWERROR:
328  del_timer(&ioc->ioc_timer);
329  /* fall through */
330  case IOC_E_TIMEOUT:
331  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
332  bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
333  if (event != IOC_E_PFFAILED)
334  bfa_iocpf_getattrfail(ioc);
335  break;
336 
337  case IOC_E_DISABLE:
338  del_timer(&ioc->ioc_timer);
339  bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
340  break;
341 
342  case IOC_E_ENABLE:
343  break;
344 
345  default:
346  bfa_sm_fault(event);
347  }
348 }
349 
350 static void
351 bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
352 {
353  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
354  bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
355  bfa_ioc_hb_monitor(ioc);
356 }
357 
358 static void
359 bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
360 {
361  switch (event) {
362  case IOC_E_ENABLE:
363  break;
364 
365  case IOC_E_DISABLE:
366  bfa_ioc_hb_stop(ioc);
367  bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
368  break;
369 
370  case IOC_E_PFFAILED:
371  case IOC_E_HWERROR:
372  bfa_ioc_hb_stop(ioc);
373  /* !!! fall through !!! */
374  case IOC_E_HBFAIL:
375  if (ioc->iocpf.auto_recover)
376  bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
377  else
378  bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
379 
380  bfa_ioc_fail_notify(ioc);
381 
382  if (event != IOC_E_PFFAILED)
383  bfa_iocpf_fail(ioc);
384  break;
385 
386  default:
387  bfa_sm_fault(event);
388  }
389 }
390 
391 static void
392 bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
393 {
394  bfa_iocpf_disable(ioc);
395 }
396 
397 /* IOC is being disabled */
398 static void
399 bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
400 {
401  switch (event) {
402  case IOC_E_DISABLED:
403  bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
404  break;
405 
406  case IOC_E_HWERROR:
407  /*
408  * No state change. Will move to disabled state
409  * after iocpf sm completes failure processing and
410  * moves to disabled state.
411  */
412  bfa_iocpf_fail(ioc);
413  break;
414 
415  case IOC_E_HWFAILED:
416  bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
417  bfa_ioc_disable_comp(ioc);
418  break;
419 
420  default:
421  bfa_sm_fault(event);
422  }
423 }
424 
425 /* IOC disable completion entry. */
426 static void
427 bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
428 {
429  bfa_ioc_disable_comp(ioc);
430 }
431 
432 static void
433 bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
434 {
435  switch (event) {
436  case IOC_E_ENABLE:
437  bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
438  break;
439 
440  case IOC_E_DISABLE:
441  ioc->cbfn->disable_cbfn(ioc->bfa);
442  break;
443 
444  case IOC_E_DETACH:
445  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
446  bfa_iocpf_stop(ioc);
447  break;
448 
449  default:
450  bfa_sm_fault(event);
451  }
452 }
453 
454 static void
455 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
456 {
457 }
458 
459 /* Hardware initialization retry. */
460 static void
461 bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
462 {
463  switch (event) {
464  case IOC_E_ENABLED:
465  bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
466  break;
467 
468  case IOC_E_PFFAILED:
469  case IOC_E_HWERROR:
470  /*
471  * Initialization retry failed.
472  */
473  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
474  bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
475  if (event != IOC_E_PFFAILED)
476  bfa_iocpf_initfail(ioc);
477  break;
478 
479  case IOC_E_HWFAILED:
480  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
481  bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
482  break;
483 
484  case IOC_E_ENABLE:
485  break;
486 
487  case IOC_E_DISABLE:
488  bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
489  break;
490 
491  case IOC_E_DETACH:
492  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
493  bfa_iocpf_stop(ioc);
494  break;
495 
496  default:
497  bfa_sm_fault(event);
498  }
499 }
500 
501 static void
502 bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
503 {
504 }
505 
506 /* IOC failure. */
507 static void
508 bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
509 {
510  switch (event) {
511  case IOC_E_ENABLE:
512  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
513  break;
514 
515  case IOC_E_DISABLE:
516  bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
517  break;
518 
519  case IOC_E_DETACH:
520  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
521  bfa_iocpf_stop(ioc);
522  break;
523 
524  case IOC_E_HWERROR:
525  /* HB failure notification, ignore. */
526  break;
527 
528  default:
529  bfa_sm_fault(event);
530  }
531 }
532 
533 static void
534 bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
535 {
536 }
537 
538 /* IOC hardware failure. */
539 static void
540 bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
541 {
542  switch (event) {
543 
544  case IOC_E_ENABLE:
545  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
546  break;
547 
548  case IOC_E_DISABLE:
549  ioc->cbfn->disable_cbfn(ioc->bfa);
550  break;
551 
552  case IOC_E_DETACH:
553  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
554  break;
555 
556  default:
557  bfa_sm_fault(event);
558  }
559 }
560 
561 /* IOCPF State Machine */
562 
563 /* Reset entry actions -- initialize state machine */
564 static void
565 bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
566 {
567  iocpf->fw_mismatch_notified = false;
568  iocpf->auto_recover = bfa_nw_auto_recover;
569 }
570 
571 /* Beginning state. IOC is in reset state. */
572 static void
573 bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
574 {
575  switch (event) {
576  case IOCPF_E_ENABLE:
577  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
578  break;
579 
580  case IOCPF_E_STOP:
581  break;
582 
583  default:
584  bfa_sm_fault(event);
585  }
586 }
587 
588 /* Semaphore should be acquired for version check. */
589 static void
590 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
591 {
592  bfa_ioc_hw_sem_init(iocpf->ioc);
593  bfa_ioc_hw_sem_get(iocpf->ioc);
594 }
595 
596 /* Awaiting h/w semaphore to continue with version check. */
597 static void
598 bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
599 {
600  struct bfa_ioc *ioc = iocpf->ioc;
601 
602  switch (event) {
603  case IOCPF_E_SEMLOCKED:
604  if (bfa_ioc_firmware_lock(ioc)) {
605  if (bfa_ioc_sync_start(ioc)) {
606  bfa_ioc_sync_join(ioc);
607  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
608  } else {
609  bfa_ioc_firmware_unlock(ioc);
610  bfa_nw_ioc_hw_sem_release(ioc);
611  mod_timer(&ioc->sem_timer, jiffies +
612  msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
613  }
614  } else {
615  bfa_nw_ioc_hw_sem_release(ioc);
616  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
617  }
618  break;
619 
620  case IOCPF_E_SEM_ERROR:
621  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
622  bfa_ioc_pf_hwfailed(ioc);
623  break;
624 
625  case IOCPF_E_DISABLE:
626  bfa_ioc_hw_sem_get_cancel(ioc);
627  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
628  bfa_ioc_pf_disabled(ioc);
629  break;
630 
631  case IOCPF_E_STOP:
632  bfa_ioc_hw_sem_get_cancel(ioc);
633  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
634  break;
635 
636  default:
637  bfa_sm_fault(event);
638  }
639 }
640 
641 /* Notify enable completion callback */
642 static void
643 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
644 {
645  /* Call only the first time sm enters fwmismatch state. */
646  if (!iocpf->fw_mismatch_notified)
647  bfa_ioc_pf_fwmismatch(iocpf->ioc);
648 
649  iocpf->fw_mismatch_notified = true;
650  mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
651  msecs_to_jiffies(BFA_IOC_TOV));
652 }
653 
654 /* Awaiting firmware version match. */
655 static void
656 bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
657 {
658  struct bfa_ioc *ioc = iocpf->ioc;
659 
660  switch (event) {
661  case IOCPF_E_TIMEOUT:
662  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
663  break;
664 
665  case IOCPF_E_DISABLE:
666  del_timer(&ioc->iocpf_timer);
667  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
668  bfa_ioc_pf_disabled(ioc);
669  break;
670 
671  case IOCPF_E_STOP:
672  del_timer(&ioc->iocpf_timer);
673  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
674  break;
675 
676  default:
677  bfa_sm_fault(event);
678  }
679 }
680 
681 /* Request for semaphore. */
682 static void
683 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
684 {
685  bfa_ioc_hw_sem_get(iocpf->ioc);
686 }
687 
688 /* Awaiting semaphore for h/w initialization. */
689 static void
690 bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
691 {
692  struct bfa_ioc *ioc = iocpf->ioc;
693 
694  switch (event) {
695  case IOCPF_E_SEMLOCKED:
696  if (bfa_ioc_sync_complete(ioc)) {
697  bfa_ioc_sync_join(ioc);
698  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
699  } else {
700  bfa_nw_ioc_hw_sem_release(ioc);
701  mod_timer(&ioc->sem_timer, jiffies +
702  msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
703  }
704  break;
705 
706  case IOCPF_E_SEM_ERROR:
707  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
708  bfa_ioc_pf_hwfailed(ioc);
709  break;
710 
711  case IOCPF_E_DISABLE:
712  bfa_ioc_hw_sem_get_cancel(ioc);
713  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
714  break;
715 
716  default:
717  bfa_sm_fault(event);
718  }
719 }
720 
721 static void
722 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
723 {
724  iocpf->poll_time = 0;
725  bfa_ioc_reset(iocpf->ioc, false);
726 }
727 
728 /* Hardware is being initialized. Interrupts are enabled.
729  * Holding hardware semaphore lock.
730  */
731 static void
732 bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
733 {
734  struct bfa_ioc *ioc = iocpf->ioc;
735 
736  switch (event) {
737  case IOCPF_E_FWREADY:
738  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
739  break;
740 
741  case IOCPF_E_TIMEOUT:
742  bfa_nw_ioc_hw_sem_release(ioc);
743  bfa_ioc_pf_failed(ioc);
744  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
745  break;
746 
747  case IOCPF_E_DISABLE:
748  del_timer(&ioc->iocpf_timer);
749  bfa_ioc_sync_leave(ioc);
750  bfa_nw_ioc_hw_sem_release(ioc);
751  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
752  break;
753 
754  default:
755  bfa_sm_fault(event);
756  }
757 }
758 
759 static void
760 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
761 {
762  mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
763  msecs_to_jiffies(BFA_IOC_TOV));
764  /*
765  * Enable Interrupts before sending fw IOC ENABLE cmd.
766  */
767  iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
768  bfa_ioc_send_enable(iocpf->ioc);
769 }
770 
771 /* Host IOC function is being enabled, awaiting response from firmware.
772  * Semaphore is acquired.
773  */
774 static void
775 bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
776 {
777  struct bfa_ioc *ioc = iocpf->ioc;
778 
779  switch (event) {
780  case IOCPF_E_FWRSP_ENABLE:
781  del_timer(&ioc->iocpf_timer);
782  bfa_nw_ioc_hw_sem_release(ioc);
783  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
784  break;
785 
786  case IOCPF_E_INITFAIL:
787  del_timer(&ioc->iocpf_timer);
788  /*
789  * !!! fall through !!!
790  */
791  case IOCPF_E_TIMEOUT:
792  bfa_nw_ioc_hw_sem_release(ioc);
793  if (event == IOCPF_E_TIMEOUT)
794  bfa_ioc_pf_failed(ioc);
795  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
796  break;
797 
798  case IOCPF_E_DISABLE:
799  del_timer(&ioc->iocpf_timer);
800  bfa_nw_ioc_hw_sem_release(ioc);
801  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
802  break;
803 
804  default:
805  bfa_sm_fault(event);
806  }
807 }
808 
809 static void
810 bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
811 {
812  bfa_ioc_pf_enabled(iocpf->ioc);
813 }
814 
815 static void
816 bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
817 {
818  switch (event) {
819  case IOCPF_E_DISABLE:
820  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
821  break;
822 
823  case IOCPF_E_GETATTRFAIL:
824  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
825  break;
826 
827  case IOCPF_E_FAIL:
828  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
829  break;
830 
831  default:
832  bfa_sm_fault(event);
833  }
834 }
835 
836 static void
837 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
838 {
839  mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
840  msecs_to_jiffies(BFA_IOC_TOV));
841  bfa_ioc_send_disable(iocpf->ioc);
842 }
843 
844 /* IOC is being disabled */
845 static void
846 bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
847 {
848  struct bfa_ioc *ioc = iocpf->ioc;
849 
850  switch (event) {
851  case IOCPF_E_FWRSP_DISABLE:
852  del_timer(&ioc->iocpf_timer);
853  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
854  break;
855 
856  case IOCPF_E_FAIL:
857  del_timer(&ioc->iocpf_timer);
858  /*
859  * !!! fall through !!!
860  */
861 
862  case IOCPF_E_TIMEOUT:
863  writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
864  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
865  break;
866 
867  case IOCPF_E_FWRSP_ENABLE:
868  break;
869 
870  default:
871  bfa_sm_fault(event);
872  }
873 }
874 
875 static void
876 bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
877 {
878  bfa_ioc_hw_sem_get(iocpf->ioc);
879 }
880 
881 /* IOC hb ack request is being removed. */
882 static void
883 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
884 {
885  struct bfa_ioc *ioc = iocpf->ioc;
886 
887  switch (event) {
888  case IOCPF_E_SEMLOCKED:
889  bfa_ioc_sync_leave(ioc);
890  bfa_nw_ioc_hw_sem_release(ioc);
891  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
892  break;
893 
894  case IOCPF_E_SEM_ERROR:
895  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
896  bfa_ioc_pf_hwfailed(ioc);
897  break;
898 
899  case IOCPF_E_FAIL:
900  break;
901 
902  default:
903  bfa_sm_fault(event);
904  }
905 }
906 
907 /* IOC disable completion entry. */
908 static void
909 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
910 {
911  bfa_ioc_mbox_flush(iocpf->ioc);
912  bfa_ioc_pf_disabled(iocpf->ioc);
913 }
914 
915 static void
916 bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
917 {
918  struct bfa_ioc *ioc = iocpf->ioc;
919 
920  switch (event) {
921  case IOCPF_E_ENABLE:
922  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
923  break;
924 
925  case IOCPF_E_STOP:
926  bfa_ioc_firmware_unlock(ioc);
927  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
928  break;
929 
930  default:
931  bfa_sm_fault(event);
932  }
933 }
934 
935 static void
936 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
937 {
938  bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
939  bfa_ioc_hw_sem_get(iocpf->ioc);
940 }
941 
942 /* Hardware initialization failed. */
943 static void
944 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
945 {
946  struct bfa_ioc *ioc = iocpf->ioc;
947 
948  switch (event) {
949  case IOCPF_E_SEMLOCKED:
950  bfa_ioc_notify_fail(ioc);
951  bfa_ioc_sync_leave(ioc);
952  writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
953  bfa_nw_ioc_hw_sem_release(ioc);
954  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
955  break;
956 
957  case IOCPF_E_SEM_ERROR:
958  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
959  bfa_ioc_pf_hwfailed(ioc);
960  break;
961 
962  case IOCPF_E_DISABLE:
963  bfa_ioc_hw_sem_get_cancel(ioc);
964  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
965  break;
966 
967  case IOCPF_E_STOP:
968  bfa_ioc_hw_sem_get_cancel(ioc);
969  bfa_ioc_firmware_unlock(ioc);
970  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
971  break;
972 
973  case IOCPF_E_FAIL:
974  break;
975 
976  default:
977  bfa_sm_fault(event);
978  }
979 }
980 
981 static void
982 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
983 {
984 }
985 
986 /* Hardware initialization failed. */
987 static void
988 bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
989 {
990  struct bfa_ioc *ioc = iocpf->ioc;
991 
992  switch (event) {
993  case IOCPF_E_DISABLE:
994  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
995  break;
996 
997  case IOCPF_E_STOP:
998  bfa_ioc_firmware_unlock(ioc);
999  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1000  break;
1001 
1002  default:
1003  bfa_sm_fault(event);
1004  }
1005 }
1006 
1007 static void
1008 bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
1009 {
1010  /*
1011  * Mark IOC as failed in hardware and stop firmware.
1012  */
1013  bfa_ioc_lpu_stop(iocpf->ioc);
1014 
1015  /*
1016  * Flush any queued up mailbox requests.
1017  */
1018  bfa_ioc_mbox_flush(iocpf->ioc);
1019  bfa_ioc_hw_sem_get(iocpf->ioc);
1020 }
1021 
1022 /* IOC is in failed state. */
1023 static void
1024 bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1025 {
1026  struct bfa_ioc *ioc = iocpf->ioc;
1027 
1028  switch (event) {
1029  case IOCPF_E_SEMLOCKED:
1030  bfa_ioc_sync_ack(ioc);
1031  bfa_ioc_notify_fail(ioc);
1032  if (!iocpf->auto_recover) {
1033  bfa_ioc_sync_leave(ioc);
1034  writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1035  bfa_nw_ioc_hw_sem_release(ioc);
1036  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1037  } else {
1038  if (bfa_ioc_sync_complete(ioc))
1039  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1040  else {
1041  bfa_nw_ioc_hw_sem_release(ioc);
1042  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1043  }
1044  }
1045  break;
1046 
1047  case IOCPF_E_SEM_ERROR:
1048  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1049  bfa_ioc_pf_hwfailed(ioc);
1050  break;
1051 
1052  case IOCPF_E_DISABLE:
1053  bfa_ioc_hw_sem_get_cancel(ioc);
1054  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1055  break;
1056 
1057  case IOCPF_E_FAIL:
1058  break;
1059 
1060  default:
1061  bfa_sm_fault(event);
1062  }
1063 }
1064 
1065 static void
1066 bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
1067 {
1068 }
1069 
1070 /* IOC is in failed state. */
1071 static void
1072 bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1073 {
1074  switch (event) {
1075  case IOCPF_E_DISABLE:
1076  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1077  break;
1078 
1079  default:
1080  bfa_sm_fault(event);
1081  }
1082 }
1083 
1084 /* BFA IOC private functions */
1085 
1086 /* Notify common modules registered for notification. */
1087 static void
1088 bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
1089 {
1090  struct bfa_ioc_notify *notify;
1091  struct list_head *qe;
1092 
1093  list_for_each(qe, &ioc->notify_q) {
1094  notify = (struct bfa_ioc_notify *)qe;
1095  notify->cbfn(notify->cbarg, event);
1096  }
1097 }
1098 
1099 static void
1100 bfa_ioc_disable_comp(struct bfa_ioc *ioc)
1101 {
1102  ioc->cbfn->disable_cbfn(ioc->bfa);
1103  bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1104 }
1105 
1106 bool
1107 bfa_nw_ioc_sem_get(void __iomem *sem_reg)
1108 {
1109  u32 r32;
1110  int cnt = 0;
1111 #define BFA_SEM_SPINCNT 3000
1112 
1113  r32 = readl(sem_reg);
1114 
1115  while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1116  cnt++;
1117  udelay(2);
1118  r32 = readl(sem_reg);
1119  }
1120 
1121  if (!(r32 & 1))
1122  return true;
1123 
1124  return false;
1125 }
1126 
1127 void
1128 bfa_nw_ioc_sem_release(void __iomem *sem_reg)
1129 {
1130  readl(sem_reg);
1131  writel(1, sem_reg);
1132 }
1133 
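/*
 * Illustrative sketch (an assumption, not original driver code): callers are
 * expected to bracket exclusive chip accesses with the two helpers above, as
 * bfa_ioc_pll_init() and bfa_nw_ioc_smem_read() do later in this file:
 */
#if 0
	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		/* ... exclusive access to chip registers ... */
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
	}
#endif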
1134 /* Clear fwver hdr */
1135 static void
1136 bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
1137 {
1138  u32 pgnum, pgoff, loff = 0;
1139  int i;
1140 
1141  pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1142  pgoff = PSS_SMEM_PGOFF(loff);
1143  writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1144 
1145  for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
1146  writel(0, ioc->ioc_regs.smem_page_start + loff);
1147  loff += sizeof(u32);
1148  }
1149 }
1150 
1151 
1152 static void
1153 bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
1154 {
1155  struct bfi_ioc_image_hdr fwhdr;
1156  u32 fwstate, r32;
1157 
1158  /* Spin on init semaphore to serialize. */
1159  r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1160  while (r32 & 0x1) {
1161  udelay(20);
1162  r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1163  }
1164 
1165  fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1166  if (fwstate == BFI_IOC_UNINIT) {
1167  writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1168  return;
1169  }
1170 
1171  bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1172 
1173  if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
1174  writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1175  return;
1176  }
1177 
1178  bfa_ioc_fwver_clear(ioc);
1179  writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
1180  writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
1181 
1182  /*
1183  * Try to lock and then unlock the semaphore.
1184  */
1185  readl(ioc->ioc_regs.ioc_sem_reg);
1186  writel(1, ioc->ioc_regs.ioc_sem_reg);
1187 
1188  /* Unlock init semaphore */
1189  writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1190 }
1191 
1192 static void
1193 bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
1194 {
1195  u32 r32;
1196 
1197  /*
1198  * First read to the semaphore register will return 0, subsequent reads
1199  * will return 1. Semaphore is released by writing 1 to the register.
1200  */
1201  r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1202  if (r32 == ~0) {
1203  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1204  return;
1205  }
1206  if (!(r32 & 1)) {
1207  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1208  return;
1209  }
1210 
1211  mod_timer(&ioc->sem_timer, jiffies +
1212  msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
1213 }
1214 
1215 void
1216 bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
1217 {
1218  writel(1, ioc->ioc_regs.ioc_sem_reg);
1219 }
1220 
1221 static void
1222 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
1223 {
1224  del_timer(&ioc->sem_timer);
1225 }
1226 
1227 /* Initialize LPU local memory (aka secondary memory / SRAM) */
1228 static void
1229 bfa_ioc_lmem_init(struct bfa_ioc *ioc)
1230 {
1231  u32 pss_ctl;
1232  int i;
1233 #define PSS_LMEM_INIT_TIME 10000
1234 
1235  pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1236  pss_ctl &= ~__PSS_LMEM_RESET;
1237  pss_ctl |= __PSS_LMEM_INIT_EN;
1238 
1239  /*
1240  * i2c workaround 12.5khz clock
1241  */
1242  pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1243  writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1244 
1245  /*
1246  * wait for memory initialization to be complete
1247  */
1248  i = 0;
1249  do {
1250  pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1251  i++;
1252  } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1253 
1254  /*
1255  * If memory initialization is not successful, IOC timeout will catch
1256  * such failures.
1257  */
1258  BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1259 
1260  pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1261  writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1262 }
1263 
1264 static void
1265 bfa_ioc_lpu_start(struct bfa_ioc *ioc)
1266 {
1267  u32 pss_ctl;
1268 
1269  /*
1270  * Take processor out of reset.
1271  */
1272  pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1273  pss_ctl &= ~__PSS_LPU0_RESET;
1274 
1275  writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1276 }
1277 
1278 static void
1279 bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
1280 {
1281  u32 pss_ctl;
1282 
1283  /*
1284  * Put processors in reset.
1285  */
1286  pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1287  pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1288 
1289  writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1290 }
1291 
1292 /* Get driver and firmware versions. */
1293 void
1294 bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1295 {
1296  u32 pgnum;
1297  u32 loff = 0;
1298  int i;
1299  u32 *fwsig = (u32 *) fwhdr;
1300 
1301  pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1302  writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1303 
1304  for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
1305  i++) {
1306  fwsig[i] =
1307  swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1308  loff += sizeof(u32);
1309  }
1310 }
1311 
1312 /* Returns TRUE if same. */
1313 bool
1314 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1315 {
1316  struct bfi_ioc_image_hdr *drv_fwhdr;
1317  int i;
1318 
1319  drv_fwhdr = (struct bfi_ioc_image_hdr *)
1320  bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1321 
1322  for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1323  if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
1324  return false;
1325  }
1326 
1327  return true;
1328 }
1329 
1330 /* Return true if current running version is valid. Firmware signature and
1331  * execution context (driver/bios) must match.
1332  */
1333 static bool
1334 bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1335 {
1336  struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
1337 
1338  bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1339  drv_fwhdr = (struct bfi_ioc_image_hdr *)
1340  bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1341 
1342  if (fwhdr.signature != drv_fwhdr->signature)
1343  return false;
1344 
1345  if (swab32(fwhdr.bootenv) != boot_env)
1346  return false;
1347 
1348  return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
1349 }
1350 
1351 /* Conditionally flush any pending message from firmware at start. */
1352 static void
1353 bfa_ioc_msgflush(struct bfa_ioc *ioc)
1354 {
1355  u32 r32;
1356 
1357  r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1358  if (r32)
1359  writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1360 }
1361 
1362 static void
1363 bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1364 {
1365  enum bfi_ioc_state ioc_fwstate;
1366  bool fwvalid;
1367  u32 boot_env;
1368 
1369  ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1370 
1371  if (force)
1372  ioc_fwstate = BFI_IOC_UNINIT;
1373 
1374  boot_env = BFI_FWBOOT_ENV_OS;
1375 
1376  /*
1377  * check if firmware is valid
1378  */
1379  fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1380  false : bfa_ioc_fwver_valid(ioc, boot_env);
1381 
1382  if (!fwvalid) {
1383  bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1384  bfa_ioc_poll_fwinit(ioc);
1385  return;
1386  }
1387 
1388  /*
1389  * If hardware initialization is in progress (initialized by other
1390  * driver), just wait for an initialization completion interrupt.
1391  */
1392  if (ioc_fwstate == BFI_IOC_INITING) {
1393  bfa_ioc_poll_fwinit(ioc);
1394  return;
1395  }
1396 
1397  /*
1398  * If IOC function is disabled and firmware version is same,
1399  * just re-enable IOC.
1400  */
1401  if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1402  /*
1403  * When using MSI-X any pending firmware ready event should
1404  * be flushed. Otherwise MSI-X interrupts are not delivered.
1405  */
1406  bfa_ioc_msgflush(ioc);
1407  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1408  return;
1409  }
1410 
1411  /*
1412  * Initialize the h/w for any other states.
1413  */
1414  bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1415  bfa_ioc_poll_fwinit(ioc);
1416 }
1417 
1418 void
1419 bfa_nw_ioc_timeout(void *ioc_arg)
1420 {
1421  struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
1422 
1423  bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1424 }
1425 
1426 static void
1427 bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
1428 {
1429  u32 *msgp = (u32 *) ioc_msg;
1430  u32 i;
1431 
1432  BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
1433 
1434  /*
1435  * first write msg to mailbox registers
1436  */
1437  for (i = 0; i < len / sizeof(u32); i++)
1438  writel(cpu_to_le32(msgp[i]),
1439  ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1440 
1441  for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1442  writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1443 
1444  /*
1445  * write 1 to mailbox CMD to trigger LPU event
1446  */
1447  writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1448  (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1449 }
1450 
1451 static void
1452 bfa_ioc_send_enable(struct bfa_ioc *ioc)
1453 {
1454  struct bfi_ioc_ctrl_req enable_req;
1455  struct timeval tv;
1456 
1457  bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1458  bfa_ioc_portid(ioc));
1459  enable_req.clscode = htons(ioc->clscode);
1460  do_gettimeofday(&tv);
1461  enable_req.tv_sec = ntohl(tv.tv_sec);
1462  bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1463 }
1464 
1465 static void
1466 bfa_ioc_send_disable(struct bfa_ioc *ioc)
1467 {
1468  struct bfi_ioc_ctrl_req disable_req;
1469 
1470  bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1471  bfa_ioc_portid(ioc));
1472  bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1473 }
1474 
1475 static void
1476 bfa_ioc_send_getattr(struct bfa_ioc *ioc)
1477 {
1478  struct bfi_ioc_getattr_req attr_req;
1479 
1480  bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1481  bfa_ioc_portid(ioc));
1482  bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1483  bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1484 }
1485 
1486 void
1487 bfa_nw_ioc_hb_check(void *cbarg)
1488 {
1489  struct bfa_ioc *ioc = cbarg;
1490  u32 hb_count;
1491 
1492  hb_count = readl(ioc->ioc_regs.heartbeat);
1493  if (ioc->hb_count == hb_count) {
1494  bfa_ioc_recover(ioc);
1495  return;
1496  } else {
1497  ioc->hb_count = hb_count;
1498  }
1499 
1500  bfa_ioc_mbox_poll(ioc);
1501  mod_timer(&ioc->hb_timer, jiffies +
1502  msecs_to_jiffies(BFA_IOC_HB_TOV));
1503 }
1504 
1505 static void
1506 bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1507 {
1508  ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1509  mod_timer(&ioc->hb_timer, jiffies +
1510  msecs_to_jiffies(BFA_IOC_HB_TOV));
1511 }
1512 
1513 static void
1514 bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1515 {
1516  del_timer(&ioc->hb_timer);
1517 }
1518 
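/*
 * Editorial summary of the heartbeat scheme (added comment): firmware
 * continuously increments ioc_regs.heartbeat; bfa_nw_ioc_hb_check() re-arms
 * its timer every BFA_IOC_HB_TOV ms and compares the counter against the
 * previous sample.  An unchanged counter means the firmware has stalled, and
 * bfa_ioc_recover() then raises IOC_E_HBFAIL.
 */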
1519 /* Initiate a full firmware download. */
1520 static void
1521 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1522  u32 boot_env)
1523 {
1524  u32 *fwimg;
1525  u32 pgnum;
1526  u32 loff = 0;
1527  u32 chunkno = 0;
1528  u32 i;
1529  u32 asicmode;
1530 
1531  fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1532 
1533  pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1534 
1535  writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1536 
1537  for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
1538  if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1539  chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1540  fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1541  BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1542  }
1543 
1544  /*
1545  * write smem
1546  */
1547  writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]),
1548  ((ioc->ioc_regs.smem_page_start) + (loff)));
1549 
1550  loff += sizeof(u32);
1551 
1552  /*
1553  * handle page offset wrap around
1554  */
1555  loff = PSS_SMEM_PGOFF(loff);
1556  if (loff == 0) {
1557  pgnum++;
1558  writel(pgnum,
1559  ioc->ioc_regs.host_page_num_fn);
1560  }
1561  }
1562 
1563  writel(bfa_ioc_smem_pgnum(ioc, 0),
1564  ioc->ioc_regs.host_page_num_fn);
1565 
1566  /*
1567  * Set boot type, env and device mode at the end.
1568  */
1569  asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1570  ioc->port0_mode, ioc->port1_mode);
1571  writel(asicmode, ((ioc->ioc_regs.smem_page_start)
1572  + (BFI_FWBOOT_DEVMODE_OFF)));
1573  writel(boot_type, ((ioc->ioc_regs.smem_page_start)
1574  + (BFI_FWBOOT_TYPE_OFF)));
1575  writel(boot_env, ((ioc->ioc_regs.smem_page_start)
1576  + (BFI_FWBOOT_ENV_OFF)));
1577 }
1578 
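/*
 * Editorial sketch (hypothetical EX_* names): the download loop above walks
 * the firmware image one 32-bit word at a time and fetches a fresh chunk
 * whenever the word index crosses a chunk boundary.  The real helpers are
 * BFA_IOC_FLASH_CHUNK_NO()/BFA_IOC_FLASH_CHUNK_ADDR()/
 * BFA_IOC_FLASH_OFFSET_IN_CHUNK(); with an assumed chunk of EX_CHUNK_WORDS
 * words they reduce to integer division and modulo:
 */
#if 0
#define EX_CHUNK_WORDS		256			/* assumed chunk size */
#define EX_CHUNK_NO(i)		((i) / EX_CHUNK_WORDS)
#define EX_OFFSET_IN_CHUNK(i)	((i) % EX_CHUNK_WORDS)
#endif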
1579 static void
1580 bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1581 {
1582  bfa_ioc_hwinit(ioc, force);
1583 }
1584 
1585 /* BFA ioc enable reply by firmware */
1586 static void
1587 bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
1588  u8 cap_bm)
1589 {
1590  struct bfa_iocpf *iocpf = &ioc->iocpf;
1591 
1592  ioc->port_mode = ioc->port_mode_cfg = port_mode;
1593  ioc->ad_cap_bm = cap_bm;
1594  bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1595 }
1596 
1597 /* Update BFA configuration from firmware configuration. */
1598 static void
1599 bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1600 {
1601  struct bfi_ioc_attr *attr = ioc->attr;
1602 
1603  attr->adapter_prop = ntohl(attr->adapter_prop);
1604  attr->card_type = ntohl(attr->card_type);
1605  attr->maxfrsize = ntohs(attr->maxfrsize);
1606 
1607  bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1608 }
1609 
1610 /* Attach time initialization of mbox logic. */
1611 static void
1612 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1613 {
1614  struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1615  int mc;
1616 
1617  INIT_LIST_HEAD(&mod->cmd_q);
1618  for (mc = 0; mc < BFI_MC_MAX; mc++) {
1619  mod->mbhdlr[mc].cbfn = NULL;
1620  mod->mbhdlr[mc].cbarg = ioc->bfa;
1621  }
1622 }
1623 
1624 /* Mbox poll timer -- restarts any pending mailbox requests. */
1625 static void
1626 bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1627 {
1628  struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1629  struct bfa_mbox_cmd *cmd;
1630  bfa_mbox_cmd_cbfn_t cbfn;
1631  void *cbarg;
1632  u32 stat;
1633 
1634  /*
1635  * If no command pending, do nothing
1636  */
1637  if (list_empty(&mod->cmd_q))
1638  return;
1639 
1640  /*
1641  * If previous command is not yet fetched by firmware, do nothing
1642  */
1643  stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1644  if (stat)
1645  return;
1646 
1647  /*
1648  * Enqueue command to firmware.
1649  */
1650  bfa_q_deq(&mod->cmd_q, &cmd);
1651  bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1652 
1653  /*
1654  * Give a callback to the client, indicating that the command is sent
1655  */
1656  if (cmd->cbfn) {
1657  cbfn = cmd->cbfn;
1658  cbarg = cmd->cbarg;
1659  cmd->cbfn = NULL;
1660  cbfn(cbarg);
1661  }
1662 }
1663 
1664 /* Cleanup any pending requests. */
1665 static void
1666 bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
1667 {
1668  struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1669  struct bfa_mbox_cmd *cmd;
1670 
1671  while (!list_empty(&mod->cmd_q))
1672  bfa_q_deq(&mod->cmd_q, &cmd);
1673 }
1674 
1675 /* Read data from SMEM to host through PCI memmap.
1676  *
1677  * @param[in] ioc memory for IOC
1678  * @param[in] tbuf app memory to store data from smem
1679  * @param[in] soff smem offset
1680  * @param[in] sz size of smem in bytes
1681  */
1682 
1683 static int
1684 bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
1685 {
1686  u32 pgnum, loff, r32;
1687  int i, len;
1688  u32 *buf = tbuf;
1689 
1690  pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1691  loff = PSS_SMEM_PGOFF(soff);
1692 
1693  /*
1694  * Hold semaphore to serialize pll init and fwtrc.
1695  */
1696  if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
1697  return 1;
1698 
1699  writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1700 
1701  len = sz/sizeof(u32);
1702  for (i = 0; i < len; i++) {
1703  r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1704  buf[i] = be32_to_cpu(r32);
1705  loff += sizeof(u32);
1706 
1707  /*
1708  * handle page offset wrap around
1709  */
1710  loff = PSS_SMEM_PGOFF(loff);
1711  if (loff == 0) {
1712  pgnum++;
1713  writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1714  }
1715  }
1716 
1717  writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1718  ioc->ioc_regs.host_page_num_fn);
1719 
1720  /*
1721  * release semaphore
1722  */
1723  readl(ioc->ioc_regs.ioc_init_sem_reg);
1724  writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1725  return 0;
1726 }
1727 
1728 /* Retrieve saved firmware trace from a prior IOC failure. */
1729 int
1730 bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1731 {
1732  u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
1733  int tlen, status = 0;
1734 
1735  tlen = *trclen;
1736  if (tlen > BNA_DBG_FWTRC_LEN)
1737  tlen = BNA_DBG_FWTRC_LEN;
1738 
1739  status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
1740  *trclen = tlen;
1741  return status;
1742 }
1743 
1744 /* Save firmware trace if configured. */
1745 static void
1746 bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
1747 {
1748  int tlen;
1749 
1750  if (ioc->dbg_fwsave_once) {
1751  ioc->dbg_fwsave_once = 0;
1752  if (ioc->dbg_fwsave_len) {
1753  tlen = ioc->dbg_fwsave_len;
1754  bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
1755  }
1756  }
1757 }
1758 
1759 /* Retrieve saved firmware trace from a prior IOC failure. */
1760 int
1761 bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1762 {
1763  int tlen;
1764 
1765  if (ioc->dbg_fwsave_len == 0)
1766  return BFA_STATUS_ENOFSAVE;
1767 
1768  tlen = *trclen;
1769  if (tlen > ioc->dbg_fwsave_len)
1770  tlen = ioc->dbg_fwsave_len;
1771 
1772  memcpy(trcdata, ioc->dbg_fwsave, tlen);
1773  *trclen = tlen;
1774  return BFA_STATUS_OK;
1775 }
1776 
1777 static void
1778 bfa_ioc_fail_notify(struct bfa_ioc *ioc)
1779 {
1780  /*
1781  * Notify driver and common modules registered for notification.
1782  */
1783  ioc->cbfn->hbfail_cbfn(ioc->bfa);
1784  bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
1785  bfa_nw_ioc_debug_save_ftrc(ioc);
1786 }
1787 
1788 /* IOCPF to IOC interface */
1789 static void
1790 bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
1791 {
1792  bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1793 }
1794 
1795 static void
1796 bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
1797 {
1798  bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1799 }
1800 
1801 static void
1802 bfa_ioc_pf_failed(struct bfa_ioc *ioc)
1803 {
1804  bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
1805 }
1806 
1807 static void
1808 bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
1809 {
1810  bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1811 }
1812 
1813 static void
1814 bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
1815 {
1816  /*
1817  * Provide enable completion callback and AEN notification.
1818  */
1819  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1820 }
1821 
1822 /* IOC public */
1823 static enum bfa_status
1824 bfa_ioc_pll_init(struct bfa_ioc *ioc)
1825 {
1826  /*
1827  * Hold semaphore so that nobody can access the chip during init.
1828  */
1829  bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1830 
1831  bfa_ioc_pll_init_asic(ioc);
1832 
1833  ioc->pllinit = true;
1834 
1835  /* Initialize LMEM */
1836  bfa_ioc_lmem_init(ioc);
1837 
1838  /*
1839  * release semaphore.
1840  */
1841  bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1842 
1843  return BFA_STATUS_OK;
1844 }
1845 
1846 /* Interface used by diag module to do firmware boot with memory test
1847  * as the entry vector.
1848  */
1849 static void
1850 bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
1851  u32 boot_env)
1852 {
1853  bfa_ioc_stats(ioc, ioc_boots);
1854 
1855  if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1856  return;
1857 
1858  /*
1859  * Initialize IOC state of all functions on a chip reset.
1860  */
1861  if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
1862  writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
1863  writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
1864  } else {
1865  writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
1866  writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
1867  }
1868 
1869  bfa_ioc_msgflush(ioc);
1870  bfa_ioc_download_fw(ioc, boot_type, boot_env);
1871  bfa_ioc_lpu_start(ioc);
1872 }
1873 
1874 /* Enable/disable IOC failure auto recovery. */
1875 void
1876 bfa_nw_ioc_auto_recover(bool auto_recover)
1877 {
1878  bfa_nw_auto_recover = auto_recover;
1879 }
1880 
1881 static bool
1882 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
1883 {
1884  u32 *msgp = mbmsg;
1885  u32 r32;
1886  int i;
1887 
1888  r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1889  if ((r32 & 1) == 0)
1890  return false;
1891 
1892  /*
1893  * read the MBOX msg
1894  */
1895  for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1896  i++) {
1897  r32 = readl(ioc->ioc_regs.lpu_mbox +
1898  i * sizeof(u32));
1899  msgp[i] = htonl(r32);
1900  }
1901 
1902  /*
1903  * turn off mailbox interrupt by clearing mailbox status
1904  */
1905  writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1906  readl(ioc->ioc_regs.lpu_mbox_cmd);
1907 
1908  return true;
1909 }
1910 
1911 static void
1912 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1913 {
1914  union bfi_ioc_i2h_msg_u *msg;
1915  struct bfa_iocpf *iocpf = &ioc->iocpf;
1916 
1917  msg = (union bfi_ioc_i2h_msg_u *) m;
1918 
1919  bfa_ioc_stats(ioc, ioc_isrs);
1920 
1921  switch (msg->mh.msg_id) {
1922  case BFI_IOC_I2H_HBEAT:
1923  break;
1924 
1925  case BFI_IOC_I2H_ENABLE_REPLY:
1926  bfa_ioc_enable_reply(ioc,
1927  (enum bfa_mode)msg->fw_event.port_mode,
1928  msg->fw_event.cap_bm);
1929  break;
1930 
1931  case BFI_IOC_I2H_DISABLE_REPLY:
1932  bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
1933  break;
1934 
1935  case BFI_IOC_I2H_GETATTR_REPLY:
1936  bfa_ioc_getattr_reply(ioc);
1937  break;
1938 
1939  default:
1940  BUG_ON(1);
1941  }
1942 }
1943 
1944 /* IOC attach time initialization and setup.
1945  *
1946  * @param[in] ioc memory for IOC
1947  * @param[in] bfa driver instance structure
1948  */
1949 
1950 void
1951 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
1952 {
1953  ioc->bfa = bfa;
1954  ioc->cbfn = cbfn;
1955  ioc->fcmode = false;
1956  ioc->pllinit = false;
1957  ioc->dbg_fwsave_once = true;
1958  ioc->iocpf.ioc = ioc;
1959 
1960  bfa_ioc_mbox_attach(ioc);
1961  INIT_LIST_HEAD(&ioc->notify_q);
1962 
1963  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1964  bfa_fsm_send_event(ioc, IOC_E_RESET);
1965 }
1966 
1967 /* Driver detach time IOC cleanup. */
1968 void
1969 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
1970 {
1971  bfa_fsm_send_event(ioc, IOC_E_DETACH);
1972 
1973  /* Done with detach, empty the notify_q. */
1974  INIT_LIST_HEAD(&ioc->notify_q);
1975 }
1976 
1977 /* Setup IOC PCI properties.
1978  *
1979  * @param[in] pcidev PCI device information for this IOC
1980  */
1981 
1982 void
1983 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
1984  enum bfi_pcifn_class clscode)
1985 {
1986  ioc->clscode = clscode;
1987  ioc->pcidev = *pcidev;
1988 
1989  /*
1990  * Initialize IOC and device personality
1991  */
1992  ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
1993  ioc->asic_mode = BFI_ASIC_MODE_FC;
1994 
1995  switch (pcidev->device_id) {
1996  case BFA_PCI_DEVICE_ID_CT:
1997  ioc->asic_gen = BFI_ASIC_GEN_CT;
1998  ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
1999  ioc->asic_mode = BFI_ASIC_MODE_ETH;
2000  ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2001  ioc->ad_cap_bm = BFA_CM_CNA;
2002  break;
2003 
2004  case BFA_PCI_DEVICE_ID_CT2:
2005  ioc->asic_gen = BFI_ASIC_GEN_CT2;
2006  if (clscode == BFI_PCIFN_CLASS_FC &&
2007  pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2008  ioc->asic_mode = BFI_ASIC_MODE_FC16;
2009  ioc->fcmode = true;
2010  ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2011  ioc->ad_cap_bm = BFA_CM_HBA;
2012  } else {
2013  ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2014  ioc->asic_mode = BFI_ASIC_MODE_ETH;
2015  if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2016  ioc->port_mode =
2017  ioc->port_mode_cfg = BFA_MODE_CNA;
2018  ioc->ad_cap_bm = BFA_CM_CNA;
2019  } else {
2020  ioc->port_mode =
2021  ioc->port_mode_cfg = BFA_MODE_NIC;
2022  ioc->ad_cap_bm = BFA_CM_NIC;
2023  }
2024  }
2025  break;
2026 
2027  default:
2028  BUG_ON(1);
2029  }
2030 
2031  /*
2032  * Set asic specific interfaces.
2033  */
2034  if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2035  bfa_nw_ioc_set_ct_hwif(ioc);
2036  else {
2037  WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2038  bfa_nw_ioc_set_ct2_hwif(ioc);
2039  bfa_nw_ioc_ct2_poweron(ioc);
2040  }
2041 
2042  bfa_ioc_map_port(ioc);
2043  bfa_ioc_reg_init(ioc);
2044 }
2045 
2046 /* Initialize IOC dma memory.
2047  *
2048  * @param[in] dm_kva kernel virtual address of IOC dma memory
2049  * @param[in] dm_pa physical address of IOC dma memory
2050  */
2051 
2052 void
2053 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
2054 {
2055  /*
2056  * dma memory for firmware attribute
2057  */
2058  ioc->attr_dma.kva = dm_kva;
2059  ioc->attr_dma.pa = dm_pa;
2060  ioc->attr = (struct bfi_ioc_attr *) dm_kva;
2061 }
2062 
2063 /* Return size of dma memory required. */
2064 u32
2065 bfa_nw_ioc_meminfo(void)
2066 {
2067  return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
2068 }
2069 
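/*
 * Illustrative usage (an assumption about the calling driver, not code from
 * this file): the port driver allocates bfa_nw_ioc_meminfo() bytes of
 * coherent DMA memory and hands it to the IOC, roughly:
 */
#if 0
	dma_addr_t pa;
	void *kva = dma_alloc_coherent(dev, bfa_nw_ioc_meminfo(),
				       &pa, GFP_KERNEL);	/* dev: hypothetical */
	if (kva)
		bfa_nw_ioc_mem_claim(ioc, kva, pa);
#endif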
2070 void
2071 bfa_nw_ioc_enable(struct bfa_ioc *ioc)
2072 {
2073  bfa_ioc_stats(ioc, ioc_enables);
2074  ioc->dbg_fwsave_once = true;
2075 
2076  bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2077 }
2078 
2079 void
2080 bfa_nw_ioc_disable(struct bfa_ioc *ioc)
2081 {
2082  bfa_ioc_stats(ioc, ioc_disables);
2083  bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2084 }
2085 
2086 /* Initialize memory for saving firmware trace. */
2087 void
2088 bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
2089 {
2090  ioc->dbg_fwsave = dbg_fwsave;
2091  ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
2092 }
2093 
2094 static u32
2095 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
2096 {
2097  return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
2098 }
2099 
2100 /* Register mailbox message handler function, to be called by common modules */
2101 void
2102 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2103  bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2104 {
2105  struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2106 
2107  mod->mbhdlr[mc].cbfn = cbfn;
2108  mod->mbhdlr[mc].cbarg = cbarg;
2109 }
2110 
2111 /* Queue a mailbox command request to firmware. Waits if mailbox is busy.
2112  * Responsibility of caller to serialize.
2113  *
2114  * @param[in] ioc IOC instance struct
2115  * @param[in] cmd Mailbox command
2116  *
2117  * @return true if the command was queued, false if it was sent immediately
2118  */
2119 bool
2120 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
2121  bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
2122 {
2123  struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2124  u32 stat;
2125 
2126  cmd->cbfn = cbfn;
2127  cmd->cbarg = cbarg;
2128 
2129  /*
2130  * If a previous command is pending, queue new command
2131  */
2132  if (!list_empty(&mod->cmd_q)) {
2133  list_add_tail(&cmd->qe, &mod->cmd_q);
2134  return true;
2135  }
2136 
2137  /*
2138  * If mailbox is busy, queue command for poll timer
2139  */
2140  stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2141  if (stat) {
2142  list_add_tail(&cmd->qe, &mod->cmd_q);
2143  return true;
2144  }
2145 
2146  /*
2147  * mailbox is free -- queue command to firmware
2148  */
2149  bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2150 
2151  return false;
2152 }
2153 
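/*
 * Illustrative usage (assumption; handler and argument names are made up):
 * a common module registers a receive handler once and then queues commands.
 * Commands queued while the mailbox is busy are drained later by
 * bfa_ioc_mbox_poll().
 */
#if 0
	bfa_nw_ioc_mbox_regisr(ioc, BFI_MC_LL, my_msg_handler, my_arg);
	if (bfa_nw_ioc_mbox_queue(ioc, &my_cmd, NULL, NULL))
		/* command queued; it is sent once the mailbox frees up */;
#endif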
2154 /* Handle mailbox interrupts */
2155 void
2156 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
2157 {
2158  struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2159  struct bfi_mbmsg m;
2160  int mc;
2161 
2162  if (bfa_ioc_msgget(ioc, &m)) {
2163  /*
2164  * Treat IOC message class as special.
2165  */
2166  mc = m.mh.msg_class;
2167  if (mc == BFI_MC_IOC) {
2168  bfa_ioc_isr(ioc, &m);
2169  return;
2170  }
2171 
2172  if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2173  return;
2174 
2175  mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2176  }
2177 
2178  bfa_ioc_lpu_read_stat(ioc);
2179 
2180  /*
2181  * Try to send pending mailbox commands
2182  */
2183  bfa_ioc_mbox_poll(ioc);
2184 }
2185 
2186 void
2187 bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
2188 {
2189  bfa_ioc_stats(ioc, ioc_hbfails);
2190  bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2191  bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2192 }
2193 
2194 /* return true if IOC is disabled */
2195 bool
2196 bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2197 {
2198  return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2199  bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2200 }
2201 
2202 /* return true if IOC is operational */
2203 bool
2204 bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
2205 {
2206  return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2207 }
2208 
2209 /* Add to IOC heartbeat failure notification queue. To be used by common
2210  * modules such as cee, port, diag.
2211  */
2212 void
2213 bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
2214  struct bfa_ioc_notify *notify)
2215 {
2216  list_add_tail(&notify->qe, &ioc->notify_q);
2217 }
2218 
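/*
 * Illustrative usage (assumption; the my_* names are hypothetical): a common
 * module embeds a struct bfa_ioc_notify, points it at a callback, and
 * registers it; bfa_ioc_event_notify() above then invokes the callback on
 * every IOC enable/disable/failure event.
 */
#if 0
static void
my_ioc_notify(void *arg, enum bfa_ioc_event event)
{
	/* react to BFA_IOC_E_ENABLED / _DISABLED / _FAILED */
}

	/* in the module's init path: */
	notify.cbfn = my_ioc_notify;
	notify.cbarg = my_arg;
	bfa_nw_ioc_notify_register(ioc, &notify);
#endif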
2219 #define BFA_MFG_NAME "Brocade"
2220 static void
2221 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
2222  struct bfa_adapter_attr *ad_attr)
2223 {
2224  struct bfi_ioc_attr *ioc_attr;
2225 
2226  ioc_attr = ioc->attr;
2227 
2228  bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2229  bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2230  bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2231  bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2232  memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2233  sizeof(struct bfa_mfg_vpd));
2234 
2235  ad_attr->nports = bfa_ioc_get_nports(ioc);
2236  ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2237 
2238  bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2239  /* For now, model descr uses same model string */
2240  bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2241 
2242  ad_attr->card_type = ioc_attr->card_type;
2243  ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2244 
2245  if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2246  ad_attr->prototype = 1;
2247  else
2248  ad_attr->prototype = 0;
2249 
2250  ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2251  ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
2252 
2253  ad_attr->pcie_gen = ioc_attr->pcie_gen;
2254  ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2255  ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2256  ad_attr->asic_rev = ioc_attr->asic_rev;
2257 
2258  bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2259 }
2260 
2261 static enum bfa_ioc_type
2262 bfa_ioc_get_type(struct bfa_ioc *ioc)
2263 {
2264  if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2265  return BFA_IOC_TYPE_LL;
2266 
2267  BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));
2268 
2269  return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2270  ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2271 }
2272 
2273 static void
2274 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2275 {
2276  memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2277  memcpy(serial_num,
2278  (void *)ioc->attr->brcd_serialnum,
2279  BFA_ADAPTER_SERIAL_NUM_LEN);
2280 }
2281 
2282 static void
2283 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2284 {
2285  memset(fw_ver, 0, BFA_VERSION_LEN);
2286  memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2287 }
2288 
2289 static void
2290 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2291 {
2292  BUG_ON(!(chip_rev));
2293 
2294  memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2295 
2296  chip_rev[0] = 'R';
2297  chip_rev[1] = 'e';
2298  chip_rev[2] = 'v';
2299  chip_rev[3] = '-';
2300  chip_rev[4] = ioc->attr->asic_rev;
2301  chip_rev[5] = '\0';
2302 }
2303 
2304 static void
2305 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2306 {
2307  memset(optrom_ver, 0, BFA_VERSION_LEN);
2308  memcpy(optrom_ver, ioc->attr->optrom_version,
2309  BFA_VERSION_LEN);
2310 }
2311 
2312 static void
2313 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2314 {
2315  memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2316  memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2317 }
2318 
2319 static void
2320 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
2321 {
2322  struct bfi_ioc_attr *ioc_attr;
2323 
2324  BUG_ON(!(model));
2325  memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2326 
2327  ioc_attr = ioc->attr;
2328 
2329  snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2330  BFA_MFG_NAME, ioc_attr->card_type);
2331 }
2332 
2333 static enum bfa_ioc_state
2334 bfa_ioc_get_state(struct bfa_ioc *ioc)
2335 {
2336  enum bfa_iocpf_state iocpf_st;
2337  enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2338 
2339  if (ioc_st == BFA_IOC_ENABLING ||
2340  ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2341 
2342  iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2343 
2344  switch (iocpf_st) {
2345  case BFA_IOCPF_SEMWAIT:
2346  ioc_st = BFA_IOC_SEMWAIT;
2347  break;
2348 
2349  case BFA_IOCPF_HWINIT:
2350  ioc_st = BFA_IOC_HWINIT;
2351  break;
2352 
2353  case BFA_IOCPF_FWMISMATCH:
2354  ioc_st = BFA_IOC_FWMISMATCH;
2355  break;
2356 
2357  case BFA_IOCPF_FAIL:
2358  ioc_st = BFA_IOC_FAIL;
2359  break;
2360 
2361  case BFA_IOCPF_INITFAIL:
2362  ioc_st = BFA_IOC_INITFAIL;
2363  break;
2364 
2365  default:
2366  break;
2367  }
2368  }
2369  return ioc_st;
2370 }
2371 
2372 void
2373 bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2374 {
2375  memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2376 
2377  ioc_attr->state = bfa_ioc_get_state(ioc);
2378  ioc_attr->port_id = ioc->port_id;
2379  ioc_attr->port_mode = ioc->port_mode;
2380 
2381  ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2382  ioc_attr->cap_bm = ioc->ad_cap_bm;
2383 
2384  ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2385 
2386  bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2387 
2388  ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2389  ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2390  bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2391 }
2392 
2393 /* WWN public */
2394 static u64
2395 bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2396 {
2397  return ioc->attr->pwwn;
2398 }
2399 
2400 mac_t
2401 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
2402 {
2403  return ioc->attr->mac;
2404 }
2405 
2406 /* Firmware failure detected. Start recovery actions. */
2407 static void
2408 bfa_ioc_recover(struct bfa_ioc *ioc)
2409 {
2410  pr_crit("Heart Beat of IOC has failed\n");
2411  bfa_ioc_stats(ioc, ioc_hbfails);
2412  bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2413  bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2414 }
2415 
2416 /* BFA IOC PF private functions */
2417 
2418 static void
2419 bfa_iocpf_enable(struct bfa_ioc *ioc)
2420 {
2421  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2422 }
2423 
2424 static void
2425 bfa_iocpf_disable(struct bfa_ioc *ioc)
2426 {
2427  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2428 }
2429 
2430 static void
2431 bfa_iocpf_fail(struct bfa_ioc *ioc)
2432 {
2433  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2434 }
2435 
2436 static void
2437 bfa_iocpf_initfail(struct bfa_ioc *ioc)
2438 {
2439  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2440 }
2441 
2442 static void
2443 bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2444 {
2445  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2446 }
2447 
2448 static void
2449 bfa_iocpf_stop(struct bfa_ioc *ioc)
2450 {
2451  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2452 }
2453 
2454 void
2455 bfa_nw_iocpf_timeout(void *ioc_arg)
2456 {
2457  struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2458  enum bfa_iocpf_state iocpf_st;
2459 
2460  iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2461 
2462  if (iocpf_st == BFA_IOCPF_HWINIT)
2463  bfa_ioc_poll_fwinit(ioc);
2464  else
2465  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2466 }
2467 
2468 void
2469 bfa_nw_iocpf_sem_timeout(void *ioc_arg)
2470 {
2471  struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2472 
2473  bfa_ioc_hw_sem_get(ioc);
2474 }
2475 
2476 static void
2477 bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
2478 {
2479  u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2480 
2481  if (fwstate == BFI_IOC_DISABLED) {
2482  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2483  return;
2484  }
2485 
2486  if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
2487  bfa_nw_iocpf_timeout(ioc);
2488  } else {
2489  ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2490  mod_timer(&ioc->iocpf_timer, jiffies +
2491  msecs_to_jiffies(BFA_IOC_POLL_TOV));
2492  }
2493 }
2494 
2495 /*
2496  * Flash module specific
2497  */
2498 
2499 /*
2500  * FLASH DMA buffer should be big enough to hold both MFG block and
2501  * asic block (64k) at the same time and also should be 2k aligned to
2502  * avoid a write segment crossing a sector boundary.
2503  */
2504 #define BFA_FLASH_SEG_SZ 2048
2505 #define BFA_FLASH_DMA_BUF_SZ \
2506  roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
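To make the sizing rule concrete, here is a standalone sketch of the computation, assuming a placeholder 256-byte mfg block (the real sizeof(struct bfa_mfg_block) differs):

/*
 * Standalone sketch of the BFA_FLASH_DMA_BUF_SZ arithmetic; the
 * 256-byte mfg-block size is an assumption for illustration only.
 */
#include <stdio.h>

#define SEG_SZ 2048					/* BFA_FLASH_SEG_SZ */
#define ROUNDUP(x, a) ((((x) + (a) - 1) / (a)) * (a))	/* kernel roundup() */

int main(void)
{
	unsigned int mfg_block_sz = 256;	/* placeholder size */
	unsigned int buf_sz = ROUNDUP(0x010000 + mfg_block_sz, SEG_SZ);

	/* prints 0x10800: 64k asic block + mfg block, 2k aligned */
	printf("DMA buf size = 0x%x\n", buf_sz);
	return 0;
}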
2507 
2508 static void
2509 bfa_flash_cb(struct bfa_flash *flash)
2510 {
2511  flash->op_busy = 0;
2512  if (flash->cbfn)
2513  flash->cbfn(flash->cbarg, flash->status);
2514 }
2515 
2516 static void
2517 bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
2518 {
2519  struct bfa_flash *flash = cbarg;
2520 
2521  switch (event) {
2522  case BFA_IOC_E_DISABLED:
2523  case BFA_IOC_E_FAILED:
2524  if (flash->op_busy) {
2525  flash->status = BFA_STATUS_IOC_FAILURE;
2526  flash->cbfn(flash->cbarg, flash->status);
2527  flash->op_busy = 0;
2528  }
2529  break;
2530  default:
2531  break;
2532  }
2533 }
2534 
2535 /*
2536  * Send flash write request.
2537  */
2538 static void
2539 bfa_flash_write_send(struct bfa_flash *flash)
2540 {
2541  struct bfi_flash_write_req *msg =
2542  (struct bfi_flash_write_req *) flash->mb.msg;
2543  u32 len;
2544 
2545  msg->type = be32_to_cpu(flash->type);
2546  msg->instance = flash->instance;
2547  msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
2548  len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
2549  flash->residue : BFA_FLASH_DMA_BUF_SZ;
2550  msg->length = be32_to_cpu(len);
2551 
2552  /* indicate if it's the last msg of the whole write operation */
2553  msg->last = (len == flash->residue) ? 1 : 0;
2554 
2555  bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
2556  bfa_ioc_portid(flash->ioc));
2557  bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
2558  memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
2559  bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2560 
2561  flash->residue -= len;
2562  flash->offset += len;
2563 }
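The residue/offset pair above slices a large transfer into BFA_FLASH_DMA_BUF_SZ-sized requests; the write-response handler below re-invokes this routine until residue reaches zero. A standalone sketch of that loop (buffer sizes are placeholders):

/* Sketch of the chunking in bfa_flash_write_send(); DMA_BUF_SZ stands
 * in for BFA_FLASH_DMA_BUF_SZ. */
#include <stdio.h>

#define DMA_BUF_SZ 2048

int main(void)
{
	unsigned int residue = 5000;	/* bytes left to send */
	unsigned int offset = 0;	/* progress into the user buffer */

	while (residue) {
		unsigned int len = residue < DMA_BUF_SZ ? residue : DMA_BUF_SZ;
		int last = (len == residue);	/* mirrors msg->last */

		printf("send %u bytes at offset %u%s\n",
		       len, offset, last ? " (last)" : "");
		residue -= len;
		offset += len;
	}
	return 0;
}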
2564 
2565 /*
2566  * Send flash read request.
2567  *
2568  * @param[in] cbarg - callback argument
2569  */
2570 static void
2571 bfa_flash_read_send(void *cbarg)
2572 {
2573  struct bfa_flash *flash = cbarg;
2574  struct bfi_flash_read_req *msg =
2575  (struct bfi_flash_read_req *) flash->mb.msg;
2576  u32 len;
2577 
2578  msg->type = be32_to_cpu(flash->type);
2579  msg->instance = flash->instance;
2580  msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
2581  len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
2582  flash->residue : BFA_FLASH_DMA_BUF_SZ;
2583  msg->length = be32_to_cpu(len);
2584  bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
2585  bfa_ioc_portid(flash->ioc));
2586  bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
2587  bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2588 }
2589 
2590 /*
2591  * Process flash response messages upon receiving interrupts.
2592  *
2593  * @param[in] flasharg - flash structure
2594  * @param[in] msg - message structure
2595  */
2596 static void
2597 bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
2598 {
2599  struct bfa_flash *flash = flasharg;
2600  u32 status;
2601 
2602  union {
2603  struct bfi_flash_query_rsp *query;
2604  struct bfi_flash_write_rsp *write;
2605  struct bfi_flash_read_rsp *read;
2606  struct bfi_mbmsg *msg;
2607  } m;
2608 
2609  m.msg = msg;
2610 
2611  /* receiving response after ioc failure */
2612  if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
2613  return;
2614 
2615  switch (msg->mh.msg_id) {
2616  case BFI_FLASH_I2H_QUERY_RSP:
2617  status = be32_to_cpu(m.query->status);
2618  if (status == BFA_STATUS_OK) {
2619  u32 i;
2620  struct bfa_flash_attr *attr, *f;
2621 
2622  attr = (struct bfa_flash_attr *) flash->ubuf;
2623  f = (struct bfa_flash_attr *) flash->dbuf_kva;
2624  attr->status = be32_to_cpu(f->status);
2625  attr->npart = be32_to_cpu(f->npart);
2626  for (i = 0; i < attr->npart; i++) {
2627  attr->part[i].part_type =
2628  be32_to_cpu(f->part[i].part_type);
2629  attr->part[i].part_instance =
2630  be32_to_cpu(f->part[i].part_instance);
2631  attr->part[i].part_off =
2632  be32_to_cpu(f->part[i].part_off);
2633  attr->part[i].part_size =
2634  be32_to_cpu(f->part[i].part_size);
2635  attr->part[i].part_len =
2636  be32_to_cpu(f->part[i].part_len);
2637  attr->part[i].part_status =
2638  be32_to_cpu(f->part[i].part_status);
2639  }
2640  }
2641  flash->status = status;
2642  bfa_flash_cb(flash);
2643  break;
2644  case BFI_FLASH_I2H_WRITE_RSP:
2645  status = be32_to_cpu(m.write->status);
2646  if (status != BFA_STATUS_OK || flash->residue == 0) {
2647  flash->status = status;
2648  bfa_flash_cb(flash);
2649  } else
2650  bfa_flash_write_send(flash);
2651  break;
2652  case BFI_FLASH_I2H_READ_RSP:
2653  status = be32_to_cpu(m.read->status);
2654  if (status != BFA_STATUS_OK) {
2655  flash->status = status;
2656  bfa_flash_cb(flash);
2657  } else {
2658  u32 len = be32_to_cpu(m.read->length);
2659  memcpy(flash->ubuf + flash->offset,
2660  flash->dbuf_kva, len);
2661  flash->residue -= len;
2662  flash->offset += len;
2663  if (flash->residue == 0) {
2664  flash->status = status;
2665  bfa_flash_cb(flash);
2666  } else
2667  bfa_flash_read_send(flash);
2668  }
2669  break;
2670  case BFI_FLASH_I2H_BOOT_VER_RSP:
2671  case BFI_FLASH_I2H_EVENT:
2672  break;
2673  default:
2674  WARN_ON(1);
2675  }
2676 }
2677 
2678 /*
2679  * Flash memory info API.
2680  */
2681 u32
2682 bfa_nw_flash_meminfo(void)
2683 {
2684  return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2685 }
2686 
2687 /*
2688  * Flash attach API.
2689  *
2690  * @param[in] flash - flash structure
2691  * @param[in] ioc - ioc structure
2692  * @param[in] dev - device structure
2693  */
2694 void
2695 bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
2696 {
2697  flash->ioc = ioc;
2698  flash->cbfn = NULL;
2699  flash->cbarg = NULL;
2700  flash->op_busy = 0;
2701 
2702  bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
2703  bfa_q_qe_init(&flash->ioc_notify);
2704  bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
2705  list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
2706 }
2707 
2708 /*
2709  * Claim memory for flash
2710  *
2711  * @param[in] flash - flash structure
2712  * @param[in] dm_kva - pointer to virtual memory address
2713  * @param[in] dm_pa - physical memory address
2714  */
2715 void
2716 bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
2717 {
2718  flash->dbuf_kva = dm_kva;
2719  flash->dbuf_pa = dm_pa;
2720  memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
2721  dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2722  dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2723 }
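A caller is expected to attach first and then hand over a DMA region sized by the meminfo helper above; a hedged sketch of that ordering (the wrapper name is invented):

/* Hypothetical init-path wrapper: register the flash module with its
 * IOC, then claim the pre-allocated DMA buffer. */
static void example_flash_setup(struct bfa_flash *flash, struct bfa_ioc *ioc,
				void *dev, u8 *dma_kva, u64 dma_pa)
{
	bfa_nw_flash_attach(flash, ioc, dev);		/* mbox + IOC notify */
	bfa_nw_flash_memclaim(flash, dma_kva, dma_pa);	/* DMA buffer */
}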
2724 
2725 /*
2726  * Get flash attribute.
2727  *
2728  * @param[in] flash - flash structure
2729  * @param[in] attr - flash attribute structure
2730  * @param[in] cbfn - callback function
2731  * @param[in] cbarg - callback argument
2732  *
2733  * Return status.
2734  */
2735 enum bfa_status
2736 bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
2737  bfa_cb_flash cbfn, void *cbarg)
2738 {
2739  struct bfi_flash_query_req *msg =
2740  (struct bfi_flash_query_req *) flash->mb.msg;
2741 
2742  if (!bfa_nw_ioc_is_operational(flash->ioc))
2743  return BFA_STATUS_IOC_NON_OP;
2744 
2745  if (flash->op_busy)
2746  return BFA_STATUS_DEVBUSY;
2747 
2748  flash->op_busy = 1;
2749  flash->cbfn = cbfn;
2750  flash->cbarg = cbarg;
2751  flash->ubuf = (u8 *) attr;
2752 
2753  bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
2754  bfa_ioc_portid(flash->ioc));
2755  bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
2756  bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2757 
2758  return BFA_STATUS_OK;
2759 }
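Since the query completes asynchronously through the mailbox, callers pass a callback; a hedged sketch that assumes the bfa_cb_flash callback takes (cbarg, status) and waits with a kernel completion (helper names are invented):

/* Hypothetical synchronous wrapper around bfa_nw_flash_get_attr(). */
static void example_attr_done(void *cbarg, enum bfa_status status)
{
	complete((struct completion *)cbarg);
}

static enum bfa_status example_query_attr(struct bfa_flash *flash,
					  struct bfa_flash_attr *attr)
{
	DECLARE_COMPLETION_ONSTACK(done);
	enum bfa_status ret;

	ret = bfa_nw_flash_get_attr(flash, attr, example_attr_done, &done);
	if (ret == BFA_STATUS_OK)
		wait_for_completion(&done);	/* fired via bfa_flash_cb() */
	return ret;
}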
2760 
2761 /*
2762  * Update flash partition.
2763  *
2764  * @param[in] flash - flash structure
2765  * @param[in] type - flash partition type
2766  * @param[in] instance - flash partition instance
2767  * @param[in] buf - update data buffer
2768  * @param[in] len - data buffer length
2769  * @param[in] offset - offset relative to the partition starting address
2770  * @param[in] cbfn - callback function
2771  * @param[in] cbarg - callback argument
2772  *
2773  * Return status.
2774  */
2775 enum bfa_status
2776 bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
2777  void *buf, u32 len, u32 offset,
2778  bfa_cb_flash cbfn, void *cbarg)
2779 {
2780  if (!bfa_nw_ioc_is_operational(flash->ioc))
2781  return BFA_STATUS_IOC_NON_OP;
2782 
2783  /*
2784  * 'len' must be on a word (4-byte) boundary
2785  */
2786  if (!len || (len & 0x03))
2787  return BFA_STATUS_FLASH_BAD_LEN;
2788 
2789  if (type == BFA_FLASH_PART_MFG)
2790  return BFA_STATUS_EINVAL;
2791 
2792  if (flash->op_busy)
2793  return BFA_STATUS_DEVBUSY;
2794 
2795  flash->op_busy = 1;
2796  flash->cbfn = cbfn;
2797  flash->cbarg = cbarg;
2798  flash->type = type;
2799  flash->instance = instance;
2800  flash->residue = len;
2801  flash->offset = 0;
2802  flash->addr_off = offset;
2803  flash->ubuf = buf;
2804 
2805  bfa_flash_write_send(flash);
2806 
2807  return BFA_STATUS_OK;
2808 }
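A hedged caller sketch for the update path, honoring the checks above (operational IOC, 4-byte-aligned length, never the MFG partition); BFA_FLASH_PART_FWIMG is assumed to be a valid partition type from bfa_defs.h:

/* Hypothetical firmware-image update: instance 0, partition offset 0. */
static enum bfa_status example_update_fwimg(struct bfa_flash *flash,
					    void *img, u32 len,
					    bfa_cb_flash cbfn, void *cbarg)
{
	if (!len || (len & 0x03))	/* mirrors the driver's length check */
		return BFA_STATUS_FLASH_BAD_LEN;

	return bfa_nw_flash_update_part(flash, BFA_FLASH_PART_FWIMG, 0,
					img, len, 0, cbfn, cbarg);
}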
2809 
2810 /*
2811  * Read flash partition.
2812  *
2813  * @param[in] flash - flash structure
2814  * @param[in] type - flash partition type
2815  * @param[in] instance - flash partition instance
2816  * @param[in] buf - read data buffer
2817  * @param[in] len - data buffer length
2818  * @param[in] offset - offset relative to the partition starting address
2819  * @param[in] cbfn - callback function
2820  * @param[in] cbarg - callback argument
2821  *
2822  * Return status.
2823  */
2824 enum bfa_status
2825 bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
2826  void *buf, u32 len, u32 offset,
2827  bfa_cb_flash cbfn, void *cbarg)
2828 {
2829  if (!bfa_nw_ioc_is_operational(flash->ioc))
2830  return BFA_STATUS_IOC_NON_OP;
2831 
2832  /*
2833  * 'len' must be on a word (4-byte) boundary
2834  */
2835  if (!len || (len & 0x03))
2836  return BFA_STATUS_FLASH_BAD_LEN;
2837 
2838  if (flash->op_busy)
2839  return BFA_STATUS_DEVBUSY;
2840 
2841  flash->op_busy = 1;
2842  flash->cbfn = cbfn;
2843  flash->cbarg = cbarg;
2844  flash->type = type;
2845  flash->instance = instance;
2846  flash->residue = len;
2847  flash->offset = 0;
2848  flash->addr_off = offset;
2849  flash->ubuf = buf;
2850 
2851  bfa_flash_read_send(flash);
2852 
2853  return BFA_STATUS_OK;
2854 }
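And the symmetric read path, with the same hedges (partition constant and wrapper name are illustrative):

/* Hypothetical partition read into a caller-supplied buffer. */
static enum bfa_status example_read_fwimg(struct bfa_flash *flash,
					  void *buf, u32 len,
					  bfa_cb_flash cbfn, void *cbarg)
{
	return bfa_nw_flash_read_part(flash, BFA_FLASH_PART_FWIMG, 0,
				      buf, len, 0, cbfn, cbarg);
}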