bfa_ioc.c (Linux Kernel 3.7.1)
1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * General Public License for more details.
16  */
17 
18 #include "bfad_drv.h"
19 #include "bfad_im.h"
20 #include "bfa_ioc.h"
21 #include "bfi_reg.h"
22 #include "bfa_defs.h"
23 #include "bfa_defs_svc.h"
24 
25 BFA_TRC_FILE(CNA, IOC);
26 
27 /*
28  * IOC local definitions
29  */
30 #define BFA_IOC_TOV 3000 /* msecs */
31 #define BFA_IOC_HWSEM_TOV 500 /* msecs */
32 #define BFA_IOC_HB_TOV 500 /* msecs */
33 #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
34 #define BFA_IOC_POLL_TOV BFA_TIMER_FREQ
35 
36 #define bfa_ioc_timer_start(__ioc) \
37  bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
38  bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
39 #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
40 
41 #define bfa_hb_timer_start(__ioc) \
42  bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
43  bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
44 #define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
45 
46 #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
47 
48 /*
49  * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
50  */
51 
52 #define bfa_ioc_firmware_lock(__ioc) \
53  ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
54 #define bfa_ioc_firmware_unlock(__ioc) \
55  ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
56 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
57 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
58 #define bfa_ioc_notify_fail(__ioc) \
59  ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
60 #define bfa_ioc_sync_start(__ioc) \
61  ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
62 #define bfa_ioc_sync_join(__ioc) \
63  ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
64 #define bfa_ioc_sync_leave(__ioc) \
65  ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
66 #define bfa_ioc_sync_ack(__ioc) \
67  ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
68 #define bfa_ioc_sync_complete(__ioc) \
69  ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
70 
71 #define bfa_ioc_mbox_cmd_pending(__ioc) \
72  (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
73  readl((__ioc)->ioc_regs.hfn_mbox_cmd))
74 
75 bfa_boolean_t bfa_auto_recover = BFA_TRUE;
76 
77 /*
78  * forward declarations
79  */
80 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
81 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
82 static void bfa_ioc_timeout(void *ioc);
83 static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
84 static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
85 static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
86 static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
87 static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
88 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
89 static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
90 static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
91 static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
92  enum bfa_ioc_event_e event);
93 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
94 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
95 static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
96 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
97 
98 /*
99  * IOC state machine definitions/declarations
100  */
101 enum ioc_event {
102  IOC_E_RESET = 1, /* IOC reset request */
103  IOC_E_ENABLE = 2, /* IOC enable request */
104  IOC_E_DISABLE = 3, /* IOC disable request */
105  IOC_E_DETACH = 4, /* driver detach cleanup */
106  IOC_E_ENABLED = 5, /* f/w enabled */
107  IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
108  IOC_E_DISABLED = 7, /* f/w disabled */
109  IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */
110  IOC_E_HBFAIL = 9, /* heartbeat failure */
111  IOC_E_HWERROR = 10, /* hardware error interrupt */
112  IOC_E_TIMEOUT = 11, /* timeout */
113  IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
114 };
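/*
 * [Editor's illustrative sketch -- not part of the original file.]
 * Events from the enum above are delivered to the IOC state machine
 * through bfa_fsm_send_event() (bfa_cs.h); the current state handler
 * held in ioc->fsm then decides the transition, e.g.:
 *
 *	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
 */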
115 
116 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
117 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
118 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
119 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
120 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
121 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
122 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
123 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
124 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
125 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
126 
127 static struct bfa_sm_table_s ioc_sm_table[] = {
128  {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
129  {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
130  {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
131  {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
132  {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
133  {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
134  {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
135  {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
136  {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
137  {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
138 };
139 
140 /*
141  * IOCPF state machine definitions/declarations
142  */
143 
144 #define bfa_iocpf_timer_start(__ioc) \
145  bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
146  bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
147 #define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
148 
149 #define bfa_iocpf_poll_timer_start(__ioc) \
150  bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
151  bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
152 
153 #define bfa_sem_timer_start(__ioc) \
154  bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
155  bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
156 #define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
157 
158 /*
159  * Forward declarations for iocpf state machine
160  */
161 static void bfa_iocpf_timeout(void *ioc_arg);
162 static void bfa_iocpf_sem_timeout(void *ioc_arg);
163 static void bfa_iocpf_poll_timeout(void *ioc_arg);
164 
165 /*
166  * IOCPF state machine events
167  */
168 enum iocpf_event {
169  IOCPF_E_ENABLE = 1, /* IOCPF enable request */
170  IOCPF_E_DISABLE = 2, /* IOCPF disable request */
171  IOCPF_E_STOP = 3, /* stop on driver detach */
172  IOCPF_E_FWREADY = 4, /* f/w initialization done */
173  IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
174  IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
175  IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
176  IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
177  IOCPF_E_GETATTRFAIL = 9, /* getattr fail notice by ioc sm */
178  IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
179  IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
180  IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */
181 };
182 
183 /*
184  * IOCPF states
185  */
186 enum bfa_iocpf_state {
187  BFA_IOCPF_RESET = 1, /* IOC is in reset state */
188  BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */
189  BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */
190  BFA_IOCPF_READY = 4, /* IOCPF is initialized */
191  BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */
192  BFA_IOCPF_FAIL = 6, /* IOCPF failed */
193  BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */
194  BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */
195  BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from drivers */
196 };
197 
198 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
199 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
200 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
201 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
202 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
203 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
204 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
205 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
206  enum iocpf_event);
207 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
208 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
209 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
210 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
211 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
212  enum iocpf_event);
213 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
214 
215 static struct bfa_sm_table_s iocpf_sm_table[] = {
216  {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
217  {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
218  {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
219  {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
220  {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
221  {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
222  {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
223  {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
224  {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
225  {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
226  {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
227  {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
228  {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
229  {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
230 };
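/*
 * [Editor's illustrative sketch -- not part of the original file.]
 * The *_sm_table arrays map a state-handler function pointer back to
 * the externally visible state enum. Consumers use a lookup helper
 * along the lines of bfa_sm_to_state() from bfa_cs.h:
 *
 *	enum bfa_ioc_state st =
 *		bfa_sm_to_state(ioc_sm_table, (bfa_sm_t)ioc->fsm);
 */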
231 
232 /*
233  * IOC State Machine
234  */
235 
236 /*
237  * Beginning state. IOC uninit state.
238  */
239 
240 static void
241 bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
242 {
243 }
244 
245 /*
246  * IOC is in uninit state.
247  */
248 static void
249 bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
250 {
251  bfa_trc(ioc, event);
252 
253  switch (event) {
254  case IOC_E_RESET:
255  bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
256  break;
257 
258  default:
259  bfa_sm_fault(ioc, event);
260  }
261 }
262 /*
263  * Reset entry actions -- initialize state machine
264  */
265 static void
266 bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
267 {
268  bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
269 }
270 
271 /*
272  * IOC is in reset state.
273  */
274 static void
275 bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
276 {
277  bfa_trc(ioc, event);
278 
279  switch (event) {
280  case IOC_E_ENABLE:
281  bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
282  break;
283 
284  case IOC_E_DISABLE:
285  bfa_ioc_disable_comp(ioc);
286  break;
287 
288  case IOC_E_DETACH:
289  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
290  break;
291 
292  default:
293  bfa_sm_fault(ioc, event);
294  }
295 }
296 
297 
298 static void
299 bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
300 {
301  bfa_iocpf_enable(ioc);
302 }
303 
304 /*
305  * Host IOC function is being enabled, awaiting response from firmware.
306  * Semaphore is acquired.
307  */
308 static void
309 bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
310 {
311  bfa_trc(ioc, event);
312 
313  switch (event) {
314  case IOC_E_ENABLED:
315  bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
316  break;
317 
318  case IOC_E_PFFAILED:
319  /* !!! fall through !!! */
320  case IOC_E_HWERROR:
321  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
322  bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
323  if (event != IOC_E_PFFAILED)
324  bfa_iocpf_initfail(ioc);
325  break;
326 
327  case IOC_E_HWFAILED:
328  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
329  bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
330  break;
331 
332  case IOC_E_DISABLE:
333  bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
334  break;
335 
336  case IOC_E_DETACH:
337  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
338  bfa_iocpf_stop(ioc);
339  break;
340 
341  case IOC_E_ENABLE:
342  break;
343 
344  default:
345  bfa_sm_fault(ioc, event);
346  }
347 }
348 
349 
350 static void
351 bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
352 {
353  bfa_ioc_timer_start(ioc);
354  bfa_ioc_send_getattr(ioc);
355 }
356 
357 /*
358  * IOC configuration in progress. Timer is active.
359  */
360 static void
361 bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
362 {
363  bfa_trc(ioc, event);
364 
365  switch (event) {
366  case IOC_E_FWRSP_GETATTR:
367  bfa_ioc_timer_stop(ioc);
368  bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
369  break;
370 
371  case IOC_E_PFFAILED:
372  case IOC_E_HWERROR:
373  bfa_ioc_timer_stop(ioc);
374  /* !!! fall through !!! */
375  case IOC_E_TIMEOUT:
376  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
377  bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
378  if (event != IOC_E_PFFAILED)
379  bfa_iocpf_getattrfail(ioc);
380  break;
381 
382  case IOC_E_DISABLE:
383  bfa_ioc_timer_stop(ioc);
384  bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
385  break;
386 
387  case IOC_E_ENABLE:
388  break;
389 
390  default:
391  bfa_sm_fault(ioc, event);
392  }
393 }
394 
395 static void
396 bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
397 {
398  struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
399 
400  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
401  bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
402  bfa_ioc_hb_monitor(ioc);
403  BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
404  bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
405 }
406 
407 static void
408 bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
409 {
410  bfa_trc(ioc, event);
411 
412  switch (event) {
413  case IOC_E_ENABLE:
414  break;
415 
416  case IOC_E_DISABLE:
417  bfa_hb_timer_stop(ioc);
418  bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
419  break;
420 
421  case IOC_E_PFFAILED:
422  case IOC_E_HWERROR:
423  bfa_hb_timer_stop(ioc);
424  /* !!! fall through !!! */
425  case IOC_E_HBFAIL:
426  if (ioc->iocpf.auto_recover)
427  bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
428  else
429  bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
430 
431  bfa_ioc_fail_notify(ioc);
432 
433  if (event != IOC_E_PFFAILED)
434  bfa_iocpf_fail(ioc);
435  break;
436 
437  default:
438  bfa_sm_fault(ioc, event);
439  }
440 }
441 
442 
443 static void
444 bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
445 {
446  struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
447  bfa_iocpf_disable(ioc);
448  BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
449  bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
450 }
451 
452 /*
453  * IOC is being disabled
454  */
455 static void
456 bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
457 {
458  bfa_trc(ioc, event);
459 
460  switch (event) {
461  case IOC_E_DISABLED:
462  bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
463  break;
464 
465  case IOC_E_HWERROR:
466  /*
467  * No state change. Will move to disabled state
468  * after iocpf sm completes failure processing and
469  * moves to disabled state.
470  */
471  bfa_iocpf_fail(ioc);
472  break;
473 
474  case IOC_E_HWFAILED:
475  bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
476  bfa_ioc_disable_comp(ioc);
477  break;
478 
479  default:
480  bfa_sm_fault(ioc, event);
481  }
482 }
483 
484 /*
485  * IOC disable completion entry.
486  */
487 static void
488 bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
489 {
490  bfa_ioc_disable_comp(ioc);
491 }
492 
493 static void
494 bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
495 {
496  bfa_trc(ioc, event);
497 
498  switch (event) {
499  case IOC_E_ENABLE:
500  bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
501  break;
502 
503  case IOC_E_DISABLE:
504  ioc->cbfn->disable_cbfn(ioc->bfa);
505  break;
506 
507  case IOC_E_DETACH:
508  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
509  bfa_iocpf_stop(ioc);
510  break;
511 
512  default:
513  bfa_sm_fault(ioc, event);
514  }
515 }
516 
517 
518 static void
519 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
520 {
521  bfa_trc(ioc, 0);
522 }
523 
524 /*
525  * Hardware initialization retry.
526  */
527 static void
528 bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
529 {
530  bfa_trc(ioc, event);
531 
532  switch (event) {
533  case IOC_E_ENABLED:
534  bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
535  break;
536 
537  case IOC_E_PFFAILED:
538  case IOC_E_HWERROR:
539  /*
540  * Initialization retry failed.
541  */
542  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
543  bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
544  if (event != IOC_E_PFFAILED)
545  bfa_iocpf_initfail(ioc);
546  break;
547 
548  case IOC_E_HWFAILED:
549  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
550  bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
551  break;
552 
553  case IOC_E_ENABLE:
554  break;
555 
556  case IOC_E_DISABLE:
557  bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
558  break;
559 
560  case IOC_E_DETACH:
561  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
562  bfa_iocpf_stop(ioc);
563  break;
564 
565  default:
566  bfa_sm_fault(ioc, event);
567  }
568 }
569 
570 
571 static void
572 bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
573 {
574  bfa_trc(ioc, 0);
575 }
576 
577 /*
578  * IOC failure.
579  */
580 static void
581 bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
582 {
583  bfa_trc(ioc, event);
584 
585  switch (event) {
586 
587  case IOC_E_ENABLE:
588  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
589  break;
590 
591  case IOC_E_DISABLE:
592  bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
593  break;
594 
595  case IOC_E_DETACH:
596  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
597  bfa_iocpf_stop(ioc);
598  break;
599 
600  case IOC_E_HWERROR:
601  case IOC_E_HWFAILED:
602  /*
603  * HB failure / HW error notification, ignore.
604  */
605  break;
606  default:
607  bfa_sm_fault(ioc, event);
608  }
609 }
610 
611 static void
612 bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
613 {
614  bfa_trc(ioc, 0);
615 }
616 
617 static void
618 bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
619 {
620  bfa_trc(ioc, event);
621 
622  switch (event) {
623  case IOC_E_ENABLE:
624  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
625  break;
626 
627  case IOC_E_DISABLE:
628  ioc->cbfn->disable_cbfn(ioc->bfa);
629  break;
630 
631  case IOC_E_DETACH:
632  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
633  break;
634 
635  case IOC_E_HWERROR:
636  /* Ignore - already in hwfail state */
637  break;
638 
639  default:
640  bfa_sm_fault(ioc, event);
641  }
642 }
643 
644 /*
645  * IOCPF State Machine
646  */
647 
648 /*
649  * Reset entry actions -- initialize state machine
650  */
651 static void
652 bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
653 {
654  iocpf->fw_mismatch_notified = BFA_FALSE;
655  iocpf->auto_recover = bfa_auto_recover;
656 }
657 
658 /*
659  * Beginning state. IOC is in reset state.
660  */
661 static void
662 bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
663 {
664  struct bfa_ioc_s *ioc = iocpf->ioc;
665 
666  bfa_trc(ioc, event);
667 
668  switch (event) {
669  case IOCPF_E_ENABLE:
670  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
671  break;
672 
673  case IOCPF_E_STOP:
674  break;
675 
676  default:
677  bfa_sm_fault(ioc, event);
678  }
679 }
680 
681 /*
682  * Semaphore should be acquired for version check.
683  */
684 static void
685 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
686 {
687  struct bfi_ioc_image_hdr_s fwhdr;
688  u32 r32, fwstate, pgnum, pgoff, loff = 0;
689  int i;
690 
691  /*
692  * Spin on init semaphore to serialize.
693  */
694  r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
695  while (r32 & 0x1) {
696  udelay(20);
697  r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
698  }
699 
700  /* h/w sem init */
701  fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
702  if (fwstate == BFI_IOC_UNINIT) {
703  writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
704  goto sem_get;
705  }
706 
707  bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
708 
709  if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
710  writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
711  goto sem_get;
712  }
713 
714  /*
715  * Clear fwver hdr
716  */
717  pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
718  pgoff = PSS_SMEM_PGOFF(loff);
719  writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
720 
721  for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
722  bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
723  loff += sizeof(u32);
724  }
725 
726  bfa_trc(iocpf->ioc, fwstate);
727  bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
728  writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
729  writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);
730 
731  /*
732  * Unlock the hw semaphore. Should be here only once per boot.
733  */
734  bfa_ioc_ownership_reset(iocpf->ioc);
735 
736  /*
737  * unlock init semaphore.
738  */
739  writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
740 
741 sem_get:
742  bfa_ioc_hw_sem_get(iocpf->ioc);
743 }
744 
745 /*
746  * Awaiting h/w semaphore to continue with version check.
747  */
748 static void
749 bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
750 {
751  struct bfa_ioc_s *ioc = iocpf->ioc;
752 
753  bfa_trc(ioc, event);
754 
755  switch (event) {
756  case IOCPF_E_SEMLOCKED:
757  if (bfa_ioc_firmware_lock(ioc)) {
758  if (bfa_ioc_sync_start(ioc)) {
759  bfa_ioc_sync_join(ioc);
760  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
761  } else {
762  bfa_ioc_firmware_unlock(ioc);
763  writel(1, ioc->ioc_regs.ioc_sem_reg);
764  bfa_sem_timer_start(ioc);
765  }
766  } else {
767  writel(1, ioc->ioc_regs.ioc_sem_reg);
768  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
769  }
770  break;
771 
772  case IOCPF_E_SEM_ERROR:
773  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
774  bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
775  break;
776 
777  case IOCPF_E_DISABLE:
778  bfa_sem_timer_stop(ioc);
779  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
780  bfa_fsm_send_event(ioc, IOC_E_DISABLED);
781  break;
782 
783  case IOCPF_E_STOP:
784  bfa_sem_timer_stop(ioc);
785  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
786  break;
787 
788  default:
789  bfa_sm_fault(ioc, event);
790  }
791 }
792 
793 /*
794  * Notify enable completion callback.
795  */
796 static void
797 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
798 {
799  /*
800  * Call only the first time sm enters fwmismatch state.
801  */
802  if (iocpf->fw_mismatch_notified == BFA_FALSE)
803  bfa_ioc_pf_fwmismatch(iocpf->ioc);
804 
805  iocpf->fw_mismatch_notified = BFA_TRUE;
806  bfa_iocpf_timer_start(iocpf->ioc);
807 }
808 
809 /*
810  * Awaiting firmware version match.
811  */
812 static void
813 bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
814 {
815  struct bfa_ioc_s *ioc = iocpf->ioc;
816 
817  bfa_trc(ioc, event);
818 
819  switch (event) {
820  case IOCPF_E_TIMEOUT:
821  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
822  break;
823 
824  case IOCPF_E_DISABLE:
825  bfa_iocpf_timer_stop(ioc);
826  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
827  bfa_fsm_send_event(ioc, IOC_E_DISABLED);
828  break;
829 
830  case IOCPF_E_STOP:
831  bfa_iocpf_timer_stop(ioc);
832  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
833  break;
834 
835  default:
836  bfa_sm_fault(ioc, event);
837  }
838 }
839 
840 /*
841  * Request for semaphore.
842  */
843 static void
844 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
845 {
846  bfa_ioc_hw_sem_get(iocpf->ioc);
847 }
848 
849 /*
850  * Awaiting semaphore for h/w initialization.
851  */
852 static void
853 bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
854 {
855  struct bfa_ioc_s *ioc = iocpf->ioc;
856 
857  bfa_trc(ioc, event);
858 
859  switch (event) {
860  case IOCPF_E_SEMLOCKED:
861  if (bfa_ioc_sync_complete(ioc)) {
862  bfa_ioc_sync_join(ioc);
863  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
864  } else {
865  writel(1, ioc->ioc_regs.ioc_sem_reg);
866  bfa_sem_timer_start(ioc);
867  }
868  break;
869 
870  case IOCPF_E_SEM_ERROR:
871  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
872  bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
873  break;
874 
875  case IOCPF_E_DISABLE:
876  bfa_sem_timer_stop(ioc);
877  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
878  break;
879 
880  default:
881  bfa_sm_fault(ioc, event);
882  }
883 }
884 
885 static void
886 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
887 {
888  iocpf->poll_time = 0;
889  bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
890 }
891 
892 /*
893  * Hardware is being initialized. Interrupts are enabled.
894  * Holding hardware semaphore lock.
895  */
896 static void
897 bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
898 {
899  struct bfa_ioc_s *ioc = iocpf->ioc;
900 
901  bfa_trc(ioc, event);
902 
903  switch (event) {
904  case IOCPF_E_FWREADY:
905  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
906  break;
907 
908  case IOCPF_E_TIMEOUT:
909  writel(1, ioc->ioc_regs.ioc_sem_reg);
910  bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
911  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
912  break;
913 
914  case IOCPF_E_DISABLE:
915  bfa_iocpf_timer_stop(ioc);
916  bfa_ioc_sync_leave(ioc);
917  writel(1, ioc->ioc_regs.ioc_sem_reg);
918  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
919  break;
920 
921  default:
922  bfa_sm_fault(ioc, event);
923  }
924 }
925 
926 static void
927 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
928 {
929  bfa_iocpf_timer_start(iocpf->ioc);
930  /*
931  * Enable Interrupts before sending fw IOC ENABLE cmd.
932  */
933  iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
934  bfa_ioc_send_enable(iocpf->ioc);
935 }
936 
937 /*
938  * Host IOC function is being enabled, awaiting response from firmware.
939  * Semaphore is acquired.
940  */
941 static void
942 bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
943 {
944  struct bfa_ioc_s *ioc = iocpf->ioc;
945 
946  bfa_trc(ioc, event);
947 
948  switch (event) {
949  case IOCPF_E_FWRSP_ENABLE:
950  bfa_iocpf_timer_stop(ioc);
951  writel(1, ioc->ioc_regs.ioc_sem_reg);
952  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
953  break;
954 
955  case IOCPF_E_INITFAIL:
956  bfa_iocpf_timer_stop(ioc);
957  /*
958  * !!! fall through !!!
959  */
960 
961  case IOCPF_E_TIMEOUT:
962  writel(1, ioc->ioc_regs.ioc_sem_reg);
963  if (event == IOCPF_E_TIMEOUT)
964  bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
965  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
966  break;
967 
968  case IOCPF_E_DISABLE:
969  bfa_iocpf_timer_stop(ioc);
970  writel(1, ioc->ioc_regs.ioc_sem_reg);
971  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
972  break;
973 
974  default:
975  bfa_sm_fault(ioc, event);
976  }
977 }
978 
979 static void
980 bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
981 {
982  bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
983 }
984 
985 static void
986 bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
987 {
988  struct bfa_ioc_s *ioc = iocpf->ioc;
989 
990  bfa_trc(ioc, event);
991 
992  switch (event) {
993  case IOCPF_E_DISABLE:
994  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
995  break;
996 
997  case IOCPF_E_GETATTRFAIL:
998  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
999  break;
1000 
1001  case IOCPF_E_FAIL:
1002  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
1003  break;
1004 
1005  default:
1006  bfa_sm_fault(ioc, event);
1007  }
1008 }
1009 
1010 static void
1011 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
1012 {
1013  bfa_iocpf_timer_start(iocpf->ioc);
1014  bfa_ioc_send_disable(iocpf->ioc);
1015 }
1016 
1017 /*
1018  * IOC is being disabled
1019  */
1020 static void
1021 bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1022 {
1023  struct bfa_ioc_s *ioc = iocpf->ioc;
1024 
1025  bfa_trc(ioc, event);
1026 
1027  switch (event) {
1028  case IOCPF_E_FWRSP_DISABLE:
1029  bfa_iocpf_timer_stop(ioc);
1030  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1031  break;
1032 
1033  case IOCPF_E_FAIL:
1034  bfa_iocpf_timer_stop(ioc);
1035  /*
1036  * !!! fall through !!!
1037  */
1038 
1039  case IOCPF_E_TIMEOUT:
1040  writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1041  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1042  break;
1043 
1044  case IOCPF_E_FWRSP_ENABLE:
1045  break;
1046 
1047  default:
1048  bfa_sm_fault(ioc, event);
1049  }
1050 }
1051 
1052 static void
1053 bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
1054 {
1055  bfa_ioc_hw_sem_get(iocpf->ioc);
1056 }
1057 
1058 /*
1059  * IOC hb ack request is being removed.
1060  */
1061 static void
1062 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1063 {
1064  struct bfa_ioc_s *ioc = iocpf->ioc;
1065 
1066  bfa_trc(ioc, event);
1067 
1068  switch (event) {
1069  case IOCPF_E_SEMLOCKED:
1070  bfa_ioc_sync_leave(ioc);
1071  writel(1, ioc->ioc_regs.ioc_sem_reg);
1072  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1073  break;
1074 
1075  case IOCPF_E_SEM_ERROR:
1076  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1077  bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1078  break;
1079 
1080  case IOCPF_E_FAIL:
1081  break;
1082 
1083  default:
1084  bfa_sm_fault(ioc, event);
1085  }
1086 }
1087 
1088 /*
1089  * IOC disable completion entry.
1090  */
1091 static void
1092 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
1093 {
1094  bfa_ioc_mbox_flush(iocpf->ioc);
1095  bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
1096 }
1097 
1098 static void
1099 bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1100 {
1101  struct bfa_ioc_s *ioc = iocpf->ioc;
1102 
1103  bfa_trc(ioc, event);
1104 
1105  switch (event) {
1106  case IOCPF_E_ENABLE:
1107  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1108  break;
1109 
1110  case IOCPF_E_STOP:
1111  bfa_ioc_firmware_unlock(ioc);
1112  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1113  break;
1114 
1115  default:
1116  bfa_sm_fault(ioc, event);
1117  }
1118 }
1119 
1120 static void
1121 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
1122 {
1123  bfa_ioc_debug_save_ftrc(iocpf->ioc);
1124  bfa_ioc_hw_sem_get(iocpf->ioc);
1125 }
1126 
1127 /*
1128  * Hardware initialization failed.
1129  */
1130 static void
1131 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1132 {
1133  struct bfa_ioc_s *ioc = iocpf->ioc;
1134 
1135  bfa_trc(ioc, event);
1136 
1137  switch (event) {
1138  case IOCPF_E_SEMLOCKED:
1139  bfa_ioc_notify_fail(ioc);
1140  bfa_ioc_sync_leave(ioc);
1141  writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1142  writel(1, ioc->ioc_regs.ioc_sem_reg);
1143  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1144  break;
1145 
1146  case IOCPF_E_SEM_ERROR:
1147  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1148  bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1149  break;
1150 
1151  case IOCPF_E_DISABLE:
1152  bfa_sem_timer_stop(ioc);
1153  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1154  break;
1155 
1156  case IOCPF_E_STOP:
1157  bfa_sem_timer_stop(ioc);
1158  bfa_ioc_firmware_unlock(ioc);
1159  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1160  break;
1161 
1162  case IOCPF_E_FAIL:
1163  break;
1164 
1165  default:
1166  bfa_sm_fault(ioc, event);
1167  }
1168 }
1169 
1170 static void
1171 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
1172 {
1173  bfa_trc(iocpf->ioc, 0);
1174 }
1175 
1176 /*
1177  * Hardware initialization failed.
1178  */
1179 static void
1180 bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1181 {
1182  struct bfa_ioc_s *ioc = iocpf->ioc;
1183 
1184  bfa_trc(ioc, event);
1185 
1186  switch (event) {
1187  case IOCPF_E_DISABLE:
1188  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1189  break;
1190 
1191  case IOCPF_E_STOP:
1192  bfa_ioc_firmware_unlock(ioc);
1193  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1194  break;
1195 
1196  default:
1197  bfa_sm_fault(ioc, event);
1198  }
1199 }
1200 
1201 static void
1202 bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
1203 {
1204  /*
1205  * Mark IOC as failed in hardware and stop firmware.
1206  */
1207  bfa_ioc_lpu_stop(iocpf->ioc);
1208 
1209  /*
1210  * Flush any queued up mailbox requests.
1211  */
1212  bfa_ioc_mbox_flush(iocpf->ioc);
1213 
1214  bfa_ioc_hw_sem_get(iocpf->ioc);
1215 }
1216 
1217 static void
1218 bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1219 {
1220  struct bfa_ioc_s *ioc = iocpf->ioc;
1221 
1222  bfa_trc(ioc, event);
1223 
1224  switch (event) {
1225  case IOCPF_E_SEMLOCKED:
1226  bfa_ioc_sync_ack(ioc);
1227  bfa_ioc_notify_fail(ioc);
1228  if (!iocpf->auto_recover) {
1229  bfa_ioc_sync_leave(ioc);
1230  writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1231  writel(1, ioc->ioc_regs.ioc_sem_reg);
1232  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1233  } else {
1234  if (bfa_ioc_sync_complete(ioc))
1235  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1236  else {
1237  writel(1, ioc->ioc_regs.ioc_sem_reg);
1238  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1239  }
1240  }
1241  break;
1242 
1243  case IOCPF_E_SEM_ERROR:
1244  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1245  bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1246  break;
1247 
1248  case IOCPF_E_DISABLE:
1249  bfa_sem_timer_stop(ioc);
1250  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1251  break;
1252 
1253  case IOCPF_E_FAIL:
1254  break;
1255 
1256  default:
1257  bfa_sm_fault(ioc, event);
1258  }
1259 }
1260 
1261 static void
1262 bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1263 {
1264  bfa_trc(iocpf->ioc, 0);
1265 }
1266 
1267 /*
1268  * IOC is in failed state.
1269  */
1270 static void
1271 bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1272 {
1273  struct bfa_ioc_s *ioc = iocpf->ioc;
1274 
1275  bfa_trc(ioc, event);
1276 
1277  switch (event) {
1278  case IOCPF_E_DISABLE:
1279  bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1280  break;
1281 
1282  default:
1283  bfa_sm_fault(ioc, event);
1284  }
1285 }
1286 
1287 /*
1288  * BFA IOC private functions
1289  */
1290 
1291 /*
1292  * Notify common modules registered for notification.
1293  */
1294 static void
1295 bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
1296 {
1297  struct bfa_ioc_notify_s *notify;
1298  struct list_head *qe;
1299 
1300  list_for_each(qe, &ioc->notify_q) {
1301  notify = (struct bfa_ioc_notify_s *)qe;
1302  notify->cbfn(notify->cbarg, event);
1303  }
1304 }
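/*
 * [Editor's illustrative sketch -- not part of the original file.]
 * A common module hooks into this notification list roughly as below;
 * my_cbfn/my_cbarg are placeholder names, and bfa_ioc_notify_init()
 * is the initializer from bfa_ioc.h:
 *
 *	struct bfa_ioc_notify_s notify;
 *
 *	bfa_ioc_notify_init(&notify, my_cbfn, my_cbarg);
 *	list_add_tail(&notify.qe, &ioc->notify_q);
 */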
1305 
1306 static void
1307 bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
1308 {
1309  ioc->cbfn->disable_cbfn(ioc->bfa);
1310  bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1311 }
1312 
1313 bfa_boolean_t
1314 bfa_ioc_sem_get(void __iomem *sem_reg)
1315 {
1316  u32 r32;
1317  int cnt = 0;
1318 #define BFA_SEM_SPINCNT 3000
1319 
1320  r32 = readl(sem_reg);
1321 
1322  while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1323  cnt++;
1324  udelay(2);
1325  r32 = readl(sem_reg);
1326  }
1327 
1328  if (!(r32 & 1))
1329  return BFA_TRUE;
1330 
1331  return BFA_FALSE;
1332 }
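/*
 * [Editor's illustrative sketch -- not part of the original file.]
 * The acquire/release pattern around a chip critical section, modeled
 * on bfa_ioc_pll_init() further below; bfa_ioc_sem_get() returns
 * BFA_TRUE once the semaphore is held:
 *
 *	if (bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
 *		... touch shared chip state ...
 *		readl(ioc->ioc_regs.ioc_init_sem_reg);
 *		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 *	}
 */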
1333 
1334 static void
1335 bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1336 {
1337  u32 r32;
1338 
1339  /*
1340  * First read to the semaphore register will return 0, subsequent reads
1341  * will return 1. Semaphore is released by writing 1 to the register
1342  */
1343  r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1344  if (r32 == ~0) {
1345  WARN_ON(r32 == ~0);
1346  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1347  return;
1348  }
1349  if (!(r32 & 1)) {
1350  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1351  return;
1352  }
1353 
1354  bfa_sem_timer_start(ioc);
1355 }
1356 
1357 /*
1358  * Initialize LPU local memory (aka secondary memory / SRAM)
1359  */
1360 static void
1361 bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1362 {
1363  u32 pss_ctl;
1364  int i;
1365 #define PSS_LMEM_INIT_TIME 10000
1366 
1367  pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1368  pss_ctl &= ~__PSS_LMEM_RESET;
1369  pss_ctl |= __PSS_LMEM_INIT_EN;
1370 
1371  /*
1372  * i2c workaround 12.5khz clock
1373  */
1374  pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1375  writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1376 
1377  /*
1378  * wait for memory initialization to be complete
1379  */
1380  i = 0;
1381  do {
1382  pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1383  i++;
1384  } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1385 
1386  /*
1387  * If memory initialization is not successful, IOC timeout will catch
1388  * such failures.
1389  */
1390  WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1391  bfa_trc(ioc, pss_ctl);
1392 
1393  pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1394  writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1395 }
1396 
1397 static void
1398 bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1399 {
1400  u32 pss_ctl;
1401 
1402  /*
1403  * Take processor out of reset.
1404  */
1405  pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1406  pss_ctl &= ~__PSS_LPU0_RESET;
1407 
1408  writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1409 }
1410 
1411 static void
1412 bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1413 {
1414  u32 pss_ctl;
1415 
1416  /*
1417  * Put processors in reset.
1418  */
1419  pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1420  pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1421 
1422  writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1423 }
1424 
1425 /*
1426  * Get driver and firmware versions.
1427  */
1428 void
1429 bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1430 {
1431  u32 pgnum, pgoff;
1432  u32 loff = 0;
1433  int i;
1434  u32 *fwsig = (u32 *) fwhdr;
1435 
1436  pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1437  pgoff = PSS_SMEM_PGOFF(loff);
1438  writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1439 
1440  for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1441  i++) {
1442  fwsig[i] =
1443  bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1444  loff += sizeof(u32);
1445  }
1446 }
1447 
1448 /*
1449  * Returns TRUE if same.
1450  */
1451 bfa_boolean_t
1452 bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1453 {
1454  struct bfi_ioc_image_hdr_s *drv_fwhdr;
1455  int i;
1456 
1457  drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1458  bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1459 
1460  for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1461  if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) {
1462  bfa_trc(ioc, i);
1463  bfa_trc(ioc, fwhdr->md5sum[i]);
1464  bfa_trc(ioc, drv_fwhdr->md5sum[i]);
1465  return BFA_FALSE;
1466  }
1467  }
1468 
1469  bfa_trc(ioc, fwhdr->md5sum[0]);
1470  return BFA_TRUE;
1471 }
1472 
1473 /*
1474  * Return true if current running version is valid. Firmware signature and
1475  * execution context (driver/bios) must match.
1476  */
1477 static bfa_boolean_t
1478 bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1479 {
1480  struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
1481 
1482  bfa_ioc_fwver_get(ioc, &fwhdr);
1483  drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1484  bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1485 
1486  if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
1487  bfa_trc(ioc, fwhdr.signature);
1488  bfa_trc(ioc, drv_fwhdr->signature);
1489  return BFA_FALSE;
1490  }
1491 
1492  if (swab32(fwhdr.bootenv) != boot_env) {
1493  bfa_trc(ioc, fwhdr.bootenv);
1494  bfa_trc(ioc, boot_env);
1495  return BFA_FALSE;
1496  }
1497 
1498  return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1499 }
1500 
1501 /*
1502  * Conditionally flush any pending message from firmware at start.
1503  */
1504 static void
1505 bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1506 {
1507  u32 r32;
1508 
1509  r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1510  if (r32)
1511  writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1512 }
1513 
1514 static void
1515 bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1516 {
1517  enum bfi_ioc_state ioc_fwstate;
1518  bfa_boolean_t fwvalid;
1519  u32 boot_type;
1520  u32 boot_env;
1521 
1522  ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1523 
1524  if (force)
1525  ioc_fwstate = BFI_IOC_UNINIT;
1526 
1527  bfa_trc(ioc, ioc_fwstate);
1528 
1529  boot_type = BFI_FWBOOT_TYPE_NORMAL;
1530  boot_env = BFI_FWBOOT_ENV_OS;
1531 
1532  /*
1533  * check if firmware is valid
1534  */
1535  fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1536  BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
1537 
1538  if (!fwvalid) {
1539  bfa_ioc_boot(ioc, boot_type, boot_env);
1540  bfa_ioc_poll_fwinit(ioc);
1541  return;
1542  }
1543 
1544  /*
1545  * If hardware initialization is in progress (initialized by other IOC),
1546  * just wait for an initialization completion interrupt.
1547  */
1548  if (ioc_fwstate == BFI_IOC_INITING) {
1549  bfa_ioc_poll_fwinit(ioc);
1550  return;
1551  }
1552 
1553  /*
1554  * If IOC function is disabled and firmware version is same,
1555  * just re-enable IOC.
1556  *
1557  * If option rom, IOC must not be in operational state. With
1558  * convergence, IOC will be in operational state when 2nd driver
1559  * is loaded.
1560  */
1561  if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1562 
1563  /*
1564  * When using MSI-X any pending firmware ready event should
1565  * be flushed. Otherwise MSI-X interrupts are not delivered.
1566  */
1567  bfa_ioc_msgflush(ioc);
1568  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1569  return;
1570  }
1571 
1572  /*
1573  * Initialize the h/w for any other states.
1574  */
1575  bfa_ioc_boot(ioc, boot_type, boot_env);
1576  bfa_ioc_poll_fwinit(ioc);
1577 }
1578 
1579 static void
1580 bfa_ioc_timeout(void *ioc_arg)
1581 {
1582  struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
1583 
1584  bfa_trc(ioc, 0);
1585  bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1586 }
1587 
1588 void
1589 bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1590 {
1591  u32 *msgp = (u32 *) ioc_msg;
1592  u32 i;
1593 
1594  bfa_trc(ioc, msgp[0]);
1595  bfa_trc(ioc, len);
1596 
1597  WARN_ON(len > BFI_IOC_MSGLEN_MAX);
1598 
1599  /*
1600  * first write msg to mailbox registers
1601  */
1602  for (i = 0; i < len / sizeof(u32); i++)
1603  writel(cpu_to_le32(msgp[i]),
1604  ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1605 
1606  for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1607  writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1608 
1609  /*
1610  * write 1 to mailbox CMD to trigger LPU event
1611  */
1612  writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1613  (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1614 }
1615 
1616 static void
1617 bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1618 {
1619  struct bfi_ioc_ctrl_req_s enable_req;
1620  struct timeval tv;
1621 
1622  bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1623  bfa_ioc_portid(ioc));
1624  enable_req.clscode = cpu_to_be16(ioc->clscode);
1625  do_gettimeofday(&tv);
1626  enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
1627  bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1628 }
1629 
1630 static void
1631 bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1632 {
1633  struct bfi_ioc_ctrl_req_s disable_req;
1634 
1635  bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1636  bfa_ioc_portid(ioc));
1637  bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1638 }
1639 
1640 static void
1641 bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1642 {
1643  struct bfi_ioc_getattr_req_s attr_req;
1644 
1645  bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1646  bfa_ioc_portid(ioc));
1647  bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1648  bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1649 }
1650 
1651 static void
1652 bfa_ioc_hb_check(void *cbarg)
1653 {
1654  struct bfa_ioc_s *ioc = cbarg;
1655  u32 hb_count;
1656 
1657  hb_count = readl(ioc->ioc_regs.heartbeat);
1658  if (ioc->hb_count == hb_count) {
1659  bfa_ioc_recover(ioc);
1660  return;
1661  } else {
1662  ioc->hb_count = hb_count;
1663  }
1664 
1665  bfa_ioc_mbox_poll(ioc);
1666  bfa_hb_timer_start(ioc);
1667 }
1668 
1669 static void
1670 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1671 {
1672  ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1673  bfa_hb_timer_start(ioc);
1674 }
1675 
1676 /*
1677  * Initiate a full firmware download.
1678  */
1679 static void
1680 bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1681  u32 boot_env)
1682 {
1683  u32 *fwimg;
1684  u32 pgnum, pgoff;
1685  u32 loff = 0;
1686  u32 chunkno = 0;
1687  u32 i;
1688  u32 asicmode;
1689 
1690  bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
1691  fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1692 
1693  pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1694  pgoff = PSS_SMEM_PGOFF(loff);
1695 
1696  writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1697 
1698  for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
1699 
1700  if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1701  chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1702  fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1703  BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1704  }
1705 
1706  /*
1707  * write smem
1708  */
1709  bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1710  fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1711 
1712  loff += sizeof(u32);
1713 
1714  /*
1715  * handle page offset wrap around
1716  */
1717  loff = PSS_SMEM_PGOFF(loff);
1718  if (loff == 0) {
1719  pgnum++;
1720  writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1721  }
1722  }
1723 
1724  writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1725  ioc->ioc_regs.host_page_num_fn);
1726 
1727  /*
1728  * Set boot type and device mode at the end.
1729  */
1730  asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1731  ioc->port0_mode, ioc->port1_mode);
1732  bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1733  swab32(asicmode));
1734  bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
1735  swab32(boot_type));
1736  bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
1737  swab32(boot_env));
1738 }
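/*
 * [Editor's illustrative sketch -- not part of the original file.]
 * The chunking arithmetic assumed by the download loop above: for
 * flash word i, the image is fetched one chunk at a time and the word
 * is indexed within the current chunk (macros from bfa_ioc.h):
 *
 *	chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
 *	word    = fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)];
 */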
1739 
1740 
1741 /*
1742  * Update BFA configuration from firmware configuration.
1743  */
1744 static void
1745 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1746 {
1747  struct bfi_ioc_attr_s *attr = ioc->attr;
1748 
1749  attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
1750  attr->card_type = be32_to_cpu(attr->card_type);
1751  attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
1752  ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
1753  attr->mfg_year = be16_to_cpu(attr->mfg_year);
1754 
1755  bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1756 }
1757 
1758 /*
1759  * Attach time initialization of mbox logic.
1760  */
1761 static void
1762 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1763 {
1764  struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1765  int mc;
1766 
1767  INIT_LIST_HEAD(&mod->cmd_q);
1768  for (mc = 0; mc < BFI_MC_MAX; mc++) {
1769  mod->mbhdlr[mc].cbfn = NULL;
1770  mod->mbhdlr[mc].cbarg = ioc->bfa;
1771  }
1772 }
1773 
1774 /*
1775  * Mbox poll timer -- restarts any pending mailbox requests.
1776  */
1777 static void
1778 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1779 {
1780  struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1781  struct bfa_mbox_cmd_s *cmd;
1782  u32 stat;
1783 
1784  /*
1785  * If no command pending, do nothing
1786  */
1787  if (list_empty(&mod->cmd_q))
1788  return;
1789 
1790  /*
1791  * If previous command is not yet fetched by firmware, do nothing
1792  */
1793  stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1794  if (stat)
1795  return;
1796 
1797  /*
1798  * Enqueue command to firmware.
1799  */
1800  bfa_q_deq(&mod->cmd_q, &cmd);
1801  bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1802 }
1803 
1804 /*
1805  * Cleanup any pending requests.
1806  */
1807 static void
1808 bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
1809 {
1810  struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1811  struct bfa_mbox_cmd_s *cmd;
1812 
1813  while (!list_empty(&mod->cmd_q))
1814  bfa_q_deq(&mod->cmd_q, &cmd);
1815 }
1816 
1817 /*
1818  * Read data from SMEM to host through PCI memmap
1819  *
1820  * @param[in] ioc memory for IOC
1821  * @param[in] tbuf app memory to store data from smem
1822  * @param[in] soff smem offset
1823  * @param[in] sz size of smem in bytes
1824  */
1825 static bfa_status_t
1826 bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1827 {
1828  u32 pgnum, loff;
1829  __be32 r32;
1830  int i, len;
1831  u32 *buf = tbuf;
1832 
1833  pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1834  loff = PSS_SMEM_PGOFF(soff);
1835  bfa_trc(ioc, pgnum);
1836  bfa_trc(ioc, loff);
1837  bfa_trc(ioc, sz);
1838 
1839  /*
1840  * Hold semaphore to serialize pll init and fwtrc.
1841  */
1842  if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1843  bfa_trc(ioc, 0);
1844  return BFA_STATUS_FAILED;
1845  }
1846 
1847  writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1848 
1849  len = sz/sizeof(u32);
1850  bfa_trc(ioc, len);
1851  for (i = 0; i < len; i++) {
1852  r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1853  buf[i] = be32_to_cpu(r32);
1854  loff += sizeof(u32);
1855 
1856  /*
1857  * handle page offset wrap around
1858  */
1859  loff = PSS_SMEM_PGOFF(loff);
1860  if (loff == 0) {
1861  pgnum++;
1862  writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1863  }
1864  }
1865  writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1866  ioc->ioc_regs.host_page_num_fn);
1867  /*
1868  * release semaphore.
1869  */
1870  readl(ioc->ioc_regs.ioc_init_sem_reg);
1871  writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1872 
1873  bfa_trc(ioc, pgnum);
1874  return BFA_STATUS_OK;
1875 }
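/*
 * [Editor's illustrative sketch -- not part of the original file.]
 * Reading function 0's firmware trace area with this helper, assuming
 * BFA_DBG_FWTRC_LEN from bfa_ioc.h and a caller-provided buffer:
 *
 *	u32 trcbuf[BFA_DBG_FWTRC_LEN / sizeof(u32)];
 *
 *	if (bfa_ioc_smem_read(ioc, trcbuf, BFA_DBG_FWTRC_OFF(0),
 *			      BFA_DBG_FWTRC_LEN) == BFA_STATUS_OK)
 *		trcbuf then holds the trace words in CPU endianness.
 */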
1876 
1877 /*
1878  * Clear SMEM data from host through PCI memmap
1879  *
1880  * @param[in] ioc memory for IOC
1881  * @param[in] soff smem offset
1882  * @param[in] sz size of smem in bytes
1883  */
1884 static bfa_status_t
1885 bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1886 {
1887  int i, len;
1888  u32 pgnum, loff;
1889 
1890  pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1891  loff = PSS_SMEM_PGOFF(soff);
1892  bfa_trc(ioc, pgnum);
1893  bfa_trc(ioc, loff);
1894  bfa_trc(ioc, sz);
1895 
1896  /*
1897  * Hold semaphore to serialize pll init and fwtrc.
1898  */
1899  if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1900  bfa_trc(ioc, 0);
1901  return BFA_STATUS_FAILED;
1902  }
1903 
1904  writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1905 
1906  len = sz/sizeof(u32); /* len in words */
1907  bfa_trc(ioc, len);
1908  for (i = 0; i < len; i++) {
1909  bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1910  loff += sizeof(u32);
1911 
1912  /*
1913  * handle page offset wrap around
1914  */
1915  loff = PSS_SMEM_PGOFF(loff);
1916  if (loff == 0) {
1917  pgnum++;
1918  writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1919  }
1920  }
1921  writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1922  ioc->ioc_regs.host_page_num_fn);
1923 
1924  /*
1925  * release semaphore.
1926  */
1927  readl(ioc->ioc_regs.ioc_init_sem_reg);
1928  writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1929  bfa_trc(ioc, pgnum);
1930  return BFA_STATUS_OK;
1931 }
1932 
1933 static void
1934 bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1935 {
1936  struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1937 
1938  /*
1939  * Notify driver and common modules registered for notification.
1940  */
1941  ioc->cbfn->hbfail_cbfn(ioc->bfa);
1942  bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
1943 
1944  bfa_ioc_debug_save_ftrc(ioc);
1945 
1946  BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1947  "Heart Beat of IOC has failed\n");
1948  bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
1949 
1950 }
1951 
1952 static void
1953 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1954 {
1955  struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1956  /*
1957  * Provide enable completion callback.
1958  */
1959  ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1960  BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1961  "Running firmware version is incompatible "
1962  "with the driver version\n");
1963  bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
1964 }
1965 
1966 bfa_status_t
1967 bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1968 {
1969 
1970  /*
1971  * Hold semaphore so that nobody can access the chip during init.
1972  */
1973  bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1974 
1975  bfa_ioc_pll_init_asic(ioc);
1976 
1977  ioc->pllinit = BFA_TRUE;
1978 
1979  /*
1980  * Initialize LMEM
1981  */
1982  bfa_ioc_lmem_init(ioc);
1983 
1984  /*
1985  * release semaphore.
1986  */
1987  readl(ioc->ioc_regs.ioc_init_sem_reg);
1988  writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1989 
1990  return BFA_STATUS_OK;
1991 }
1992 
1993 /*
1994  * Interface used by diag module to do firmware boot with memory test
1995  * as the entry vector.
1996  */
1997 void
1998 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
1999 {
2000  bfa_ioc_stats(ioc, ioc_boots);
2001 
2002  if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2003  return;
2004 
2005  /*
2006  * Initialize IOC state of all functions on a chip reset.
2007  */
2008  if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2009  writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
2010  writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
2011  } else {
2012  writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
2013  writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
2014  }
2015 
2016  bfa_ioc_msgflush(ioc);
2017  bfa_ioc_download_fw(ioc, boot_type, boot_env);
2018  bfa_ioc_lpu_start(ioc);
2019 }
2020 
2021 /*
2022  * Enable/disable IOC failure auto recovery.
2023  */
2024 void
2025 bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2026 {
2027  bfa_auto_recover = auto_recover;
2028 }
2029 
2030 
2031 
2032 bfa_boolean_t
2033 bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2034 {
2035  return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2036 }
2037 
2038 bfa_boolean_t
2039 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2040 {
2041  u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
2042 
2043  return ((r32 != BFI_IOC_UNINIT) &&
2044  (r32 != BFI_IOC_INITING) &&
2045  (r32 != BFI_IOC_MEMTEST));
2046 }
2047 
2048 bfa_boolean_t
2049 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2050 {
2051  __be32 *msgp = mbmsg;
2052  u32 r32;
2053  int i;
2054 
2055  r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2056  if ((r32 & 1) == 0)
2057  return BFA_FALSE;
2058 
2059  /*
2060  * read the MBOX msg
2061  */
2062  for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2063  i++) {
2064  r32 = readl(ioc->ioc_regs.lpu_mbox +
2065  i * sizeof(u32));
2066  msgp[i] = cpu_to_be32(r32);
2067  }
2068 
2069  /*
2070  * turn off mailbox interrupt by clearing mailbox status
2071  */
2072  writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2073  readl(ioc->ioc_regs.lpu_mbox_cmd);
2074 
2075  return BFA_TRUE;
2076 }
2077 
2078 void
2079 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2080 {
2081  union bfi_ioc_i2h_msg_u *msg;
2082  struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2083 
2084  msg = (union bfi_ioc_i2h_msg_u *) m;
2085 
2086  bfa_ioc_stats(ioc, ioc_isrs);
2087 
2088  switch (msg->mh.msg_id) {
2089  case BFI_IOC_I2H_HBEAT:
2090  break;
2091 
2092  case BFI_IOC_I2H_ENABLE_REPLY:
2093  ioc->port_mode = ioc->port_mode_cfg =
2094  (enum bfa_mode_s)msg->fw_event.port_mode;
2095  ioc->ad_cap_bm = msg->fw_event.cap_bm;
2096  bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2097  break;
2098 
2099  case BFI_IOC_I2H_DISABLE_REPLY:
2100  bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2101  break;
2102 
2103  case BFI_IOC_I2H_GETATTR_REPLY:
2104  bfa_ioc_getattr_reply(ioc);
2105  break;
2106 
2107  default:
2108  bfa_trc(ioc, msg->mh.msg_id);
2109  WARN_ON(1);
2110  }
2111 }
2112 
2113 /*
2114  * IOC attach time initialization and setup.
2115  *
2116  * @param[in] ioc memory for IOC
2117  * @param[in] bfa driver instance structure
2118  */
2119 void
2120 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2121  struct bfa_timer_mod_s *timer_mod)
2122 {
2123  ioc->bfa = bfa;
2124  ioc->cbfn = cbfn;
2125  ioc->timer_mod = timer_mod;
2126  ioc->fcmode = BFA_FALSE;
2127  ioc->pllinit = BFA_FALSE;
2128  ioc->dbg_fwsave_once = BFA_TRUE;
2129  ioc->iocpf.ioc = ioc;
2130 
2131  bfa_ioc_mbox_attach(ioc);
2132  INIT_LIST_HEAD(&ioc->notify_q);
2133 
2134  bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2135  bfa_fsm_send_event(ioc, IOC_E_RESET);
2136 }
2137 
2138 /*
2139  * Driver detach time IOC cleanup.
2140  */
2141 void
2142 bfa_ioc_detach(struct bfa_ioc_s *ioc)
2143 {
2144  bfa_fsm_send_event(ioc, IOC_E_DETACH);
2145  INIT_LIST_HEAD(&ioc->notify_q);
2146 }
2147 
2148 /*
2149  * Setup IOC PCI properties.
2150  *
2151  * @param[in] pcidev PCI device information for this IOC
2152  */
2153 void
2154 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2155  enum bfi_pcifn_class clscode)
2156 {
2157  ioc->clscode = clscode;
2158  ioc->pcidev = *pcidev;
2159 
2160  /*
2161  * Initialize IOC and device personality
2162  */
2163  ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2164  ioc->asic_mode = BFI_ASIC_MODE_FC;
2165 
2166  switch (pcidev->device_id) {
2167  case BFA_PCI_DEVICE_ID_FC_8G1P:
2168  case BFA_PCI_DEVICE_ID_FC_8G2P:
2169  ioc->asic_gen = BFI_ASIC_GEN_CB;
2170  ioc->fcmode = BFA_TRUE;
2171  ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2172  ioc->ad_cap_bm = BFA_CM_HBA;
2173  break;
2174 
2175  case BFA_PCI_DEVICE_ID_CT:
2176  ioc->asic_gen = BFI_ASIC_GEN_CT;
2177  ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2178  ioc->asic_mode = BFI_ASIC_MODE_ETH;
2179  ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2180  ioc->ad_cap_bm = BFA_CM_CNA;
2181  break;
2182 
2183  case BFA_PCI_DEVICE_ID_CT_FC:
2184  ioc->asic_gen = BFI_ASIC_GEN_CT;
2185  ioc->fcmode = BFA_TRUE;
2186  ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2187  ioc->ad_cap_bm = BFA_CM_HBA;
2188  break;
2189 
2190  case BFA_PCI_DEVICE_ID_CT2:
2191  ioc->asic_gen = BFI_ASIC_GEN_CT2;
2192  if (clscode == BFI_PCIFN_CLASS_FC &&
2193  pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2194  ioc->asic_mode = BFI_ASIC_MODE_FC16;
2195  ioc->fcmode = BFA_TRUE;
2196  ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2197  ioc->ad_cap_bm = BFA_CM_HBA;
2198  } else {
2199  ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2200  ioc->asic_mode = BFI_ASIC_MODE_ETH;
2201  if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2202  ioc->port_mode =
2203  ioc->port_mode_cfg = BFA_MODE_CNA;
2204  ioc->ad_cap_bm = BFA_CM_CNA;
2205  } else {
2206  ioc->port_mode =
2207  ioc->port_mode_cfg = BFA_MODE_NIC;
2208  ioc->ad_cap_bm = BFA_CM_NIC;
2209  }
2210  }
2211  break;
2212 
2213  default:
2214  WARN_ON(1);
2215  }
2216 
2217  /*
2218  * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2219  */
2220  if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2221  bfa_ioc_set_cb_hwif(ioc);
2222  else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2223  bfa_ioc_set_ct_hwif(ioc);
2224  else {
2225  WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2226  bfa_ioc_set_ct2_hwif(ioc);
2227  bfa_ioc_ct2_poweron(ioc);
2228  }
2229 
2230  bfa_ioc_map_port(ioc);
2231  bfa_ioc_reg_init(ioc);
2232 }
2233 
2234 /*
2235  * Initialize IOC dma memory
2236  *
2237  * @param[in] dm_kva kernel virtual address of IOC dma memory
2238  * @param[in] dm_pa physical address of IOC dma memory
2239  */
2240 void
2241 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
2242 {
2243  /*
2244  * dma memory for firmware attribute
2245  */
2246  ioc->attr_dma.kva = dm_kva;
2247  ioc->attr_dma.pa = dm_pa;
2248  ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2249 }
2250 
2251 void
2252 bfa_ioc_enable(struct bfa_ioc_s *ioc)
2253 {
2254  bfa_ioc_stats(ioc, ioc_enables);
2255  ioc->dbg_fwsave_once = BFA_TRUE;
2256 
2257  bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2258 }
2259 
2260 void
2261 bfa_ioc_disable(struct bfa_ioc_s *ioc)
2262 {
2263  bfa_ioc_stats(ioc, ioc_disables);
2264  bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2265 }
2266 
2267 void
2268 bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2269 {
2270  ioc->dbg_fwsave_once = BFA_TRUE;
2271  bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2272 }
2273 
2274 /*
2275  * Initialize memory for saving firmware trace. Driver must initialize
2276  * trace memory before call bfa_ioc_enable().
2277  */
2278 void
2279 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2280 {
2281  ioc->dbg_fwsave = dbg_fwsave;
2282  ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2283 }
2284 
2285 /*
2286  * Register mailbox message handler functions
2287  *
2288  * @param[in] ioc IOC instance
2289  * @param[in] mcfuncs message class handler functions
2290  */
2291 void
2292 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2293 {
2294  struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2295  int mc;
2296 
2297  for (mc = 0; mc < BFI_MC_MAX; mc++)
2298  mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2299 }
2300 
2301 /*
2302  * Register mailbox message handler function, to be called by common modules
2303  */
2304 void
2305 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2306  bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2307 {
2308  struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2309 
2310  mod->mbhdlr[mc].cbfn = cbfn;
2311  mod->mbhdlr[mc].cbarg = cbarg;
2312 }
2313 
2314 /*
2315  * Queue a mailbox command request to firmware. Queues the request if the
2316  * mailbox is busy. It is the caller's responsibility to serialize.
2317  *
2318  * @param[in] ioc IOC instance
2319  * @param[in] cmd Mailbox command
2320  */
2321 void
2322 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2323 {
2324  struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2325  u32 stat;
2326 
2327  /*
2328  * If a previous command is pending, queue new command
2329  */
2330  if (!list_empty(&mod->cmd_q)) {
2331  list_add_tail(&cmd->qe, &mod->cmd_q);
2332  return;
2333  }
2334 
2335  /*
2336  * If mailbox is busy, queue command for poll timer
2337  */
2338  stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2339  if (stat) {
2340  list_add_tail(&cmd->qe, &mod->cmd_q);
2341  return;
2342  }
2343 
2344  /*
2345  * mailbox is free -- queue command to firmware
2346  */
2347  bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2348 }
2349 
2350 /*
2351  * Handle mailbox interrupts
2352  */
2353 void
2354 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2355 {
2356  struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2357  struct bfi_mbmsg_s m;
2358  int mc;
2359 
2360  if (bfa_ioc_msgget(ioc, &m)) {
2361  /*
2362  * Treat IOC message class as special.
2363  */
2364  mc = m.mh.msg_class;
2365  if (mc == BFI_MC_IOC) {
2366  bfa_ioc_isr(ioc, &m);
2367  return;
2368  }
2369 
2370  if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2371  return;
2372 
2373  mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2374  }
2375 
2376  bfa_ioc_lpu_read_stat(ioc);
2377 
2378  /*
2379  * Try to send pending mailbox commands
2380  */
2381  bfa_ioc_mbox_poll(ioc);
2382 }
2383 
2384 void
2385 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2386 {
2387  bfa_ioc_stats(ioc, ioc_hbfails);
2388  ioc->stats.hb_count = ioc->hb_count;
2389  bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2390 }
2391 
2392 /*
2393  * return true if IOC is disabled
2394  */
2395 bfa_boolean_t
2396 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2397 {
2398  return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2399  bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2400 }
2401 
2402 /*
2403  * return true if IOC firmware is different.
2404  */
2405 bfa_boolean_t
2406 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2407 {
2408  return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2409  bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2410  bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2411 }
2412 
2413 #define bfa_ioc_state_disabled(__sm) \
2414  (((__sm) == BFI_IOC_UNINIT) || \
2415  ((__sm) == BFI_IOC_INITING) || \
2416  ((__sm) == BFI_IOC_HWINIT) || \
2417  ((__sm) == BFI_IOC_DISABLED) || \
2418  ((__sm) == BFI_IOC_FAIL) || \
2419  ((__sm) == BFI_IOC_CFG_DISABLED))
2420 
2421 /*
2422  * Check if adapter is disabled -- both IOCs should be in a disabled
2423  * state.
2424  */
2425 bfa_boolean_t
2426 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2427 {
2428  u32 ioc_state;
2429 
2430  if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2431  return BFA_FALSE;
2432 
2433  ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
2434  if (!bfa_ioc_state_disabled(ioc_state))
2435  return BFA_FALSE;
2436 
2437  if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2438  ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
2439  if (!bfa_ioc_state_disabled(ioc_state))
2440  return BFA_FALSE;
2441  }
2442 
2443  return BFA_TRUE;
2444 }
2445 
2446 /*
2447  * Reset IOC fwstate registers.
2448  */
2449 void
2450 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2451 {
2452  writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
2453  writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
2454 }
2455 
2456 #define BFA_MFG_NAME "Brocade"
2457 void
2458 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2459  struct bfa_adapter_attr_s *ad_attr)
2460 {
2461  struct bfi_ioc_attr_s *ioc_attr;
2462 
2463  ioc_attr = ioc->attr;
2464 
2464 
2465  bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2466  bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2467  bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2468  bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2469  memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2470  sizeof(struct bfa_mfg_vpd_s));
2471 
2472  ad_attr->nports = bfa_ioc_get_nports(ioc);
2473  ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2474 
2475  bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2476  /* For now, model descr uses same model string */
2477  bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2478 
2479  ad_attr->card_type = ioc_attr->card_type;
2480  ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2481 
2482  if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2483  ad_attr->prototype = 1;
2484  else
2485  ad_attr->prototype = 0;
2486 
2487  ad_attr->pwwn = ioc->attr->pwwn;
2488  ad_attr->mac = bfa_ioc_get_mac(ioc);
2489 
2490  ad_attr->pcie_gen = ioc_attr->pcie_gen;
2491  ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2492  ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2493  ad_attr->asic_rev = ioc_attr->asic_rev;
2494 
2495  bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2496 
2497  ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2498  ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2499  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2500  ad_attr->mfg_day = ioc_attr->mfg_day;
2501  ad_attr->mfg_month = ioc_attr->mfg_month;
2502  ad_attr->mfg_year = ioc_attr->mfg_year;
2503 }
2504 
2505 enum bfa_ioc_type_e
2506 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2507 {
2508  if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2509  return BFA_IOC_TYPE_LL;
2510 
2511  WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2512 
2513  return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2514  ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2515 }
2516 
2517 void
2518 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2519 {
2520  memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2521  memcpy((void *)serial_num,
2522  (void *)ioc->attr->brcd_serialnum,
2523  BFA_ADAPTER_SERIAL_NUM_LEN);
2524 }
2525 
2526 void
2527 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2528 {
2529  memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2530  memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2531 }
2532 
2533 void
2534 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2535 {
2536  WARN_ON(!chip_rev);
2537 
2538  memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2539 
2540  chip_rev[0] = 'R';
2541  chip_rev[1] = 'e';
2542  chip_rev[2] = 'v';
2543  chip_rev[3] = '-';
2544  chip_rev[4] = ioc->attr->asic_rev;
2545  chip_rev[5] = '\0';
2546 }
2547 
2548 void
2549 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2550 {
2551  memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2552  memcpy(optrom_ver, ioc->attr->optrom_version,
2553  BFA_VERSION_LEN);
2554 }
2555 
2556 void
2557 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2558 {
2559  memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2560  strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2561 }
2562 
2563 void
2564 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2565 {
2566  struct bfi_ioc_attr_s *ioc_attr;
2567 
2568  WARN_ON(!model);
2569  memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2570 
2571  ioc_attr = ioc->attr;
2572 
2573  snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2574  BFA_MFG_NAME, ioc_attr->card_type);
2575 }
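/*
 * Added commentary (not in the source): with BFA_MFG_NAME "Brocade", the
 * snprintf() above produces strings of the form "Brocade-<card_type>",
 * e.g. "Brocade-1860" for a card_type of 1860 (the card type value here
 * is only a plausible example, not taken from this file).
 */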
2576 
2577 enum bfa_ioc_state
2578 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2579 {
2580  enum bfa_iocpf_state iocpf_st;
2581  enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2582 
2583  if (ioc_st == BFA_IOC_ENABLING ||
2584  ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2585 
2586  iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2587 
2588  switch (iocpf_st) {
2589  case BFA_IOCPF_SEMWAIT:
2590  ioc_st = BFA_IOC_SEMWAIT;
2591  break;
2592 
2593  case BFA_IOCPF_HWINIT:
2594  ioc_st = BFA_IOC_HWINIT;
2595  break;
2596 
2597  case BFA_IOCPF_FWMISMATCH:
2598  ioc_st = BFA_IOC_FWMISMATCH;
2599  break;
2600 
2601  case BFA_IOCPF_FAIL:
2602  ioc_st = BFA_IOC_FAIL;
2603  break;
2604 
2605  case BFA_IOCPF_INITFAIL:
2606  ioc_st = BFA_IOC_INITFAIL;
2607  break;
2608 
2609  default:
2610  break;
2611  }
2612  }
2613 
2614  return ioc_st;
2615 }
2616 
2617 void
2618 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2619 {
2620  memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2621 
2622  ioc_attr->state = bfa_ioc_get_state(ioc);
2623  ioc_attr->port_id = ioc->port_id;
2624  ioc_attr->port_mode = ioc->port_mode;
2625  ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2626  ioc_attr->cap_bm = ioc->ad_cap_bm;
2627 
2628  ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2629 
2630  bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2631 
2632  ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2633  ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2634  bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2635 }
2636 
2637 mac_t
2638 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2639 {
2640  /*
2641  * Check the IOC type and return the appropriate MAC
2642  */
2643  if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2644  return ioc->attr->fcoe_mac;
2645  else
2646  return ioc->attr->mac;
2647 }
2648 
2649 mac_t
2650 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2651 {
2652  mac_t m;
2653 
2654  m = ioc->attr->mfg_mac;
2655  if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2656  m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2657  else
2658  bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2659  bfa_ioc_pcifn(ioc));
2660 
2661  return m;
2662 }
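/*
 * Illustrative sketch (not driver code): the old WWN/MAC scheme above
 * derives a per-function MAC by adding the PCI function number to the
 * last octet of the manufacturing MAC. Standalone C, assuming a 6-byte
 * MAC and a function number small enough that the addition does not
 * carry into the next octet.
 */
#include <stdint.h>

#define SKETCH_MAC_LEN 6

static void
sketch_pcifn_mac(const uint8_t mfg[SKETCH_MAC_LEN], uint8_t pcifn,
		 uint8_t out[SKETCH_MAC_LEN])
{
	int i;

	for (i = 0; i < SKETCH_MAC_LEN; i++)
		out[i] = mfg[i];
	out[SKETCH_MAC_LEN - 1] += pcifn;	/* one MAC per PCI function */
}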
2663 
2664 /*
2665  * Send AEN notification
2666  */
2667 void
2668 bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2669 {
2670  struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2671  struct bfa_aen_entry_s *aen_entry;
2672  enum bfa_ioc_type_e ioc_type;
2673 
2674  bfad_get_aen_entry(bfad, aen_entry);
2675  if (!aen_entry)
2676  return;
2677 
2678  ioc_type = bfa_ioc_get_type(ioc);
2679  switch (ioc_type) {
2680  case BFA_IOC_TYPE_FC:
2681  aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2682  break;
2683  case BFA_IOC_TYPE_FCoE:
2684  aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2685  aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2686  break;
2687  case BFA_IOC_TYPE_LL:
2688  aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2689  break;
2690  default:
2691  WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2692  break;
2693  }
2694 
2695  /* Send the AEN notification */
2696  aen_entry->aen_data.ioc.ioc_type = ioc_type;
2697  bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2698  BFA_AEN_CAT_IOC, event);
2699 }
2700 
2701 /*
2702  * Retrieve saved firmware trace from a prior IOC failure.
2703  */
2704 bfa_status_t
2705 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2706 {
2707  int tlen;
2708 
2709  if (ioc->dbg_fwsave_len == 0)
2710  return BFA_STATUS_ENOFSAVE;
2711 
2712  tlen = *trclen;
2713  if (tlen > ioc->dbg_fwsave_len)
2714  tlen = ioc->dbg_fwsave_len;
2715 
2716  memcpy(trcdata, ioc->dbg_fwsave, tlen);
2717  *trclen = tlen;
2718  return BFA_STATUS_OK;
2719 }
2720 
2721 
2722 /*
2723  * Retrieve saved firmware trace from a prior IOC failure.
2724  */
2725 bfa_status_t
2726 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2727 {
2728  u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2729  int tlen;
2730  bfa_status_t status;
2731 
2732  bfa_trc(ioc, *trclen);
2733 
2734  tlen = *trclen;
2735  if (tlen > BFA_DBG_FWTRC_LEN)
2736  tlen = BFA_DBG_FWTRC_LEN;
2737 
2738  status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2739  *trclen = tlen;
2740  return status;
2741 }
2742 
2743 static void
2744 bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2745 {
2746  struct bfa_mbox_cmd_s cmd;
2747  struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2748 
2749  bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2750  bfa_ioc_portid(ioc));
2751  req->clscode = cpu_to_be16(ioc->clscode);
2752  bfa_ioc_mbox_queue(ioc, &cmd);
2753 }
2754 
2755 static void
2756 bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2757 {
2758  u32 fwsync_iter = 1000;
2759 
2760  bfa_ioc_send_fwsync(ioc);
2761 
2762  /*
2763  * After sending a fw sync mbox command wait for it to
2764  * take effect. We will not wait for a response because
2765  * 1. fw_sync mbox cmd doesn't have a response.
2766  * 2. Even if we implement that, interrupts might not
2767  * be enabled when we call this function.
2768  * So, just keep checking if any mbox cmd is pending, and
2769  * after waiting for a reasonable amount of time, go ahead.
2770  * It is possible that fw has crashed and the mbox command
2771  * is never acknowledged.
2772  */
2773  while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2774  fwsync_iter--;
2775 }
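/*
 * Added commentary: the loop above is a bounded busy-wait -- at most 1000
 * polls of bfa_ioc_mbox_cmd_pending() -- so a crashed firmware that never
 * drains the mailbox cannot hang the caller indefinitely.
 */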
2776 
2777 /*
2778  * Dump firmware smem
2779  */
2780 bfa_status_t
2781 bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2782  u32 *offset, int *buflen)
2783 {
2784  u32 loff;
2785  int dlen;
2786  bfa_status_t status;
2787  u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
2788 
2789  if (*offset >= smem_len) {
2790  *offset = *buflen = 0;
2791  return BFA_STATUS_EINVAL;
2792  }
2793 
2794  loff = *offset;
2795  dlen = *buflen;
2796 
2797  /*
2798  * First smem read, sync smem before proceeding
2799  * No need to sync before reading every chunk.
2800  */
2801  if (loff == 0)
2802  bfa_ioc_fwsync(ioc);
2803 
2804  if ((loff + dlen) >= smem_len)
2805  dlen = smem_len - loff;
2806 
2807  status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2808 
2809  if (status != BFA_STATUS_OK) {
2810  *offset = *buflen = 0;
2811  return status;
2812  }
2813 
2814  *offset += dlen;
2815 
2816  if (*offset >= smem_len)
2817  *offset = 0;
2818 
2819  *buflen = dlen;
2820 
2821  return status;
2822 }
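/*
 * Illustrative caller sketch (not part of this file): draining the whole
 * firmware smem through repeated bfa_ioc_debug_fwcore() calls. Each call
 * returns one chunk, advances *offset, and wraps *offset back to 0 after
 * the final chunk. The chunk size and destination buffer (which must hold
 * at least BFA_IOC_FW_SMEM_SIZE(ioc) bytes) are hypothetical.
 */
static bfa_status_t
sketch_dump_smem(struct bfa_ioc_s *ioc, u8 *dst, int chunk_sz)
{
	u32 off = 0;
	int len;
	bfa_status_t status;

	do {
		len = chunk_sz;
		status = bfa_ioc_debug_fwcore(ioc, dst, &off, &len);
		if (status != BFA_STATUS_OK)
			return status;
		dst += len;	/* len now holds the bytes actually copied */
	} while (off != 0);	/* off wraps to 0 once the end is reached */

	return BFA_STATUS_OK;
}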
2823 
2824 /*
2825  * Firmware statistics
2826  */
2827 bfa_status_t
2828 bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2829 {
2830  u32 loff = BFI_IOC_FWSTATS_OFF + \
2831  BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2832  int tlen;
2833  bfa_status_t status;
2834 
2835  if (ioc->stats_busy) {
2836  bfa_trc(ioc, ioc->stats_busy);
2837  return BFA_STATUS_DEVBUSY;
2838  }
2839  ioc->stats_busy = BFA_TRUE;
2840 
2841  tlen = sizeof(struct bfa_fw_stats_s);
2842  status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2843 
2844  ioc->stats_busy = BFA_FALSE;
2845  return status;
2846 }
2847 
2848 bfa_status_t
2849 bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2850 {
2851  u32 loff = BFI_IOC_FWSTATS_OFF + \
2852  BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2853  int tlen;
2854  bfa_status_t status;
2855 
2856  if (ioc->stats_busy) {
2857  bfa_trc(ioc, ioc->stats_busy);
2858  return BFA_STATUS_DEVBUSY;
2859  }
2860  ioc->stats_busy = BFA_TRUE;
2861 
2862  tlen = sizeof(struct bfa_fw_stats_s);
2863  status = bfa_ioc_smem_clr(ioc, loff, tlen);
2864 
2865  ioc->stats_busy = BFA_FALSE;
2866  return status;
2867 }
2868 
2869 /*
2870  * Save firmware trace if configured.
2871  */
2872 void
2873 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
2874 {
2875  int tlen;
2876 
2877  if (ioc->dbg_fwsave_once) {
2878  ioc->dbg_fwsave_once = BFA_FALSE;
2879  if (ioc->dbg_fwsave_len) {
2880  tlen = ioc->dbg_fwsave_len;
2881  bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2882  }
2883  }
2884 }
2885 
2886 /*
2887  * Firmware failure detected. Start recovery actions.
2888  */
2889 static void
2890 bfa_ioc_recover(struct bfa_ioc_s *ioc)
2891 {
2892  bfa_ioc_stats(ioc, ioc_hbfails);
2893  ioc->stats.hb_count = ioc->hb_count;
2894  bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2895 }
2896 
2897 /*
2898  * BFA IOC PF private functions
2899  */
2900 static void
2901 bfa_iocpf_timeout(void *ioc_arg)
2902 {
2903  struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2904 
2905  bfa_trc(ioc, 0);
2906  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2907 }
2908 
2909 static void
2910 bfa_iocpf_sem_timeout(void *ioc_arg)
2911 {
2912  struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2913 
2914  bfa_ioc_hw_sem_get(ioc);
2915 }
2916 
2917 static void
2918 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2919 {
2920  u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2921 
2922  bfa_trc(ioc, fwstate);
2923 
2924  if (fwstate == BFI_IOC_DISABLED) {
2925  bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2926  return;
2927  }
2928 
2929  if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
2930  bfa_iocpf_timeout(ioc);
2931  else {
2932  ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2933  bfa_iocpf_poll_timer_start(ioc);
2934  }
2935 }
2936 
2937 static void
2938 bfa_iocpf_poll_timeout(void *ioc_arg)
2939 {
2940  struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2941 
2942  bfa_ioc_poll_fwinit(ioc);
2943 }
2944 
2945 /*
2946  * bfa timer function
2947  */
2948 void
2949 bfa_timer_beat(struct bfa_timer_mod_s *mod)
2950 {
2951  struct list_head *qh = &mod->timer_q;
2952  struct list_head *qe, *qe_next;
2953  struct bfa_timer_s *elem;
2954  struct list_head timedout_q;
2955 
2956  INIT_LIST_HEAD(&timedout_q);
2957 
2958  qe = bfa_q_next(qh);
2959 
2960  while (qe != qh) {
2961  qe_next = bfa_q_next(qe);
2962 
2963  elem = (struct bfa_timer_s *) qe;
2964  if (elem->timeout <= BFA_TIMER_FREQ) {
2965  elem->timeout = 0;
2966  list_del(&elem->qe);
2967  list_add_tail(&elem->qe, &timedout_q);
2968  } else {
2969  elem->timeout -= BFA_TIMER_FREQ;
2970  }
2971 
2972  qe = qe_next; /* go to next elem */
2973  }
2974 
2975  /*
2976  * Pop all the timeout entries
2977  */
2978  while (!list_empty(&timedout_q)) {
2979  bfa_q_deq(&timedout_q, &elem);
2980  elem->timercb(elem->arg);
2981  }
2982 }
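/*
 * Added commentary: expired entries are first moved to the private
 * timedout_q and only then have their callbacks invoked, so a timercb
 * may safely re-arm or stop timers on mod->timer_q without corrupting
 * the list walk above.
 */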
2983 
2984 /*
2985  * Should be called with lock protection
2986  */
2987 void
2988 bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2989  void (*timercb) (void *), void *arg, unsigned int timeout)
2990 {
2991 
2992  WARN_ON(timercb == NULL);
2993  WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
2994 
2995  timer->timeout = timeout;
2996  timer->timercb = timercb;
2997  timer->arg = arg;
2998 
2999  list_add_tail(&timer->qe, &mod->timer_q);
3000 }
3001 
3002 /*
3003  * Should be called with lock protection
3004  */
3005 void
3006 bfa_timer_stop(struct bfa_timer_s *timer)
3007 {
3008  WARN_ON(list_empty(&timer->qe));
3009 
3010  list_del(&timer->qe);
3011 }
3012 
3013 /*
3014  * ASIC block related
3015  */
3016 static void
3017 bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3018 {
3019  struct bfa_ablk_cfg_inst_s *cfg_inst;
3020  int i, j;
3021  u16 be16;
3022 
3023  for (i = 0; i < BFA_ABLK_MAX; i++) {
3024  cfg_inst = &cfg->inst[i];
3025  for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3026  be16 = cfg_inst->pf_cfg[j].pers;
3027  cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3028  be16 = cfg_inst->pf_cfg[j].num_qpairs;
3029  cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3030  be16 = cfg_inst->pf_cfg[j].num_vectors;
3031  cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3032  be16 = cfg_inst->pf_cfg[j].bw_min;
3033  cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3034  be16 = cfg_inst->pf_cfg[j].bw_max;
3035  cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3036  }
3037  }
3038 }
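/*
 * Illustrative sketch (not driver code): what be16_to_cpu() amounts to on
 * a little-endian host -- on big-endian hosts the kernel macro is a
 * no-op. Standalone C showing only the byte swap.
 */
#include <stdint.h>

static uint16_t
sketch_be16_to_le_host(uint16_t be)
{
	return (uint16_t)((be >> 8) | (be << 8));	/* 0x1234 -> 0x3412 */
}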
3039 
3040 static void
3041 bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3042 {
3043  struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3044  struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3045  bfa_ablk_cbfn_t cbfn;
3046 
3047  WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3048  bfa_trc(ablk->ioc, msg->mh.msg_id);
3049 
3050  switch (msg->mh.msg_id) {
3051  case BFI_ABLK_I2H_QUERY:
3052  if (rsp->status == BFA_STATUS_OK) {
3053  memcpy(ablk->cfg, ablk->dma_addr.kva,
3054  sizeof(struct bfa_ablk_cfg_s));
3055  bfa_ablk_config_swap(ablk->cfg);
3056  ablk->cfg = NULL;
3057  }
3058  break;
3059 
3060  case BFI_ABLK_I2H_ADPT_CONFIG:
3061  case BFI_ABLK_I2H_PORT_CONFIG:
3062  /* update config port mode */
3063  ablk->ioc->port_mode_cfg = rsp->port_mode;
3064 
3065  case BFI_ABLK_I2H_PF_DELETE:
3066  case BFI_ABLK_I2H_PF_UPDATE:
3067  case BFI_ABLK_I2H_OPTROM_ENABLE:
3068  case BFI_ABLK_I2H_OPTROM_DISABLE:
3069  /* No-op */
3070  break;
3071 
3072  case BFI_ABLK_I2H_PF_CREATE:
3073  *(ablk->pcifn) = rsp->pcifn;
3074  ablk->pcifn = NULL;
3075  break;
3076 
3077  default:
3078  WARN_ON(1);
3079  }
3080 
3081  ablk->busy = BFA_FALSE;
3082  if (ablk->cbfn) {
3083  cbfn = ablk->cbfn;
3084  ablk->cbfn = NULL;
3085  cbfn(ablk->cbarg, rsp->status);
3086  }
3087 }
3088 
3089 static void
3090 bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3091 {
3092  struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3093 
3094  bfa_trc(ablk->ioc, event);
3095 
3096  switch (event) {
3097  case BFA_IOC_E_ENABLED:
3098  WARN_ON(ablk->busy != BFA_FALSE);
3099  break;
3100 
3101  case BFA_IOC_E_DISABLED:
3102  case BFA_IOC_E_FAILED:
3103  /* Fail any pending requests */
3104  ablk->pcifn = NULL;
3105  if (ablk->busy) {
3106  if (ablk->cbfn)
3107  ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3108  ablk->cbfn = NULL;
3109  ablk->busy = BFA_FALSE;
3110  }
3111  break;
3112 
3113  default:
3114  WARN_ON(1);
3115  break;
3116  }
3117 }
3118 
3119 u32
3120 bfa_ablk_meminfo(void)
3121 {
3122  return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3123 }
3124 
3125 void
3126 bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3127 {
3128  ablk->dma_addr.kva = dma_kva;
3129  ablk->dma_addr.pa = dma_pa;
3130 }
3131 
3132 void
3133 bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3134 {
3135  ablk->ioc = ioc;
3136 
3137  bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3138  bfa_q_qe_init(&ablk->ioc_notify);
3139  bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3140  list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3141 }
3142 
3143 bfa_status_t
3144 bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3145  bfa_ablk_cbfn_t cbfn, void *cbarg)
3146 {
3147  struct bfi_ablk_h2i_query_s *m;
3148 
3149  WARN_ON(!ablk_cfg);
3150 
3151  if (!bfa_ioc_is_operational(ablk->ioc)) {
3152  bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3153  return BFA_STATUS_IOC_FAILURE;
3154  }
3155 
3156  if (ablk->busy) {
3157  bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3158  return BFA_STATUS_DEVBUSY;
3159  }
3160 
3161  ablk->cfg = ablk_cfg;
3162  ablk->cbfn = cbfn;
3163  ablk->cbarg = cbarg;
3164  ablk->busy = BFA_TRUE;
3165 
3166  m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3167  bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3168  bfa_ioc_portid(ablk->ioc));
3169  bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3170  bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3171 
3172  return BFA_STATUS_OK;
3173 }
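/*
 * Illustrative caller sketch (not part of this file): ablk requests are
 * asynchronous; completion is delivered through the bfa_ablk_cbfn_t
 * callback posted from bfa_ablk_isr(). The names below are hypothetical
 * and the callback signature (void *, enum bfa_status) is assumed.
 */
static void
sketch_ablk_query_done(void *cbarg, enum bfa_status status)
{
	/* the cfg passed to bfa_ablk_query() is valid only on BFA_STATUS_OK */
}

static bfa_status_t
sketch_start_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *cfg)
{
	/* returns BFA_STATUS_DEVBUSY while another ablk request is in flight */
	return bfa_ablk_query(ablk, cfg, sketch_ablk_query_done, NULL);
}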
3174 
3175 bfa_status_t
3176 bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3177  u8 port, enum bfi_pcifn_class personality,
3178  u16 bw_min, u16 bw_max,
3179  bfa_ablk_cbfn_t cbfn, void *cbarg)
3180 {
3181  struct bfi_ablk_h2i_pf_req_s *m;
3182 
3183  if (!bfa_ioc_is_operational(ablk->ioc)) {
3184  bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3185  return BFA_STATUS_IOC_FAILURE;
3186  }
3187 
3188  if (ablk->busy) {
3189  bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3190  return BFA_STATUS_DEVBUSY;
3191  }
3192 
3193  ablk->pcifn = pcifn;
3194  ablk->cbfn = cbfn;
3195  ablk->cbarg = cbarg;
3196  ablk->busy = BFA_TRUE;
3197 
3198  m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3199  bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3200  bfa_ioc_portid(ablk->ioc));
3201  m->pers = cpu_to_be16((u16)personality);
3202  m->bw_min = cpu_to_be16(bw_min);
3203  m->bw_max = cpu_to_be16(bw_max);
3204  m->port = port;
3205  bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3206 
3207  return BFA_STATUS_OK;
3208 }
3209 
3210 bfa_status_t
3211 bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3212  bfa_ablk_cbfn_t cbfn, void *cbarg)
3213 {
3214  struct bfi_ablk_h2i_pf_req_s *m;
3215 
3216  if (!bfa_ioc_is_operational(ablk->ioc)) {
3217  bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3218  return BFA_STATUS_IOC_FAILURE;
3219  }
3220 
3221  if (ablk->busy) {
3222  bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3223  return BFA_STATUS_DEVBUSY;
3224  }
3225 
3226  ablk->cbfn = cbfn;
3227  ablk->cbarg = cbarg;
3228  ablk->busy = BFA_TRUE;
3229 
3230  m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3231  bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3232  bfa_ioc_portid(ablk->ioc));
3233  m->pcifn = (u8)pcifn;
3234  bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3235 
3236  return BFA_STATUS_OK;
3237 }
3238 
3239 bfa_status_t
3240 bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3241  int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3242 {
3243  struct bfi_ablk_h2i_cfg_req_s *m;
3244 
3245  if (!bfa_ioc_is_operational(ablk->ioc)) {
3246  bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3247  return BFA_STATUS_IOC_FAILURE;
3248  }
3249 
3250  if (ablk->busy) {
3251  bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3252  return BFA_STATUS_DEVBUSY;
3253  }
3254 
3255  ablk->cbfn = cbfn;
3256  ablk->cbarg = cbarg;
3257  ablk->busy = BFA_TRUE;
3258 
3259  m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3260  bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3261  bfa_ioc_portid(ablk->ioc));
3262  m->mode = (u8)mode;
3263  m->max_pf = (u8)max_pf;
3264  m->max_vf = (u8)max_vf;
3265  bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3266 
3267  return BFA_STATUS_OK;
3268 }
3269 
3270 bfa_status_t
3271 bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3272  int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3273 {
3274  struct bfi_ablk_h2i_cfg_req_s *m;
3275 
3276  if (!bfa_ioc_is_operational(ablk->ioc)) {
3277  bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3278  return BFA_STATUS_IOC_FAILURE;
3279  }
3280 
3281  if (ablk->busy) {
3282  bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3283  return BFA_STATUS_DEVBUSY;
3284  }
3285 
3286  ablk->cbfn = cbfn;
3287  ablk->cbarg = cbarg;
3288  ablk->busy = BFA_TRUE;
3289 
3290  m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3291  bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3292  bfa_ioc_portid(ablk->ioc));
3293  m->port = (u8)port;
3294  m->mode = (u8)mode;
3295  m->max_pf = (u8)max_pf;
3296  m->max_vf = (u8)max_vf;
3297  bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3298 
3299  return BFA_STATUS_OK;
3300 }
3301 
3302 bfa_status_t
3303 bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3304  u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3305 {
3306  struct bfi_ablk_h2i_pf_req_s *m;
3307 
3308  if (!bfa_ioc_is_operational(ablk->ioc)) {
3309  bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3310  return BFA_STATUS_IOC_FAILURE;
3311  }
3312 
3313  if (ablk->busy) {
3314  bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3315  return BFA_STATUS_DEVBUSY;
3316  }
3317 
3318  ablk->cbfn = cbfn;
3319  ablk->cbarg = cbarg;
3320  ablk->busy = BFA_TRUE;
3321 
3322  m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3323  bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3324  bfa_ioc_portid(ablk->ioc));
3325  m->pcifn = (u8)pcifn;
3326  m->bw_min = cpu_to_be16(bw_min);
3327  m->bw_max = cpu_to_be16(bw_max);
3328  bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3329 
3330  return BFA_STATUS_OK;
3331 }
3332 
3333 bfa_status_t
3334 bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3335 {
3336  struct bfi_ablk_h2i_optrom_s *m;
3337 
3338  if (!bfa_ioc_is_operational(ablk->ioc)) {
3339  bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3340  return BFA_STATUS_IOC_FAILURE;
3341  }
3342 
3343  if (ablk->busy) {
3344  bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3345  return BFA_STATUS_DEVBUSY;
3346  }
3347 
3348  ablk->cbfn = cbfn;
3349  ablk->cbarg = cbarg;
3350  ablk->busy = BFA_TRUE;
3351 
3352  m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3353  bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3354  bfa_ioc_portid(ablk->ioc));
3355  bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3356 
3357  return BFA_STATUS_OK;
3358 }
3359 
3360 bfa_status_t
3361 bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3362 {
3363  struct bfi_ablk_h2i_optrom_s *m;
3364 
3365  if (!bfa_ioc_is_operational(ablk->ioc)) {
3366  bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3367  return BFA_STATUS_IOC_FAILURE;
3368  }
3369 
3370  if (ablk->busy) {
3371  bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3372  return BFA_STATUS_DEVBUSY;
3373  }
3374 
3375  ablk->cbfn = cbfn;
3376  ablk->cbarg = cbarg;
3377  ablk->busy = BFA_TRUE;
3378 
3379  m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3380  bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3381  bfa_ioc_portid(ablk->ioc));
3382  bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3383 
3384  return BFA_STATUS_OK;
3385 }
3386 
3387 /*
3388  * SFP module specific
3389  */
3390 
3391 /* forward declarations */
3392 static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3393 static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3394 static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3395  enum bfa_port_speed portspeed);
3396 
3397 static void
3398 bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3399 {
3400  bfa_trc(sfp, sfp->lock);
3401  if (sfp->cbfn)
3402  sfp->cbfn(sfp->cbarg, sfp->status);
3403  sfp->lock = 0;
3404  sfp->cbfn = NULL;
3405 }
3406 
3407 static void
3408 bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3409 {
3410  bfa_trc(sfp, sfp->portspeed);
3411  if (sfp->media) {
3412  bfa_sfp_media_get(sfp);
3413  if (sfp->state_query_cbfn)
3414  sfp->state_query_cbfn(sfp->state_query_cbarg,
3415  sfp->status);
3416  sfp->media = NULL;
3417  }
3418 
3419  if (sfp->portspeed) {
3420  sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3421  if (sfp->state_query_cbfn)
3422  sfp->state_query_cbfn(sfp->state_query_cbarg,
3423  sfp->status);
3424  sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3425  }
3426 
3427  sfp->state_query_lock = 0;
3428  sfp->state_query_cbfn = NULL;
3429 }
3430 
3431 /*
3432  * IOC event handler.
3433  */
3434 static void
3435 bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3436 {
3437  struct bfa_sfp_s *sfp = sfp_arg;
3438 
3439  bfa_trc(sfp, event);
3440  bfa_trc(sfp, sfp->lock);
3441  bfa_trc(sfp, sfp->state_query_lock);
3442 
3443  switch (event) {
3444  case BFA_IOC_E_DISABLED:
3445  case BFA_IOC_E_FAILED:
3446  if (sfp->lock) {
3447  sfp->status = BFA_STATUS_IOC_FAILURE;
3448  bfa_cb_sfp_show(sfp);
3449  }
3450 
3451  if (sfp->state_query_lock) {
3452  sfp->status = BFA_STATUS_IOC_FAILURE;
3453  bfa_cb_sfp_state_query(sfp);
3454  }
3455  break;
3456 
3457  default:
3458  break;
3459  }
3460 }
3461 
3462 /*
3463  * SFP's State Change Notification post to AEN
3464  */
3465 static void
3466 bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3467 {
3468  struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3469  struct bfa_aen_entry_s *aen_entry;
3470  enum bfa_port_aen_event aen_evt = 0;
3471 
3472  bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3473  ((u64)rsp->event));
3474 
3475  bfad_get_aen_entry(bfad, aen_entry);
3476  if (!aen_entry)
3477  return;
3478 
3479  aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3480  aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3481  aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3482 
3483  switch (rsp->event) {
3484  case BFA_SFP_SCN_INSERTED:
3485  aen_evt = BFA_PORT_AEN_SFP_INSERT;
3486  break;
3487  case BFA_SFP_SCN_REMOVED:
3488  aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3489  break;
3490  case BFA_SFP_SCN_FAILED:
3491  aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3492  break;
3493  case BFA_SFP_SCN_UNSUPPORT:
3494  aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3495  break;
3496  case BFA_SFP_SCN_POM:
3497  aen_evt = BFA_PORT_AEN_SFP_POM;
3498  aen_entry->aen_data.port.level = rsp->pomlvl;
3499  break;
3500  default:
3501  bfa_trc(sfp, rsp->event);
3502  WARN_ON(1);
3503  }
3504 
3505  /* Send the AEN notification */
3506  bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3507  BFA_AEN_CAT_PORT, aen_evt);
3508 }
3509 
3510 /*
3511  * SFP get data send
3512  */
3513 static void
3514 bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3515 {
3516  struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3517 
3518  bfa_trc(sfp, req->memtype);
3519 
3520  /* build host command */
3521  bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3522  bfa_ioc_portid(sfp->ioc));
3523 
3524  /* send mbox cmd */
3525  bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3526 }
3527 
3528 /*
3529  * SFP is valid, read sfp data
3530  */
3531 static void
3532 bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3533 {
3534  struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3535 
3536  WARN_ON(sfp->lock != 0);
3537  bfa_trc(sfp, sfp->state);
3538 
3539  sfp->lock = 1;
3540  sfp->memtype = memtype;
3541  req->memtype = memtype;
3542 
3543  /* Setup SG list */
3544  bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3545 
3546  bfa_sfp_getdata_send(sfp);
3547 }
3548 
3549 /*
3550  * SFP scn handler
3551  */
3552 static void
3553 bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3554 {
3555  struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3556 
3557  switch (rsp->event) {
3558  case BFA_SFP_SCN_INSERTED:
3559  sfp->state = BFA_SFP_STATE_INSERTED;
3560  sfp->data_valid = 0;
3561  bfa_sfp_scn_aen_post(sfp, rsp);
3562  break;
3563  case BFA_SFP_SCN_REMOVED:
3564  sfp->state = BFA_SFP_STATE_REMOVED;
3565  sfp->data_valid = 0;
3566  bfa_sfp_scn_aen_post(sfp, rsp);
3567  break;
3568  case BFA_SFP_SCN_FAILED:
3569  sfp->state = BFA_SFP_STATE_FAILED;
3570  sfp->data_valid = 0;
3571  bfa_sfp_scn_aen_post(sfp, rsp);
3572  break;
3573  case BFA_SFP_SCN_UNSUPPORT:
3574  sfp->state = BFA_SFP_STATE_UNSUPPORT;
3575  bfa_sfp_scn_aen_post(sfp, rsp);
3576  if (!sfp->lock)
3577  bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3578  break;
3579  case BFA_SFP_SCN_POM:
3580  bfa_sfp_scn_aen_post(sfp, rsp);
3581  break;
3582  case BFA_SFP_SCN_VALID:
3583  sfp->state = BFA_SFP_STATE_VALID;
3584  if (!sfp->lock)
3585  bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3586  break;
3587  default:
3588  bfa_trc(sfp, rsp->event);
3589  WARN_ON(1);
3590  }
3591 }
3592 
3593 /*
3594  * SFP show complete
3595  */
3596 static void
3597 bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3598 {
3599  struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3600 
3601  if (!sfp->lock) {
3602  /*
3603  * receiving response after ioc failure
3604  */
3605  bfa_trc(sfp, sfp->lock);
3606  return;
3607  }
3608 
3609  bfa_trc(sfp, rsp->status);
3610  if (rsp->status == BFA_STATUS_OK) {
3611  sfp->data_valid = 1;
3612  if (sfp->state == BFA_SFP_STATE_VALID)
3613  sfp->status = BFA_STATUS_OK;
3614  else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3615  sfp->status = BFA_STATUS_SFP_UNSUPPORT;
3616  else
3617  bfa_trc(sfp, sfp->state);
3618  } else {
3619  sfp->data_valid = 0;
3620  sfp->status = rsp->status;
3621  /* sfpshow shouldn't change sfp state */
3622  }
3623 
3624  bfa_trc(sfp, sfp->memtype);
3625  if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3626  bfa_trc(sfp, sfp->data_valid);
3627  if (sfp->data_valid) {
3628  u32 size = sizeof(struct sfp_mem_s);
3629  u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
3630  memcpy(des, sfp->dbuf_kva, size);
3631  }
3632  /*
3633  * Queue completion callback.
3634  */
3635  bfa_cb_sfp_show(sfp);
3636  } else
3637  sfp->lock = 0;
3638 
3639  bfa_trc(sfp, sfp->state_query_lock);
3640  if (sfp->state_query_lock) {
3641  sfp->state = rsp->state;
3642  /* Complete callback */
3643  bfa_cb_sfp_state_query(sfp);
3644  }
3645 }
3646 
3647 /*
3648  * SFP query fw sfp state
3649  */
3650 static void
3651 bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3652 {
3653  struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3654 
3655  /* Should not be doing query if not in _INIT state */
3656  WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3657  WARN_ON(sfp->state_query_lock != 0);
3658  bfa_trc(sfp, sfp->state);
3659 
3660  sfp->state_query_lock = 1;
3661  req->memtype = 0;
3662 
3663  if (!sfp->lock)
3664  bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3665 }
3666 
3667 static void
3668 bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3669 {
3670  enum bfa_defs_sfp_media_e *media = sfp->media;
3671 
3672  *media = BFA_SFP_MEDIA_UNKNOWN;
3673 
3674  if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3675  *media = BFA_SFP_MEDIA_UNSUPPORT;
3676  else if (sfp->state == BFA_SFP_STATE_VALID) {
3677  union sfp_xcvr_e10g_code_u e10g;
3678  struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3679  u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3680  (sfpmem->srlid_base.xcvr[5] >> 1);
3681 
3682  e10g.b = sfpmem->srlid_base.xcvr[0];
3683  bfa_trc(sfp, e10g.b);
3684  bfa_trc(sfp, xmtr_tech);
3685  /* check fc transmitter tech */
3686  if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3687  (xmtr_tech & SFP_XMTR_TECH_CP) ||
3688  (xmtr_tech & SFP_XMTR_TECH_CA))
3689  *media = BFA_SFP_MEDIA_CU;
3690  else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3691  (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3692  *media = BFA_SFP_MEDIA_EL;
3693  else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3694  (xmtr_tech & SFP_XMTR_TECH_LC))
3695  *media = BFA_SFP_MEDIA_LW;
3696  else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3697  (xmtr_tech & SFP_XMTR_TECH_SN) ||
3698  (xmtr_tech & SFP_XMTR_TECH_SA))
3699  *media = BFA_SFP_MEDIA_SW;
3700  /* Check 10G Ethernet Compliance code */
3701  else if (e10g.r.e10g_sr)
3702  *media = BFA_SFP_MEDIA_SW;
3703  else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3704  *media = BFA_SFP_MEDIA_LW;
3705  else if (e10g.r.e10g_unall)
3706  *media = BFA_SFP_MEDIA_UNKNOWN;
3707  else
3708  bfa_trc(sfp, 0);
3709  } else
3710  bfa_trc(sfp, sfp->state);
3711 }
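/*
 * Added commentary: the xmtr_tech expression above assembles a 9-bit
 * transmitter-technology field spanning two EEPROM bytes -- the low two
 * bits of xcvr[4] become the top bits and the upper seven bits of
 * xcvr[5] fill the rest. E.g. xcvr[4] = 0x01, xcvr[5] = 0x40 gives
 * (0x1 << 7) | (0x40 >> 1) = 0xa0.
 */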
3712 
3713 static bfa_status_t
3714 bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3715 {
3716  struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3717  struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3718  union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3719  union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3720 
3721  if (portspeed == BFA_PORT_SPEED_10GBPS) {
3722  if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3723  return BFA_STATUS_OK;
3724  else {
3725  bfa_trc(sfp, e10g.b);
3726  return BFA_STATUS_UNSUPP_SPEED;
3727  }
3728  }
3729  if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3730  ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3731  ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3732  ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3733  ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3734  return BFA_STATUS_OK;
3735  else {
3736  bfa_trc(sfp, portspeed);
3737  bfa_trc(sfp, fc3.b);
3738  bfa_trc(sfp, e10g.b);
3739  return BFA_STATUS_UNSUPP_SPEED;
3740  }
3741 }
3742 
3743 /*
3744  * SFP hmbox handler
3745  */
3746 void
3747 bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3748 {
3749  struct bfa_sfp_s *sfp = sfparg;
3750 
3751  switch (msg->mh.msg_id) {
3752  case BFI_SFP_I2H_SHOW:
3753  bfa_sfp_show_comp(sfp, msg);
3754  break;
3755 
3756  case BFI_SFP_I2H_SCN:
3757  bfa_sfp_scn(sfp, msg);
3758  break;
3759 
3760  default:
3761  bfa_trc(sfp, msg->mh.msg_id);
3762  WARN_ON(1);
3763  }
3764 }
3765 
3766 /*
3767  * Return DMA memory needed by sfp module.
3768  */
3769 u32
3770 bfa_sfp_meminfo(void)
3771 {
3772  return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3773 }
3774 
3775 /*
3776  * Attach virtual and physical memory for SFP.
3777  */
3778 void
3779 bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
3780  struct bfa_trc_mod_s *trcmod)
3781 {
3782  sfp->dev = dev;
3783  sfp->ioc = ioc;
3784  sfp->trcmod = trcmod;
3785 
3786  sfp->cbfn = NULL;
3787  sfp->cbarg = NULL;
3788  sfp->sfpmem = NULL;
3789  sfp->lock = 0;
3790  sfp->data_valid = 0;
3791  sfp->state = BFA_SFP_STATE_INIT;
3792  sfp->state_query_lock = 0;
3793  sfp->state_query_cbfn = NULL;
3794  sfp->state_query_cbarg = NULL;
3795  sfp->media = NULL;
3796  sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3797  sfp->is_elb = BFA_FALSE;
3798 
3799  bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
3800  bfa_q_qe_init(&sfp->ioc_notify);
3801  bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
3802  list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
3803 }
3804 
3805 /*
3806  * Claim Memory for SFP
3807  */
3808 void
3809 bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3810 {
3811  sfp->dbuf_kva = dm_kva;
3812  sfp->dbuf_pa = dm_pa;
3813  memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3814 
3815  dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3816  dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3817 }
3818 
3819 /*
3820  * Show SFP eeprom content
3821  *
3822  * @param[in] sfp - bfa sfp module
3823  *
3824  * @param[out] sfpmem - sfp eeprom data
3825  *
3826  */
3827 bfa_status_t
3828 bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
3829  bfa_cb_sfp_t cbfn, void *cbarg)
3830 {
3831 
3832  if (!bfa_ioc_is_operational(sfp->ioc)) {
3833  bfa_trc(sfp, 0);
3834  return BFA_STATUS_IOC_NON_OP;
3835  }
3836 
3837  if (sfp->lock) {
3838  bfa_trc(sfp, 0);
3839  return BFA_STATUS_DEVBUSY;
3840  }
3841 
3842  sfp->cbfn = cbfn;
3843  sfp->cbarg = cbarg;
3844  sfp->sfpmem = sfpmem;
3845 
3846  bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
3847  return BFA_STATUS_OK;
3848 }
3849 
3850 /*
3851  * Return SFP Media type
3852  *
3853  * @param[in] sfp - bfa sfp module
3854  *
3855  * @param[out] media - sfp media type
3856  *
3857  */
3858 bfa_status_t
3859 bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
3860  bfa_cb_sfp_t cbfn, void *cbarg)
3861 {
3862  if (!bfa_ioc_is_operational(sfp->ioc)) {
3863  bfa_trc(sfp, 0);
3864  return BFA_STATUS_IOC_NON_OP;
3865  }
3866 
3867  sfp->media = media;
3868  if (sfp->state == BFA_SFP_STATE_INIT) {
3869  if (sfp->state_query_lock) {
3870  bfa_trc(sfp, 0);
3871  return BFA_STATUS_DEVBUSY;
3872  } else {
3873  sfp->state_query_cbfn = cbfn;
3874  sfp->state_query_cbarg = cbarg;
3875  bfa_sfp_state_query(sfp);
3876  return BFA_STATUS_SFP_NOT_READY;
3877  }
3878  }
3879 
3880  bfa_sfp_media_get(sfp);
3881  return BFA_STATUS_OK;
3882 }
3883 
3884 /*
3885  * Check if user set port speed is allowed by the SFP
3886  *
3887  * @param[in] sfp - bfa sfp module
3888  * @param[in] portspeed - port speed from user
3889  *
3890  */
3891 bfa_status_t
3892 bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3893  bfa_cb_sfp_t cbfn, void *cbarg)
3894 {
3895  WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
3896 
3897  if (!bfa_ioc_is_operational(sfp->ioc))
3898  return BFA_STATUS_IOC_NON_OP;
3899 
3900  /* For Mezz card, all speeds are allowed */
3901  if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
3902  return BFA_STATUS_OK;
3903 
3904  /* Check SFP state */
3905  sfp->portspeed = portspeed;
3906  if (sfp->state == BFA_SFP_STATE_INIT) {
3907  if (sfp->state_query_lock) {
3908  bfa_trc(sfp, 0);
3909  return BFA_STATUS_DEVBUSY;
3910  } else {
3911  sfp->state_query_cbfn = cbfn;
3912  sfp->state_query_cbarg = cbarg;
3913  bfa_sfp_state_query(sfp);
3914  return BFA_STATUS_SFP_NOT_READY;
3915  }
3916  }
3917 
3918  if (sfp->state == BFA_SFP_STATE_REMOVED ||
3919  sfp->state == BFA_SFP_STATE_FAILED) {
3920  bfa_trc(sfp, sfp->state);
3921  return BFA_STATUS_NO_SFP_DEV;
3922  }
3923 
3924  if (sfp->state == BFA_SFP_STATE_INSERTED) {
3925  bfa_trc(sfp, sfp->state);
3926  return BFA_STATUS_DEVBUSY; /* sfp is reading data */
3927  }
3928 
3929  /* For eloopback, all speeds are allowed */
3930  if (sfp->is_elb)
3931  return BFA_STATUS_OK;
3932 
3933  return bfa_sfp_speed_valid(sfp, portspeed);
3934 }
3935 
3936 /*
3937  * Flash module specific
3938  */
3939 
3940 /*
3941  * FLASH DMA buffer should be big enough to hold both MFG block and
3942  * asic block (64k) at the same time and also should be 2k aligned to
3943  * avoid a write segment crossing a sector boundary.
3944  */
3945 #define BFA_FLASH_SEG_SZ 2048
3946 #define BFA_FLASH_DMA_BUF_SZ \
3947  BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
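/*
 * Added commentary: 0x010000 is the 64k asic block; adding the MFG block
 * and rounding up to BFA_FLASH_SEG_SZ keeps the DMA buffer a whole number
 * of 2k segments, matching the alignment rule described above.
 */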
3948 
3949 static void
3950 bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
3951  int inst, int type)
3952 {
3953  struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
3954  struct bfa_aen_entry_s *aen_entry;
3955 
3956  bfad_get_aen_entry(bfad, aen_entry);
3957  if (!aen_entry)
3958  return;
3959 
3960  aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
3961  aen_entry->aen_data.audit.partition_inst = inst;
3962  aen_entry->aen_data.audit.partition_type = type;
3963 
3964  /* Send the AEN notification */
3965  bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
3966  BFA_AEN_CAT_AUDIT, event);
3967 }
3968 
3969 static void
3970 bfa_flash_cb(struct bfa_flash_s *flash)
3971 {
3972  flash->op_busy = 0;
3973  if (flash->cbfn)
3974  flash->cbfn(flash->cbarg, flash->status);
3975 }
3976 
3977 static void
3978 bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
3979 {
3980  struct bfa_flash_s *flash = cbarg;
3981 
3982  bfa_trc(flash, event);
3983  switch (event) {
3984  case BFA_IOC_E_DISABLED:
3985  case BFA_IOC_E_FAILED:
3986  if (flash->op_busy) {
3987  flash->status = BFA_STATUS_IOC_FAILURE;
3988  flash->cbfn(flash->cbarg, flash->status);
3989  flash->op_busy = 0;
3990  }
3991  break;
3992 
3993  default:
3994  break;
3995  }
3996 }
3997 
3998 /*
3999  * Send flash attribute query request.
4000  *
4001  * @param[in] cbarg - callback argument
4002  */
4003 static void
4004 bfa_flash_query_send(void *cbarg)
4005 {
4006  struct bfa_flash_s *flash = cbarg;
4007  struct bfi_flash_query_req_s *msg =
4008  (struct bfi_flash_query_req_s *) flash->mb.msg;
4009 
4010  bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4011  bfa_ioc_portid(flash->ioc));
4012  bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4013  flash->dbuf_pa);
4014  bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4015 }
4016 
4017 /*
4018  * Send flash write request.
4019  *
4020  * @param[in] cbarg - callback argument
4021  */
4022 static void
4023 bfa_flash_write_send(struct bfa_flash_s *flash)
4024 {
4025  struct bfi_flash_write_req_s *msg =
4026  (struct bfi_flash_write_req_s *) flash->mb.msg;
4027  u32 len;
4028 
4029  msg->type = be32_to_cpu(flash->type);
4030  msg->instance = flash->instance;
4031  msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4032  len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4033  flash->residue : BFA_FLASH_DMA_BUF_SZ;
4034  msg->length = be32_to_cpu(len);
4035 
4036  /* indicate if it's the last msg of the whole write operation */
4037  msg->last = (len == flash->residue) ? 1 : 0;
4038 
4039  bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4040  bfa_ioc_portid(flash->ioc));
4041  bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4042  memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4043  bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4044 
4045  flash->residue -= len;
4046  flash->offset += len;
4047 }
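/*
 * Worked example (added commentary): assuming a DMA buffer of exactly
 * 65536 bytes, writing 100000 bytes takes two messages -- 65536 bytes
 * with last = 0, then the remaining 34464 bytes with last = 1; residue
 * shrinks and offset advances by len after each send.
 */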
4048 
4049 /*
4050  * Send flash read request.
4051  *
4052  * @param[in] cbarg - callback argument
4053  */
4054 static void
4055 bfa_flash_read_send(void *cbarg)
4056 {
4057  struct bfa_flash_s *flash = cbarg;
4058  struct bfi_flash_read_req_s *msg =
4059  (struct bfi_flash_read_req_s *) flash->mb.msg;
4060  u32 len;
4061 
4062  msg->type = be32_to_cpu(flash->type);
4063  msg->instance = flash->instance;
4064  msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4065  len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4066  flash->residue : BFA_FLASH_DMA_BUF_SZ;
4067  msg->length = be32_to_cpu(len);
4068  bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4069  bfa_ioc_portid(flash->ioc));
4070  bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4071  bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4072 }
4073 
4074 /*
4075  * Send flash erase request.
4076  *
4077  * @param[in] cbarg - callback argument
4078  */
4079 static void
4080 bfa_flash_erase_send(void *cbarg)
4081 {
4082  struct bfa_flash_s *flash = cbarg;
4083  struct bfi_flash_erase_req_s *msg =
4084  (struct bfi_flash_erase_req_s *) flash->mb.msg;
4085 
4086  msg->type = be32_to_cpu(flash->type);
4087  msg->instance = flash->instance;
4088  bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4089  bfa_ioc_portid(flash->ioc));
4090  bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4091 }
4092 
4093 /*
4094  * Process flash response messages upon receiving interrupts.
4095  *
4096  * @param[in] flasharg - flash structure
4097  * @param[in] msg - message structure
4098  */
4099 static void
4100 bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4101 {
4102  struct bfa_flash_s *flash = flasharg;
4103  u32 status;
4104 
4105  union {
4106  struct bfi_flash_query_rsp_s *query;
4107  struct bfi_flash_erase_rsp_s *erase;
4108  struct bfi_flash_write_rsp_s *write;
4109  struct bfi_flash_read_rsp_s *read;
4110  struct bfi_flash_event_s *event;
4111  struct bfi_mbmsg_s *msg;
4112  } m;
4113 
4114  m.msg = msg;
4115  bfa_trc(flash, msg->mh.msg_id);
4116 
4117  if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4118  /* receiving response after ioc failure */
4119  bfa_trc(flash, 0x9999);
4120  return;
4121  }
4122 
4123  switch (msg->mh.msg_id) {
4124  case BFI_FLASH_I2H_QUERY_RSP:
4125  status = be32_to_cpu(m.query->status);
4126  bfa_trc(flash, status);
4127  if (status == BFA_STATUS_OK) {
4128  u32 i;
4129  struct bfa_flash_attr_s *attr, *f;
4130 
4131  attr = (struct bfa_flash_attr_s *) flash->ubuf;
4132  f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4133  attr->status = be32_to_cpu(f->status);
4134  attr->npart = be32_to_cpu(f->npart);
4135  bfa_trc(flash, attr->status);
4136  bfa_trc(flash, attr->npart);
4137  for (i = 0; i < attr->npart; i++) {
4138  attr->part[i].part_type =
4139  be32_to_cpu(f->part[i].part_type);
4140  attr->part[i].part_instance =
4141  be32_to_cpu(f->part[i].part_instance);
4142  attr->part[i].part_off =
4143  be32_to_cpu(f->part[i].part_off);
4144  attr->part[i].part_size =
4145  be32_to_cpu(f->part[i].part_size);
4146  attr->part[i].part_len =
4147  be32_to_cpu(f->part[i].part_len);
4148  attr->part[i].part_status =
4149  be32_to_cpu(f->part[i].part_status);
4150  }
4151  }
4152  flash->status = status;
4153  bfa_flash_cb(flash);
4154  break;
4155  case BFI_FLASH_I2H_ERASE_RSP:
4156  status = be32_to_cpu(m.erase->status);
4157  bfa_trc(flash, status);
4158  flash->status = status;
4159  bfa_flash_cb(flash);
4160  break;
4161  case BFI_FLASH_I2H_WRITE_RSP:
4162  status = be32_to_cpu(m.write->status);
4163  bfa_trc(flash, status);
4164  if (status != BFA_STATUS_OK || flash->residue == 0) {
4165  flash->status = status;
4166  bfa_flash_cb(flash);
4167  } else {
4168  bfa_trc(flash, flash->offset);
4169  bfa_flash_write_send(flash);
4170  }
4171  break;
4172  case BFI_FLASH_I2H_READ_RSP:
4173  status = be32_to_cpu(m.read->status);
4174  bfa_trc(flash, status);
4175  if (status != BFA_STATUS_OK) {
4176  flash->status = status;
4177  bfa_flash_cb(flash);
4178  } else {
4179  u32 len = be32_to_cpu(m.read->length);
4180  bfa_trc(flash, flash->offset);
4181  bfa_trc(flash, len);
4182  memcpy(flash->ubuf + flash->offset,
4183  flash->dbuf_kva, len);
4184  flash->residue -= len;
4185  flash->offset += len;
4186  if (flash->residue == 0) {
4187  flash->status = status;
4188  bfa_flash_cb(flash);
4189  } else
4190  bfa_flash_read_send(flash);
4191  }
4192  break;
4193  case BFI_FLASH_I2H_BOOT_VER_RSP:
4194  break;
4195  case BFI_FLASH_I2H_EVENT:
4196  status = be32_to_cpu(m.event->status);
4197  bfa_trc(flash, status);
4198  if (status == BFA_STATUS_BAD_FWCFG)
4199  bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4200  else if (status == BFA_STATUS_INVALID_VENDOR) {
4201  u32 param;
4202  param = be32_to_cpu(m.event->param);
4203  bfa_trc(flash, param);
4204  bfa_ioc_aen_post(flash->ioc,
4205  BFA_IOC_AEN_INVALID_VENDOR);
4206  }
4207  break;
4208 
4209  default:
4210  WARN_ON(1);
4211  }
4212 }
4213 
4214 /*
4215  * Flash memory info API.
4216  *
4217  * @param[in] mincfg - minimal cfg variable
4218  */
4219 u32
4220 bfa_flash_meminfo(bfa_boolean_t mincfg)
4221 {
4222  /* min driver doesn't need flash */
4223  if (mincfg)
4224  return 0;
4225  return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4226 }
4227 
4228 /*
4229  * Flash attach API.
4230  *
4231  * @param[in] flash - flash structure
4232  * @param[in] ioc - ioc structure
4233  * @param[in] dev - device structure
4234  * @param[in] trcmod - trace module
4235  * @param[in] logmod - log module
4236  */
4237 void
4238 bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4239  struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4240 {
4241  flash->ioc = ioc;
4242  flash->trcmod = trcmod;
4243  flash->cbfn = NULL;
4244  flash->cbarg = NULL;
4245  flash->op_busy = 0;
4246 
4247  bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4248  bfa_q_qe_init(&flash->ioc_notify);
4249  bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4250  list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4251 
4252  /* min driver doesn't need flash */
4253  if (mincfg) {
4254  flash->dbuf_kva = NULL;
4255  flash->dbuf_pa = 0;
4256  }
4257 }
4258 
4259 /*
4260  * Claim memory for flash
4261  *
4262  * @param[in] flash - flash structure
4263  * @param[in] dm_kva - pointer to virtual memory address
4264  * @param[in] dm_pa - physical memory address
4265  * @param[in] mincfg - minimal cfg variable
4266  */
4267 void
4268 bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4269  bfa_boolean_t mincfg)
4270 {
4271  if (mincfg)
4272  return;
4273 
4274  flash->dbuf_kva = dm_kva;
4275  flash->dbuf_pa = dm_pa;
4276  memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4277  dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4278  dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4279 }
4280 
4281 /*
4282  * Get flash attribute.
4283  *
4284  * @param[in] flash - flash structure
4285  * @param[in] attr - flash attribute structure
4286  * @param[in] cbfn - callback function
4287  * @param[in] cbarg - callback argument
4288  *
4289  * Return status.
4290  */
4291 bfa_status_t
4292 bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4293  bfa_cb_flash_t cbfn, void *cbarg)
4294 {
4295  bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4296 
4297  if (!bfa_ioc_is_operational(flash->ioc))
4298  return BFA_STATUS_IOC_NON_OP;
4299 
4300  if (flash->op_busy) {
4301  bfa_trc(flash, flash->op_busy);
4302  return BFA_STATUS_DEVBUSY;
4303  }
4304 
4305  flash->op_busy = 1;
4306  flash->cbfn = cbfn;
4307  flash->cbarg = cbarg;
4308  flash->ubuf = (u8 *) attr;
4309  bfa_flash_query_send(flash);
4310 
4311  return BFA_STATUS_OK;
4312 }
4313 
4314 /*
4315  * Erase flash partition.
4316  *
4317  * @param[in] flash - flash structure
4318  * @param[in] type - flash partition type
4319  * @param[in] instance - flash partition instance
4320  * @param[in] cbfn - callback function
4321  * @param[in] cbarg - callback argument
4322  *
4323  * Return status.
4324  */
4325 bfa_status_t
4326 bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4327  u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4328 {
4329  bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4330  bfa_trc(flash, type);
4331  bfa_trc(flash, instance);
4332 
4333  if (!bfa_ioc_is_operational(flash->ioc))
4334  return BFA_STATUS_IOC_NON_OP;
4335 
4336  if (flash->op_busy) {
4337  bfa_trc(flash, flash->op_busy);
4338  return BFA_STATUS_DEVBUSY;
4339  }
4340 
4341  flash->op_busy = 1;
4342  flash->cbfn = cbfn;
4343  flash->cbarg = cbarg;
4344  flash->type = type;
4345  flash->instance = instance;
4346 
4347  bfa_flash_erase_send(flash);
4348  bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4349  instance, type);
4350  return BFA_STATUS_OK;
4351 }
4352 
4353 /*
4354  * Update flash partition.
4355  *
4356  * @param[in] flash - flash structure
4357  * @param[in] type - flash partition type
4358  * @param[in] instance - flash partition instance
4359  * @param[in] buf - update data buffer
4360  * @param[in] len - data buffer length
4361  * @param[in] offset - offset relative to the partition starting address
4362  * @param[in] cbfn - callback function
4363  * @param[in] cbarg - callback argument
4364  *
4365  * Return status.
4366  */
4367 bfa_status_t
4368 bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4369  u8 instance, void *buf, u32 len, u32 offset,
4370  bfa_cb_flash_t cbfn, void *cbarg)
4371 {
4372  bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4373  bfa_trc(flash, type);
4374  bfa_trc(flash, instance);
4375  bfa_trc(flash, len);
4376  bfa_trc(flash, offset);
4377 
4378  if (!bfa_ioc_is_operational(flash->ioc))
4379  return BFA_STATUS_IOC_NON_OP;
4380 
4381  /*
4382  * 'len' must be on a word (4-byte) boundary
4383  * 'offset' must be on a sector (16KB) boundary
4384  */
4385  if (!len || (len & 0x03) || (offset & 0x00003FFF))
4386  return BFA_STATUS_FLASH_BAD_LEN;
4387 
4388  if (type == BFA_FLASH_PART_MFG)
4389  return BFA_STATUS_EINVAL;
4390 
4391  if (flash->op_busy) {
4392  bfa_trc(flash, flash->op_busy);
4393  return BFA_STATUS_DEVBUSY;
4394  }
4395 
4396  flash->op_busy = 1;
4397  flash->cbfn = cbfn;
4398  flash->cbarg = cbarg;
4399  flash->type = type;
4400  flash->instance = instance;
4401  flash->residue = len;
4402  flash->offset = 0;
4403  flash->addr_off = offset;
4404  flash->ubuf = buf;
4405 
4406  bfa_flash_write_send(flash);
4407  return BFA_STATUS_OK;
4408 }
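/*
 * Illustrative caller sketch (not part of this file): updating a flash
 * partition. Completion arrives via the bfa_cb_flash_t callback; len must
 * be word aligned and offset sector aligned, per the checks above. The
 * names below are hypothetical.
 */
static void
sketch_flash_write_done(void *cbarg, bfa_status_t status)
{
	/* the buffer handed to bfa_flash_update_part() may be reused here */
}

static bfa_status_t
sketch_update_partition(struct bfa_flash_s *flash,
			enum bfa_flash_part_type type, void *img, u32 img_len)
{
	if (img_len & 0x03)	/* would fail the word-alignment check */
		return BFA_STATUS_FLASH_BAD_LEN;

	return bfa_flash_update_part(flash, type, 0, img, img_len, 0,
				     sketch_flash_write_done, NULL);
}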
4409 
4410 /*
4411  * Read flash partition.
4412  *
4413  * @param[in] flash - flash structure
4414  * @param[in] type - flash partition type
4415  * @param[in] instance - flash partition instance
4416  * @param[in] buf - read data buffer
4417  * @param[in] len - data buffer length
4418  * @param[in] offset - offset relative to the partition starting address
4419  * @param[in] cbfn - callback function
4420  * @param[in] cbarg - callback argument
4421  *
4422  * Return status.
4423  */
4424 bfa_status_t
4425 bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4426  u8 instance, void *buf, u32 len, u32 offset,
4427  bfa_cb_flash_t cbfn, void *cbarg)
4428 {
4429  bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4430  bfa_trc(flash, type);
4431  bfa_trc(flash, instance);
4432  bfa_trc(flash, len);
4433  bfa_trc(flash, offset);
4434 
4435  if (!bfa_ioc_is_operational(flash->ioc))
4436  return BFA_STATUS_IOC_NON_OP;
4437 
4438  /*
4439  * 'len' must be on a word (4-byte) boundary
4440  * 'offset' must be on a sector (16KB) boundary
4441  */
4442  if (!len || (len & 0x03) || (offset & 0x00003FFF))
4443  return BFA_STATUS_FLASH_BAD_LEN;
4444 
4445  if (flash->op_busy) {
4446  bfa_trc(flash, flash->op_busy);
4447  return BFA_STATUS_DEVBUSY;
4448  }
4449 
4450  flash->op_busy = 1;
4451  flash->cbfn = cbfn;
4452  flash->cbarg = cbarg;
4453  flash->type = type;
4454  flash->instance = instance;
4455  flash->residue = len;
4456  flash->offset = 0;
4457  flash->addr_off = offset;
4458  flash->ubuf = buf;
4459  bfa_flash_read_send(flash);
4460 
4461  return BFA_STATUS_OK;
4462 }
4463 
4464 /*
4465  * DIAG module specific
4466  */
4467 
4468 #define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
4469 #define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */
4470 
4471 /* IOC event handler */
4472 static void
4473 bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4474 {
4475  struct bfa_diag_s *diag = diag_arg;
4476 
4477  bfa_trc(diag, event);
4478  bfa_trc(diag, diag->block);
4479  bfa_trc(diag, diag->fwping.lock);
4480  bfa_trc(diag, diag->tsensor.lock);
4481 
4482  switch (event) {
4483  case BFA_IOC_E_DISABLED:
4484  case BFA_IOC_E_FAILED:
4485  if (diag->fwping.lock) {
4486  diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4487  diag->fwping.cbfn(diag->fwping.cbarg,
4488  diag->fwping.status);
4489  diag->fwping.lock = 0;
4490  }
4491 
4492  if (diag->tsensor.lock) {
4493  diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4494  diag->tsensor.cbfn(diag->tsensor.cbarg,
4495  diag->tsensor.status);
4496  diag->tsensor.lock = 0;
4497  }
4498 
4499  if (diag->block) {
4500  if (diag->timer_active) {
4501  bfa_timer_stop(&diag->timer);
4502  diag->timer_active = 0;
4503  }
4504 
4505  diag->status = BFA_STATUS_IOC_FAILURE;
4506  diag->cbfn(diag->cbarg, diag->status);
4507  diag->block = 0;
4508  }
4509  break;
4510 
4511  default:
4512  break;
4513  }
4514 }
4515 
4516 static void
4517 bfa_diag_memtest_done(void *cbarg)
4518 {
4519  struct bfa_diag_s *diag = cbarg;
4520  struct bfa_ioc_s *ioc = diag->ioc;
4521  struct bfa_diag_memtest_result *res = diag->result;
4522  u32 loff = 0;
4523  u32 pgnum, pgoff, i;
4524 
4525  pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4526  pgoff = PSS_SMEM_PGOFF(loff);
4527 
4528  writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4529 
4530  for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4531  sizeof(u32)); i++) {
4532  /* read test result from smem */
4533  *((u32 *) res + i) =
4534  bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4535  loff += sizeof(u32);
4536  }
4537 
4538  /* Reset IOC fwstates to BFI_IOC_UNINIT */
4539  bfa_ioc_reset_fwstate(ioc);
4540 
4541  res->status = swab32(res->status);
4542  bfa_trc(diag, res->status);
4543 
4544  if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4545  diag->status = BFA_STATUS_OK;
4546  else {
4547  diag->status = BFA_STATUS_MEMTEST_FAILED;
4548  res->addr = swab32(res->addr);
4549  res->exp = swab32(res->exp);
4550  res->act = swab32(res->act);
4551  res->err_status = swab32(res->err_status);
4552  res->err_status1 = swab32(res->err_status1);
4553  res->err_addr = swab32(res->err_addr);
4554  bfa_trc(diag, res->addr);
4555  bfa_trc(diag, res->exp);
4556  bfa_trc(diag, res->act);
4557  bfa_trc(diag, res->err_status);
4558  bfa_trc(diag, res->err_status1);
4559  bfa_trc(diag, res->err_addr);
4560  }
4561  diag->timer_active = 0;
4562  diag->cbfn(diag->cbarg, diag->status);
4563  diag->block = 0;
4564 }
4565 
4566 /*
4567  * Firmware ping
4568  */
4569 
4570 /*
4571  * Perform DMA test directly
4572  */
4573 static void
4574 diag_fwping_send(struct bfa_diag_s *diag)
4575 {
4576  struct bfi_diag_fwping_req_s *fwping_req;
4577  u32 i;
4578 
4579  bfa_trc(diag, diag->fwping.dbuf_pa);
4580 
4581  /* fill DMA area with pattern */
4582  for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4583  *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4584 
4585  /* Fill mbox msg */
4586  fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4587 
4588  /* Setup SG list */
4589  bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4590  diag->fwping.dbuf_pa);
4591  /* Set up dma count */
4592  fwping_req->count = cpu_to_be32(diag->fwping.count);
4593  /* Set up data pattern */
4594  fwping_req->data = diag->fwping.data;
4595 
4596  /* build host command */
4597  bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4598  bfa_ioc_portid(diag->ioc));
4599 
4600  /* send mbox cmd */
4601  bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4602 }
4603 
4604 static void
4605 diag_fwping_comp(struct bfa_diag_s *diag,
4606  struct bfi_diag_fwping_rsp_s *diag_rsp)
4607 {
4608  u32 rsp_data = diag_rsp->data;
4609  u8 rsp_dma_status = diag_rsp->dma_status;
4610 
4611  bfa_trc(diag, rsp_data);
4612  bfa_trc(diag, rsp_dma_status);
4613 
4614  if (rsp_dma_status == BFA_STATUS_OK) {
4615  u32 i, pat;
4616  pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4617  diag->fwping.data;
4618  /* Check mbox data */
4619  if (diag->fwping.data != rsp_data) {
4620  bfa_trc(diag, rsp_data);
4621  diag->fwping.result->dmastatus =
4622  BFA_STATUS_DATACORRUPTED;
4623  diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4624  diag->fwping.cbfn(diag->fwping.cbarg,
4625  diag->fwping.status);
4626  diag->fwping.lock = 0;
4627  return;
4628  }
4629  /* Check dma pattern */
4630  for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4631  if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4632  bfa_trc(diag, i);
4633  bfa_trc(diag, pat);
4634  bfa_trc(diag,
4635  *((u32 *)diag->fwping.dbuf_kva + i));
4636  diag->fwping.result->dmastatus =
4637  BFA_STATUS_DATACORRUPTED;
4638  diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4639  diag->fwping.cbfn(diag->fwping.cbarg,
4640  diag->fwping.status);
4641  diag->fwping.lock = 0;
4642  return;
4643  }
4644  }
4645  diag->fwping.result->dmastatus = BFA_STATUS_OK;
4646  diag->fwping.status = BFA_STATUS_OK;
4647  diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4648  diag->fwping.lock = 0;
4649  } else {
4650  diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4651  diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4652  diag->fwping.lock = 0;
4653  }
4654 }
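The pattern check above follows a simple convention: firmware inverts the DMA pattern on every other ping, so the expected value depends only on the ping count parity. In isolation (a sketch mirroring the expression above):

	static u32
	fwping_expected_pattern(u32 count, u32 data)
	{
		/* odd counts leave the inverted pattern in the DMA buffer */
		return (count & 0x1) ? ~data : data;
	}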
4655 
4656 /*
4657  * Temperature Sensor
4658  */
4659 
4660 static void
4661 diag_tempsensor_send(struct bfa_diag_s *diag)
4662 {
4663  struct bfi_diag_ts_req_s *msg;
4664 
4665  msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4666  bfa_trc(diag, msg->temp);
4667  /* build host command */
4668  bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4669  bfa_ioc_portid(diag->ioc));
4670  /* send mbox cmd */
4671  bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4672 }
4673 
4674 static void
4675 diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4676 {
4677  if (!diag->tsensor.lock) {
4678  /* receiving response after ioc failure */
4679  bfa_trc(diag, diag->tsensor.lock);
4680  return;
4681  }
4682 
4683  /*
4684  * The ASIC junction tempsensor is a register read operation,
4685  * so it always returns OK.
4686  */
4687  diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4688  diag->tsensor.temp->ts_junc = rsp->ts_junc;
4689  diag->tsensor.temp->ts_brd = rsp->ts_brd;
4690 
4691  if (rsp->ts_brd) {
4692  /* tsensor.temp->status is brd_temp status */
4693  diag->tsensor.temp->status = rsp->status;
4694  if (rsp->status == BFA_STATUS_OK) {
4695  diag->tsensor.temp->brd_temp =
4696  be16_to_cpu(rsp->brd_temp);
4697  } else
4698  diag->tsensor.temp->brd_temp = 0;
4699  }
4700 
4701  bfa_trc(diag, rsp->status);
4702  bfa_trc(diag, rsp->ts_junc);
4703  bfa_trc(diag, rsp->temp);
4704  bfa_trc(diag, rsp->ts_brd);
4705  bfa_trc(diag, rsp->brd_temp);
4706 
4707  /* tsensor status is always good because we always have junction temp */
4708  diag->tsensor.status = BFA_STATUS_OK;
4709  diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4710  diag->tsensor.lock = 0;
4711 }
4712 
4713 /*
4714  * LED Test command
4715  */
4716 static void
4717 diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4718 {
4719  struct bfi_diag_ledtest_req_s *msg;
4720 
4721  msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4722  /* build host command */
4723  bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4724  bfa_ioc_portid(diag->ioc));
4725 
4726  /*
4727  * convert the freq from N blinks per 10 sec to
4728  * crossbow ontime value. We do it here because division is needed.
4729  */
4730  if (ledtest->freq)
4731  ledtest->freq = 500 / ledtest->freq;
4732 
4733  if (ledtest->freq == 0)
4734  ledtest->freq = 1;
4735 
4736  bfa_trc(diag, ledtest->freq);
4737  /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4738  msg->cmd = (u8) ledtest->cmd;
4739  msg->color = (u8) ledtest->color;
4740  msg->portid = bfa_ioc_portid(diag->ioc);
4741  msg->led = ledtest->led;
4742  msg->freq = cpu_to_be16(ledtest->freq);
4743 
4744  /* send mbox cmd */
4745  bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4746 }
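A worked example of the frequency conversion above, as a standalone sketch (the helper name is hypothetical):

	static u16
	ledtest_ontime(u16 blinks_per_10s)
	{
		/* e.g. 5 blinks per 10 s gives 500 / 5 == 100 ontime units;
		 * a zero result is clamped to 1 so the LED still blinks */
		u16 freq = blinks_per_10s ? 500 / blinks_per_10s : 0;

		return freq ? freq : 1;
	}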
4747 
4748 static void
4749 diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4750 {
4751  bfa_trc(diag, diag->ledtest.lock);
4752  diag->ledtest.lock = BFA_FALSE;
4753  /* no bfa_cb_queue is needed because driver is not waiting */
4754 }
4755 
4756 /*
4757  * Port beaconing
4758  */
4759 static void
4760 diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4761 {
4762  struct bfi_diag_portbeacon_req_s *msg;
4763 
4764  msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
4765  /* build host command */
4766  bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
4767  bfa_ioc_portid(diag->ioc));
4768  msg->beacon = beacon;
4769  msg->period = cpu_to_be32(sec);
4770  /* send mbox cmd */
4771  bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
4772 }
4773 
4774 static void
4775 diag_portbeacon_comp(struct bfa_diag_s *diag)
4776 {
4777  bfa_trc(diag, diag->beacon.state);
4778  diag->beacon.state = BFA_FALSE;
4779  if (diag->cbfn_beacon)
4780  diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4781 }
4782 
4783 /*
4784  * Diag mbox handler
4785  */
4786 void
4787 bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
4788 {
4789  struct bfa_diag_s *diag = diagarg;
4790 
4791  switch (msg->mh.msg_id) {
4792  case BFI_DIAG_I2H_PORTBEACON:
4793  diag_portbeacon_comp(diag);
4794  break;
4795  case BFI_DIAG_I2H_FWPING:
4796  diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
4797  break;
4798  case BFI_DIAG_I2H_TEMPSENSOR:
4799  diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
4800  break;
4801  case BFI_DIAG_I2H_LEDTEST:
4802  diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
4803  break;
4804  default:
4805  bfa_trc(diag, msg->mh.msg_id);
4806  WARN_ON(1);
4807  }
4808 }
4809 
4810 /*
4811  * Gen RAM Test
4812  *
4813  * @param[in] *diag - diag data struct
4814  * @param[in] *memtest - mem test params input from upper layer,
4815  * @param[in] pattern - mem test pattern
4816  * @param[in] *result - mem test result
4817  * @param[in] cbfn - mem test callback function
4818  * @param[in] cbarg - callback function arg
4819  *
4820  * @param[out]
4821  */
4822 bfa_status_t
4823 bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4824  u32 pattern, struct bfa_diag_memtest_result *result,
4825  bfa_cb_diag_t cbfn, void *cbarg)
4826 {
4827  u32 memtest_tov;
4828 
4829  bfa_trc(diag, pattern);
4830 
4831  if (!bfa_ioc_adapter_is_disabled(diag->ioc))
4832  return BFA_STATUS_ADAPTER_ENABLED;
4833 
4834  /* check to see if there is another destructive diag cmd running */
4835  if (diag->block) {
4836  bfa_trc(diag, diag->block);
4837  return BFA_STATUS_DEVBUSY;
4838  } else
4839  diag->block = 1;
4840 
4841  diag->result = result;
4842  diag->cbfn = cbfn;
4843  diag->cbarg = cbarg;
4844 
4845  /* download memtest code and take LPU0 out of reset */
4846  bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4847 
4848  memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
4849  CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
4850  bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4851  bfa_diag_memtest_done, diag, memtest_tov);
4852  diag->timer_active = 1;
4853  return BFA_STATUS_OK;
4854 }
4855 
4856 /*
4857  * DIAG firmware ping command
4858  *
4859  * @param[in] *diag - diag data struct
4860  * @param[in] cnt - dma loop count for testing PCIE
4861  * @param[in] data - data pattern to pass in fw
4862  * @param[in] *result - pointer to bfa_diag_fwping_result_t data struct
4863  * @param[in] cbfn - callback function
4864  * @param[in] *cbarg - callback function arg
4865  *
4866  * @param[out]
4867  */
4868 bfa_status_t
4869 bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
4870  struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
4871  void *cbarg)
4872 {
4873  bfa_trc(diag, cnt);
4874  bfa_trc(diag, data);
4875 
4876  if (!bfa_ioc_is_operational(diag->ioc))
4877  return BFA_STATUS_IOC_NON_OP;
4878 
4879  if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
4880  ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
4881  return BFA_STATUS_CMD_NOTSUPP;
4882 
4883  /* check to see if there is another destructive diag cmd running */
4884  if (diag->block || diag->fwping.lock) {
4885  bfa_trc(diag, diag->block);
4886  bfa_trc(diag, diag->fwping.lock);
4887  return BFA_STATUS_DEVBUSY;
4888  }
4889 
4890  /* Initialization */
4891  diag->fwping.lock = 1;
4892  diag->fwping.cbfn = cbfn;
4893  diag->fwping.cbarg = cbarg;
4894  diag->fwping.result = result;
4895  diag->fwping.data = data;
4896  diag->fwping.count = cnt;
4897 
4898  /* Init test results */
4899  diag->fwping.result->data = 0;
4900  diag->fwping.result->status = BFA_STATUS_OK;
4901 
4902  /* kick off the first ping */
4903  diag_fwping_send(diag);
4904  return BFA_STATUS_OK;
4905 }
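A hypothetical caller of the firmware ping API above (the wrapper name and the chosen count and pattern are assumptions for illustration):

	/* Exercise the PCIe DMA path 64 times with a fixed test pattern. */
	static bfa_status_t
	my_pcie_check(struct bfa_diag_s *diag,
			struct bfa_diag_results_fwping *res,
			bfa_cb_diag_t cbfn, void *cbarg)
	{
		return bfa_diag_fwping(diag, 64, 0xA5A5A5A5, res, cbfn, cbarg);
	}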
4906 
4907 /*
4908  * Read Temperature Sensor
4909  *
4910  * @param[in] *diag - diag data struct
4911  * @param[in] *result - pointer to bfa_diag_temp_t data struct
4912  * @param[in] cbfn - callback function
4913  * @param[in] *cbarg - callback function arg
4914  *
4915  * @param[out]
4916  */
4917 bfa_status_t
4918 bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4919  struct bfa_diag_results_tempsensor_s *result,
4920  bfa_cb_diag_t cbfn, void *cbarg)
4921 {
4922  /* check to see if there is a destructive diag cmd running */
4923  if (diag->block || diag->tsensor.lock) {
4924  bfa_trc(diag, diag->block);
4925  bfa_trc(diag, diag->tsensor.lock);
4926  return BFA_STATUS_DEVBUSY;
4927  }
4928 
4929  if (!bfa_ioc_is_operational(diag->ioc))
4930  return BFA_STATUS_IOC_NON_OP;
4931 
4932  /* Init diag mod params */
4933  diag->tsensor.lock = 1;
4934  diag->tsensor.temp = result;
4935  diag->tsensor.cbfn = cbfn;
4936  diag->tsensor.cbarg = cbarg;
4937  diag->tsensor.status = BFA_STATUS_OK;
4938 
4939  /* Send msg to fw */
4940  diag_tempsensor_send(diag);
4941 
4942  return BFA_STATUS_OK;
4943 }
4944 
4945 /*
4946  * LED Test command
4947  *
4948  * @param[in] *diag - diag data struct
4949  * @param[in] *ledtest - pointer to ledtest data structure
4950  *
4951  * @param[out]
4952  */
4953 bfa_status_t
4954 bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4955 {
4956  bfa_trc(diag, ledtest->cmd);
4957 
4958  if (!bfa_ioc_is_operational(diag->ioc))
4959  return BFA_STATUS_IOC_NON_OP;
4960 
4961  if (diag->beacon.state)
4962  return BFA_STATUS_BEACON_ON;
4963 
4964  if (diag->ledtest.lock)
4965  return BFA_STATUS_LEDTEST_OP;
4966 
4967  /* Send msg to fw */
4968  diag->ledtest.lock = BFA_TRUE;
4969  diag_ledtest_send(diag, ledtest);
4970 
4971  return BFA_STATUS_OK;
4972 }
4973 
4974 /*
4975  * Port beaconing command
4976  *
4977  * @param[in] *diag - diag data struct
4978  * @param[in] beacon - port beaconing 1:ON 0:OFF
4979  * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
4980  * @param[in] sec - beaconing duration in seconds
4981  *
4982  * @param[out]
4983  */
4984 bfa_status_t
4985 bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
4986  bfa_boolean_t link_e2e_beacon, uint32_t sec)
4987 {
4988  bfa_trc(diag, beacon);
4989  bfa_trc(diag, link_e2e_beacon);
4990  bfa_trc(diag, sec);
4991 
4992  if (!bfa_ioc_is_operational(diag->ioc))
4993  return BFA_STATUS_IOC_NON_OP;
4994 
4995  if (diag->ledtest.lock)
4996  return BFA_STATUS_LEDTEST_OP;
4997 
4998  if (diag->beacon.state && beacon) /* beacon already on */
4999  return BFA_STATUS_BEACON_ON;
5000 
5001  diag->beacon.state = beacon;
5002  diag->beacon.link_e2e = link_e2e_beacon;
5003  if (diag->cbfn_beacon)
5004  diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5005 
5006  /* Send msg to fw */
5007  diag_portbeacon_send(diag, beacon, sec);
5008 
5009  return BFA_STATUS_OK;
5010 }
5011 
5012 /*
5013  * Return DMA memory needed by diag module.
5014  */
5015 u32
5016 bfa_diag_meminfo(void)
5017 {
5018  return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5019 }
5020 
5021 /*
5022  * Attach virtual and physical memory for Diag.
5023  */
5024 void
5025 bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5026  bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5027 {
5028  diag->dev = dev;
5029  diag->ioc = ioc;
5030  diag->trcmod = trcmod;
5031 
5032  diag->block = 0;
5033  diag->cbfn = NULL;
5034  diag->cbarg = NULL;
5035  diag->result = NULL;
5036  diag->cbfn_beacon = cbfn_beacon;
5037 
5038  bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5039  bfa_q_qe_init(&diag->ioc_notify);
5040  bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5041  list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5042 }
5043 
5044 void
5045 bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5046 {
5047  diag->fwping.dbuf_kva = dm_kva;
5048  diag->fwping.dbuf_pa = dm_pa;
5049  memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5050 }
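The meminfo/memclaim pair above follows the usual BFA pattern: the caller sizes the DMA area with bfa_diag_meminfo() and hands both addresses back. A sketch assuming a coherent DMA allocation (my_diag_setup and the direct dma_alloc_coherent() call are illustrative; bfad actually carves these buffers out of larger per-module DMA segments):

	#include <linux/dma-mapping.h>

	static int
	my_diag_setup(struct bfa_diag_s *diag, struct device *dev)
	{
		u32 sz = bfa_diag_meminfo();
		dma_addr_t pa;
		u8 *kva = dma_alloc_coherent(dev, sz, &pa, GFP_KERNEL);

		if (!kva)
			return -ENOMEM;
		bfa_diag_memclaim(diag, kva, pa);
		return 0;
	}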
5051 
5052 /*
5053  * PHY module specific
5054  */
5055 #define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
5056 #define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
5057 
5058 static void
5059 bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5060 {
5061  int i, m = sz >> 2;
5062 
5063  for (i = 0; i < m; i++)
5064  obuf[i] = be32_to_cpu(ibuf[i]);
5065 }
5066 
5067 static bfa_boolean_t
5068 bfa_phy_present(struct bfa_phy_s *phy)
5069 {
5070  return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5071 }
5072 
5073 static void
5074 bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5075 {
5076  struct bfa_phy_s *phy = cbarg;
5077 
5078  bfa_trc(phy, event);
5079 
5080  switch (event) {
5081  case BFA_IOC_E_DISABLED:
5082  case BFA_IOC_E_FAILED:
5083  if (phy->op_busy) {
5084  phy->status = BFA_STATUS_IOC_FAILURE;
5085  phy->cbfn(phy->cbarg, phy->status);
5086  phy->op_busy = 0;
5087  }
5088  break;
5089 
5090  default:
5091  break;
5092  }
5093 }
5094 
5095 /*
5096  * Send phy attribute query request.
5097  *
5098  * @param[in] cbarg - callback argument
5099  */
5100 static void
5101 bfa_phy_query_send(void *cbarg)
5102 {
5103  struct bfa_phy_s *phy = cbarg;
5104  struct bfi_phy_query_req_s *msg =
5105  (struct bfi_phy_query_req_s *) phy->mb.msg;
5106 
5107  msg->instance = phy->instance;
5108  bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5109  bfa_ioc_portid(phy->ioc));
5110  bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5111  bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5112 }
5113 
5114 /*
5115  * Send phy write request.
5116  *
5117  * @param[in] cbarg - callback argument
5118  */
5119 static void
5120 bfa_phy_write_send(void *cbarg)
5121 {
5122  struct bfa_phy_s *phy = cbarg;
5123  struct bfi_phy_write_req_s *msg =
5124  (struct bfi_phy_write_req_s *) phy->mb.msg;
5125  u32 len;
5126  u16 *buf, *dbuf;
5127  int i, sz;
5128 
5129  msg->instance = phy->instance;
5130  msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5131  len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5132  phy->residue : BFA_PHY_DMA_BUF_SZ;
5133  msg->length = cpu_to_be32(len);
5134 
5135  /* indicate if it's the last msg of the whole write operation */
5136  msg->last = (len == phy->residue) ? 1 : 0;
5137 
5138  bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5139  bfa_ioc_portid(phy->ioc));
5140  bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5141 
5142  buf = (u16 *) (phy->ubuf + phy->offset);
5143  dbuf = (u16 *)phy->dbuf_kva;
5144  sz = len >> 1;
5145  for (i = 0; i < sz; i++)
5146  buf[i] = cpu_to_be16(dbuf[i]);
5147 
5148  bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5149 
5150  phy->residue -= len;
5151  phy->offset += len;
5152 }
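Each send above moves at most one 8 KB DMA buffer worth of data and advances phy->offset and phy->residue, with msg->last flagging the final chunk; the write-response handler keeps calling bfa_phy_write_send() until residue reaches zero. The resulting message count, in isolation (sketch):

	static u32
	phy_write_msgs(u32 len)
	{
		/* e.g. a 20 KB image splits into 8 KB + 8 KB + 4 KB, 3 msgs */
		return (len + BFA_PHY_DMA_BUF_SZ - 1) / BFA_PHY_DMA_BUF_SZ;
	}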
5153 
5154 /*
5155  * Send phy read request.
5156  *
5157  * @param[in] cbarg - callback argument
5158  */
5159 static void
5160 bfa_phy_read_send(void *cbarg)
5161 {
5162  struct bfa_phy_s *phy = cbarg;
5163  struct bfi_phy_read_req_s *msg =
5164  (struct bfi_phy_read_req_s *) phy->mb.msg;
5165  u32 len;
5166 
5167  msg->instance = phy->instance;
5168  msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5169  len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5170  phy->residue : BFA_PHY_DMA_BUF_SZ;
5171  msg->length = cpu_to_be32(len);
5172  bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5173  bfa_ioc_portid(phy->ioc));
5174  bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5175  bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5176 }
5177 
5178 /*
5179  * Send phy stats request.
5180  *
5181  * @param[in] cbarg - callback argument
5182  */
5183 static void
5184 bfa_phy_stats_send(void *cbarg)
5185 {
5186  struct bfa_phy_s *phy = cbarg;
5187  struct bfi_phy_stats_req_s *msg =
5188  (struct bfi_phy_stats_req_s *) phy->mb.msg;
5189 
5190  msg->instance = phy->instance;
5191  bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5192  bfa_ioc_portid(phy->ioc));
5193  bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5194  bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5195 }
5196 
5197 /*
5198  * Phy memory info API.
5199  *
5200  * @param[in] mincfg - minimal cfg variable
5201  */
5202 u32
5203 bfa_phy_meminfo(bfa_boolean_t mincfg)
5204 {
5205  /* min driver doesn't need phy */
5206  if (mincfg)
5207  return 0;
5208 
5209  return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5210 }
5211 
5212 /*
5213  * Phy attach API.
5214  *
5215  * @param[in] phy - phy structure
5216  * @param[in] ioc - ioc structure
5217  * @param[in] dev - device structure
5218  * @param[in] trcmod - trace module
5219  * @param[in] mincfg - minimal cfg variable
5220  */
5221 void
5222 bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5223  struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5224 {
5225  phy->ioc = ioc;
5226  phy->trcmod = trcmod;
5227  phy->cbfn = NULL;
5228  phy->cbarg = NULL;
5229  phy->op_busy = 0;
5230 
5231  bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5232  bfa_q_qe_init(&phy->ioc_notify);
5233  bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5234  list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5235 
5236  /* min driver doesn't need phy */
5237  if (mincfg) {
5238  phy->dbuf_kva = NULL;
5239  phy->dbuf_pa = 0;
5240  }
5241 }
5242 
5243 /*
5244  * Claim memory for phy
5245  *
5246  * @param[in] phy - phy structure
5247  * @param[in] dm_kva - pointer to virtual memory address
5248  * @param[in] dm_pa - physical memory address
5249  * @param[in] mincfg - minimal cfg variable
5250  */
5251 void
5252 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5253  bfa_boolean_t mincfg)
5254 {
5255  if (mincfg)
5256  return;
5257 
5258  phy->dbuf_kva = dm_kva;
5259  phy->dbuf_pa = dm_pa;
5260  memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5261  dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5262  dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5263 }
5264 
5265 bfa_boolean_t
5266 bfa_phy_busy(struct bfa_ioc_s *ioc)
5267 {
5268  void __iomem *rb;
5269 
5270  rb = bfa_ioc_bar0(ioc);
5271  return readl(rb + BFA_PHY_LOCK_STATUS);
5272 }
5273 
5274 /*
5275  * Get phy attribute.
5276  *
5277  * @param[in] phy - phy structure
5278  * @param[in] attr - phy attribute structure
5279  * @param[in] cbfn - callback function
5280  * @param[in] cbarg - callback argument
5281  *
5282  * Return status.
5283  */
5284 bfa_status_t
5285 bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5286  struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5287 {
5288  bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5289  bfa_trc(phy, instance);
5290 
5291  if (!bfa_phy_present(phy))
5292  return BFA_STATUS_PHY_NOT_PRESENT;
5293 
5294  if (!bfa_ioc_is_operational(phy->ioc))
5295  return BFA_STATUS_IOC_NON_OP;
5296 
5297  if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5298  bfa_trc(phy, phy->op_busy);
5299  return BFA_STATUS_DEVBUSY;
5300  }
5301 
5302  phy->op_busy = 1;
5303  phy->cbfn = cbfn;
5304  phy->cbarg = cbarg;
5305  phy->instance = instance;
5306  phy->ubuf = (uint8_t *) attr;
5307  bfa_phy_query_send(phy);
5308 
5309  return BFA_STATUS_OK;
5310 }
5311 
5312 /*
5313  * Get phy stats.
5314  *
5315  * @param[in] phy - phy structure
5316  * @param[in] instance - phy image instance
5317  * @param[in] stats - pointer to phy stats
5318  * @param[in] cbfn - callback function
5319  * @param[in] cbarg - callback argument
5320  *
5321  * Return status.
5322  */
5323 bfa_status_t
5324 bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5325  struct bfa_phy_stats_s *stats,
5326  bfa_cb_phy_t cbfn, void *cbarg)
5327 {
5328  bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5329  bfa_trc(phy, instance);
5330 
5331  if (!bfa_phy_present(phy))
5332  return BFA_STATUS_PHY_NOT_PRESENT;
5333 
5334  if (!bfa_ioc_is_operational(phy->ioc))
5335  return BFA_STATUS_IOC_NON_OP;
5336 
5337  if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5338  bfa_trc(phy, phy->op_busy);
5339  return BFA_STATUS_DEVBUSY;
5340  }
5341 
5342  phy->op_busy = 1;
5343  phy->cbfn = cbfn;
5344  phy->cbarg = cbarg;
5345  phy->instance = instance;
5346  phy->ubuf = (u8 *) stats;
5347  bfa_phy_stats_send(phy);
5348 
5349  return BFA_STATUS_OK;
5350 }
5351 
5352 /*
5353  * Update phy image.
5354  *
5355  * @param[in] phy - phy structure
5356  * @param[in] instance - phy image instance
5357  * @param[in] buf - update data buffer
5358  * @param[in] len - data buffer length
5359  * @param[in] offset - offset relative to starting address
5360  * @param[in] cbfn - callback function
5361  * @param[in] cbarg - callback argument
5362  *
5363  * Return status.
5364  */
5365 bfa_status_t
5366 bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5367  void *buf, u32 len, u32 offset,
5368  bfa_cb_phy_t cbfn, void *cbarg)
5369 {
5370  bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5371  bfa_trc(phy, instance);
5372  bfa_trc(phy, len);
5373  bfa_trc(phy, offset);
5374 
5375  if (!bfa_phy_present(phy))
5376  return BFA_STATUS_PHY_NOT_PRESENT;
5377 
5378  if (!bfa_ioc_is_operational(phy->ioc))
5379  return BFA_STATUS_IOC_NON_OP;
5380 
5381  /* 'len' must be in word (4-byte) boundary */
5382  if (!len || (len & 0x03))
5383  return BFA_STATUS_FAILED;
5384 
5385  if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5386  bfa_trc(phy, phy->op_busy);
5387  return BFA_STATUS_DEVBUSY;
5388  }
5389 
5390  phy->op_busy = 1;
5391  phy->cbfn = cbfn;
5392  phy->cbarg = cbarg;
5393  phy->instance = instance;
5394  phy->residue = len;
5395  phy->offset = 0;
5396  phy->addr_off = offset;
5397  phy->ubuf = buf;
5398 
5399  bfa_phy_write_send(phy);
5400  return BFA_STATUS_OK;
5401 }
5402 
5403 /*
5404  * Read phy image.
5405  *
5406  * @param[in] phy - phy structure
5407  * @param[in] instance - phy image instance
5408  * @param[in] buf - read data buffer
5409  * @param[in] len - data buffer length
5410  * @param[in] offset - offset relative to starting address
5411  * @param[in] cbfn - callback function
5412  * @param[in] cbarg - callback argument
5413  *
5414  * Return status.
5415  */
5416 bfa_status_t
5417 bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5418  void *buf, u32 len, u32 offset,
5419  bfa_cb_phy_t cbfn, void *cbarg)
5420 {
5421  bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5422  bfa_trc(phy, instance);
5423  bfa_trc(phy, len);
5424  bfa_trc(phy, offset);
5425 
5426  if (!bfa_phy_present(phy))
5427  return BFA_STATUS_PHY_NOT_PRESENT;
5428 
5429  if (!bfa_ioc_is_operational(phy->ioc))
5430  return BFA_STATUS_IOC_NON_OP;
5431 
5432  /* 'len' must be in word (4-byte) boundary */
5433  if (!len || (len & 0x03))
5434  return BFA_STATUS_FAILED;
5435 
5436  if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5437  bfa_trc(phy, phy->op_busy);
5438  return BFA_STATUS_DEVBUSY;
5439  }
5440 
5441  phy->op_busy = 1;
5442  phy->cbfn = cbfn;
5443  phy->cbarg = cbarg;
5444  phy->instance = instance;
5445  phy->residue = len;
5446  phy->offset = 0;
5447  phy->addr_off = offset;
5448  phy->ubuf = buf;
5449  bfa_phy_read_send(phy);
5450 
5451  return BFA_STATUS_OK;
5452 }
5453 
5454 /*
5455  * Process phy response messages upon receiving interrupts.
5456  *
5457  * @param[in] phyarg - phy structure
5458  * @param[in] msg - message structure
5459  */
5460 void
5461 bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5462 {
5463  struct bfa_phy_s *phy = phyarg;
5464  u32 status;
5465 
5466  union {
5467  struct bfi_phy_query_rsp_s *query;
5468  struct bfi_phy_stats_rsp_s *stats;
5469  struct bfi_phy_write_rsp_s *write;
5470  struct bfi_phy_read_rsp_s *read;
5471  struct bfi_mbmsg_s *msg;
5472  } m;
5473 
5474  m.msg = msg;
5475  bfa_trc(phy, msg->mh.msg_id);
5476 
5477  if (!phy->op_busy) {
5478  /* receiving response after ioc failure */
5479  bfa_trc(phy, 0x9999);
5480  return;
5481  }
5482 
5483  switch (msg->mh.msg_id) {
5484  case BFI_PHY_I2H_QUERY_RSP:
5485  status = be32_to_cpu(m.query->status);
5486  bfa_trc(phy, status);
5487 
5488  if (status == BFA_STATUS_OK) {
5489  struct bfa_phy_attr_s *attr =
5490  (struct bfa_phy_attr_s *) phy->ubuf;
5491  bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5492  sizeof(struct bfa_phy_attr_s));
5493  bfa_trc(phy, attr->status);
5494  bfa_trc(phy, attr->length);
5495  }
5496 
5497  phy->status = status;
5498  phy->op_busy = 0;
5499  if (phy->cbfn)
5500  phy->cbfn(phy->cbarg, phy->status);
5501  break;
5502  case BFI_PHY_I2H_STATS_RSP:
5503  status = be32_to_cpu(m.stats->status);
5504  bfa_trc(phy, status);
5505 
5506  if (status == BFA_STATUS_OK) {
5507  struct bfa_phy_stats_s *stats =
5508  (struct bfa_phy_stats_s *) phy->ubuf;
5509  bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5510  sizeof(struct bfa_phy_stats_s));
5511  bfa_trc(phy, stats->status);
5512  }
5513 
5514  phy->status = status;
5515  phy->op_busy = 0;
5516  if (phy->cbfn)
5517  phy->cbfn(phy->cbarg, phy->status);
5518  break;
5519  case BFI_PHY_I2H_WRITE_RSP:
5520  status = be32_to_cpu(m.write->status);
5521  bfa_trc(phy, status);
5522 
5523  if (status != BFA_STATUS_OK || phy->residue == 0) {
5524  phy->status = status;
5525  phy->op_busy = 0;
5526  if (phy->cbfn)
5527  phy->cbfn(phy->cbarg, phy->status);
5528  } else {
5529  bfa_trc(phy, phy->offset);
5530  bfa_phy_write_send(phy);
5531  }
5532  break;
5533  case BFI_PHY_I2H_READ_RSP:
5534  status = be32_to_cpu(m.read->status);
5535  bfa_trc(phy, status);
5536 
5537  if (status != BFA_STATUS_OK) {
5538  phy->status = status;
5539  phy->op_busy = 0;
5540  if (phy->cbfn)
5541  phy->cbfn(phy->cbarg, phy->status);
5542  } else {
5543  u32 len = be32_to_cpu(m.read->length);
5544  u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5545  u16 *dbuf = (u16 *)phy->dbuf_kva;
5546  int i, sz = len >> 1;
5547 
5548  bfa_trc(phy, phy->offset);
5549  bfa_trc(phy, len);
5550 
5551  for (i = 0; i < sz; i++)
5552  buf[i] = be16_to_cpu(dbuf[i]);
5553 
5554  phy->residue -= len;
5555  phy->offset += len;
5556 
5557  if (phy->residue == 0) {
5558  phy->status = status;
5559  phy->op_busy = 0;
5560  if (phy->cbfn)
5561  phy->cbfn(phy->cbarg, phy->status);
5562  } else
5563  bfa_phy_read_send(phy);
5564  }
5565  break;
5566  default:
5567  WARN_ON(1);
5568  }
5569 }
5570 
5571 /*
5572  * DCONF module specific
5573  */
5574 
5575 BFA_MODULE(dconf);
5576 
5577 /*
5578  * DCONF state machine events
5579  */
5580 enum bfa_dconf_event {
5581  BFA_DCONF_SM_INIT = 1, /* dconf Init */
5582  BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
5583  BFA_DCONF_SM_WR = 3, /* binding change, map */
5584  BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */
5585  BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
5586  BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
5587 };
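As a reading aid, the main transitions implemented below (a summary, not source text): uninit moves to flash_read on INIT; flash_read moves to ready on FLASH_COMP; ready moves to dirty on WR; dirty moves to sync on TIMEOUT (the flash write); sync returns to ready on FLASH_COMP. EXIT from dirty or sync funnels through final_sync, and IOCDISABLE parks a dirty config in iocdown_dirty until the next INIT restarts the update timer.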
5588 
5589 /* forward declaration of DCONF state machine */
5590 static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5591  enum bfa_dconf_event event);
5592 static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5593  enum bfa_dconf_event event);
5594 static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5595  enum bfa_dconf_event event);
5596 static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5597  enum bfa_dconf_event event);
5598 static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5599  enum bfa_dconf_event event);
5600 static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5601  enum bfa_dconf_event event);
5602 static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5603  enum bfa_dconf_event event);
5604 
5605 static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5606 static void bfa_dconf_timer(void *cbarg);
5607 static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5608 static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5609 
5610 /*
5611  * Beginning state of dconf module. Waiting for an event to start.
5612  */
5613 static void
5614 bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5615 {
5616  bfa_status_t bfa_status;
5617  bfa_trc(dconf->bfa, event);
5618 
5619  switch (event) {
5620  case BFA_DCONF_SM_INIT:
5621  if (dconf->min_cfg) {
5622  bfa_trc(dconf->bfa, dconf->min_cfg);
5623  bfa_fsm_send_event(&dconf->bfa->iocfc,
5624  IOCFC_E_DCONF_DONE);
5625  return;
5626  }
5627  bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5628  bfa_timer_start(dconf->bfa, &dconf->timer,
5629  bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5630  bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5631  BFA_FLASH_PART_DRV, dconf->instance,
5632  dconf->dconf,
5633  sizeof(struct bfa_dconf_s), 0,
5634  bfa_dconf_init_cb, dconf->bfa);
5635  if (bfa_status != BFA_STATUS_OK) {
5636  bfa_timer_stop(&dconf->timer);
5637  bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5638  bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5639  return;
5640  }
5641  break;
5642  case BFA_DCONF_SM_EXIT:
5643  bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5644  case BFA_DCONF_SM_IOCDISABLE:
5645  case BFA_DCONF_SM_WR:
5646  case BFA_DCONF_SM_FLASH_COMP:
5647  break;
5648  default:
5649  bfa_sm_fault(dconf->bfa, event);
5650  }
5651 }
5652 
5653 /*
5654  * Read flash for dconf entries and make a call back to the driver once done.
5655  */
5656 static void
5657 bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5658  enum bfa_dconf_event event)
5659 {
5660  bfa_trc(dconf->bfa, event);
5661 
5662  switch (event) {
5663  case BFA_DCONF_SM_FLASH_COMP:
5664  bfa_timer_stop(&dconf->timer);
5665  bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5666  break;
5667  case BFA_DCONF_SM_TIMEOUT:
5668  bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5669  bfa_ioc_suspend(&dconf->bfa->ioc);
5670  break;
5671  case BFA_DCONF_SM_EXIT:
5672  bfa_timer_stop(&dconf->timer);
5673  bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5674  bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5675  break;
5676  case BFA_DCONF_SM_IOCDISABLE:
5677  bfa_timer_stop(&dconf->timer);
5678  bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5679  break;
5680  default:
5681  bfa_sm_fault(dconf->bfa, event);
5682  }
5683 }
5684 
5685 /*
5686  * DCONF Module is in ready state. Has completed the initialization.
5687  */
5688 static void
5689 bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5690 {
5691  bfa_trc(dconf->bfa, event);
5692 
5693  switch (event) {
5694  case BFA_DCONF_SM_WR:
5695  bfa_timer_start(dconf->bfa, &dconf->timer,
5696  bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5697  bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5698  break;
5699  case BFA_DCONF_SM_EXIT:
5700  bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5701  bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5702  break;
5703  case BFA_DCONF_SM_INIT:
5704  case BFA_DCONF_SM_IOCDISABLE:
5705  break;
5706  default:
5707  bfa_sm_fault(dconf->bfa, event);
5708  }
5709 }
5710 
5711 /*
5712  * entries are dirty, write back to the flash.
5713  */
5714 
5715 static void
5716 bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5717 {
5718  bfa_trc(dconf->bfa, event);
5719 
5720  switch (event) {
5721  case BFA_DCONF_SM_TIMEOUT:
5722  bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5723  bfa_dconf_flash_write(dconf);
5724  break;
5725  case BFA_DCONF_SM_WR:
5726  bfa_timer_stop(&dconf->timer);
5727  bfa_timer_start(dconf->bfa, &dconf->timer,
5728  bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5729  break;
5730  case BFA_DCONF_SM_EXIT:
5731  bfa_timer_stop(&dconf->timer);
5732  bfa_timer_start(dconf->bfa, &dconf->timer,
5733  bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5734  bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5735  bfa_dconf_flash_write(dconf);
5736  break;
5737  case BFA_DCONF_SM_FLASH_COMP:
5738  break;
5739  case BFA_DCONF_SM_IOCDISABLE:
5740  bfa_timer_stop(&dconf->timer);
5741  bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5742  break;
5743  default:
5744  bfa_sm_fault(dconf->bfa, event);
5745  }
5746 }
5747 
5748 /*
5749  * Sync the dconf entries to the flash.
5750  */
5751 static void
5752 bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5753  enum bfa_dconf_event event)
5754 {
5755  bfa_trc(dconf->bfa, event);
5756 
5757  switch (event) {
5758  case BFA_DCONF_SM_IOCDISABLE:
5759  case BFA_DCONF_SM_FLASH_COMP:
5760  bfa_timer_stop(&dconf->timer);
5761  case BFA_DCONF_SM_TIMEOUT:
5762  bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5763  bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5764  break;
5765  default:
5766  bfa_sm_fault(dconf->bfa, event);
5767  }
5768 }
5769 
5770 static void
5771 bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5772 {
5773  bfa_trc(dconf->bfa, event);
5774 
5775  switch (event) {
5776  case BFA_DCONF_SM_FLASH_COMP:
5777  bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5778  break;
5779  case BFA_DCONF_SM_WR:
5780  bfa_timer_start(dconf->bfa, &dconf->timer,
5781  bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5782  bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5783  break;
5784  case BFA_DCONF_SM_EXIT:
5785  bfa_timer_start(dconf->bfa, &dconf->timer,
5786  bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5787  bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5788  break;
5789  case BFA_DCONF_SM_IOCDISABLE:
5790  bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5791  break;
5792  default:
5793  bfa_sm_fault(dconf->bfa, event);
5794  }
5795 }
5796 
5797 static void
5798 bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5799  enum bfa_dconf_event event)
5800 {
5801  bfa_trc(dconf->bfa, event);
5802 
5803  switch (event) {
5804  case BFA_DCONF_SM_INIT:
5805  bfa_timer_start(dconf->bfa, &dconf->timer,
5806  bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5807  bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5808  break;
5809  case BFA_DCONF_SM_EXIT:
5810  bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5811  bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5812  break;
5813  case BFA_DCONF_SM_IOCDISABLE:
5814  break;
5815  default:
5816  bfa_sm_fault(dconf->bfa, event);
5817  }
5818 }
5819 
5820 /*
5821  * Compute and return memory needed by DRV_CFG module.
5822  */
5823 static void
5824 bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5825  struct bfa_s *bfa)
5826 {
5827  struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
5828 
5829  if (cfg->drvcfg.min_cfg)
5830  bfa_mem_kva_setup(meminfo, dconf_kva,
5831  sizeof(struct bfa_dconf_hdr_s));
5832  else
5833  bfa_mem_kva_setup(meminfo, dconf_kva,
5834  sizeof(struct bfa_dconf_s));
5835 }
5836 
5837 static void
5838 bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5839  struct bfa_pcidev_s *pcidev)
5840 {
5841  struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5842 
5843  dconf->bfad = bfad;
5844  dconf->bfa = bfa;
5845  dconf->instance = bfa->ioc.port_id;
5846  bfa_trc(bfa, dconf->instance);
5847 
5848  dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
5849  if (cfg->drvcfg.min_cfg) {
5850  bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
5851  dconf->min_cfg = BFA_TRUE;
5852  } else {
5853  dconf->min_cfg = BFA_FALSE;
5854  bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
5855  }
5856 
5857  bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
5858  bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5859 }
5860 
5861 static void
5862 bfa_dconf_init_cb(void *arg, bfa_status_t status)
5863 {
5864  struct bfa_s *bfa = arg;
5865  struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5866 
5867  if (status == BFA_STATUS_OK) {
5868  bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5869  if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
5870  dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
5871  if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5872  dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5873  }
5874  bfa_fsm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5875  bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
5876 }
5877 
5878 void
5879 bfa_dconf_modinit(struct bfa_s *bfa)
5880 {
5881  struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5882  bfa_fsm_send_event(dconf, BFA_DCONF_SM_INIT);
5883 }
5884 static void
5885 bfa_dconf_start(struct bfa_s *bfa)
5886 {
5887 }
5888 
5889 static void
5890 bfa_dconf_stop(struct bfa_s *bfa)
5891 {
5892 }
5893 
5894 static void bfa_dconf_timer(void *cbarg)
5895 {
5896  struct bfa_dconf_mod_s *dconf = cbarg;
5897  bfa_fsm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
5898 }
5899 static void
5900 bfa_dconf_iocdisable(struct bfa_s *bfa)
5901 {
5902  struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5903  bfa_fsm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
5904 }
5905 
5906 static void
5907 bfa_dconf_detach(struct bfa_s *bfa)
5908 {
5909 }
5910 
5911 static bfa_status_t
5912 bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
5913 {
5914  bfa_status_t bfa_status;
5915  bfa_trc(dconf->bfa, 0);
5916 
5917  bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
5918  BFA_FLASH_PART_DRV, dconf->instance,
5919  dconf->dconf, sizeof(struct bfa_dconf_s), 0,
5920  bfa_dconf_cbfn, dconf);
5921  if (bfa_status != BFA_STATUS_OK)
5922  WARN_ON(bfa_status);
5923  bfa_trc(dconf->bfa, bfa_status);
5924 
5925  return bfa_status;
5926 }
5927 
5928 bfa_status_t
5929 bfa_dconf_update(struct bfa_s *bfa)
5930 {
5931  struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5932  bfa_trc(dconf->bfa, 0);
5933  if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
5934  return BFA_STATUS_FAILED;
5935 
5936  if (dconf->min_cfg) {
5937  bfa_trc(dconf->bfa, dconf->min_cfg);
5938  return BFA_STATUS_FAILED;
5939  }
5940 
5941  bfa_fsm_send_event(BFA_DCONF_MOD(bfa), BFA_DCONF_SM_WR);
5942  return BFA_STATUS_OK;
5943 }
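A hypothetical use of the update entry point above (only bfa_dconf_update() is from this file; the caller is an assumption):

	/* Persist an in-memory dconf change: the WR event arms the
	 * BFA_DCONF_UPDATE_TOV timer and the flash write follows on
	 * timeout. Fails while the IOC is down or in min-cfg mode. */
	static void
	my_cfg_changed(struct bfa_s *bfa)
	{
		if (bfa_dconf_update(bfa) != BFA_STATUS_OK)
			bfa_trc(bfa, BFA_STATUS_FAILED);
	}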
5944 
5945 static void
5946 bfa_dconf_cbfn(void *arg, bfa_status_t status)
5947 {
5948  struct bfa_dconf_mod_s *dconf = arg;
5949  WARN_ON(status);
5950  bfa_fsm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5951 }
5952 
5953 void
5954 bfa_dconf_modexit(struct bfa_s *bfa)
5955 {
5956  struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5957  bfa_fsm_send_event(dconf, BFA_DCONF_SM_EXIT);
5958 }
5959 
5960 /*
5961  * FRU specific functions
5962  */
5963 
5964 #define BFA_FRU_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
5965 #define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
5966 #define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
5967 
5968 static void
5969 bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
5970 {
5971  struct bfa_fru_s *fru = cbarg;
5972 
5973  bfa_trc(fru, event);
5974 
5975  switch (event) {
5976  case BFA_IOC_E_DISABLED:
5977  case BFA_IOC_E_FAILED:
5978  if (fru->op_busy) {
5979  fru->status = BFA_STATUS_IOC_FAILURE;
5980  fru->cbfn(fru->cbarg, fru->status);
5981  fru->op_busy = 0;
5982  }
5983  break;
5984 
5985  default:
5986  break;
5987  }
5988 }
5989 
5990 /*
5991  * Send fru write request.
5992  *
5993  * @param[in] cbarg - callback argument
5994  */
5995 static void
5996 bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
5997 {
5998  struct bfa_fru_s *fru = cbarg;
5999  struct bfi_fru_write_req_s *msg =
6000  (struct bfi_fru_write_req_s *) fru->mb.msg;
6001  u32 len;
6002 
6003  msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6004  len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6005  fru->residue : BFA_FRU_DMA_BUF_SZ;
6006  msg->length = cpu_to_be32(len);
6007 
6008  /*
6009  * indicate if it's the last msg of the whole write operation
6010  */
6011  msg->last = (len == fru->residue) ? 1 : 0;
6012 
6013  bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6014  bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6015 
6016  memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
6017  bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6018 
6019  fru->residue -= len;
6020  fru->offset += len;
6021 }
6022 
6023 /*
6024  * Send fru read request.
6025  *
6026  * @param[in] cbarg - callback argument
6027  */
6028 static void
6029 bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6030 {
6031  struct bfa_fru_s *fru = cbarg;
6032  struct bfi_fru_read_req_s *msg =
6033  (struct bfi_fru_read_req_s *) fru->mb.msg;
6034  u32 len;
6035 
6036  msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6037  len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6038  fru->residue : BFA_FRU_DMA_BUF_SZ;
6039  msg->length = cpu_to_be32(len);
6040  bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6041  bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6042  bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6043 }
6044 
6045 /*
6046  * FRU memory info API.
6047  *
6048  * @param[in] mincfg - minimal cfg variable
6049  */
6050 u32
6051 bfa_fru_meminfo(bfa_boolean_t mincfg)
6052 {
6053  /* min driver doesn't need fru */
6054  if (mincfg)
6055  return 0;
6056 
6057  return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6058 }
6059 
6060 /*
6061  * FRU attach API.
6062  *
6063  * @param[in] fru - fru structure
6064  * @param[in] ioc - ioc structure
6065  * @param[in] dev - device structure
6066  * @param[in] trcmod - trace module
6067  * @param[in] mincfg - minimal cfg variable
6068  */
6069 void
6070 bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
6071  struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
6072 {
6073  fru->ioc = ioc;
6074  fru->trcmod = trcmod;
6075  fru->cbfn = NULL;
6076  fru->cbarg = NULL;
6077  fru->op_busy = 0;
6078 
6079  bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
6080  bfa_q_qe_init(&fru->ioc_notify);
6081  bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
6082  list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
6083 
6084  /* min driver doesn't need fru */
6085  if (mincfg) {
6086  fru->dbuf_kva = NULL;
6087  fru->dbuf_pa = 0;
6088  }
6089 }
6090 
6091 /*
6092  * Claim memory for fru
6093  *
6094  * @param[in] fru - fru structure
6095  * @param[in] dm_kva - pointer to virtual memory address
6096  * @param[in] dm_pa - physical memory address
6097  * @param[in] mincfg - minimal cfg variable
6098  */
6099 void
6100 bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
6101  bfa_boolean_t mincfg)
6102 {
6103  if (mincfg)
6104  return;
6105 
6106  fru->dbuf_kva = dm_kva;
6107  fru->dbuf_pa = dm_pa;
6108  memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
6109  dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6110  dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6111 }
6112 
6113 /*
6114  * Update fru vpd image.
6115  *
6116  * @param[in] fru - fru structure
6117  * @param[in] buf - update data buffer
6118  * @param[in] len - data buffer length
6119  * @param[in] offset - offset relative to starting address
6120  * @param[in] cbfn - callback function
6121  * @param[in] cbarg - callback argument
6122  *
6123  * Return status.
6124  */
6125 bfa_status_t
6126 bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6127  bfa_cb_fru_t cbfn, void *cbarg)
6128 {
6129  bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6130  bfa_trc(fru, len);
6131  bfa_trc(fru, offset);
6132 
6133  if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6134  return BFA_STATUS_FRU_NOT_PRESENT;
6135 
6136  if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6137  return BFA_STATUS_CMD_NOTSUPP;
6138 
6139  if (!bfa_ioc_is_operational(fru->ioc))
6140  return BFA_STATUS_IOC_NON_OP;
6141 
6142  if (fru->op_busy) {
6143  bfa_trc(fru, fru->op_busy);
6144  return BFA_STATUS_DEVBUSY;
6145  }
6146 
6147  fru->op_busy = 1;
6148 
6149  fru->cbfn = cbfn;
6150  fru->cbarg = cbarg;
6151  fru->residue = len;
6152  fru->offset = 0;
6153  fru->addr_off = offset;
6154  fru->ubuf = buf;
6155 
6156  bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6157 
6158  return BFA_STATUS_OK;
6159 }
6160 
6161 /*
6162  * Read fru vpd image.
6163  *
6164  * @param[in] fru - fru structure
6165  * @param[in] buf - read data buffer
6166  * @param[in] len - data buffer length
6167  * @param[in] offset - offset relative to starting address
6168  * @param[in] cbfn - callback function
6169  * @param[in] cbarg - callback argument
6170  *
6171  * Return status.
6172  */
6173 bfa_status_t
6174 bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6175  bfa_cb_fru_t cbfn, void *cbarg)
6176 {
6177  bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
6178  bfa_trc(fru, len);
6179  bfa_trc(fru, offset);
6180 
6181  if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6182  return BFA_STATUS_FRU_NOT_PRESENT;
6183 
6184  if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6185  return BFA_STATUS_CMD_NOTSUPP;
6186 
6187  if (!bfa_ioc_is_operational(fru->ioc))
6188  return BFA_STATUS_IOC_NON_OP;
6189 
6190  if (fru->op_busy) {
6191  bfa_trc(fru, fru->op_busy);
6192  return BFA_STATUS_DEVBUSY;
6193  }
6194 
6195  fru->op_busy = 1;
6196 
6197  fru->cbfn = cbfn;
6198  fru->cbarg = cbarg;
6199  fru->residue = len;
6200  fru->offset = 0;
6201  fru->addr_off = offset;
6202  fru->ubuf = buf;
6203  bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
6204 
6205  return BFA_STATUS_OK;
6206 }
6207 
6208 /*
6209  * Get maximum size fru vpd image.
6210  *
6211  * @param[in] fru - fru structure
6212  * @param[out] size - maximum size of fru vpd data
6213  *
6214  * Return status.
6215  */
6216 bfa_status_t
6217 bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
6218 {
6219  if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6220  return BFA_STATUS_FRU_NOT_PRESENT;
6221 
6222  if (!bfa_ioc_is_operational(fru->ioc))
6223  return BFA_STATUS_IOC_NON_OP;
6224 
6225  if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK)
6226  *max_size = BFA_FRU_CHINOOK_MAX_SIZE;
6227  else
6228  return BFA_STATUS_CMD_NOTSUPP;
6229  return BFA_STATUS_OK;
6230 }
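A sketch pairing the two VPD entry points above (the wrapper name is hypothetical; the buffer must hold max_size bytes):

	static bfa_status_t
	my_fruvpd_dump(struct bfa_fru_s *fru, void *buf,
			bfa_cb_fru_t cbfn, void *cbarg)
	{
		u32 max_size;
		bfa_status_t status = bfa_fruvpd_get_max_size(fru, &max_size);

		if (status != BFA_STATUS_OK)
			return status;
		/* the read is chunked internally into 8 KB DMA transfers */
		return bfa_fruvpd_read(fru, buf, max_size, 0, cbfn, cbarg);
	}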
6231 /*
6232  * tfru write.
6233  *
6234  * @param[in] fru - fru structure
6235  * @param[in] buf - update data buffer
6236  * @param[in] len - data buffer length
6237  * @param[in] offset - offset relative to starting address
6238  * @param[in] cbfn - callback function
6239  * @param[in] cbarg - callback argument
6240  *
6241  * Return status.
6242  */
6243 bfa_status_t
6244 bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6245  bfa_cb_fru_t cbfn, void *cbarg)
6246 {
6247  bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
6248  bfa_trc(fru, len);
6249  bfa_trc(fru, offset);
6250  bfa_trc(fru, *((u8 *) buf));
6251 
6252  if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6253  return BFA_STATUS_FRU_NOT_PRESENT;
6254 
6255  if (!bfa_ioc_is_operational(fru->ioc))
6256  return BFA_STATUS_IOC_NON_OP;
6257 
6258  if (fru->op_busy) {
6259  bfa_trc(fru, fru->op_busy);
6260  return BFA_STATUS_DEVBUSY;
6261  }
6262 
6263  fru->op_busy = 1;
6264 
6265  fru->cbfn = cbfn;
6266  fru->cbarg = cbarg;
6267  fru->residue = len;
6268  fru->offset = 0;
6269  fru->addr_off = offset;
6270  fru->ubuf = buf;
6271 
6272  bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
6273 
6274  return BFA_STATUS_OK;
6275 }
6276 
6277 /*
6278  * tfru read.
6279  *
6280  * @param[in] fru - fru structure
6281  * @param[in] buf - read data buffer
6282  * @param[in] len - data buffer length
6283  * @param[in] offset - offset relative to starting address
6284  * @param[in] cbfn - callback function
6285  * @param[in] cbarg - callback argument
6286  *
6287  * Return status.
6288  */
6289 bfa_status_t
6290 bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6291  bfa_cb_fru_t cbfn, void *cbarg)
6292 {
6293  bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
6294  bfa_trc(fru, len);
6295  bfa_trc(fru, offset);
6296 
6297  if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6298  return BFA_STATUS_FRU_NOT_PRESENT;
6299 
6300  if (!bfa_ioc_is_operational(fru->ioc))
6301  return BFA_STATUS_IOC_NON_OP;
6302 
6303  if (fru->op_busy) {
6304  bfa_trc(fru, fru->op_busy);
6305  return BFA_STATUS_DEVBUSY;
6306  }
6307 
6308  fru->op_busy = 1;
6309 
6310  fru->cbfn = cbfn;
6311  fru->cbarg = cbarg;
6312  fru->residue = len;
6313  fru->offset = 0;
6314  fru->addr_off = offset;
6315  fru->ubuf = buf;
6316  bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
6317 
6318  return BFA_STATUS_OK;
6319 }
6320 
6321 /*
6322  * Process fru response messages upon receiving interrupts.
6323  *
6324  * @param[in] fruarg - fru structure
6325  * @param[in] msg - message structure
6326  */
6327 void
6328 bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
6329 {
6330  struct bfa_fru_s *fru = fruarg;
6331  struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
6332  u32 status;
6333 
6334  bfa_trc(fru, msg->mh.msg_id);
6335 
6336  if (!fru->op_busy) {
6337  /*
6338  * receiving response after ioc failure
6339  */
6340  bfa_trc(fru, 0x9999);
6341  return;
6342  }
6343 
6344  switch (msg->mh.msg_id) {
6345  case BFI_FRUVPD_I2H_WRITE_RSP:
6346  case BFI_TFRU_I2H_WRITE_RSP:
6347  status = be32_to_cpu(rsp->status);
6348  bfa_trc(fru, status);
6349 
6350  if (status != BFA_STATUS_OK || fru->residue == 0) {
6351  fru->status = status;
6352  fru->op_busy = 0;
6353  if (fru->cbfn)
6354  fru->cbfn(fru->cbarg, fru->status);
6355  } else {
6356  bfa_trc(fru, fru->offset);
6357  if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
6358  bfa_fru_write_send(fru,
6359  BFI_FRUVPD_H2I_WRITE_REQ);
6360  else
6361  bfa_fru_write_send(fru,
6362  BFI_TFRU_H2I_WRITE_REQ);
6363  }
6364  break;
6365  case BFI_FRUVPD_I2H_READ_RSP:
6366  case BFI_TFRU_I2H_READ_RSP:
6367  status = be32_to_cpu(rsp->status);
6368  bfa_trc(fru, status);
6369 
6370  if (status != BFA_STATUS_OK) {
6371  fru->status = status;
6372  fru->op_busy = 0;
6373  if (fru->cbfn)
6374  fru->cbfn(fru->cbarg, fru->status);
6375  } else {
6376  u32 len = be32_to_cpu(rsp->length);
6377 
6378  bfa_trc(fru, fru->offset);
6379  bfa_trc(fru, len);
6380 
6381  memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
6382  fru->residue -= len;
6383  fru->offset += len;
6384 
6385  if (fru->residue == 0) {
6386  fru->status = status;
6387  fru->op_busy = 0;
6388  if (fru->cbfn)
6389  fru->cbfn(fru->cbarg, fru->status);
6390  } else {
6391  if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
6392  bfa_fru_read_send(fru,
6393  BFI_FRUVPD_H2I_READ_REQ);
6394  else
6395  bfa_fru_read_send(fru,
6396  BFI_TFRU_H2I_READ_REQ);
6397  }
6398  }
6399  break;
6400  default:
6401  WARN_ON(1);
6402  }
6403 }