Linux Kernel 3.7.1
bfa_fcpim.c
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);

/*
 * BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);

#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
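
/*
 * Note: the tag-to-itnim lookup above relies on num_itnims (taken from
 * fwcfg.num_rports at attach time) being a power of two, so that
 * ((_tag) & (num_itnims - 1)) reduces the rport tag modulo the array size
 * without a divide. With a non-power-of-two count, distinct tags would
 * alias to the same array slot.
 */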

#define bfa_fcpim_additn(__itnim) \
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
#define bfa_fcpim_delitn(__itnim) do { \
	WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
	bfa_itnim_update_del_itn_stats(__itnim); \
	list_del(&(__itnim)->qe); \
	WARN_ON(!list_empty(&(__itnim)->io_q)); \
	WARN_ON(!list_empty(&(__itnim)->io_cleanup_q)); \
	WARN_ON(!list_empty(&(__itnim)->pending_q)); \
} while (0)

#define bfa_itnim_online_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_online((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_online, (__itnim)); \
	} \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_offline((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_offline, (__itnim)); \
	} \
} while (0)

#define bfa_itnim_sler_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_sler((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
		__bfa_cb_itnim_sler, (__itnim)); \
	} \
} while (0)
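
/*
 * Note on the three *_cb macros above: when the FCS module shares the
 * execution context (bfa->fcs is true) the driver callback is invoked
 * directly; otherwise the completion is deferred through bfa_cb_queue()
 * onto the itnim's hcb queue entry and delivered later from the BFA
 * completion path (see the __bfa_cb_itnim_* helpers further down).
 */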

enum bfa_ioim_lm_ua_status {
	BFA_IOIM_LM_UA_RESET = 0,
	BFA_IOIM_LM_UA_SET = 1,
};

/*
 * itnim state machine event
 */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};

/*
 * BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do { \
	list_del(&(__ioim)->qe); \
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
} while (0)
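
/*
 * Note: bfa_ioim_move_to_comp_q() unlinks an IO from whichever queue it
 * currently sits on (active, cleanup or pending) and parks it on the
 * module-wide ioim_comp_q until the host completion callback runs.
 */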


#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \
	if ((__fcpim)->profile_comp) \
		(__fcpim)->profile_comp(__ioim); \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \
	if ((__fcpim)->profile_start) \
		(__fcpim)->profile_start(__ioim); \
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};


/*
 * BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim)); \
	bfa_tskim_notify_comp(__tskim); \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do { \
	if ((__tskim)->notify) \
		bfa_itnim_tskdone((__tskim)->itnim); \
} while (0)
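
/*
 * Note: bfa_tskim_qcomp() both queues the task-management completion
 * callback toward the host and, via bfa_tskim_notify_comp(), informs the
 * owning itnim that an outstanding TM finished whenever the tskim was
 * marked for notification (presumably set when itnim cleanup takes a
 * reference on the TM via bfa_tskim_cleanup()).
 */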


enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/* TM command start */
	BFA_TSKIM_SM_DONE = 2,		/* TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/* resume after qfull */
	BFA_TSKIM_SM_HWFAIL = 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
	BFA_TSKIM_SM_UTAG = 10,		/* TM completion unknown tag */
};

/*
 * forward declaration for BFA ITNIM functions
 */
static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov(void *itnim_arg);
static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void		bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t	bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);

/*
 * forward declaration of BFA IO state machine
 */
static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
/*
 * forward declaration for BFA TSKIM functions
 */
static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
		struct scsi_lun lun);
static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
/*
 * BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
	bfa_itnim_meminfo(cfg, km_len);

	/*
	 * IO memory
	 */
	*km_len += cfg->fwcfg.num_ioim_reqs *
	  (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

	/*
	 * task management command memory
	 */
	if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
		cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	*km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
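
/*
 * Note: the kva budget computed above is consumed later by
 * bfa_itnim_attach()/bfa_tskim_attach()/bfa_ioim_attach(), which carve
 * their object arrays out of the same contiguous region via
 * bfa_mem_kva_curp(); num_tskim_reqs is floored at BFA_TSKIM_MIN so a
 * minimal pool of task-management contexts always exists.
 */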


static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
		struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
	struct bfa_s *bfa = fcp->bfa;

	bfa_trc(bfa, cfg->drvcfg.path_tov);
	bfa_trc(bfa, cfg->fwcfg.num_rports);
	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

	fcpim->fcp = fcp;
	fcpim->bfa = bfa;
	fcpim->num_itnims = cfg->fwcfg.num_rports;
	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
	fcpim->path_tov = cfg->drvcfg.path_tov;
	fcpim->delay_comp = cfg->drvcfg.delay_comp;
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;

	bfa_itnim_attach(fcpim);
	bfa_tskim_attach(fcpim);
	bfa_ioim_attach(fcpim);
}

static void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
	struct bfa_itnim_s *itnim;
	struct list_head *qe, *qen;

	/* Enqueue unused tskim resources to free_q */
	list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_iocdisable(itnim);
	}
}

void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	fcpim->path_tov = path_tov * 1000;
	if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
		fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	return fcpim->path_tov / 1000;
}

#define bfa_fcpim_add_iostats(__l, __r, __stats) \
	(__l->__stats += __r->__stats)
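
/*
 * Note: bfa_fcpim_add_iostats() works by plain textual substitution: the
 * __stats argument names a field of struct bfa_itnim_iostats_s, so for
 * example bfa_fcpim_add_iostats(lstats, rstats, total_ios) expands to
 * (lstats->total_ios += rstats->total_ios). No token pasting is needed.
 */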

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
		struct bfa_itnim_iostats_s *rstats)
{
	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
	bfa_fcpim_add_iostats(lstats, rstats, onlines);
	bfa_fcpim_add_iostats(lstats, rstats, offlines);
	bfa_fcpim_add_iostats(lstats, rstats, creates);
	bfa_fcpim_add_iostats(lstats, rstats, deletes);
	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}

bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
		struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;

	/* accumulate IO stats from itnim */
	memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != lp_tag)
			continue;
		bfa_fcpim_add_stats(stats, &(itnim->stats));
	}
	return BFA_STATUS_OK;
}

void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_latency_s *io_lat =
			&(ioim->itnim->ioprofile.io_latency);
	u32 val, idx;

	val = (u32)(jiffies - ioim->start_time);
	idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
	bfa_itnim_ioprofile_update(ioim->itnim, idx);

	io_lat->count[idx]++;
	io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
	io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
	io_lat->avg[idx] += val;
}
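
/*
 * Note: avg[idx] above is a running sum of per-IO latencies in jiffies,
 * not a computed mean; a consumer of the profile is presumably expected
 * to divide it by count[idx]. The clock_res_mul/clock_res_div values
 * reported by bfa_itnim_get_ioprofile() later in this file give the
 * reader the factors needed to convert jiffies to milliseconds.
 */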

void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
	ioim->start_time = jiffies;
}

bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
{
	struct bfa_itnim_s *itnim;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe, *qen;

	/* clear IO stats on all itnims before profiling starts */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	fcpim->io_profile = BFA_TRUE;
	fcpim->io_profile_start_time = time;
	fcpim->profile_comp = bfa_ioim_profile_comp;
	fcpim->profile_start = bfa_ioim_profile_start;
	return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	fcpim->io_profile = BFA_FALSE;
	fcpim->io_profile_start_time = 0;
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;
	return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	return fcpim->q_depth;
}

/*
 * BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		bfa_fcpim_additn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Second level error recovery is needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Rport is offline. Firmware itnim is being deleted; awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/*
	 * For IO requests in the pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move the IO from the active queue to a cleanup queue so
		 * that a later TM will not pick up this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	bfa_wc_wait(&itnim->wc);
}
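
/*
 * Note on the wc ("waiting counter") pattern used above: bfa_wc_init()
 * primes the counter with one reference, each queued IO/TM cleanup takes
 * another via bfa_wc_up(), and bfa_itnim_iodone()/bfa_itnim_tskdone()
 * below drop one via bfa_wc_down(). bfa_wc_wait() drops the priming
 * reference, so bfa_itnim_cleanp_comp() fires exactly once, after every
 * outstanding cleanup has completed (or immediately if there was none).
 */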

static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Called to resume any I/O requests waiting for room in the request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 * bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_fcp_mod_s *fcp = fcpim->fcp;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}

static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itn_create_req_s *m;

	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
			bfa_fn_lpu(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}
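
/*
 * Note on the request-queue pattern above: bfa_reqq_next() returns NULL
 * when the request CQ has no free element, in which case the itnim parks
 * itself on the queue-wait list with bfa_reqq_wait(); the
 * bfa_itnim_qresume() callback registered at attach time then re-drives
 * the send once CQ space frees up (the *_qfull states of the itnim state
 * machine model this wait).
 */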

static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itn_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
			bfa_fn_lpu(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}

/*
 * Cleanup all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
		ioim = (struct bfa_ioim_s *)qe;
		bfa_ioim_delayed_comp(ioim, iotov);
	}
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {

		itnim->iotov_active = BFA_TRUE;
		WARN_ON(!bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}
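
/*
 * Note: path_tov is kept in milliseconds (bfa_fcpim_path_tov_set()
 * converts from seconds), so the timer armed above fires one path TOV
 * after the itnim leaves the online state; while it is pending,
 * bfa_itnim_hold_io() parks new IO requests on pending_q instead of
 * failing them, giving the rport a chance to come back online.
 */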

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
	if (itnim->iotov_active) {
		itnim->iotov_active = BFA_FALSE;
		bfa_timer_stop(&itnim->timer);
	}
}

/*
 * Stop IO TOV timer and fail back any pending IO requests.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
	bfa_boolean_t pathtov_active = BFA_FALSE;

	if (itnim->iotov_active)
		pathtov_active = BFA_TRUE;

	bfa_itnim_iotov_stop(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov(itnim->ditn);
}

static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
		itnim->stats.iocomp_aborted;
	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
		itnim->stats.iocomp_timedout;
	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
		itnim->stats.iocom_sqer_needed;
	fcpim->del_itn_stats.del_itn_iocom_res_free +=
		itnim->stats.iocom_res_free;
	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
		itnim->stats.iocom_hostabrts;
	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}

/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	union bfi_itn_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITN_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
				msg.create_rsp->bfa_handle);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITN_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
				msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITN_I2H_SLER_EVENT:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
				msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}

/*
 * bfa_itnim_api
 */

struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_itnim_s *itnim;

	bfa_itn_create(bfa, rport, bfa_itnim_isr);

	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	WARN_ON(itnim->rport != rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}

void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}

/*
 * Return true if itnim is considered offline for holding off IO requests.
 * IO is not held if the itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
	return itnim->fcpim->path_tov && itnim->iotov_active &&
		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
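
/*
 * Note: bfa_itnim_hold_io() is true only while a path TOV is configured,
 * the TOV timer is running, and the itnim sits in one of the transitional
 * offline-ish states listed above; the deleting states are deliberately
 * excluded so new IO is failed rather than held once the itnim is going
 * away for good.
 */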

#define bfa_io_lat_clock_res_div	HZ
#define bfa_io_lat_clock_res_mul	1000
bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
		struct bfa_itnim_ioprofile_s *ioprofile)
{
	struct bfa_fcpim_s *fcpim;

	if (!itnim)
		return BFA_STATUS_NO_FCPIM_NEXUS;

	fcpim = BFA_FCPIM(itnim->bfa);

	if (!fcpim->io_profile)
		return BFA_STATUS_IOPROFILE_OFF;

	itnim->ioprofile.index = BFA_IOBUCKET_MAX;
	itnim->ioprofile.io_profile_start_time =
		bfa_io_profile_start_time(itnim->bfa);
	itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
	itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
	*ioprofile = itnim->ioprofile;

	return BFA_STATUS_OK;
}

void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	int j;

	if (!itnim)
		return;

	memset(&itnim->stats, 0, sizeof(itnim->stats));
	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
		itnim->ioprofile.io_latency.min[j] = ~0;
}

/*
 * BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sgpg_alloc(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_maxretry_reached(ioim)) {
			/* max retry reached, free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is retried with new tag.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		bfa_ioim_update_iotag(ioim);
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * In this state the IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is waiting for room in the request CQ.
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2017 
2018 /*
2019  * IO bfa callback is pending.
2020  */
2021 static void
2022 bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2023 {
2024  switch (event) {
2025  case BFA_IOIM_SM_HCB:
2026  bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2027  bfa_ioim_free(ioim);
2028  break;
2029 
2030  case BFA_IOIM_SM_CLEANUP:
2031  bfa_ioim_notify_cleanup(ioim);
2032  break;
2033 
2034  case BFA_IOIM_SM_HWFAIL:
2035  break;
2036 
2037  default:
2038  bfa_sm_fault(ioim->bfa, event);
2039  }
2040 }
2041 
2042 /*
2043  * IO bfa callback is pending. IO resource cannot be freed.
2044  */
2045 static void
2046 bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2047 {
2048  bfa_trc(ioim->bfa, ioim->iotag);
2049  bfa_trc(ioim->bfa, event);
2050 
2051  switch (event) {
2052  case BFA_IOIM_SM_HCB:
2053  bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
2054  list_del(&ioim->qe);
2055  list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
2056  break;
2057 
2058  case BFA_IOIM_SM_FREE:
2059  bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2060  break;
2061 
2062  case BFA_IOIM_SM_CLEANUP:
2063  bfa_ioim_notify_cleanup(ioim);
2064  break;
2065 
2066  case BFA_IOIM_SM_HWFAIL:
2067  bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2068  break;
2069 
2070  default:
2071  bfa_sm_fault(ioim->bfa, event);
2072  }
2073 }
2074 
2075 /*
2076  * IO is completed, waiting for resource free from firmware.
2077  */
2078 static void
2079 bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2080 {
2081  bfa_trc(ioim->bfa, ioim->iotag);
2082  bfa_trc(ioim->bfa, event);
2083 
2084  switch (event) {
2085  case BFA_IOIM_SM_FREE:
2086  bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2087  bfa_ioim_free(ioim);
2088  break;
2089 
2090  case BFA_IOIM_SM_CLEANUP:
2091  bfa_ioim_notify_cleanup(ioim);
2092  break;
2093 
2094  case BFA_IOIM_SM_HWFAIL:
2095  break;
2096 
2097  default:
2098  bfa_sm_fault(ioim->bfa, event);
2099  }
2100 }
2101 
2102 /*
2103  * This is called from bfa_fcp_start() after the driver completes the
2104  * bfa_init() flash read. Now invalidate the stale content of the lun
2105  * mask, such as unit attention, rp tag and lp tag.
2106  */
2107 static void
2108 bfa_ioim_lm_init(struct bfa_s *bfa)
2109 {
2110  struct bfa_lun_mask_s *lunm_list;
2111  int i;
2112 
2113  if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2114  return;
2115 
2116  lunm_list = bfa_get_lun_mask_list(bfa);
2117  for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2118  lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
2119  lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2120  lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2121  }
2122 }
2123 
2124 static void
2125 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2126 {
2127  struct bfa_ioim_s *ioim = cbarg;
2128 
2129  if (!complete) {
2130  bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2131  return;
2132  }
2133 
2134  bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2135 }
2136 
2137 static void
2138 __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2139 {
2140  struct bfa_ioim_s *ioim = cbarg;
2141  struct bfi_ioim_rsp_s *m;
2142  u8 *snsinfo = NULL;
2143  u8 sns_len = 0;
2144  s32 residue = 0;
2145 
2146  if (!complete) {
2147  bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2148  return;
2149  }
2150 
2151  m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2152  if (m->io_status == BFI_IOIM_STS_OK) {
2153  /*
2154  * setup sense information, if present
2155  */
2156  if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
2157  m->sns_len) {
2158  sns_len = m->sns_len;
2159  snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2160  ioim->iotag);
2161  }
2162 
2163  /*
2164  * setup residue value correctly for normal completions
2165  */
2166  if (m->resid_flags == FCP_RESID_UNDER) {
2167  residue = be32_to_cpu(m->residue);
2168  bfa_stats(ioim->itnim, iocomp_underrun);
2169  }
2170  if (m->resid_flags == FCP_RESID_OVER) {
2171  residue = be32_to_cpu(m->residue);
2172  residue = -residue;
2173  bfa_stats(ioim->itnim, iocomp_overrun);
2174  }
2175  }
2176 
2177  bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
2178  m->scsi_status, sns_len, snsinfo, residue);
2179 }
2180 
2181 void
2182 bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2183  u16 rp_tag, u8 lp_tag)
2184 {
2185  struct bfa_lun_mask_s *lun_list;
2186  u8 i;
2187 
2188  if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2189  return;
2190 
2191  lun_list = bfa_get_lun_mask_list(bfa);
2192  for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2193  if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2194  if ((lun_list[i].lp_wwn == lp_wwn) &&
2195  (lun_list[i].rp_wwn == rp_wwn)) {
2196  lun_list[i].rp_tag = rp_tag;
2197  lun_list[i].lp_tag = lp_tag;
2198  }
2199  }
2200  }
2201 }
2202 
2203 /*
2204  * set UA for all active luns in LM DB
2205  */
2206 static void
2207 bfa_ioim_lm_set_ua(struct bfa_s *bfa)
2208 {
2209  struct bfa_lun_mask_s *lunm_list;
2210  int i;
2211 
2212  lunm_list = bfa_get_lun_mask_list(bfa);
2213  for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2214  if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2215  continue;
2216  lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2217  }
2218 }
2219 
2220 bfa_status_t
2221 bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
2222 {
2223  struct bfa_lunmask_cfg_s *lun_mask;
2224 
2225  bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2226  if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2227  return BFA_STATUS_FAILED;
2228 
2229  if (bfa_get_lun_mask_status(bfa) == update)
2230  return BFA_STATUS_NO_CHANGE;
2231 
2232  lun_mask = bfa_get_lun_mask(bfa);
2233  lun_mask->status = update;
2234 
2235  if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
2236  bfa_ioim_lm_set_ua(bfa);
2237 
2238  return bfa_dconf_update(bfa);
2239 }
2240 
2241 bfa_status_t
2242 bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
2243 {
2244  int i;
2245  struct bfa_lun_mask_s *lunm_list;
2246 
2247  bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2248  if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2249  return BFA_STATUS_FAILED;
2250 
2251  lunm_list = bfa_get_lun_mask_list(bfa);
2252  for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2253  if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2254  if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
2255  bfa_rport_unset_lunmask(bfa,
2256  BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
2257  }
2258  }
2259 
2260  memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
2261  return bfa_dconf_update(bfa);
2262 }
2263 
2264 bfa_status_t
2265 bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
2266 {
2267  struct bfa_lunmask_cfg_s *lun_mask;
2268 
2269  bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2270  if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2271  return BFA_STATUS_FAILED;
2272 
2273  lun_mask = bfa_get_lun_mask(bfa);
2274  memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
2275  return BFA_STATUS_OK;
2276 }
2277 
2278 bfa_status_t
2279 bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2280  wwn_t rpwwn, struct scsi_lun lun)
2281 {
2282  struct bfa_lun_mask_s *lunm_list;
2283  struct bfa_rport_s *rp = NULL;
2284  int i, free_index = MAX_LUN_MASK_CFG + 1;
2285  struct bfa_fcs_lport_s *port = NULL;
2286  struct bfa_fcs_rport_s *rp_fcs;
2287 
2288  bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2289  if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2290  return BFA_STATUS_FAILED;
2291 
2292  port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
2293  vf_id, *pwwn);
2294  if (port) {
2295  *pwwn = port->port_cfg.pwwn;
2296  rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2297  if (rp_fcs)
2298  rp = rp_fcs->bfa_rport;
2299  }
2300 
2301  lunm_list = bfa_get_lun_mask_list(bfa);
2302  /* if entry exists */
2303  for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2304  if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2305  free_index = i;
2306  if ((lunm_list[i].lp_wwn == *pwwn) &&
2307  (lunm_list[i].rp_wwn == rpwwn) &&
2308  (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2309  scsilun_to_int((struct scsi_lun *)&lun)))
2310  return BFA_STATUS_ENTRY_EXISTS;
2311  }
2312 
2313  if (free_index > MAX_LUN_MASK_CFG)
2314  return BFA_STATUS_MAX_ENTRY_REACHED;
2315 
2316  if (rp) {
2317  lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
2318  rp->rport_info.local_pid);
2319  lunm_list[free_index].rp_tag = rp->rport_tag;
2320  } else {
2321  lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
2322  lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
2323  }
2324 
2325  lunm_list[free_index].lp_wwn = *pwwn;
2326  lunm_list[free_index].rp_wwn = rpwwn;
2327  lunm_list[free_index].lun = lun;
2328  lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
2329 
2330  /* set for all luns in this rp */
2331  for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2332  if ((lunm_list[i].lp_wwn == *pwwn) &&
2333  (lunm_list[i].rp_wwn == rpwwn))
2334  lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2335  }
2336 
2337  return bfa_dconf_update(bfa);
2338 }
2339 
2340 bfa_status_t
2341 bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2342  wwn_t rpwwn, struct scsi_lun lun)
2343 {
2344  struct bfa_lun_mask_s *lunm_list;
2345  struct bfa_rport_s *rp = NULL;
2346  struct bfa_fcs_lport_s *port = NULL;
2347  struct bfa_fcs_rport_s *rp_fcs;
2348  int i;
2349 
2350  /* in min cfg lunm_list could be NULL but no commands should run. */
2351  if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2352  return BFA_STATUS_FAILED;
2353 
2354  bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2355  bfa_trc(bfa, *pwwn);
2356  bfa_trc(bfa, rpwwn);
2357  bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
2358 
2359  if (*pwwn == 0) {
2360  port = bfa_fcs_lookup_port(
2361  &((struct bfad_s *)bfa->bfad)->bfa_fcs,
2362  vf_id, *pwwn);
2363  if (port) {
2364  *pwwn = port->port_cfg.pwwn;
2365  rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2366  if (rp_fcs)
2367  rp = rp_fcs->bfa_rport;
2368  }
2369  }
2370 
2371  lunm_list = bfa_get_lun_mask_list(bfa);
2372  for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2373  if ((lunm_list[i].lp_wwn == *pwwn) &&
2374  (lunm_list[i].rp_wwn == rpwwn) &&
2375  (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2376  scsilun_to_int((struct scsi_lun *)&lun))) {
2377  lunm_list[i].lp_wwn = 0;
2378  lunm_list[i].rp_wwn = 0;
2379  int_to_scsilun(0, &lunm_list[i].lun);
2380  lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
2381  if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
2382  lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2383  lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2384  }
2385  return bfa_dconf_update(bfa);
2386  }
2387  }
2388 
2389  /* set for all luns in this rp */
2390  for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2391  if ((lunm_list[i].lp_wwn == *pwwn) &&
2392  (lunm_list[i].rp_wwn == rpwwn))
2393  lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2394  }
2395 
2395 
2396  return BFA_STATUS_ENTRY_NOT_EXISTS;
2397 }
2398 
2399 static void
2400 __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2401 {
2402  struct bfa_ioim_s *ioim = cbarg;
2403 
2404  if (!complete) {
2405  bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2406  return;
2407  }
2408 
2409  bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2410  0, 0, NULL, 0);
2411 }
2412 
2413 static void
2414 __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2415 {
2416  struct bfa_ioim_s *ioim = cbarg;
2417 
2418  bfa_stats(ioim->itnim, path_tov_expired);
2419  if (!complete) {
2420  bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2421  return;
2422  }
2423 
2424  bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2425  0, 0, NULL, 0);
2426 }
2427 
2428 static void
2429 __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2430 {
2431  struct bfa_ioim_s *ioim = cbarg;
2432 
2433  if (!complete) {
2434  bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2435  return;
2436  }
2437 
2438  bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2439 }
2440 
2441 static void
2442 bfa_ioim_sgpg_alloced(void *cbarg)
2443 {
2444  struct bfa_ioim_s *ioim = cbarg;
2445 
2446  ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2447  list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
2448  ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2449  bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2450 }
2451 
2452 /*
2453  * Send I/O request to firmware.
2454  */
2455 static bfa_boolean_t
2456 bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2457 {
2458  struct bfa_itnim_s *itnim = ioim->itnim;
2459  struct bfi_ioim_req_s *m;
2460  static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
2461  struct bfi_sge_s *sge, *sgpge;
2462  u32 pgdlen = 0;
2463  u32 fcp_dl;
2464  u64 addr;
2465  struct scatterlist *sg;
2466  struct bfa_sgpg_s *sgpg;
2467  struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2468  u32 i, sge_id, pgcumsz;
2469  enum dma_data_direction dmadir;
2470 
2471  /*
2472  * check for room in queue to send request now
2473  */
2474  m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2475  if (!m) {
2476  bfa_stats(ioim->itnim, qwait);
2477  bfa_reqq_wait(ioim->bfa, ioim->reqq,
2478  &ioim->iosp->reqq_wait);
2479  return BFA_FALSE;
2480  }
2481 
2482  /*
2483  * build i/o request message next
2484  */
2485  m->io_tag = cpu_to_be16(ioim->iotag);
2486  m->rport_hdl = ioim->itnim->rport->fw_handle;
2487  m->io_timeout = 0;
2488 
2489  sge = &m->sges[0];
2490  sgpg = ioim->sgpg;
2491  sge_id = 0;
2492  sgpge = NULL;
2493  pgcumsz = 0;
2494  scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
2495  if (i == 0) {
2496  /* build inline IO SG element */
2497  addr = bfa_sgaddr_le(sg_dma_address(sg));
2498  sge->sga = *(union bfi_addr_u *) &addr;
2499  pgdlen = sg_dma_len(sg);
2500  sge->sg_len = pgdlen;
2501  sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
2502  BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2503  bfa_sge_to_be(sge);
2504  sge++;
2505  } else {
2506  if (sge_id == 0)
2507  sgpge = sgpg->sgpg->sges;
2508 
2509  addr = bfa_sgaddr_le(sg_dma_address(sg));
2510  sgpge->sga = *(union bfi_addr_u *) &addr;
2511  sgpge->sg_len = sg_dma_len(sg);
2512  pgcumsz += sgpge->sg_len;
2513 
2514  /* set flags */
2515  if (i < (ioim->nsges - 1) &&
2516  sge_id < (BFI_SGPG_DATA_SGES - 1))
2517  sgpge->flags = BFI_SGE_DATA;
2518  else if (i < (ioim->nsges - 1))
2519  sgpge->flags = BFI_SGE_DATA_CPL;
2520  else
2521  sgpge->flags = BFI_SGE_DATA_LAST;
2522 
2523  bfa_sge_to_le(sgpge);
2524 
2525  sgpge++;
2526  if (i == (ioim->nsges - 1)) {
2527  sgpge->flags = BFI_SGE_PGDLEN;
2528  sgpge->sga.a32.addr_lo = 0;
2529  sgpge->sga.a32.addr_hi = 0;
2530  sgpge->sg_len = pgcumsz;
2531  bfa_sge_to_le(sgpge);
2532  } else if (++sge_id == BFI_SGPG_DATA_SGES) {
2533  sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2534  sgpge->flags = BFI_SGE_LINK;
2535  sgpge->sga = sgpg->sgpg_pa;
2536  sgpge->sg_len = pgcumsz;
2537  bfa_sge_to_le(sgpge);
2538  sge_id = 0;
2539  pgcumsz = 0;
2540  }
2541  }
2542  }
2543 
2544  if (ioim->nsges > BFI_SGE_INLINE) {
2545  sge->sga = ioim->sgpg->sgpg_pa;
2546  } else {
2547  sge->sga.a32.addr_lo = 0;
2548  sge->sga.a32.addr_hi = 0;
2549  }
2550  sge->sg_len = pgdlen;
2551  sge->flags = BFI_SGE_PGDLEN;
2552  bfa_sge_to_be(sge);
2553 
2554  /*
2555  * set up I/O command parameters
2556  */
2557  m->cmnd = cmnd_z0;
2558  int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
2559  dmadir = cmnd->sc_data_direction;
2560  if (dmadir == DMA_TO_DEVICE)
2561  m->cmnd.iodir = FCP_IODIR_WRITE;
2562  else if (dmadir == DMA_FROM_DEVICE)
2563  m->cmnd.iodir = FCP_IODIR_READ;
2564  else
2565  m->cmnd.iodir = FCP_IODIR_NONE;
2566 
2567  m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
2568  fcp_dl = scsi_bufflen(cmnd);
2569  m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
2570 
2571  /*
2572  * set up I/O message header
2573  */
2574  switch (m->cmnd.iodir) {
2575  case FCP_IODIR_READ:
2576  bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
2577  bfa_stats(itnim, input_reqs);
2578  ioim->itnim->stats.rd_throughput += fcp_dl;
2579  break;
2580  case FCP_IODIR_WRITE:
2581  bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
2582  bfa_stats(itnim, output_reqs);
2583  ioim->itnim->stats.wr_throughput += fcp_dl;
2584  break;
2585  case FCP_IODIR_RW:
2586  bfa_stats(itnim, input_reqs);
2587  bfa_stats(itnim, output_reqs);
2588  default:
2589  bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2590  }
2591  if (itnim->seq_rec ||
2592  (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
2593  bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2594 
2595  /*
2596  * queue I/O message to firmware
2597  */
2598  bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2599  return BFA_TRUE;
2600 }
2601 
2602 /*
2603  * Setup any additional SG pages needed. Inline SG element is setup
2604  * at queuing time.
2605  */
2606 static bfa_boolean_t
2607 bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
2608 {
2609  u16 nsgpgs;
2610 
2611  WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
2612 
2613  /*
2614  * allocate SG pages needed
2615  */
2616  nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2617  if (!nsgpgs)
2618  return BFA_TRUE;
2619 
2620  if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2621  != BFA_STATUS_OK) {
2622  bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2623  return BFA_FALSE;
2624  }
2625 
2626  ioim->nsgpgs = nsgpgs;
2627  ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2628 
2629  return BFA_TRUE;
2630 }
2631 
2632 /*
2633  * Send I/O abort request to firmware.
2634  */
2635 static bfa_boolean_t
2636 bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2637 {
2638  struct bfi_ioim_abort_req_s *m;
2639  enum bfi_ioim_h2i msgop;
2640 
2641  /*
2642  * check for room in queue to send request now
2643  */
2644  m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2645  if (!m)
2646  return BFA_FALSE;
2647 
2648  /*
2649  * build i/o request message next
2650  */
2651  if (ioim->iosp->abort_explicit)
2652  msgop = BFI_IOIM_H2I_IOABORT_REQ;
2653  else
2654  msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2655 
2656  bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
2657  m->io_tag = cpu_to_be16(ioim->iotag);
2658  m->abort_tag = ++ioim->abort_tag;
2659 
2660  /*
2661  * queue I/O message to firmware
2662  */
2663  bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2664  return BFA_TRUE;
2665 }
2666 
2667 /*
2668  * Call to resume any I/O requests waiting for room in request queue.
2669  */
2670 static void
2671 bfa_ioim_qresume(void *cbarg)
2672 {
2673  struct bfa_ioim_s *ioim = cbarg;
2674 
2675  bfa_stats(ioim->itnim, qresumes);
2676  bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2677 }
2678 
2679 
2680 static void
2681 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2682 {
2683  /*
2684  * Move IO from itnim queue to fcpim global queue since itnim will be
2685  * freed.
2686  */
2687  list_del(&ioim->qe);
2688  list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2689 
2690  if (!ioim->iosp->tskim) {
2691  if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
2692  bfa_cb_dequeue(&ioim->hcb_qe);
2693  list_del(&ioim->qe);
2694  list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2695  }
2696  bfa_itnim_iodone(ioim->itnim);
2697  } else
2698  bfa_wc_down(&ioim->iosp->tskim->wc);
2699 }
2700 
2701 static bfa_boolean_t
2702 bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2703 {
2704  if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2705  (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
2706  (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
2707  (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
2708  (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
2709  (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
2710  (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2711  return BFA_FALSE;
2712 
2713  return BFA_TRUE;
2714 }
2715 
2716 void
2717 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2718 {
2719  /*
2720  * If path tov timer expired, failback with PATHTOV status - these
2721  * IO requests are not normally retried by IO stack.
2722  *
2723  * Otherwise the device came back online; fail the IO with normal
2724  * failed status so that the IO stack retries these failed IO requests.
2725  */
2726  if (iotov)
2727  ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2728  else {
2729  ioim->io_cbfn = __bfa_cb_ioim_failed;
2730  bfa_stats(ioim->itnim, iocom_nexus_abort);
2731  }
2732  bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2733 
2734  /*
2735  * Move IO to fcpim global queue since itnim will be
2736  * freed.
2737  */
2738  list_del(&ioim->qe);
2739  list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2740 }
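
For example, an IO parked on the pending queue completes back to the stack with BFI_IOIM_STS_PATHTOV (not retried) when the path TOV timer has already fired, but with BFI_IOIM_STS_ABORTED (retried by the IO stack) when the device came back before the timer expired.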
2741 
2742 
2743 /*
2744  * Memory allocation and initialization.
2745  */
2746 void
2747 bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
2748 {
2749  struct bfa_ioim_s *ioim;
2750  struct bfa_fcp_mod_s *fcp = fcpim->fcp;
2751  struct bfa_ioim_sp_s *iosp;
2752  u16 i;
2753 
2754  /*
2755  * claim memory first
2756  */
2757  ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
2758  fcpim->ioim_arr = ioim;
2759  bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
2760 
2761  iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
2762  fcpim->ioim_sp_arr = iosp;
2763  bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
2764 
2765  /*
2766  * Initialize ioim free queues
2767  */
2768  INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
2769  INIT_LIST_HEAD(&fcpim->ioim_comp_q);
2770 
2771  for (i = 0; i < fcpim->fcp->num_ioim_reqs;
2772  i++, ioim++, iosp++) {
2773  /*
2774  * initialize IOIM
2775  */
2776  memset(ioim, 0, sizeof(struct bfa_ioim_s));
2777  ioim->iotag = i;
2778  ioim->bfa = fcpim->bfa;
2779  ioim->fcpim = fcpim;
2780  ioim->iosp = iosp;
2781  INIT_LIST_HEAD(&ioim->sgpg_q);
2782  bfa_reqq_winit(&ioim->iosp->reqq_wait,
2783  bfa_ioim_qresume, ioim);
2784  bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
2785  bfa_ioim_sgpg_alloced, ioim);
2786  bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2787  }
2788 }
2789 
2790 void
2791 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2792 {
2793  struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2794  struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2795  struct bfa_ioim_s *ioim;
2796  u16 iotag;
2797  enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2798 
2799  iotag = be16_to_cpu(rsp->io_tag);
2800 
2801  ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2802  WARN_ON(ioim->iotag != iotag);
2803 
2804  bfa_trc(ioim->bfa, ioim->iotag);
2805  bfa_trc(ioim->bfa, rsp->io_status);
2806  bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2807 
2808  if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
2809  ioim->iosp->comp_rspmsg = *m;
2810 
2811  switch (rsp->io_status) {
2812  case BFI_IOIM_STS_OK:
2813  bfa_stats(ioim->itnim, iocomp_ok);
2814  if (rsp->reuse_io_tag == 0)
2815  evt = BFA_IOIM_SM_DONE;
2816  else
2817  evt = BFA_IOIM_SM_COMP;
2818  break;
2819 
2820  case BFI_IOIM_STS_TIMEDOUT:
2821  bfa_stats(ioim->itnim, iocomp_timedout);
2822  case BFI_IOIM_STS_ABORTED:
2823  rsp->io_status = BFI_IOIM_STS_ABORTED;
2824  bfa_stats(ioim->itnim, iocomp_aborted);
2825  if (rsp->reuse_io_tag == 0)
2826  evt = BFA_IOIM_SM_DONE;
2827  else
2828  evt = BFA_IOIM_SM_COMP;
2829  break;
2830 
2831  case BFI_IOIM_STS_PROTO_ERR:
2832  bfa_stats(ioim->itnim, iocom_proto_err);
2833  WARN_ON(!rsp->reuse_io_tag);
2834  evt = BFA_IOIM_SM_COMP;
2835  break;
2836 
2837  case BFI_IOIM_STS_SQER_NEEDED:
2838  bfa_stats(ioim->itnim, iocom_sqer_needed);
2839  WARN_ON(rsp->reuse_io_tag != 0);
2840  evt = BFA_IOIM_SM_SQRETRY;
2841  break;
2842 
2843  case BFI_IOIM_STS_RES_FREE:
2844  bfa_stats(ioim->itnim, iocom_res_free);
2845  evt = BFA_IOIM_SM_FREE;
2846  break;
2847 
2848  case BFI_IOIM_STS_HOST_ABORTED:
2849  bfa_stats(ioim->itnim, iocom_hostabrts);
2850  if (rsp->abort_tag != ioim->abort_tag) {
2851  bfa_trc(ioim->bfa, rsp->abort_tag);
2852  bfa_trc(ioim->bfa, ioim->abort_tag);
2853  return;
2854  }
2855 
2856  if (rsp->reuse_io_tag)
2857  evt = BFA_IOIM_SM_ABORT_COMP;
2858  else
2859  evt = BFA_IOIM_SM_ABORT_DONE;
2860  break;
2861 
2862  case BFI_IOIM_STS_UTAG:
2863  bfa_stats(ioim->itnim, iocom_utags);
2864  evt = BFA_IOIM_SM_COMP_UTAG;
2865  break;
2866 
2867  default:
2868  WARN_ON(1);
2869  }
2870 
2871  bfa_sm_send_event(ioim, evt);
2872 }
2873 
2874 void
2875 bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2876 {
2877  struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2878  struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2879  struct bfa_ioim_s *ioim;
2880  u16 iotag;
2881 
2882  iotag = be16_to_cpu(rsp->io_tag);
2883 
2884  ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2885  WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
2886 
2887  bfa_ioim_cb_profile_comp(fcpim, ioim);
2888 
2889  bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2890 }
2891 
2892 /*
2893  * Called by itnim to clean up IO while going offline.
2894  */
2895 void
2896 bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
2897 {
2898  bfa_trc(ioim->bfa, ioim->iotag);
2899  bfa_stats(ioim->itnim, io_cleanups);
2900 
2901  ioim->iosp->tskim = NULL;
2902  bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2903 }
2904 
2905 void
2906 bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2907 {
2908  bfa_trc(ioim->bfa, ioim->iotag);
2909  bfa_stats(ioim->itnim, io_tmaborts);
2910 
2911  ioim->iosp->tskim = tskim;
2912  bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2913 }
2914 
2915 /*
2916  * IOC failure handling.
2917  */
2918 void
2919 bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2920 {
2921  bfa_trc(ioim->bfa, ioim->iotag);
2922  bfa_stats(ioim->itnim, io_iocdowns);
2923  bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2924 }
2925 
2926 /*
2927  * IO offline TOV popped. Fail the pending IO.
2928  */
2929 void
2930 bfa_ioim_tov(struct bfa_ioim_s *ioim)
2931 {
2932  bfa_trc(ioim->bfa, ioim->iotag);
2933  bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
2934 }
2935 
2936 
2937 /*
2938  * Allocate IOIM resource for initiator mode I/O request.
2939  */
2940 struct bfa_ioim_s *
2941 bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2942  struct bfa_itnim_s *itnim, u16 nsges)
2943 {
2944  struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2945  struct bfa_ioim_s *ioim;
2946  struct bfa_iotag_s *iotag = NULL;
2947 
2948  /*
2949  * allocate IOIM resource
2950  */
2951  bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
2952  if (!iotag) {
2953  bfa_stats(itnim, no_iotags);
2954  return NULL;
2955  }
2956 
2957  ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
2958 
2959  ioim->dio = dio;
2960  ioim->itnim = itnim;
2961  ioim->nsges = nsges;
2962  ioim->nsgpgs = 0;
2963 
2964  bfa_stats(itnim, total_ios);
2965  fcpim->ios_active++;
2966 
2967  list_add_tail(&ioim->qe, &itnim->io_q);
2968 
2969  return ioim;
2970 }
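
For context, a hedged sketch of the allocator in use from a driver queuing path; the wrapper below is hypothetical and reduces error handling to the tag-exhaustion case:

/*
 * Hypothetical caller sketch (illustrative only): queue one command on
 * an i-t nexus, reporting busy when all IO tags are in use.
 */
static int
bfad_im_queue_one(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		  struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_ioim_s *ioim;

	ioim = bfa_ioim_alloc(bfa, dio, itnim, nsges);
	if (!ioim)
		return -EBUSY;	/* no iotag free; retry later */

	bfa_ioim_start(ioim);	/* kicks the IOIM state machine */
	return 0;
}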
2971 
2972 void
2973 bfa_ioim_free(struct bfa_ioim_s *ioim)
2974 {
2975  struct bfa_fcpim_s *fcpim = ioim->fcpim;
2976  struct bfa_iotag_s *iotag;
2977 
2978  if (ioim->nsgpgs > 0)
2979  bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2980 
2981  bfa_stats(ioim->itnim, io_comps);
2982  fcpim->ios_active--;
2983 
2984  ioim->iotag &= BFA_IOIM_IOTAG_MASK;
2985 
2986  WARN_ON(!(ioim->iotag <
2987  (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
2988  iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
2989 
2990  if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
2991  list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
2992  else
2993  list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
2994 
2995  list_del(&ioim->qe);
2996 }
2997 
2998 void
2999 bfa_ioim_start(struct bfa_ioim_s *ioim)
3000 {
3001  bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
3002 
3003  /*
3004  * Obtain the queue over which this request has to be issued
3005  */
3006  ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
3007  BFA_FALSE : bfa_itnim_get_reqq(ioim);
3008 
3009  bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
3010 }
3011 
3012 /*
3013  * Driver I/O abort request.
3014  */
3015 bfa_status_t
3016 bfa_ioim_abort(struct bfa_ioim_s *ioim)
3017 {
3018 
3019  bfa_trc(ioim->bfa, ioim->iotag);
3020 
3021  if (!bfa_ioim_is_abortable(ioim))
3022  return BFA_STATUS_FAILED;
3023 
3024  bfa_stats(ioim->itnim, io_aborts);
3025  bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
3026 
3027  return BFA_STATUS_OK;
3028 }
3029 
3030 /*
3031  * BFA TSKIM state machine functions
3032  */
3033 
3034 /*
3035  * Task management command beginning state.
3036  */
3037 static void
3038 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3039 {
3040  bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3041 
3042  switch (event) {
3043  case BFA_TSKIM_SM_START:
3044  bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3045  bfa_tskim_gather_ios(tskim);
3046 
3047  /*
3048  * If device is offline, do not send TM on wire. Just cleanup
3049  * any pending IO requests and complete TM request.
3050  */
3051  if (!bfa_itnim_is_online(tskim->itnim)) {
3052  bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3053  tskim->tsk_status = BFI_TSKIM_STS_OK;
3054  bfa_tskim_cleanup_ios(tskim);
3055  return;
3056  }
3057 
3058  if (!bfa_tskim_send(tskim)) {
3059  bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
3060  bfa_stats(tskim->itnim, tm_qwait);
3061  bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3062  &tskim->reqq_wait);
3063  }
3064  break;
3065 
3066  default:
3067  bfa_sm_fault(tskim->bfa, event);
3068  }
3069 }
3070 
3071 /*
3072  * TM command is active, awaiting completion from firmware to
3073  * cleanup IO requests in TM scope.
3074  */
3075 static void
3076 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3077 {
3078  bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3079 
3080  switch (event) {
3081  case BFA_TSKIM_SM_DONE:
3082  bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3083  bfa_tskim_cleanup_ios(tskim);
3084  break;
3085 
3086  case BFA_TSKIM_SM_CLEANUP:
3087  bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3088  if (!bfa_tskim_send_abort(tskim)) {
3089  bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
3090  bfa_stats(tskim->itnim, tm_qwait);
3091  bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3092  &tskim->reqq_wait);
3093  }
3094  break;
3095 
3096  case BFA_TSKIM_SM_HWFAIL:
3097  bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3098  bfa_tskim_iocdisable_ios(tskim);
3099  bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3100  break;
3101 
3102  default:
3103  bfa_sm_fault(tskim->bfa, event);
3104  }
3105 }
3106 
3107 /*
3108  * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3109  * completion event from firmware.
3110  */
3111 static void
3112 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3113 {
3114  bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3115 
3116  switch (event) {
3117  case BFA_TSKIM_SM_DONE:
3118  /*
3119  * Ignore and wait for ABORT completion from firmware.
3120  */
3121  break;
3122 
3123  case BFA_TSKIM_SM_UTAG:
3124  case BFA_TSKIM_SM_CLEANUP_DONE:
3125  bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3126  bfa_tskim_cleanup_ios(tskim);
3127  break;
3128 
3129  case BFA_TSKIM_SM_HWFAIL:
3130  bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3131  bfa_tskim_iocdisable_ios(tskim);
3132  bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3133  break;
3134 
3135  default:
3136  bfa_sm_fault(tskim->bfa, event);
3137  }
3138 }
3139 
3140 static void
3141 bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3142 {
3143  bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3144 
3145  switch (event) {
3146  case BFA_TSKIM_SM_IOS_DONE:
3147  bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3148  bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
3149  break;
3150 
3151  case BFA_TSKIM_SM_CLEANUP:
3152  /*
3153  * Ignore, TM command completed on wire.
3154  * Notify TM completion on IO cleanup completion.
3155  */
3156  break;
3157 
3158  case BFA_TSKIM_SM_HWFAIL:
3159  bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3160  bfa_tskim_iocdisable_ios(tskim);
3161  bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3162  break;
3163 
3164  default:
3165  bfa_sm_fault(tskim->bfa, event);
3166  }
3167 }
3168 
3169 /*
3170  * Task management command is waiting for room in request CQ
3171  */
3172 static void
3173 bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3174 {
3175  bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3176 
3177  switch (event) {
3178  case BFA_TSKIM_SM_QRESUME:
3179  bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3180  bfa_tskim_send(tskim);
3181  break;
3182 
3183  case BFA_TSKIM_SM_CLEANUP:
3184  /*
3185  * No need to send TM on wire since ITN is offline.
3186  */
3187  bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3188  bfa_reqq_wcancel(&tskim->reqq_wait);
3189  bfa_tskim_cleanup_ios(tskim);
3190  break;
3191 
3192  case BFA_TSKIM_SM_HWFAIL:
3193  bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3194  bfa_reqq_wcancel(&tskim->reqq_wait);
3195  bfa_tskim_iocdisable_ios(tskim);
3196  bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3197  break;
3198 
3199  default:
3200  bfa_sm_fault(tskim->bfa, event);
3201  }
3202 }
3203 
3204 /*
3205  * Task management command is active, awaiting room in the request CQ
3206  * to send the cleanup request.
3207  */
3208 static void
3209 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3210  enum bfa_tskim_event event)
3211 {
3212  bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3213 
3214  switch (event) {
3215  case BFA_TSKIM_SM_DONE:
3216  bfa_reqq_wcancel(&tskim->reqq_wait);
3217  /*
3218  * Fall through !!!
3219  */
3220  case BFA_TSKIM_SM_QRESUME:
3221  bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3222  bfa_tskim_send_abort(tskim);
3223  break;
3224 
3225  case BFA_TSKIM_SM_HWFAIL:
3226  bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3227  bfa_reqq_wcancel(&tskim->reqq_wait);
3228  bfa_tskim_iocdisable_ios(tskim);
3229  bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3230  break;
3231 
3232  default:
3233  bfa_sm_fault(tskim->bfa, event);
3234  }
3235 }
3236 
3237 /*
3238  * BFA callback is pending
3239  */
3240 static void
3241 bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3242 {
3243  bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3244 
3245  switch (event) {
3246  case BFA_TSKIM_SM_HCB:
3247  bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3248  bfa_tskim_free(tskim);
3249  break;
3250 
3251  case BFA_TSKIM_SM_CLEANUP:
3252  bfa_tskim_notify_comp(tskim);
3253  break;
3254 
3255  case BFA_TSKIM_SM_HWFAIL:
3256  break;
3257 
3258  default:
3259  bfa_sm_fault(tskim->bfa, event);
3260  }
3261 }
3262 
3263 static void
3264 __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3265 {
3266  struct bfa_tskim_s *tskim = cbarg;
3267 
3268  if (!complete) {
3269  bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3270  return;
3271  }
3272 
3273  bfa_stats(tskim->itnim, tm_success);
3274  bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3275 }
3276 
3277 static void
3278 __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3279 {
3280  struct bfa_tskim_s *tskim = cbarg;
3281 
3282  if (!complete) {
3283  bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3284  return;
3285  }
3286 
3287  bfa_stats(tskim->itnim, tm_failures);
3288  bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3289  BFI_TSKIM_STS_FAILED);
3290 }
3291 
3292 static bfa_boolean_t
3293 bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
3294 {
3295  switch (tskim->tm_cmnd) {
3296  case FCP_TM_TARGET_RESET:
3297  return BFA_TRUE;
3298 
3299  case FCP_TM_ABORT_TASK_SET:
3300  case FCP_TM_CLEAR_TASK_SET:
3301  case FCP_TM_LUN_RESET:
3302  case FCP_TM_CLEAR_ACA:
3303  return !memcmp(&tskim->lun, &lun, sizeof(lun));
3304 
3305  default:
3306  WARN_ON(1);
3307  }
3308 
3309  return BFA_FALSE;
3310 }
3311 
3312 /*
3313  * Gather affected IO requests and task management commands.
3314  */
3315 static void
3316 bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3317 {
3318  struct bfa_itnim_s *itnim = tskim->itnim;
3319  struct bfa_ioim_s *ioim;
3320  struct list_head *qe, *qen;
3321  struct scsi_cmnd *cmnd;
3322  struct scsi_lun scsilun;
3323 
3324  INIT_LIST_HEAD(&tskim->io_q);
3325 
3326  /*
3327  * Gather any active IO requests first.
3328  */
3329  list_for_each_safe(qe, qen, &itnim->io_q) {
3330  ioim = (struct bfa_ioim_s *) qe;
3331  cmnd = (struct scsi_cmnd *) ioim->dio;
3332  int_to_scsilun(cmnd->device->lun, &scsilun);
3333  if (bfa_tskim_match_scope(tskim, scsilun)) {
3334  list_del(&ioim->qe);
3335  list_add_tail(&ioim->qe, &tskim->io_q);
3336  }
3337  }
3338 
3339  /*
3340  * Failback any pending IO requests immediately.
3341  */
3342  list_for_each_safe(qe, qen, &itnim->pending_q) {
3343  ioim = (struct bfa_ioim_s *) qe;
3344  cmnd = (struct scsi_cmnd *) ioim->dio;
3345  int_to_scsilun(cmnd->device->lun, &scsilun);
3346  if (bfa_tskim_match_scope(tskim, scsilun)) {
3347  list_del(&ioim->qe);
3348  list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3349  bfa_ioim_tov(ioim);
3350  }
3351  }
3352 }
3353 
3354 /*
3355  * IO cleanup completion
3356  */
3357 static void
3358 bfa_tskim_cleanp_comp(void *tskim_cbarg)
3359 {
3360  struct bfa_tskim_s *tskim = tskim_cbarg;
3361 
3362  bfa_stats(tskim->itnim, tm_io_comps);
3363  bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3364 }
3365 
3366 /*
3367  * Cleanup IO requests gathered within the scope of this TM command.
3368  */
3369 static void
3370 bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3371 {
3372  struct bfa_ioim_s *ioim;
3373  struct list_head *qe, *qen;
3374 
3375  bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
3376 
3377  list_for_each_safe(qe, qen, &tskim->io_q) {
3378  ioim = (struct bfa_ioim_s *) qe;
3379  bfa_wc_up(&tskim->wc);
3380  bfa_ioim_cleanup_tm(ioim, tskim);
3381  }
3382 
3383  bfa_wc_wait(&tskim->wc);
3384 }
3385 
3386 /*
3387  * Send task management request to firmware.
3388  */
3389 static bfa_boolean_t
3390 bfa_tskim_send(struct bfa_tskim_s *tskim)
3391 {
3392  struct bfa_itnim_s *itnim = tskim->itnim;
3393  struct bfi_tskim_req_s *m;
3394 
3395  /*
3396  * check for room in queue to send request now
3397  */
3398  m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3399  if (!m)
3400  return BFA_FALSE;
3401 
3402  /*
3403  * build i/o request message next
3404  */
3405  bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3406  bfa_fn_lpu(tskim->bfa));
3407 
3408  m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3409  m->itn_fhdl = tskim->itnim->rport->fw_handle;
3410  m->t_secs = tskim->tsecs;
3411  m->lun = tskim->lun;
3412  m->tm_flags = tskim->tm_cmnd;
3413 
3414  /*
3415  * queue I/O message to firmware
3416  */
3417  bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3418  return BFA_TRUE;
3419 }
3420 
3421 /*
3422  * Send abort request to cleanup an active TM to firmware.
3423  */
3424 static bfa_boolean_t
3425 bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3426 {
3427  struct bfa_itnim_s *itnim = tskim->itnim;
3428  struct bfi_tskim_abortreq_s *m;
3429 
3430  /*
3431  * check for room in queue to send request now
3432  */
3433  m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3434  if (!m)
3435  return BFA_FALSE;
3436 
3437  /*
3438  * build i/o request message next
3439  */
3440  bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3441  bfa_fn_lpu(tskim->bfa));
3442 
3443  m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3444 
3445  /*
3446  * queue I/O message to firmware
3447  */
3448  bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3449  return BFA_TRUE;
3450 }
3451 
3452 /*
3453  * Call to resume task management cmnd waiting for room in request queue.
3454  */
3455 static void
3456 bfa_tskim_qresume(void *cbarg)
3457 {
3458  struct bfa_tskim_s *tskim = cbarg;
3459 
3460  bfa_stats(tskim->itnim, tm_qresumes);
3461  bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3462 }
3463 
3464 /*
3465  * Cleanup IOs associated with a task management command on IOC failures.
3466  */
3467 static void
3468 bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3469 {
3470  struct bfa_ioim_s *ioim;
3471  struct list_head *qe, *qen;
3472 
3473  list_for_each_safe(qe, qen, &tskim->io_q) {
3474  ioim = (struct bfa_ioim_s *) qe;
3475  bfa_ioim_iocdisable(ioim);
3476  }
3477 }
3478 
3479 /*
3480  * Notification on completions from related ioim.
3481  */
3482 void
3483 bfa_tskim_iodone(struct bfa_tskim_s *tskim)
3484 {
3485  bfa_wc_down(&tskim->wc);
3486 }
3487 
3488 /*
3489  * Handle IOC h/w failure notification from itnim.
3490  */
3491 void
3492 bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3493 {
3494  tskim->notify = BFA_FALSE;
3495  bfa_stats(tskim->itnim, tm_iocdowns);
3496  bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3497 }
3498 
3499 /*
3500  * Cleanup TM command and associated IOs as part of ITNIM offline.
3501  */
3502 void
3503 bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3504 {
3505  tskim->notify = BFA_TRUE;
3506  bfa_stats(tskim->itnim, tm_cleanups);
3507  bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3508 }
3509 
3510 /*
3511  * Memory allocation and initialization.
3512  */
3513 void
3514 bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
3515 {
3516  struct bfa_tskim_s *tskim;
3517  struct bfa_fcp_mod_s *fcp = fcpim->fcp;
3518  u16 i;
3519 
3520  INIT_LIST_HEAD(&fcpim->tskim_free_q);
3521  INIT_LIST_HEAD(&fcpim->tskim_unused_q);
3522 
3523  tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
3524  fcpim->tskim_arr = tskim;
3525 
3526  for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3527  /*
3528  * initialize TSKIM
3529  */
3530  memset(tskim, 0, sizeof(struct bfa_tskim_s));
3531  tskim->tsk_tag = i;
3532  tskim->bfa = fcpim->bfa;
3533  tskim->fcpim = fcpim;
3534  tskim->notify = BFA_FALSE;
3535  bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3536  tskim);
3537  bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3538 
3539  list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3540  }
3541 
3542  bfa_mem_kva_curp(fcp) = (u8 *) tskim;
3543 }
3544 
3545 void
3546 bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3547 {
3548  struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3549  struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3550  struct bfa_tskim_s *tskim;
3551  u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
3552 
3553  tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3554  WARN_ON(tskim->tsk_tag != tsk_tag);
3555 
3556  tskim->tsk_status = rsp->tsk_status;
3557 
3558  /*
3559  * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3560  * requests. All other statuses are for normal completions.
3561  */
3562  if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3563  bfa_stats(tskim->itnim, tm_cleanup_comps);
3564  bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3565  } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) {
3566  bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG);
3567  } else {
3568  bfa_stats(tskim->itnim, tm_fw_rsps);
3569  bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3570  }
3571 }
3572 
3573 
3574 struct bfa_tskim_s *
3575 bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3576 {
3577  struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3578  struct bfa_tskim_s *tskim;
3579 
3580  bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3581 
3582  if (tskim)
3583  tskim->dtsk = dtsk;
3584 
3585  return tskim;
3586 }
3587 
3588 void
3589 bfa_tskim_free(struct bfa_tskim_s *tskim)
3590 {
3591  WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
3592  list_del(&tskim->qe);
3593  list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3594 }
3595 
3596 /*
3597  * Start a task management command.
3598  *
3599  * @param[in] tskim BFA task management command instance
3600  * @param[in] itnim i-t nexus for the task management command
3601  * @param[in] lun lun, if applicable
3602  * @param[in] tm_cmnd Task management command code.
3603  * @param[in] t_secs Timeout in seconds
3604  *
3605  * @return None.
3606  */
3607 void
3608 bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
3609  struct scsi_lun lun,
3610  enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
3611 {
3612  tskim->itnim = itnim;
3613  tskim->lun = lun;
3614  tskim->tm_cmnd = tm_cmnd;
3615  tskim->tsecs = tsecs;
3616  tskim->notify = BFA_FALSE;
3617  bfa_stats(itnim, tm_cmnds);
3618 
3619  list_add_tail(&tskim->qe, &itnim->tsk_q);
3620  bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
3621 }
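
As a usage illustration of the interface documented above, a hedged sketch of issuing a LUN reset; the wrapper name and the 60-second timeout below are assumptions, not part of this file:

/*
 * Hypothetical caller sketch (illustrative only): allocate a TM command
 * and fire a LUN reset at the given i-t nexus. Completion is reported
 * asynchronously through bfa_cb_tskim_done().
 */
static bfa_status_t
bfad_im_lun_reset(struct bfa_s *bfa, struct bfad_tskim_s *dtsk,
		  struct bfa_itnim_s *itnim, unsigned int lun_id)
{
	struct bfa_tskim_s *tskim;
	struct scsi_lun lun;

	tskim = bfa_tskim_alloc(bfa, dtsk);
	if (!tskim)
		return BFA_STATUS_FAILED;	/* free list exhausted */

	int_to_scsilun(lun_id, &lun);
	bfa_tskim_start(tskim, itnim, lun, FCP_TM_LUN_RESET, 60);
	return BFA_STATUS_OK;
}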
3622 
3623 void
3624 bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
3625 {
3626  struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3627  struct list_head *qe;
3628  int i;
3629 
3630  for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
3631  bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
3632  list_add_tail(qe, &fcpim->tskim_unused_q);
3633  }
3634 }
3635 
3636 /* BFA FCP module - parent module for fcpim */
3637 
3638 BFA_MODULE(fcp);
3639 
3640 static void
3641 bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
3642  struct bfa_s *bfa)
3643 {
3644  struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3645  struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
3646  struct bfa_mem_dma_s *seg_ptr;
3647  u16 nsegs, idx, per_seg_ios, num_io_req;
3648  u32 km_len = 0;
3649 
3650  /*
3651  * ZERO is an allowed config value for num_ioim_reqs and num_fwtio_reqs.
3652  * If the values are non-zero, clamp them to the supported range.
3653  */
3654  if (cfg->fwcfg.num_ioim_reqs &&
3655  cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
3656  cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
3657  else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
3658  cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3659 
3660  if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
3661  cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3662 
3663  num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3664  if (num_io_req > BFA_IO_MAX) {
3665  if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
3666  cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
3667  cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
3668  } else if (cfg->fwcfg.num_fwtio_reqs)
3669  cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3670  else
3671  cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3672  }
3673 
3674  bfa_fcpim_meminfo(cfg, &km_len);
3675 
3676  num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3677  km_len += num_io_req * sizeof(struct bfa_iotag_s);
3678  km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
3679 
3680  /* dma memory */
3681  nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3682  per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
3683 
3684  bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3685  if (num_io_req >= per_seg_ios) {
3686  num_io_req -= per_seg_ios;
3687  bfa_mem_dma_setup(minfo, seg_ptr,
3688  per_seg_ios * BFI_IOIM_SNSLEN);
3689  } else
3690  bfa_mem_dma_setup(minfo, seg_ptr,
3691  num_io_req * BFI_IOIM_SNSLEN);
3692  }
3693 
3694  /* kva memory */
3695  bfa_mem_kva_setup(minfo, fcp_kva, km_len);
3696 }
3697 
3698 static void
3699 bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3700  struct bfa_pcidev_s *pcidev)
3701 {
3702  struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3703  struct bfa_mem_dma_s *seg_ptr;
3704  u16 idx, nsegs, num_io_req;
3705 
3706  fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3707  fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3708  fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
3709  fcp->num_itns = cfg->fwcfg.num_rports;
3710  fcp->bfa = bfa;
3711 
3712  /*
3713  * Setup the pool of snsbase addr's, that is passed to fw as
3714  * part of bfi_iocfc_cfg_s.
3715  */
3716  num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3717  nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3718 
3719  bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3720 
3721  if (!bfa_mem_dma_virt(seg_ptr))
3722  break;
3723 
3724  fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
3725  fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
3726  bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
3727  }
3728 
3729  fcp->throttle_update_required = 1;
3730  bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
3731 
3732  bfa_iotag_attach(fcp);
3733 
3734  fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
3735  bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
3736  (fcp->num_itns * sizeof(struct bfa_itn_s));
3737  memset(fcp->itn_arr, 0,
3738  (fcp->num_itns * sizeof(struct bfa_itn_s)));
3739 }
3740 
3741 static void
3742 bfa_fcp_detach(struct bfa_s *bfa)
3743 {
3744 }
3745 
3746 static void
3747 bfa_fcp_start(struct bfa_s *bfa)
3748 {
3749  struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3750 
3751  /*
3752  * bfa_init() with flash read is complete. Now invalidate the stale
3753  * content of the lun mask, such as unit attention, rp tag and lp tag.
3754  */
3755  bfa_ioim_lm_init(fcp->bfa);
3756 }
3757 
3758 static void
3759 bfa_fcp_stop(struct bfa_s *bfa)
3760 {
3761 }
3762 
3763 static void
3764 bfa_fcp_iocdisable(struct bfa_s *bfa)
3765 {
3766  struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3767 
3768  bfa_fcpim_iocdisable(fcp);
3769 }
3770 
3771 void
3772 bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
3773 {
3774  struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
3775  struct list_head *qe;
3776  int i;
3777 
3778  /* Update io throttle value only once during driver load time */
3779  if (!mod->throttle_update_required)
3780  return;
3781 
3782  for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
3783  bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
3784  list_add_tail(qe, &mod->iotag_unused_q);
3785  }
3786 
3787  if (mod->num_ioim_reqs != num_ioim_fw) {
3788  bfa_trc(bfa, mod->num_ioim_reqs);
3789  bfa_trc(bfa, num_ioim_fw);
3790  }
3791 
3792  mod->max_ioim_reqs = max_ioim_fw;
3793  mod->num_ioim_reqs = num_ioim_fw;
3794  mod->throttle_update_required = 0;
3795 }
3796 
3797 void
3798 bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
3799  void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
3800 {
3801  struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3802  struct bfa_itn_s *itn;
3803 
3804  itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
3805  itn->isr = isr;
3806 }
3807 
3808 /*
3809  * Itn interrupt processing.
3810  */
3811 void
3812 bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3813 {
3814  struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3815  union bfi_itn_i2h_msg_u msg;
3816  struct bfa_itn_s *itn;
3817 
3818  msg.msg = m;
3819  itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
3820 
3821  if (itn->isr)
3822  itn->isr(bfa, m);
3823  else
3824  WARN_ON(1);
3825 }
3826 
3827 void
3828 bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
3829 {
3830  struct bfa_iotag_s *iotag;
3831  u16 num_io_req, i;
3832 
3833  iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
3834  fcp->iotag_arr = iotag;
3835 
3836  INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
3837  INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
3838  INIT_LIST_HEAD(&fcp->iotag_unused_q);
3839 
3840  num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
3841  for (i = 0; i < num_io_req; i++, iotag++) {
3842  memset(iotag, 0, sizeof(struct bfa_iotag_s));
3843  iotag->tag = i;
3844  if (i < fcp->num_ioim_reqs)
3845  list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
3846  else
3847  list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
3848  }
3849 
3850  bfa_mem_kva_curp(fcp) = (u8 *) iotag;
3851 }
3852 
3853 
3855 /*
3856  * To send the config request, first try to use the throttle value from
3857  * flash. If it is zero, use the driver parameter; effectively
3858  * min(flash_val, drv_val) is used.
3859  */
3860 u16
3861 bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
3862 {
3863  u16 tmp;
3864  struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3865 
3866  /*
3867  * If a throttle value from flash is already in effect after the driver
3868  * is loaded, then until the next load always return the current value
3869  * instead of the actual flash value.
3870  */
3871  if (!fcp->throttle_update_required)
3872  return (u16)fcp->num_ioim_reqs;
3873 
3874  tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
3875  if (!tmp || (tmp > drv_cfg_param))
3876  tmp = drv_cfg_param;
3877 
3878  return tmp;
3879 }
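
For example, with a flash-resident throttle of 256 and a driver parameter of 512 this returns 256, while a flash value of 0 (or unreadable dconf data) yields the driver parameter 512 unchanged.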
3880 
3881 bfa_status_t
3882 bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
3883 {
3884  if (!bfa_dconf_get_min_cfg(bfa)) {
3885  BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
3886  BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
3887  return BFA_STATUS_OK;
3888  }
3889 
3890  return BFA_STATUS_FAILED;
3891 }
3892 
3893 u16
3894 bfa_fcpim_read_throttle(struct bfa_s *bfa)
3895 {
3896  struct bfa_throttle_cfg_s *throttle_cfg =
3897  &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);
3898 
3899  return ((!bfa_dconf_get_min_cfg(bfa)) ?
3900  ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0);
3901 }
3902 
3903 bfa_status_t
3904 bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
3905 {
3906  /* in min cfg no commands should run. */
3907  if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3908  (!bfa_dconf_read_data_valid(bfa)))
3909  return BFA_STATUS_FAILED;
3910 
3911  bfa_fcpim_write_throttle(bfa, value);
3912 
3913  return bfa_dconf_update(bfa);
3914 }
3915 
3916 bfa_status_t
3917 bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
3918 {
3919  struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3920  struct bfa_defs_fcpim_throttle_s throttle;
3921 
3922  if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3923  (!bfa_dconf_read_data_valid(bfa)))
3924  return BFA_STATUS_FAILED;
3925 
3926  memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));
3927 
3928  throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
3929  throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
3930  if (!throttle.cfg_value)
3931  throttle.cfg_value = throttle.cur_value;
3932  throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
3933  memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));
3934 
3935  return BFA_STATUS_OK;
3936 }