Linux Kernel  3.7.1
bfad_bsg.c
1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * General Public License for more details.
16  */
17 
18 #include <linux/uaccess.h>
19 #include "bfad_drv.h"
20 #include "bfad_im.h"
21 #include "bfad_bsg.h"
22 
23 BFA_TRC_FILE(LDRV, BSG);
24 
25 int
26 bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
27 {
28  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
29  int rc = 0;
30  unsigned long flags;
31 
32  spin_lock_irqsave(&bfad->bfad_lock, flags);
33  /* If IOC is not in disabled state - return */
34  if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
35  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
36  iocmd->status = BFA_STATUS_OK;
37  return rc;
38  }
39 
40  init_completion(&bfad->enable_comp);
41  bfa_iocfc_enable(&bfad->bfa);
42  iocmd->status = BFA_STATUS_OK;
43  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
44  wait_for_completion(&bfad->enable_comp);
45 
46  return rc;
47 }
48 
49 int
50 bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
51 {
52  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
53  int rc = 0;
54  unsigned long flags;
55 
56  spin_lock_irqsave(&bfad->bfad_lock, flags);
57  if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
58  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
59  iocmd->status = BFA_STATUS_OK;
60  return rc;
61  }
62 
63  if (bfad->disable_active) {
64  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
65  return -EBUSY;
66  }
67 
68  bfad->disable_active = BFA_TRUE;
69  init_completion(&bfad->disable_comp);
70  bfa_iocfc_disable(&bfad->bfa);
71  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
72 
73  wait_for_completion(&bfad->disable_comp);
74  bfad->disable_active = BFA_FALSE;
75  iocmd->status = BFA_STATUS_OK;
76 
77  return rc;
78 }
79 
80 static int
81 bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
82 {
83  int i;
84  struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
85  struct bfad_im_port_s *im_port;
86  struct bfa_port_attr_s pattr;
87  unsigned long flags;
88 
89  spin_lock_irqsave(&bfad->bfad_lock, flags);
90  bfa_fcport_get_attr(&bfad->bfa, &pattr);
91  iocmd->nwwn = pattr.nwwn;
92  iocmd->pwwn = pattr.pwwn;
93  iocmd->ioc_type = bfa_get_type(&bfad->bfa);
94  iocmd->mac = bfa_get_mac(&bfad->bfa);
95  iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
97  iocmd->factorynwwn = pattr.factorynwwn;
98  iocmd->factorypwwn = pattr.factorypwwn;
99  iocmd->bfad_num = bfad->inst_no;
100  im_port = bfad->pport.im_port;
101  iocmd->host = im_port->shost->host_no;
102  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
103 
104  strcpy(iocmd->name, bfad->adapter_name);
105  strcpy(iocmd->port_name, bfad->port_name);
106  strcpy(iocmd->hwpath, bfad->pci_name);
107 
108  /* set adapter hw path */
109  strcpy(iocmd->adapter_hwpath, bfad->pci_name);
110  for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++)
111  ;
112  for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; )
113  ;
114  iocmd->adapter_hwpath[i] = '\0';
115  iocmd->status = BFA_STATUS_OK;
116  return 0;
117 }
118 
119 static int
120 bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
121 {
122  struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
123  unsigned long flags;
124 
125  spin_lock_irqsave(&bfad->bfad_lock, flags);
126  bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
127  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
128 
129  /* fill in driver attr info */
130  strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
131  strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
132  BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
133  strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
134  iocmd->ioc_attr.adapter_attr.fw_ver);
135  strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
136  iocmd->ioc_attr.adapter_attr.optrom_ver);
137 
138  /* copy chip rev info first otherwise it will be overwritten */
139  memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
140  sizeof(bfad->pci_attr.chip_rev));
141  memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
142  sizeof(struct bfa_ioc_pci_attr_s));
143 
144  iocmd->status = BFA_STATUS_OK;
145  return 0;
146 }
147 
148 int
149 bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
150 {
151  struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
152 
153  bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
154  iocmd->status = BFA_STATUS_OK;
155  return 0;
156 }
157 
158 int
159 bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
160  unsigned int payload_len)
161 {
162  struct bfa_bsg_ioc_fwstats_s *iocmd =
163  (struct bfa_bsg_ioc_fwstats_s *)cmd;
164  void *iocmd_bufptr;
165  unsigned long flags;
166 
167  if (bfad_chk_iocmd_sz(payload_len,
168  sizeof(struct bfa_bsg_ioc_fwstats_s),
169  sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
170  iocmd->status = BFA_STATUS_VERSION_FAIL;
171  goto out;
172  }
173 
174  iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
175  spin_lock_irqsave(&bfad->bfad_lock, flags);
176  iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
177  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
178 
179  if (iocmd->status != BFA_STATUS_OK) {
180  bfa_trc(bfad, iocmd->status);
181  goto out;
182  }
183 out:
184  bfa_trc(bfad, 0x6666);
185  return 0;
186 }
187 
188 int
189 bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
190 {
191  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
192  unsigned long flags;
193 
194  if (v_cmd == IOCMD_IOC_RESET_STATS) {
195  bfa_ioc_clear_stats(&bfad->bfa);
196  iocmd->status = BFA_STATUS_OK;
197  } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
198  spin_lock_irqsave(&bfad->bfad_lock, flags);
199  iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
200  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
201  }
202 
203  return 0;
204 }
205 
206 int
207 bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
208 {
209  struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
210 
211  if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
212  strcpy(bfad->adapter_name, iocmd->name);
213  else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
214  strcpy(bfad->port_name, iocmd->name);
215 
216  iocmd->status = BFA_STATUS_OK;
217  return 0;
218 }
219 
220 int
221 bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
222 {
223  struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
224 
225  iocmd->status = BFA_STATUS_OK;
226  bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
227 
228  return 0;
229 }
230 
231 int
232 bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
233 {
234  struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
235  unsigned long flags;
236 
237  spin_lock_irqsave(&bfad->bfad_lock, flags);
238  iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
239  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
240 
241  return 0;
242 }
243 
244 int
245 bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
246 {
247  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
248  struct bfad_hal_comp fcomp;
249  unsigned long flags;
250 
251  init_completion(&fcomp.comp);
252  spin_lock_irqsave(&bfad->bfad_lock, flags);
253  iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
254  bfad_hcb_comp, &fcomp);
255  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
256  if (iocmd->status != BFA_STATUS_OK) {
257  bfa_trc(bfad, iocmd->status);
258  return 0;
259  }
260  wait_for_completion(&fcomp.comp);
261  iocmd->status = fcomp.status;
262  return 0;
263 }
264 
265 int
266 bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
267 {
268  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
269  struct bfad_hal_comp fcomp;
270  unsigned long flags;
271 
272  init_completion(&fcomp.comp);
273  spin_lock_irqsave(&bfad->bfad_lock, flags);
274  iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
275  bfad_hcb_comp, &fcomp);
276  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
277 
278  if (iocmd->status != BFA_STATUS_OK) {
279  bfa_trc(bfad, iocmd->status);
280  return 0;
281  }
282  wait_for_completion(&fcomp.comp);
283  iocmd->status = fcomp.status;
284  return 0;
285 }
286 
287 static int
288 bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
289 {
290  struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
291  struct bfa_lport_attr_s port_attr;
292  unsigned long flags;
293 
294  spin_lock_irqsave(&bfad->bfad_lock, flags);
295  bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
296  bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
297  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
298 
299  if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
300  iocmd->attr.pid = port_attr.pid;
301  else
302  iocmd->attr.pid = 0;
303 
304  iocmd->attr.port_type = port_attr.port_type;
305  iocmd->attr.loopback = port_attr.loopback;
306  iocmd->attr.authfail = port_attr.authfail;
307  strncpy(iocmd->attr.port_symname.symname,
308  port_attr.port_cfg.sym_name.symname,
309  sizeof(port_attr.port_cfg.sym_name.symname));
310 
311  iocmd->status = BFA_STATUS_OK;
312  return 0;
313 }
314 
315 int
316 bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
317  unsigned int payload_len)
318 {
319  struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
320  struct bfad_hal_comp fcomp;
321  void *iocmd_bufptr;
322  unsigned long flags;
323 
324  if (bfad_chk_iocmd_sz(payload_len,
325  sizeof(struct bfa_bsg_port_stats_s),
326  sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
327  iocmd->status = BFA_STATUS_VERSION_FAIL;
328  return 0;
329  }
330 
331  iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
332 
333  init_completion(&fcomp.comp);
334  spin_lock_irqsave(&bfad->bfad_lock, flags);
335  iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
336  iocmd_bufptr, bfad_hcb_comp, &fcomp);
337  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
338  if (iocmd->status != BFA_STATUS_OK) {
339  bfa_trc(bfad, iocmd->status);
340  goto out;
341  }
342 
343  wait_for_completion(&fcomp.comp);
344  iocmd->status = fcomp.status;
345 out:
346  return 0;
347 }
348 
349 int
350 bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
351 {
352  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
353  struct bfad_hal_comp fcomp;
354  unsigned long flags;
355 
356  init_completion(&fcomp.comp);
357  spin_lock_irqsave(&bfad->bfad_lock, flags);
358  iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
359  bfad_hcb_comp, &fcomp);
360  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
361  if (iocmd->status != BFA_STATUS_OK) {
362  bfa_trc(bfad, iocmd->status);
363  return 0;
364  }
365  wait_for_completion(&fcomp.comp);
366  iocmd->status = fcomp.status;
367  return 0;
368 }
369 
370 int
371 bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
372 {
373  struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
374  unsigned long flags;
375 
376  spin_lock_irqsave(&bfad->bfad_lock, flags);
377  if (v_cmd == IOCMD_PORT_CFG_TOPO)
378  cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
379  else if (v_cmd == IOCMD_PORT_CFG_SPEED)
380  cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
381  else if (v_cmd == IOCMD_PORT_CFG_ALPA)
382  cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
383  else if (v_cmd == IOCMD_PORT_CLR_ALPA)
384  cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
385  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
386 
387  return 0;
388 }
389 
390 int
391 bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
392 {
393  struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
394  (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
395  unsigned long flags;
396 
397  spin_lock_irqsave(&bfad->bfad_lock, flags);
398  iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
399  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
400 
401  return 0;
402 }
403 
404 int
405 bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
406 {
407  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
408  struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
409  unsigned long flags;
410 
411  spin_lock_irqsave(&bfad->bfad_lock, flags);
412  if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
413  if (v_cmd == IOCMD_PORT_BBSC_ENABLE)
414  fcport->cfg.bb_scn_state = BFA_TRUE;
415  else if (v_cmd == IOCMD_PORT_BBSC_DISABLE)
416  fcport->cfg.bb_scn_state = BFA_FALSE;
417  }
418  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
419 
420  iocmd->status = BFA_STATUS_OK;
421  return 0;
422 }
423 
424 static int
425 bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
426 {
427  struct bfa_fcs_lport_s *fcs_port;
428  struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
429  unsigned long flags;
430 
431  spin_lock_irqsave(&bfad->bfad_lock, flags);
432  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
433  iocmd->vf_id, iocmd->pwwn);
434  if (fcs_port == NULL) {
435  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
436  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
437  goto out;
438  }
439 
440  bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
441  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
442  iocmd->status = BFA_STATUS_OK;
443 out:
444  return 0;
445 }
446 
447 int
448 bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
449 {
450  struct bfa_fcs_lport_s *fcs_port;
451  struct bfa_bsg_lport_stats_s *iocmd =
452  (struct bfa_bsg_lport_stats_s *)cmd;
453  unsigned long flags;
454 
455  spin_lock_irqsave(&bfad->bfad_lock, flags);
456  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
457  iocmd->vf_id, iocmd->pwwn);
458  if (fcs_port == NULL) {
459  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
460  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
461  goto out;
462  }
463 
464  bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
465  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
466  iocmd->status = BFA_STATUS_OK;
467 out:
468  return 0;
469 }
470 
471 int
472 bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
473 {
474  struct bfa_fcs_lport_s *fcs_port;
475  struct bfa_bsg_reset_stats_s *iocmd =
476  (struct bfa_bsg_reset_stats_s *)cmd;
477  struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
478  struct list_head *qe, *qen;
479  struct bfa_itnim_s *itnim;
480  unsigned long flags;
481 
482  spin_lock_irqsave(&bfad->bfad_lock, flags);
483  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
484  iocmd->vf_id, iocmd->vpwwn);
485  if (fcs_port == NULL) {
486  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
487  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
488  goto out;
489  }
490 
491  bfa_fcs_lport_clear_stats(fcs_port);
492  /* clear IO stats from all active itnims */
493  list_for_each_safe(qe, qen, &fcpim->itnim_q) {
494  itnim = (struct bfa_itnim_s *) qe;
495  if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
496  continue;
497  bfa_itnim_clear_stats(itnim);
498  }
499  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
500  iocmd->status = BFA_STATUS_OK;
501 out:
502  return 0;
503 }
504 
505 int
506 bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
507 {
508  struct bfa_fcs_lport_s *fcs_port;
509  struct bfa_bsg_lport_iostats_s *iocmd =
510  (struct bfa_bsg_lport_iostats_s *)cmd;
511  unsigned long flags;
512 
513  spin_lock_irqsave(&bfad->bfad_lock, flags);
514  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
515  iocmd->vf_id, iocmd->pwwn);
516  if (fcs_port == NULL) {
517  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
518  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
519  goto out;
520  }
521 
522  bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
523  fcs_port->lp_tag);
524  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
525  iocmd->status = BFA_STATUS_OK;
526 out:
527  return 0;
528 }
529 
530 int
531 bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
532  unsigned int payload_len)
533 {
534  struct bfa_bsg_lport_get_rports_s *iocmd =
535  (struct bfa_bsg_lport_get_rports_s *)cmd;
536  struct bfa_fcs_lport_s *fcs_port;
537  unsigned long flags;
538  void *iocmd_bufptr;
539 
540  if (iocmd->nrports == 0)
541  return -EINVAL;
542 
543  if (bfad_chk_iocmd_sz(payload_len,
544  sizeof(struct bfa_bsg_lport_get_rports_s),
545  sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
546  != BFA_STATUS_OK) {
547  iocmd->status = BFA_STATUS_VERSION_FAIL;
548  return 0;
549  }
550 
551  iocmd_bufptr = (char *)iocmd +
552  sizeof(struct bfa_bsg_lport_get_rports_s);
553  spin_lock_irqsave(&bfad->bfad_lock, flags);
554  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
555  iocmd->vf_id, iocmd->pwwn);
556  if (fcs_port == NULL) {
557  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
558  bfa_trc(bfad, 0);
559  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
560  goto out;
561  }
562 
563  bfa_fcs_lport_get_rport_quals(fcs_port,
564  (struct bfa_rport_qualifier_s *)iocmd_bufptr,
565  &iocmd->nrports);
566  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
567  iocmd->status = BFA_STATUS_OK;
568 out:
569  return 0;
570 }
571 
572 int
573 bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
574 {
575  struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
576  struct bfa_fcs_lport_s *fcs_port;
577  struct bfa_fcs_rport_s *fcs_rport;
578  unsigned long flags;
579 
580  spin_lock_irqsave(&bfad->bfad_lock, flags);
581  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
582  iocmd->vf_id, iocmd->pwwn);
583  if (fcs_port == NULL) {
584  bfa_trc(bfad, 0);
585  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
586  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
587  goto out;
588  }
589 
590  if (iocmd->pid)
591  fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
592  iocmd->rpwwn, iocmd->pid);
593  else
594  fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
595  if (fcs_rport == NULL) {
596  bfa_trc(bfad, 0);
597  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
598  iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
599  goto out;
600  }
601 
602  bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
603  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
604  iocmd->status = BFA_STATUS_OK;
605 out:
606  return 0;
607 }
608 
609 static int
610 bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
611 {
612  struct bfa_bsg_rport_scsi_addr_s *iocmd =
613  (struct bfa_bsg_rport_scsi_addr_s *)cmd;
614  struct bfa_fcs_lport_s *fcs_port;
615  struct bfa_fcs_itnim_s *fcs_itnim;
616  struct bfad_itnim_s *drv_itnim;
617  unsigned long flags;
618 
619  spin_lock_irqsave(&bfad->bfad_lock, flags);
620  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
621  iocmd->vf_id, iocmd->pwwn);
622  if (fcs_port == NULL) {
623  bfa_trc(bfad, 0);
624  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
625  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
626  goto out;
627  }
628 
629  fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
630  if (fcs_itnim == NULL) {
631  bfa_trc(bfad, 0);
632  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
633  iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
634  goto out;
635  }
636 
637  drv_itnim = fcs_itnim->itnim_drv;
638 
639  if (drv_itnim && drv_itnim->im_port)
640  iocmd->host = drv_itnim->im_port->shost->host_no;
641  else {
642  bfa_trc(bfad, 0);
643  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
644  iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
645  goto out;
646  }
647 
648  iocmd->target = drv_itnim->scsi_tgt_id;
649  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
650 
651  iocmd->bus = 0;
652  iocmd->lun = 0;
653  iocmd->status = BFA_STATUS_OK;
654 out:
655  return 0;
656 }
657 
658 int
659 bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
660 {
661  struct bfa_bsg_rport_stats_s *iocmd =
662  (struct bfa_bsg_rport_stats_s *)cmd;
663  struct bfa_fcs_lport_s *fcs_port;
664  struct bfa_fcs_rport_s *fcs_rport;
665  unsigned long flags;
666 
667  spin_lock_irqsave(&bfad->bfad_lock, flags);
668  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
669  iocmd->vf_id, iocmd->pwwn);
670  if (fcs_port == NULL) {
671  bfa_trc(bfad, 0);
672  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
673  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
674  goto out;
675  }
676 
677  fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
678  if (fcs_rport == NULL) {
679  bfa_trc(bfad, 0);
680  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
681  iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
682  goto out;
683  }
684 
685  memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
686  sizeof(struct bfa_rport_stats_s));
687  if (bfa_fcs_rport_get_halrport(fcs_rport)) {
688  memcpy((void *)&iocmd->stats.hal_stats,
689  (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
690  sizeof(struct bfa_rport_hal_stats_s));
691  }
692 
693  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
694  iocmd->status = BFA_STATUS_OK;
695 out:
696  return 0;
697 }
698 
699 int
700 bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
701 {
702  struct bfa_bsg_rport_reset_stats_s *iocmd =
703  (struct bfa_bsg_rport_reset_stats_s *)cmd;
704  struct bfa_fcs_lport_s *fcs_port;
705  struct bfa_fcs_rport_s *fcs_rport;
706  struct bfa_rport_s *rport;
707  unsigned long flags;
708 
709  spin_lock_irqsave(&bfad->bfad_lock, flags);
710  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
711  iocmd->vf_id, iocmd->pwwn);
712  if (fcs_port == NULL) {
713  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
714  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
715  goto out;
716  }
717 
718  fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
719  if (fcs_rport == NULL) {
720  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
721  iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
722  goto out;
723  }
724 
725  memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
726  rport = bfa_fcs_rport_get_halrport(fcs_rport);
727  if (rport)
728  memset(&rport->stats, 0, sizeof(rport->stats));
729  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
730  iocmd->status = BFA_STATUS_OK;
731 out:
732  return 0;
733 }
734 
735 int
736 bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
737 {
738  struct bfa_bsg_rport_set_speed_s *iocmd =
739  (struct bfa_bsg_rport_set_speed_s *)cmd;
740  struct bfa_fcs_lport_s *fcs_port;
741  struct bfa_fcs_rport_s *fcs_rport;
742  unsigned long flags;
743 
744  spin_lock_irqsave(&bfad->bfad_lock, flags);
745  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
746  iocmd->vf_id, iocmd->pwwn);
747  if (fcs_port == NULL) {
748  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
749  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
750  goto out;
751  }
752 
753  fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
754  if (fcs_rport == NULL) {
755  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
756  iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
757  goto out;
758  }
759 
760  fcs_rport->rpf.assigned_speed = iocmd->speed;
761  /* Set this speed in f/w only if the RPSC speed is not available */
762  if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
763  if (fcs_rport->bfa_rport)
764  bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
765  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
766  iocmd->status = BFA_STATUS_OK;
767 out:
768  return 0;
769 }
770 
771 int
772 bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
773 {
774  struct bfa_fcs_vport_s *fcs_vport;
775  struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
776  unsigned long flags;
777 
778  spin_lock_irqsave(&bfad->bfad_lock, flags);
779  fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
780  iocmd->vf_id, iocmd->vpwwn);
781  if (fcs_vport == NULL) {
782  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
783  iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
784  goto out;
785  }
786 
787  bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
788  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
789  iocmd->status = BFA_STATUS_OK;
790 out:
791  return 0;
792 }
793 
794 int
795 bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
796 {
797  struct bfa_fcs_vport_s *fcs_vport;
798  struct bfa_bsg_vport_stats_s *iocmd =
799  (struct bfa_bsg_vport_stats_s *)cmd;
800  unsigned long flags;
801 
802  spin_lock_irqsave(&bfad->bfad_lock, flags);
803  fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
804  iocmd->vf_id, iocmd->vpwwn);
805  if (fcs_vport == NULL) {
806  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
807  iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
808  goto out;
809  }
810 
811  memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
812  sizeof(struct bfa_vport_stats_s));
813  memcpy((void *)&iocmd->vport_stats.port_stats,
814  (void *)&fcs_vport->lport.stats,
815  sizeof(struct bfa_lport_stats_s));
816  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
817  iocmd->status = BFA_STATUS_OK;
818 out:
819  return 0;
820 }
821 
822 int
823 bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
824 {
825  struct bfa_fcs_vport_s *fcs_vport;
826  struct bfa_bsg_reset_stats_s *iocmd =
827  (struct bfa_bsg_reset_stats_s *)cmd;
828  unsigned long flags;
829 
830  spin_lock_irqsave(&bfad->bfad_lock, flags);
831  fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
832  iocmd->vf_id, iocmd->vpwwn);
833  if (fcs_vport == NULL) {
834  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
835  iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
836  goto out;
837  }
838 
839  memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
840  memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
841  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
842  iocmd->status = BFA_STATUS_OK;
843 out:
844  return 0;
845 }
846 
847 static int
848 bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
849  unsigned int payload_len)
850 {
851  struct bfa_bsg_fabric_get_lports_s *iocmd =
852  (struct bfa_bsg_fabric_get_lports_s *)cmd;
853  bfa_fcs_vf_t *fcs_vf;
854  uint32_t nports = iocmd->nports;
855  unsigned long flags;
856  void *iocmd_bufptr;
857 
858  if (nports == 0) {
859  iocmd->status = BFA_STATUS_EINVAL;
860  goto out;
861  }
862 
863  if (bfad_chk_iocmd_sz(payload_len,
864  sizeof(struct bfa_bsg_fabric_get_lports_s),
865  sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
866  iocmd->status = BFA_STATUS_VERSION_FAIL;
867  goto out;
868  }
869 
870  iocmd_bufptr = (char *)iocmd +
871  sizeof(struct bfa_bsg_fabric_get_lports_s);
872 
873  spin_lock_irqsave(&bfad->bfad_lock, flags);
874  fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
875  if (fcs_vf == NULL) {
876  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
877  iocmd->status = BFA_STATUS_UNKNOWN_VFID;
878  goto out;
879  }
880  bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
881  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
882 
883  iocmd->nports = nports;
884  iocmd->status = BFA_STATUS_OK;
885 out:
886  return 0;
887 }
888 
889 int
890 bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
891 {
892  struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
893  unsigned long flags;
894 
895  spin_lock_irqsave(&bfad->bfad_lock, flags);
896  iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
897  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
898 
899  return 0;
900 }
901 
902 int
903 bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
904 {
905  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
906  struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
907  unsigned long flags;
908 
909  spin_lock_irqsave(&bfad->bfad_lock, flags);
910 
911  if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
912  (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
913  iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
914  else {
915  if (cmd == IOCMD_RATELIM_ENABLE)
916  fcport->cfg.ratelimit = BFA_TRUE;
917  else if (cmd == IOCMD_RATELIM_DISABLE)
918  fcport->cfg.ratelimit = BFA_FALSE;
919 
920  if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
921  fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
922 
923  iocmd->status = BFA_STATUS_OK;
924  }
925 
926  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
927 
928  return 0;
929 }
930 
931 int
932 bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
933 {
934  struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
935  struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
936  unsigned long flags;
937 
938  spin_lock_irqsave(&bfad->bfad_lock, flags);
939 
940  /* Auto and speeds greater than the supported speed, are invalid */
941  if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
942  (iocmd->speed > fcport->speed_sup)) {
943  iocmd->status = BFA_STATUS_UNSUPP_SPEED;
944  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
945  return 0;
946  }
947 
948  if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
949  (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
950  iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
951  else {
952  fcport->cfg.trl_def_speed = iocmd->speed;
953  iocmd->status = BFA_STATUS_OK;
954  }
955  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
956 
957  return 0;
958 }
959 
960 int
961 bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
962 {
963  struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
964  unsigned long flags;
965 
966  spin_lock_irqsave(&bfad->bfad_lock, flags);
967  bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
968  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
969  iocmd->status = BFA_STATUS_OK;
970  return 0;
971 }
972 
973 int
974 bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
975 {
976  struct bfa_bsg_fcpim_modstats_s *iocmd =
977  (struct bfa_bsg_fcpim_modstats_s *)cmd;
978  struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
979  struct list_head *qe, *qen;
980  struct bfa_itnim_s *itnim;
981  unsigned long flags;
982 
983  spin_lock_irqsave(&bfad->bfad_lock, flags);
984  /* accumulate IO stats from itnim */
985  memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
986  list_for_each_safe(qe, qen, &fcpim->itnim_q) {
987  itnim = (struct bfa_itnim_s *) qe;
988  bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
989  }
990  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
991  iocmd->status = BFA_STATUS_OK;
992  return 0;
993 }
994 
995 int
996 bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
997 {
998  struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
999  (struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
1000  struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1001  struct list_head *qe, *qen;
1002  struct bfa_itnim_s *itnim;
1003  unsigned long flags;
1004 
1005  spin_lock_irqsave(&bfad->bfad_lock, flags);
1006  list_for_each_safe(qe, qen, &fcpim->itnim_q) {
1007  itnim = (struct bfa_itnim_s *) qe;
1008  bfa_itnim_clear_stats(itnim);
1009  }
1010  memset(&fcpim->del_itn_stats, 0,
1011  sizeof(struct bfa_fcpim_del_itn_stats_s));
1012  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1013  iocmd->status = BFA_STATUS_OK;
1014  return 0;
1015 }
1016 
1017 int
1018 bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
1019 {
1020  struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
1021  (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
1022  struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1023  unsigned long flags;
1024 
1025  spin_lock_irqsave(&bfad->bfad_lock, flags);
1026  memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
1027  sizeof(struct bfa_fcpim_del_itn_stats_s));
1028  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1029 
1030  iocmd->status = BFA_STATUS_OK;
1031  return 0;
1032 }
1033 
1034 static int
1035 bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
1036 {
1037  struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
1038  struct bfa_fcs_lport_s *fcs_port;
1039  unsigned long flags;
1040 
1041  spin_lock_irqsave(&bfad->bfad_lock, flags);
1042  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1043  iocmd->vf_id, iocmd->lpwwn);
1044  if (!fcs_port)
1045  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1046  else
1047  iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
1048  iocmd->rpwwn, &iocmd->attr);
1049  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1050  return 0;
1051 }
1052 
1053 static int
1054 bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
1055 {
1056  struct bfa_bsg_itnim_iostats_s *iocmd =
1057  (struct bfa_bsg_itnim_iostats_s *)cmd;
1058  struct bfa_fcs_lport_s *fcs_port;
1059  struct bfa_fcs_itnim_s *itnim;
1060  unsigned long flags;
1061 
1062  spin_lock_irqsave(&bfad->bfad_lock, flags);
1063  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1064  iocmd->vf_id, iocmd->lpwwn);
1065  if (!fcs_port) {
1066  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1067  bfa_trc(bfad, 0);
1068  } else {
1069  itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1070  if (itnim == NULL)
1071  iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1072  else {
1073  iocmd->status = BFA_STATUS_OK;
1074  if (bfa_fcs_itnim_get_halitn(itnim))
1075  memcpy((void *)&iocmd->iostats, (void *)
1076  &(bfa_fcs_itnim_get_halitn(itnim)->stats),
1077  sizeof(struct bfa_itnim_iostats_s));
1078  }
1079  }
1080  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1081  return 0;
1082 }
1083 
1084 static int
1085 bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
1086 {
1087  struct bfa_bsg_rport_reset_stats_s *iocmd =
1088  (struct bfa_bsg_rport_reset_stats_s *)cmd;
1089  struct bfa_fcs_lport_s *fcs_port;
1090  struct bfa_fcs_itnim_s *itnim;
1091  unsigned long flags;
1092 
1093  spin_lock_irqsave(&bfad->bfad_lock, flags);
1094  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1095  iocmd->vf_id, iocmd->pwwn);
1096  if (!fcs_port)
1097  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1098  else {
1099  itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1100  if (itnim == NULL)
1101  iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1102  else {
1103  iocmd->status = BFA_STATUS_OK;
1104  bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
1105  bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
1106  }
1107  }
1108  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1109 
1110  return 0;
1111 }
1112 
1113 static int
1114 bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
1115 {
1116  struct bfa_bsg_itnim_itnstats_s *iocmd =
1117  (struct bfa_bsg_itnim_itnstats_s *)cmd;
1118  struct bfa_fcs_lport_s *fcs_port;
1119  struct bfa_fcs_itnim_s *itnim;
1120  unsigned long flags;
1121 
1122  spin_lock_irqsave(&bfad->bfad_lock, flags);
1123  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1124  iocmd->vf_id, iocmd->lpwwn);
1125  if (!fcs_port) {
1126  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1127  bfa_trc(bfad, 0);
1128  } else {
1129  itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1130  if (itnim == NULL)
1131  iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1132  else {
1133  iocmd->status = BFA_STATUS_OK;
1134  bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
1135  &iocmd->itnstats);
1136  }
1137  }
1138  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1139  return 0;
1140 }
1141 
1142 int
1143 bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
1144 {
1145  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1146  unsigned long flags;
1147 
1148  spin_lock_irqsave(&bfad->bfad_lock, flags);
1149  iocmd->status = bfa_fcport_enable(&bfad->bfa);
1150  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1151 
1152  return 0;
1153 }
1154 
1155 int
1156 bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
1157 {
1158  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1159  unsigned long flags;
1160 
1161  spin_lock_irqsave(&bfad->bfad_lock, flags);
1162  iocmd->status = bfa_fcport_disable(&bfad->bfa);
1163  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1164 
1165  return 0;
1166 }
1167 
1168 int
1169 bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
1170 {
1171  struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
1172  struct bfad_hal_comp fcomp;
1173  unsigned long flags;
1174 
1175  init_completion(&fcomp.comp);
1176  spin_lock_irqsave(&bfad->bfad_lock, flags);
1177  iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
1178  &iocmd->pcifn_cfg,
1179  bfad_hcb_comp, &fcomp);
1180  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1181  if (iocmd->status != BFA_STATUS_OK)
1182  goto out;
1183 
1184  wait_for_completion(&fcomp.comp);
1185  iocmd->status = fcomp.status;
1186 out:
1187  return 0;
1188 }
1189 
1190 int
1191 bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
1192 {
1193  struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1194  struct bfad_hal_comp fcomp;
1195  unsigned long flags;
1196 
1197  init_completion(&fcomp.comp);
1198  spin_lock_irqsave(&bfad->bfad_lock, flags);
1199  iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
1200  &iocmd->pcifn_id, iocmd->port,
1201  iocmd->pcifn_class, iocmd->bw_min,
1202  iocmd->bw_max, bfad_hcb_comp, &fcomp);
1203  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1204  if (iocmd->status != BFA_STATUS_OK)
1205  goto out;
1206 
1207  wait_for_completion(&fcomp.comp);
1208  iocmd->status = fcomp.status;
1209 out:
1210  return 0;
1211 }
1212 
1213 int
1214 bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
1215 {
1216  struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1217  struct bfad_hal_comp fcomp;
1218  unsigned long flags;
1219 
1220  init_completion(&fcomp.comp);
1221  spin_lock_irqsave(&bfad->bfad_lock, flags);
1222  iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
1223  iocmd->pcifn_id,
1224  bfad_hcb_comp, &fcomp);
1225  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1226  if (iocmd->status != BFA_STATUS_OK)
1227  goto out;
1228 
1229  wait_for_completion(&fcomp.comp);
1230  iocmd->status = fcomp.status;
1231 out:
1232  return 0;
1233 }
1234 
1235 int
1236 bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
1237 {
1238  struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1239  struct bfad_hal_comp fcomp;
1240  unsigned long flags;
1241 
1242  init_completion(&fcomp.comp);
1243  spin_lock_irqsave(&bfad->bfad_lock, flags);
1244  iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
1245  iocmd->pcifn_id, iocmd->bw_min,
1246  iocmd->bw_max, bfad_hcb_comp, &fcomp);
1247  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1248  bfa_trc(bfad, iocmd->status);
1249  if (iocmd->status != BFA_STATUS_OK)
1250  goto out;
1251 
1252  wait_for_completion(&fcomp.comp);
1253  iocmd->status = fcomp.status;
1254  bfa_trc(bfad, iocmd->status);
1255 out:
1256  return 0;
1257 }
1258 
1259 int
1260 bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
1261 {
1262  struct bfa_bsg_adapter_cfg_mode_s *iocmd =
1263  (struct bfa_bsg_adapter_cfg_mode_s *)cmd;
1264  struct bfad_hal_comp fcomp;
1265  unsigned long flags = 0;
1266 
1267  init_completion(&fcomp.comp);
1268  spin_lock_irqsave(&bfad->bfad_lock, flags);
1269  iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
1270  iocmd->cfg.mode, iocmd->cfg.max_pf,
1271  iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
1272  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1273  if (iocmd->status != BFA_STATUS_OK)
1274  goto out;
1275 
1276  wait_for_completion(&fcomp.comp);
1277  iocmd->status = fcomp.status;
1278 out:
1279  return 0;
1280 }
1281 
1282 int
1283 bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
1284 {
1285  struct bfa_bsg_port_cfg_mode_s *iocmd =
1286  (struct bfa_bsg_port_cfg_mode_s *)cmd;
1287  struct bfad_hal_comp fcomp;
1288  unsigned long flags = 0;
1289 
1290  init_completion(&fcomp.comp);
1291  spin_lock_irqsave(&bfad->bfad_lock, flags);
1292  iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
1293  iocmd->instance, iocmd->cfg.mode,
1294  iocmd->cfg.max_pf, iocmd->cfg.max_vf,
1295  bfad_hcb_comp, &fcomp);
1296  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1297  if (iocmd->status != BFA_STATUS_OK)
1298  goto out;
1299 
1300  wait_for_completion(&fcomp.comp);
1301  iocmd->status = fcomp.status;
1302 out:
1303  return 0;
1304 }
1305 
1306 int
1307 bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
1308 {
1309  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1310  struct bfad_hal_comp fcomp;
1311  unsigned long flags;
1312 
1313  init_completion(&fcomp.comp);
1314  spin_lock_irqsave(&bfad->bfad_lock, flags);
1315  if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
1316  iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
1317  bfad_hcb_comp, &fcomp);
1318  else
1319  iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
1320  bfad_hcb_comp, &fcomp);
1321  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1322 
1323  if (iocmd->status != BFA_STATUS_OK)
1324  goto out;
1325 
1326  wait_for_completion(&fcomp.comp);
1327  iocmd->status = fcomp.status;
1328 out:
1329  return 0;
1330 }
1331 
1332 int
1333 bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
1334 {
1335  struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
1336  struct bfad_hal_comp fcomp;
1337  unsigned long flags;
1338 
1339  init_completion(&fcomp.comp);
1340  iocmd->status = BFA_STATUS_OK;
1341  spin_lock_irqsave(&bfad->bfad_lock, flags);
1342  iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
1343  bfad_hcb_comp, &fcomp);
1344  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1345 
1346  if (iocmd->status != BFA_STATUS_OK)
1347  goto out;
1348 
1349  wait_for_completion(&fcomp.comp);
1350  iocmd->status = fcomp.status;
1351 out:
1352  return 0;
1353 }
1354 
1355 int
1356 bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1357 {
1358  struct bfa_bsg_cee_attr_s *iocmd =
1359  (struct bfa_bsg_cee_attr_s *)cmd;
1360  void *iocmd_bufptr;
1361  struct bfad_hal_comp cee_comp;
1362  unsigned long flags;
1363 
1364  if (bfad_chk_iocmd_sz(payload_len,
1365  sizeof(struct bfa_bsg_cee_attr_s),
1366  sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
1367  iocmd->status = BFA_STATUS_VERSION_FAIL;
1368  return 0;
1369  }
1370 
1371  iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
1372 
1373  cee_comp.status = 0;
1374  init_completion(&cee_comp.comp);
1376  spin_lock_irqsave(&bfad->bfad_lock, flags);
1377  iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
1378  bfad_hcb_comp, &cee_comp);
1379  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1380  if (iocmd->status != BFA_STATUS_OK) {
1382  bfa_trc(bfad, 0x5555);
1383  goto out;
1384  }
1385  wait_for_completion(&cee_comp.comp);
1387 out:
1388  return 0;
1389 }
1390 
1391 int
1392 bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
1393  unsigned int payload_len)
1394 {
1395  struct bfa_bsg_cee_stats_s *iocmd =
1396  (struct bfa_bsg_cee_stats_s *)cmd;
1397  void *iocmd_bufptr;
1398  struct bfad_hal_comp cee_comp;
1399  unsigned long flags;
1400 
1401  if (bfad_chk_iocmd_sz(payload_len,
1402  sizeof(struct bfa_bsg_cee_stats_s),
1403  sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
1404  iocmd->status = BFA_STATUS_VERSION_FAIL;
1405  return 0;
1406  }
1407 
1408  iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
1409 
1410  cee_comp.status = 0;
1411  init_completion(&cee_comp.comp);
1413  spin_lock_irqsave(&bfad->bfad_lock, flags);
1414  iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
1415  bfad_hcb_comp, &cee_comp);
1416  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1417  if (iocmd->status != BFA_STATUS_OK) {
1419  bfa_trc(bfad, 0x5555);
1420  goto out;
1421  }
1422  wait_for_completion(&cee_comp.comp);
1424 out:
1425  return 0;
1426 }
1427 
1428 int
1429 bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
1430 {
1431  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1432  unsigned long flags;
1433 
1434  spin_lock_irqsave(&bfad->bfad_lock, flags);
1435  iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
1436  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1437  if (iocmd->status != BFA_STATUS_OK)
1438  bfa_trc(bfad, 0x5555);
1439  return 0;
1440 }
1441 
1442 int
1443 bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
1444 {
1445  struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
1446  struct bfad_hal_comp fcomp;
1447  unsigned long flags;
1448 
1449  init_completion(&fcomp.comp);
1450  spin_lock_irqsave(&bfad->bfad_lock, flags);
1451  iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
1452  bfad_hcb_comp, &fcomp);
1453  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1454  bfa_trc(bfad, iocmd->status);
1455  if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1456  goto out;
1457 
1458  wait_for_completion(&fcomp.comp);
1459  iocmd->status = fcomp.status;
1460 out:
1461  return 0;
1462 }
1463 
1464 int
1465 bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
1466 {
1467  struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
1468  struct bfad_hal_comp fcomp;
1469  unsigned long flags;
1470 
1471  init_completion(&fcomp.comp);
1472  spin_lock_irqsave(&bfad->bfad_lock, flags);
1473  iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
1474  bfad_hcb_comp, &fcomp);
1475  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1476  bfa_trc(bfad, iocmd->status);
1477  if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1478  goto out;
1479  wait_for_completion(&fcomp.comp);
1480  iocmd->status = fcomp.status;
1481 out:
1482  return 0;
1483 }
1484 
1485 int
1486 bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
1487 {
1488  struct bfa_bsg_flash_attr_s *iocmd =
1489  (struct bfa_bsg_flash_attr_s *)cmd;
1490  struct bfad_hal_comp fcomp;
1491  unsigned long flags;
1492 
1493  init_completion(&fcomp.comp);
1494  spin_lock_irqsave(&bfad->bfad_lock, flags);
1495  iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
1496  bfad_hcb_comp, &fcomp);
1497  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1498  if (iocmd->status != BFA_STATUS_OK)
1499  goto out;
1500  wait_for_completion(&fcomp.comp);
1501  iocmd->status = fcomp.status;
1502 out:
1503  return 0;
1504 }
1505 
1506 int
1507 bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
1508 {
1509  struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1510  struct bfad_hal_comp fcomp;
1511  unsigned long flags;
1512 
1513  init_completion(&fcomp.comp);
1514  spin_lock_irqsave(&bfad->bfad_lock, flags);
1515  iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1516  iocmd->instance, bfad_hcb_comp, &fcomp);
1517  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1518  if (iocmd->status != BFA_STATUS_OK)
1519  goto out;
1520  wait_for_completion(&fcomp.comp);
1521  iocmd->status = fcomp.status;
1522 out:
1523  return 0;
1524 }
1525 
1526 int
1527 bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
1528  unsigned int payload_len)
1529 {
1530  struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1531  void *iocmd_bufptr;
1532  struct bfad_hal_comp fcomp;
1533  unsigned long flags;
1534 
1535  if (bfad_chk_iocmd_sz(payload_len,
1536  sizeof(struct bfa_bsg_flash_s),
1537  iocmd->bufsz) != BFA_STATUS_OK) {
1538  iocmd->status = BFA_STATUS_VERSION_FAIL;
1539  return 0;
1540  }
1541 
1542  iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1543 
1544  init_completion(&fcomp.comp);
1545  spin_lock_irqsave(&bfad->bfad_lock, flags);
1546  iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
1547  iocmd->type, iocmd->instance, iocmd_bufptr,
1548  iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
1549  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1550  if (iocmd->status != BFA_STATUS_OK)
1551  goto out;
1552  wait_for_completion(&fcomp.comp);
1553  iocmd->status = fcomp.status;
1554 out:
1555  return 0;
1556 }
1557 
1558 int
1559 bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
1560  unsigned int payload_len)
1561 {
1562  struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1563  struct bfad_hal_comp fcomp;
1564  void *iocmd_bufptr;
1565  unsigned long flags;
1566 
1567  if (bfad_chk_iocmd_sz(payload_len,
1568  sizeof(struct bfa_bsg_flash_s),
1569  iocmd->bufsz) != BFA_STATUS_OK) {
1570  iocmd->status = BFA_STATUS_VERSION_FAIL;
1571  return 0;
1572  }
1573 
1574  iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1575 
1576  init_completion(&fcomp.comp);
1577  spin_lock_irqsave(&bfad->bfad_lock, flags);
1578  iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1579  iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
1580  bfad_hcb_comp, &fcomp);
1581  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1582  if (iocmd->status != BFA_STATUS_OK)
1583  goto out;
1584  wait_for_completion(&fcomp.comp);
1585  iocmd->status = fcomp.status;
1586 out:
1587  return 0;
1588 }
1589 
1590 int
1591 bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
1592 {
1593  struct bfa_bsg_diag_get_temp_s *iocmd =
1594  (struct bfa_bsg_diag_get_temp_s *)cmd;
1595  struct bfad_hal_comp fcomp;
1596  unsigned long flags;
1597 
1598  init_completion(&fcomp.comp);
1599  spin_lock_irqsave(&bfad->bfad_lock, flags);
1600  iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
1601  &iocmd->result, bfad_hcb_comp, &fcomp);
1602  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1603  bfa_trc(bfad, iocmd->status);
1604  if (iocmd->status != BFA_STATUS_OK)
1605  goto out;
1606  wait_for_completion(&fcomp.comp);
1607  iocmd->status = fcomp.status;
1608 out:
1609  return 0;
1610 }
1611 
1612 int
1613 bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
1614 {
1615  struct bfa_bsg_diag_memtest_s *iocmd =
1616  (struct bfa_bsg_diag_memtest_s *)cmd;
1617  struct bfad_hal_comp fcomp;
1618  unsigned long flags;
1619 
1620  init_completion(&fcomp.comp);
1621  spin_lock_irqsave(&bfad->bfad_lock, flags);
1622  iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
1623  &iocmd->memtest, iocmd->pat,
1624  &iocmd->result, bfad_hcb_comp, &fcomp);
1625  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1626  bfa_trc(bfad, iocmd->status);
1627  if (iocmd->status != BFA_STATUS_OK)
1628  goto out;
1629  wait_for_completion(&fcomp.comp);
1630  iocmd->status = fcomp.status;
1631 out:
1632  return 0;
1633 }
1634 
1635 int
1636 bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
1637 {
1638  struct bfa_bsg_diag_loopback_s *iocmd =
1639  (struct bfa_bsg_diag_loopback_s *)cmd;
1640  struct bfad_hal_comp fcomp;
1641  unsigned long flags;
1642 
1643  init_completion(&fcomp.comp);
1644  spin_lock_irqsave(&bfad->bfad_lock, flags);
1645  iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
1646  iocmd->speed, iocmd->lpcnt, iocmd->pat,
1647  &iocmd->result, bfad_hcb_comp, &fcomp);
1648  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1649  bfa_trc(bfad, iocmd->status);
1650  if (iocmd->status != BFA_STATUS_OK)
1651  goto out;
1652  wait_for_completion(&fcomp.comp);
1653  iocmd->status = fcomp.status;
1654 out:
1655  return 0;
1656 }
1657 
1658 int
1659 bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
1660 {
1661  struct bfa_bsg_diag_fwping_s *iocmd =
1662  (struct bfa_bsg_diag_fwping_s *)cmd;
1663  struct bfad_hal_comp fcomp;
1664  unsigned long flags;
1665 
1666  init_completion(&fcomp.comp);
1667  spin_lock_irqsave(&bfad->bfad_lock, flags);
1668  iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
1669  iocmd->pattern, &iocmd->result,
1670  bfad_hcb_comp, &fcomp);
1671  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1672  bfa_trc(bfad, iocmd->status);
1673  if (iocmd->status != BFA_STATUS_OK)
1674  goto out;
1675  bfa_trc(bfad, 0x77771);
1676  wait_for_completion(&fcomp.comp);
1677  iocmd->status = fcomp.status;
1678 out:
1679  return 0;
1680 }
1681 
1682 int
1683 bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
1684 {
1685  struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
1686  struct bfad_hal_comp fcomp;
1687  unsigned long flags;
1688 
1689  init_completion(&fcomp.comp);
1690  spin_lock_irqsave(&bfad->bfad_lock, flags);
1691  iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
1692  iocmd->queue, &iocmd->result,
1693  bfad_hcb_comp, &fcomp);
1694  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1695  if (iocmd->status != BFA_STATUS_OK)
1696  goto out;
1697  wait_for_completion(&fcomp.comp);
1698  iocmd->status = fcomp.status;
1699 out:
1700  return 0;
1701 }
1702 
1703 int
1704 bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
1705 {
1706  struct bfa_bsg_sfp_show_s *iocmd =
1707  (struct bfa_bsg_sfp_show_s *)cmd;
1708  struct bfad_hal_comp fcomp;
1709  unsigned long flags;
1710 
1711  init_completion(&fcomp.comp);
1712  spin_lock_irqsave(&bfad->bfad_lock, flags);
1713  iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
1714  bfad_hcb_comp, &fcomp);
1715  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1716  bfa_trc(bfad, iocmd->status);
1717  if (iocmd->status != BFA_STATUS_OK)
1718  goto out;
1719  wait_for_completion(&fcomp.comp);
1720  iocmd->status = fcomp.status;
1721  bfa_trc(bfad, iocmd->status);
1722 out:
1723  return 0;
1724 }
1725 
1726 int
1727 bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
1728 {
1729  struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
1730  unsigned long flags;
1731 
1732  spin_lock_irqsave(&bfad->bfad_lock, flags);
1733  iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
1734  &iocmd->ledtest);
1735  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1736  return 0;
1737 }
1738 
1739 int
1740 bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
1741 {
1742  struct bfa_bsg_diag_beacon_s *iocmd =
1743  (struct bfa_bsg_diag_beacon_s *)cmd;
1744  unsigned long flags;
1745 
1746  spin_lock_irqsave(&bfad->bfad_lock, flags);
1747  iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
1748  iocmd->beacon, iocmd->link_e2e_beacon,
1749  iocmd->second);
1750  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1751  return 0;
1752 }
1753 
1754 int
1755 bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
1756 {
1757  struct bfa_bsg_diag_lb_stat_s *iocmd =
1758  (struct bfa_bsg_diag_lb_stat_s *)cmd;
1759  unsigned long flags;
1760 
1761  spin_lock_irqsave(&bfad->bfad_lock, flags);
1762  iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
1763  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1764  bfa_trc(bfad, iocmd->status);
1765 
1766  return 0;
1767 }
1768 
1769 int
1770 bfad_iocmd_diag_cfg_dport(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
1771 {
1772  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1773  unsigned long flags;
1774  struct bfad_hal_comp fcomp;
1775 
1776  init_completion(&fcomp.comp);
1777  spin_lock_irqsave(&bfad->bfad_lock, flags);
1778  if (cmd == IOCMD_DIAG_DPORT_ENABLE)
1779  iocmd->status = bfa_dport_enable(&bfad->bfa,
1780  bfad_hcb_comp, &fcomp);
1781  else if (cmd == IOCMD_DIAG_DPORT_DISABLE)
1782  iocmd->status = bfa_dport_disable(&bfad->bfa,
1783  bfad_hcb_comp, &fcomp);
1784  else {
1785  bfa_trc(bfad, 0);
1786  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1787  return -EINVAL;
1788  }
1789  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1790 
1791  if (iocmd->status != BFA_STATUS_OK)
1792  bfa_trc(bfad, iocmd->status);
1793  else {
1794  wait_for_completion(&fcomp.comp);
1795  iocmd->status = fcomp.status;
1796  }
1797 
1798  return 0;
1799 }
1800 
1801 int
1802 bfad_iocmd_diag_dport_get_state(struct bfad_s *bfad, void *pcmd)
1803 {
1804  struct bfa_bsg_diag_dport_get_state_s *iocmd =
1805  (struct bfa_bsg_diag_dport_get_state_s *)pcmd;
1806  unsigned long flags;
1807 
1808  spin_lock_irqsave(&bfad->bfad_lock, flags);
1809  iocmd->status = bfa_dport_get_state(&bfad->bfa, &iocmd->state);
1810  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1811 
1812  return 0;
1813 }
1814 
1815 int
1816 bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
1817 {
1818  struct bfa_bsg_phy_attr_s *iocmd =
1819  (struct bfa_bsg_phy_attr_s *)cmd;
1820  struct bfad_hal_comp fcomp;
1821  unsigned long flags;
1822 
1823  init_completion(&fcomp.comp);
1824  spin_lock_irqsave(&bfad->bfad_lock, flags);
1825  iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
1826  &iocmd->attr, bfad_hcb_comp, &fcomp);
1827  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1828  if (iocmd->status != BFA_STATUS_OK)
1829  goto out;
1830  wait_for_completion(&fcomp.comp);
1831  iocmd->status = fcomp.status;
1832 out:
1833  return 0;
1834 }
1835 
1836 int
1837 bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
1838 {
1839  struct bfa_bsg_phy_stats_s *iocmd =
1840  (struct bfa_bsg_phy_stats_s *)cmd;
1841  struct bfad_hal_comp fcomp;
1842  unsigned long flags;
1843 
1844  init_completion(&fcomp.comp);
1845  spin_lock_irqsave(&bfad->bfad_lock, flags);
1846  iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
1847  &iocmd->stats, bfad_hcb_comp, &fcomp);
1848  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1849  if (iocmd->status != BFA_STATUS_OK)
1850  goto out;
1851  wait_for_completion(&fcomp.comp);
1852  iocmd->status = fcomp.status;
1853 out:
1854  return 0;
1855 }
1856 
1857 int
1858 bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1859 {
1860  struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1861  struct bfad_hal_comp fcomp;
1862  void *iocmd_bufptr;
1863  unsigned long flags;
1864 
1865  if (bfad_chk_iocmd_sz(payload_len,
1866  sizeof(struct bfa_bsg_phy_s),
1867  iocmd->bufsz) != BFA_STATUS_OK) {
1868  iocmd->status = BFA_STATUS_VERSION_FAIL;
1869  return 0;
1870  }
1871 
1872  iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1873  init_completion(&fcomp.comp);
1874  spin_lock_irqsave(&bfad->bfad_lock, flags);
1875  iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
1876  iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1877  0, bfad_hcb_comp, &fcomp);
1878  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1879  if (iocmd->status != BFA_STATUS_OK)
1880  goto out;
1881  wait_for_completion(&fcomp.comp);
1882  iocmd->status = fcomp.status;
1883  if (iocmd->status != BFA_STATUS_OK)
1884  goto out;
1885 out:
1886  return 0;
1887 }
1888 
1889 int
1890 bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
1891 {
1892  struct bfa_bsg_vhba_attr_s *iocmd =
1893  (struct bfa_bsg_vhba_attr_s *)cmd;
1894  struct bfa_vhba_attr_s *attr = &iocmd->attr;
1895  unsigned long flags;
1896 
1897  spin_lock_irqsave(&bfad->bfad_lock, flags);
1898  attr->pwwn = bfad->bfa.ioc.attr->pwwn;
1899  attr->nwwn = bfad->bfa.ioc.attr->nwwn;
1900  attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
1901  attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
1902  attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
1903  iocmd->status = BFA_STATUS_OK;
1904  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1905  return 0;
1906 }
1907 
1908 int
1909 bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1910 {
1911  struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1912  void *iocmd_bufptr;
1913  struct bfad_hal_comp fcomp;
1914  unsigned long flags;
1915 
1916  if (bfad_chk_iocmd_sz(payload_len,
1917  sizeof(struct bfa_bsg_phy_s),
1918  iocmd->bufsz) != BFA_STATUS_OK) {
1919  iocmd->status = BFA_STATUS_VERSION_FAIL;
1920  return 0;
1921  }
1922 
1923  iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1924  init_completion(&fcomp.comp);
1925  spin_lock_irqsave(&bfad->bfad_lock, flags);
1926  iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
1927  iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1928  0, bfad_hcb_comp, &fcomp);
1929  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1930  if (iocmd->status != BFA_STATUS_OK)
1931  goto out;
1932  wait_for_completion(&fcomp.comp);
1933  iocmd->status = fcomp.status;
1934 out:
1935  return 0;
1936 }
1937 
1938 int
1939 bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
1940 {
1941  struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
1942  void *iocmd_bufptr;
1943 
1944  if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
1945  bfa_trc(bfad, sizeof(struct bfa_plog_s));
1946  iocmd->status = BFA_STATUS_EINVAL;
1947  goto out;
1948  }
1949 
1950  iocmd->status = BFA_STATUS_OK;
1951  iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
1952  memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
1953 out:
1954  return 0;
1955 }
1956 
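/*
 * Firmware core dumps are read out in chunks: userspace repeatedly issues
 * IOCMD_DEBUG_FW_CORE with a buffer of at least BFA_DEBUG_FW_CORE_CHUNK_SZ
 * bytes and a u32-aligned offset; bfa_ioc_debug_fwcore() copies one chunk
 * and returns the advanced offset in iocmd->offset for the next call.
 */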
1957 #define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
1958 int
1959 bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
1960  unsigned int payload_len)
1961 {
1962  struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
1963  void *iocmd_bufptr;
1964  unsigned long flags;
1965  u32 offset;
1966 
1967  if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
1968  iocmd->bufsz) != BFA_STATUS_OK) {
1969  iocmd->status = BFA_STATUS_VERSION_FAIL;
1970  return 0;
1971  }
1972 
1973  if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
1974  !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
1975  !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
1976  bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
1977  iocmd->status = BFA_STATUS_EINVAL;
1978  goto out;
1979  }
1980 
1981  iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
1982  spin_lock_irqsave(&bfad->bfad_lock, flags);
1983  offset = iocmd->offset;
1984  iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
1985  &offset, &iocmd->bufsz);
1986  iocmd->offset = offset;
1987  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1988 out:
1989  return 0;
1990 }
1991 
1992 int
1993 bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
1994 {
1995  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1996  unsigned long flags;
1997 
1998  if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
1999  spin_lock_irqsave(&bfad->bfad_lock, flags);
2000  bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
2001  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2002  } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
2003  bfad->plog_buf.head = bfad->plog_buf.tail = 0;
2004  else if (v_cmd == IOCMD_DEBUG_START_DTRC)
2005  bfa_trc_init(bfad->trcmod);
2006  else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
2007  bfa_trc_stop(bfad->trcmod);
2008 
2009  iocmd->status = BFA_STATUS_OK;
2010  return 0;
2011 }
2012 
2013 int
2014 bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
2015 {
2016  struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
2017 
2018  if (iocmd->ctl == BFA_TRUE)
2019  bfad->plog_buf.plog_enabled = 1;
2020  else
2021  bfad->plog_buf.plog_enabled = 0;
2022 
2023  iocmd->status = BFA_STATUS_OK;
2024  return 0;
2025 }
2026 
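/*
 * IOCMD_FCPIM_PROFILE_ON/OFF toggle FCP-IM IO profiling; when enabling,
 * the current do_gettimeofday() seconds value is passed to the HAL as the
 * profiling start time.
 */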
2027 int
2028 bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2029 {
2030  struct bfa_bsg_fcpim_profile_s *iocmd =
2031  (struct bfa_bsg_fcpim_profile_s *)cmd;
2032  struct timeval tv;
2033  unsigned long flags;
2034 
2035  do_gettimeofday(&tv);
2036  spin_lock_irqsave(&bfad->bfad_lock, flags);
2037  if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
2038  iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
2039  else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
2040  iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
2041  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2042 
2043  return 0;
2044 }
2045 
2046 static int
2047 bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
2048 {
2049  struct bfa_bsg_itnim_ioprofile_s *iocmd =
2050  (struct bfa_bsg_itnim_ioprofile_s *)cmd;
2051  struct bfa_fcs_lport_s *fcs_port;
2052  struct bfa_fcs_itnim_s *itnim;
2053  unsigned long flags;
2054 
2055  spin_lock_irqsave(&bfad->bfad_lock, flags);
2056  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
2057  iocmd->vf_id, iocmd->lpwwn);
2058  if (!fcs_port)
2059  iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
2060  else {
2061  itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
2062  if (itnim == NULL)
2063  iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
2064  else
2065  iocmd->status = bfa_itnim_get_ioprofile(
2066  bfa_fcs_itnim_get_halitn(itnim),
2067  &iocmd->ioprofile);
2068  }
2069  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2070  return 0;
2071 }
2072 
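/*
 * The fcport statistics handlers wrap the completion and the destination
 * buffer in a bfa_cb_pending_q_s element; the HAL queues the element until
 * the firmware statistics response arrives and then calls bfad_hcb_comp to
 * wake the waiter.
 */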
2073 int
2074 bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
2075 {
2076  struct bfa_bsg_fcport_stats_s *iocmd =
2077  (struct bfa_bsg_fcport_stats_s *)cmd;
2078  struct bfad_hal_comp fcomp;
2079  unsigned long flags;
2080  struct bfa_cb_pending_q_s cb_qe;
2081 
2082  init_completion(&fcomp.comp);
2083  bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2084  &fcomp, &iocmd->stats);
2085  spin_lock_irqsave(&bfad->bfad_lock, flags);
2086  iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2087  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2088  if (iocmd->status != BFA_STATUS_OK) {
2089  bfa_trc(bfad, iocmd->status);
2090  goto out;
2091  }
2092  wait_for_completion(&fcomp.comp);
2093  iocmd->status = fcomp.status;
2094 out:
2095  return 0;
2096 }
2097 
2098 int
2099 bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
2100 {
2101  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2102  struct bfad_hal_comp fcomp;
2103  unsigned long flags;
2104  struct bfa_cb_pending_q_s cb_qe;
2105 
2106  init_completion(&fcomp.comp);
2107  bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
2108 
2109  spin_lock_irqsave(&bfad->bfad_lock, flags);
2110  iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2111  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2112  if (iocmd->status != BFA_STATUS_OK) {
2113  bfa_trc(bfad, iocmd->status);
2114  goto out;
2115  }
2116  wait_for_completion(&fcomp.comp);
2117  iocmd->status = fcomp.status;
2118 out:
2119  return 0;
2120 }
2121 
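/*
 * Boot and ethboot (PXE) configuration is stored in dedicated flash
 * partitions: IOCMD_BOOT_CFG/QUERY write and read BFA_FLASH_PART_BOOT,
 * while the ethboot handlers use the PXE configuration partition. The
 * preboot query only reports the pre-boot settings cached in the IOC-FC
 * configuration response (cfgrsp->pbc_cfg).
 */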
2122 int
2123 bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
2124 {
2125  struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2126  struct bfad_hal_comp fcomp;
2127  unsigned long flags;
2128 
2129  init_completion(&fcomp.comp);
2130  spin_lock_irqsave(&bfad->bfad_lock, flags);
2131  iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2132  BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2133  &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2134  bfad_hcb_comp, &fcomp);
2135  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2136  if (iocmd->status != BFA_STATUS_OK)
2137  goto out;
2138  wait_for_completion(&fcomp.comp);
2139  iocmd->status = fcomp.status;
2140 out:
2141  return 0;
2142 }
2143 
2144 int
2145 bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
2146 {
2147  struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2148  struct bfad_hal_comp fcomp;
2149  unsigned long flags;
2150 
2151  init_completion(&fcomp.comp);
2152  spin_lock_irqsave(&bfad->bfad_lock, flags);
2153  iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2154  BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2155  &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2156  bfad_hcb_comp, &fcomp);
2157  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2158  if (iocmd->status != BFA_STATUS_OK)
2159  goto out;
2160  wait_for_completion(&fcomp.comp);
2161  iocmd->status = fcomp.status;
2162 out:
2163  return 0;
2164 }
2165 
2166 int
2167 bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
2168 {
2169  struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
2170  struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
2171  struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
2172  unsigned long flags;
2173 
2174  spin_lock_irqsave(&bfad->bfad_lock, flags);
2175  pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
2176  pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
2177  pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
2178  memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
2179  iocmd->status = BFA_STATUS_OK;
2180  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2181 
2182  return 0;
2183 }
2184 
2185 int
2186 bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
2187 {
2188  struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2189  struct bfad_hal_comp fcomp;
2190  unsigned long flags;
2191 
2192  init_completion(&fcomp.comp);
2193  spin_lock_irqsave(&bfad->bfad_lock, flags);
2194  iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2195  BFA_FLASH_PART_PXECFG,
2196  bfad->bfa.ioc.port_id, &iocmd->cfg,
2197  sizeof(struct bfa_ethboot_cfg_s), 0,
2198  bfad_hcb_comp, &fcomp);
2199  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2200  if (iocmd->status != BFA_STATUS_OK)
2201  goto out;
2202  wait_for_completion(&fcomp.comp);
2203  iocmd->status = fcomp.status;
2204 out:
2205  return 0;
2206 }
2207 
2208 int
2209 bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
2210 {
2211  struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2212  struct bfad_hal_comp fcomp;
2213  unsigned long flags;
2214 
2215  init_completion(&fcomp.comp);
2216  spin_lock_irqsave(&bfad->bfad_lock, flags);
2217  iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2218  BFA_FLASH_PART_PXECFG,
2219  bfad->bfa.ioc.port_id, &iocmd->cfg,
2220  sizeof(struct bfa_ethboot_cfg_s), 0,
2221  bfad_hcb_comp, &fcomp);
2222  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2223  if (iocmd->status != BFA_STATUS_OK)
2224  goto out;
2225  wait_for_completion(&fcomp.comp);
2226  iocmd->status = fcomp.status;
2227 out:
2228  return 0;
2229 }
2230 
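/*
 * Trunking cannot be reconfigured while the port is in dport (diagnostic)
 * mode or loop topology. Enabling or disabling it disables the FC port,
 * updates fcport->cfg.trunked, and re-enables the port (unless it reports
 * itself as disabled) so the new setting takes effect on the next link
 * bring-up.
 */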
2231 int
2232 bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2233 {
2234  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2235  struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2236  struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2237  unsigned long flags;
2238 
2239  spin_lock_irqsave(&bfad->bfad_lock, flags);
2240 
2241  if (bfa_fcport_is_dport(&bfad->bfa))
2242  return BFA_STATUS_DPORT_ERR;
2243 
2244  if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2245  (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2246  iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2247  else {
2248  if (v_cmd == IOCMD_TRUNK_ENABLE) {
2249  trunk->attr.state = BFA_TRUNK_OFFLINE;
2250  bfa_fcport_disable(&bfad->bfa);
2251  fcport->cfg.trunked = BFA_TRUE;
2252  } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
2253  trunk->attr.state = BFA_TRUNK_DISABLED;
2254  bfa_fcport_disable(&bfad->bfa);
2255  fcport->cfg.trunked = BFA_FALSE;
2256  }
2257 
2258  if (!bfa_fcport_is_disabled(&bfad->bfa))
2259  bfa_fcport_enable(&bfad->bfa);
2260 
2261  iocmd->status = BFA_STATUS_OK;
2262  }
2263 
2264  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2265 
2266  return 0;
2267 }
2268 
2269 int
2270 bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
2271 {
2272  struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
2273  struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2274  struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2275  unsigned long flags;
2276 
2277  spin_lock_irqsave(&bfad->bfad_lock, flags);
2278  if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2279  (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2280  iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2281  else {
2282  memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
2283  sizeof(struct bfa_trunk_attr_s));
2284  iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
2285  iocmd->status = BFA_STATUS_OK;
2286  }
2287  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2288 
2289  return 0;
2290 }
2291 
2292 int
2293 bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2294 {
2295  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2296  struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2297  unsigned long flags;
2298 
2299  spin_lock_irqsave(&bfad->bfad_lock, flags);
2300  if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
2301  if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2302  (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2303  iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2304  else {
2305  if (v_cmd == IOCMD_QOS_ENABLE)
2306  fcport->cfg.qos_enabled = BFA_TRUE;
2307  else if (v_cmd == IOCMD_QOS_DISABLE) {
2308  fcport->cfg.qos_enabled = BFA_FALSE;
2309  fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
2310  fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
2311  fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
2312  }
2313  }
2314  }
2315  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2316 
2317  return 0;
2318 }
2319 
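/*
 * QoS can only be toggled on FC-mode IOCs and is rejected in loop
 * topology; disabling QoS also restores the default high/medium/low
 * bandwidth split.
 */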
2320 int
2321 bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
2322 {
2323  struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
2324  struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2325  unsigned long flags;
2326 
2327  spin_lock_irqsave(&bfad->bfad_lock, flags);
2328  if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2329  (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2330  iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2331  else {
2332  iocmd->attr.state = fcport->qos_attr.state;
2333  iocmd->attr.total_bb_cr =
2334  be32_to_cpu(fcport->qos_attr.total_bb_cr);
2335  iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
2336  iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
2337  iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
2338  iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
2339  iocmd->status = BFA_STATUS_OK;
2340  }
2341  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2342 
2343  return 0;
2344 }
2345 
2346 int
2347 bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
2348 {
2349  struct bfa_bsg_qos_vc_attr_s *iocmd =
2350  (struct bfa_bsg_qos_vc_attr_s *)cmd;
2351  struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2352  struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
2353  unsigned long flags;
2354  u32 i = 0;
2355 
2356  spin_lock_irqsave(&bfad->bfad_lock, flags);
2357  iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
2358  iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
2359  iocmd->attr.elp_opmode_flags =
2360  be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
2361 
2362  /* Individual VC info */
2363  while (i < iocmd->attr.total_vc_count) {
2364  iocmd->attr.vc_info[i].vc_credit =
2365  bfa_vc_attr->vc_info[i].vc_credit;
2366  iocmd->attr.vc_info[i].borrow_credit =
2367  bfa_vc_attr->vc_info[i].borrow_credit;
2368  iocmd->attr.vc_info[i].priority =
2369  bfa_vc_attr->vc_info[i].priority;
2370  i++;
2371  }
2372  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2373 
2374  iocmd->status = BFA_STATUS_OK;
2375  return 0;
2376 }
2377 
2378 int
2379 bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
2380 {
2381  struct bfa_bsg_fcport_stats_s *iocmd =
2382  (struct bfa_bsg_fcport_stats_s *)cmd;
2383  struct bfad_hal_comp fcomp;
2384  unsigned long flags;
2385  struct bfa_cb_pending_q_s cb_qe;
2386  struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2387 
2388  init_completion(&fcomp.comp);
2389  bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2390  &fcomp, &iocmd->stats);
2391 
2392  spin_lock_irqsave(&bfad->bfad_lock, flags);
2393  WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2394  if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2395  (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2396  iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2397  else
2398  iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2399  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2400  if (iocmd->status != BFA_STATUS_OK) {
2401  bfa_trc(bfad, iocmd->status);
2402  goto out;
2403  }
2404  wait_for_completion(&fcomp.comp);
2405  iocmd->status = fcomp.status;
2406 out:
2407  return 0;
2408 }
2409 
2410 int
2411 bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
2412 {
2413  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2414  struct bfad_hal_comp fcomp;
2415  unsigned long flags;
2416  struct bfa_cb_pending_q_s cb_qe;
2417  struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2418 
2419  init_completion(&fcomp.comp);
2420  bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2421  &fcomp, NULL);
2422 
2423  spin_lock_irqsave(&bfad->bfad_lock, flags);
2424  WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2425  if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2426  (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2427  iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2428  else
2429  iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2430  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2431  if (iocmd->status != BFA_STATUS_OK) {
2432  bfa_trc(bfad, iocmd->status);
2433  goto out;
2434  }
2435  wait_for_completion(&fcomp.comp);
2436  iocmd->status = fcomp.status;
2437 out:
2438  return 0;
2439 }
2440 
2441 int
2442 bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
2443 {
2444  struct bfa_bsg_vf_stats_s *iocmd =
2445  (struct bfa_bsg_vf_stats_s *)cmd;
2446  struct bfa_fcs_fabric_s *fcs_vf;
2447  unsigned long flags;
2448 
2449  spin_lock_irqsave(&bfad->bfad_lock, flags);
2450  fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2451  if (fcs_vf == NULL) {
2452  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2453  iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2454  goto out;
2455  }
2456  memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
2457  sizeof(struct bfa_vf_stats_s));
2458  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2459  iocmd->status = BFA_STATUS_OK;
2460 out:
2461  return 0;
2462 }
2463 
2464 int
2465 bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
2466 {
2467  struct bfa_bsg_vf_reset_stats_s *iocmd =
2468  (struct bfa_bsg_vf_reset_stats_s *)cmd;
2469  struct bfa_fcs_fabric_s *fcs_vf;
2470  unsigned long flags;
2471 
2472  spin_lock_irqsave(&bfad->bfad_lock, flags);
2473  fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2474  if (fcs_vf == NULL) {
2475  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2476  iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2477  goto out;
2478  }
2479  memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
2480  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2481  iocmd->status = BFA_STATUS_OK;
2482 out:
2483  return 0;
2484 }
2485 
2486 /* Function to reset the LUN SCAN mode */
2487 static void
2488 bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
2489 {
2490  struct bfad_im_port_s *pport_im = bfad->pport.im_port;
2491  struct bfad_vport_s *vport = NULL;
2492 
2493  /* Set the scsi device LUN SCAN flags for base port */
2494  bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
2495 
2496  /* Set the scsi device LUN SCAN flags for the vports */
2497  list_for_each_entry(vport, &bfad->vport_list, list_entry)
2498  bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
2499 }
2500 
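/*
 * LUN mask control: IOCMD_FCPIM_LUNMASK_ENABLE/DISABLE update the mask
 * state through bfa_fcpim_lunmask_update() and switch every SCSI device on
 * the base port and all vports between sequential LUN scan and the default
 * REPORT_LUNS based scan; IOCMD_FCPIM_LUNMASK_CLEAR drops all configured
 * LUN mask entries.
 */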
2501 int
2502 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
2503 {
2504  struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
2505  unsigned long flags;
2506 
2507  spin_lock_irqsave(&bfad->bfad_lock, flags);
2508  if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
2509  iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
2510  /* Set the LUN Scanning mode to be Sequential scan */
2511  if (iocmd->status == BFA_STATUS_OK)
2512  bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
2513  } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
2514  iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
2515  /* Set the LUN Scanning mode to default REPORT_LUNS scan */
2516  if (iocmd->status == BFA_STATUS_OK)
2517  bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
2518  } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
2519  iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
2520  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2521  return 0;
2522 }
2523 
2524 int
2525 bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
2526 {
2527  struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
2528  (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
2529  struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
2530  unsigned long flags;
2531 
2532  spin_lock_irqsave(&bfad->bfad_lock, flags);
2533  iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
2534  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2535  return 0;
2536 }
2537 
2538 int
2539 bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2540 {
2541  struct bfa_bsg_fcpim_lunmask_s *iocmd =
2542  (struct bfa_bsg_fcpim_lunmask_s *)cmd;
2543  unsigned long flags;
2544 
2545  spin_lock_irqsave(&bfad->bfad_lock, flags);
2546  if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
2547  iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
2548  &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
2549  else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
2550  iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
2551  iocmd->vf_id, &iocmd->pwwn,
2552  iocmd->rpwwn, iocmd->lun);
2553  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2554  return 0;
2555 }
2556 
2557 int
2558 bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
2559 {
2560  struct bfa_bsg_fcpim_throttle_s *iocmd =
2561  (struct bfa_bsg_fcpim_throttle_s *)cmd;
2562  unsigned long flags;
2563 
2564  spin_lock_irqsave(&bfad->bfad_lock, flags);
2565  iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
2566  (void *)&iocmd->throttle);
2567  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2568 
2569  return 0;
2570 }
2571 
2572 int
2573 bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
2574 {
2575  struct bfa_bsg_fcpim_throttle_s *iocmd =
2576  (struct bfa_bsg_fcpim_throttle_s *)cmd;
2577  unsigned long flags;
2578 
2579  spin_lock_irqsave(&bfad->bfad_lock, flags);
2580  iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
2581  iocmd->throttle.cfg_value);
2582  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2583 
2584  return 0;
2585 }
2586 
2587 int
2588 bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
2589 {
2590  struct bfa_bsg_tfru_s *iocmd =
2591  (struct bfa_bsg_tfru_s *)cmd;
2592  struct bfad_hal_comp fcomp;
2593  unsigned long flags = 0;
2594 
2595  init_completion(&fcomp.comp);
2596  spin_lock_irqsave(&bfad->bfad_lock, flags);
2597  iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
2598  &iocmd->data, iocmd->len, iocmd->offset,
2599  bfad_hcb_comp, &fcomp);
2600  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2601  if (iocmd->status == BFA_STATUS_OK) {
2602  wait_for_completion(&fcomp.comp);
2603  iocmd->status = fcomp.status;
2604  }
2605 
2606  return 0;
2607 }
2608 
2609 int
2610 bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
2611 {
2612  struct bfa_bsg_tfru_s *iocmd =
2613  (struct bfa_bsg_tfru_s *)cmd;
2614  struct bfad_hal_comp fcomp;
2615  unsigned long flags = 0;
2616 
2617  init_completion(&fcomp.comp);
2618  spin_lock_irqsave(&bfad->bfad_lock, flags);
2619  iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
2620  &iocmd->data, iocmd->len, iocmd->offset,
2621  bfad_hcb_comp, &fcomp);
2622  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2623  if (iocmd->status == BFA_STATUS_OK) {
2624  wait_for_completion(&fcomp.comp);
2625  iocmd->status = fcomp.status;
2626  }
2627 
2628  return 0;
2629 }
2630 
2631 int
2632 bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
2633 {
2634  struct bfa_bsg_fruvpd_s *iocmd =
2635  (struct bfa_bsg_fruvpd_s *)cmd;
2636  struct bfad_hal_comp fcomp;
2637  unsigned long flags = 0;
2638 
2639  init_completion(&fcomp.comp);
2640  spin_lock_irqsave(&bfad->bfad_lock, flags);
2641  iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
2642  &iocmd->data, iocmd->len, iocmd->offset,
2643  bfad_hcb_comp, &fcomp);
2644  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2645  if (iocmd->status == BFA_STATUS_OK) {
2646  wait_for_completion(&fcomp.comp);
2647  iocmd->status = fcomp.status;
2648  }
2649 
2650  return 0;
2651 }
2652 
2653 int
2654 bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
2655 {
2656  struct bfa_bsg_fruvpd_s *iocmd =
2657  (struct bfa_bsg_fruvpd_s *)cmd;
2658  struct bfad_hal_comp fcomp;
2659  unsigned long flags = 0;
2660 
2661  init_completion(&fcomp.comp);
2662  spin_lock_irqsave(&bfad->bfad_lock, flags);
2663  iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
2664  &iocmd->data, iocmd->len, iocmd->offset,
2665  bfad_hcb_comp, &fcomp);
2666  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2667  if (iocmd->status == BFA_STATUS_OK) {
2668  wait_for_completion(&fcomp.comp);
2669  iocmd->status = fcomp.status;
2670  }
2671 
2672  return 0;
2673 }
2674 
2675 int
2676 bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
2677 {
2678  struct bfa_bsg_fruvpd_max_size_s *iocmd =
2679  (struct bfa_bsg_fruvpd_max_size_s *)cmd;
2680  unsigned long flags = 0;
2681 
2682  spin_lock_irqsave(&bfad->bfad_lock, flags);
2683  iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
2684  &iocmd->max_size);
2685  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2686 
2687  return 0;
2688 }
2689 
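/*
 * bfad_iocmd_handler() is the single dispatch point for all vendor
 * specific BSG commands: the vendor command code taken from the bsg
 * request (vendor_cmd[0]) selects the IOCMD_* opcode, and the payload is
 * the matching bfa_bsg_*_s structure whose status field carries the BFA
 * result back to userspace.
 */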
2690 static int
2691 bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2692  unsigned int payload_len)
2693 {
2694  int rc = -EINVAL;
2695 
2696  switch (cmd) {
2697  case IOCMD_IOC_ENABLE:
2698  rc = bfad_iocmd_ioc_enable(bfad, iocmd);
2699  break;
2700  case IOCMD_IOC_DISABLE:
2701  rc = bfad_iocmd_ioc_disable(bfad, iocmd);
2702  break;
2703  case IOCMD_IOC_GET_INFO:
2704  rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
2705  break;
2706  case IOCMD_IOC_GET_ATTR:
2707  rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
2708  break;
2709  case IOCMD_IOC_GET_STATS:
2710  rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
2711  break;
2712  case IOCMD_IOC_GET_FWSTATS:
2713  rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
2714  break;
2715  case IOCMD_IOC_RESET_STATS:
2716  case IOCMD_IOC_RESET_FWSTATS:
2717  rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
2718  break;
2719  case IOCMD_IOC_SET_ADAPTER_NAME:
2720  case IOCMD_IOC_SET_PORT_NAME:
2721  rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
2722  break;
2723  case IOCMD_IOCFC_GET_ATTR:
2724  rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
2725  break;
2726  case IOCMD_IOCFC_SET_INTR:
2727  rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
2728  break;
2729  case IOCMD_PORT_ENABLE:
2730  rc = bfad_iocmd_port_enable(bfad, iocmd);
2731  break;
2732  case IOCMD_PORT_DISABLE:
2733  rc = bfad_iocmd_port_disable(bfad, iocmd);
2734  break;
2735  case IOCMD_PORT_GET_ATTR:
2736  rc = bfad_iocmd_port_get_attr(bfad, iocmd);
2737  break;
2738  case IOCMD_PORT_GET_STATS:
2739  rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
2740  break;
2741  case IOCMD_PORT_RESET_STATS:
2742  rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
2743  break;
2744  case IOCMD_PORT_CFG_TOPO:
2745  case IOCMD_PORT_CFG_SPEED:
2746  case IOCMD_PORT_CFG_ALPA:
2747  case IOCMD_PORT_CLR_ALPA:
2748  rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
2749  break;
2750  case IOCMD_PORT_CFG_MAXFRSZ:
2751  rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
2752  break;
2753  case IOCMD_PORT_BBSC_ENABLE:
2754  case IOCMD_PORT_BBSC_DISABLE:
2755  rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd);
2756  break;
2757  case IOCMD_LPORT_GET_ATTR:
2758  rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
2759  break;
2760  case IOCMD_LPORT_GET_STATS:
2761  rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
2762  break;
2763  case IOCMD_LPORT_RESET_STATS:
2764  rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
2765  break;
2766  case IOCMD_LPORT_GET_IOSTATS:
2767  rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
2768  break;
2769  case IOCMD_LPORT_GET_RPORTS:
2770  rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
2771  break;
2772  case IOCMD_RPORT_GET_ATTR:
2773  rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
2774  break;
2775  case IOCMD_RPORT_GET_ADDR:
2776  rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
2777  break;
2778  case IOCMD_RPORT_GET_STATS:
2779  rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
2780  break;
2781  case IOCMD_RPORT_RESET_STATS:
2782  rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
2783  break;
2784  case IOCMD_RPORT_SET_SPEED:
2785  rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
2786  break;
2787  case IOCMD_VPORT_GET_ATTR:
2788  rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
2789  break;
2790  case IOCMD_VPORT_GET_STATS:
2791  rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
2792  break;
2793  case IOCMD_VPORT_RESET_STATS:
2794  rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
2795  break;
2796  case IOCMD_FABRIC_GET_LPORTS:
2797  rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
2798  break;
2799  case IOCMD_RATELIM_ENABLE:
2800  case IOCMD_RATELIM_DISABLE:
2801  rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
2802  break;
2803  case IOCMD_RATELIM_DEFAULT_SPEED:
2804  rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
2805  break;
2806  case IOCMD_FCPIM_FAILOVER:
2807  rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
2808  break;
2809  case IOCMD_FCPIM_MODSTATS:
2810  rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
2811  break;
2812  case IOCMD_FCPIM_MODSTATSCLR:
2813  rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
2814  break;
2815  case IOCMD_FCPIM_DEL_ITN_STATS:
2816  rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
2817  break;
2818  case IOCMD_ITNIM_GET_ATTR:
2819  rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
2820  break;
2821  case IOCMD_ITNIM_GET_IOSTATS:
2822  rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
2823  break;
2824  case IOCMD_ITNIM_RESET_STATS:
2825  rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
2826  break;
2827  case IOCMD_ITNIM_GET_ITNSTATS:
2828  rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
2829  break;
2830  case IOCMD_FCPORT_ENABLE:
2831  rc = bfad_iocmd_fcport_enable(bfad, iocmd);
2832  break;
2833  case IOCMD_FCPORT_DISABLE:
2834  rc = bfad_iocmd_fcport_disable(bfad, iocmd);
2835  break;
2836  case IOCMD_IOC_PCIFN_CFG:
2837  rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
2838  break;
2839  case IOCMD_PCIFN_CREATE:
2840  rc = bfad_iocmd_pcifn_create(bfad, iocmd);
2841  break;
2842  case IOCMD_PCIFN_DELETE:
2843  rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
2844  break;
2845  case IOCMD_PCIFN_BW:
2846  rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
2847  break;
2848  case IOCMD_ADAPTER_CFG_MODE:
2849  rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
2850  break;
2851  case IOCMD_PORT_CFG_MODE:
2852  rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
2853  break;
2854  case IOCMD_FLASH_ENABLE_OPTROM:
2855  case IOCMD_FLASH_DISABLE_OPTROM:
2856  rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
2857  break;
2858  case IOCMD_FAA_QUERY:
2859  rc = bfad_iocmd_faa_query(bfad, iocmd);
2860  break;
2861  case IOCMD_CEE_GET_ATTR:
2862  rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
2863  break;
2864  case IOCMD_CEE_GET_STATS:
2865  rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
2866  break;
2867  case IOCMD_CEE_RESET_STATS:
2868  rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
2869  break;
2870  case IOCMD_SFP_MEDIA:
2871  rc = bfad_iocmd_sfp_media(bfad, iocmd);
2872  break;
2873  case IOCMD_SFP_SPEED:
2874  rc = bfad_iocmd_sfp_speed(bfad, iocmd);
2875  break;
2876  case IOCMD_FLASH_GET_ATTR:
2877  rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
2878  break;
2879  case IOCMD_FLASH_ERASE_PART:
2880  rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
2881  break;
2882  case IOCMD_FLASH_UPDATE_PART:
2883  rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
2884  break;
2885  case IOCMD_FLASH_READ_PART:
2886  rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
2887  break;
2888  case IOCMD_DIAG_TEMP:
2889  rc = bfad_iocmd_diag_temp(bfad, iocmd);
2890  break;
2891  case IOCMD_DIAG_MEMTEST:
2892  rc = bfad_iocmd_diag_memtest(bfad, iocmd);
2893  break;
2894  case IOCMD_DIAG_LOOPBACK:
2895  rc = bfad_iocmd_diag_loopback(bfad, iocmd);
2896  break;
2897  case IOCMD_DIAG_FWPING:
2898  rc = bfad_iocmd_diag_fwping(bfad, iocmd);
2899  break;
2900  case IOCMD_DIAG_QUEUETEST:
2901  rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
2902  break;
2903  case IOCMD_DIAG_SFP:
2904  rc = bfad_iocmd_diag_sfp(bfad, iocmd);
2905  break;
2906  case IOCMD_DIAG_LED:
2907  rc = bfad_iocmd_diag_led(bfad, iocmd);
2908  break;
2909  case IOCMD_DIAG_BEACON_LPORT:
2910  rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
2911  break;
2912  case IOCMD_DIAG_LB_STAT:
2913  rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
2914  break;
2915  case IOCMD_DIAG_DPORT_ENABLE:
2916  case IOCMD_DIAG_DPORT_DISABLE:
2917  rc = bfad_iocmd_diag_cfg_dport(bfad, cmd, iocmd);
2918  break;
2919  case IOCMD_DIAG_DPORT_GET_STATE:
2920  rc = bfad_iocmd_diag_dport_get_state(bfad, iocmd);
2921  break;
2922  case IOCMD_PHY_GET_ATTR:
2923  rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
2924  break;
2925  case IOCMD_PHY_GET_STATS:
2926  rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
2927  break;
2928  case IOCMD_PHY_UPDATE_FW:
2929  rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
2930  break;
2931  case IOCMD_PHY_READ_FW:
2932  rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
2933  break;
2934  case IOCMD_VHBA_QUERY:
2935  rc = bfad_iocmd_vhba_query(bfad, iocmd);
2936  break;
2937  case IOCMD_DEBUG_PORTLOG:
2938  rc = bfad_iocmd_porglog_get(bfad, iocmd);
2939  break;
2940  case IOCMD_DEBUG_FW_CORE:
2941  rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
2942  break;
2943  case IOCMD_DEBUG_FW_STATE_CLR:
2944  case IOCMD_DEBUG_PORTLOG_CLR:
2945  case IOCMD_DEBUG_START_DTRC:
2946  case IOCMD_DEBUG_STOP_DTRC:
2947  rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
2948  break;
2949  case IOCMD_DEBUG_PORTLOG_CTL:
2950  rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
2951  break;
2952  case IOCMD_FCPIM_PROFILE_ON:
2953  case IOCMD_FCPIM_PROFILE_OFF:
2954  rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
2955  break;
2956  case IOCMD_ITNIM_GET_IOPROFILE:
2957  rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
2958  break;
2959  case IOCMD_FCPORT_GET_STATS:
2960  rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
2961  break;
2962  case IOCMD_FCPORT_RESET_STATS:
2963  rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
2964  break;
2965  case IOCMD_BOOT_CFG:
2966  rc = bfad_iocmd_boot_cfg(bfad, iocmd);
2967  break;
2968  case IOCMD_BOOT_QUERY:
2969  rc = bfad_iocmd_boot_query(bfad, iocmd);
2970  break;
2971  case IOCMD_PREBOOT_QUERY:
2972  rc = bfad_iocmd_preboot_query(bfad, iocmd);
2973  break;
2974  case IOCMD_ETHBOOT_CFG:
2975  rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
2976  break;
2977  case IOCMD_ETHBOOT_QUERY:
2978  rc = bfad_iocmd_ethboot_query(bfad, iocmd);
2979  break;
2980  case IOCMD_TRUNK_ENABLE:
2981  case IOCMD_TRUNK_DISABLE:
2982  rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
2983  break;
2984  case IOCMD_TRUNK_GET_ATTR:
2985  rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
2986  break;
2987  case IOCMD_QOS_ENABLE:
2988  case IOCMD_QOS_DISABLE:
2989  rc = bfad_iocmd_qos(bfad, iocmd, cmd);
2990  break;
2991  case IOCMD_QOS_GET_ATTR:
2992  rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
2993  break;
2994  case IOCMD_QOS_GET_VC_ATTR:
2995  rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
2996  break;
2997  case IOCMD_QOS_GET_STATS:
2998  rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
2999  break;
3000  case IOCMD_QOS_RESET_STATS:
3001  rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
3002  break;
3003  case IOCMD_QOS_SET_BW:
3004  rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
3005  break;
3006  case IOCMD_VF_GET_STATS:
3007  rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
3008  break;
3009  case IOCMD_VF_RESET_STATS:
3010  rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
3011  break;
3012  case IOCMD_FCPIM_LUNMASK_ENABLE:
3013  case IOCMD_FCPIM_LUNMASK_DISABLE:
3014  case IOCMD_FCPIM_LUNMASK_CLEAR:
3015  rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
3016  break;
3017  case IOCMD_FCPIM_LUNMASK_QUERY:
3018  rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
3019  break;
3020  case IOCMD_FCPIM_LUNMASK_ADD:
3021  case IOCMD_FCPIM_LUNMASK_DELETE:
3022  rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
3023  break;
3024  case IOCMD_FCPIM_THROTTLE_QUERY:
3025  rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
3026  break;
3027  case IOCMD_FCPIM_THROTTLE_SET:
3028  rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
3029  break;
3030  /* TFRU */
3031  case IOCMD_TFRU_READ:
3032  rc = bfad_iocmd_tfru_read(bfad, iocmd);
3033  break;
3034  case IOCMD_TFRU_WRITE:
3035  rc = bfad_iocmd_tfru_write(bfad, iocmd);
3036  break;
3037  /* FRU */
3038  case IOCMD_FRUVPD_READ:
3039  rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
3040  break;
3041  case IOCMD_FRUVPD_UPDATE:
3042  rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
3043  break;
3044  case IOCMD_FRUVPD_GET_MAX_SIZE:
3045  rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
3046  break;
3047  default:
3048  rc = -EINVAL;
3049  break;
3050  }
3051  return rc;
3052 }
3053 
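/*
 * Vendor request path: the scatter-gather request payload is copied into
 * a linear kernel buffer, handed to bfad_iocmd_handler(), and the updated
 * buffer is copied back into the reply payload sg list before the job is
 * completed.
 */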
3054 static int
3055 bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
3056 {
3057  uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
3058  struct bfad_im_port_s *im_port =
3059  (struct bfad_im_port_s *) job->shost->hostdata[0];
3060  struct bfad_s *bfad = im_port->bfad;
3061  struct request_queue *request_q = job->req->q;
3062  void *payload_kbuf;
3063  int rc = -EINVAL;
3064 
3065  /*
3066  * Raise the BSG request_queue segment limit to 256 so that payloads
3067  * larger than the default 512 KB (128 segments of 4 KB each) can be handled.
3068  */
3069  blk_queue_max_segments(request_q, 256);
3070 
3071  /* Allocate a temp buffer to hold the passed in user space command */
3072  payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3073  if (!payload_kbuf) {
3074  rc = -ENOMEM;
3075  goto out;
3076  }
3077 
3078  /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
3079  sg_copy_to_buffer(job->request_payload.sg_list,
3080  job->request_payload.sg_cnt, payload_kbuf,
3081  job->request_payload.payload_len);
3082 
3083  /* Invoke IOCMD handler - to handle all the vendor command requests */
3084  rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
3085  job->request_payload.payload_len);
3086  if (rc != BFA_STATUS_OK)
3087  goto error;
3088 
3089  /* Copy the response data to the job->reply_payload sg_list */
3090  sg_copy_from_buffer(job->reply_payload.sg_list,
3091  job->reply_payload.sg_cnt,
3092  payload_kbuf,
3093  job->reply_payload.payload_len);
3094 
3095  /* free the command buffer */
3096  kfree(payload_kbuf);
3097 
3098  /* Fill the BSG job reply data */
3099  job->reply_len = job->reply_payload.payload_len;
3100  job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
3101  job->reply->result = rc;
3102 
3103  job->job_done(job);
3104  return rc;
3105 error:
3106  /* free the command buffer */
3107  kfree(payload_kbuf);
3108 out:
3109  job->reply->result = rc;
3110  job->reply_len = sizeof(uint32_t);
3111  job->reply->reply_payload_rcv_len = 0;
3112  return rc;
3113 }
3114 
3115 /* FC passthru call backs */
3116 u64
3117 bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
3118 {
3119  struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3120  struct bfa_sge_s *sge;
3121  u64 addr;
3122 
3123  sge = drv_fcxp->req_sge + sgeid;
3124  addr = (u64)(size_t) sge->sg_addr;
3125  return addr;
3126 }
3127 
3128 u32
3129 bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
3130 {
3131  struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3132  struct bfa_sge_s *sge;
3133 
3134  sge = drv_fcxp->req_sge + sgeid;
3135  return sge->sg_len;
3136 }
3137 
3138 u64
3139 bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
3140 {
3141  struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3142  struct bfa_sge_s *sge;
3143  u64 addr;
3144 
3145  sge = drv_fcxp->rsp_sge + sgeid;
3146  addr = (u64)(size_t) sge->sg_addr;
3147  return addr;
3148 }
3149 
3150 u32
3151 bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
3152 {
3153  struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3154  struct bfa_sge_s *sge;
3155 
3156  sge = drv_fcxp->rsp_sge + sgeid;
3157  return sge->sg_len;
3158 }
3159 
3160 void
3161 bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
3162  bfa_status_t req_status, u32 rsp_len, u32 resid_len,
3163  struct fchs_s *rsp_fchs)
3164 {
3165  struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3166 
3167  drv_fcxp->req_status = req_status;
3168  drv_fcxp->rsp_len = rsp_len;
3169 
3170  /* bfa_fcxp will be automatically freed by BFA */
3171  drv_fcxp->bfa_fcxp = NULL;
3172  complete(&drv_fcxp->comp);
3173 }
3174 
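/*
 * bfad_fcxp_map_sg() builds a single-entry SG table for an FC passthru
 * buffer: one bfad_buf_info plus one bfa_sge_s are allocated together, a
 * DMA-coherent buffer of payload_len bytes is allocated, and the linear
 * BSG payload is copied into it. bfad_fcxp_free_mem() undoes the mapping.
 */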
3175 struct bfad_buf_info *
3176 bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
3177  uint32_t payload_len, uint32_t *num_sgles)
3178 {
3179  struct bfad_buf_info *buf_base, *buf_info;
3180  struct bfa_sge_s *sg_table;
3181  int sge_num = 1;
3182 
3183  buf_base = kzalloc((sizeof(struct bfad_buf_info) +
3184  sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
3185  if (!buf_base)
3186  return NULL;
3187 
3188  sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
3189  (sizeof(struct bfad_buf_info) * sge_num));
3190 
3191  /* Allocate dma coherent memory */
3192  buf_info = buf_base;
3193  buf_info->size = payload_len;
3194  buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
3195  &buf_info->phys, GFP_KERNEL);
3196  if (!buf_info->virt)
3197  goto out_free_mem;
3198 
3199  /* copy the linear bsg buffer to buf_info */
3200  memset(buf_info->virt, 0, buf_info->size);
3201  memcpy(buf_info->virt, payload_kbuf, buf_info->size);
3202 
3203  /*
3204  * Setup SG table
3205  */
3206  sg_table->sg_len = buf_info->size;
3207  sg_table->sg_addr = (void *)(size_t) buf_info->phys;
3208 
3209  *num_sgles = sge_num;
3210 
3211  return buf_base;
3212 
3213 out_free_mem:
3214  kfree(buf_base);
3215  return NULL;
3216 }
3217 
3218 void
3219 bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
3220  uint32_t num_sgles)
3221 {
3222  int i;
3223  struct bfad_buf_info *buf_info = buf_base;
3224 
3225  if (buf_base) {
3226  for (i = 0; i < num_sgles; buf_info++, i++) {
3227  if (buf_info->virt != NULL)
3228  dma_free_coherent(&bfad->pcidev->dev,
3229  buf_info->size, buf_info->virt,
3230  buf_info->phys);
3231  }
3232  kfree(buf_base);
3233  }
3234 }
3235 
3236 int
3237 bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
3238  bfa_bsg_fcpt_t *bsg_fcpt)
3239 {
3240  struct bfa_fcxp_s *hal_fcxp;
3241  struct bfad_s *bfad = drv_fcxp->port->bfad;
3242  unsigned long flags;
3243  uint8_t lp_tag;
3244 
3245  spin_lock_irqsave(&bfad->bfad_lock, flags);
3246 
3247  /* Allocate bfa_fcxp structure */
3248  hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
3249  drv_fcxp->num_req_sgles,
3250  drv_fcxp->num_rsp_sgles,
3251  bfad_fcxp_get_req_sgaddr_cb,
3252  bfad_fcxp_get_req_sglen_cb,
3253  bfad_fcxp_get_rsp_sgaddr_cb,
3254  bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
3255  if (!hal_fcxp) {
3256  bfa_trc(bfad, 0);
3257  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3258  return BFA_STATUS_ENOMEM;
3259  }
3260 
3261  drv_fcxp->bfa_fcxp = hal_fcxp;
3262 
3263  lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
3264 
3265  bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
3266  bsg_fcpt->cts, bsg_fcpt->cos,
3267  job->request_payload.payload_len,
3268  &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
3269  job->reply_payload.payload_len, bsg_fcpt->tsecs);
3270 
3271  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3272 
3273  return BFA_STATUS_OK;
3274 }
3275 
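/*
 * ELS/CT passthru: the user payload (bfa_bsg_fcpt_t) is copied in, the
 * lport (and, for RPT commands, the rport) is resolved, request and reply
 * buffers are DMA-mapped, and the frame is sent via bfa_fcxp_send();
 * bfad_send_fcpt_cb() completes drv_fcxp->comp when the response arrives.
 */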
3276 int
3277 bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
3278 {
3279  struct bfa_bsg_data *bsg_data;
3280  struct bfad_im_port_s *im_port =
3281  (struct bfad_im_port_s *) job->shost->hostdata[0];
3282  struct bfad_s *bfad = im_port->bfad;
3283  bfa_bsg_fcpt_t *bsg_fcpt;
3284  struct bfad_fcxp *drv_fcxp;
3285  struct bfa_fcs_lport_s *fcs_port;
3286  struct bfa_fcs_rport_s *fcs_rport;
3287  uint32_t command_type = job->request->msgcode;
3288  unsigned long flags;
3289  struct bfad_buf_info *rsp_buf_info;
3290  void *req_kbuf = NULL, *rsp_kbuf = NULL;
3291  int rc = -EINVAL;
3292 
3293  job->reply_len = sizeof(uint32_t); /* At least uint32_t reply_len */
3294  job->reply->reply_payload_rcv_len = 0;
3295 
3296  /* Get the payload passed in from userspace */
3297  bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
3298  sizeof(struct fc_bsg_request));
3299  if (bsg_data == NULL)
3300  goto out;
3301 
3302  /*
3303  * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
3304  * buffer of size bsg_data->payload_len
3305  */
3306  bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
3307  if (!bsg_fcpt) {
3308  rc = -ENOMEM;
3309  goto out;
3310  }
3311 
3312  if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
3313  bsg_data->payload_len)) {
3314  kfree(bsg_fcpt);
3315  rc = -EIO;
3316  goto out;
3317  }
3318 
3319  drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
3320  if (drv_fcxp == NULL) {
3321  kfree(bsg_fcpt);
3322  rc = -ENOMEM;
3323  goto out;
3324  }
3325 
3326  spin_lock_irqsave(&bfad->bfad_lock, flags);
3327  fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
3328  bsg_fcpt->lpwwn);
3329  if (fcs_port == NULL) {
3330  bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
3331  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3332  goto out_free_mem;
3333  }
3334 
3335  /* Check if the port is online before sending FC Passthru cmd */
3336  if (!bfa_fcs_lport_is_online(fcs_port)) {
3337  bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
3338  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3339  goto out_free_mem;
3340  }
3341 
3342  drv_fcxp->port = fcs_port->bfad_port;
3343 
3344  if (drv_fcxp->port->bfad == 0)
3345  drv_fcxp->port->bfad = bfad;
3346 
3347  /* Fetch the bfa_rport - if nexus needed */
3348  if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
3349  command_type == FC_BSG_HST_CT) {
3350  /* BSG HST commands: no nexus needed */
3351  drv_fcxp->bfa_rport = NULL;
3352 
3353  } else if (command_type == FC_BSG_RPT_ELS ||
3354  command_type == FC_BSG_RPT_CT) {
3355  /* BSG RPT commands: nexus needed */
3356  fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
3357  bsg_fcpt->dpwwn);
3358  if (fcs_rport == NULL) {
3359  bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
3360  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3361  goto out_free_mem;
3362  }
3363 
3364  drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
3365 
3366  } else { /* Unknown BSG msgcode; return -EINVAL */
3367  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3368  goto out_free_mem;
3369  }
3370 
3371  spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3372 
3373  /* allocate memory for req / rsp buffers */
3374  req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3375  if (!req_kbuf) {
3376  printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
3377  bfad->pci_name);
3378  rc = -ENOMEM;
3379  goto out_free_mem;
3380  }
3381 
3382  rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
3383  if (!rsp_kbuf) {
3384  printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
3385  bfad->pci_name);
3386  rc = -ENOMEM;
3387  goto out_free_mem;
3388  }
3389 
3390  /* map req sg - copy the sg_list passed in to the linear buffer */
3391  sg_copy_to_buffer(job->request_payload.sg_list,
3392  job->request_payload.sg_cnt, req_kbuf,
3393  job->request_payload.payload_len);
3394 
3395  drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
3396  job->request_payload.payload_len,
3397  &drv_fcxp->num_req_sgles);
3398  if (!drv_fcxp->reqbuf_info) {
3399  printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
3400  bfad->pci_name);
3401  rc = -ENOMEM;
3402  goto out_free_mem;
3403  }
3404 
3405  drv_fcxp->req_sge = (struct bfa_sge_s *)
3406  (((uint8_t *)drv_fcxp->reqbuf_info) +
3407  (sizeof(struct bfad_buf_info) *
3408  drv_fcxp->num_req_sgles));
3409 
3410  /* map rsp sg */
3411  drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
3412  job->reply_payload.payload_len,
3413  &drv_fcxp->num_rsp_sgles);
3414  if (!drv_fcxp->rspbuf_info) {
3415  printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
3416  bfad->pci_name);
3417  rc = -ENOMEM;
3418  goto out_free_mem;
3419  }
3420 
3421  rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
3422  drv_fcxp->rsp_sge = (struct bfa_sge_s *)
3423  (((uint8_t *)drv_fcxp->rspbuf_info) +
3424  (sizeof(struct bfad_buf_info) *
3425  drv_fcxp->num_rsp_sgles));
3426 
3427  /* fcxp send */
3428  init_completion(&drv_fcxp->comp);
3429  rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
3430  if (rc == BFA_STATUS_OK) {
3431  wait_for_completion(&drv_fcxp->comp);
3432  bsg_fcpt->status = drv_fcxp->req_status;
3433  } else {
3434  bsg_fcpt->status = rc;
3435  goto out_free_mem;
3436  }
3437 
3438  /* fill the job->reply data */
3439  if (drv_fcxp->req_status == BFA_STATUS_OK) {
3440  job->reply_len = drv_fcxp->rsp_len;
3441  job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
3442  job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
3443  } else {
3444  job->reply->reply_payload_rcv_len =
3445  sizeof(struct fc_bsg_ctels_reply);
3446  job->reply_len = sizeof(uint32_t);
3447  job->reply->reply_data.ctels_reply.status =
3448  FC_CTELS_STATUS_REJECT;
3449  }
3450 
3451  /* Copy the response data to the reply_payload sg list */
3452  sg_copy_from_buffer(job->reply_payload.sg_list,
3453  job->reply_payload.sg_cnt,
3454  (uint8_t *)rsp_buf_info->virt,
3455  job->reply_payload.payload_len);
3456 
3457 out_free_mem:
3458  bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
3459  drv_fcxp->num_rsp_sgles);
3460  bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
3461  drv_fcxp->num_req_sgles);
3462  kfree(req_kbuf);
3463  kfree(rsp_kbuf);
3464 
3465  /* Need a copy to user op */
3466  if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
3467  bsg_data->payload_len))
3468  rc = -EIO;
3469 
3470  kfree(bsg_fcpt);
3471  kfree(drv_fcxp);
3472 out:
3473  job->reply->result = rc;
3474 
3475  if (rc == BFA_STATUS_OK)
3476  job->job_done(job);
3477 
3478  return rc;
3479 }
3480 
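/*
 * bfad_im_bsg_request() and bfad_im_bsg_timeout() below are the BSG entry
 * points used by the FC transport (bsg_request/bsg_timeout): vendor
 * commands are routed to bfad_im_bsg_vendor_request() and ELS/CT passthru
 * to bfad_im_bsg_els_ct_request().
 */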
3481 int
3482 bfad_im_bsg_request(struct fc_bsg_job *job)
3483 {
3484  uint32_t rc = BFA_STATUS_OK;
3485 
3486  switch (job->request->msgcode) {
3487  case FC_BSG_HST_VENDOR:
3488  /* Process BSG HST Vendor requests */
3489  rc = bfad_im_bsg_vendor_request(job);
3490  break;
3491  case FC_BSG_HST_ELS_NOLOGIN:
3492  case FC_BSG_RPT_ELS:
3493  case FC_BSG_HST_CT:
3494  case FC_BSG_RPT_CT:
3495  /* Process BSG ELS/CT commands */
3496  rc = bfad_im_bsg_els_ct_request(job);
3497  break;
3498  default:
3499  job->reply->result = rc = -EINVAL;
3500  job->reply->reply_payload_rcv_len = 0;
3501  break;
3502  }
3503 
3504  return rc;
3505 }
3506 
3507 int
3508 bfad_im_bsg_timeout(struct fc_bsg_job *job)
3509 {
3510  /* Don't complete the BSG job request - return -EAGAIN
3511  * to reset bsg job timeout : for ELS/CT pass thru we
3512  * already have timer to track the request.
3513  */
3514  return -EAGAIN;
3515 }