Linux Kernel
3.7.1
drivers/scsi/bfa/bfa_ioc.h
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#ifndef __BFA_IOC_H__
#define __BFA_IOC_H__

#include "bfad_drv.h"
#include "bfa_cs.h"
#include "bfi.h"

#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN					\
	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
	(sizeof(struct bfa_trc_mod_s) -				\
	BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
/*
 * BFA timer declarations
 */
typedef void (*bfa_timer_cbfn_t)(void *);

/*
 * BFA timer data structure
 */
struct bfa_timer_s {
	struct list_head	qe;
	bfa_timer_cbfn_t	timercb;
	void			*arg;
	int			timeout;	/* in millisecs */
};

/*
 * Timer module structure
 */
struct bfa_timer_mod_s {
	struct list_head timer_q;
};

#define BFA_TIMER_FREQ 200 /* specified in millisecs */

void bfa_timer_beat(struct bfa_timer_mod_s *mod);
void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
			bfa_timer_cbfn_t timercb, void *arg,
			unsigned int timeout);
void bfa_timer_stop(struct bfa_timer_s *timer);

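/*
 * Editor's note: an illustrative sketch (not part of bfa_ioc.h) of how a
 * caller might drive the timer API above. The callback, wrapper functions
 * and timeout value are hypothetical; bfa_timer_beat() is expected to be
 * run periodically (every BFA_TIMER_FREQ milliseconds) to expire queued
 * timers.
 */
static void my_timeout_cb(void *arg)
{
	/* handle expiry; 'arg' is whatever was passed to bfa_timer_begin() */
}

static void my_arm_timer(struct bfa_timer_mod_s *mod, struct bfa_timer_s *tmr,
			 void *arg)
{
	/* one-shot timer, fires roughly 2 * BFA_TIMER_FREQ ms from now */
	bfa_timer_begin(mod, tmr, my_timeout_cb, arg, 2 * BFA_TIMER_FREQ);
}

static void my_cancel_timer(struct bfa_timer_s *tmr)
{
	bfa_timer_stop(tmr);	/* cancel a pending timer */
}
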
/*
 * Generic Scatter Gather Element used by driver
 */
struct bfa_sge_s {
	u32	sg_len;
	void	*sg_addr;
};

#define bfa_sge_word_swap(__sge) do {					\
	((u32 *)(__sge))[0] = swab32(((u32 *)(__sge))[0]);		\
	((u32 *)(__sge))[1] = swab32(((u32 *)(__sge))[1]);		\
	((u32 *)(__sge))[2] = swab32(((u32 *)(__sge))[2]);		\
} while (0)

#define bfa_swap_words(_x)  (	\
	((_x) << 32) | ((_x) >> 32))

#ifdef __BIG_ENDIAN
#define bfa_sge_to_be(_x)
#define bfa_sge_to_le(_x)	bfa_sge_word_swap(_x)
#define bfa_sgaddr_le(_x)	bfa_swap_words(_x)
#else
#define bfa_sge_to_be(_x)	bfa_sge_word_swap(_x)
#define bfa_sge_to_le(_x)
#define bfa_sgaddr_le(_x)	(_x)
#endif

/*
 * BFA memory resources
 */
struct bfa_mem_dma_s {
	struct list_head qe;		/* Queue of DMA elements */
	u32		mem_len;	/* Total Length in Bytes */
	u8		*kva;		/* kernel virtual address */
	u64		dma;		/* dma address if DMA memory */
	u8		*kva_curp;	/* kva allocation cursor */
	u64		dma_curp;	/* dma allocation cursor */
};
#define bfa_mem_dma_t struct bfa_mem_dma_s

struct bfa_mem_kva_s {
	struct list_head qe;		/* Queue of KVA elements */
	u32		mem_len;	/* Total Length in Bytes */
	u8		*kva;		/* kernel virtual address */
	u8		*kva_curp;	/* kva allocation cursor */
};
#define bfa_mem_kva_t struct bfa_mem_kva_s

struct bfa_meminfo_s {
	struct bfa_mem_dma_s dma_info;
	struct bfa_mem_kva_s kva_info;
};

/* BFA memory segment setup macros */
#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do {	\
	((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz);	\
	if (_seg_sz)						\
		list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe,	\
			      &(_meminfo)->dma_info.qe);	\
} while (0)

#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do {	\
	((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz);	\
	if (_seg_sz)						\
		list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe,	\
			      &(_meminfo)->kva_info.qe);	\
} while (0)

/* BFA dma memory segments iterator */
#define bfa_mem_dma_sptr(_mod, _i)	(&(_mod)->dma_seg[(_i)])
#define bfa_mem_dma_seg_iter(_mod, _sptr, _nr, _i)			\
	for (_i = 0, _sptr = bfa_mem_dma_sptr(_mod, _i); _i < (_nr);	\
	     _i++, _sptr = bfa_mem_dma_sptr(_mod, _i))

#define bfa_mem_kva_curp(_mod)	((_mod)->kva_seg.kva_curp)
#define bfa_mem_dma_virt(_sptr)	((_sptr)->kva_curp)
#define bfa_mem_dma_phys(_sptr)	((_sptr)->dma_curp)
#define bfa_mem_dma_len(_sptr)	((_sptr)->mem_len)

/* Get the corresponding dma buf kva for a req - from the tag */
#define bfa_mem_get_dmabuf_kva(_mod, _tag, _rqsz)			      \
	(((u8 *)(_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].kva_curp) +\
	 BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))

/* Get the corresponding dma buf pa for a req - from the tag */
#define bfa_mem_get_dmabuf_pa(_mod, _tag, _rqsz)			\
	((_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].dma_curp +	\
	 BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))

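/*
 * Editor's note: a sketch (not part of bfa_ioc.h) of how the segment setup
 * macros and iterator above are typically combined. The module structure,
 * segment count and lengths are hypothetical; the pattern assumes a module
 * that exposes a dma_seg[] array, as the iterator macros require.
 */
struct my_mod_s {
	struct bfa_mem_dma_s	dma_seg[2];	/* hypothetical module */
};

static void my_mod_meminfo(struct bfa_meminfo_s *minfo, struct my_mod_s *mod)
{
	struct bfa_mem_dma_s *sptr;
	int i;

	/* register each segment and the DMA length it needs */
	bfa_mem_dma_seg_iter(mod, sptr, 2, i)
		bfa_mem_dma_setup(minfo, sptr, 4096);
}

static void my_mod_use_segs(struct my_mod_s *mod)
{
	struct bfa_mem_dma_s *sptr;
	int i;

	bfa_mem_dma_seg_iter(mod, sptr, 2, i) {
		void *kva = bfa_mem_dma_virt(sptr);	/* CPU address */
		u64 pa = bfa_mem_dma_phys(sptr);	/* DMA address */

		/* ... fill the buffer via kva, program pa into hardware ... */
		(void)kva;
		(void)pa;
	}
}
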
/*
 * PCI device information required by IOC
 */
struct bfa_pcidev_s {
	int		pci_slot;
	u8		pci_func;
	u16		device_id;
	u16		ssid;
	void __iomem	*pci_bar_kva;
};

/*
 * Structure used to remember the DMA-able memory block's KVA and Physical
 * Address
 */
struct bfa_dma_s {
	void		*kva;	/* ! Kernel virtual address */
	u64		pa;	/* ! Physical address */
};

#define BFA_DMA_ALIGN_SZ	256
#define BFA_ROUNDUP(_l, _s)	(((_l) + ((_s) - 1)) & ~((_s) - 1))

/*
 * smem size for Crossbow and Catapult
 */
#define BFI_SMEM_CB_SIZE	0x200000U	/* ! 2MB for crossbow */
#define BFI_SMEM_CT_SIZE	0x280000U	/* ! 2.5MB for catapult */

#define bfa_dma_be_addr_set(dma_addr, pa)	\
		__bfa_dma_be_addr_set(&dma_addr, (u64)pa)
static inline void
__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
{
	dma_addr->a32.addr_lo = cpu_to_be32(pa);
	dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32);
}

#define bfa_alen_set(__alen, __len, __pa)	\
	__bfa_alen_set(__alen, __len, (u64)__pa)

static inline void
__bfa_alen_set(struct bfi_alen_s *alen, u32 len, u64 pa)
{
	alen->al_len = cpu_to_be32(len);
	bfa_dma_be_addr_set(alen->al_addr, pa);
}

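/*
 * Editor's note: a minimal sketch (not part of bfa_ioc.h) showing the
 * helpers above in use. The function and parameter names are illustrative;
 * the point is that a 64-bit host DMA address is stored as two big-endian
 * 32-bit words (plus a length, for bfi_alen_s) before it is handed to
 * firmware.
 */
static inline void my_post_buffer(struct bfi_alen_s *alen, u64 buf_pa,
				  u32 buf_len)
{
	/* fills al_len and the lo/hi halves of al_addr in big-endian order */
	bfa_alen_set(alen, buf_len, buf_pa);
}
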
struct bfa_ioc_regs_s {
	void __iomem *hfn_mbox_cmd;
	void __iomem *hfn_mbox;
	void __iomem *lpu_mbox_cmd;
	void __iomem *lpu_mbox;
	void __iomem *lpu_read_stat;
	void __iomem *pss_ctl_reg;
	void __iomem *pss_err_status_reg;
	void __iomem *app_pll_fast_ctl_reg;
	void __iomem *app_pll_slow_ctl_reg;
	void __iomem *ioc_sem_reg;
	void __iomem *ioc_usage_sem_reg;
	void __iomem *ioc_init_sem_reg;
	void __iomem *ioc_usage_reg;
	void __iomem *host_page_num_fn;
	void __iomem *heartbeat;
	void __iomem *ioc_fwstate;
	void __iomem *alt_ioc_fwstate;
	void __iomem *ll_halt;
	void __iomem *alt_ll_halt;
	void __iomem *err_set;
	void __iomem *ioc_fail_sync;
	void __iomem *shirq_isr_next;
	void __iomem *shirq_msk_next;
	void __iomem *smem_page_start;
	u32	smem_pg0;
};

#define bfa_mem_read(_raddr, _off)	swab32(readl(((_raddr) + (_off))))
#define bfa_mem_write(_raddr, _off, _val)	\
			writel(swab32((_val)), ((_raddr) + (_off)))
/*
 * IOC Mailbox structures
 */
struct bfa_mbox_cmd_s {
	struct list_head	qe;
	u32			msg[BFI_IOC_MSGSZ];
};

/*
 * IOC mailbox module
 */
typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
struct bfa_ioc_mbox_mod_s {
	struct list_head	cmd_q;		/* pending mbox queue */
	int			nmclass;	/* number of handlers */
	struct {
		bfa_ioc_mbox_mcfunc_t	cbfn;	/* message handlers */
		void			*cbarg;
	} mbhdlr[BFI_MC_MAX];
};

/*
 * IOC callback function interfaces
 */
typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
struct bfa_ioc_cbfn_s {
	bfa_ioc_enable_cbfn_t	enable_cbfn;
	bfa_ioc_disable_cbfn_t	disable_cbfn;
	bfa_ioc_hbfail_cbfn_t	hbfail_cbfn;
	bfa_ioc_reset_cbfn_t	reset_cbfn;
};

/*
 * IOC event notification mechanism.
 */
enum bfa_ioc_event_e {
	BFA_IOC_E_ENABLED	= 1,
	BFA_IOC_E_DISABLED	= 2,
	BFA_IOC_E_FAILED	= 3,
};

typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event_e);

struct bfa_ioc_notify_s {
	struct list_head	qe;
	bfa_ioc_notify_cbfn_t	cbfn;
	void			*cbarg;
};

/*
 * Initialize an IOC event notification structure
 */
#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do {	\
	(__notify)->cbfn = (__cbfn);				\
	(__notify)->cbarg = (__cbarg);				\
} while (0)

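/*
 * Editor's note: a sketch (not part of bfa_ioc.h) of how a sub-module would
 * typically subscribe to IOC events with the structure and macro above. The
 * module and callback names are hypothetical; notify_q is the listener list
 * kept in struct bfa_ioc_s (declared below).
 */
static void my_ioc_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		/* flush work that depends on the IOC being up */
		break;
	default:
		break;
	}
}

static void my_mod_listen(struct bfa_ioc_s *ioc,
			  struct bfa_ioc_notify_s *notify, void *mod)
{
	bfa_ioc_notify_init(notify, my_ioc_notify, mod);
	list_add_tail(&notify->qe, &ioc->notify_q);
}
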
struct bfa_iocpf_s {
	bfa_fsm_t		fsm;
	struct bfa_ioc_s	*ioc;
	bfa_boolean_t		fw_mismatch_notified;
	bfa_boolean_t		auto_recover;
	u32			poll_time;
};

struct bfa_ioc_s {
	bfa_fsm_t		fsm;
	struct bfa_s		*bfa;
	struct bfa_pcidev_s	pcidev;
	struct bfa_timer_mod_s	*timer_mod;
	struct bfa_timer_s	ioc_timer;
	struct bfa_timer_s	sem_timer;
	struct bfa_timer_s	hb_timer;
	u32			hb_count;
	struct list_head	notify_q;
	void			*dbg_fwsave;
	int			dbg_fwsave_len;
	bfa_boolean_t		dbg_fwsave_once;
	enum bfi_pcifn_class	clscode;
	struct bfa_ioc_regs_s	ioc_regs;
	struct bfa_trc_mod_s	*trcmod;
	struct bfa_ioc_drv_stats_s	stats;
	bfa_boolean_t		fcmode;
	bfa_boolean_t		pllinit;
	bfa_boolean_t		stats_busy;	/* outstanding stats */
	u8			port_id;
	struct bfa_dma_s	attr_dma;
	struct bfi_ioc_attr_s	*attr;
	struct bfa_ioc_cbfn_s	*cbfn;
	struct bfa_ioc_mbox_mod_s	mbox_mod;
	struct bfa_ioc_hwif_s	*ioc_hwif;
	struct bfa_iocpf_s	iocpf;
	enum bfi_asic_gen	asic_gen;
	enum bfi_asic_mode	asic_mode;
	enum bfi_port_mode	port0_mode;
	enum bfi_port_mode	port1_mode;
	enum bfa_mode_s		port_mode;
	u8			ad_cap_bm;	/* adapter cap bit mask */
	u8			port_mode_cfg;	/* config port mode */
	int			ioc_aen_seq;
};

struct bfa_ioc_hwif_s {
	bfa_status_t	(*ioc_pll_init) (void __iomem *rb, enum bfi_asic_mode m);
	bfa_boolean_t	(*ioc_firmware_lock)	(struct bfa_ioc_s *ioc);
	void		(*ioc_firmware_unlock)	(struct bfa_ioc_s *ioc);
	void		(*ioc_reg_init)	(struct bfa_ioc_s *ioc);
	void		(*ioc_map_port)	(struct bfa_ioc_s *ioc);
	void		(*ioc_isr_mode_set)	(struct bfa_ioc_s *ioc,
					bfa_boolean_t msix);
	void		(*ioc_notify_fail)	(struct bfa_ioc_s *ioc);
	void		(*ioc_ownership_reset)	(struct bfa_ioc_s *ioc);
	bfa_boolean_t	(*ioc_sync_start)	(struct bfa_ioc_s *ioc);
	void		(*ioc_sync_join)	(struct bfa_ioc_s *ioc);
	void		(*ioc_sync_leave)	(struct bfa_ioc_s *ioc);
	void		(*ioc_sync_ack)		(struct bfa_ioc_s *ioc);
	bfa_boolean_t	(*ioc_sync_complete)	(struct bfa_ioc_s *ioc);
	bfa_boolean_t	(*ioc_lpu_read_stat)	(struct bfa_ioc_s *ioc);
};

/*
 * Queue element to wait for room in request queue. FIFO order is
 * maintained when fulfilling requests.
 */
struct bfa_reqq_wait_s {
	struct list_head	qe;
	void			(*qresume) (void *cbarg);
	void			*cbarg;
};

typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);

/*
 * Generic BFA callback element.
 */
struct bfa_cb_qe_s {
	struct list_head	qe;
	bfa_cb_cbfn_t		cbfn;
	bfa_boolean_t		once;
	bfa_boolean_t		pre_rmv;	/* set for stack based qe(s) */
	bfa_status_t		fw_status;	/* to access fw status in comp proc */
	void			*cbarg;
};

/*
 * IOCFC state machine definitions/declarations
 */
enum iocfc_event {
	IOCFC_E_INIT		= 1,	/* IOCFC init request */
	IOCFC_E_START		= 2,	/* IOCFC mod start request */
	IOCFC_E_STOP		= 3,	/* IOCFC stop request */
	IOCFC_E_ENABLE		= 4,	/* IOCFC enable request */
	IOCFC_E_DISABLE		= 5,	/* IOCFC disable request */
	IOCFC_E_IOC_ENABLED	= 6,	/* IOC enabled message */
	IOCFC_E_IOC_DISABLED	= 7,	/* IOC disabled message */
	IOCFC_E_IOC_FAILED	= 8,	/* failure notice by IOC sm */
	IOCFC_E_DCONF_DONE	= 9,	/* dconf read/write done */
	IOCFC_E_CFG_DONE	= 10,	/* IOCFC config complete */
};

/*
 * ASIC block configuration related
 */

typedef void (*bfa_ablk_cbfn_t)(void *, enum bfa_status);

struct bfa_ablk_s {
	struct bfa_ioc_s	*ioc;
	struct bfa_ablk_cfg_s	*cfg;
	u16			*pcifn;
	struct bfa_dma_s	dma_addr;
	bfa_boolean_t		busy;
	struct bfa_mbox_cmd_s	mb;
	bfa_ablk_cbfn_t		cbfn;
	void			*cbarg;
	struct bfa_ioc_notify_s	ioc_notify;
	struct bfa_mem_dma_s	ablk_dma;
};
#define BFA_MEM_ABLK_DMA(__bfa)	(&((__bfa)->modules.ablk.ablk_dma))

/*
 * SFP module specific
 */
typedef void	(*bfa_cb_sfp_t) (void *cbarg, bfa_status_t status);

struct bfa_sfp_s {
	void			*dev;
	struct bfa_ioc_s	*ioc;
	struct bfa_trc_mod_s	*trcmod;
	struct sfp_mem_s	*sfpmem;
	bfa_cb_sfp_t		cbfn;
	void			*cbarg;
	enum bfi_sfp_mem_e	memtype;	/* mem access type */
	u32			status;
	struct bfa_mbox_cmd_s	mbcmd;
	u8			*dbuf_kva;	/* dma buf virtual address */
	u64			dbuf_pa;	/* dma buf physical address */
	struct bfa_ioc_notify_s	ioc_notify;
	enum bfa_defs_sfp_media_e *media;
	enum bfa_port_speed	portspeed;
	bfa_cb_sfp_t		state_query_cbfn;
	void			*state_query_cbarg;
	u8			lock;
	u8			data_valid;	/* data in dbuf is valid */
	u8			state;		/* sfp state */
	u8			state_query_lock;
	struct bfa_mem_dma_s	sfp_dma;
	u8			is_elb;		/* eloopback */
};

#define BFA_SFP_MOD(__bfa)	(&(__bfa)->modules.sfp)
#define BFA_MEM_SFP_DMA(__bfa)	(&(BFA_SFP_MOD(__bfa)->sfp_dma))

u32	bfa_sfp_meminfo(void);

void	bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc,
			void *dev, struct bfa_trc_mod_s *trcmod);

void	bfa_sfp_memclaim(struct bfa_sfp_s *diag, u8 *dm_kva, u64 dm_pa);
void	bfa_sfp_intr(void *bfaarg, struct bfi_mbmsg_s *msg);

bfa_status_t	bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
			bfa_cb_sfp_t cbfn, void *cbarg);

bfa_status_t	bfa_sfp_media(struct bfa_sfp_s *sfp,
			enum bfa_defs_sfp_media_e *media,
			bfa_cb_sfp_t cbfn, void *cbarg);

bfa_status_t	bfa_sfp_speed(struct bfa_sfp_s *sfp,
			enum bfa_port_speed portspeed,
			bfa_cb_sfp_t cbfn, void *cbarg);

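/*
 * Editor's note: the SFP calls above (and the flash, diag, phy and fru APIs
 * later in this header) share one asynchronous pattern: pass a completion
 * callback and argument, check the immediate bfa_status_t return, and wait
 * for the callback to run from the mailbox completion path. The caller
 * names below are hypothetical; this is an illustrative sketch, not driver
 * code.
 */
static void my_sfp_show_done(void *cbarg, bfa_status_t status)
{
	/* runs later, once the firmware reply arrives */
	if (status == BFA_STATUS_OK) {
		/* 'cbarg' is the context passed to bfa_sfp_show() below */
	}
}

static bfa_status_t my_query_sfp(struct bfa_sfp_s *sfp,
				 struct sfp_mem_s *sfpmem, void *ctx)
{
	bfa_status_t rc;

	rc = bfa_sfp_show(sfp, sfpmem, my_sfp_show_done, ctx);
	if (rc != BFA_STATUS_OK)
		return rc;	/* rejected up front; callback will not run */
	return BFA_STATUS_OK;	/* completion reported via my_sfp_show_done() */
}
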
/*
 * Flash module specific
 */
typedef void	(*bfa_cb_flash_t) (void *cbarg, bfa_status_t status);

struct bfa_flash_s {
	struct bfa_ioc_s	*ioc;		/* back pointer to ioc */
	struct bfa_trc_mod_s	*trcmod;
	u32		type;		/* partition type */
	u8		instance;	/* partition instance */
	u8		rsv[3];
	u32		op_busy;	/* operation busy flag */
	u32		residue;	/* residual length */
	u32		offset;		/* offset */
	bfa_status_t	status;		/* status */
	u8		*dbuf_kva;	/* dma buf virtual address */
	u64		dbuf_pa;	/* dma buf physical address */
	struct bfa_reqq_wait_s	reqq_wait;	/* to wait for room in reqq */
	bfa_cb_flash_t	cbfn;		/* user callback function */
	void		*cbarg;		/* user callback arg */
	u8		*ubuf;		/* user supplied buffer */
	struct bfa_cb_qe_s	hcb_qe;	/* comp: BFA callback qelem */
	u32		addr_off;	/* partition address offset */
	struct bfa_mbox_cmd_s	mb;	/* mailbox */
	struct bfa_ioc_notify_s	ioc_notify;	/* ioc event notify */
	struct bfa_mem_dma_s	flash_dma;
};

#define BFA_FLASH(__bfa)		(&(__bfa)->modules.flash)
#define BFA_MEM_FLASH_DMA(__bfa)	(&(BFA_FLASH(__bfa)->flash_dma))

bfa_status_t bfa_flash_get_attr(struct bfa_flash_s *flash,
			struct bfa_flash_attr_s *attr,
			bfa_cb_flash_t cbfn, void *cbarg);
bfa_status_t bfa_flash_erase_part(struct bfa_flash_s *flash,
			enum bfa_flash_part_type type, u8 instance,
			bfa_cb_flash_t cbfn, void *cbarg);
bfa_status_t bfa_flash_update_part(struct bfa_flash_s *flash,
			enum bfa_flash_part_type type, u8 instance,
			void *buf, u32 len, u32 offset,
			bfa_cb_flash_t cbfn, void *cbarg);
bfa_status_t bfa_flash_read_part(struct bfa_flash_s *flash,
			enum bfa_flash_part_type type, u8 instance, void *buf,
			u32 len, u32 offset, bfa_cb_flash_t cbfn, void *cbarg);
u32	bfa_flash_meminfo(bfa_boolean_t mincfg);
void	bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc,
		void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
void	bfa_flash_memclaim(struct bfa_flash_s *flash,
		u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);

/*
 * DIAG module specific
 */

typedef void (*bfa_cb_diag_t) (void *cbarg, bfa_status_t status);
typedef void (*bfa_cb_diag_beacon_t) (void *dev, bfa_boolean_t beacon,
			bfa_boolean_t link_e2e_beacon);

/*
 * Firmware ping test results
 */
struct bfa_diag_results_fwping {
	u32	data;	/* store the corrupted data */
	u32	status;
	u32	dmastatus;
	u8	rsvd[4];
};

struct bfa_diag_qtest_result_s {
	u32	status;
	u16	count;	/* successful queue test count */
	u8	queue;
	u8	rsvd;	/* 64-bit align */
};

/*
 * Firmware ping test results
 */
struct bfa_diag_fwping_s {
	struct bfa_diag_results_fwping	*result;
	bfa_cb_diag_t	cbfn;
	void		*cbarg;
	u32		data;
	u8		lock;
	u8		rsv[3];
	u32		status;
	u32		count;
	struct bfa_mbox_cmd_s	mbcmd;
	u8		*dbuf_kva;	/* dma buf virtual address */
	u64		dbuf_pa;	/* dma buf physical address */
};

/*
 * Temperature sensor query results
 */
struct bfa_diag_results_tempsensor_s {
	u32	status;
	u16	temp;		/* 10-bit A/D value */
	u16	brd_temp;	/* 9-bit board temp */
	u8	ts_junc;	/* show junction tempsensor */
	u8	ts_brd;		/* show board tempsensor */
	u8	rsvd[6];	/* keep 8 bytes alignment */
};

struct bfa_diag_tsensor_s {
	bfa_cb_diag_t	cbfn;
	void		*cbarg;
	struct bfa_diag_results_tempsensor_s	*temp;
	u8		lock;
	u8		rsv[3];
	u32		status;
	struct bfa_mbox_cmd_s	mbcmd;
};

struct bfa_diag_sfpshow_s {
	struct sfp_mem_s	*sfpmem;
	bfa_cb_diag_t		cbfn;
	void			*cbarg;
	u8	lock;
	u8	static_data;
	u8	rsv[2];
	u32	status;
	struct bfa_mbox_cmd_s	mbcmd;
	u8	*dbuf_kva;	/* dma buf virtual address */
	u64	dbuf_pa;	/* dma buf physical address */
};

struct bfa_diag_led_s {
	struct bfa_mbox_cmd_s	mbcmd;
	bfa_boolean_t		lock;	/* 1: ledtest is operating */
};

struct bfa_diag_beacon_s {
	struct bfa_mbox_cmd_s	mbcmd;
	bfa_boolean_t		state;		/* port beacon state */
	bfa_boolean_t		link_e2e;	/* link beacon state */
};

struct bfa_diag_s {
	void			*dev;
	struct bfa_ioc_s	*ioc;
	struct bfa_trc_mod_s	*trcmod;
	struct bfa_diag_fwping_s	fwping;
	struct bfa_diag_tsensor_s	tsensor;
	struct bfa_diag_sfpshow_s	sfpshow;
	struct bfa_diag_led_s		ledtest;
	struct bfa_diag_beacon_s	beacon;
	void			*result;
	struct bfa_timer_s	timer;
	bfa_cb_diag_beacon_t	cbfn_beacon;
	bfa_cb_diag_t		cbfn;
	void			*cbarg;
	u8			block;
	u8			timer_active;
	u8			rsvd[2];
	u32			status;
	struct bfa_ioc_notify_s	ioc_notify;
	struct bfa_mem_dma_s	diag_dma;
};

#define BFA_DIAG_MOD(__bfa)	(&(__bfa)->modules.diag_mod)
#define BFA_MEM_DIAG_DMA(__bfa)	(&(BFA_DIAG_MOD(__bfa)->diag_dma))

u32	bfa_diag_meminfo(void);
void	bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa);
void	bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
			bfa_cb_diag_beacon_t cbfn_beacon,
			struct bfa_trc_mod_s *trcmod);
bfa_status_t	bfa_diag_reg_read(struct bfa_diag_s *diag, u32 offset,
			u32 len, u32 *buf, u32 force);
bfa_status_t	bfa_diag_reg_write(struct bfa_diag_s *diag, u32 offset,
			u32 len, u32 value, u32 force);
bfa_status_t	bfa_diag_tsensor_query(struct bfa_diag_s *diag,
			struct bfa_diag_results_tempsensor_s *result,
			bfa_cb_diag_t cbfn, void *cbarg);
bfa_status_t	bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt,
			u32 pattern, struct bfa_diag_results_fwping *result,
			bfa_cb_diag_t cbfn, void *cbarg);
bfa_status_t	bfa_diag_sfpshow(struct bfa_diag_s *diag,
			struct sfp_mem_s *sfpmem, u8 static_data,
			bfa_cb_diag_t cbfn, void *cbarg);
bfa_status_t	bfa_diag_memtest(struct bfa_diag_s *diag,
			struct bfa_diag_memtest_s *memtest, u32 pattern,
			struct bfa_diag_memtest_result *result,
			bfa_cb_diag_t cbfn, void *cbarg);
bfa_status_t	bfa_diag_ledtest(struct bfa_diag_s *diag,
			struct bfa_diag_ledtest_s *ledtest);
bfa_status_t	bfa_diag_beacon_port(struct bfa_diag_s *diag,
			bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon,
			u32 sec);

/*
 * PHY module specific
 */
typedef void (*bfa_cb_phy_t) (void *cbarg, bfa_status_t status);

struct bfa_phy_s {
	struct bfa_ioc_s	*ioc;	/* back pointer to ioc */
	struct bfa_trc_mod_s	*trcmod;	/* trace module */
	u8	instance;	/* port instance */
	u8	op_busy;	/* operation busy flag */
	u8	rsv[2];
	u32	residue;	/* residual length */
	u32	offset;		/* offset */
	bfa_status_t	status;	/* status */
	u8	*dbuf_kva;	/* dma buf virtual address */
	u64	dbuf_pa;	/* dma buf physical address */
	struct bfa_reqq_wait_s	reqq_wait;	/* to wait for room in reqq */
	bfa_cb_phy_t	cbfn;	/* user callback function */
	void		*cbarg;	/* user callback arg */
	u8		*ubuf;	/* user supplied buffer */
	struct bfa_cb_qe_s	hcb_qe;	/* comp: BFA callback qelem */
	u32	addr_off;	/* phy address offset */
	struct bfa_mbox_cmd_s	mb;	/* mailbox */
	struct bfa_ioc_notify_s	ioc_notify;	/* ioc event notify */
	struct bfa_mem_dma_s	phy_dma;
};
#define BFA_PHY(__bfa)	(&(__bfa)->modules.phy)
#define BFA_MEM_PHY_DMA(__bfa)	(&(BFA_PHY(__bfa)->phy_dma))

bfa_boolean_t	bfa_phy_busy(struct bfa_ioc_s *ioc);
bfa_status_t	bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
			struct bfa_phy_attr_s *attr,
			bfa_cb_phy_t cbfn, void *cbarg);
bfa_status_t	bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
			struct bfa_phy_stats_s *stats,
			bfa_cb_phy_t cbfn, void *cbarg);
bfa_status_t	bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
			void *buf, u32 len, u32 offset,
			bfa_cb_phy_t cbfn, void *cbarg);
bfa_status_t	bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
			void *buf, u32 len, u32 offset,
			bfa_cb_phy_t cbfn, void *cbarg);

u32	bfa_phy_meminfo(bfa_boolean_t mincfg);
void	bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc,
		void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
void	bfa_phy_memclaim(struct bfa_phy_s *phy,
		u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
void	bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);

/*
 * FRU module specific
 */
typedef void (*bfa_cb_fru_t) (void *cbarg, bfa_status_t status);

struct bfa_fru_s {
	struct bfa_ioc_s	*ioc;	/* back pointer to ioc */
	struct bfa_trc_mod_s	*trcmod;	/* trace module */
	u8		op_busy;	/* operation busy flag */
	u8		rsv[3];
	u32		residue;	/* residual length */
	u32		offset;		/* offset */
	bfa_status_t	status;		/* status */
	u8		*dbuf_kva;	/* dma buf virtual address */
	u64		dbuf_pa;	/* dma buf physical address */
	struct bfa_reqq_wait_s	reqq_wait;	/* to wait for room in reqq */
	bfa_cb_fru_t	cbfn;		/* user callback function */
	void		*cbarg;		/* user callback arg */
	u8		*ubuf;		/* user supplied buffer */
	struct bfa_cb_qe_s	hcb_qe;	/* comp: BFA callback qelem */
	u32		addr_off;	/* fru address offset */
	struct bfa_mbox_cmd_s	mb;	/* mailbox */
	struct bfa_ioc_notify_s	ioc_notify;	/* ioc event notify */
	struct bfa_mem_dma_s	fru_dma;
};

#define BFA_FRU(__bfa)		(&(__bfa)->modules.fru)
#define BFA_MEM_FRU_DMA(__bfa)	(&(BFA_FRU(__bfa)->fru_dma))

bfa_status_t	bfa_fruvpd_update(struct bfa_fru_s *fru,
			void *buf, u32 len, u32 offset,
			bfa_cb_fru_t cbfn, void *cbarg);
bfa_status_t	bfa_fruvpd_read(struct bfa_fru_s *fru,
			void *buf, u32 len, u32 offset,
			bfa_cb_fru_t cbfn, void *cbarg);
bfa_status_t	bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size);
bfa_status_t	bfa_tfru_write(struct bfa_fru_s *fru,
			void *buf, u32 len, u32 offset,
			bfa_cb_fru_t cbfn, void *cbarg);
bfa_status_t	bfa_tfru_read(struct bfa_fru_s *fru,
			void *buf, u32 len, u32 offset,
			bfa_cb_fru_t cbfn, void *cbarg);
u32	bfa_fru_meminfo(bfa_boolean_t mincfg);
void	bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc,
		void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
void	bfa_fru_memclaim(struct bfa_fru_s *fru,
		u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
void	bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg);

/*
 * Driver Config (dconf) specific
 */
#define BFI_DCONF_SIGNATURE	0xabcdabcd
#define BFI_DCONF_VERSION	1

#pragma pack(1)
struct bfa_dconf_hdr_s {
	u32	signature;
	u32	version;
};

struct bfa_dconf_s {
	struct bfa_dconf_hdr_s		hdr;
	struct bfa_lunmask_cfg_s	lun_mask;
	struct bfa_throttle_cfg_s	throttle_cfg;
};
#pragma pack()

struct bfa_dconf_mod_s {
	bfa_sm_t		sm;
	u8			instance;
	bfa_boolean_t		read_data_valid;
	bfa_boolean_t		min_cfg;
	struct bfa_timer_s	timer;
	struct bfa_s		*bfa;
	void			*bfad;
	void			*trcmod;
	struct bfa_dconf_s	*dconf;
	struct bfa_mem_kva_s	kva_seg;
};

#define BFA_DCONF_MOD(__bfa)	\
	(&(__bfa)->modules.dconf_mod)
#define BFA_MEM_DCONF_KVA(__bfa)	(&(BFA_DCONF_MOD(__bfa)->kva_seg))
#define bfa_dconf_read_data_valid(__bfa)	\
	(BFA_DCONF_MOD(__bfa)->read_data_valid)
#define BFA_DCONF_UPDATE_TOV	5000	/* memtest timeout in msec */
#define bfa_dconf_get_min_cfg(__bfa)	\
	(BFA_DCONF_MOD(__bfa)->min_cfg)

void	bfa_dconf_modinit(struct bfa_s *bfa);
void	bfa_dconf_modexit(struct bfa_s *bfa);
bfa_status_t	bfa_dconf_update(struct bfa_s *bfa);

/*
 * IOC specific macros
 */
#define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
#define bfa_ioc_devid(__ioc)		((__ioc)->pcidev.device_id)
#define bfa_ioc_bar0(__ioc)		((__ioc)->pcidev.pci_bar_kva)
#define bfa_ioc_portid(__ioc)		((__ioc)->port_id)
#define bfa_ioc_asic_gen(__ioc)		((__ioc)->asic_gen)
#define bfa_ioc_is_cna(__ioc)	\
	((bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_FCoE) ||	\
	 (bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_LL))
#define bfa_ioc_fetch_stats(__ioc, __stats) \
		(((__stats)->drv_stats) = (__ioc)->stats)
#define bfa_ioc_clr_stats(__ioc)	\
		memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
#define bfa_ioc_maxfrsize(__ioc)	((__ioc)->attr->maxfrsize)
#define bfa_ioc_rx_bbcredit(__ioc)	((__ioc)->attr->rx_bbcredit)
#define bfa_ioc_speed_sup(__ioc)	\
	((bfa_ioc_is_cna(__ioc)) ? BFA_PORT_SPEED_10GBPS :	\
	 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop))
#define bfa_ioc_get_nports(__ioc)	\
	BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)

#define bfa_ioc_stats(_ioc, _stats)	((_ioc)->stats._stats++)
#define BFA_IOC_FWIMG_MINSZ	(16 * 1024)
#define BFA_IOC_FW_SMEM_SIZE(__ioc)			\
	((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB)	\
	 ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
#define BFA_IOC_FLASH_CHUNK_NO(off)		(off / BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)	(off % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)	(chunkno * BFI_FLASH_CHUNK_SZ_WORDS)

/*
 * IOC mailbox interface
 */
void	bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd);
void	bfa_ioc_mbox_register(struct bfa_ioc_s *ioc,
		bfa_ioc_mbox_mcfunc_t *mcfuncs);
void	bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc);
void	bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len);
bfa_boolean_t	bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
void	bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);

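/*
 * Editor's note: a sketch (not part of bfa_ioc.h) of how the mailbox
 * interface above is typically used. The message class BFI_MC_PORT is only
 * an example, and the context structure and handler names are hypothetical;
 * the request payload would be one of the bfi_*_req_s structures from bfi.h.
 */
struct my_mb_ctx_s {
	struct bfa_mbox_cmd_s	cmd;	/* must stay allocated until sent */
};

static void my_mbox_rsp(void *cbarg, struct bfi_mbmsg_s *m)
{
	/* invoked from bfa_ioc_mbox_isr() for the registered message class */
}

static void my_mbox_setup(struct bfa_ioc_s *ioc, struct my_mb_ctx_s *ctx)
{
	bfa_ioc_mbox_regisr(ioc, BFI_MC_PORT, my_mbox_rsp, ctx);
}

static void my_mbox_post(struct bfa_ioc_s *ioc, struct my_mb_ctx_s *ctx)
{
	/* build the request in ctx->cmd.msg[], then queue it; the command is
	 * held on the pending queue if the hardware mailbox is busy.
	 */
	bfa_ioc_mbox_queue(ioc, &ctx->cmd);
}
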
/*
 * IOC interfaces
 */

#define bfa_ioc_pll_init_asic(__ioc) \
	((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
			   (__ioc)->asic_mode))

bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode);

#define bfa_ioc_isr_mode_set(__ioc, __msix) do {			\
	if ((__ioc)->ioc_hwif->ioc_isr_mode_set)			\
		((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix));	\
} while (0)
#define bfa_ioc_ownership_reset(__ioc)				\
			((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
#define bfa_ioc_get_fcmode(__ioc)	((__ioc)->fcmode)
#define bfa_ioc_lpu_read_stat(__ioc) do {			\
	if ((__ioc)->ioc_hwif->ioc_lpu_read_stat)		\
		((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc));	\
} while (0)

void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
void bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc);
void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc);

void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
		struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
void bfa_ioc_detach(struct bfa_ioc_s *ioc);
void bfa_ioc_suspend(struct bfa_ioc_s *ioc);
void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		enum bfi_pcifn_class clscode);
void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa);
void bfa_ioc_enable(struct bfa_ioc_s *ioc);
void bfa_ioc_disable(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);

void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
		u32 boot_env);
void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc);
enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver);
void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model);
void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc,
		char *manufacturer);
void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev);
enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);

void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
		struct bfa_adapter_attr_s *ad_attr);
void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
		int *trclen);
bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
		int *trclen);
bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
		u32 *offset, int *buflen);
bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
		struct bfi_ioc_image_hdr_s *fwhdr);
bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
		struct bfi_ioc_image_hdr_s *fwhdr);
void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);

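/*
 * Editor's note: an illustrative sketch (not part of bfa_ioc.h) of the usual
 * bring-up order for the IOC interfaces above: attach, PCI init, claim the
 * attribute DMA memory, then enable. The wrapper name is hypothetical,
 * BFI_PCIFN_CLASS_FC is used only as an example personality, and error
 * handling is elided; completion is reported asynchronously through the
 * bfa_ioc_cbfn_s callbacks passed at attach time.
 */
static void my_ioc_start(struct bfa_ioc_s *ioc, void *bfa,
			 struct bfa_ioc_cbfn_s *cbfn,
			 struct bfa_timer_mod_s *timer_mod,
			 struct bfa_pcidev_s *pcidev, u8 *dm_kva, u64 dm_pa)
{
	bfa_ioc_attach(ioc, bfa, cbfn, timer_mod);
	bfa_ioc_pci_init(ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);

	bfa_ioc_enable(ioc);	/* cbfn->enable_cbfn() signals the result */
}
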
/*
 * asic block configuration related APIs
 */
u32	bfa_ablk_meminfo(void);
void	bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa);
void	bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc);
bfa_status_t	bfa_ablk_query(struct bfa_ablk_s *ablk,
		struct bfa_ablk_cfg_s *ablk_cfg,
		bfa_ablk_cbfn_t cbfn, void *cbarg);
bfa_status_t	bfa_ablk_adapter_config(struct bfa_ablk_s *ablk,
		enum bfa_mode_s mode, int max_pf, int max_vf,
		bfa_ablk_cbfn_t cbfn, void *cbarg);
bfa_status_t	bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port,
		enum bfa_mode_s mode, int max_pf, int max_vf,
		bfa_ablk_cbfn_t cbfn, void *cbarg);
bfa_status_t	bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
		u8 port, enum bfi_pcifn_class personality,
		u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
bfa_status_t	bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
		bfa_ablk_cbfn_t cbfn, void *cbarg);
bfa_status_t	bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn,
		u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
bfa_status_t	bfa_ablk_optrom_en(struct bfa_ablk_s *ablk,
		bfa_ablk_cbfn_t cbfn, void *cbarg);
bfa_status_t	bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk,
		bfa_ablk_cbfn_t cbfn, void *cbarg);

/*
 * bfa mfg wwn API functions
 */
mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);

/*
 * F/W Image Size & Chunk
 */
extern u32 bfi_image_cb_size;
extern u32 bfi_image_ct_size;
extern u32 bfi_image_ct2_size;
extern u32 *bfi_image_cb;
extern u32 *bfi_image_ct;
extern u32 *bfi_image_ct2;

static inline u32 *
bfi_image_cb_get_chunk(u32 off)
{
	return (u32 *)(bfi_image_cb + off);
}

static inline u32 *
bfi_image_ct_get_chunk(u32 off)
{
	return (u32 *)(bfi_image_ct + off);
}

static inline u32 *
bfi_image_ct2_get_chunk(u32 off)
{
	return (u32 *)(bfi_image_ct2 + off);
}

static inline u32 *
bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
{
	switch (asic_gen) {
	case BFI_ASIC_GEN_CB:
		return bfi_image_cb_get_chunk(off);
		break;
	case BFI_ASIC_GEN_CT:
		return bfi_image_ct_get_chunk(off);
		break;
	case BFI_ASIC_GEN_CT2:
		return bfi_image_ct2_get_chunk(off);
		break;
	default:
		return NULL;
	}
}

static inline u32
bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
{
	switch (asic_gen) {
	case BFI_ASIC_GEN_CB:
		return bfi_image_cb_size;
		break;
	case BFI_ASIC_GEN_CT:
		return bfi_image_ct_size;
		break;
	case BFI_ASIC_GEN_CT2:
		return bfi_image_ct2_size;
		break;
	default:
		return 0;
	}
}

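/*
 * Editor's note: a hedged sketch (not part of bfa_ioc.h) tying the image
 * accessors above to the flash-chunk macros defined earlier. It assumes the
 * image size is maintained as a count of 32-bit words, with chunks of
 * BFI_FLASH_CHUNK_SZ_WORDS words each; the walker function itself is
 * illustrative only.
 */
static inline void
my_walk_fw_image(enum bfi_asic_gen asic_gen)
{
	u32 nwords = bfa_cb_image_get_size(asic_gen);
	u32 *chunk = bfa_cb_image_get_chunk(asic_gen, 0);
	u32 w;

	for (w = 0; w < nwords; w++) {
		/* fetch a new chunk pointer at every chunk boundary */
		if (BFA_IOC_FLASH_OFFSET_IN_CHUNK(w) == 0)
			chunk = bfa_cb_image_get_chunk(asic_gen,
				BFA_IOC_FLASH_CHUNK_ADDR(
					BFA_IOC_FLASH_CHUNK_NO(w)));
		/* current word of the image */
		(void)chunk[BFA_IOC_FLASH_OFFSET_IN_CHUNK(w)];
	}
}
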
/*
 * CNA TRCMOD declaration
 */
/*
 * !!! Only append to the enums defined here to avoid any versioning
 * !!! needed between trace utility and driver version
 */
enum {
	BFA_TRC_CNA_PORT	= 1,
	BFA_TRC_CNA_IOC		= 2,
	BFA_TRC_CNA_IOC_CB	= 3,
	BFA_TRC_CNA_IOC_CT	= 4,
};

#endif /* __BFA_IOC_H__ */