40 #include <linux/pci.h>
44 #include <linux/module.h>
56 #define pr_fmt(fmt) QIB_DRV_NAME " " fmt
59 static void qib_7322_handle_hwerrors(
struct qib_devdata *,
char *,
size_t);
69 static u8 qib_7322_phys_portstate(
u64);
70 static u32 qib_7322_iblink_state(
u64);
79 static void ibsd_wr_allchans(
struct qib_pportdata *,
int,
unsigned,
unsigned);
80 static void serdes_7322_los_enable(
struct qib_pportdata *,
int);
84 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
92 #define IBSD(hw_pidx) (hw_pidx + 2)
95 static const unsigned rcv_int_timeout = 375;
96 static const unsigned rcv_int_count = 16;
97 static const unsigned sdma_idle_cnt = 64;
100 #define RXEQ_DISABLE_MSECS 2500
110 static ushort qib_chase = 1;
114 static ushort qib_long_atten = 10;
117 "attenuation cutoff (dB) for long copper cable setup");
119 static ushort qib_singleport;
121 MODULE_PARM_DESC(singleport,
"Use only IB port 1; more per-port buffer space");
123 static ushort qib_krcvq01_no_msi;
130 static unsigned qib_rcvhdrcnt;
134 static unsigned qib_rcvhdrsize;
138 static unsigned qib_rcvhdrentsize;
142 #define MAX_ATTEN_LEN 64
146 .string = txselect_list,
149 static int setup_txselect(
const char *,
struct kernel_param *);
153 "Tx serdes indices (for no QSFP or invalid QSFP data)");
155 #define BOARD_QME7342 5
156 #define BOARD_QMH7342 6
157 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
159 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
162 #define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
164 #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
166 #define MASK_ACROSS(lsb, msb) \
167 (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
169 #define SYM_RMASK(regname, fldname) ((u64) \
170 QIB_7322_##regname##_##fldname##_RMASK)
172 #define SYM_MASK(regname, fldname) ((u64) \
173 QIB_7322_##regname##_##fldname##_RMASK << \
174 QIB_7322_##regname##_##fldname##_LSB)
176 #define SYM_FIELD(value, regname, fldname) ((u64) \
177 (((value) >> SYM_LSB(regname, fldname)) & \
178 SYM_RMASK(regname, fldname)))
181 #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
182 (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
184 #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
185 #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
186 #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
187 #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
188 #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
190 #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
193 #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
199 #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
200 #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT)
201 #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT)
202 #define IBA7322_TID_PA_SHIFT 11U
204 #define SendIBSLIDAssignMask \
205 QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
206 #define SendIBSLMCMask \
207 QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
209 #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
210 #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
211 #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
212 #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
213 #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
214 #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
216 #define _QIB_GPIO_SDA_NUM 1
217 #define _QIB_GPIO_SCL_NUM 0
218 #define QIB_EEPROM_WEN_NUM 14
219 #define QIB_TWSI_EEPROM_DEV 0xA2
222 #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
225 #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
226 #define PORT_SPD_CAP_SHIFT 3
229 #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
/*
 * Device-scope kernel register indices ("kr_*").  Each is a u64-word
 * index into the chip's kernel register space, produced by KREG_IDX()
 * (register byte offset / sizeof(u64)).
 */
237 #define kr_contextcnt KREG_IDX(ContextCnt)
238 #define kr_control KREG_IDX(Control)
239 #define kr_counterregbase KREG_IDX(CntrRegBase)
240 #define kr_errclear KREG_IDX(ErrClear)
241 #define kr_errmask KREG_IDX(ErrMask)
242 #define kr_errstatus KREG_IDX(ErrStatus)
243 #define kr_extctrl KREG_IDX(EXTCtrl)
244 #define kr_extstatus KREG_IDX(EXTStatus)
245 #define kr_gpio_clear KREG_IDX(GPIOClear)
246 #define kr_gpio_mask KREG_IDX(GPIOMask)
247 #define kr_gpio_out KREG_IDX(GPIOOut)
248 #define kr_gpio_status KREG_IDX(GPIOStatus)
249 #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
250 #define kr_debugportval KREG_IDX(DebugPortValueReg)
251 #define kr_fmask KREG_IDX(feature_mask)
252 #define kr_act_fmask KREG_IDX(active_feature_mask)
/* Hardware-error and interrupt control/status registers. */
253 #define kr_hwerrclear KREG_IDX(HwErrClear)
254 #define kr_hwerrmask KREG_IDX(HwErrMask)
255 #define kr_hwerrstatus KREG_IDX(HwErrStatus)
256 #define kr_intclear KREG_IDX(IntClear)
257 #define kr_intmask KREG_IDX(IntMask)
258 #define kr_intredirect KREG_IDX(IntRedirect0)
259 #define kr_intstatus KREG_IDX(IntStatus)
260 #define kr_pagealign KREG_IDX(PageAlign)
/* Receive-side configuration (TID, eager buffers, header queues). */
261 #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
262 #define kr_rcvctrl KREG_IDX(RcvCtrl)
263 #define kr_rcvegrbase KREG_IDX(RcvEgrBase)
264 #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
265 #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
266 #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
267 #define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
268 #define kr_rcvtidbase KREG_IDX(RcvTIDBase)
269 #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
270 #define kr_revision KREG_IDX(Revision)
271 #define kr_scratch KREG_IDX(Scratch)
/* Send-side (PIO buffer) configuration and status. */
272 #define kr_sendbuffererror KREG_IDX(SendBufErr0)
273 #define kr_sendcheckmask KREG_IDX(SendCheckMask0)
274 #define kr_sendctrl KREG_IDX(SendCtrl)
275 #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0)
276 #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0)
277 #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
278 #define kr_sendpiobufbase KREG_IDX(SendBufBase)
279 #define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
280 #define kr_sendpiosize KREG_IDX(SendBufSize)
281 #define kr_sendregbase KREG_IDX(SendRegBase)
282 #define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
283 #define kr_userregbase KREG_IDX(UserRegBase)
284 #define kr_intgranted KREG_IDX(Int_Granted)
285 #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
286 #define kr_intblocked KREG_IDX(IntBlocked)
287 #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
/*
 * Per-IB-port kernel register indices ("krp_*"), resolved through
 * KREG_IBPORT_IDX() from the port-0 instance's byte offset.  How the
 * second port's copy is addressed is not visible in this chunk.
 */
293 #define krp_errclear KREG_IBPORT_IDX(ErrClear)
294 #define krp_errmask KREG_IBPORT_IDX(ErrMask)
295 #define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
296 #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
297 #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
298 #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
299 #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
300 #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
301 #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
302 #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
303 #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
304 #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
305 #define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
306 #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
307 #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
308 #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
309 #define krp_psinterval KREG_IBPORT_IDX(PSInterval)
310 #define krp_psstart KREG_IBPORT_IDX(PSStart)
311 #define krp_psstat KREG_IBPORT_IDX(PSStat)
312 #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
313 #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
314 #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
315 #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
/* RxCreditVL0..VL15 are 16 consecutive registers; VL15 is base + 15. */
316 #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
317 #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
318 #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
319 #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
/* Send-DMA engine registers; BufMask/BufUsed are 3-register arrays. */
320 #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
321 #define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
322 #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
323 #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
324 #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
325 #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
326 #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
327 #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
328 #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
329 #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
330 #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
331 #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
332 #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
333 #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
334 #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
335 #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
336 #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
337 #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
338 #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
/* SERDES / adaptation control registers. */
339 #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
340 #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
341 #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
342 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
/*
 * Per-context receive-header address registers (context-0 instance;
 * other contexts are presumably reached by adding the context number,
 * as qib_write_kreg_ctxt() does regno + ctxt -- confirm).
 */
348 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
349 #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
/* TID flow table: 0x20 entries per context; ur_rcvflowtable is the
 * table's index expressed relative to RcvHdrTail0. */
359 #define NUM_TIDFLOWS_CTXT 0x20
360 #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
/* Error bits (GenMismatch | SeqMismatch) within a RcvTIDFlowTable0 entry. */
363 #define TIDFLOW_ERRBITS ( \
364 (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
365 SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
366 (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
367 SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
/*
 * Counter register indices.  CREG_IDX()/CREG_DEVIDX() convert a
 * counter's byte offset, relative to LBIntCnt, into a u64-word index.
 * crp_* use the per-port (_0) instance; cr_* are device-wide.
 */
372 #define CREG_IDX(regname) \
373 ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
375 #define crp_badformat CREG_IDX(RxVersionErrCnt)
376 #define crp_err_rlen CREG_IDX(RxLenErrCnt)
377 #define crp_erricrc CREG_IDX(RxICRCErrCnt)
378 #define crp_errlink CREG_IDX(RxLinkMalformCnt)
379 #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
380 #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
381 #define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
382 #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
383 #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
384 #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
385 #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
386 #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
387 #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
388 #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
389 #define crp_pktrcv CREG_IDX(RxDataPktCnt)
390 #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
391 #define crp_pktsend CREG_IDX(TxDataPktCnt)
392 #define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
/* PS* counters belong to the port-sampling (PSInterval/PSStart) block. */
393 #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
394 #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
395 #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
396 #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
397 #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
398 #define crp_rcvebp CREG_IDX(RxEBPCnt)
399 #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
400 #define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
401 #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
402 #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
403 #define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
404 #define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
405 #define crp_rxvlerr CREG_IDX(RxVlErrCnt)
406 #define crp_sendstall CREG_IDX(TxFlowStallCnt)
407 #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
408 #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
409 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
410 #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
411 #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
412 #define crp_txunderrun CREG_IDX(TxUnderrunCnt)
413 #define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
414 #define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
415 #define crp_wordrcv CREG_IDX(RxDwordCnt)
416 #define crp_wordsend CREG_IDX(TxDwordCnt)
417 #define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
/* Device-wide counters (no per-port _0 suffix in the register name). */
420 #define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
421 QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
422 #define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
423 #define cr_lbint CREG_DEVIDX(LBIntCnt)
424 #define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
425 #define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
426 #define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
427 #define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
428 #define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
/* The 7322 has two IB ports; one dedicated VL15 buffer per port. */
431 #define NUM_IB_PORTS 2
434 #define NUM_VL15_BUFS NUM_IB_PORTS
/* Eager buffer count for kernel context 0. */
442 #define KCTXT0_EGRCNT 2048
/*
 * PBC (per-buffer control word) field positions: port select and VL
 * number, plus the bits that force a VL15 send (bit 63 in the PBC,
 * bit 31 in the control form).
 */
445 #define PBC_PORT_SEL_LSB 26
446 #define PBC_PORT_SEL_RMASK 1
447 #define PBC_VL_NUM_LSB 27
448 #define PBC_VL_NUM_RMASK 7
449 #define PBC_7322_VL15_SEND (1ULL << 63)
450 #define PBC_7322_VL15_SEND_CTRL (1ULL << 31)
/* Shifts for the active-speed/width fields of IBCStatusA. */
461 #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
462 #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
/*
 * IB link-training state values, as reported in the LinkTrainingState
 * field of IBCStatusA (used to index qib_7322_physportstate[0x20]).
 */
465 #define IB_7322_LT_STATE_DISABLED 0x00
466 #define IB_7322_LT_STATE_LINKUP 0x01
467 #define IB_7322_LT_STATE_POLLACTIVE 0x02
468 #define IB_7322_LT_STATE_POLLQUIET 0x03
469 #define IB_7322_LT_STATE_SLEEPDELAY 0x04
470 #define IB_7322_LT_STATE_SLEEPQUIET 0x05
471 #define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
472 #define IB_7322_LT_STATE_CFGRCVFCFG 0x09
473 #define IB_7322_LT_STATE_CFGWAITRMT 0x0a
474 #define IB_7322_LT_STATE_CFGIDLE 0x0b
475 #define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
476 #define IB_7322_LT_STATE_TXREVLANES 0x0d
477 #define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
478 #define IB_7322_LT_STATE_RECOVERIDLE 0x0f
479 #define IB_7322_LT_STATE_CFGENH 0x10
480 #define IB_7322_LT_STATE_CFGTEST 0x11
481 #define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
482 #define IB_7322_LT_STATE_CFGWAITENH 0x13
/* IB logical link state values (Down/Init/Armed/Active). */
485 #define IB_7322_L_STATE_DOWN 0x0
486 #define IB_7322_L_STATE_INIT 0x1
487 #define IB_7322_L_STATE_ARM 0x2
488 #define IB_7322_L_STATE_ACTIVE 0x3
489 #define IB_7322_L_STATE_ACT_DEFER 0x4
491 static const u8 qib_7322_physportstate[0x20] = {
575 #define TXDDS_TABLE_SZ 16
576 #define TXDDS_EXTRA_SZ 13
577 #define TXDDS_MFG_SZ 2
578 #define SERDES_CHANS 4
580 #define H1_FORCE_VAL 8
581 #define H1_FORCE_QME 1
582 #define H1_FORCE_QMH 7
585 #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
588 #define QDR_DFE_DISABLE_DELAY 4000
589 #define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL
590 #define QDR_STATIC_ADAPT_DOWN_R1 0ULL
591 #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL
592 #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL
646 {
"", qib_7322intr, -1, 0 },
647 {
" (buf avail)", qib_7322bufavail,
649 {
" (sdma 0)", sdma_intr,
651 {
" (sdma 1)", sdma_intr,
653 {
" (sdmaI 0)", sdma_idle_intr,
655 {
" (sdmaI 1)", sdma_idle_intr,
657 {
" (sdmaP 0)", sdma_progress_intr,
659 {
" (sdmaP 1)", sdma_progress_intr,
661 {
" (sdmaC 0)", sdma_cleanup_intr,
663 {
" (sdmaC 1)", sdma_cleanup_intr,
668 #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
670 #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
672 #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
673 #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
675 #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1
676 #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2
677 #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3
679 #define BLOB_7322_IBCHG 0x101
681 static inline void qib_write_kreg(
const struct qib_devdata *
dd,
684 static void write_7322_initregs(
struct qib_devdata *);
685 static void write_7322_init_portregs(
struct qib_pportdata *);
742 static inline void qib_write_ureg(
const struct qib_devdata *dd,
757 writeq(value, &ubase[regno]);
776 static inline void qib_write_kreg(
const struct qib_devdata *dd,
777 const u32 regno,
u64 value)
795 static inline void qib_write_kreg_port(
const struct qib_pportdata *ppd,
796 const u16 regno,
u64 value)
810 static inline void qib_write_kreg_ctxt(
const struct qib_devdata *dd,
811 const u16 regno,
unsigned ctxt,
814 qib_write_kreg(dd, regno + ctxt, value);
835 static inline void write_7322_creg_port(
const struct qib_pportdata *ppd,
862 #define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
863 #define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
866 #define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
867 #define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
868 #define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
869 #define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
870 #define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
871 #define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
872 #define QIB_I_C_ERROR INT_MASK(Err)
874 #define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
875 #define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
876 #define QIB_I_GPIO INT_MASK(AssertGPIO)
877 #define QIB_I_P_SDMAINT(pidx) \
878 (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
879 INT_MASK_P(SDmaProgress, pidx) | \
880 INT_MASK_PM(SDmaCleanupDone, pidx))
883 #define QIB_I_P_BITSEXTANT(pidx) \
884 (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
885 INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
886 INT_MASK_P(SDmaProgress, pidx) | \
887 INT_MASK_PM(SDmaCleanupDone, pidx))
891 #define QIB_I_C_BITSEXTANT \
892 (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
894 QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
896 #define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
897 QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
/*
 * Error bit definitions.  QIB_E_P_* are per-port errors (ErrMask_0
 * layout, built with ERR_MASK_N); QIB_E_* without _P are device-scope
 * errors (ErrMask, built with ERR_MASK).
 */
902 #define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
903 #define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
904 #define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
905 #define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
906 #define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
907 #define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
908 #define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
909 #define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
910 #define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
911 #define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
912 #define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
913 #define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
914 #define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
915 #define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
916 #define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
917 #define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
918 #define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
919 #define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
920 #define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
921 #define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
922 #define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
923 #define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
924 #define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
925 #define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
926 #define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
927 #define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
928 #define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
929 #define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
/* Per-port send-DMA (SDMA) engine errors. */
931 #define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
932 #define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
933 #define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
934 #define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
935 #define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
936 #define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
937 #define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
938 #define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
939 #define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
940 #define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
941 #define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
/* Device-scope errors. */
944 #define QIB_E_RESET ERR_MASK(ResetNegated)
945 #define QIB_E_HARDWARE ERR_MASK(HardwareErr)
946 #define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
956 #define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
957 #define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
958 #define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
959 #define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
960 #define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
961 #define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
962 #define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
963 #define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
/* Device-scope send-DMA errors. */
974 #define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
975 #define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
976 #define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
982 #define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
983 QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
984 QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
985 QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
/* Receive-side per-port packet errors, grouped for bulk handling. */
989 #define QIB_E_P_RPKTERRS (\
990 QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
991 QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
992 QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
993 QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
994 QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
995 QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
/* Send-side per-port packet errors. */
1002 #define QIB_E_P_SPKTERRS (\
1003 QIB_E_P_SUNEXP_PKTNUM |\
1004 QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1005 QIB_E_P_SMAXPKTLEN |\
1006 QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1007 QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1008 QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
/*
 * Device-scope send errors.
 * NOTE(review): this device-scope mask mixes in a per-port bit via
 * ERR_MASK_N(SendUnsupportedVLErr) -- confirm against the hardware
 * documentation that this is intentional and not a typo for ERR_MASK.
 */
1010 #define QIB_E_SPKTERRS ( \
1011 QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1012 ERR_MASK_N(SendUnsupportedVLErr) | \
1013 QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
/* All per-port SDMA error bits combined. */
1015 #define QIB_E_P_SDMAERRS ( \
1016 QIB_E_P_SDMAHALT | \
1017 QIB_E_P_SDMADESCADDRMISALIGN | \
1018 QIB_E_P_SDMAUNEXPDATA | \
1019 QIB_E_P_SDMAMISSINGDW | \
1020 QIB_E_P_SDMADWEN | \
1021 QIB_E_P_SDMARPYTAG | \
1022 QIB_E_P_SDMA1STDESC | \
1023 QIB_E_P_SDMABASE | \
1024 QIB_E_P_SDMATAILOUTOFBOUND | \
1025 QIB_E_P_SDMAOUTOFBOUND | \
1026 QIB_E_P_SDMAGENMISMATCH)
1033 #define QIB_E_P_BITSEXTANT ( \
1034 QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1035 QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1036 QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1037 QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1047 #define QIB_E_P_LINK_PKTERRS (\
1048 QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1049 QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1050 QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1058 #define QIB_E_C_BITSEXTANT (\
1059 QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1060 QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1061 QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
/* No send-packet errors are ignored by default. */
1064 #define E_SPKT_ERRS_IGNORE 0
/* EXTStatus memory-BIST result bits. */
1066 #define QIB_EXTS_MEMBIST_DISABLED \
1067 SYM_MASK(EXTStatus, MemBISTDisabled)
1068 #define QIB_EXTS_MEMBIST_ENDTEST \
1069 SYM_MASK(EXTStatus, MemBISTEndTest)
1071 #define QIB_E_SPIOARMLAUNCH \
1072 ERR_MASK(SendArmLaunchErr)
/* IBCCtrlA link init/command fields (per-port, _0 layout). */
1074 #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1075 #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
/* IBCCtrlB speed-negotiation fields (SDR/DDR/QDR, IBTA 1.2 enhanced). */
1083 #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1084 #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1085 #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1086 #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1087 #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1088 #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1089 SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1090 #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
/* LED blink on/off period shifts (RcvPktLEDCnt). */
1092 #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1093 #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
/* Link width selection via IB_NUM_CHANNELS: autoneg, 4X-only, 1X-only. */
1095 #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1096 #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1097 #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
/* RX polarity reversal and heartbeat (HRTBT) control fields. */
1099 #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1100 #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1101 #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1102 SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1103 #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1104 SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1105 #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
/* Interrupt redirection: vectors packed per IntRedirect register. */
1107 #define IBA7322_REDIRECT_VEC_PER_REG 12
/* SendCheckControl: per-port outbound packet sanity-check enables. */
1109 #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1110 #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1111 #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1112 #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1113 #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1115 #define AUTONEG_TRIES 3
/*
 * HWE_AUTO / HWE_AUTO_P build { .mask, .msg, .sz } table entries for
 * hardware-error decoding; the field name doubles as the message text
 * (consumed via msp->mask / msp->sz in err_decode()).
 */
1117 #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1118 .msg = #fldname , .sz = sizeof(#fldname) }
1119 #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1120 fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
1138 { .mask = 0, .sz = 0 }
1141 #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1142 .msg = #fldname, .sz = sizeof(#fldname) }
1143 #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1144 .msg = #fldname, .sz = sizeof(#fldname) }
1152 E_AUTO(SBufVL15MisUseErr),
1154 E_AUTO(RcvContextShareErr),
1155 E_AUTO(SendVLMismatchErr),
1156 E_AUTO(SendArmLaunchErr),
1157 E_AUTO(SendSpecialTriggerErr),
1158 E_AUTO(SDmaWrongPortErr),
1159 E_AUTO(SDmaBufMaskDuplicateErr),
1160 { .mask = 0, .sz = 0 }
1170 {.mask =
SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg =
"SDmaHalted",
1207 { .mask = 0, .sz = 0 }
1214 #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1215 .msg = #fldname, .sz = sizeof(#fldname) }
1217 #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1218 SYM_LSB(IntMask, fldname##Mask##_0), \
1219 SYM_LSB(IntMask, fldname##Mask##_1)), \
1220 .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1222 #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1223 SYM_LSB(IntMask, fldname##Mask##_1), \
1224 SYM_LSB(IntMask, fldname##Mask##_0)), \
1225 .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1230 #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1231 SYM_LSB(IntMask, fldname##0IntMask), \
1232 SYM_LSB(IntMask, fldname##17IntMask)), \
1233 .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
1247 { .mask = 0, .sz = 0 }
1250 #define TXSYMPTOM_AUTO_P(fldname) \
1251 { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1252 .msg = #fldname, .sz = sizeof(#fldname) }
1261 { .mask = 0, .sz = 0 }
1264 #define IBA7322_HDRHEAD_PKTINT_SHIFT 32
1271 static void qib_disarm_7322_senderrbufs(
struct qib_pportdata *ppd)
1278 unsigned long sbuf[4];
1285 for (i = 0; i < regcnt; ++
i) {
1300 static void err_decode(
char *
msg,
size_t len,
u64 errs,
1306 while (errs && msp && msp->
mask) {
1307 multi = (msp->
mask & (msp->
mask - 1));
1308 while (errs & msp->
mask) {
1309 these = (errs & msp->
mask);
1310 lmask = (these & (these - 1)) ^ these;
1319 took =
min_t(
size_t, msp->
sz - (
size_t)1, len);
1331 while (lmask & msp->
mask) {
1344 snprintf(msg, len,
"%sMORE:%llX", n ?
"," :
"",
1345 (
unsigned long long) errs);
1356 const unsigned hdrwords = 7;
1377 piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1381 hdr = (
u32 *) &ibhdr;
1386 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1396 static void qib_7322_sdma_sendctrl(
struct qib_pportdata *ppd,
unsigned op)
1399 u64 set_sendctrl = 0;
1400 u64 clr_sendctrl = 0;
1403 set_sendctrl |=
SYM_MASK(SendCtrl_0, SDmaEnable);
1405 clr_sendctrl |=
SYM_MASK(SendCtrl_0, SDmaEnable);
1408 set_sendctrl |=
SYM_MASK(SendCtrl_0, SDmaIntEnable);
1410 clr_sendctrl |=
SYM_MASK(SendCtrl_0, SDmaIntEnable);
1413 set_sendctrl |=
SYM_MASK(SendCtrl_0, SDmaHalt);
1415 clr_sendctrl |=
SYM_MASK(SendCtrl_0, SDmaHalt);
1418 set_sendctrl |=
SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1419 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1420 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1422 clr_sendctrl |=
SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1423 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1424 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1429 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1441 SYM_MASK(SendCtrl_0, SDmaCleanup));
1446 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1454 if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->
dd->cspec->r1)
1458 static void qib_7322_sdma_hw_clean_up(
struct qib_pportdata *ppd)
1463 static void qib_sdma_7322_setlengen(
struct qib_pportdata *ppd)
1490 static void qib_7322_sdma_hw_start_up(
struct qib_pportdata *ppd)
1500 qib_sdma_7322_setlengen(ppd);
1501 qib_sdma_update_7322_tail(ppd, 0);
1503 qib_7322_sdma_sendctrl(ppd,
1504 ppd->
sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
/*
 * Per-port SDMA error bits (same set as QIB_E_P_SDMAERRS) grouped as
 * the conditions that disable the send-DMA engine; presumably checked
 * by the SDMA error path (sdma_7322_p_errors) -- confirm.
 */
1507 #define DISABLES_SDMA ( \
1508 QIB_E_P_SDMAHALT | \
1509 QIB_E_P_SDMADESCADDRMISALIGN | \
1510 QIB_E_P_SDMAMISSINGDW | \
1511 QIB_E_P_SDMADWEN | \
1512 QIB_E_P_SDMARPYTAG | \
1513 QIB_E_P_SDMA1STDESC | \
1514 QIB_E_P_SDMABASE | \
1515 QIB_E_P_SDMATAILOUTOFBOUND | \
1516 QIB_E_P_SDMAOUTOFBOUND | \
1517 QIB_E_P_SDMAGENMISMATCH)
1521 unsigned long flags;
1549 if (errs & QIB_E_P_SDMAHALT)
1555 if (errs & QIB_E_P_SDMAHALT)
1566 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1584 "device error interrupt, but no error bits set!\n");
1589 errs &= dd->
cspec->errormask;
1590 msg = dd->
cspec->emsgbuf;
1595 qib_7322_handle_hwerrors(dd, msg,
sizeof dd->
cspec->emsgbuf);
1602 qib_disarm_7322_senderrbufs(dd->
pport);
1608 qib_disarm_7322_senderrbufs(dd->
pport);
1620 err_decode(msg,
sizeof dd->
cspec->emsgbuf, errs & ~mask,
1621 qib_7322error_msgs);
1631 "Got reset, requires re-init (unload and reload driver)\n");
1635 for (pidx = 0; pidx < dd->
num_pports; ++pidx)
1636 if (dd->
pport[pidx].link_speed_supported)
1652 if (errs &
ERR_MASK(RcvEgrFullErr))
1662 static void qib_error_tasklet(
unsigned long data)
1666 handle_7322_errors(dd);
1670 static void reenable_chase(
unsigned long opaque)
1674 ppd->
cpspec->chase_timer.expires = 0;
1679 static void disable_chase(
struct qib_pportdata *ppd,
unsigned long tnow,
1682 ppd->
cpspec->chase_end = 0;
1698 ibclt = (
u8)
SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1712 if (ppd->
cpspec->chase_end &&
1714 disable_chase(ppd, tnow, ibclt);
1715 else if (!ppd->
cpspec->chase_end)
1719 ppd->
cpspec->chase_end = 0;
1726 (ibcst &
SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1728 ppd->
cpspec->qdr_reforce = 1;
1729 if (!ppd->
dd->cspec->r1)
1730 serdes_7322_los_enable(ppd, 0);
1731 }
else if (ppd->
cpspec->qdr_reforce &&
1732 (ibcst &
SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1747 u8 ltstate = qib_7322_phys_portstate(ibcst);
1750 if (!ppd->
dd->cspec->r1 &&
1758 serdes_7322_los_enable(ppd, 1);
1759 if (!ppd->
cpspec->qdr_dfe_on &&
1761 ppd->
cpspec->qdr_dfe_on = 1;
1762 ppd->
cpspec->qdr_dfe_time = 0;
1765 ppd->
dd->cspec->r1 ?
1769 "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1770 ppd->
dd->unit, ppd->
port, ibclt);
1785 u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1791 check_7322_rxe_status(ppd);
1796 "Port%d error interrupt, but no error bits set!\n",
1803 msg = ppd->
cpspec->epmsgbuf;
1807 err_decode(msg,
sizeof ppd->
cpspec->epmsgbuf,
1813 "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1824 err_decode(msg,
sizeof ppd->
cpspec->epmsgbuf, symptom,
1840 err_decode(msg,
sizeof ppd->
cpspec->epmsgbuf,
1841 (errs & QIB_E_P_LINK_PKTERRS),
1842 qib_7322p_error_msgs);
1846 qib_disarm_7322_senderrbufs(ppd);
1847 }
else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1856 err_decode(msg,
sizeof ppd->
cpspec->epmsgbuf, errs,
1857 qib_7322p_error_msgs);
1864 errs &= ~ignore_this_time;
1870 if (errs & QIB_E_P_SPKTERRS)
1876 sdma_7322_p_errors(ppd, errs);
1883 ltstate = qib_7322_phys_portstate(ibcs);
1886 handle_serdes_issues(ppd, ibcs);
1887 if (!(ppd->
cpspec->ibcctrl_a &
1888 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1894 ppd->
cpspec->ibcctrl_a |=
1895 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1902 (ibcs &
SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1906 SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1911 qib_set_ib_7322_lstate(ppd, 0,
1946 if (dd->
cspec->num_msix_entries) {
1971 static void qib_7322_clear_freeze(
struct qib_devdata *dd)
1978 for (pidx = 0; pidx < dd->
num_pports; ++pidx)
1979 if (dd->
pport[pidx].link_speed_supported)
1984 qib_7322_set_intr_state(dd, 0);
2000 for (pidx = 0; pidx < dd->
num_pports; ++pidx) {
2001 if (!dd->
pport[pidx].link_speed_supported)
2006 qib_7322_set_intr_state(dd, 1);
2021 static void qib_7322_handle_hwerrors(
struct qib_devdata *dd,
char *msg,
2031 if (hwerrs == ~0ULL) {
2033 "Read of hardware error status failed (all bits set); ignoring\n");
2042 hwerrs &= dd->
cspec->hwerrmask;
2048 "Hardware error: hwerr=0x%llx (cleared)\n",
2049 (
unsigned long long) hwerrs);
2056 if ((hwerrs & ~
HWE_MASK(LATriggered)) ||
2057 dd->
cspec->stay_in_freeze) {
2068 qib_7322_clear_freeze(dd);
2071 if (hwerrs &
HWE_MASK(PowerOnBISTFailed)) {
2074 "[Memory BIST test failed, InfiniPath hardware unusable]",
2081 err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2089 "Fatal Hardware Error, no longer usable, SN %.16s\n",
2113 static void qib_7322_init_hwerrors(
struct qib_devdata *dd)
2132 for (pidx = 0; pidx < dd->
num_pports; ++pidx)
2133 if (dd->
pport[pidx].link_speed_supported)
2144 static void qib_set_7322_armlaunch(
struct qib_devdata *dd,
u32 enable)
2164 unsigned long flags;
2174 qib_7322_mini_pcs_reset(ppd);
2191 ppd->
cpspec->ibcctrl_a &=
2192 ~
SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2213 #define RCV_BUF_UNITSZ 64
2214 #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
2218 int i, numvls, totcred, cred_vl, vl0extra;
2235 cred_vl = totcred / numvls;
2236 vl0extra = totcred - cred_vl * numvls;
2238 for (i = 1; i < numvls; i++)
2245 val |=
SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2248 val &= ~
SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2251 for (i = 0; i < numvls; i++)
2258 ((
u64)(numvls - 1) <<
SYM_LSB(IBCCtrlA_0, NumVLane));
2273 static int qib_7322_bringup_serdes(
struct qib_pportdata *ppd)
2277 unsigned long flags;
2291 ppd->
cpspec->ibdeltainprog = 1;
2292 ppd->
cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2294 ppd->
cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2299 ibc = 0x5ULL <<
SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2305 ibc |= 24ULL <<
SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2307 ibc |= 0xfULL <<
SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2309 ibc |= 0xfULL <<
SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2315 SYM_LSB(IBCCtrlA_0, MaxPktLen);
2316 ppd->
cpspec->ibcctrl_a = ibc;
2322 qib_7322_mini_pcs_reset(ppd);
2324 if (!ppd->
cpspec->ibcctrl_b) {
2331 ppd->
cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2337 SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2338 if (lse & (lse - 1))
2339 ppd->
cpspec->ibcctrl_b |=
2354 ppd->
cpspec->ibcctrl_b |=
2367 val &= ~
SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2368 val |= 0xfULL <<
SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2371 serdes_7322_init(ppd);
2394 ppd->
cpspec->ibcctrl_a = val & ~
SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2400 spin_unlock_irqrestore(&dd->
cspec->rcvmod_lock, flags);
2416 static void qib_7322_mini_quiet_serdes(
struct qib_pportdata *ppd)
2419 unsigned long flags;
2428 if (ppd->
dd->cspec->r1)
2431 ppd->
cpspec->chase_end = 0;
2432 if (ppd->
cpspec->chase_timer.data)
2443 qib_7322_mini_pcs_reset(ppd);
2449 if (ppd->
cpspec->ibsymdelta || ppd->
cpspec->iblnkerrdelta ||
2450 ppd->
cpspec->ibdeltainprog || ppd->
cpspec->iblnkdowndelta) {
2457 diagc |
SYM_MASK(HwDiagCtrl, CounterWrEnable));
2459 if (ppd->
cpspec->ibsymdelta || ppd->
cpspec->ibdeltainprog) {
2461 if (ppd->
cpspec->ibdeltainprog)
2462 val -= val - ppd->
cpspec->ibsymsnap;
2463 val -= ppd->
cpspec->ibsymdelta;
2466 if (ppd->
cpspec->iblnkerrdelta || ppd->
cpspec->ibdeltainprog) {
2468 if (ppd->
cpspec->ibdeltainprog)
2469 val -= val - ppd->
cpspec->iblnkerrsnap;
2470 val -= ppd->
cpspec->iblnkerrdelta;
2473 if (ppd->
cpspec->iblnkdowndelta) {
2475 val += ppd->
cpspec->iblnkdowndelta;
2513 u64 extctl, ledblink = 0,
val;
2514 unsigned long flags;
2530 grn = qib_7322_phys_portstate(val) ==
2539 extctl = dd->
cspec->extctrl & (ppd->
port == 1 ?
2553 dd->
cspec->extctrl = extctl;
2555 spin_unlock_irqrestore(&dd->
cspec->gpio_lock, flags);
2566 static void qib_7322_nomsix(
struct qib_devdata *dd)
2571 dd->
cspec->main_int_mask = ~0ULL;
2572 n = dd->
cspec->num_msix_entries;
2576 dd->
cspec->num_msix_entries = 0;
2577 for (i = 0; i <
n; i++) {
2578 irq_set_affinity_hint(
2579 dd->
cspec->msix_entries[i].msix.vector,
NULL);
2580 free_cpumask_var(dd->
cspec->msix_entries[i].mask);
2582 dd->
cspec->msix_entries[i].arg);
2592 static void qib_7322_free_irq(
struct qib_devdata *dd)
2594 if (dd->
cspec->irq) {
2598 qib_7322_nomsix(dd);
2601 static void qib_setup_7322_cleanup(
struct qib_devdata *dd)
2605 qib_7322_free_irq(dd);
2612 unsigned long flags;
2619 dd->
cspec->gpio_mask &= ~mask;
2621 spin_unlock_irqrestore(&dd->
cspec->gpio_lock, flags);
2624 if (dd->
pport[i].ibport_data.smi_ah)
2653 static void qib_wantpiobuf_7322_intr(
struct qib_devdata *dd,
u32 needint)
2655 unsigned long flags;
2679 "Clearing reserved interrupt(s) 0x%016llx: %s\n",
2680 (
unsigned long long) kills, msg);
2681 qib_write_kreg(dd,
kr_intmask, (dd->
cspec->int_enable_mask & ~kills));
2716 if (!dd->
pport[pidx].link_speed_supported)
2719 ppd = dd->
pport + pidx;
2721 if (gpiostatus & dd->
cspec->gpio_mask & mask) {
2723 qd = &ppd->
cpspec->qsfp_data;
2724 gpiostatus &= ~mask;
2726 pins >>=
SYM_LSB(EXTStatus, GPIOIn);
2727 if (!(pins & mask)) {
2735 if (gpiostatus && !handled) {
2737 u32 gpio_irq = mask & gpiostatus;
2742 dd->
cspec->gpio_mask &= ~gpio_irq;
2754 unknown_7322_ibits(dd, istat);
2756 unknown_7322_gpio_intr(dd);
2762 handle_7322_p_errors(dd->
rcd[0]->ppd);
2764 handle_7322_p_errors(dd->
rcd[1]->ppd);
2771 static void adjust_rcv_timeout(
struct qib_ctxtdata *rcd,
int npkts)
2780 if (npkts < rcv_int_count && timeout > 2)
2782 else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
2783 timeout =
min(timeout << 1, rcv_int_timeout);
2787 dd->
cspec->rcvavail_timeout[rcd->
ctxt] = timeout;
2824 qib_dev_err(dd,
"Interrupt status all f's, skipping\n");
2830 istat &= dd->
cspec->main_int_mask;
2845 unlikely_7322_intr(dd, istat);
2865 if (ctxtrbits & rmask) {
2866 ctxtrbits &= ~rmask;
2880 sdma_7322_intr(dd, istat);
2893 static irqreturn_t qib_7322pintr(
int irq,
void *data)
2924 static irqreturn_t qib_7322bufavail(
int irq,
void *data)
2942 qib_write_kreg(dd,
kr_intclear, QIB_I_SPIOBUFAVAIL);
2948 qib_wantpiobuf_7322_intr(dd, 0);
2985 static irqreturn_t sdma_idle_intr(
int irq,
void *data)
3014 static irqreturn_t sdma_progress_intr(
int irq,
void *data)
3044 static irqreturn_t sdma_cleanup_intr(
int irq,
void *data)
3079 static void qib_setup_7322_interrupt(
struct qib_devdata *dd,
int clearpend)
3081 int ret,
i, msixnum;
3084 const struct cpumask *local_mask;
3085 int firstcpu, secondcpu = 0, currrcvcpu = 0;
3096 qib_7322_set_intr_state(dd, 0);
3099 qib_7322_init_hwerrors(dd);
3109 if (!dd->
cspec->num_msix_entries) {
3114 "irq is 0, BIOS error? Interrupts won't work\n");
3121 "Couldn't setup INTx interrupt (irq=%d): %d\n",
3126 dd->
cspec->main_int_mask = ~0ULL;
3131 memset(redirect, 0,
sizeof redirect);
3135 firstcpu = cpumask_first(local_mask);
3136 if (firstcpu >= nr_cpu_ids ||
3139 firstcpu = cpumask_first(local_mask);
3141 if (firstcpu < nr_cpu_ids) {
3142 secondcpu = cpumask_next(firstcpu, local_mask);
3143 if (secondcpu >= nr_cpu_ids)
3144 secondcpu = firstcpu;
3145 currrcvcpu = secondcpu;
3147 for (i = 0; msixnum < dd->
cspec->num_msix_entries; i++) {
3153 dd->
cspec->msix_entries[msixnum].
3154 name[
sizeof(dd->
cspec->msix_entries[msixnum].name) - 1]
3157 if (irq_table[i].
port) {
3161 arg = dd->
pport + irq_table[
i].port - 1;
3164 lsb = irq_table[
i].lsb;
3165 handler = irq_table[
i].handler;
3167 sizeof(dd->
cspec->msix_entries[msixnum].name)
3176 arg = dd->
rcd[ctxt];
3179 if (qib_krcvq01_no_msi && ctxt < 2)
3182 handler = qib_7322pintr;
3184 sizeof(dd->
cspec->msix_entries[msixnum].name)
3189 dd->
cspec->msix_entries[msixnum].msix.vector,
3190 handler, 0, dd->
cspec->msix_entries[msixnum].name,
3198 "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3200 dd->
cspec->msix_entries[msixnum].msix.vector,
3202 qib_7322_nomsix(dd);
3205 dd->
cspec->msix_entries[msixnum].arg =
arg;
3210 mask &= ~(1ULL <<
lsb);
3211 redirect[
reg] |= ((
u64) msixnum) << sh;
3213 val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3215 if (firstcpu < nr_cpu_ids &&
3217 &dd->
cspec->msix_entries[msixnum].mask,
3219 if (handler == qib_7322pintr) {
3220 cpumask_set_cpu(currrcvcpu,
3221 dd->
cspec->msix_entries[msixnum].mask);
3222 currrcvcpu = cpumask_next(currrcvcpu,
3224 if (currrcvcpu >= nr_cpu_ids)
3225 currrcvcpu = secondcpu;
3227 cpumask_set_cpu(firstcpu,
3228 dd->
cspec->msix_entries[msixnum].mask);
3230 irq_set_affinity_hint(
3231 dd->
cspec->msix_entries[msixnum].msix.vector,
3232 dd->
cspec->msix_entries[msixnum].mask);
3251 static unsigned qib_7322_boardname(
struct qib_devdata *dd)
3262 n =
"InfiniPath_QLE7342_Emulation";
3265 n =
"InfiniPath_QLE7340";
3270 n =
"InfiniPath_QLE7342";
3274 n =
"InfiniPath_QMI7342";
3277 n =
"InfiniPath_Unsupported7342";
3278 qib_dev_err(dd,
"Unsupported version of QMH7342\n");
3282 n =
"InfiniPath_QMH7342";
3286 n =
"InfiniPath_QME7342";
3289 n =
"InfiniPath_QME7362";
3293 n =
"InfiniPath_QLE7342_TEST";
3297 n =
"InfiniPath_QLE73xy_UNKNOWN";
3298 qib_dev_err(dd,
"Unknown 7322 board type %u\n", boardid);
3306 qib_dev_err(dd,
"Failed allocation for board name: %s\n", n);
3311 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3319 "IB%u: Forced to single port mode by module parameter\n",
3331 static int qib_do_7322_reset(
struct qib_devdata *dd)
3335 int i, msix_entries, ret = 1;
3337 u8 int_line, clinesz;
3338 unsigned long flags;
3345 msix_entries = dd->
cspec->num_msix_entries;
3348 qib_7322_set_intr_state(dd, 0);
3351 qib_7322_nomsix(dd);
3353 msix_vecsave =
kmalloc(2 * dd->
cspec->num_msix_entries *
3358 msix_vecsave =
NULL;
3367 for (i = 0; i < msix_entries; i++) {
3368 u64 vecaddr, vecdata;
3369 vecaddr = qib_read_kreg64(dd, 2 * i +
3371 vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3374 msix_vecsave[2 *
i] = vecaddr;
3376 msix_vecsave[1 + 2 *
i] = vecdata & ~0x100000000ULL;
3380 dd->
pport->cpspec->ibdeltainprog = 0;
3381 dd->
pport->cpspec->ibsymdelta = 0;
3382 dd->
pport->cpspec->iblnkerrdelta = 0;
3383 dd->
pport->cpspec->ibmalfdelta = 0;
3396 for (i = 1; i <= 5; i++) {
3402 msleep(1000 + (1 + i) * 3000);
3415 "Failed to initialize after reset, unusable\n");
3425 for (i = 0; i < msix_entries; i++) {
3426 dd->
cspec->msix_entries[
i].msix.entry =
i;
3427 if (!msix_vecsave || !msix_vecsave[2 * i])
3429 qib_write_kreg(dd, 2 * i +
3431 msix_vecsave[2 * i]);
3432 qib_write_kreg(dd, 1 + 2 * i +
3434 msix_vecsave[1 + 2 * i]);
3440 write_7322_init_portregs(&dd->
pport[i]);
3441 write_7322_initregs(dd);
3444 &dd->
cspec->num_msix_entries,
3445 dd->
cspec->msix_entries))
3447 "Reset failed to setup PCIe or interrupts; continuing anyway\n");
3449 qib_setup_7322_interrupt(dd, 1);
3462 kfree(msix_vecsave);
3482 if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3483 qib_dev_err(dd,
"Physaddr %lx not 2KB aligned!\n",
3489 "Physical page address 0x%lx larger than supported\n",
3512 static void qib_7322_clear_tids(
struct qib_devdata *dd,
3516 unsigned long tidinv;
3529 ctxt * dd->
rcvtidcnt *
sizeof(*tidbase));
3551 static void qib_7322_tidtemplate(
struct qib_devdata *dd)
3578 static int qib_7322_get_base_info(
struct qib_ctxtdata *rcd,
3584 if (rcd->
dd->cspec->r1)
3595 u32 offset = qib_hdrget_offset(rhf_addr);
3604 static void qib_7322_config_ctxts(
struct qib_devdata *dd)
3606 unsigned long flags;
3610 dd->
cspec->numctxts = nchipctxts;
3627 else if (nctxts <= 10)
3629 else if (nctxts <= nchipctxts)
3631 }
else if (qib_cfgctxts < dd->num_pports)
3658 spin_unlock_irqrestore(&dd->
cspec->rcvmod_lock, flags);
3669 static int qib_7322_get_ib_cfg(
struct qib_pportdata *ppd,
int which)
3694 lsb =
SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3695 maskr =
SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3699 lsb =
SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3700 maskr =
SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3705 SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
3732 ret = (ppd->
cpspec->ibcctrl_a &
3733 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
3759 ret = (
int)((ppd->
cpspec->ibcctrl_b >> lsb) & maskr);
3768 #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
3769 #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
3770 | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
3772 static int qib_7322_set_ib_cfg(
struct qib_pportdata *ppd,
int which,
u32 val)
3778 unsigned long flags;
3810 maskr =
SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
3811 lsb =
SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
3826 if (val & (val - 1)) {
3836 lsb =
SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
3840 lsb =
SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3841 maskr =
SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3845 lsb =
SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3846 maskr =
SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3853 ppd->
cpspec->ibcctrl_a &=
3854 ~
SYM_MASK(IBCCtrlA_0, OverrunThreshold);
3856 SYM_LSB(IBCCtrlA_0, OverrunThreshold);
3867 ppd->
cpspec->ibcctrl_a &=
3868 ~
SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
3870 SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
3887 ppd->
cpspec->ibcctrl_a &=
3888 ~
SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3890 ppd->
cpspec->ibcctrl_a |=
3891 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3907 SYM_LSB(IBCCtrlA_0, MaxPktLen);
3914 switch (val & 0xffff0000) {
3917 ppd->
cpspec->ibmalfusesnap = 1;
3918 ppd->
cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
3920 if (!ppd->
cpspec->ibdeltainprog &&
3922 ppd->
cpspec->ibdeltainprog = 1;
3924 read_7322_creg32_port(ppd,
3926 ppd->
cpspec->iblnkerrsnap =
3927 read_7322_creg32_port(ppd,
3934 if (ppd->
cpspec->ibmalfusesnap) {
3935 ppd->
cpspec->ibmalfusesnap = 0;
3936 ppd->
cpspec->ibmalfdelta +=
3937 read_7322_creg32_port(ppd,
3949 qib_dev_err(dd,
"bad linkcmd req 0x%x\n", val >> 16);
3952 switch (val & 0xffff) {
3967 ppd->
cpspec->chase_end = 0;
3972 if (ppd->
cpspec->chase_timer.expires) {
3974 ppd->
cpspec->chase_timer.expires = 0;
3984 qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4009 if (ppd->
dd->cspec->r1) {
4011 ppd->
cpspec->ipg_tries = 0;
4019 ppd->
cpspec->ibcctrl_b &= ~(maskr <<
lsb);
4020 ppd->
cpspec->ibcctrl_b |= (((
u64) val & maskr) <<
lsb);
4033 if (!
strncmp(what,
"ibc", 3)) {
4037 qib_devinfo(ppd->
dd->pcidev,
"Enabling IB%u:%u IBC loopback\n",
4038 ppd->
dd->unit, ppd->
port);
4039 }
else if (!
strncmp(what,
"off", 3)) {
4045 "Disabling IB%u:%u IBC loopback (normal)\n",
4046 ppd->
dd->unit, ppd->
port);
4062 static void get_vl_weights(
struct qib_pportdata *ppd,
unsigned regno,
4067 for (i = 0; i < 16; i++, regno++, vl++) {
4068 u32 val = qib_read_kreg_port(ppd, regno);
4070 vl->
vl = (val >>
SYM_LSB(LowPriority0_0, VirtualLane)) &
4077 static void set_vl_weights(
struct qib_pportdata *ppd,
unsigned regno,
4082 for (i = 0; i < 16; i++, regno++, vl++) {
4085 val = ((vl->
vl &
SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4086 SYM_LSB(LowPriority0_0, VirtualLane)) |
4088 SYM_LSB(LowPriority0_0, Weight));
4089 qib_write_kreg_port(ppd, regno, val);
4093 unsigned long flags;
4103 static int qib_7322_get_ib_table(
struct qib_pportdata *ppd,
int which,
void *
t)
4120 static int qib_7322_set_ib_table(
struct qib_pportdata *ppd,
int which,
void *t)
4145 adjust_rcv_timeout(rcd, npkts);
4160 tail = qib_get_rcvhdrtail(rcd);
4163 return head ==
tail;
4166 #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4167 QIB_RCVCTRL_CTXT_DIS | \
4168 QIB_RCVCTRL_TIDFLOW_ENB | \
4169 QIB_RCVCTRL_TIDFLOW_DIS | \
4170 QIB_RCVCTRL_TAILUPD_ENB | \
4171 QIB_RCVCTRL_TAILUPD_DIS | \
4172 QIB_RCVCTRL_INTRAVAIL_ENB | \
4173 QIB_RCVCTRL_INTRAVAIL_DIS | \
4174 QIB_RCVCTRL_BP_ENB | \
4177 #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4178 QIB_RCVCTRL_CTXT_DIS | \
4179 QIB_RCVCTRL_PKEY_DIS | \
4180 QIB_RCVCTRL_PKEY_ENB)
4189 static void rcvctrl_7322_mod(
struct qib_pportdata *ppd,
unsigned int op,
4195 unsigned long flags;
4212 mask = (1ULL << dd->
ctxtcnt) - 1;
4215 mask = (1ULL <<
ctxt);
4220 (mask <<
SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4234 ~(mask <<
SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4252 if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->
rcd[
ctxt]) {
4267 if (ctxt < dd->first_user_ctxt)
4270 }
else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4276 if (op & QIB_RCVCTRL_CTXT_DIS) {
4289 for (i = 0; i < dd->
cfgctxts; i++) {
4299 spin_unlock_irqrestore(&dd->
cspec->rcvmod_lock, flags);
4314 #define SENDCTRL_COMMON_MODS (\
4315 QIB_SENDCTRL_CLEAR | \
4316 QIB_SENDCTRL_AVAIL_DIS | \
4317 QIB_SENDCTRL_AVAIL_ENB | \
4318 QIB_SENDCTRL_AVAIL_BLIP | \
4319 QIB_SENDCTRL_DISARM | \
4320 QIB_SENDCTRL_DISARM_ALL | \
4321 QIB_SENDCTRL_SEND_ENB)
4323 #define SENDCTRL_PORT_MODS (\
4324 QIB_SENDCTRL_CLEAR | \
4325 QIB_SENDCTRL_SEND_ENB | \
4326 QIB_SENDCTRL_SEND_DIS | \
4332 u64 tmp_dd_sendctrl;
4333 unsigned long flags;
4363 tmp_dd_sendctrl &= ~
SYM_MASK(SendCtrl, SendBufAvailUpd);
4364 for (i = 0; i < last; i++) {
4380 SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4381 SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4382 SYM_MASK(SendCtrl_0, TxeBypassIbc);
4383 qib_write_kreg_port(ppd,
krp_sendctrl, tmp_ppd_sendctrl);
4390 tmp_dd_sendctrl |=
SYM_MASK(SendCtrl, Disarm) |
4392 SYM_LSB(SendCtrl, DisarmSendBuf));
4395 tmp_dd_sendctrl &= ~
SYM_MASK(SendCtrl, SendBufAvailUpd);
4407 if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4414 if (op & QIB_SENDCTRL_FLUSH) {
4430 #define _PORT_VIRT_FLAG 0x8000U
4431 #define _PORT_64BIT_FLAG 0x10000U
4432 #define _PORT_CNTR_IDXMASK 0x7fffU
4445 static const u32 xlator[] = {
4490 "Unimplemented portcounter %u\n", reg);
4503 if (!rcd || rcd->
ppd != ppd)
4518 ret = qib_read_kreg_port(ppd, creg);
4527 ret = read_7322_creg_port(ppd, creg);
4529 ret = read_7322_creg32_port(ppd, creg);
4531 if (ppd->
cpspec->ibdeltainprog)
4532 ret -= ret - ppd->
cpspec->ibsymsnap;
4533 ret -= ppd->
cpspec->ibsymdelta;
4535 if (ppd->
cpspec->ibdeltainprog)
4536 ret -= ret - ppd->
cpspec->iblnkerrsnap;
4537 ret -= ppd->
cpspec->iblnkerrdelta;
4539 ret -= ppd->
cpspec->ibmalfdelta;
4541 ret += ppd->
cpspec->iblnkdowndelta;
4559 static const char cntr7322names[] =
4585 static const u32 cntr7322indices[] = {
4592 cr_base_egrovfl + 1,
4593 cr_base_egrovfl + 2,
4594 cr_base_egrovfl + 3,
4595 cr_base_egrovfl + 4,
4596 cr_base_egrovfl + 5,
4597 cr_base_egrovfl + 6,
4598 cr_base_egrovfl + 7,
4599 cr_base_egrovfl + 8,
4600 cr_base_egrovfl + 9,
4601 cr_base_egrovfl + 10,
4602 cr_base_egrovfl + 11,
4603 cr_base_egrovfl + 12,
4604 cr_base_egrovfl + 13,
4605 cr_base_egrovfl + 14,
4606 cr_base_egrovfl + 15,
4607 cr_base_egrovfl + 16,
4608 cr_base_egrovfl + 17,
4616 static const char portcntr7322names[] =
4656 static const u32 portcntr7322indices[] = {
4697 static void init_7322_cntrnames(
struct qib_devdata *dd)
4702 for (i = 0, s = (
char *)cntr7322names; s && j <= dd->
cfgctxts;
4705 if (!j && !
strncmp(
"Ctxt0EgrOvfl", s + 1, 12))
4714 dd->
cspec->cntrnamelen =
sizeof(cntr7322names) - 1;
4716 dd->
cspec->cntrnamelen = 1 + s - cntr7322names;
4719 if (!dd->
cspec->cntrs)
4720 qib_dev_err(dd,
"Failed allocation for counters\n");
4722 for (i = 0, s = (
char *)portcntr7322names;
s; i++)
4724 dd->
cspec->nportcntrs = i - 1;
4725 dd->
cspec->portcntrnamelen =
sizeof(portcntr7322names) - 1;
4729 if (!dd->
pport[i].cpspec->portcntrs)
4731 "Failed allocation for portcounters\n");
4741 ret = dd->
cspec->cntrnamelen;
4745 *namep = (
char *) cntr7322names;
4750 ret = dd->
cspec->ncntrs *
sizeof(
u64);
4751 if (!cntr || pos >= ret) {
4757 for (i = 0; i < dd->
cspec->ncntrs; i++)
4759 *cntr++ = read_7322_creg(dd,
4760 cntr7322indices[i] &
4763 *cntr++ = read_7322_creg32(dd,
4764 cntr7322indices[i]);
4771 char **namep,
u64 **cntrp)
4776 ret = dd->
cspec->portcntrnamelen;
4780 *namep = (
char *)portcntr7322names;
4786 ret = dd->
cspec->nportcntrs *
sizeof(
u64);
4787 if (!cntr || pos >= ret) {
4793 for (i = 0; i < dd->
cspec->nportcntrs; i++) {
4795 *cntr++ = qib_portcntr_7322(ppd,
4796 portcntr7322indices[i] &
4799 *cntr++ = read_7322_creg_port(ppd,
4800 portcntr7322indices[i] &
4803 *cntr++ = read_7322_creg32_port(ppd,
4804 portcntr7322indices[i]);
4822 static void qib_get_7322_faststats(
unsigned long opaque)
4826 unsigned long flags;
4830 for (pidx = 0; pidx < dd->
num_pports; ++pidx) {
4831 ppd = dd->
pport + pidx;
4850 traffic_wds -= ppd->
dd->traffic_wds;
4851 ppd->
dd->traffic_wds += traffic_wds;
4854 spin_unlock_irqrestore(&ppd->
dd->eep_st_lock, flags);
4859 ppd->
cpspec->qdr_dfe_time &&
4861 ppd->
cpspec->qdr_dfe_on = 0;
4864 ppd->
dd->cspec->r1 ?
4876 static int qib_7322_intr_fallback(
struct qib_devdata *dd)
4878 if (!dd->
cspec->num_msix_entries)
4882 "MSIx interrupt not detected, trying INTx interrupts\n");
4883 qib_7322_nomsix(dd);
4885 qib_setup_7322_interrupt(dd, 0);
4898 static void qib_7322_mini_pcs_reset(
struct qib_pportdata *ppd)
4902 const u64 reset_bits =
SYM_MASK(IBPCSConfig_0, xcv_rreset) |
4903 SYM_MASK(IBPCSConfig_0, xcv_treset) |
4904 SYM_MASK(IBPCSConfig_0, tx_rx_reset);
4919 SYM_MASK(HwErrClear, statusValidNoEopClear));
4942 control = qib_7322_setpbc_control(ppd, len, 0, 15);
4943 pbc = ((
u64) control << 32) | len;
4944 while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
4970 static void qib_autoneg_7322_send(
struct qib_pportdata *ppd,
int which)
4975 static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
4976 static u32 madpayload_start[0x40] = {
4977 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
4978 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
4979 0x1, 0x1388, 0x15e, 0x1,
4981 static u32 madpayload_done[0x40] = {
4982 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
4983 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
4984 0x40000001, 0x1388, 0x15e,
4991 for (i = 0; i < hcnt; i++) {
4995 for (i = 0; i < dcnt; i++) {
4997 madpayload_start[
i] = dw;
4999 madpayload_done[
i] = dw;
5004 data = which ? madpayload_done : madpayload_start;
5006 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5009 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5035 if (speed & (speed - 1))
5045 if (newctrlb == ppd->
cpspec->ibcctrl_b)
5048 ppd->
cpspec->ibcctrl_b = newctrlb;
5061 unsigned long flags;
5066 qib_autoneg_7322_send(ppd, 0);
5068 qib_7322_mini_pcs_reset(ppd);
5084 unsigned long flags;
5087 autoneg_work.work)->ppd;
5096 for (i = 0; i < 25; i++) {
5113 qib_7322_mini_pcs_reset(ppd);
5120 qib_7322_mini_pcs_reset(ppd);
5137 ppd->
cpspec->autoneg_tries = 0;
5163 if (IS_ERR(send_buf))
5174 ibp->
smi_ah = to_iah(ah);
5182 smp = send_buf->
mad;
5196 delay = 2 << ppd->
cpspec->ipg_tries;
5205 static void ipg_7322_work(
struct work_struct *work)
5210 ipg_work.work)->ppd;
5212 && ++ppd->
cpspec->ipg_tries <= 10)
5216 static u32 qib_7322_iblink_state(
u64 ibcs)
5241 static u8 qib_7322_phys_portstate(
u64 ibcs)
5243 u8 state = (
u8)
SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5244 return qib_7322_physportstate[
state];
5247 static int qib_7322_ib_updown(
struct qib_pportdata *ppd,
int ibup,
u64 ibcs)
5249 int ret = 0, symadj = 0;
5250 unsigned long flags;
5258 if (ibcs &
SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5261 }
else if (ibcs &
SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5268 if (ibcs &
SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5280 ppd->
cpspec->ipg_tries = 0;
5282 (
SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5283 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5294 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5295 reset_tx_deemphasis_override));
5298 qib_7322_mini_pcs_reset(ppd);
5306 if (__qib_sdma_running(ppd))
5309 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5312 if (clr == ppd->
cpspec->iblnkdownsnap)
5313 ppd->
cpspec->iblnkdowndelta++;
5322 ++ppd->
cpspec->autoneg_tries;
5323 if (!ppd->
cpspec->ibdeltainprog) {
5324 ppd->
cpspec->ibdeltainprog = 1;
5325 ppd->
cpspec->ibsymdelta +=
5326 read_7322_creg32_port(ppd,
5329 ppd->
cpspec->iblnkerrdelta +=
5330 read_7322_creg32_port(ppd,
5332 ppd->
cpspec->iblnkerrsnap;
5334 try_7322_autoneg(ppd);
5338 qib_autoneg_7322_send(ppd, 1);
5340 qib_7322_mini_pcs_reset(ppd);
5349 ppd->
cpspec->autoneg_tries = 0;
5369 if (ppd->
dd->cspec->r1 && ppd->
cpspec->ipg_tries <= 10)
5371 if (!ppd->
cpspec->recovery_init)
5372 setup_7322_link_recovery(ppd, 0);
5376 ppd->
cpspec->ibmalfusesnap = 0;
5377 ppd->
cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5381 ppd->
cpspec->iblnkdownsnap =
5383 if (ppd->
cpspec->ibdeltainprog) {
5384 ppd->
cpspec->ibdeltainprog = 0;
5385 ppd->
cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5387 ppd->
cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5391 !ppd->
cpspec->ibdeltainprog &&
5393 ppd->
cpspec->ibdeltainprog = 1;
5394 ppd->
cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5396 ppd->
cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5401 qib_setup_7322_setextled(ppd, ibup);
5414 u64 read_val, new_out;
5415 unsigned long flags;
5424 new_out = (dd->
cspec->gpio_out & ~mask) | out;
5428 dd->
cspec->gpio_out = new_out;
5429 spin_unlock_irqrestore(&dd->
cspec->gpio_lock, flags);
5440 return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5444 static int qib_7322_eeprom_wen(
struct qib_devdata *dd,
int wen)
5451 gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5453 return prev_wen & 1;
5461 static void get_7322_chip_params(
struct qib_devdata *dd)
5514 static void qib_7322_set_baseaddrs(
struct qib_devdata *dd)
5526 dd->
pport[0].cpspec->kpregbase =
5528 dd->
pport[1].cpspec->kpregbase =
5531 dd->
pport[0].cpspec->cpregbase =
5534 dd->
pport[1].cpspec->cpregbase =
5544 #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
5545 SYM_MASK(SendCtrl_0, SDmaEnable) | \
5546 SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
5547 SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5548 SYM_MASK(SendCtrl_0, SDmaHalt) | \
5549 SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
5550 SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5554 u64 *data,
u64 mask,
int only_32)
5556 unsigned long flags;
5560 u64 local_data, all_bits;
5568 for (pidx = 0; pidx < dd->
num_pports; ++pidx) {
5572 ppd = dd->
pport + pidx;
5573 if (!ppd->
cpspec->kpregbase)
5577 psoffs = (
u32) (psptr - dd->
kregbase) *
sizeof(*psptr);
5587 idx = offs /
sizeof(
u64);
5594 if (!ppd || (mask & all_bits) != all_bits) {
5603 local_data = (
u64)qib_read_kreg32(dd, idx);
5605 local_data = qib_read_kreg64(dd, idx);
5606 *data = (local_data & ~mask) | (*data & mask);
5626 qib_write_kreg(dd, idx, tval);
5630 return only_32 ? 4 : 8;
5634 sendctrl_hook,
KREG_IDX(SendCtrl_0) *
sizeof(
u64),
5639 sendctrl_hook,
KREG_IDX(SendCtrl_1) *
sizeof(
u64),
5643 static ushort sdma_fetch_prio = 8;
5648 static void init_txdds_table(
struct qib_pportdata *ppd,
int override);
5650 static void qsfp_7322_event(
struct work_struct *work)
5654 unsigned long pwrup;
5655 unsigned long flags;
5668 ppd->
cpspec->qsfp_data.modpresent = 0;
5670 qib_set_ib_7322_lstate(ppd, 0,
5695 if (!ret && !ppd->
dd->cspec->r1) {
5698 else if (qd->
cache.atten[1] >= qib_long_atten &&
5705 ibsd_wr_allchans(ppd, 13, (le2 << 7),
BMASK(9, 7));
5712 init_txdds_table(ppd, 0);
5717 if (!ppd->
cpspec->qsfp_data.modpresent &&
5719 ppd->
cpspec->qsfp_data.modpresent = 1;
5720 qib_set_ib_7322_lstate(ppd, 0,
5735 unsigned long flags;
5744 dd->
cspec->extctrl |= (mod_prs_bit <<
SYM_LSB(EXTCtrl, GPIOInvert));
5745 dd->
cspec->gpio_mask |= mod_prs_bit;
5748 spin_unlock_irqrestore(&dd->
cspec->gpio_lock, flags);
5764 static void set_no_qsfp_atten(
struct qib_devdata *dd,
int change)
5772 str = txselect_list;
5776 for (pidx = 0; pidx < dd->
num_pports; ++pidx)
5777 dd->
pport[pidx].cpspec->no_eep = deflt;
5783 while (*nxt && nxt[1]) {
5786 if (nxt == str || !*nxt || *nxt !=
',') {
5787 while (*nxt && *nxt++ !=
' ')
5793 if (nxt == str || *nxt !=
'=') {
5794 while (*nxt && *nxt++ !=
' ')
5801 while (*nxt && *nxt++ !=
' ')
5805 if (val >= txdds_size)
5809 if (*nxt ==
',' && nxt[1]) {
5813 while (*nxt && *nxt++ !=
' ')
5826 ppd->
cpspec->h1_val = h1;
5828 init_txdds_table(ppd, 1);
5833 qib_set_ib_7322_lstate(ppd, 0,
5840 if (change && !any) {
5845 for (pidx = 0; pidx < dd->
num_pports; ++pidx)
5846 if (dd->
pport[pidx].link_speed_supported)
5847 init_txdds_table(&dd->
pport[pidx], 0);
5852 static int setup_txselect(
const char *str,
struct kernel_param *kp)
5859 pr_info(
"txselect_values string too long\n");
5862 ret = kstrtoul(str, 0, &val);
5865 pr_info(
"txselect_values must start with a number < %d\n",
5867 return ret ? ret : -
EINVAL;
5870 strcpy(txselect_list, str);
5873 set_no_qsfp_atten(dd, 1);
5892 if (val != dd->pioavailregs_phys) {
5894 "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
5895 (
unsigned long) dd->pioavailregs_phys,
5896 (
unsigned long long) val);
5916 set_no_qsfp_atten(dd, 0);
5917 for (n = 0; n < dd->num_pports; ++
n) {
5921 sdma_fetch_prio & 0xf);
5924 qib_init_7322_qsfp(ppd);
5933 #define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
5935 #define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
5936 #define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
5937 MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
5946 static void write_7322_init_portregs(
struct qib_pportdata *ppd)
5953 for (i = 1; i < 8; i++)
5965 val &= ~
SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
5967 SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
5978 SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
5987 if (ppd->
dd->cspec->r1)
5998 static void write_7322_initregs(
struct qib_devdata *dd)
6005 qib_write_kreg(dd,
KREG_IDX(RcvQPMulticastContext_1), 1);
6007 for (pidx = 0; pidx < dd->
num_pports; ++pidx) {
6009 unsigned long flags;
6012 !dd->
pport[pidx].link_speed_supported)
6015 ppd = &dd->
pport[pidx];
6020 spin_unlock_irqrestore(&dd->
cspec->rcvmod_lock, flags);
6029 for (i = 0; i < 32; ) {
6038 val |= ctxt << (5 * (i % 6));
6041 qib_write_kreg_port(ppd, regno, val);
6046 qib_write_kreg_port(ppd, regno, val);
6056 dd->
cspec->rcvavail_timeout[
i] = rcv_int_timeout;
6066 for (i = 0; i < dd->
cfgctxts; i++) {
6081 static int qib_init_7322_variables(
struct qib_devdata *dd)
6086 u32 sbufs, updthresh;
6098 ppd[0].
cpspec->ppd = &ppd[0];
6099 ppd[1].
cpspec->ppd = &ppd[1];
6107 if ((dd->
revision & 0xffffffffU) == 0xffffffffU) {
6109 "Revision register read failure, giving up initialization\n");
6119 get_7322_chip_params(dd);
6120 features = qib_7322_boardname(dd);
6132 if (!dd->
cspec->sendchkenable || !dd->
cspec->sendgrhchk ||
6133 !dd->
cspec->sendibchk) {
6134 qib_dev_err(dd,
"Failed allocation for hdrchk bitmaps\n");
6160 qib_7322_set_baseaddrs(dd);
6168 dd->
cspec->hwerrmask = ~0ULL;
6171 dd->
cspec->hwerrmask &=
6172 ~(
SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6173 SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6189 IBSerdesPClkNotDetectMask_0)
6191 SDmaMemReadErrMask_0));
6192 dd->
cspec->int_enable_mask &= ~(
6193 SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6194 SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6195 SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6198 SYM_MASK(IntMask, SendDoneIntMask_0));
6204 IBSerdesPClkNotDetectMask_1)
6206 SDmaMemReadErrMask_1));
6207 dd->
cspec->int_enable_mask &= ~(
6208 SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6209 SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6210 SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6213 SYM_MASK(IntMask, SendDoneIntMask_1));
6240 "Invalid num_vls %u, using 4 VLs\n",
6252 "Invalid num_vls %u for MTU %d "
6265 if (ppd->
dd->cspec->r1)
6276 "IB%u:%u: Unknown mezzanine card type\n",
6290 write_7322_init_portregs(ppd);
6310 qib_7322_tidtemplate(dd);
6321 dd->
stats_timer.function = qib_get_7322_faststats;
6328 qib_7322_config_ctxts(dd);
6354 qib_7322_set_baseaddrs(dd);
6360 qib_dev_err(dd,
"No ports enabled, giving up initialization\n");
6364 write_7322_initregs(dd);
6366 init_7322_cntrnames(dd);
6382 sbufs = updthresh > 3 ? updthresh : 3;
6384 dd->
cspec->sdmabufcnt = 0;
6388 dd->
cspec->sdmabufcnt;
6390 dd->
cspec->lastbuf_for_pio--;
6403 dd->
cspec->updthresh_dflt = updthresh;
6404 dd->
cspec->updthresh = updthresh;
6408 <<
SYM_LSB(SendCtrl, AvailUpdThld)) |
6409 SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6435 last = dd->
cspec->lastbuf_for_pio;
6450 static void qib_sdma_set_7322_desc_cnt(
struct qib_pportdata *ppd,
unsigned cnt)
6457 .go_s99_running_tofalse = 1,
6498 .go_s99_running_totrue = 1,
/*
 * Early (pre-start) SDMA init for one port: install the 7322-specific
 * state-transition action table on the port's sdma state machine.
 */
7502 static void qib_7322_sdma_init_early(
struct qib_pportdata *ppd)
7504 ppd->
sdma_state.set_state_action = sdma_7322_action_table;
6510 unsigned lastbuf, erstbuf;
6511 u64 senddmabufmask[3] = { 0 };
6515 qib_sdma_7322_setlengen(ppd);
6516 qib_sdma_update_7322_tail(ppd, 0);
6524 n = dd->
cspec->sdmabufcnt;
6527 dd->
cspec->sdmabufcnt);
6528 lastbuf = erstbuf +
n;
6532 for (; erstbuf < lastbuf; ++erstbuf) {
6534 unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6537 senddmabufmask[
word] |= 1ULL <<
bit;
6556 use_dmahead = __qib_sdma_running(ppd) &&
6559 hwhead = use_dmahead ?
6567 if (swhead < swtail)
6569 sane = (hwhead >= swhead) & (hwhead <= swtail);
6570 else if (swhead > swtail)
6572 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6576 sane = (hwhead == swhead);
6595 return (hwstatus &
SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6596 (hwstatus &
SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6597 !(hwstatus &
SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6598 !(hwstatus &
SYM_MASK(SendDmaStatus_0, ScbEmpty));
6611 u8 rcv_mult = ib_rate_to_delay[srate];
6614 ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6633 static void qib_7322_initvl15_bufs(
struct qib_devdata *dd)
6642 static void qib_7322_init_ctxt(
struct qib_ctxtdata *rcd)
6644 if (rcd->
ctxt < NUM_IB_PORTS) {
6645 if (rcd->
dd->num_pports > 1) {
/*
 * NOTE(review): poll-count limit, presumably bounding the wait for a TX
 * buffer check/enable change to settle — confirm against the loop that
 * uses it in the txchk-change path.
 */
6659 #define QTXSLEEPS 5000
6664 const int last = start + len - 1;
6668 unsigned long flags;
6672 int cstart, previ = -1;
6682 for (cstart = start; cstart <= last; cstart++) {
6686 shadow = (
unsigned long)
6692 % BITS_PER_LONG, &shadow))
6713 for (i = start; i <= last; i++)
6725 for (i = start; i <= last; i++)
6731 for (i = start; i <= last; i++) {
6738 dd->
cspec->updthresh != dd->
cspec->updthresh_dflt
6740 if (dd->
rcd[i] && dd->
rcd[i]->subctxt_cnt &&
6741 ((dd->
rcd[i]->piocnt / dd->
rcd[i]->subctxt_cnt) - 1)
6742 < dd->
cspec->updthresh_dflt)
6744 spin_unlock_irqrestore(&dd->
uctxt_lock, flags);
6747 dd->
cspec->updthresh = dd->
cspec->updthresh_dflt;
6751 SYM_LSB(SendCtrl, AvailUpdThld);
6759 for (i = start; i <= last; i++) {
6771 <<
SYM_LSB(SendCtrl, AvailUpdThld);
6782 for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++
i)
6784 dd->
cspec->sendchkenable[i]);
6786 for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++
i) {
6788 dd->
cspec->sendgrhchk[i]);
6790 dd->
cspec->sendibchk[i]);
6808 static int qib_7322_tempsense_rd(
struct qib_devdata *dd,
int regnum)
6829 u32 tabsize, actual_cnt = 0;
6860 dd->
f_reset = qib_do_7322_reset;
6899 ret = qib_init_7322_variables(dd);
6913 for (i = 0; i < tabsize; i++)
6920 if (qib_krcvq01_no_msi)
6923 tabsize = actual_cnt;
6926 if (!dd->
cspec->msix_entries) {
6930 for (i = 0; i < tabsize; i++)
6931 dd->
cspec->msix_entries[i].msix.entry = i;
6935 "Failed to setup PCIe or interrupts; continuing anyway\n");
6937 dd->
cspec->num_msix_entries = tabsize;
6940 qib_setup_7322_interrupt(dd, 1);
/*
 * LSB (shift) positions of the fields packed into one DDS table register
 * entry (tx serdes drive/de-emphasis settings): amplitude, main cursor,
 * post-cursor, extra pre-cursor, and pre-cursor.  The packed value is
 * written as a single kreg entry (see the pack_ent write below).
 */
6963 #define DDS_ENT_AMP_LSB 14
6964 #define DDS_ENT_MAIN_LSB 9
6965 #define DDS_ENT_POST_LSB 5
6966 #define DDS_ENT_PRE_XTRA_LSB 3
6967 #define DDS_ENT_PRE_LSB 0
6995 qib_write_kreg(dd, regidx, pack_ent);
7002 { 0x41, 0x50, 0x48 },
"584470002 ",
7003 { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
7006 { 0x41, 0x50, 0x48 },
"584470004 ",
7007 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
7010 { 0x00, 0x90, 0x65 },
"FCBG410QB1C03-QL",
7011 { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
7014 { 0x00, 0x90, 0x65 },
"FCBG410QB1C30-QL",
7015 { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
7018 { 0x00, 0x90, 0x65 },
NULL,
7019 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
7022 { 0x00, 0x21, 0x77 },
"QSN3300-1 ",
7023 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
7026 { 0x00, 0x21, 0x77 },
"QSN3300-2 ",
7027 { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
7030 { 0x00, 0x21, 0x77 },
"QSN3800-1 ",
7031 { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
7034 { 0x00, 0x21, 0x77 },
"QSN3800-3 ",
7035 { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
7038 { 0x00, 0x21, 0x77 },
"QSN7000-5 ",
7039 { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
7042 { 0x00, 0x21, 0x77 },
"QSN7000-7 ",
7043 { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
7046 { 0x00, 0x21, 0x77 },
"QSN7600-5 ",
7047 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
7050 { 0x00, 0x21, 0x77 },
"QSN7600-7 ",
7051 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
7054 { 0x00, 0x30, 0xB4 },
"QLX4000CQSFP1224",
7055 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
7058 { 0x00, 0x30, 0xB4 },
"QLX4000CQSFP1028",
7059 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
7062 { 0x00, 0x30, 0xB4 },
"QLX4000CQSFP0730",
7063 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
7066 { 0x00, 0x30, 0xB4 },
"QLX4000CQSFP0532",
7067 { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
7070 { 0x00, 0x30, 0xB4 },
NULL,
7071 { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
7074 { 0x00, 0x25, 0x63 },
NULL,
7075 { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
7078 { 0x00, 0x09, 0x3A },
"74763-0025 ",
7079 { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
7082 { 0x00, 0x09, 0x3A },
"74757-2201 ",
7083 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
7223 return txdds + atten;
7233 const struct txdds_ent **qdr_dds,
int override)
7239 for (idx = 0; !
override && idx <
ARRAY_SIZE(vendor_txdds); ++
idx) {
7255 *sdr_dds = txdds_sdr + ppd->
dd->board_atten;
7256 *ddr_dds = txdds_ddr + ppd->
dd->board_atten;
7257 *qdr_dds = txdds_qdr + ppd->
dd->board_atten;
7263 *sdr_dds = get_atten_table(txdds_sdr, qd->
atten[0]);
7264 *ddr_dds = get_atten_table(txdds_ddr, qd->
atten[0]);
7265 *qdr_dds = get_atten_table(txdds_qdr, qd->
atten[1]);
7274 idx = ppd->
cpspec->no_eep;
7275 *sdr_dds = &txdds_sdr[
idx];
7276 *ddr_dds = &txdds_ddr[
idx];
7277 *qdr_dds = &txdds_qdr[
idx];
7281 *sdr_dds = &txdds_extra_sdr[
idx];
7282 *ddr_dds = &txdds_extra_ddr[
idx];
7283 *qdr_dds = &txdds_extra_qdr[
idx];
7288 pr_info(
"IB%u:%u use idx %u into txdds_mfg\n",
7289 ppd->
dd->unit, ppd->
port, idx);
7290 *sdr_dds = &txdds_extra_mfg[
idx];
7291 *ddr_dds = &txdds_extra_mfg[
idx];
7292 *qdr_dds = &txdds_extra_mfg[
idx];
7295 *sdr_dds = txdds_sdr + qib_long_atten;
7296 *ddr_dds = txdds_ddr + qib_long_atten;
7297 *qdr_dds = txdds_qdr + qib_long_atten;
7301 static void init_txdds_table(
struct qib_pportdata *ppd,
int override)
7303 const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7308 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds,
override);
7315 set_txdds(ppd, 0, sdr_dds);
7324 write_tx_serdes_param(ppd, dds);
7329 set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7331 single_ent ? ddr_dds : txdds_ddr + idx);
7333 single_ent ? qdr_dds : txdds_qdr + idx);
/*
 * AHB bridge access constants, used by ahb_mod()/ibsd_wr_allchans() below
 * to reach the serdes registers: kreg indices of the access-control and
 * transaction registers, plus field masks/shifts within the transaction
 * register (ready flag, address, data, write-not-read).
 */
7337 #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7338 #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7339 #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7340 #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7341 #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7342 #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
/* polls of AHB_TRANS_RDY before the "No ahb_rdy in %d tries" failure */
7343 #define AHB_TRANS_TRIES 10
7355 u32 ret = 0xBAD0BAD;
7360 acc = (quad << 1) | 1;
7368 if (tries >= AHB_TRANS_TRIES) {
7369 qib_dev_err(dd,
"No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7376 sz_mask = (1
UL << ((quad == 1) ? 32 : 16)) - 1;
7377 wr_data = data & mask & sz_mask;
7378 if ((~mask & sz_mask) != 0) {
7387 if (tries >= AHB_TRANS_TRIES) {
7395 wr_data |= (rd_data & ~mask & sz_mask);
7399 if (mask & sz_mask) {
7410 if (tries >= AHB_TRANS_TRIES) {
7422 static void ibsd_wr_allchans(
struct qib_pportdata *ppd,
int addr,
unsigned data,
7430 ahb_mod(dd,
IBSD(ppd->
hw_pidx), (chan + (chan >> 1)), addr,
7432 rbc = ahb_mod(dd,
IBSD(ppd->
hw_pidx), (chan + (chan >> 1)),
7437 static void serdes_7322_los_enable(
struct qib_pportdata *ppd,
int enable)
7440 u8 state =
SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7442 if (enable && !state) {
7443 pr_info(
"IB%u:%u Turning LOS on\n",
7444 ppd->
dd->unit, ppd->
port);
7445 data |=
SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7446 }
else if (!enable && state) {
7447 pr_info(
"IB%u:%u Turning LOS off\n",
7448 ppd->
dd->unit, ppd->
port);
7449 data &= ~
SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7457 if (ppd->
dd->cspec->r1)
7458 ret = serdes_7322_init_old(ppd);
7460 ret = serdes_7322_init_new(ppd);
7472 init_txdds_table(ppd, 0);
7476 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7477 reset_tx_deemphasis_override));
7481 ibsd_wr_allchans(ppd, 2, 0,
BMASK(11, 9));
7484 ibsd_wr_allchans(ppd, 11, (1 << 11),
BMASK(12, 11));
7486 ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7490 ibsd_wr_allchans(ppd, 13, (le_val << 7),
BMASK(9, 7));
7494 ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7497 ahb_mod(ppd->
dd,
IBSD(ppd->
hw_pidx), 5, 10, 0 << 14, 1 << 14);
7500 ibsd_wr_allchans(ppd, 5, (0 << 8),
BMASK(9, 8));
7516 ahb_mod(ppd->
dd,
IBSD(ppd->
hw_pidx), 5, 9, 1 << 15, 1 << 15);
7519 ibsd_wr_allchans(ppd, 14, (1 << 3),
BMASK(5, 3));
7520 ibsd_wr_allchans(ppd, 20, (2 << 10),
BMASK(12, 10));
7521 ibsd_wr_allchans(ppd, 20, (4 << 13),
BMASK(15, 13));
7523 serdes_7322_los_enable(ppd, 1);
7526 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7529 ibsd_wr_allchans(ppd, 16, 0 << 0,
BMASK(1, 0));
7532 le_val = (ppd->
dd->cspec->r1 ||
IS_QME(ppd->
dd)) ? 0xb6c0 : 0x6bac;
7533 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7542 ppd->
dd->cspec->r1 ?
7544 ppd->
cpspec->qdr_dfe_on = 1;
7547 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7550 ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7552 if (!ppd->
dd->cspec->r1) {
7553 ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7554 ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7558 ibsd_wr_allchans(ppd, 2, 15 << 5,
BMASK(8, 5));
7566 u32 le_val, rxcaldone;
7570 ahb_mod(ppd->
dd,
IBSD(ppd->
hw_pidx), 5, 10, 0 << 14, 1 << 14);
7574 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7575 reset_tx_deemphasis_override));
7580 ibsd_wr_allchans(ppd, 1, 0,
BMASK(9, 1));
7582 ibsd_wr_allchans(ppd, 13, 0,
BMASK(5, 5));
7584 ibsd_wr_allchans(ppd, 1, 0,
BMASK(15, 15));
7586 ibsd_wr_allchans(ppd, 13, 0,
BMASK(6, 6));
7588 ibsd_wr_allchans(ppd, 5, 0,
BMASK(0, 0));
7590 ibsd_wr_allchans(ppd, 12, 0,
BMASK(12, 12));
7592 ibsd_wr_allchans(ppd, 2, 0,
BMASK(3, 3));
7594 ibsd_wr_allchans(ppd, 2, 0,
BMASK(4, 4));
7596 ibsd_wr_allchans(ppd, 13, 0,
BMASK(13, 13));
7598 ibsd_wr_allchans(ppd, 4, 0,
BMASK(10, 10));
7600 ibsd_wr_allchans(ppd, 12, 0,
BMASK(4, 4));
7602 ibsd_wr_allchans(ppd, 2, (1 << 15),
BMASK(15, 15));
7604 ibsd_wr_allchans(ppd, 5, 0,
BMASK(9, 8));
7606 ibsd_wr_allchans(ppd, 12, (1 << 5),
BMASK(5, 5));
7608 ibsd_wr_allchans(ppd, 2, (4 << 12),
BMASK(14, 12));
7610 ibsd_wr_allchans(ppd, 16, 0,
BMASK(1, 0));
7612 if (!ppd->
dd->cspec->r1) {
7613 ibsd_wr_allchans(ppd, 12, 1 << 12,
BMASK(12, 12));
7614 ibsd_wr_allchans(ppd, 12, 2 << 8,
BMASK(11, 8));
7616 ibsd_wr_allchans(ppd, 19, (3 << 11),
BMASK(13, 11));
7627 ibsd_wr_allchans(ppd, 0, 0,
BMASK(15, 13));
7630 ibsd_wr_allchans(ppd, 0, (1 << 14),
BMASK(14, 14));
7633 ibsd_wr_allchans(ppd, 0, (1 << 13),
BMASK(13, 13));
7650 ahb_mod(ppd->
dd,
IBSD(ppd->
hw_pidx), 5, 9, 1 << 15, 1 << 15);
7653 ibsd_wr_allchans(ppd, 14, (1 << 3),
BMASK(5, 3));
7654 ibsd_wr_allchans(ppd, 20, (2 << 10),
BMASK(12, 10));
7655 ibsd_wr_allchans(ppd, 20, (4 << 13),
BMASK(15, 13));
7658 serdes_7322_los_enable(ppd, 1);
7660 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7664 ibsd_wr_allchans(ppd, 15, 1,
BMASK(0, 0));
7666 ibsd_wr_allchans(ppd, 12, (1 << 4),
BMASK(4, 4));
7669 ibsd_wr_allchans(ppd, 4, (1 << 10),
BMASK(10, 10));
7675 (chan + (chan >> 1)),
7677 if ((~rxcaldone & (
u32)
BMASK(9, 9)) == 0 &&
7678 (~chan_done & (1 << chan)) == 0)
7679 chan_done &= ~(1 << chan);
7683 pr_info(
"Serdes %d calibration not done after .5 sec: 0x%x\n",
7688 (chan + (chan >> 1)),
7690 if ((~rxcaldone & (
u32)
BMASK(10, 10)) == 0)
7691 pr_info(
"Serdes %d chan %d calibration failed\n",
7697 ibsd_wr_allchans(ppd, 4, 0,
BMASK(10, 10));
7703 ibsd_wr_allchans(ppd, 13, (le_val << 7),
BMASK(9, 7));
7705 ibsd_wr_allchans(ppd, 3, (7 << 5),
BMASK(7, 5));
7707 ibsd_wr_allchans(ppd, 13, (1 << 6),
BMASK(6, 6));
7710 ibsd_wr_allchans(ppd, 1, 1,
BMASK(9, 1));
7712 le_val = (ppd->
dd->cspec->r1 ||
IS_QME(ppd->
dd)) ? 0xb6c0 : 0x6bac;
7713 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7715 ibsd_wr_allchans(ppd, 5, 0,
BMASK(0, 0));
7718 ibsd_wr_allchans(ppd, 2, (15 << 5),
BMASK(8, 5));
7720 ibsd_wr_allchans(ppd, 2, (1 << 4),
BMASK(4, 4));
7722 ibsd_wr_allchans(ppd, 2, 0,
BMASK(11, 9));
7724 ibsd_wr_allchans(ppd, 2, (1 << 3),
BMASK(3, 3));
7733 ppd->
dd->cspec->r1 ?
7735 ppd->
cpspec->qdr_dfe_on = 1;
7737 ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
7739 ibsd_wr_allchans(ppd, 1, (0 << 15),
BMASK(15, 15));
7742 ibsd_wr_allchans(ppd, 12, (1 << 12),
BMASK(12, 12));
7744 ibsd_wr_allchans(ppd, 12, (1 << 13),
BMASK(13, 13));
7746 ibsd_wr_allchans(ppd, 11, (1 << 11),
BMASK(12, 11));
7748 ibsd_wr_allchans(ppd, 12, (3 << 2),
BMASK(3, 2));
7754 init_txdds_table(ppd, 0);
7764 9, code << 9, 0x3f << 9);
7767 static void set_man_mode_h1(
struct qib_pportdata *ppd,
int chan,
7768 int enable,
u32 tapenable)
7772 1, 3 << 10, 0x1f << 10);
7796 static void write_tx_serdes_param(
struct qib_pportdata *ppd,
7803 deemph &= ~(
SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
7804 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
7805 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
7806 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
7808 deemph |=
SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7809 tx_override_deemphasis_select);
7810 deemph |= (txdds->
amp &
SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7811 txampcntl_d2a)) <<
SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7813 deemph |= (txdds->
main &
SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7814 txc0_ena)) <<
SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7816 deemph |= (txdds->
post &
SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7817 txcp1_ena)) <<
SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7819 deemph |= (txdds->
pre &
SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7820 txcn1_ena)) <<
SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7832 const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7835 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
7838 ddr_dds : sdr_dds));
7839 write_tx_serdes_param(ppd, dds);
7847 ppd->
cpspec->qdr_reforce = 0;
7848 if (!ppd->
dd->cspec->r1)
7852 set_man_mode_h1(ppd, chan, 1, 0);
7853 set_man_code(ppd, chan, ppd->
cpspec->h1_val);
7854 clock_man(ppd, chan);
7855 set_man_mode_h1(ppd, chan, 0, 0);
/*
 * Constants for the JTAG/BIST register-shift interface used by the
 * qib_r_* helpers below: JTAG access enable mask and bist_en field
 * position in SPC_JTAG_ACCESS_REG.
 * NOTE(review): R_OP_SHIFT/R_OP_UPDATE appear to be opcode values placed
 * at R_OPCODE_LSB — confirm against qib_r_shift()/qib_r_update().
 */
7859 #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
7860 #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
7862 #define R_OPCODE_LSB 3
7864 #define R_OP_SHIFT 2
7865 #define R_OP_UPDATE 3
7882 static int qib_r_wait_for_rdy(
struct qib_devdata *dd)
7886 for (timeout = 0; timeout < 100 ; ++timeout) {
7894 static int qib_r_shift(
struct qib_devdata *dd,
int bisten,
7902 ret = qib_r_wait_for_rdy(dd);
7905 for (pos = 0; pos < len; ++
pos) {
7908 outp[pos >> 3] &= ~(1 << (pos & 7));
7909 outp[pos >> 3] |= (ret << (pos & 7));
7912 int tdi = inp[pos >> 3] >> (pos & 7);
7917 ret = qib_r_wait_for_rdy(dd);
7925 ret = qib_r_wait_for_rdy(dd);
7933 static int qib_r_update(
struct qib_devdata *dd,
int bisten)
7939 ret = qib_r_wait_for_rdy(dd);
/*
 * BIST chain identifiers and the port-select chain length in bits,
 * consumed by the IB link-recovery setup's qib_r_shift() calls below.
 * NOTE(review): exact chain semantics (port select / AT / ETM) are not
 * visible here — confirm against the recovery-setup data tables.
 */
7947 #define BISTEN_PORT_SEL 15
7948 #define LEN_PORT_SEL 625
7949 #define BISTEN_AT 17
7951 #define BISTEN_ETM 16
/* round a bit count up to the number of whole bytes needed to hold it */
7954 #define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
7958 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7959 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7962 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7963 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7964 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
7965 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
7966 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
7967 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
7968 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7969 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
7972 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
7973 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7978 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7979 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7980 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7981 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
7982 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7983 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
7984 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
7985 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
7990 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7991 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
7992 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7993 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
7994 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
7995 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
7996 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
7997 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8002 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8003 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8004 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8005 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8006 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8007 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8008 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8009 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8014 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8015 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8016 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8017 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8018 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8019 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8020 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8021 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8026 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8027 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8028 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8029 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8030 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8031 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8032 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8033 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8047 if (!ppd->
dd->cspec->r1)
8050 dd->
cspec->recovery_ports_initted++;
8051 ppd->
cpspec->recovery_init = 1;
8053 if (!both && dd->
cspec->recovery_ports_initted == 1) {
8054 portsel = ppd->
port == 1 ? portsel_port1 : portsel_port2;
8057 portsel = portsel_2port;
8061 if (qib_r_grab(dd) < 0 ||
8067 portsel,
NULL) < 0 ||
8073 qib_dev_err(dd,
"Failed IB link recovery setup\n");
8076 static void check_7322_rxe_status(
struct qib_pportdata *ppd)
8081 if (dd->
cspec->recovery_ports_initted != 1)
8094 ppd->
dd->cspec->stay_in_freeze = 1;
8095 qib_7322_set_intr_state(ppd->
dd, 0);
8096 qib_write_kreg(dd,
kr_fmask, 0ULL);
8097 qib_dev_err(dd,
"HCA unusable until powercycled\n");
8102 SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8109 ppd->
cpspec->ibcctrl_a &=
8110 ~
SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8115 qib_set_ib_7322_lstate(ppd, 0,