#include <linux/pci.h>
#include <linux/module.h>

#define SD7220_FW_NAME "qlogic/sd7220.fw"
#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_ibcstatus KREG_IDX(IBCStatus)
#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
#define kr_scratch KREG_IDX(Scratch)
#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
#define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl)
#define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg)
#define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg)
#define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl)
#define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0)
#define kr_serdes_maptable KREG_IDX(IBSerDesMappTable)
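/*
 * Note: KREG_IDX() just turns a 7220 chip-register byte offset into a
 * 64-bit kreg index, so the kr_* names above can be handed directly to
 * qib_read_kreg32(), qib_read_kreg64() and qib_write_kreg().  Illustrative
 * expansion (not part of the original source): kr_scratch is
 * QIB_7220_Scratch_OFFS / sizeof(u64).
 */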
#define PCIE_SERDES0 0
#define PCIE_SERDES1 1

#define EPB_ADDR_SHF 8
#define EPB_LOC(chn, elt, reg) \
	(((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
	 EPB_ADDR_SHF)
#define EPB_IB_QUAD0_CS_SHF (25)
#define EPB_IB_QUAD0_CS (1U << EPB_IB_QUAD0_CS_SHF)
#define EPB_IB_UC_CS_SHF (26)
#define EPB_PCIE_UC_CS_SHF (27)
#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
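/*
 * EPB addressing, as encoded by EPB_LOC(): after the EPB_ADDR_SHF shift
 * the element lands in bits 11:8, the channel in bits 14:12 and the
 * register in bits 22:17; the chip-select bits (25..27) and EPB_GLOBAL_WR
 * (bit 16) are ORed on top of that.  Worked example (arithmetic only, not
 * from the original source): EPB_LOC(1, 2, 3) = ((0x2 | 0x10 | 0x600) << 8)
 * = 0x61200.
 */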
static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
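/*
 * The fragment below caches the result of the first successful firmware
 * verification: once qib_sd7220_ib_vfy() reports a good image, the
 * serdes_first_init_done flag is latched, apparently so that subsequent
 * bring-ups can skip re-verifying (and re-loading) the SerDes microcode.
 */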
	if (!dd->cspec->serdes_first_init_done &&
	    qib_sd7220_ib_vfy(dd, fw) > 0)
		dd->cspec->serdes_first_init_done = 1;
	return dd->cspec->serdes_first_init_done;
#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
#define UC_PAR_CLR_D 8
#define UC_PAR_CLR_M 0xC
#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
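/*
 * IB_MPREG5/IB_MPREG6 address element-6 registers through the IB
 * microcontroller chip-select; UC_PAR_CLR_D/UC_PAR_CLR_M look to be the
 * data and mask values written there to clear a latched memory-parity
 * error (see the "Failed clearing IBSerDes Parity err" path below).
 */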
		qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");

#define IBSD_RESYNC_TRIES 3
#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
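/*
 * IB_PGUDP() and IB_CMUDONE() are per-channel EPB locations used by the
 * resync logic below; the CMUDONE check expects bits 6:4 to read back as
 * 0x4 plus the channel number, i.e. (chn << 4) | 0x40.
 */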
static int qib_resync_ibepb(struct qib_devdata *dd)

		if (ret != 0xF0 && ret != 0x55 && tries == 0)

		if ((ret & 0x70) != ((chn << 4) | 0x40)) {
			qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
				    ret, chn);

	return (ret > 0) ? 0 : ret;
static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)

		qib_write_kreg(dd, kr_hwerrmask,
			       dd->cspec->hwerrmask &
			       ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);

		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);

		qib_write_kreg(dd, kr_hwerrmask,
			       dd->cspec->hwerrmask &
			       ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);

		ret = qib_resync_ibepb(dd);

			qib_dev_err(dd, "IBUC Parity still set after RST\n");
			dd->cspec->hwerrmask &=
				~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;

		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where)

	ret = qib_resync_ibepb(dd);
		qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);

		qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);

	if (!(val & (1ULL << 11)))
		qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);

		qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);

	for (chn = 3; chn >= 0; --chn) {
			qib_dev_err(dd,
				"Failed checking TRIMDONE, chn %d (%s)\n",
				chn, where);

			baduns |= (1 << chn);
			qib_dev_err(dd,
				"TRIMDONE cleared on chn %d (%02X). (%s)\n",

			qib_dev_err(dd, "Err on TRIMDONE rewrite1\n");

	for (chn = 3; chn >= 0; --chn) {
		if (baduns & (1 << chn)) {
			qib_dev_err(dd,
				"Resetting TRIMDONE on chn %d (%s)\n",
				chn, where);

			qib_dev_err(dd,
				"Failed re-setting TRIMDONE, chn %d (%s)\n",
				chn, where);
	qib_ibsd_reset(dd, 1);
	qib_sd_trimdone_monitor(dd, "Driver-reload");

		qib_dev_err(dd, "Failed to load IB SERDES image\n");

	ret = qib_ibsd_ucode_loaded(dd->pport, fw);

	ret = qib_sd_early(dd);
		qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");

	ret = qib_sd_dactrim(dd);

	ret = qib_internal_presets(dd);
		qib_dev_err(dd, "Failed to set IB SERDES presets\n");

	ret = qib_sd_trimself(dd, 0x80);
		qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");

	ret = qib_sd7220_ib_load(dd, fw);
		qib_dev_err(dd, "Failed to load IB SERDES image\n");
	vfy = qib_sd7220_ib_vfy(dd, fw);

	ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
	qib_ibsd_reset(dd, 0);
	trim_done = qib_sd_trimdone_poll(dd);
	qib_ibsd_reset(dd, 1);
	qib_sd_trimdone_monitor(dd, "First-reset");
	dd->cspec->serdes_first_init_done = 1;

	if (qib_sd_setvals(dd) >= 0)
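/*
 * The init fragments above follow the bring-up order visible in the
 * calls: assert reset, apply early defaults, DAC trim, the internal
 * DDS/RXEQ presets and TRIMSELF, load and verify the SerDes firmware
 * (skipped when qib_ibsd_ucode_loaded() already reports a verified
 * image), start equalization via START_EQ1, release reset and poll for
 * TRIMDONE, then re-assert reset and program the DDS/RXEQ tables through
 * qib_sd_setvals().
 */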
#define EPB_ACC_REQ 1
#define EPB_ACC_GNT 0x100
#define EPB_DATA_MASK 0xFF
#define EPB_RD (1ULL << 24)
#define EPB_TRANS_RDY (1ULL << 31)
#define EPB_TRANS_ERR (1ULL << 30)
#define EPB_TRANS_TRIES 5
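/*
 * EPB handshake bits used below: EPB_ACC_REQ/EPB_ACC_GNT request and
 * signal ownership in the access-control register, while the transaction
 * register carries the data byte (EPB_DATA_MASK), the read flag (bit 24)
 * and the ready/error status bits (31/30), retried up to EPB_TRANS_TRIES
 * times.  epb_access() appears to take claim > 0 to acquire the bus and
 * claim < 0 to release it (see the epb_access(dd, sdnum, -1) calls).
 */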
static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
	accval = qib_read_kreg32(dd, acc);

		qib_write_kreg(dd, acc, newval);
		pollval = qib_read_kreg32(dd, acc);
		pollval = qib_read_kreg32(dd, acc);
	} else if (claim > 0) {
		qib_write_kreg(dd, acc, newval);
		pollval = qib_read_kreg32(dd, acc);
		pollval = qib_read_kreg32(dd, acc);

	qib_write_kreg(dd, reg, i_val);
	transval = qib_read_kreg64(dd, reg);
	transval = qib_read_kreg32(dd, reg);
	if (tries > 0 && o_vp)
	owned = epb_access(dd, sdnum, 1);
		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);

	transval = qib_read_kreg32(dd, trans);
	tries = epb_trans(dd, trans, transval, &transval);
	if (tries > 0 && mask != 0) {
		wd = (wd & mask) | (transval & ~mask);
		tries = epb_trans(dd, trans, transval, &transval);

	if (epb_access(dd, sdnum, -1) < 0)
	spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
#define EPB_ROM_R (2)
#define EPB_ROM_W (1)
#define EPB_UC_CTL EPB_LOC(6, 0, 0)
#define EPB_MADDRL EPB_LOC(6, 0, 2)
#define EPB_MADDRH EPB_LOC(6, 0, 3)
#define EPB_ROMDATA EPB_LOC(6, 0, 4)
#define EPB_RAMDATA EPB_LOC(6, 0, 5)
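/*
 * The element-6 locations above form the microcontroller memory window
 * used by qib_sd7220_ram_xfer(): a control register, the low/high bytes
 * of the target address, and data ports for ROM and RAM accesses, with
 * EPB_ROM_R/EPB_ROM_W presumably selecting the read or write operation
 * in EPB_UC_CTL.
 */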
static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
			       u8 *buf, int cnt, int rd_notwr)

	op = rd_notwr ? "Rd" : "Wr";
	owned = epb_access(dd, sdnum, 1);
		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);

	transval = qib_read_kreg32(dd, trans);
	tries = epb_trans(dd, trans, transval, &transval);
	while (tries > 0 && sofar < cnt) {
			int addrbyte = (addr + sofar) >> 8;

			tries = epb_trans(dd, trans, transval, &transval);
			addrbyte = (addr + sofar) & 0xFF;
			tries = epb_trans(dd, trans, transval, &transval);

		tries = epb_trans(dd, trans, transval, &transval);
		tries = epb_trans(dd, trans, transval, &transval);

	if (epb_access(dd, sdnum, -1) < 0)
	spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
#define PROG_CHUNK 64

static int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
			      const u8 *img, int len, int offset)

	while (sofar < len) {
		cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
					  (u8 *)img + sofar, req, 0);
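/*
 * qib_sd7220_prog_ld() streams the firmware image into the uC memory in
 * PROG_CHUNK-byte pieces through qib_sd7220_ram_xfer() (rd_notwr == 0,
 * i.e. write); qib_sd7220_prog_vfy() below reads the same range back and
 * counts mismatches against the image.
 */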
#define SD_PRAM_ERROR_LIMIT 42

static int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
			       const u8 *img, int len, int offset)

	while (sofar < len) {
		cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
					  readback, req, 1);
		for (idx = 0; idx < cnt; ++idx) {
			if (readback[idx] != img[idx + sofar])

	return errors ? -errors : sofar;
#define IB_SERDES_TRIM_DONE (1ULL << 11)
#define TRIM_TMO (30)

static int qib_sd_trimdone_poll(struct qib_devdata *dd)

	for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {

	if (trim_tmo >= TRIM_TMO) {
		qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
#define TX_FAST_ELT (9)

#define NUM_DDS_REGS 6
#define DDS_REG_MAP 0x76A910

#define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \
	{ { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \
	    (main_d << 3) | 4 | (ipre_d >> 2), \
	    (main_s << 3) | 4 | (ipre_s >> 2), \
	    ((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \
	    ((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } }
static struct dds_init {
	u8 reg_vals[NUM_DDS_REGS];
} dds_init_vals[] = {
	DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0),
	DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1),
	DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0),
	DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0),
	DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0),
	DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0),
	DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0),
	DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0),
	DDS_VAL(31, 20, 11, 0, 28, 23, 8, 0),
	DDS_VAL(31, 21, 10, 0, 27, 24, 7, 0),
	DDS_VAL(31, 22, 9, 0, 26, 25, 6, 0),
	DDS_VAL(30, 23, 8, 0, 25, 26, 5, 0),
	DDS_VAL(29, 24, 7, 0, 23, 27, 4, 0),
	DDS_VAL(28, 25, 6, 0, 21, 28, 3, 0),
	DDS_VAL(27, 26, 5, 0, 19, 29, 2, 0),
	DDS_VAL(25, 27, 4, 0, 17, 30, 1, 0)
};
#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
#define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \
	{RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} }

#define RXEQ_VAL_ALL(elt, adr, val) \
	{RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} }

#define RXEQ_SDR_DFELTH 0
#define RXEQ_SDR_TLTH 0
#define RXEQ_SDR_G1CNT_Z1CNT 0x11
#define RXEQ_SDR_ZCNT 23
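/*
 * Each rxeq_init_vals[] row packs the target element (low nibble) and
 * register address (upper bits) into rdesc via RXEQ_INIT_RDESC(), plus
 * four candidate data bytes; which of the four gets programmed is chosen
 * by the qib_rxeq_set module parameter (see set_rxeq_vals() below).
 */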
static struct rxeq_init {
	u16 rdesc;
	u8 rdata[4];
} rxeq_init_vals[] = {
#define DDS_ROWS (16)
#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)

	sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);

	writeq(data, iaddr + idx);

	for (midx = 0; midx < DDS_ROWS; ++midx) {
		data = dds_init_vals[midx].reg_vals[idx];

	didx = idx + min_idx;
	writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
	for (vidx = 0; vidx < 4; vidx++) {
		data = rxeq_init_vals[idx].rdata[vidx];
		writeq(data, taddr + (vidx << 6) + idx);
#define CMUCTRL5 EPB_LOC(7, 0, 0x15)
#define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0)
#define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5)
#define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6)
#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
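/*
 * ibsd_mod_allchnls() below performs a read-modify-write of the same
 * element/register on all four IB SerDes channels: when only part of the
 * byte is being changed it appears to pre-read the location (with
 * EPB_GLOBAL_WR cleared) and merge the new bits under the mask, then
 * write the result either with one global write or channel by channel.
 */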
static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
			     int mask)

		loc & ~EPB_GLOBAL_WR, 0, 0);

			qib_dev_err(dd,
				"pre-read failed: elt %d, addr 0x%X, chnl %d\n",
				(sloc & 0xF), (sloc >> 9) & 0x3f, chnl);

		val = (ret & ~mask) | (val & mask);

			qib_dev_err(dd,
				"Global WR failed: elt %d, addr 0x%X, val %02X\n",
				(sloc & 0xF), (sloc >> 9) & 0x3f, val);

	for (chnl = 0; chnl < 4; ++chnl) {

			qib_dev_err(dd,
				"Write failed: elt %d, addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
				(sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
				val & 0xFF, mask & 0xFF);
static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)

		reg = (regmap & 0xF);
		data = ddi->reg_vals[idx];
		ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF);
static int set_rxeq_vals(struct qib_devdata *dd, int vsel)

	for (ridx = 0; ridx < cnt; ++ridx) {
		elt = rxeq_init_vals[ridx].rdesc & 0xF;
		reg = rxeq_init_vals[ridx].rdesc >> 4;
		val = rxeq_init_vals[ridx].rdata[vsel];
		ret = ibsd_mod_allchnls(dd, loc, val, 0xFF);
static unsigned qib_rxeq_set = 2;
	"Which set [0..3] of Rx Equalization values is default");

static int qib_internal_presets(struct qib_devdata *dd)

	ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
		qib_dev_err(dd, "Failed to set default DDS values\n");
	ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
		qib_dev_err(dd, "Failed to set default RXEQ values\n");
	if (!dd->cspec->presets_needed)

	dd->cspec->presets_needed = 0;

	qib_ibsd_reset(dd, 1);
	qib_sd_trimdone_monitor(dd, "link-down");

	ret = qib_internal_presets(dd);
static int qib_sd_trimself(struct qib_devdata *dd, int val)

#define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E)
#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)

	ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
#define RELOCK_FIRST_MS 3
#define RXLSPPM(chan) EPB_LOC(chan, 0, 2)

	ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);

	ibsd_mod_allchnls(dd, loc, 0x80, 0x80);

	ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);

	ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
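/*
 * The relock sequence above pulses bit 7 of a per-channel SerDes register
 * (clear under mask 0x80, then set it again), which appears to force the
 * receiver to re-acquire lock; RELOCK_FIRST_MS suggests the first retry
 * is scheduled after roughly 3 ms.
 */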
	if (dd->cspec->relock_timer_active)

static unsigned qib_relock_by_timer = 1;

static void qib_run_relock(unsigned long opaque)

	if (qib_relock_by_timer) {