8 #include <linux/kernel.h>
10 #include <linux/pci.h>
12 #include <linux/time.h>
14 #include <linux/module.h>
/*
 * Module-wide state flags for the Octeon PCIe root-complex driver.
 *
 * pcie_disable: when non-zero, PCIe bring-up is skipped entirely.
 *     NOTE(review): the setter is not visible in this chunk — presumably a
 *     boot/module parameter; confirm against the full file.
 * enable_pcie_14459_war: enables the errata-14459 workaround in
 *     octeon_pcie_read_config(): config reads are retried (with CRS retry
 *     disabled in the PEM) until they succeed or a retry limit is hit.
 * enable_pcie_bus_num_war: per-port flag (indexed by PCIe port 0/1),
 *     set from device_needs_bus_num_war() after probing the root device;
 *     when set, the bridge's primary/secondary/subordinate bus numbers are
 *     rewritten on each config access to match the bus being addressed.
 */
static int pcie_disable;
static int enable_pcie_14459_war;
static int enable_pcie_bus_num_war[2];
/*
 * Forward declaration: initialize a PCIe port in root-complex mode.
 * Defined later in this file; declared here because the gen1 bring-up
 * path re-invokes it (port 0 re-init after the TLP-counter workaround).
 */
static int cvmx_pcie_rc_initialize(int pcie_port);
97 #include <dma-coherence.h>
111 pcie_addr.io.upper = 0;
113 pcie_addr.io.did = 3;
114 pcie_addr.io.subdid = 2;
116 pcie_addr.io.port = pcie_port;
117 return pcie_addr.u64;
145 pcie_addr.mem.upper = 0;
146 pcie_addr.mem.io = 1;
147 pcie_addr.mem.did = 3;
148 pcie_addr.mem.subdid = 3 + pcie_port;
149 return pcie_addr.u64;
178 pescx_cfg_rd.
u64 = 0;
179 pescx_cfg_rd.s.addr = cfg_offset;
182 return pescx_cfg_rd.s.data;
186 pemx_cfg_rd.s.addr = cfg_offset;
189 return pemx_cfg_rd.s.data;
201 static void cvmx_pcie_cfgx_write(
int pcie_port,
uint32_t cfg_offset,
206 pescx_cfg_wr.
u64 = 0;
207 pescx_cfg_wr.s.addr = cfg_offset;
208 pescx_cfg_wr.s.data =
val;
213 pemx_cfg_wr.s.addr = cfg_offset;
214 pemx_cfg_wr.s.data =
val;
230 static inline uint64_t __cvmx_pcie_build_config_addr(
int pcie_port,
int bus,
238 if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
242 pcie_addr.config.upper = 2;
243 pcie_addr.config.io = 1;
244 pcie_addr.config.did = 3;
245 pcie_addr.config.subdid = 1;
246 pcie_addr.config.es = 1;
247 pcie_addr.config.port = pcie_port;
248 pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
249 pcie_addr.config.bus =
bus;
250 pcie_addr.config.dev =
dev;
251 pcie_addr.config.func =
fn;
252 pcie_addr.config.reg =
reg;
253 return pcie_addr.u64;
267 static uint8_t cvmx_pcie_config_read8(
int pcie_port,
int bus,
int dev,
271 __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
273 return cvmx_read64_uint8(address);
289 static uint16_t cvmx_pcie_config_read16(
int pcie_port,
int bus,
int dev,
293 __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
311 static uint32_t cvmx_pcie_config_read32(
int pcie_port,
int bus,
int dev,
315 __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
332 static void cvmx_pcie_config_write8(
int pcie_port,
int bus,
int dev,
int fn,
336 __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
338 cvmx_write64_uint8(address, val);
351 static void cvmx_pcie_config_write16(
int pcie_port,
int bus,
int dev,
int fn,
355 __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
370 static void cvmx_pcie_config_write32(
int pcie_port,
int bus,
int dev,
int fn,
374 __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
384 static void __cvmx_pcie_rc_initialize_config_space(
int pcie_port)
416 pciercx_cfg030.s.ro_en = 1;
418 pciercx_cfg030.s.ns_en = 1;
420 pciercx_cfg030.s.ce_en = 1;
422 pciercx_cfg030.s.nfe_en = 1;
424 pciercx_cfg030.s.fe_en = 1;
426 pciercx_cfg030.s.ur_en = 1;
444 npei_ctl_status2.s.c1_b1_s = 3;
446 npei_ctl_status2.s.c0_b1_s = 3;
472 pciercx_cfg070.s.ge = 1;
473 pciercx_cfg070.s.ce = 1;
483 pciercx_cfg001.s.msae = 1;
484 pciercx_cfg001.s.me = 1;
485 pciercx_cfg001.s.i_dis = 1;
486 pciercx_cfg001.s.see = 1;
498 pciercx_cfg032.s.aslpc = 0;
510 pciercx_cfg006.u32 = 0;
511 pciercx_cfg006.s.pbnum = 1;
512 pciercx_cfg006.s.sbnum = 1;
513 pciercx_cfg006.s.subbnum = 1;
522 pciercx_cfg008.u32 = 0;
523 pciercx_cfg008.s.mb_addr = 0x100;
524 pciercx_cfg008.s.ml_addr = 0;
537 pciercx_cfg009.s.lmem_base = 0x100;
538 pciercx_cfg009.s.lmem_limit = 0;
539 pciercx_cfg010.s.umem_base = 0x100;
540 pciercx_cfg011.s.umem_limit = 0;
550 pciercx_cfg035.s.secee = 1;
551 pciercx_cfg035.s.sefee = 1;
552 pciercx_cfg035.s.senfee = 1;
553 pciercx_cfg035.s.pmeie = 1;
561 pciercx_cfg075.s.cere = 1;
562 pciercx_cfg075.s.nfere = 1;
563 pciercx_cfg075.s.fere = 1;
571 pciercx_cfg034.s.hpint_en = 1;
572 pciercx_cfg034.s.dlls_en = 1;
573 pciercx_cfg034.s.ccint_en = 1;
586 static int __cvmx_pcie_rc_initialize_link_gen1(
int pcie_port)
597 if (pescx_ctl_status.s.qlm_cfg == 0)
599 pciercx_cfg452.s.lme = 0xf;
602 pciercx_cfg452.s.lme = 0x7;
613 pciercx_cfg455.s.m_cpl_len_err = 1;
619 pescx_ctl_status.s.lane_swp = 1;
625 pescx_ctl_status.s.lnk_enb = 1;
636 start_cycle = cvmx_get_cycle();
639 cvmx_dprintf(
"PCIe: Port %d link timeout\n", pcie_port);
644 }
while (pciercx_cfg032.s.dlla == 0);
658 switch (pciercx_cfg032.s.nlw) {
660 pciercx_cfg448.s.rtl = 1677;
663 pciercx_cfg448.s.rtl = 867;
666 pciercx_cfg448.s.rtl = 462;
669 pciercx_cfg448.s.rtl = 258;
693 static int __cvmx_pcie_rc_initialize_gen1(
int pcie_port)
715 if ((pcie_port == 0) && !npei_ctl_status.s.host_mode) {
716 cvmx_dprintf(
"PCIe: Port %d in endpoint mode\n", pcie_port);
726 if ((pcie_port == 1) && npei_dbg_data.cn52xx.qlm0_link_width) {
727 cvmx_dprintf(
"PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
736 npei_ctl_status.s.arb = 1;
738 npei_ctl_status.s.cfg_rtry = 0x20;
744 npei_ctl_status.s.p0_ntags = 0x20;
745 npei_ctl_status.s.p1_ntags = 0x20;
758 if (pcie_port == 0) {
766 if (ciu_soft_prst.s.soft_prst == 0) {
768 ciu_soft_prst.s.soft_prst = 1;
771 ciu_soft_prst.s.soft_prst = 1;
777 ciu_soft_prst.s.soft_prst = 0;
780 ciu_soft_prst.s.soft_prst = 0;
798 if (ciu_soft_prst.s.soft_prst == 0) {
800 ciu_soft_prst.s.soft_prst = 1;
810 ciu_soft_prst.s.soft_prst = 0;
814 ciu_soft_prst.s.soft_prst = 0;
833 pescx_ctl_status2.s.pclk_run = 1;
840 cvmx_dprintf(
"PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
851 if (pescx_ctl_status2.s.pcierst) {
852 cvmx_dprintf(
"PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
862 if (pescx_bist_status2.u64) {
863 cvmx_dprintf(
"PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n",
870 if (pescx_bist_status.u64)
871 cvmx_dprintf(
"PCIe: BIST FAILED for port %d (0x%016llx)\n",
872 pcie_port,
CAST64(pescx_bist_status.u64));
875 __cvmx_pcie_rc_initialize_config_space(pcie_port);
878 if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port)) {
879 cvmx_dprintf(
"PCIe: Failed to initialize port %d, probably the slot is empty\n",
886 npei_mem_access_ctl.s.max_word = 0;
887 npei_mem_access_ctl.s.timer = 127;
891 mem_access_subid.u64 = 0;
892 mem_access_subid.s.port = pcie_port;
893 mem_access_subid.s.nmerge = 1;
894 mem_access_subid.s.esr = 1;
895 mem_access_subid.s.esw = 1;
896 mem_access_subid.s.nsr = 0;
897 mem_access_subid.s.nsw = 0;
898 mem_access_subid.s.ror = 0;
899 mem_access_subid.s.row = 0;
900 mem_access_subid.s.ba = 0;
906 for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) {
908 mem_access_subid.s.ba += 1;
916 for (i = 0; i < 4; i++) {
930 bar1_index.s.end_swp = 1;
931 bar1_index.s.addr_v = 1;
933 base = pcie_port ? 16 : 0;
941 for (i = 0; i < 16; i++) {
946 bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
969 npei_ctl_port.s.bar2_enb = 1;
970 npei_ctl_port.s.bar2_esx = 1;
971 npei_ctl_port.s.bar2_cax = 0;
972 npei_ctl_port.s.ptlp_ro = 1;
973 npei_ctl_port.s.ctlp_ro = 1;
974 npei_ctl_port.s.wait_com = 0;
975 npei_ctl_port.s.waitl_com = 0;
980 npei_ctl_port.s.bar2_enb = 1;
981 npei_ctl_port.s.bar2_esx = 1;
982 npei_ctl_port.s.bar2_cax = 0;
983 npei_ctl_port.s.ptlp_ro = 1;
984 npei_ctl_port.s.ctlp_ro = 1;
985 npei_ctl_port.s.wait_com = 0;
986 npei_ctl_port.s.waitl_com = 0;
1002 int old_in_fif_p_count;
1012 uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
1020 cvmx_write64_uint32(write_address, 0);
1035 old_in_fif_p_count = dbg_data.s.data & 0xff;
1036 cvmx_write64_uint32(write_address, 0);
1039 in_fif_p_count = dbg_data.s.data & 0xff;
1040 }
while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
1043 in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
1049 out_p_count = (dbg_data.s.data>>1) & 0xff;
1052 if (out_p_count != in_fif_p_count) {
1053 cvmx_dprintf(
"PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
1054 while (in_fif_p_count != 0) {
1055 cvmx_write64_uint32(write_address, 0);
1057 in_fif_p_count = (in_fif_p_count + 1) & 0xff;
1069 cvmx_pcie_rc_initialize(0);
1077 cvmx_dprintf(
"PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
1091 static int __cvmx_pcie_rc_initialize_link_gen2(
int pcie_port)
1100 pem_ctl_status.s.lnk_enb = 1;
1104 start_cycle = cvmx_get_cycle();
1110 }
while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1));
1121 switch (pciercx_cfg032.s.nlw) {
1123 pciercx_cfg448.s.rtl = 1677;
1126 pciercx_cfg448.s.rtl = 867;
1129 pciercx_cfg448.s.rtl = 462;
1132 pciercx_cfg448.s.rtl = 258;
1149 static int __cvmx_pcie_rc_initialize_gen2(
int pcie_port)
1177 if (qlmx_cfg.s.qlm_spd == 15) {
1178 pr_notice(
"PCIe: Port %d is disabled, skipping.\n", pcie_port);
1182 switch (qlmx_cfg.s.qlm_spd) {
1187 pr_notice(
"PCIe: Port %d is SRIO, skipping.\n", pcie_port);
1190 pr_notice(
"PCIe: Port %d is SGMII, skipping.\n", pcie_port);
1193 pr_notice(
"PCIe: Port %d is XAUI, skipping.\n", pcie_port);
1201 pr_notice(
"PCIe: Port %d is unknown, skipping.\n", pcie_port);
1206 if (sriox_status_reg.s.srio) {
1207 pr_notice(
"PCIe: Port %d is SRIO, skipping.\n", pcie_port);
1215 pr_notice(
"PCIE : init for pcie analyzer.\n");
1234 if (!mio_rst_ctl.s.host_mode) {
1235 pr_notice(
"PCIe: Port %d in endpoint mode.\n", pcie_port);
1244 ciu_qlm.s.txbypass = 1;
1245 ciu_qlm.s.txdeemph = 5;
1246 ciu_qlm.s.txmargin = 0x17;
1251 ciu_qlm.s.txbypass = 1;
1252 ciu_qlm.s.txdeemph = 5;
1253 ciu_qlm.s.txmargin = 0x17;
1267 if (ciu_soft_prst.s.soft_prst == 0) {
1269 ciu_soft_prst.s.soft_prst = 1;
1279 ciu_soft_prst.s.soft_prst = 0;
1283 ciu_soft_prst.s.soft_prst = 0;
1296 pr_notice(
"PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
1302 if (pemx_bist_status.u64)
1303 pr_notice(
"PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port,
CAST64(pemx_bist_status.u64));
1307 pemx_bist_status2.u64 &= ~0x3full;
1308 if (pemx_bist_status2.u64)
1309 pr_notice(
"PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port,
CAST64(pemx_bist_status2.u64));
1312 __cvmx_pcie_rc_initialize_config_space(pcie_port);
1316 pciercx_cfg515.s.dsc = 1;
1320 if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) {
1328 pciercx_cfg031.s.mls = 1;
1330 if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) {
1331 pr_notice(
"PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
1338 sli_mem_access_ctl.s.max_word = 0;
1339 sli_mem_access_ctl.s.timer = 127;
1343 mem_access_subid.u64 = 0;
1344 mem_access_subid.s.port = pcie_port;
1345 mem_access_subid.s.nmerge = 0;
1346 mem_access_subid.s.esr = 1;
1347 mem_access_subid.s.esw = 1;
1348 mem_access_subid.s.wtype = 0;
1349 mem_access_subid.s.rtype = 0;
1352 mem_access_subid.cn68xx.ba = 0;
1354 mem_access_subid.cn63xx.ba = 0;
1360 for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) {
1363 __cvmx_increment_ba(&mem_access_subid);
1371 for (i = 0; i < 4; i++) {
1395 pemx_bar_ctl.s.bar1_siz = 3;
1396 pemx_bar_ctl.s.bar2_enb = 1;
1397 pemx_bar_ctl.s.bar2_esx = 1;
1398 pemx_bar_ctl.s.bar2_cax = 0;
1401 sli_ctl_portx.s.ptlp_ro = 1;
1402 sli_ctl_portx.s.ctlp_ro = 1;
1403 sli_ctl_portx.s.wait_com = 0;
1404 sli_ctl_portx.s.waitl_com = 0;
1412 bar1_index.s.ca = 1;
1413 bar1_index.s.end_swp = 1;
1414 bar1_index.s.addr_v = 1;
1416 for (i = 0; i < 16; i++) {
1419 bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
1427 pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
1432 pr_notice(
"PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
1444 static int cvmx_pcie_rc_initialize(
int pcie_port)
1448 result = __cvmx_pcie_rc_initialize_gen1(pcie_port);
1450 result = __cvmx_pcie_rc_initialize_gen2(pcie_port);
1477 dev->
bus && dev->
bus->parent) {
1482 while (dev->
bus && dev->
bus->parent)
1489 if ((dev->
bus->number == 1) &&
1495 pin = ((pin - 3) & 3) + 1;
1506 static void set_cfg_read_retry(
u32 retry_cnt)
1510 pemx_ctl.s.cfg_rtry = retry_cnt;
1515 static u32 disable_cfg_read_retry(
void)
1521 retry_cnt = pemx_ctl.s.cfg_rtry;
1522 pemx_ctl.s.cfg_rtry = 0;
1527 static int is_cfg_retry(
void)
1531 if (pemx_int_sum.s.crs_dr)
1540 static int octeon_pcie_read_config(
unsigned int pcie_port,
struct pci_bus *bus,
1546 int bus_number = bus->
number;
1549 int max_retry_cnt = 10;
1550 u32 cfg_retry_cnt = 0;
1552 cvmmemctl_save.u64 = 0;
1559 if (enable_pcie_bus_num_war[pcie_port])
1563 pciercx_cfg006.
u32 = cvmx_pcie_cfgx_read(pcie_port,
1565 if (pciercx_cfg006.s.pbnum != bus_number) {
1566 pciercx_cfg006.s.pbnum = bus_number;
1567 pciercx_cfg006.s.sbnum = bus_number;
1568 pciercx_cfg006.s.subbnum = bus_number;
1569 cvmx_pcie_cfgx_write(pcie_port,
1571 pciercx_cfg006.u32);
1581 if ((bus->
parent ==
NULL) && (devfn >> 3 != 0))
1608 if (bus_number == 2)
1615 if ((bus_number == 2) && (devfn >> 3 != 2))
1622 if ((bus_number == 2) && (devfn >> 3 != 3))
1626 if ((bus_number == 2) &&
1627 !((devfn == (2 << 3)) || (devfn == (3 << 3))))
1637 if ((bus_number == 4) &&
1638 !((devfn >> 3 >= 1) && (devfn >> 3 <= 4)))
1641 if ((bus_number == 5) && (devfn >> 3 != 0))
1644 if ((bus_number == 6) && (devfn >> 3 != 0))
1647 if ((bus_number == 7) && (devfn >> 3 != 0))
1650 if ((bus_number == 8) && (devfn >> 3 != 0))
1662 cvmmemctl.u64 = cvmmemctl_save.u64;
1663 cvmmemctl.s.didtto = 2;
1668 cfg_retry_cnt = disable_cfg_read_retry();
1670 pr_debug(
"pcie_cfg_rd port=%d b=%d devfn=0x%03x reg=0x%03x"
1671 " size=%d ", pcie_port, bus_number, devfn, reg, size);
1675 *val = cvmx_pcie_config_read32(pcie_port, bus_number,
1676 devfn >> 3, devfn & 0x7, reg);
1679 *val = cvmx_pcie_config_read16(pcie_port, bus_number,
1680 devfn >> 3, devfn & 0x7, reg);
1683 *val = cvmx_pcie_config_read8(pcie_port, bus_number,
1684 devfn >> 3, devfn & 0x7, reg);
1688 set_cfg_read_retry(cfg_retry_cnt);
1692 (enable_pcie_14459_war)) {
1693 cfg_retry = is_cfg_retry();
1695 if (retry_cnt > max_retry_cnt) {
1696 pr_err(
" pcie cfg_read retries failed. retry_cnt=%d\n",
1701 }
while (cfg_retry);
1704 set_cfg_read_retry(cfg_retry_cnt);
1705 pr_debug(
"val=%08x : tries=%02d\n", *val, retry_cnt);
1712 static int octeon_pcie0_read_config(
struct pci_bus *bus,
unsigned int devfn,
1713 int reg,
int size,
u32 *val)
1715 return octeon_pcie_read_config(0, bus, devfn, reg, size, val);
1718 static int octeon_pcie1_read_config(
struct pci_bus *bus,
unsigned int devfn,
1719 int reg,
int size,
u32 *val)
1721 return octeon_pcie_read_config(1, bus, devfn, reg, size, val);
1724 static int octeon_dummy_read_config(
struct pci_bus *bus,
unsigned int devfn,
1725 int reg,
int size,
u32 *val)
1733 static int octeon_pcie_write_config(
unsigned int pcie_port,
struct pci_bus *bus,
1734 unsigned int devfn,
int reg,
1737 int bus_number = bus->
number;
1741 if ((bus->
parent ==
NULL) && (enable_pcie_bus_num_war[pcie_port]))
1744 pr_debug(
"pcie_cfg_wr port=%d b=%d devfn=0x%03x"
1745 " reg=0x%03x size=%d val=%08x\n", pcie_port, bus_number, devfn,
1751 cvmx_pcie_config_write32(pcie_port, bus_number, devfn >> 3,
1752 devfn & 0x7, reg, val);
1755 cvmx_pcie_config_write16(pcie_port, bus_number, devfn >> 3,
1756 devfn & 0x7, reg, val);
1759 cvmx_pcie_config_write8(pcie_port, bus_number, devfn >> 3,
1760 devfn & 0x7, reg, val);
1765 #if PCI_CONFIG_SPACE_DELAY
1776 static int octeon_pcie0_write_config(
struct pci_bus *bus,
unsigned int devfn,
1777 int reg,
int size,
u32 val)
1779 return octeon_pcie_write_config(0, bus, devfn, reg, size, val);
1782 static int octeon_pcie1_write_config(
struct pci_bus *bus,
unsigned int devfn,
1783 int reg,
int size,
u32 val)
1785 return octeon_pcie_write_config(1, bus, devfn, reg, size, val);
1788 static int octeon_dummy_write_config(
struct pci_bus *bus,
unsigned int devfn,
1789 int reg,
int size,
u32 val)
1794 static struct pci_ops octeon_pcie0_ops = {
1795 octeon_pcie0_read_config,
1796 octeon_pcie0_write_config,
1799 static struct resource octeon_pcie0_mem_resource = {
1800 .name =
"Octeon PCIe0 MEM",
1804 static struct resource octeon_pcie0_io_resource = {
1805 .name =
"Octeon PCIe0 IO",
1810 .pci_ops = &octeon_pcie0_ops,
1811 .mem_resource = &octeon_pcie0_mem_resource,
1812 .io_resource = &octeon_pcie0_io_resource,
1815 static struct pci_ops octeon_pcie1_ops = {
1816 octeon_pcie1_read_config,
1817 octeon_pcie1_write_config,
1820 static struct resource octeon_pcie1_mem_resource = {
1821 .name =
"Octeon PCIe1 MEM",
1825 static struct resource octeon_pcie1_io_resource = {
1826 .name =
"Octeon PCIe1 IO",
1831 .pci_ops = &octeon_pcie1_ops,
1832 .mem_resource = &octeon_pcie1_mem_resource,
1833 .io_resource = &octeon_pcie1_io_resource,
1836 static struct pci_ops octeon_dummy_ops = {
1837 octeon_dummy_read_config,
1838 octeon_dummy_write_config,
1841 static struct resource octeon_dummy_mem_resource = {
1842 .name =
"Virtual PCIe MEM",
1846 static struct resource octeon_dummy_io_resource = {
1847 .name =
"Virtual PCIe IO",
1852 .pci_ops = &octeon_dummy_ops,
1853 .mem_resource = &octeon_dummy_mem_resource,
1854 .io_resource = &octeon_dummy_io_resource,
1859 #define IDT_VENDOR_ID 0x111d
1861 if ((deviceid & 0xffff) == IDT_VENDOR_ID)
1871 static int __init octeon_pcie_setup(
void)
1875 int srio_war15205 = 0,
port;
1901 cvmx_pcie_get_io_base_address(1) -
1902 cvmx_pcie_get_io_base_address(0) + cvmx_pcie_get_io_size(1) - 1;
1910 octeon_dummy_controller.io_map_base = -1;
1911 octeon_dummy_controller.mem_resource->start = (1ull<<48);
1912 octeon_dummy_controller.mem_resource->end = (1ull<<48);
1918 host_mode = npei_ctl_status.s.host_mode;
1923 host_mode = mio_rst_ctl.s.host_mode;
1928 pr_notice(
"PCIe: Initializing port 0\n");
1933 if (sriox_status_reg.s.srio) {
1938 result = cvmx_pcie_rc_initialize(0);
1942 octeon_pcie0_controller.mem_offset =
1943 cvmx_pcie_get_mem_base_address(0);
1945 octeon_pcie0_controller.io_map_base =
1948 octeon_pcie0_controller.io_offset = 0;
1956 octeon_pcie0_controller.mem_resource->start =
1957 cvmx_pcie_get_mem_base_address(0) +
1959 octeon_pcie0_controller.mem_resource->end =
1960 cvmx_pcie_get_mem_base_address(0) +
1961 cvmx_pcie_get_mem_size(0) - 1;
1966 octeon_pcie0_controller.io_resource->start = 4 << 10;
1967 octeon_pcie0_controller.io_resource->end =
1968 cvmx_pcie_get_io_size(0) - 1;
1971 device0 = cvmx_pcie_config_read32(0, 0, 0, 0, 0);
1972 enable_pcie_bus_num_war[0] =
1973 device_needs_bus_num_war(device0);
1976 pr_notice(
"PCIe: Port 0 in endpoint mode, skipping.\n");
1991 if (dbg_data.cn52xx.qlm0_link_width)
1997 host_mode = mio_rst_ctl.s.host_mode;
2001 pr_notice(
"PCIe: Initializing port 1\n");
2006 if (sriox_status_reg.s.srio) {
2011 result = cvmx_pcie_rc_initialize(1);
2015 octeon_pcie1_controller.mem_offset =
2016 cvmx_pcie_get_mem_base_address(1);
2026 octeon_pcie1_controller.io_map_base =
2029 octeon_pcie1_controller.io_offset =
2030 cvmx_pcie_get_io_base_address(1) -
2031 cvmx_pcie_get_io_base_address(0);
2038 octeon_pcie1_controller.mem_resource->start =
2039 cvmx_pcie_get_mem_base_address(1) + (4ul << 30) -
2041 octeon_pcie1_controller.mem_resource->end =
2042 cvmx_pcie_get_mem_base_address(1) +
2043 cvmx_pcie_get_mem_size(1) - 1;
2048 octeon_pcie1_controller.io_resource->start =
2049 cvmx_pcie_get_io_base_address(1) -
2050 cvmx_pcie_get_io_base_address(0);
2051 octeon_pcie1_controller.io_resource->end =
2052 octeon_pcie1_controller.io_resource->start +
2053 cvmx_pcie_get_io_size(1) - 1;
2056 device0 = cvmx_pcie_config_read32(1, 0, 0, 0, 0);
2057 enable_pcie_bus_num_war[1] =
2058 device_needs_bus_num_war(device0);
2061 pr_notice(
"PCIe: Port 1 not in root complex mode, skipping.\n");
2078 if (srio_war15205 == 1) {
2080 sli_ctl_portx.s.inta_map = 1;
2081 sli_ctl_portx.s.intb_map = 1;
2082 sli_ctl_portx.s.intc_map = 1;
2083 sli_ctl_portx.s.intd_map = 1;
2087 sli_ctl_portx.s.inta_map = 0;
2088 sli_ctl_portx.s.intb_map = 0;
2089 sli_ctl_portx.s.intc_map = 0;
2090 sli_ctl_portx.s.intd_map = 0;