6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #include <linux/module.h>
11 #include <linux/pci.h>
13 #include <linux/netdevice.h>
14 #include <linux/ethtool.h>
18 #include <linux/bitops.h>
19 #include <linux/mii.h>
21 #include <linux/if_ether.h>
22 #include <linux/if_vlan.h>
25 #include <linux/ipv6.h>
29 #include <linux/list.h>
30 #include <linux/slab.h>
/* Driver identity strings reported via ethtool/modinfo. */
#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"
57 writel(val & 0xffffffff, reg);
58 writel(val >> 32, reg + 0x4UL);
69 #define NIU_TX_TIMEOUT (5 * HZ)
/* 64-bit register accessors for each register window of the chip.
 * All of them expect a local "struct niu *np" to be in scope at the
 * use site; the _mac/_ipp/_pcs/_xpcs variants add the per-block
 * offset cached in the niu structure.
 */
#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))
/* Default netif_msg level: log driver, probe and link events. */
#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

/* Module parameter; -1 means "use NIU_MSG_DEFAULT" at probe time. */
static int debug = -1;
/* Serialize access to state shared by all ports of one NIU device
 * (the "parent"); IRQ-safe because link/timer paths run in softirq.
 */
#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)
98 static int serdes_init_10g_serdes(
struct niu *np);
100 static int __niu_wait_bits_clear_mac(
struct niu *np,
unsigned long reg,
103 while (--limit >= 0) {
115 static int __niu_set_and_wait_clear_mac(
struct niu *np,
unsigned long reg,
116 u64 bits,
int limit,
int delay,
122 err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
124 netdev_err(np->
dev,
"bits (%llx) of register %s would not clear, val[%llx]\n",
125 (
unsigned long long)bits, reg_name,
/* Compile-time validate LIMIT/DELAY, then set BITS in a MAC register
 * and poll until the hardware clears them.
 */
#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
135 static int __niu_wait_bits_clear_ipp(
struct niu *np,
unsigned long reg,
136 u64 bits,
int limit,
int delay)
138 while (--limit >= 0) {
150 static int __niu_set_and_wait_clear_ipp(
struct niu *np,
unsigned long reg,
151 u64 bits,
int limit,
int delay,
152 const char *reg_name)
161 err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
163 netdev_err(np->
dev,
"bits (%llx) of register %s would not clear, val[%llx]\n",
164 (
unsigned long long)bits, reg_name,
/* Compile-time validate LIMIT/DELAY, then set BITS in an IPP register
 * and poll until the hardware clears them.
 */
#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
174 static int __niu_wait_bits_clear(
struct niu *np,
unsigned long reg,
175 u64 bits,
int limit,
int delay)
177 while (--limit >= 0) {
/* Compile-time validate LIMIT/DELAY, then poll a generic register
 * until BITS clear.
 */
#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})
194 static int __niu_set_and_wait_clear(
struct niu *np,
unsigned long reg,
195 u64 bits,
int limit,
int delay,
196 const char *reg_name)
201 err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
203 netdev_err(np->
dev,
"bits (%llx) of register %s would not clear, val[%llx]\n",
204 (
unsigned long long)bits, reg_name,
205 (
unsigned long long)
nr64(reg));
/* Compile-time validate LIMIT/DELAY, then set BITS in a generic
 * register and poll until the hardware clears them.
 */
#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
214 static void niu_ldg_rearm(
struct niu *np,
struct niu_ldg *
lp,
int on)
224 static int niu_ldn_irq_enable(
struct niu *np,
int ldn,
int on)
226 unsigned long mask_reg,
bits;
236 mask_reg =
LD_IM1(ldn - 64);
240 val =
nr64(mask_reg);
250 static int niu_enable_ldn_in_ldg(
struct niu *np,
struct niu_ldg *
lp,
int on)
255 for (i = 0; i <=
LDN_MAX; i++) {
261 err = niu_ldn_irq_enable(np, i, on);
268 static int niu_enable_interrupts(
struct niu *np,
int on)
272 for (i = 0; i < np->
num_ldg; i++) {
276 err = niu_enable_ldn_in_ldg(np, lp, on);
280 for (i = 0; i < np->
num_ldg; i++)
281 niu_ldg_rearm(np, &np->
ldg[i], on);
288 return type << (port * 2);
296 static int mdio_wait(
struct niu *np)
301 while (--limit > 0) {
312 static int mdio_read(
struct niu *np,
int port,
int dev,
int reg)
322 return mdio_wait(np);
325 static int mdio_write(
struct niu *np,
int port,
int dev,
int reg,
int data)
342 static int mii_read(
struct niu *np,
int port,
int reg)
345 return mdio_wait(np);
348 static int mii_write(
struct niu *np,
int port,
int reg,
int data)
360 static int esr2_set_tx_cfg(
struct niu *np,
unsigned long channel,
u32 val)
374 static int esr2_set_rx_cfg(
struct niu *np,
unsigned long channel,
u32 val)
389 static int serdes_init_niu_10g_fiber(
struct niu *np)
411 for (i = 0; i < 4; i++) {
412 int err = esr2_set_tx_cfg(np, i, tx_cfg);
417 for (i = 0; i < 4; i++) {
418 int err = esr2_set_rx_cfg(np, i, rx_cfg);
426 static int serdes_init_niu_1g_serdes(
struct niu *np)
429 u16 pll_cfg, pll_sts;
461 netdev_err(np->
dev,
"NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
471 netdev_err(np->
dev,
"NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
479 for (i = 0; i < 4; i++) {
480 err = esr2_set_tx_cfg(np, i, tx_cfg);
485 for (i = 0; i < 4; i++) {
486 err = esr2_set_rx_cfg(np, i, rx_cfg);
506 while (max_retry--) {
508 if ((
sig & mask) == val)
514 if ((
sig & mask) != val) {
515 netdev_err(np->
dev,
"Port %u signal bits [%08x] are not [%08x]\n",
516 np->
port, (
int)(
sig & mask), (
int)val);
523 static int serdes_init_niu_10g_serdes(
struct niu *np)
526 u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
553 netdev_err(np->
dev,
"NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
563 netdev_err(np->
dev,
"NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
571 for (i = 0; i < 4; i++) {
572 err = esr2_set_tx_cfg(np, i, tx_cfg);
577 for (i = 0; i < 4; i++) {
578 err = esr2_set_rx_cfg(np, i, rx_cfg);
612 while (max_retry--) {
614 if ((
sig & mask) == val)
620 if ((
sig & mask) != val) {
621 pr_info(
"NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
622 np->
port, (
int)(
sig & mask), (
int)val);
625 err = serdes_init_niu_1g_serdes(np);
630 netdev_err(np->
dev,
"Port %u 10G/1G SERDES Link Failed\n",
638 static int esr_read_rxtx_ctrl(
struct niu *np,
unsigned long chan,
u32 *val)
644 *val = (err & 0xffff);
648 *val |= ((err & 0xffff) << 16);
654 static int esr_read_glue0(
struct niu *np,
unsigned long chan,
u32 *val)
661 *val = (err & 0xffff);
665 *val |= ((err & 0xffff) << 16);
672 static int esr_read_reset(
struct niu *np,
u32 *val)
679 *val = (err & 0xffff);
683 *val |= ((err & 0xffff) << 16);
690 static int esr_write_rxtx_ctrl(
struct niu *np,
unsigned long chan,
u32 val)
702 static int esr_write_glue0(
struct niu *np,
unsigned long chan,
u32 val)
714 static int esr_reset(
struct niu *np)
741 err = esr_read_reset(np, &reset);
745 netdev_err(np->
dev,
"Port %u ESR_RESET did not clear [%08x]\n",
753 static int serdes_init_10g(
struct niu *np)
798 nw64(ctrl_reg, ctrl_val);
799 nw64(test_cfg_reg, test_cfg_val);
802 for (i = 0; i < 4; i++) {
803 u32 rxtx_ctrl, glue0;
805 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
808 err = esr_read_glue0(np, i, &glue0);
825 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
828 err = esr_write_glue0(np, i, glue0);
865 if ((sig & mask) !=
val) {
870 netdev_err(np->
dev,
"Port %u signal bits [%08x] are not [%08x]\n",
871 np->
port, (
int)(sig & mask), (
int)val);
879 static int serdes_init_1g(
struct niu *np)
906 static int serdes_init_1g_serdes(
struct niu *np)
909 unsigned long ctrl_reg, test_cfg_reg, pll_cfg,
i;
912 u64 reset_val, val_rd;
962 val_rd &= ~reset_val;
964 nw64(ctrl_reg, ctrl_val);
965 nw64(test_cfg_reg, test_cfg_val);
970 for (i = 0; i < 4; i++) {
971 u32 rxtx_ctrl, glue0;
973 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
976 err = esr_read_glue0(np, i, &glue0);
993 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
996 err = esr_write_glue0(np, i, glue0);
1018 if ((sig & mask) !=
val) {
1019 netdev_err(np->
dev,
"Port %u signal bits [%08x] are not [%08x]\n",
1020 np->
port, (
int)(sig & mask), (
int)val);
1027 static int link_status_1g_serdes(
struct niu *np,
int *link_up_p)
1033 unsigned long flags;
1052 spin_unlock_irqrestore(&np->
lock, flags);
1058 static int link_status_10g_serdes(
struct niu *np,
int *link_up_p)
1060 unsigned long flags;
1069 return link_status_1g_serdes(np, link_up_p);
1077 if (val2 & 0x01000000)
1080 if ((val & 0x1000ULL) && link_ok) {
1087 spin_unlock_irqrestore(&np->
lock, flags);
1092 static int link_status_mii(
struct niu *np,
int *link_up_p)
1096 int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
1135 estatus = ctrl1000 = stat1000 = 0;
1154 advertising = mii_adv_to_ethtool_adv_t(advert);
1155 advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
1164 neg1000 = (ctrl1000 << 2) & stat1000;
1205 static int link_status_1g_rgmii(
struct niu *np,
int *link_up_p)
1208 u16 current_speed, bmsr;
1209 unsigned long flags;
1252 spin_unlock_irqrestore(&np->
lock, flags);
1258 static int link_status_1g(
struct niu *np,
int *link_up_p)
1261 unsigned long flags;
1266 err = link_status_mii(np, link_up_p);
1270 spin_unlock_irqrestore(&np->
lock, flags);
1274 static int bcm8704_reset(
struct niu *np)
1280 if (err < 0 || err == 0xffff)
1289 while (--limit >= 0) {
1298 netdev_err(np->
dev,
"Port %u PHY will not reset (bmcr=%04x)\n",
1299 np->
port, (err & 0xffff));
1308 static int bcm8704_user_dev3_readback(
struct niu *np,
int reg)
1319 static int bcm8706_init_user_dev3(
struct niu *np)
1341 static int bcm8704_init_user_dev3(
struct niu *np)
1391 static int mrvl88x2011_act_led(
struct niu *np,
int val)
1407 static int mrvl88x2011_led_blink_rate(
struct niu *np,
int rate)
1424 static int xcvr_init_10g_mrvl88x2011(
struct niu *np)
1471 static int xcvr_diag_bcm870x(
struct niu *np)
1473 u16 analog_stat0, tx_alarm_status;
1481 pr_info(
"Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->
port, err);
1486 pr_info(
"Port %u USER_DEV3(0x20) [%04x]\n", np->
port, err);
1492 pr_info(
"Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->
port, err);
1514 tx_alarm_status =
err;
1516 if (analog_stat0 != 0x03fc) {
1517 if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
1518 pr_info(
"Port %u cable not connected or bad cable\n",
1520 }
else if (analog_stat0 == 0x639c) {
1521 pr_info(
"Port %u optical module is bad or missing\n",
1529 static int xcvr_10g_set_lb_bcm870x(
struct niu *np)
1552 static int xcvr_init_10g_bcm8706(
struct niu *np)
1570 err = bcm8704_reset(np);
1574 err = xcvr_10g_set_lb_bcm870x(np);
1578 err = bcm8706_init_user_dev3(np);
1582 err = xcvr_diag_bcm870x(np);
1589 static int xcvr_init_10g_bcm8704(
struct niu *np)
1593 err = bcm8704_reset(np);
1597 err = bcm8704_init_user_dev3(np);
1601 err = xcvr_10g_set_lb_bcm870x(np);
1605 err = xcvr_diag_bcm870x(np);
1612 static int xcvr_init_10g(
struct niu *np)
1627 phy_id = phy_decode(np->
parent->port_phy, np->
port);
1633 err = xcvr_init_10g_mrvl88x2011(np);
1637 err = xcvr_init_10g_bcm8704(np);
1644 static int mii_reset(
struct niu *np)
1653 while (--limit >= 0) {
1662 netdev_err(np->
dev,
"Port %u MII would not reset, bmcr[%04x]\n",
1670 static int xcvr_init_1g_rgmii(
struct niu *np)
1674 u16 bmcr, bmsr, estat;
1680 err = mii_reset(np);
1690 if (bmsr & BMSR_ESTATEN) {
1702 if (bmsr & BMSR_ESTATEN) {
1705 if (estat & ESTATUS_1000_TFULL)
1730 static int mii_init_common(
struct niu *np)
1733 u16 bmcr, bmsr, adv, estat;
1736 err = mii_reset(np);
1746 if (bmsr & BMSR_ESTATEN) {
1780 if ((bmsr & BMSR_10HALF) &&
1783 if ((bmsr & BMSR_10FULL) &&
1786 if ((bmsr & BMSR_100HALF) &&
1789 if ((bmsr & BMSR_100FULL) &&
1796 if (
likely(bmsr & BMSR_ESTATEN)) {
1798 if ((estat & ESTATUS_1000_THALF) &&
1801 if ((estat & ESTATUS_1000_TFULL) &&
1826 if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
1831 if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
1832 (!fulldpx && !(bmsr & BMSR_100HALF)))
1836 if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
1837 (!fulldpx && !(bmsr & BMSR_10HALF)))
1858 pr_info(
"Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
1859 np->
port, bmcr, bmsr);
1865 static int xcvr_init_1g(
struct niu *np)
1874 return mii_init_common(np);
1877 static int niu_xcvr_init(
struct niu *np)
1889 static int niu_serdes_init(
struct niu *np)
1901 static void niu_init_xif(
struct niu *);
1902 static void niu_handle_led(
struct niu *,
int status);
1904 static int niu_link_status_common(
struct niu *np,
int link_up)
1908 unsigned long flags;
1910 if (!netif_carrier_ok(dev) && link_up) {
1920 niu_handle_led(np, 1);
1921 spin_unlock_irqrestore(&np->
lock, flags);
1924 }
else if (netif_carrier_ok(dev) && !link_up) {
1927 niu_handle_led(np, 0);
1928 spin_unlock_irqrestore(&np->
lock, flags);
1935 static int link_status_10g_mrvl(
struct niu *np,
int *link_up_p)
1977 link_up = (pma_status && pcs_status) ? 1 : 0;
1983 mrvl88x2011_act_led(np, (link_up ?
1991 static int link_status_10g_bcm8706(
struct niu *np,
int *link_up_p)
1998 if (err < 0 || err == 0xffff)
2042 static int link_status_10g_bcom(
struct niu *np,
int *link_up_p)
2052 if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
2061 if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
2091 static int link_status_10g(
struct niu *np,
int *link_up_p)
2093 unsigned long flags;
2101 phy_id = phy_decode(np->
parent->port_phy, np->
port);
2107 err = link_status_10g_mrvl(np, link_up_p);
2111 err = link_status_10g_bcom(np, link_up_p);
2116 spin_unlock_irqrestore(&np->
lock, flags);
2121 static int niu_10g_phy_present(
struct niu *np)
2153 if ((sig & mask) !=
val)
2158 static int link_status_10g_hotplug(
struct niu *np,
int *link_up_p)
2160 unsigned long flags;
2163 int phy_present_prev;
2170 phy_present = niu_10g_phy_present(np);
2171 if (phy_present != phy_present_prev) {
2177 err = np->
phy_ops->xcvr_init(np);
2181 if (err == 0xffff) {
2192 "Hotplug PHY Removed\n");
2197 err = link_status_10g_bcm8706(np, link_up_p);
2198 if (err == 0xffff) {
2207 spin_unlock_irqrestore(&np->
lock, flags);
2212 static int niu_link_status(
struct niu *np,
int *link_up_p)
2224 static void niu_timer(
unsigned long __opaque)
2226 struct niu *np = (
struct niu *) __opaque;
2230 err = niu_link_status(np, &link_up);
2232 niu_link_status_common(np, link_up);
2234 if (netif_carrier_ok(np->
dev))
2243 static const struct niu_phy_ops phy_ops_10g_serdes = {
2244 .serdes_init = serdes_init_10g_serdes,
2245 .link_status = link_status_10g_serdes,
2248 static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
2249 .serdes_init = serdes_init_niu_10g_serdes,
2250 .link_status = link_status_10g_serdes,
2253 static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
2254 .serdes_init = serdes_init_niu_1g_serdes,
2255 .link_status = link_status_1g_serdes,
2258 static const struct niu_phy_ops phy_ops_1g_rgmii = {
2259 .xcvr_init = xcvr_init_1g_rgmii,
2260 .link_status = link_status_1g_rgmii,
2263 static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
2264 .serdes_init = serdes_init_niu_10g_fiber,
2265 .xcvr_init = xcvr_init_10g,
2266 .link_status = link_status_10g,
2269 static const struct niu_phy_ops phy_ops_10g_fiber = {
2270 .serdes_init = serdes_init_10g,
2271 .xcvr_init = xcvr_init_10g,
2272 .link_status = link_status_10g,
2275 static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
2276 .serdes_init = serdes_init_10g,
2277 .xcvr_init = xcvr_init_10g_bcm8706,
2278 .link_status = link_status_10g_hotplug,
2281 static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
2282 .serdes_init = serdes_init_niu_10g_fiber,
2283 .xcvr_init = xcvr_init_10g_bcm8706,
2284 .link_status = link_status_10g_hotplug,
2287 static const struct niu_phy_ops phy_ops_10g_copper = {
2288 .serdes_init = serdes_init_10g,
2289 .link_status = link_status_10g,
2292 static const struct niu_phy_ops phy_ops_1g_fiber = {
2293 .serdes_init = serdes_init_1g,
2294 .xcvr_init = xcvr_init_1g,
2295 .link_status = link_status_1g,
2298 static const struct niu_phy_ops phy_ops_1g_copper = {
2299 .xcvr_init = xcvr_init_1g,
2300 .link_status = link_status_1g,
2309 .ops = &phy_ops_10g_fiber_niu,
2310 .phy_addr_base = 16,
2314 .ops = &phy_ops_10g_serdes_niu,
2319 .ops = &phy_ops_1g_serdes_niu,
2324 .ops = &phy_ops_10g_fiber,
2329 .ops = &phy_ops_10g_fiber_hotplug,
2334 .ops = &phy_ops_niu_10g_hotplug,
2339 .ops = &phy_ops_10g_copper,
2340 .phy_addr_base = 10,
2344 .ops = &phy_ops_1g_fiber,
2349 .ops = &phy_ops_1g_copper,
2354 .ops = &phy_ops_1g_rgmii,
2359 .ops = &phy_ops_10g_serdes,
2363 static int niu_atca_port_num[4] = {
2367 static int serdes_init_10g_serdes(
struct niu *np)
2370 unsigned long ctrl_reg, test_cfg_reg, pll_cfg,
i;
2415 nw64(ctrl_reg, ctrl_val);
2416 nw64(test_cfg_reg, test_cfg_val);
2419 for (i = 0; i < 4; i++) {
2420 u32 rxtx_ctrl, glue0;
2423 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
2426 err = esr_read_glue0(np, i, &glue0);
2443 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
2446 err = esr_write_glue0(np, i, glue0);
2480 if ((sig & mask) !=
val) {
2482 err = serdes_init_1g_serdes(np);
2487 netdev_err(np->
dev,
"Port %u 10G/1G SERDES Link Failed\n",
2496 static int niu_determine_phy_disposition(
struct niu *np)
2501 u32 phy_addr_off = 0;
2510 tp = &phy_template_niu_10g_serdes;
2514 tp = &phy_template_niu_1g_serdes;
2520 tp = &phy_template_niu_10g_hotplug;
2526 tp = &phy_template_niu_10g_fiber;
2527 phy_addr_off += np->
port;
2538 tp = &phy_template_1g_copper;
2544 phy_addr_off += (np->
port ^ 0x3);
2549 tp = &phy_template_10g_copper;
2554 tp = &phy_template_1g_fiber;
2559 tp = &phy_template_10g_fiber;
2563 phy_addr_off += np->
port;
2565 tp = &phy_template_10g_fiber_hotplug;
2579 tp = &phy_template_10g_serdes;
2583 tp = &phy_template_1g_rgmii;
2589 phy_addr_off = niu_atca_port_num[np->
port];
2603 static int niu_init_link(
struct niu *np)
2609 err = niu_xcvr_init(np);
2614 err = niu_serdes_init(np);
2618 err = niu_xcvr_init(np);
2620 niu_link_status(np, &ignore);
2624 static void niu_set_primary_mac(
struct niu *np,
unsigned char *
addr)
2626 u16 reg0 = addr[4] << 8 | addr[5];
2627 u16 reg1 = addr[2] << 8 | addr[3];
2628 u16 reg2 = addr[0] << 8 | addr[1];
2641 static int niu_num_alt_addr(
struct niu *np)
2649 static int niu_set_alt_mac(
struct niu *np,
int index,
unsigned char *addr)
2651 u16 reg0 = addr[4] << 8 | addr[5];
2652 u16 reg1 = addr[2] << 8 | addr[3];
2653 u16 reg2 = addr[0] << 8 | addr[1];
2655 if (index >= niu_num_alt_addr(np))
2671 static int niu_enable_alt_mac(
struct niu *np,
int index,
int on)
2676 if (index >= niu_num_alt_addr(np))
2684 mask = 1 << (index + 1);
2697 static void __set_rdc_table_num_hw(
struct niu *np,
unsigned long reg,
2698 int num,
int mac_pref)
2708 static int __set_rdc_table_num(
struct niu *np,
2709 int xmac_index,
int bmac_index,
2710 int rdc_table_num,
int mac_pref)
2720 __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
/* Bind the primary-MAC host-info entry to RDC table "table_num".
 * Slot 17 is the XMAC primary-MAC entry, slot 0 the BMAC one.
 */
static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
					 int mac_pref)
{
	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
}
/* Bind the multicast host-info entry to RDC table "table_num".
 * Slot 16 is the XMAC multicast entry, slot 8 the BMAC one.
 */
static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
					   int mac_pref)
{
	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
}
2736 static int niu_set_alt_mac_rdc_table(
struct niu *np,
int idx,
2737 int table_num,
int mac_pref)
2739 if (idx >= niu_num_alt_addr(np))
2741 return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
2749 port01_mask = 0x00ff;
2750 port23_mask = 0xff00;
2752 if (
hweight64(reg_val & port01_mask) & 1)
2757 if (
hweight64(reg_val & port23_mask) & 1)
2765 static void vlan_tbl_write(
struct niu *np,
unsigned long index,
2778 reg_val = vlan_entry_set_parity(reg_val);
2783 static void vlan_tbl_clear(
struct niu *np)
2791 static int tcam_wait_bit(
struct niu *np,
u64 bit)
2795 while (--limit > 0) {
2806 static int tcam_flush(
struct niu *np,
int index)
2816 static int tcam_read(
struct niu *np,
int index,
2837 static int tcam_write(
struct niu *np,
int index,
2854 static int tcam_assoc_read(
struct niu *np,
int index,
u64 *data)
2867 static int tcam_assoc_write(
struct niu *np,
int index,
u64 assoc_data)
2875 static void tcam_enable(
struct niu *np,
int on)
2902 static int tcam_user_eth_class_enable(
struct niu *np,
unsigned long class,
2924 static int tcam_user_eth_class_set(
struct niu *np,
unsigned long class,
2932 (ether_type & ~(
u64)0xffff) != 0)
2945 static int tcam_user_ip_class_enable(
struct niu *np,
unsigned long class,
2966 static int tcam_user_ip_class_set(
struct niu *np,
unsigned long class,
2967 int ipv6,
u64 protocol_id,
2968 u64 tos_mask,
u64 tos_val)
2975 (protocol_id & ~(
u64)0xff) != 0 ||
2976 (tos_mask & ~(
u64)0xff) != 0 ||
2977 (tos_val & ~(
u64)0xff) != 0)
2994 static int tcam_early_init(
struct niu *np)
3000 tcam_set_lat_and_ratio(np,
3004 err = tcam_user_eth_class_enable(np, i, 0);
3009 err = tcam_user_ip_class_enable(np, i, 0);
3017 static int tcam_flush_all(
struct niu *np)
3021 for (i = 0; i < np->
parent->tcam_num_entries; i++) {
3022 int err = tcam_flush(np, i);
3029 static u64 hash_addr_regval(
unsigned long index,
unsigned long num_entries)
3035 static int hash_read(
struct niu *np,
unsigned long partition,
3036 unsigned long index,
unsigned long num_entries,
3039 u64 val = hash_addr_regval(index, num_entries);
3054 static int hash_write(
struct niu *np,
unsigned long partition,
3055 unsigned long index,
unsigned long num_entries,
3058 u64 val = hash_addr_regval(index, num_entries);
3072 static void fflp_reset(
struct niu *np)
3084 static void fflp_set_timings(
struct niu *np)
3103 static int fflp_set_partition(
struct niu *np,
u64 partition,
3110 (mask & ~(
u64)0x1f) != 0 ||
3111 (base & ~(
u64)0x1f) != 0)
3127 static int fflp_disable_all_partitions(
struct niu *np)
3132 int err = fflp_set_partition(np, 0, 0, 0, 0);
3139 static void fflp_llcsnap_enable(
struct niu *np,
int on)
3150 static void fflp_errors_enable(
struct niu *np,
int on)
3161 static int fflp_hash_clear(
struct niu *np)
3171 int err = hash_write(np, 0, i, 1, (
u64 *) &
ent);
3178 static int fflp_early_init(
struct niu *np)
3181 unsigned long flags;
3191 fflp_set_timings(np);
3192 err = fflp_disable_all_partitions(np);
3195 "fflp_disable_all_partitions failed, err=%d\n",
3201 err = tcam_early_init(np);
3204 "tcam_early_init failed, err=%d\n", err);
3207 fflp_llcsnap_enable(np, 1);
3208 fflp_errors_enable(np, 0);
3212 err = tcam_flush_all(np);
3215 "tcam_flush_all failed, err=%d\n", err);
3219 err = fflp_hash_clear(np);
3222 "fflp_hash_clear failed, err=%d\n",
3237 static int niu_set_flow_key(
struct niu *np,
unsigned long class_code,
u64 key)
3247 static int niu_set_tcam_key(
struct niu *np,
unsigned long class_code,
u64 key)
3258 static u16 tcam_get_index(
struct niu *np,
u16 idx)
3261 if (idx >= (np->
clas.tcam_sz - 1))
3263 return np->
clas.tcam_top + ((idx+1) * np->
parent->num_ports);
3266 static u16 tcam_get_size(
struct niu *np)
3269 return np->
clas.tcam_sz - 1;
3272 static u16 tcam_get_valid_entry_cnt(
struct niu *np)
3275 return np->
clas.tcam_valid_entries - 1;
3281 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
3297 struct page ***
link)
3299 unsigned int h = niu_hash_rxaddr(rp, addr);
3300 struct page *
p, **
pp;
3304 for (; (p = *
pp) !=
NULL; pp = (
struct page **) &p->
mapping) {
3305 if (p->
index == addr) {
3316 static void niu_hash_page(
struct rx_ring_info *rp,
struct page *page,
u64 base)
3318 unsigned int h = niu_hash_rxaddr(rp, base);
3326 gfp_t mask,
int start_index)
3336 addr = np->
ops->map_page(np->
device, page, 0,
3343 niu_hash_page(rp, page, addr);
3346 &compound_head(page)->
_count);
3364 int err = niu_rbr_add_page(np, rp, mask, index);
3399 page = niu_find_rxpage(rp, addr, &link);
3404 *link = (
struct page *) page->
mapping;
3433 return niu_rx_pkt_ignore(np, rp);
3438 u32 rcr_size, append_size;
3451 page = niu_find_rxpage(rp, addr, &link);
3457 append_size = rcr_size;
3468 skb_checksum_none_assert(skb);
3470 append_size = len - skb->
len;
3472 niu_rx_skb_append(skb, page, off, append_size, rcr_size);
3474 *link = (
struct page *) page->
mapping;
3484 if (!(val & RCR_ENTRY_MULTI))
3519 err = niu_rbr_add_page(np, rp, mask, index);
3523 index += blocks_per_page;
3539 struct page *
next = (
struct page *) page->
mapping;
3558 static int release_tx_packet(
struct niu *np,
struct tx_ring_info *rp,
int idx)
3573 len = skb_headlen(skb);
3586 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3590 skb_frag_size(&skb_shinfo(skb)->frags[i]),
3600 #define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4)
3610 txq = netdev_get_tx_queue(np->
dev, index);
3625 "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
3628 cons = release_tx_packet(np, rp, cons);
3634 if (
unlikely(netif_tx_queue_stopped(txq) &&
3637 if (netif_tx_queue_stopped(txq) &&
3639 netif_tx_wake_queue(txq);
3640 __netif_tx_unlock(txq);
3644 static inline void niu_sync_rx_discard_stats(
struct niu *np,
3673 dev_err(np->
device,
"rx-%d: Counter overflow RXMISC discard\n",
3677 "rx-%d: MISC drop=%u over=%u\n",
3678 rx_channel, misc, misc-limit);
3688 dev_err(np->
device,
"rx-%d: Counter overflow WRED discard\n", rx_channel);
3691 "rx-%d: WRED drop=%u over=%u\n",
3692 rx_channel, wred, wred-limit);
3699 int qlen, rcr_done = 0, work_done = 0;
3714 "%s(chan[%d]), stat[%llx] qlen=%d\n",
3715 __func__, rp->
rx_channel, (
unsigned long long)stat, qlen);
3717 rcr_done = work_done = 0;
3718 qlen =
min(qlen, budget);
3719 while (work_done < qlen) {
3720 rcr_done += niu_process_rx_pkt(napi, np, rp);
3740 niu_sync_rx_discard_stats(np, rp, 0x7FFF);
3745 static int niu_poll_core(
struct niu *np,
struct niu_ldg *lp,
int budget)
3748 u32 tx_vec = (v0 >> 32);
3749 u32 rx_vec = (v0 & 0xffffffff);
3750 int i, work_done = 0;
3753 "%s() v0[%016llx]\n", __func__, (
unsigned long long)v0);
3758 niu_tx_work(np, rp);
3768 this_work_done = niu_rx_work(&lp->
napi, np, rp,
3771 budget -= this_work_done;
3772 work_done += this_work_done;
3780 static int niu_poll(
struct napi_struct *napi,
int budget)
3783 struct niu *np = lp->
np;
3786 work_done = niu_poll_core(np, lp, budget);
3788 if (work_done < budget) {
3790 niu_ldg_rearm(np, lp, 1);
3795 static void niu_log_rxchan_errors(
struct niu *np,
struct rx_ring_info *rp,
3798 netdev_err(np->
dev,
"RX channel %u errors ( ", rp->
rx_channel);
3843 netdev_err(np->
dev,
"RX channel %u error, stat[%llx]\n",
3845 (
unsigned long long) stat);
3847 niu_log_rxchan_errors(np, rp, stat);
3856 static void niu_log_txchan_errors(
struct niu *np,
struct tx_ring_info *rp,
3859 netdev_err(np->
dev,
"TX channel %u errors ( ", rp->
tx_channel);
3889 netdev_err(np->
dev,
"TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
3891 (
unsigned long long)cs,
3892 (
unsigned long long)logh,
3893 (
unsigned long long)logl);
3895 niu_log_txchan_errors(np, rp, cs);
3900 static int niu_mif_interrupt(
struct niu *np)
3912 netdev_err(np->
dev,
"MIF interrupt, stat[%llx] phy_mdint(%d)\n",
3913 (
unsigned long long)mif_status, phy_mdint);
3918 static void niu_xmac_interrupt(
struct niu *np)
3952 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3990 static void niu_bmac_interrupt(
struct niu *np)
4026 static int niu_mac_interrupt(
struct niu *np)
4029 niu_xmac_interrupt(np);
4031 niu_bmac_interrupt(np);
4036 static void niu_log_device_error(
struct niu *np,
u64 stat)
4038 netdev_err(np->
dev,
"Core device errors ( ");
4066 static int niu_device_error(
struct niu *np)
4070 netdev_err(np->
dev,
"Core device error, stat[%llx]\n",
4071 (
unsigned long long)stat);
4073 niu_log_device_error(np, stat);
4078 static int niu_slowpath_interrupt(
struct niu *np,
struct niu_ldg *lp,
4088 if (v1 & 0x00000000ffffffffULL) {
4089 u32 rx_vec = (v1 & 0xffffffff);
4095 int r = niu_rx_error(np, rp);
4106 if (v1 & 0x7fffffff00000000ULL) {
4107 u32 tx_vec = (v1 >> 32) & 0x7fffffff;
4113 int r = niu_tx_error(np, rp);
4119 if ((v0 | v1) & 0x8000000000000000ULL) {
4120 int r = niu_mif_interrupt(np);
4126 int r = niu_mac_interrupt(np);
4131 int r = niu_device_error(np);
4138 niu_enable_interrupts(np, 0);
4154 "%s() stat[%llx]\n", __func__, (
unsigned long long)stat);
4163 "%s() cs[%llx]\n", __func__, (
unsigned long long)rp->
tx_cs);
4166 static void __niu_fastpath_interrupt(
struct niu *np,
int ldg,
u64 v0)
4172 tx_vec = (v0 >> 32);
4173 rx_vec = (v0 & 0xffffffff);
4179 if (parent->
ldg_map[ldn] != ldg)
4184 niu_rxchan_intr(np, rp, ldn);
4191 if (parent->
ldg_map[ldn] != ldg)
4196 niu_txchan_intr(np, rp, ldn);
4200 static void niu_schedule_napi(
struct niu *np,
struct niu_ldg *lp,
4207 __niu_fastpath_interrupt(np, lp->
ldg_num, v0);
4215 struct niu *np = lp->
np;
4217 unsigned long flags;
4231 pr_cont(
" v0[%llx] v1[%llx] v2[%llx]\n",
4232 (
unsigned long long) v0,
4233 (
unsigned long long) v1,
4234 (
unsigned long long) v2);
4237 spin_unlock_irqrestore(&np->
lock, flags);
4242 int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
4247 niu_schedule_napi(np, lp, v0, v1, v2);
4249 niu_ldg_rearm(np, lp, 1);
4251 spin_unlock_irqrestore(&np->
lock, flags);
4256 static void niu_free_rx_ring_info(
struct niu *np,
struct rx_ring_info *rp)
4273 niu_rbr_free(np, rp);
4276 MAX_RBR_RING_SIZE *
sizeof(
__le32),
4286 static void niu_free_tx_ring_info(
struct niu *np,
struct tx_ring_info *rp)
4299 (
void) release_tx_packet(np, rp, i);
4303 MAX_TX_RING_SIZE *
sizeof(
__le64),
4313 static void niu_free_channels(
struct niu *np)
4321 niu_free_rx_ring_info(np, rp);
4332 niu_free_tx_ring_info(np, rp);
4340 static int niu_alloc_rx_ring_info(
struct niu *np,
4345 rp->
rxhash = kzalloc(MAX_RBR_RING_SIZE *
sizeof(
struct page *),
4355 if ((
unsigned long)rp->
mbox & (64
UL - 1)) {
4356 netdev_err(np->
dev,
"Coherent alloc gives misaligned RXDMA mailbox %p\n",
4366 if ((
unsigned long)rp->
rcr & (64
UL - 1)) {
4367 netdev_err(np->
dev,
"Coherent alloc gives misaligned RXDMA RCR table %p\n",
4375 MAX_RBR_RING_SIZE *
sizeof(
__le32),
4379 if ((
unsigned long)rp->
rbr & (64
UL - 1)) {
4380 netdev_err(np->
dev,
"Coherent alloc gives misaligned RXDMA RBR table %p\n",
4391 static void niu_set_max_burst(
struct niu *np,
struct tx_ring_info *rp)
4393 int mtu = np->
dev->mtu;
4403 static int niu_alloc_tx_ring_info(
struct niu *np,
4413 if ((
unsigned long)rp->
mbox & (64
UL - 1)) {
4414 netdev_err(np->
dev,
"Coherent alloc gives misaligned TXDMA mailbox %p\n",
4420 MAX_TX_RING_SIZE *
sizeof(
__le64),
4424 if ((
unsigned long)rp->
descr & (64
UL - 1)) {
4425 netdev_err(np->
dev,
"Coherent alloc gives misaligned TXDMA descr table %p\n",
4438 niu_set_max_burst(np, rp);
4470 static int niu_alloc_channels(
struct niu *np)
4473 int first_rx_channel, first_tx_channel;
4474 int num_rx_rings, num_tx_rings;
4480 first_rx_channel = first_tx_channel = 0;
4481 for (i = 0; i <
port; i++) {
4489 rx_rings = kcalloc(num_rx_rings,
sizeof(
struct rx_ring_info),
4499 netif_set_real_num_rx_queues(np->
dev, num_rx_rings);
4507 err = niu_alloc_rx_ring_info(np, rp);
4511 niu_size_rbr(np, rp);
4529 tx_rings = kcalloc(num_tx_rings,
sizeof(
struct tx_ring_info),
4547 err = niu_alloc_tx_ring_info(np, rp);
4555 niu_free_channels(np);
4559 static int niu_tx_cs_sng_poll(
struct niu *np,
int channel)
4563 while (--limit > 0) {
4571 static int niu_tx_channel_stop(
struct niu *np,
int channel)
4578 return niu_tx_cs_sng_poll(np, channel);
4581 static int niu_tx_cs_reset_poll(
struct niu *np,
int channel)
4585 while (--limit > 0) {
4593 static int niu_tx_channel_reset(
struct niu *np,
int channel)
4601 err = niu_tx_cs_reset_poll(np, channel);
4608 static int niu_tx_channel_lpage_init(
struct niu *np,
int channel)
4629 static void niu_txc_enable_port(
struct niu *np,
int on)
4631 unsigned long flags;
4648 static void niu_txc_set_imask(
struct niu *np,
u64 imask)
4650 unsigned long flags;
4660 static void niu_txc_port_dma_enable(
struct niu *np,
int on)
4668 val |= (1 << np->
tx_rings[i].tx_channel);
4673 static int niu_init_one_tx_channel(
struct niu *np,
struct tx_ring_info *rp)
4678 err = niu_tx_channel_stop(np, channel);
4682 err = niu_tx_channel_reset(np, channel);
4686 err = niu_tx_channel_lpage_init(np, channel);
4695 netdev_err(np->
dev,
"TX ring channel %d DMA addr (%llx) is not aligned\n",
4696 channel, (
unsigned long long)rp->
descr_dma);
4713 netdev_err(np->
dev,
"TX ring channel %d MBOX addr (%llx) has invalid bits\n",
4714 channel, (
unsigned long long)rp->
mbox_dma);
4727 static void niu_init_rdc_groups(
struct niu *np)
4733 struct rdc_table *tbl = &tp->
tables[
i];
4734 int this_table = first_table_num +
i;
4745 static void niu_init_drr_weight(
struct niu *np)
4763 static int niu_init_hostinfo(
struct niu *np)
4767 int i,
err, num_alt = niu_num_alt_addr(np);
4770 err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
4774 err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
4778 for (i = 0; i < num_alt; i++) {
4779 err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
4787 static int niu_rx_channel_reset(
struct niu *np,
int channel)
4794 static int niu_rx_channel_lpage_init(
struct niu *np,
int channel)
4813 static void niu_rx_channel_wred_init(
struct niu *np,
struct rx_ring_info *rp)
4904 static int niu_enable_rx_channel(
struct niu *np,
int channel,
int on)
4916 while (--limit > 0) {
4926 static int niu_init_one_rx_channel(
struct niu *np,
struct rx_ring_info *rp)
4931 err = niu_rx_channel_reset(np, channel);
4935 err = niu_rx_channel_lpage_init(np, channel);
4939 niu_rx_channel_wred_init(np, rp);
4954 err = niu_compute_rbr_cfig_b(rp, &val);
4966 err = niu_enable_rx_channel(np, channel, 1);
4979 static int niu_init_rx_channels(
struct niu *np)
4981 unsigned long flags;
4992 niu_init_rdc_groups(np);
4993 niu_init_drr_weight(np);
4995 err = niu_init_hostinfo(np);
5002 err = niu_init_one_rx_channel(np, rp);
5010 static int niu_set_ip_frag_rule(
struct niu *np)
5023 memset(tp, 0,
sizeof(*tp));
5028 err = tcam_write(np, index, tp->
key, tp->
key_mask);
5031 err = tcam_assoc_write(np, index, tp->
assoc_data);
5040 static int niu_init_classifier_hw(
struct niu *np)
5049 err = niu_init_hostinfo(np);
5056 vlan_tbl_write(np, i, np->
port,
5063 err = niu_set_alt_mac_rdc_table(np, ap->
alt_mac_num,
5072 err = niu_set_tcam_key(np, i, parent->
tcam_key[index]);
5075 err = niu_set_flow_key(np, i, parent->
flow_key[index]);
5080 err = niu_set_ip_frag_rule(np);
5089 static int niu_zcp_write(
struct niu *np,
int index,
u64 *data)
5106 static int niu_zcp_read(
struct niu *np,
int index,
u64 *data)
5113 netdev_err(np->
dev,
"ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
5126 netdev_err(np->
dev,
"ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
5140 static void niu_zcp_cfifo_reset(
struct niu *np)
5152 static int niu_init_zcp(
struct niu *np)
5154 u64 data[5], rbuf[5];
5158 if (np->
port == 0 || np->
port == 1)
5171 for (i = 0; i <
max; i++) {
5172 err = niu_zcp_write(np, i, data);
5175 err = niu_zcp_read(np, i, rbuf);
5180 niu_zcp_cfifo_reset(np);
5189 static void niu_ipp_write(
struct niu *np,
int index,
u64 *data)
5203 static void niu_ipp_read(
struct niu *np,
int index,
u64 *data)
5213 static int niu_ipp_reset(
struct niu *np)
5216 1000, 100,
"IPP_CFIG");
5219 static int niu_init_ipp(
struct niu *np)
5221 u64 data[5], rbuf[5],
val;
5225 if (np->
port == 0 || np->
port == 1)
5238 for (i = 0; i <
max; i++) {
5239 niu_ipp_write(np, i, data);
5240 niu_ipp_read(np, i, rbuf);
5246 err = niu_ipp_reset(np);
5270 static void niu_handle_led(
struct niu *np,
int status)
5289 static void niu_init_xif_xmac(
struct niu *np)
5346 static void niu_init_xif_bmac(
struct niu *np)
5376 static void niu_init_xif(
struct niu *np)
5379 niu_init_xif_xmac(np);
5381 niu_init_xif_bmac(np);
5384 static void niu_pcs_mii_reset(
struct niu *np)
5396 static void niu_xpcs_reset(
struct niu *np)
5408 static int niu_init_pcs(
struct niu *np)
5420 niu_pcs_mii_reset(np);
5452 niu_pcs_mii_reset(np);
5462 niu_pcs_mii_reset(np);
5472 static int niu_reset_tx_xmac(
struct niu *np)
5477 1000, 100,
"XTXMAC_SW_RST");
5480 static int niu_reset_tx_bmac(
struct niu *np)
5486 while (--limit >= 0) {
5492 dev_err(np->
device,
"Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
5501 static int niu_reset_tx_mac(
struct niu *np)
5504 return niu_reset_tx_xmac(np);
5506 return niu_reset_tx_bmac(np);
5509 static void niu_init_tx_xmac(
struct niu *np,
u64 min,
u64 max)
5545 static void niu_init_tx_bmac(
struct niu *np,
u64 min,
u64 max)
5562 static void niu_init_tx_mac(
struct niu *np)
5578 niu_init_tx_xmac(np, min, max);
5580 niu_init_tx_bmac(np, min, max);
5583 static int niu_reset_rx_xmac(
struct niu *np)
5590 while (--limit >= 0) {
5597 dev_err(np->
device,
"Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
5606 static int niu_reset_rx_bmac(
struct niu *np)
5612 while (--limit >= 0) {
5618 dev_err(np->
device,
"Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
5627 static int niu_reset_rx_mac(
struct niu *np)
5630 return niu_reset_rx_xmac(np);
5632 return niu_reset_rx_bmac(np);
5635 static void niu_init_rx_xmac(
struct niu *np)
5651 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5652 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5687 static void niu_init_rx_bmac(
struct niu *np)
5702 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5703 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5722 static void niu_init_rx_mac(
struct niu *np)
5724 niu_set_primary_mac(np, np->
dev->dev_addr);
5727 niu_init_rx_xmac(np);
5729 niu_init_rx_bmac(np);
5732 static void niu_enable_tx_xmac(
struct niu *np,
int on)
5743 static void niu_enable_tx_bmac(
struct niu *np,
int on)
5754 static void niu_enable_tx_mac(
struct niu *np,
int on)
5757 niu_enable_tx_xmac(np, on);
5759 niu_enable_tx_bmac(np, on);
5762 static void niu_enable_rx_xmac(
struct niu *np,
int on)
5781 static void niu_enable_rx_bmac(
struct niu *np,
int on)
5800 static void niu_enable_rx_mac(
struct niu *np,
int on)
5803 niu_enable_rx_xmac(np, on);
5805 niu_enable_rx_bmac(np, on);
5808 static int niu_init_mac(
struct niu *np)
5813 err = niu_init_pcs(np);
5817 err = niu_reset_tx_mac(np);
5820 niu_init_tx_mac(np);
5821 err = niu_reset_rx_mac(np);
5824 niu_init_rx_mac(np);
5831 niu_init_tx_mac(np);
5832 niu_enable_tx_mac(np, 1);
5834 niu_enable_rx_mac(np, 1);
5839 static void niu_stop_one_tx_channel(
struct niu *np,
struct tx_ring_info *rp)
5844 static void niu_stop_tx_channels(
struct niu *np)
5851 niu_stop_one_tx_channel(np, rp);
5855 static void niu_reset_one_tx_channel(
struct niu *np,
struct tx_ring_info *rp)
5860 static void niu_reset_tx_channels(
struct niu *np)
5867 niu_reset_one_tx_channel(np, rp);
5871 static void niu_stop_one_rx_channel(
struct niu *np,
struct rx_ring_info *rp)
5876 static void niu_stop_rx_channels(
struct niu *np)
5883 niu_stop_one_rx_channel(np, rp);
5887 static void niu_reset_one_rx_channel(
struct niu *np,
struct rx_ring_info *rp)
5891 (
void) niu_rx_channel_reset(np, channel);
5894 (
void) niu_enable_rx_channel(np, channel, 0);
5897 static void niu_reset_rx_channels(
struct niu *np)
5904 niu_reset_one_rx_channel(np, rp);
5908 static void niu_disable_ipp(
struct niu *np)
5916 while (--limit >= 0 && (rd != wr)) {
5921 (rd != 0 && wr != 1)) {
5922 netdev_err(np->
dev,
"IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
5934 (
void) niu_ipp_reset(np);
5937 static int niu_init_hw(
struct niu *np)
5942 niu_txc_enable_port(np, 1);
5943 niu_txc_port_dma_enable(np, 1);
5944 niu_txc_set_imask(np, 0);
5950 err = niu_init_one_tx_channel(np, rp);
5956 err = niu_init_rx_channels(np);
5958 goto out_uninit_tx_channels;
5961 err = niu_init_classifier_hw(np);
5963 goto out_uninit_rx_channels;
5966 err = niu_init_zcp(np);
5968 goto out_uninit_rx_channels;
5971 err = niu_init_ipp(np);
5973 goto out_uninit_rx_channels;
5976 err = niu_init_mac(np);
5978 goto out_uninit_ipp;
5984 niu_disable_ipp(np);
5986 out_uninit_rx_channels:
5988 niu_stop_rx_channels(np);
5989 niu_reset_rx_channels(np);
5991 out_uninit_tx_channels:
5993 niu_stop_tx_channels(np);
5994 niu_reset_tx_channels(np);
5999 static void niu_stop_hw(
struct niu *np)
6002 niu_enable_interrupts(np, 0);
6005 niu_enable_rx_mac(np, 0);
6008 niu_disable_ipp(np);
6011 niu_stop_tx_channels(np);
6014 niu_stop_rx_channels(np);
6017 niu_reset_tx_channels(np);
6020 niu_reset_rx_channels(np);
6023 static void niu_set_irq_name(
struct niu *np)
6025 int port = np->
port;
6036 for (i = 0; i < np->
num_ldg -
j; i++) {
6037 if (i < np->num_rx_rings)
6046 static int niu_request_irq(
struct niu *np)
6050 niu_set_irq_name(np);
6053 for (i = 0; i < np->
num_ldg; i++) {
6066 for (j = 0; j <
i; j++) {
6074 static void niu_free_irq(
struct niu *np)
6078 for (i = 0; i < np->
num_ldg; i++) {
6085 static void niu_enable_napi(
struct niu *np)
6089 for (i = 0; i < np->
num_ldg; i++)
6090 napi_enable(&np->
ldg[i].napi);
6093 static void niu_disable_napi(
struct niu *np)
6097 for (i = 0; i < np->
num_ldg; i++)
6098 napi_disable(&np->
ldg[i].napi);
6103 struct niu *np = netdev_priv(dev);
6108 err = niu_alloc_channels(np);
6112 err = niu_enable_interrupts(np, 0);
6114 goto out_free_channels;
6116 err = niu_request_irq(np);
6118 goto out_free_channels;
6120 niu_enable_napi(np);
6122 spin_lock_irq(&np->
lock);
6124 err = niu_init_hw(np);
6129 np->
timer.function = niu_timer;
6131 err = niu_enable_interrupts(np, 1);
6136 spin_unlock_irq(&np->
lock);
6139 niu_disable_napi(np);
6143 netif_tx_start_all_queues(dev);
6156 niu_free_channels(np);
6162 static void niu_full_shutdown(
struct niu *np,
struct net_device *dev)
6166 niu_disable_napi(np);
6167 netif_tx_stop_all_queues(dev);
6171 spin_lock_irq(&np->
lock);
6175 spin_unlock_irq(&np->
lock);
6180 struct niu *np = netdev_priv(dev);
6182 niu_full_shutdown(np, dev);
6186 niu_free_channels(np);
6188 niu_handle_led(np, 0);
6193 static void niu_sync_xmac_stats(
struct niu *np)
6218 static void niu_sync_bmac_stats(
struct niu *np)
6231 static void niu_sync_mac_stats(
struct niu *np)
6234 niu_sync_xmac_stats(np);
6236 niu_sync_bmac_stats(np);
6239 static void niu_get_rx_stats(
struct niu *np,
6246 pkts = dropped = errors = bytes = 0;
6255 niu_sync_rx_discard_stats(np, rp, 0);
6270 static void niu_get_tx_stats(
struct niu *np,
6277 pkts = errors = bytes = 0;
6300 struct niu *np = netdev_priv(dev);
6302 if (netif_running(dev)) {
6303 niu_get_rx_stats(np, stats);
6304 niu_get_tx_stats(np, stats);
6310 static void niu_load_hash_xmac(
struct niu *np,
u16 *
hash)
6314 for (i = 0; i < 16; i++)
6318 static void niu_load_hash_bmac(
struct niu *np,
u16 *hash)
6322 for (i = 0; i < 16; i++)
6326 static void niu_load_hash(
struct niu *np,
u16 *hash)
6329 niu_load_hash_xmac(np, hash);
6331 niu_load_hash_bmac(np, hash);
6334 static void niu_set_rx_mode(
struct net_device *dev)
6336 struct niu *np = netdev_priv(dev);
6337 int i, alt_cnt,
err;
6339 unsigned long flags;
6340 u16 hash[16] = { 0, };
6343 niu_enable_rx_mac(np, 0);
6352 if (alt_cnt > niu_num_alt_addr(np)) {
6361 err = niu_set_alt_mac(np, index, ha->
addr);
6363 netdev_warn(dev,
"Error %d adding alt mac %d\n",
6365 err = niu_enable_alt_mac(np, index, 1);
6367 netdev_warn(dev,
"Error %d enabling alt mac %d\n",
6378 for (i = alt_start; i < niu_num_alt_addr(np); i++) {
6379 err = niu_enable_alt_mac(np, i, 0);
6381 netdev_warn(dev,
"Error %d disabling alt mac %d\n",
6386 for (i = 0; i < 16; i++)
6393 hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
6398 niu_load_hash(np, hash);
6400 niu_enable_rx_mac(np, 1);
6401 spin_unlock_irqrestore(&np->
lock, flags);
6404 static int niu_set_mac_addr(
struct net_device *dev,
void *p)
6406 struct niu *np = netdev_priv(dev);
6408 unsigned long flags;
6410 if (!is_valid_ether_addr(addr->
sa_data))
6415 if (!netif_running(dev))
6419 niu_enable_rx_mac(np, 0);
6420 niu_set_primary_mac(np, dev->
dev_addr);
6421 niu_enable_rx_mac(np, 1);
6422 spin_unlock_irqrestore(&np->
lock, flags);
6432 static void niu_netif_stop(
struct niu *np)
6436 niu_disable_napi(np);
6438 netif_tx_disable(np->
dev);
6441 static void niu_netif_start(
struct niu *np)
6447 netif_tx_wake_all_queues(np->
dev);
6449 niu_enable_napi(np);
6451 niu_enable_interrupts(np, 1);
6454 static void niu_reset_buffers(
struct niu *np)
6468 (
struct page *) page->
mapping;
6476 err = niu_rbr_add_page(np, rp,
GFP_ATOMIC, k);
6493 (
void) release_tx_packet(np, rp, j);
6507 unsigned long flags;
6511 if (!netif_running(np->
dev)) {
6512 spin_unlock_irqrestore(&np->
lock, flags);
6516 spin_unlock_irqrestore(&np->
lock, flags);
6526 spin_unlock_irqrestore(&np->
lock, flags);
6528 niu_reset_buffers(np);
6532 err = niu_init_hw(np);
6536 niu_netif_start(np);
6539 spin_unlock_irqrestore(&np->
lock, flags);
6542 static void niu_tx_timeout(
struct net_device *dev)
6544 struct niu *np = netdev_priv(dev);
6552 static void niu_set_txd(
struct tx_ring_info *rp,
int index,
6567 u16 eth_proto, eth_proto_inner;
6568 u64 csum_bits, l3off, ihl,
ret;
6573 eth_proto_inner = eth_proto;
6585 ihl = ip_hdr(skb)->ihl;
6588 ip_proto = ipv6_hdr(skb)->
nexthdr;
6606 start = skb_checksum_start_offset(skb) -
6614 l3off = skb_network_offset(skb) -
6621 ((eth_proto_inner < 1536) ?
TXHDR_LLC : 0) |
6632 struct niu *np = netdev_priv(dev);
6633 unsigned long align, headroom;
6637 unsigned int len, nfg;
6642 i = skb_get_queue_mapping(skb);
6644 txq = netdev_get_tx_queue(dev, i);
6646 if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
6647 netif_tx_stop_queue(txq);
6662 if (skb_headroom(skb) < len) {
6675 align = ((
unsigned long) skb->
data & (16 - 1));
6676 headroom = align +
sizeof(
struct tx_pkt_hdr);
6685 len = skb_headlen(skb);
6702 nfg = skb_shinfo(skb)->nr_frags;
6709 unsigned int this_len = len;
6714 niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
6718 mapping += this_len;
6722 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6725 len = skb_frag_size(frag);
6726 mapping = np->
ops->map_page(np->
device, skb_frag_page(frag),
6733 niu_set_txd(rp, prod, mapping, len, 0, 0);
6738 if (prod < rp->prod)
6745 netif_tx_stop_queue(txq);
6747 netif_tx_wake_queue(txq);
6759 static int niu_change_mtu(
struct net_device *dev,
int new_mtu)
6761 struct niu *np = netdev_priv(dev);
6762 int err, orig_jumbo, new_jumbo;
6772 if (!netif_running(dev) ||
6773 (orig_jumbo == new_jumbo))
6776 niu_full_shutdown(np, dev);
6778 niu_free_channels(np);
6780 niu_enable_napi(np);
6782 err = niu_alloc_channels(np);
6786 spin_lock_irq(&np->
lock);
6788 err = niu_init_hw(np);
6793 np->
timer.function = niu_timer;
6795 err = niu_enable_interrupts(np, 1);
6800 spin_unlock_irq(&np->
lock);
6803 netif_tx_start_all_queues(dev);
6813 static void niu_get_drvinfo(
struct net_device *dev,
6816 struct niu *np = netdev_priv(dev);
6830 struct niu *np = netdev_priv(dev);
6835 memset(cmd, 0,
sizeof(*cmd));
6851 struct niu *np = netdev_priv(dev);
6855 lp->
speed = ethtool_cmd_speed(cmd);
6858 return niu_init_link(np);
6863 struct niu *np = netdev_priv(dev);
6869 struct niu *np = netdev_priv(dev);
6873 static int niu_nway_reset(
struct net_device *dev)
6875 struct niu *np = netdev_priv(dev);
6878 return niu_init_link(np);
6883 static int niu_get_eeprom_len(
struct net_device *dev)
6885 struct niu *np = netdev_priv(dev);
6890 static int niu_get_eeprom(
struct net_device *dev,
6893 struct niu *np = netdev_priv(dev);
6899 if (offset + len < offset)
6907 u32 b_offset, b_count;
6909 b_offset = offset & 3;
6910 b_count = 4 - b_offset;
6915 memcpy(data, ((
char *)&val) + b_offset, b_count);
6934 static void niu_ethflow_to_l3proto(
int flow_type,
u8 *
pid)
6936 switch (flow_type) {
6963 static int niu_class_to_ethflow(
u64 class,
int *flow_type)
6990 case CLASS_CODE_USER_PROG1:
7003 static int niu_ethflow_to_class(
int flow_type,
u64 *
class)
7005 switch (flow_type) {
7041 static u64 niu_flowkey_to_ethflow(
u64 flow_key)
7064 static int niu_ethflow_to_flowkey(
u64 ethflow,
u64 *flow_key)
7095 if (!niu_ethflow_to_class(nfc->
flow_type, &
class))
7098 if (np->
parent->tcam_key[
class - CLASS_CODE_USER_PROG1] &
7102 nfc->
data = niu_flowkey_to_ethflow(np->
parent->flow_key[
class -
7103 CLASS_CODE_USER_PROG1]);
7107 static void niu_get_ip4fs_from_tcam_key(
struct niu_tcam_entry *tp,
7183 static int niu_get_ethtool_tcam_entry(
struct niu *np,
7193 idx = tcam_get_index(np, (
u16)nfc->
fs.location);
7197 netdev_info(np->
dev,
"niu%d: entry [%d] invalid for idx[%d]\n",
7205 ret = niu_class_to_ethflow(
class, &fsp->
flow_type);
7208 netdev_info(np->
dev,
"niu%d: niu_class_to_ethflow failed\n",
7231 niu_get_ip4fs_from_tcam_key(tp, fsp);
7242 niu_get_ip4fs_from_tcam_key(tp, fsp);
7259 nfc->
data = tcam_get_size(np);
7264 static int niu_get_ethtool_tcam_all(
struct niu *np,
7271 unsigned long flags;
7275 nfc->
data = tcam_get_size(np);
7278 for (cnt = 0, i = 0; i < nfc->
data; i++) {
7279 idx = tcam_get_index(np, i);
7300 struct niu *np = netdev_priv(dev);
7305 ret = niu_get_hash_opts(np, cmd);
7311 cmd->
rule_cnt = tcam_get_valid_entry_cnt(np);
7314 ret = niu_get_ethtool_tcam_entry(np, cmd);
7317 ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
7331 unsigned long flags;
7333 if (!niu_ethflow_to_class(nfc->
flow_type, &
class))
7336 if (
class < CLASS_CODE_USER_PROG1 ||
7342 flow_key = np->
parent->tcam_key[
class -
7343 CLASS_CODE_USER_PROG1];
7345 nw64(
TCAM_KEY(
class - CLASS_CODE_USER_PROG1), flow_key);
7346 np->
parent->tcam_key[
class - CLASS_CODE_USER_PROG1] = flow_key;
7351 if (np->
parent->tcam_key[
class - CLASS_CODE_USER_PROG1] &
7354 flow_key = np->
parent->tcam_key[
class -
7355 CLASS_CODE_USER_PROG1];
7356 flow_key &= ~TCAM_KEY_DISC;
7359 np->
parent->tcam_key[
class - CLASS_CODE_USER_PROG1] =
7365 if (!niu_ethflow_to_flowkey(nfc->
data, &flow_key))
7369 nw64(
FLOW_KEY(
class - CLASS_CODE_USER_PROG1), flow_key);
7370 np->
parent->flow_key[
class - CLASS_CODE_USER_PROG1] = flow_key;
7378 int l2_rdc_tab,
u64 class)
7381 u32 sip, dip, sipm, dipm,
spi, spim;
7382 u16 sport, dport, spm, dpm;
7413 tp->
key[2] |= (((
u64)sport << 16) | dport);
7415 niu_ethflow_to_l3proto(fsp->
flow_type, &pid);
7424 niu_ethflow_to_l3proto(fsp->
flow_type, &pid);
7444 static int niu_add_ethtool_tcam_entry(
struct niu *np,
7454 unsigned long flags;
7459 idx = nfc->
fs.location;
7460 if (idx >= tcam_get_size(np))
7465 int add_usr_cls = 0;
7477 class = parent->l3_cls[i];
7486 class = CLASS_CODE_USER_PROG1;
7500 ret = tcam_user_ip_class_set(np,
class, 0,
7507 ret = tcam_user_ip_class_enable(np,
class, 1);
7518 netdev_info(np->
dev,
"niu%d: %s(): Could not find/insert class for pid %d\n",
7525 if (!niu_ethflow_to_class(fsp->
flow_type, &
class)) {
7532 idx = tcam_get_index(np, idx);
7535 memset(tp, 0,
sizeof(*tp));
7544 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table,
class);
7552 netdev_info(np->
dev,
"niu%d: In %s(): flow %d for IPv6 not implemented\n",
7557 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table,
class);
7560 netdev_info(np->
dev,
"niu%d: In %s(): Unknown flow type %d\n",
7571 netdev_info(np->
dev,
"niu%d: In %s(): Invalid RX ring %lld\n",
7572 parent->
index, __func__,
7587 err = tcam_assoc_write(np, idx, tp->
assoc_data);
7595 np->
clas.tcam_valid_entries++;
7602 static int niu_del_ethtool_tcam_entry(
struct niu *np,
u32 loc)
7607 unsigned long flags;
7611 if (loc >= tcam_get_size(np))
7616 idx = tcam_get_index(np, loc);
7626 if (parent->
l3_cls[i] ==
class) {
7630 ret = tcam_user_ip_class_enable(np,
7641 if (i == NIU_L3_PROG_CLS) {
7642 netdev_info(np->
dev,
"niu%d: In %s(): Usr class 0x%llx not found\n",
7643 parent->
index, __func__,
7644 (
unsigned long long)
class);
7650 ret = tcam_flush(np, idx);
7656 np->
clas.tcam_valid_entries--;
7665 struct niu *np = netdev_priv(dev);
7670 ret = niu_set_hash_opts(np, cmd);
7673 ret = niu_add_ethtool_tcam_entry(np, cmd);
7676 ret = niu_del_ethtool_tcam_entry(np, cmd->
fs.location);
7686 static const struct {
7688 } niu_xmac_stat_keys[] = {
7691 {
"tx_fifo_errors" },
7692 {
"tx_overflow_errors" },
7693 {
"tx_max_pkt_size_errors" },
7694 {
"tx_underflow_errors" },
7695 {
"rx_local_faults" },
7696 {
"rx_remote_faults" },
7697 {
"rx_link_faults" },
7698 {
"rx_align_errors" },
7710 {
"rx_code_violations" },
7711 {
"rx_len_errors" },
7712 {
"rx_crc_errors" },
7713 {
"rx_underflows" },
7715 {
"pause_off_state" },
7716 {
"pause_on_state" },
7717 {
"pause_received" },
7720 #define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys)
7722 static const struct {
7724 } niu_bmac_stat_keys[] = {
7725 {
"tx_underflow_errors" },
7726 {
"tx_max_pkt_size_errors" },
7731 {
"rx_align_errors" },
7732 {
"rx_crc_errors" },
7733 {
"rx_len_errors" },
7734 {
"pause_off_state" },
7735 {
"pause_on_state" },
7736 {
"pause_received" },
7739 #define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys)
7741 static const struct {
7743 } niu_rxchan_stat_keys[] = {
7751 #define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys)
7753 static const struct {
7755 } niu_txchan_stat_keys[] = {
7762 #define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys)
7764 static void niu_get_strings(
struct net_device *dev,
u32 stringset,
u8 *data)
7766 struct niu *np = netdev_priv(dev);
7773 memcpy(data, niu_xmac_stat_keys,
7774 sizeof(niu_xmac_stat_keys));
7775 data +=
sizeof(niu_xmac_stat_keys);
7777 memcpy(data, niu_bmac_stat_keys,
7778 sizeof(niu_bmac_stat_keys));
7779 data +=
sizeof(niu_bmac_stat_keys);
7782 memcpy(data, niu_rxchan_stat_keys,
7783 sizeof(niu_rxchan_stat_keys));
7784 data +=
sizeof(niu_rxchan_stat_keys);
7787 memcpy(data, niu_txchan_stat_keys,
7788 sizeof(niu_txchan_stat_keys));
7789 data +=
sizeof(niu_txchan_stat_keys);
7793 static int niu_get_sset_count(
struct net_device *dev,
int stringset)
7795 struct niu *np = netdev_priv(dev);
7807 static void niu_get_ethtool_stats(
struct net_device *dev,
7810 struct niu *np = netdev_priv(dev);
7813 niu_sync_mac_stats(np);
7826 niu_sync_rx_discard_stats(np, rp, 0);
7846 static u64 niu_led_state_save(
struct niu *np)
7854 static void niu_led_state_restore(
struct niu *np,
u64 val)
7862 static void niu_force_led(
struct niu *np,
int on)
7882 static int niu_set_phys_id(
struct net_device *dev,
7886 struct niu *np = netdev_priv(dev);
7888 if (!netif_running(dev))
7897 niu_force_led(np, 1);
7901 niu_force_led(np, 0);
7911 static const struct ethtool_ops niu_ethtool_ops = {
7912 .get_drvinfo = niu_get_drvinfo,
7914 .get_msglevel = niu_get_msglevel,
7915 .set_msglevel = niu_set_msglevel,
7916 .nway_reset = niu_nway_reset,
7917 .get_eeprom_len = niu_get_eeprom_len,
7918 .get_eeprom = niu_get_eeprom,
7919 .get_settings = niu_get_settings,
7920 .set_settings = niu_set_settings,
7921 .get_strings = niu_get_strings,
7922 .get_sset_count = niu_get_sset_count,
7923 .get_ethtool_stats = niu_get_ethtool_stats,
7924 .set_phys_id = niu_set_phys_id,
7925 .get_rxnfc = niu_get_nfc,
7926 .set_rxnfc = niu_set_nfc,
7929 static int niu_ldg_assign_ldn(
struct niu *np,
struct niu_parent *parent,
7934 if (ldn < 0 || ldn > LDN_MAX)
7946 dev_err(np->
device,
"Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
7957 static int niu_set_ldg_timer_res(
struct niu *np,
int res)
7968 static int niu_set_ldg_sid(
struct niu *np,
int ldg,
int func,
int vector)
7970 if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
7971 (func < 0 || func > 3) ||
7972 (vector < 0 || vector > 0x1f))
8000 (
unsigned long long) frame);
8010 if (frame & ESPC_PIO_STAT_READ_END)
8013 if (!(frame & ESPC_PIO_STAT_READ_END)) {
8015 (
unsigned long long) frame);
8025 int err = niu_pci_eeprom_read(np, off);
8031 err = niu_pci_eeprom_read(np, off + 1);
8034 val |= (err & 0xff);
8039 static int __devinit niu_pci_eeprom_read16_swp(
struct niu *np,
u32 off)
8041 int err = niu_pci_eeprom_read(np, off);
8048 err = niu_pci_eeprom_read(np, off + 1);
8052 val |= (err & 0xff) << 8;
8057 static int __devinit niu_pci_vpd_get_propname(
struct niu *np,
8064 for (i = 0; i < namebuf_len; i++) {
8065 int err = niu_pci_eeprom_read(np, off + i);
8072 if (i >= namebuf_len)
8078 static void __devinit niu_vpd_parse_version(
struct niu *np)
8085 for (i = 0; i < len - 5; i++) {
8086 if (!
strncmp(s + i,
"FCode ", 6))
8096 "VPD_SCAN: FCODE major(%d) minor(%d)\n",
8105 static int __devinit niu_pci_vpd_scan_props(
struct niu *np,
8108 unsigned int found_mask = 0;
8109 #define FOUND_MASK_MODEL 0x00000001
8110 #define FOUND_MASK_BMODEL 0x00000002
8111 #define FOUND_MASK_VERS 0x00000004
8112 #define FOUND_MASK_MAC 0x00000008
8113 #define FOUND_MASK_NMAC 0x00000010
8114 #define FOUND_MASK_PHY 0x00000020
8115 #define FOUND_MASK_ALL 0x0000003f
8118 "VPD_SCAN: start[%x] end[%x]\n", start, end);
8119 while (start < end) {
8120 int len,
err, prop_len;
8125 if (found_mask == FOUND_MASK_ALL) {
8126 niu_vpd_parse_version(np);
8130 err = niu_pci_eeprom_read(np, start + 2);
8136 prop_len = niu_pci_eeprom_read(np, start + 4);
8137 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
8143 if (!
strcmp(namebuf,
"model")) {
8144 prop_buf = np->
vpd.model;
8147 }
else if (!
strcmp(namebuf,
"board-model")) {
8148 prop_buf = np->
vpd.board_model;
8151 }
else if (!
strcmp(namebuf,
"version")) {
8152 prop_buf = np->
vpd.version;
8155 }
else if (!
strcmp(namebuf,
"local-mac-address")) {
8156 prop_buf = np->
vpd.local_mac;
8159 }
else if (!
strcmp(namebuf,
"num-mac-addresses")) {
8160 prop_buf = &np->
vpd.mac_num;
8163 }
else if (!
strcmp(namebuf,
"phy-type")) {
8164 prop_buf = np->
vpd.phy_type;
8169 if (max_len && prop_len > max_len) {
8170 dev_err(np->
device,
"Property '%s' length (%d) is too long\n", namebuf, prop_len);
8175 u32 off = start + 5 +
err;
8179 "VPD_SCAN: Reading in property [%s] len[%d]\n",
8181 for (i = 0; i < prop_len; i++)
8182 *prop_buf++ = niu_pci_eeprom_read(np, off + i);
8197 err = niu_pci_eeprom_read16_swp(np, start + 1);
8207 err = niu_pci_eeprom_read(np, here);
8211 err = niu_pci_eeprom_read16_swp(np, here + 1);
8215 here = start + offset + 3;
8216 end = start + offset +
err;
8220 err = niu_pci_vpd_scan_props(np, here, end);
8221 if (err < 0 || err == 1)
8232 while (start < end) {
8236 err = niu_pci_eeprom_read16(np, start + 0);
8241 err = niu_pci_eeprom_read16(np, start + 23);
8247 err = niu_pci_eeprom_read16(np, start + 0);
8250 err = niu_pci_eeprom_read16(np, start + 2);
8255 err = niu_pci_eeprom_read(np, start + 20);
8259 err = niu_pci_eeprom_read(np, ret + 2);
8263 start = ret + (err * 512);
8267 err = niu_pci_eeprom_read16_swp(np, start + 8);
8272 err = niu_pci_eeprom_read(np, ret + 0);
8282 static int __devinit niu_phy_type_prop_decode(
struct niu *np,
8283 const char *phy_prop)
8285 if (!
strcmp(phy_prop,
"mif")) {
8290 }
else if (!
strcmp(phy_prop,
"xgf")) {
8295 }
else if (!
strcmp(phy_prop,
"pcs")) {
8300 }
else if (!
strcmp(phy_prop,
"xgc")) {
8305 }
else if (!
strcmp(phy_prop,
"xgsd") || !
strcmp(phy_prop,
"gsd")) {
8317 static int niu_pci_vpd_get_nports(
struct niu *np)
8337 static void __devinit niu_pci_vpd_validate(
struct niu *np)
8343 if (!is_valid_ether_addr(&vpd->
local_mac[0])) {
8344 dev_err(np->
device,
"VPD MAC invalid, falling back to SPROM\n");
8365 }
else if (niu_phy_type_prop_decode(np, np->
vpd.phy_type)) {
8383 static int __devinit niu_pci_probe_sprom(
struct niu *np)
8397 "SPROM: Image size %llu\n", (
unsigned long long)val);
8400 for (i = 0; i < len; i++) {
8402 sum += (val >> 0) & 0xff;
8403 sum += (val >> 8) & 0xff;
8404 sum += (val >> 16) & 0xff;
8405 sum += (val >> 24) & 0xff;
8408 "SPROM: Checksum %x\n", (
int)(sum & 0xff));
8409 if ((sum & 0xff) != 0xab) {
8410 dev_err(np->
device,
"Bad SPROM checksum (%x, should be 0xab)\n", (
int)(sum & 0xff));
8438 "SPROM: PHY type %x\n", val8);
8476 "SPROM: MAC_ADDR0[%08llx]\n", (
unsigned long long)val);
8484 "SPROM: MAC_ADDR1[%08llx]\n", (
unsigned long long)val);
8488 if (!is_valid_ether_addr(&dev->
perm_addr[0])) {
8503 "SPROM: MOD_STR_LEN[%llu]\n", (
unsigned long long)val);
8507 for (i = 0; i <
val; i += 4) {
8510 np->
vpd.model[i + 3] = (tmp >> 0) & 0xff;
8511 np->
vpd.model[i + 2] = (tmp >> 8) & 0xff;
8512 np->
vpd.model[i + 1] = (tmp >> 16) & 0xff;
8513 np->
vpd.model[i + 0] = (tmp >> 24) & 0xff;
8515 np->
vpd.model[
val] =
'\0';
8519 "SPROM: BD_MOD_STR_LEN[%llu]\n", (
unsigned long long)val);
8523 for (i = 0; i <
val; i += 4) {
8526 np->
vpd.board_model[i + 3] = (tmp >> 0) & 0xff;
8527 np->
vpd.board_model[i + 2] = (tmp >> 8) & 0xff;
8528 np->
vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
8529 np->
vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
8531 np->
vpd.board_model[
val] =
'\0';
8536 "SPROM: NUM_PORTS_MACS[%d]\n", np->
vpd.mac_num);
8541 static int __devinit niu_get_and_validate_port(
struct niu *np)
8552 parent->
num_ports = niu_pci_vpd_get_nports(np);
8577 int dev_id_1,
int dev_id_2,
u8 phy_port,
8580 u32 id = (dev_id_1 << 16) | dev_id_2;
8583 if (dev_id_1 < 0 || dev_id_2 < 0)
8597 pr_info(
"niu%d: Found PHY %08x type %s at phy_port %u\n",
8604 pr_err(
"Too many PHY ports\n");
8636 for (port = 8; port < 32; port++) {
8637 if (port_has_10g(p, port)) {
8665 pr_info(
"niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8673 int num_10g,
int num_1g)
8676 int rx_chans_per_10g, rx_chans_per_1g;
8677 int tx_chans_per_10g, tx_chans_per_1g;
8678 int i, tot_rx, tot_tx;
8680 if (!num_10g || !num_1g) {
8681 rx_chans_per_10g = rx_chans_per_1g =
8683 tx_chans_per_10g = tx_chans_per_1g =
8688 (rx_chans_per_1g * num_1g)) /
8693 (tx_chans_per_1g * num_1g)) /
8697 tot_rx = tot_tx = 0;
8699 int type = phy_decode(parent->
port_phy, i);
8708 pr_info(
"niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8717 pr_err(
"niu%d: Too many RX channels (%d), resetting to one per port\n",
8718 parent->
index, tot_rx);
8723 pr_err(
"niu%d: Too many TX channels (%d), resetting to one per port\n",
8724 parent->
index, tot_tx);
8729 pr_warning(
"niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
8730 parent->
index, tot_rx, tot_tx);
8735 int num_10g,
int num_1g)
8738 int rdc_group, rdc_groups_per_port;
8739 int rdc_channel_base;
8744 rdc_channel_base = 0;
8749 int this_channel_offset;
8753 this_channel_offset = 0;
8755 struct rdc_table *rt = &tp->
tables[grp];
8758 pr_info(
"niu%d: Port %d RDC tbl(%d) [ ",
8762 rdc_channel_base + this_channel_offset;
8766 if (++this_channel_offset == num_channels)
8767 this_channel_offset = 0;
8775 rdc_group += rdc_groups_per_port;
8779 static int __devinit fill_phy_probe_info(
struct niu *np,
8783 unsigned long flags;
8786 memset(info, 0,
sizeof(*info));
8791 for (port = 8; port < 32; port++) {
8792 int dev_id_1, dev_id_2;
8794 dev_id_1 = mdio_read(np, port,
8796 dev_id_2 = mdio_read(np, port,
8798 err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8802 dev_id_1 = mdio_read(np, port,
8804 dev_id_2 = mdio_read(np, port,
8806 err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8812 err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8825 int lowest_10g, lowest_1g;
8826 int num_10g, num_1g;
8830 num_10g = num_1g = 0;
8859 err = fill_phy_probe_info(np, parent, info);
8863 num_10g = count_10g_ports(info, &lowest_10g);
8864 num_1g = count_1g_ports(info, &lowest_1g);
8866 switch ((num_10g << 4) | num_1g) {
8868 if (lowest_1g == 10)
8870 else if (lowest_1g == 26)
8873 goto unknown_vg_1g_port;
8893 if (lowest_1g == 10)
8895 else if (lowest_1g == 26)
8898 goto unknown_vg_1g_port;
8902 if ((lowest_10g & 0x7) == 0)
8915 if (lowest_1g == 10)
8917 else if (lowest_1g == 26)
8920 goto unknown_vg_1g_port;
8929 pr_err(
"Unsupported port config 10G[%d] 1G[%d]\n",
8938 niu_n2_divide_channels(parent);
8940 niu_divide_channels(parent, num_10g, num_1g);
8942 niu_divide_rdc_groups(parent, num_10g, num_1g);
8947 pr_err(
"Cannot identify platform type, 1gport=%d\n", lowest_1g);
8957 err = walk_phys(np, parent);
8961 niu_set_ldg_timer_res(np, 2);
8962 for (i = 0; i <=
LDN_MAX; i++)
8963 niu_ldn_irq_enable(np, i, 0);
8972 static int __devinit niu_classifier_swstate_init(
struct niu *np)
8981 return fflp_early_init(np);
8984 static void __devinit niu_link_config_init(
struct niu *np)
9009 static int __devinit niu_init_mac_ipp_pcs_base(
struct niu *np)
9041 dev_err(np->
device,
"Port %u is invalid, cannot compute MAC block offset\n", np->
port);
9048 static void __devinit niu_try_msix(
struct niu *np,
u8 *ldg_num_map)
9058 ldg_num_map[i] = first_ldg + i;
9062 (np->
port == 0 ? 3 : 1));
9067 msi_vec[
i].vector = 0;
9068 msi_vec[
i].entry =
i;
9083 np->
ldg[i].irq = msi_vec[i].vector;
9087 static int __devinit niu_n2_irq_init(
struct niu *np,
u8 *ldg_num_map)
9089 #ifdef CONFIG_SPARC64
9091 const u32 *int_prop;
9098 for (i = 0; i < op->
archdata.num_irqs; i++) {
9099 ldg_num_map[
i] = int_prop[
i];
9116 int i,
err, ldg_rotor;
9120 np->
ldg[0].irq = np->
dev->irq;
9122 err = niu_n2_irq_init(np, ldg_num_map);
9126 niu_try_msix(np, ldg_num_map);
9129 for (i = 0; i < np->
num_ldg; i++) {
9143 err = niu_set_ldg_sid(np, lp->
ldg_num, port, i);
9162 err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
9172 err = niu_ldg_assign_ldn(np, parent,
9173 ldg_num_map[ldg_rotor],
9182 err = niu_ldg_assign_ldn(np, parent,
9183 ldg_num_map[ldg_rotor],
9195 for (i = 0; i <
port; i++)
9199 for (i = first_chan; i < (first_chan +
num_chan); i++) {
9200 err = niu_ldg_assign_ldn(np, parent,
9201 ldg_num_map[ldg_rotor],
9211 for (i = 0; i <
port; i++)
9214 for (i = first_chan; i < (first_chan +
num_chan); i++) {
9215 err = niu_ldg_assign_ldn(np, parent,
9216 ldg_num_map[ldg_rotor],
9236 #ifdef CONFIG_SPARC64
9239 const char *phy_type;
9245 dp = np->
op->dev.of_node;
9247 dp = pci_device_to_OF_node(np->
pdev);
9251 netdev_err(dev,
"%s: OF node lacks phy-type property\n",
9256 if (!
strcmp(phy_type,
"none"))
9261 if (niu_phy_type_prop_decode(np, np->
vpd.phy_type)) {
9262 netdev_err(dev,
"%s: Illegal phy string [%s]\n",
9269 netdev_err(dev,
"%s: OF node lacks local-mac-address property\n",
9274 netdev_err(dev,
"%s: OF MAC address prop len (%d) is wrong\n",
9278 if (!is_valid_ether_addr(&dev->
perm_addr[0])) {
9279 netdev_err(dev,
"%s: OF MAC address is invalid\n",
9303 static int __devinit niu_get_invariants(
struct niu *np)
9305 int err, have_props;
9308 err = niu_get_of_props(np);
9314 err = niu_init_mac_ipp_pcs_base(np);
9319 err = niu_get_and_validate_port(np);
9328 offset = niu_pci_vpd_offset(np);
9330 "%s() VPD offset [%08x]\n", __func__, offset);
9332 niu_pci_vpd_fetch(np, offset);
9336 niu_pci_vpd_validate(np);
9337 err = niu_get_and_validate_port(np);
9343 err = niu_get_and_validate_port(np);
9346 err = niu_pci_probe_sprom(np);
9352 err = niu_probe_ports(np);
9358 niu_classifier_swstate_init(np);
9359 niu_link_config_init(np);
9361 err = niu_determine_phy_disposition(np);
9363 err = niu_init_link(np);
9370 static int niu_parent_index;
9378 char *orig_buf =
buf;
9386 const char *type_str;
9389 type = phy_decode(port_phy, i);
9395 (i == 0) ?
"%s" :
" %s",
9399 return buf - orig_buf;
9407 const char *type_str;
9423 type_str =
"unknown";
9427 return sprintf(buf,
"%s\n", type_str);
9436 char *orig_buf =
buf;
9444 (i == 0) ?
"%d" :
" %d",
9449 return buf - orig_buf;
9455 return __show_chan_per_port(dev, attr, buf, 1);
9461 return __show_chan_per_port(dev, attr, buf, 1);
9490 plat_dev = platform_device_register_simple(
"niu-board", niu_parent_index,
9492 if (IS_ERR(plat_dev))
9495 for (i = 0;
attr_name(niu_parent_attributes[i]); i++) {
9497 &niu_parent_attributes[i]);
9499 goto fail_unregister;
9504 goto fail_unregister;
9506 p->
index = niu_parent_index++;
9508 plat_dev->
dev.platform_data =
p;
9513 INIT_LIST_HEAD(&p->
list);
9515 list_add(&p->
list, &niu_parent_list);
9537 for (i = 0; i < LDN_MAX + 1; i++)
9552 int port = np->
port;
9557 if (!
memcmp(
id, &tmp->
id,
sizeof(*
id))) {
9563 p = niu_new_parent(np,
id, ptype);
9569 sprintf(port_name,
"port%d", port);
9583 static void niu_put_parent(
struct niu *np)
9592 "%s() port[%u]\n", __func__, port);
9594 sprintf(port_name,
"port%d", port);
9611 static void *niu_pci_alloc_coherent(
struct device *dev,
size_t size,
9623 static void niu_pci_free_coherent(
struct device *dev,
size_t size,
9629 static u64 niu_pci_map_page(
struct device *dev,
struct page *page,
9630 unsigned long offset,
size_t size,
9633 return dma_map_page(dev, page, offset, size, direction);
9656 static const struct niu_ops niu_pci_ops = {
9657 .alloc_coherent = niu_pci_alloc_coherent,
9658 .free_coherent = niu_pci_free_coherent,
9659 .map_page = niu_pci_map_page,
9660 .unmap_page = niu_pci_unmap_page,
9661 .map_single = niu_pci_map_single,
9662 .unmap_single = niu_pci_unmap_single,
9665 static void __devinit niu_driver_version(
void)
9667 static int niu_version_printed;
9669 if (niu_version_printed++ == 0)
9687 np = netdev_priv(dev);
9705 .ndo_open = niu_open,
9706 .ndo_stop = niu_close,
9707 .ndo_start_xmit = niu_start_xmit,
9708 .ndo_get_stats64 = niu_get_stats,
9709 .ndo_set_rx_mode = niu_set_rx_mode,
9711 .ndo_set_mac_address = niu_set_mac_addr,
9712 .ndo_do_ioctl = niu_ioctl,
9713 .ndo_tx_timeout = niu_tx_timeout,
9714 .ndo_change_mtu = niu_change_mtu,
9724 static void __devinit niu_device_announce(
struct niu *np)
9731 pr_info(
"%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9740 pr_info(
"%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9768 niu_driver_version();
9772 dev_err(&pdev->
dev,
"Cannot enable PCI device, aborting\n");
9778 dev_err(&pdev->
dev,
"Cannot find proper PCI device base addresses, aborting\n");
9780 goto err_out_disable_pdev;
9785 dev_err(&pdev->
dev,
"Cannot obtain PCI resources, aborting\n");
9786 goto err_out_disable_pdev;
9789 if (!pci_is_pcie(pdev)) {
9790 dev_err(&pdev->
dev,
"Cannot find PCI Express capability, aborting\n");
9792 goto err_out_free_res;
9795 dev = niu_alloc_and_init(&pdev->
dev, pdev,
NULL,
9799 goto err_out_free_res;
9801 np = netdev_priv(dev);
9803 memset(&parent_id, 0,
sizeof(parent_id));
9805 parent_id.pci.bus = pdev->
bus->number;
9808 np->
parent = niu_get_parent(np, &parent_id,
9812 goto err_out_free_dev;
9822 err = pci_set_dma_mask(pdev, dma_mask);
9825 err = pci_set_consistent_dma_mask(pdev, dma_mask);
9827 dev_err(&pdev->
dev,
"Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
9828 goto err_out_release_parent;
9834 dev_err(&pdev->
dev,
"No usable DMA configuration, aborting\n");
9835 goto err_out_release_parent;
9839 niu_set_basic_features(dev);
9845 dev_err(&pdev->
dev,
"Cannot map device registers, aborting\n");
9847 goto err_out_release_parent;
9855 niu_assign_netdev_ops(dev);
9857 err = niu_get_invariants(np);
9860 dev_err(&pdev->
dev,
"Problem fetching invariants of chip, aborting\n");
9861 goto err_out_iounmap;
9866 dev_err(&pdev->
dev,
"Cannot register net device, aborting\n");
9867 goto err_out_iounmap;
9870 pci_set_drvdata(pdev, dev);
9872 niu_device_announce(np);
9882 err_out_release_parent:
9891 err_out_disable_pdev:
9893 pci_set_drvdata(pdev,
NULL);
9900 struct net_device *dev = pci_get_drvdata(pdev);
9903 struct niu *np = netdev_priv(dev);
9918 pci_set_drvdata(pdev,
NULL);
9924 struct net_device *dev = pci_get_drvdata(pdev);
9925 struct niu *np = netdev_priv(dev);
9926 unsigned long flags;
9928 if (!netif_running(dev))
9937 niu_enable_interrupts(np, 0);
9938 spin_unlock_irqrestore(&np->
lock, flags);
9944 spin_unlock_irqrestore(&np->
lock, flags);
9951 static int niu_resume(
struct pci_dev *pdev)
9953 struct net_device *dev = pci_get_drvdata(pdev);
9954 struct niu *np = netdev_priv(dev);
9955 unsigned long flags;
9958 if (!netif_running(dev))
9967 err = niu_init_hw(np);
9971 niu_netif_start(np);
9974 spin_unlock_irqrestore(&np->
lock, flags);
9981 .id_table = niu_pci_tbl,
9982 .probe = niu_pci_init_one,
9984 .suspend = niu_suspend,
9985 .resume = niu_resume,
9988 #ifdef CONFIG_SPARC64
9989 static void *niu_phys_alloc_coherent(
struct device *dev,
size_t size,
9998 *dma_addr =
__pa(page);
10000 return (
void *)
page;
10003 static void niu_phys_free_coherent(
struct device *dev,
size_t size,
10008 free_pages((
unsigned long) cpu_addr, order);
10011 static u64 niu_phys_map_page(
struct device *dev,
struct page *page,
10012 unsigned long offset,
size_t size,
10024 static u64 niu_phys_map_single(
struct device *dev,
void *cpu_addr,
10028 return __pa(cpu_addr);
10038 static const struct niu_ops niu_phys_ops = {
10040 .free_coherent = niu_phys_free_coherent,
10041 .map_page = niu_phys_map_page,
10042 .unmap_page = niu_phys_unmap_page,
10043 .map_single = niu_phys_map_single,
10044 .unmap_single = niu_phys_unmap_single,
10055 niu_driver_version();
10059 dev_err(&op->
dev,
"%s: No 'reg' property, aborting\n",
10060 op->
dev.of_node->full_name);
10064 dev = niu_alloc_and_init(&op->
dev,
NULL, op,
10065 &niu_phys_ops, reg[0] & 0x1);
10070 np = netdev_priv(dev);
10072 memset(&parent_id, 0,
sizeof(parent_id));
10075 np->
parent = niu_get_parent(np, &parent_id,
10079 goto err_out_free_dev;
10082 niu_set_basic_features(dev);
10088 dev_err(&op->
dev,
"Cannot map device registers, aborting\n");
10090 goto err_out_release_parent;
10097 dev_err(&op->
dev,
"Cannot map device vir registers 1, aborting\n");
10099 goto err_out_iounmap;
10106 dev_err(&op->
dev,
"Cannot map device vir registers 2, aborting\n");
10108 goto err_out_iounmap;
10111 niu_assign_netdev_ops(dev);
10113 err = niu_get_invariants(np);
10116 dev_err(&op->
dev,
"Problem fetching invariants of chip, aborting\n");
10117 goto err_out_iounmap;
10122 dev_err(&op->
dev,
"Cannot register net device, aborting\n");
10123 goto err_out_iounmap;
10128 niu_device_announce(np);
10151 err_out_release_parent:
10152 niu_put_parent(np);
10166 struct niu *np = netdev_priv(dev);
10190 niu_put_parent(np);
10201 .compatible =
"SUNW,niusl",
10211 .of_match_table = niu_match,
10213 .probe = niu_of_probe,
10219 static int __init niu_init(
void)
10227 #ifdef CONFIG_SPARC64
10232 err = pci_register_driver(&niu_pci_driver);
10233 #ifdef CONFIG_SPARC64
10242 static void __exit niu_exit(
void)
10245 #ifdef CONFIG_SPARC64