#include <linux/capability.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/module.h>
#include <linux/slab.h>
#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
#define MIX_ORING1	0x0
#define MIX_ORING2	0x8
#define MIX_IRING1	0x10
#define MIX_IRING2	0x18
#define MIX_CTL		0x20
#define MIX_IRHWM	0x28
#define MIX_IRCNT	0x30
#define MIX_ORHWM	0x38
#define MIX_ORCNT	0x40
#define MIX_ISR		0x48
#define MIX_INTENA	0x50
#define MIX_REMCNT	0x58
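/*
 * The MIX_* values above are byte offsets into the per-port MIX register
 * block; elsewhere in this file they are always used relative to the
 * mapped base, e.g.:
 *
 *	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
 *	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
 */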
#define AGL_GMX_PRT_CFG			0x10
#define AGL_GMX_RX_FRM_CTL		0x18
#define AGL_GMX_RX_FRM_MAX		0x30
#define AGL_GMX_RX_JABBER		0x38
#define AGL_GMX_RX_STATS_CTL		0x50

#define AGL_GMX_RX_STATS_PKTS_DRP	0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP	0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD	0xc0

#define AGL_GMX_RX_ADR_CTL		0x100
#define AGL_GMX_RX_ADR_CAM_EN		0x108
#define AGL_GMX_RX_ADR_CAM0		0x180
#define AGL_GMX_RX_ADR_CAM1		0x188
#define AGL_GMX_RX_ADR_CAM2		0x190
#define AGL_GMX_RX_ADR_CAM3		0x198
#define AGL_GMX_RX_ADR_CAM4		0x1a0
#define AGL_GMX_RX_ADR_CAM5		0x1a8

#define AGL_GMX_TX_CLK			0x208
#define AGL_GMX_TX_STATS_CTL		0x268
#define AGL_GMX_TX_CTL			0x270
#define AGL_GMX_TX_STAT0		0x280
#define AGL_GMX_TX_STAT1		0x288
#define AGL_GMX_TX_STAT2		0x290
#define AGL_GMX_TX_STAT3		0x298
#define AGL_GMX_TX_STAT4		0x2a0
#define AGL_GMX_TX_STAT5		0x2a8
#define AGL_GMX_TX_STAT6		0x2b0
#define AGL_GMX_TX_STAT7		0x2b8
#define AGL_GMX_TX_STAT8		0x2c0
#define AGL_GMX_TX_STAT9		0x2c8
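/*
 * Likewise, the AGL_GMX_* values are byte offsets into the AGL GMX
 * register block and are used relative to p->agl, e.g.:
 *
 *	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
 *	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
 */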
	mix_intena.s.ithena = enable ? 1 : 0;
	spin_unlock_irqrestore(&p->lock, flags);
	mix_intena.s.othena = enable ? 1 : 0;
	spin_unlock_irqrestore(&p->lock, flags);
static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}
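/*
 * The four wrappers above implement the usual NAPI handshake: the
 * interrupt handler disables the RX or TX threshold interrupt and
 * defers the work, and the interrupt is re-enabled only after the
 * deferred work has run (see the MIX interrupt handler, the NAPI poll
 * routine and octeon_mgmt_clean_tx_tasklet later in this file).
 */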
static unsigned int ring_max_fill(unsigned int ring_size)
{
	/* Keep 8 entries of slack relative to the full ring size. */
	return ring_size - 8;
}
static unsigned int ring_size_to_bytes(unsigned int ring_size)
static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
		skb = netdev_alloc_skb(netdev, size);
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
	while (mix_orcnt.s.orcnt) {
		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
		skb = __skb_dequeue(&p->tx_list);
		mix_orcnt.s.orcnt = 1;
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
			ts.syststamp = ptp_to_ktime(ns);
			ts.hwtstamp = ns_to_ktime(ns);
	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;

	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}
static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	*pskb = __skb_dequeue(&p->rx_list);
static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
			ts = skb_hwtstamps(skb);
			ts->hwtstamp = ns_to_ktime(ns);
			ts->syststamp = ptp_to_ktime(ns);
		netdev->stats.rx_packets++;
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		netdev->stats.rx_errors++;
	mix_ircnt.s.ircnt = 1;
	unsigned int work_done = 0;

	while (work_done < budget && mix_ircnt.s.ircnt) {
		rc = octeon_mgmt_receive_one(p);
	octeon_mgmt_rx_fill_ring(p->netdev);
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		octeon_mgmt_enable_rx_irq(p);
	octeon_mgmt_update_rx_stats(netdev);
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
			 (unsigned long long)mix_bist.u64);
	if (agl_gmx_bist.u64)
			 (unsigned long long)agl_gmx_bist.u64);
	for (i = 0; i < 6; i++)
static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));
		available_cam_entries = 8;
		available_cam_entries = 7 - netdev->uc.count;
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	if (multicast_mode == 0) {
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);

	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;

	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
	octeon_mgmt_set_rx_filtering(netdev);
static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
	if (size_without_fcs < 64 || size_without_fcs > 16383) {
		dev_warn(p->dev, "MTU must be between %d and %d.\n",
			 64 - OCTEON_MGMT_RX_HEADROOM,
			 16383 - OCTEON_MGMT_RX_HEADROOM);

	netdev->mtu = new_mtu;

		       (size_without_fcs + 7) & 0xfff8);
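/*
 * Note on the arithmetic above: "(size_without_fcs + 7) & 0xfff8" rounds
 * the maximum frame size up to a multiple of 8 bytes; presumably this
 * rounded value is what gets programmed into AGL_GMX_RX_JABBER, so frames
 * larger than the new MTU plus OCTEON_MGMT_RX_HEADROOM are dropped as
 * jabber, while AGL_GMX_RX_FRM_MAX takes the unrounded size.
 */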
	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
	bool have_hw_timestamps = false;

		if (!ptp.s.ext_clk_en) {
			pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
			pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
		have_hw_timestamps = true;

	if (!have_hw_timestamps)

	switch (config.rx_filter) {
		rxx_frm_ctl.s.ptp_mode = 0;
		rxx_frm_ctl.s.ptp_mode = 1;
static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 0;
	prtx_cfg.s.rx_en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	for (i = 0; i < 10; i++) {
		prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
		if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
static void octeon_mgmt_update_link(struct octeon_mgmt *p)
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
		prtx_cfg.s.duplex = 1;
		prtx_cfg.s.duplex = p->phydev->duplex;

	switch (p->phydev->speed) {
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 1;
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 0;
		prtx_cfg.s.speed = 1;
		prtx_cfg.s.speed_msb = 0;
			prtx_cfg.s.slottime = 1;
			prtx_cfg.s.burst = p->phydev->duplex;

	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	agl_clk.s.clk_cnt = 1;
	if (prtx_ctl.s.mode == 0) {
		if (p->phydev->speed == 10)
			agl_clk.s.clk_cnt = 50;
		else if (p->phydev->speed == 100)
			agl_clk.s.clk_cnt = 5;
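/*
 * The speed/speed_msb pairs written in the switch above follow the AGL
 * GMX port configuration encoding: (speed_msb = 1, speed = 0) selects
 * 10 Mbps, (0, 0) selects 100 Mbps and (0, 1) selects 1000 Mbps. The
 * clk_cnt values of 50 and 5 appear to divide a 125 MHz reference down
 * to the 2.5 MHz and 25 MHz clocks needed for 10 and 100 Mbps operation
 * in RGMII mode (prtx_ctl.s.mode == 0).
 */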
static void octeon_mgmt_adjust_link(struct net_device *netdev)
	int link_changed = 0;
		octeon_mgmt_disable_link(p);
		octeon_mgmt_update_link(p);
		octeon_mgmt_enable_link(p);
	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0) {
static int octeon_mgmt_init_phy(struct net_device *netdev)
				   octeon_mgmt_adjust_link, 0,
static int octeon_mgmt_open(struct net_device *netdev)
	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;

		drv_ctl.s.byp_en1 = 1;
		drv_ctl.s.nctl1 = 6;
		drv_ctl.s.pctl1 = 6;
		drv_ctl.s.byp_en = 1;
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	mix_ctl.s.crc_strip = 1;	/* Strip the ending CRC */
	mix_ctl.s.nbtarb = 0;		/* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	if (octeon_mgmt_init_phy(netdev)) {

		int rgmii_mode = (p->phydev->supported &
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
#define NS_PER_PHY_CLK 8

		agl_prtx_ctl.s.clkrst = 0;
		agl_prtx_ctl.s.dllrst = 0;
		agl_prtx_ctl.s.clktx_byp = 0;
		ndelay(256 * NS_PER_PHY_CLK);

		agl_prtx_ctl.s.enable = 1;
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		ndelay(1040 * NS_PER_PHY_CLK);
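/*
 * NS_PER_PHY_CLK = 8 matches the 8 ns period of a 125 MHz PHY reference
 * clock, so ndelay(256 * NS_PER_PHY_CLK) and ndelay(1040 * NS_PER_PHY_CLK)
 * busy-wait for roughly 256 and 1040 PHY clock cycles respectively,
 * presumably to let the DLL and the drive-strength compensation settle
 * after they are taken out of bypass and reset.
 */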
	octeon_mgmt_rx_fill_ring(netdev);

	mix_irhwm.s.irhwm = 0;
	mix_orhwm.s.orhwm = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assigned Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/* This port is configured to send PREAMBLE+SFD to begin every
	 * frame. GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;

	octeon_mgmt_disable_link(p);
	octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	octeon_mgmt_reset_hw(p);
static int octeon_mgmt_stop(struct net_device *netdev)
	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	octeon_mgmt_reset_hw(p);
	unsigned long flags;

	re.s.len = skb->len;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

	__skb_queue_tail(&p->tx_list, skb);

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	octeon_mgmt_update_tx_stats(netdev);
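/*
 * Flow-control sketch for the transmit path above: when the TX ring
 * cannot accept another entry the queue is stopped (netif_stop_queue),
 * and it is woken again from octeon_mgmt_clean_tx_buffers once completed
 * descriptors have been reclaimed (see the netif_wake_queue call earlier
 * in this file).
 */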
#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
static void octeon_mgmt_get_drvinfo(struct net_device *netdev,

static int octeon_mgmt_get_settings(struct net_device *netdev,

static int octeon_mgmt_set_settings(struct net_device *netdev,

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.get_settings = octeon_mgmt_get_settings,
	.set_settings = octeon_mgmt_set_settings,
	.nway_reset = octeon_mgmt_nway_reset,
	.ndo_open = octeon_mgmt_open,
	.ndo_stop = octeon_mgmt_stop,
	.ndo_start_xmit = octeon_mgmt_xmit,
	.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address = octeon_mgmt_set_mac_address,
	.ndo_do_ioctl = octeon_mgmt_ioctl,
	.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = octeon_mgmt_poll_controller,
	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));

	p = netdev_priv(netdev);

	if (data && len == sizeof(*data)) {
		dev_err(&pdev->dev, "no 'cell-index' property\n");

	if (res_mix == NULL) {
	if (res_agl == NULL) {
	if (res_agl_prt_ctl == NULL) {

	p->mix_size = resource_size(res_mix);
	p->agl_size = resource_size(res_agl);

		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl_prt_ctl->name);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);

		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	if (mac && is_valid_ether_addr(mac)) {
		eth_hw_addr_random(netdev);

	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
		.compatible = "cavium,octeon-5750-mix",

		.name = "octeon_mgmt",
		.of_match_table = octeon_mgmt_match,
	.probe = octeon_mgmt_probe,

static int __init octeon_mgmt_mod_init(void)

static void __exit octeon_mgmt_mod_exit(void)