#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";
/*
 * Registers shared between all ports.
 */
#define PHY_ADDR 0x0000
#define SMI_REG 0x0004
#define SMI_BUSY 0x10000000
#define SMI_READ_VALID 0x08000000
#define SMI_OPCODE_READ 0x04000000
#define SMI_OPCODE_WRITE 0x00000000
#define ERR_INT_CAUSE 0x0080
#define ERR_INT_SMI_DONE 0x00000010
#define ERR_INT_MASK 0x0084
#define WINDOW_BASE(w) (0x0200 + ((w) << 3))
#define WINDOW_SIZE(w) (0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE 0x0290
#define WINDOW_PROTECT(w) (0x0294 + ((w) << 4))
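/*
 * Per-port registers.
 */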
#define PORT_CONFIG 0x0000
#define UNICAST_PROMISCUOUS_MODE 0x00000001
#define PORT_CONFIG_EXT 0x0004
#define MAC_ADDR_LOW 0x0014
#define MAC_ADDR_HIGH 0x0018
#define SDMA_CONFIG 0x001c
#define TX_BURST_SIZE_16_64BIT 0x01000000
#define TX_BURST_SIZE_4_64BIT 0x00800000
#define BLM_TX_NO_SWAP 0x00000020
#define BLM_RX_NO_SWAP 0x00000010
#define RX_BURST_SIZE_16_64BIT 0x00000008
#define RX_BURST_SIZE_4_64BIT 0x00000004
#define PORT_SERIAL_CONTROL 0x003c
#define SET_MII_SPEED_TO_100 0x01000000
#define SET_GMII_SPEED_TO_1000 0x00800000
#define SET_FULL_DUPLEX_MODE 0x00200000
#define MAX_RX_PACKET_9700BYTE 0x000a0000
#define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000
#define DO_NOT_FORCE_LINK_FAIL 0x00000400
#define SERIAL_PORT_CONTROL_RESERVED 0x00000200
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008
#define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004
#define FORCE_LINK_PASS 0x00000002
#define SERIAL_PORT_ENABLE 0x00000001
#define PORT_STATUS 0x0044
#define TX_FIFO_EMPTY 0x00000400
#define TX_IN_PROGRESS 0x00000080
#define PORT_SPEED_MASK 0x00000030
#define PORT_SPEED_1000 0x00000010
#define PORT_SPEED_100 0x00000020
#define PORT_SPEED_10 0x00000000
#define FLOW_CONTROL_ENABLED 0x00000008
#define FULL_DUPLEX 0x00000004
#define LINK_UP 0x00000002
#define TXQ_COMMAND 0x0048
#define TXQ_FIX_PRIO_CONF 0x004c
#define TX_BW_RATE 0x0050
#define TX_BW_MTU 0x0058
#define TX_BW_BURST 0x005c
#define INT_CAUSE 0x0060
#define INT_TX_END 0x07f80000
#define INT_TX_END_0 0x00080000
#define INT_RX 0x000003fc
#define INT_RX_0 0x00000004
#define INT_EXT 0x00000002
#define INT_CAUSE_EXT 0x0064
#define INT_EXT_LINK_PHY 0x00110000
#define INT_EXT_TX 0x000000ff
#define INT_MASK 0x0068
#define INT_MASK_EXT 0x006c
#define TX_FIFO_URGENT_THRESHOLD 0x0074
#define RX_DISCARD_FRAME_CNT 0x0084
#define RX_OVERRUN_FRAME_CNT 0x0088
#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc
#define TX_BW_RATE_MOVED 0x00e0
#define TX_BW_MTU_MOVED 0x00e8
#define TX_BW_BURST_MOVED 0x00ec
#define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4))
#define RXQ_COMMAND 0x0280
#define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q) (0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4))
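/*
 * Misc per-port registers.
 */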
#define MIB_COUNTERS(p) (0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10))
#define UNICAST_TABLE(p) (0x1600 + ((p) << 10))
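/*
 * SDMA configuration register default value.
 */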
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
		(RX_BURST_SIZE_4_64BIT | \
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
		(RX_BURST_SIZE_4_64BIT | \
		 BLM_RX_NO_SWAP | \
		 BLM_TX_NO_SWAP | \
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
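/*
 * Misc definitions.
 */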
#define DEFAULT_RX_QUEUE_SIZE 128
#define DEFAULT_TX_QUEUE_SIZE 256
#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
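/*
 * RX/TX descriptors. The field layout depends on the CPU byte order.
 */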
#if defined(__BIG_ENDIAN)
#elif defined(__LITTLE_ENDIAN)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
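/* RX & TX descriptor command */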
#define BUFFER_OWNED_BY_DMA 0x80000000
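/* RX & TX descriptor status */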
#define ERROR_SUMMARY 0x00000001
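/* RX descriptor status */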
#define LAYER_4_CHECKSUM_OK 0x40000000
#define RX_ENABLE_INTERRUPT 0x20000000
#define RX_FIRST_DESC 0x08000000
#define RX_LAST_DESC 0x04000000
#define RX_IP_HDR_OK 0x02000000
#define RX_PKT_IS_IPV4 0x01000000
#define RX_PKT_IS_ETHERNETV2 0x00800000
#define RX_PKT_LAYER4_TYPE_MASK 0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000
#define RX_PKT_IS_VLAN_TAGGED 0x00080000
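/* TX descriptor command */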
#define TX_ENABLE_INTERRUPT 0x00800000
#define GEN_CRC 0x00400000
#define TX_FIRST_DESC 0x00200000
#define TX_LAST_DESC 0x00100000
#define ZERO_PADDING 0x00080000
#define GEN_IP_V4_CHECKSUM 0x00040000
#define GEN_TCP_UDP_CHECKSUM 0x00020000
#define UDP_FRAME 0x00010000
#define MAC_HDR_EXTRA_4_BYTES 0x00008000
#define MAC_HDR_EXTRA_8_BYTES 0x00000200

#define TX_IHL_SHIFT 11
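/*
 * Possible tx_bw_control values: where (if anywhere) the TX bandwidth
 * control registers are located on this chip variant.
 */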
#define TX_BW_CONTROL_ABSENT 0
#define TX_BW_CONTROL_OLD_LAYOUT 1
#define TX_BW_CONTROL_NEW_LAYOUT 2

#if defined(CONFIG_HAVE_CLK)
static void txq_enable(struct tx_queue *txq)

static void txq_disable(struct tx_queue *txq)

static void txq_maybe_wake(struct tx_queue *txq)

	if (netif_tx_queue_stopped(nq)) {
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	unsigned long cmd_sts = (unsigned long)priv;

	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ip_hdrlen(skb));
	*iphdr = ip_hdr(skb);
	*tcph = tcp_hdr(skb);
	int lro_flush_needed;

	lro_flush_needed = 0;

		/* strip the 2-byte hardware prefix and the 4-byte FCS */
		skb_put(skb, byte_cnt - 2 - 4);

			lro_flush_needed = 1;

			   "received packet spanning multiple descriptors\n");

	if (lro_flush_needed)
static int rxq_refill(struct rx_queue *rxq, int budget)

	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {

		struct rx_desc *rx_desc;

	if (refilled < budget)
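/*
 * The TX DMA engine can't handle fragments that are 8 bytes or smaller, or
 * that are not 8-byte aligned; skbs containing such fragments are linearized
 * by the caller before being submitted.
 */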
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {

		if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)

	int nr_frags = skb_shinfo(skb)->nr_frags;

	for (frag = 0; frag < nr_frags; frag++) {

		this_frag = &skb_shinfo(skb)->frags[frag];

		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |

		desc->byte_cnt = skb_frag_size(this_frag);
		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
						 skb_frag_size(this_frag),
	int nr_frags = skb_shinfo(skb)->nr_frags;

		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;

		if (skb->len - hdr_len > mp->shared->tx_csum_limit ||

			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));

			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));

		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);

	desc->l4i_chk = l4i_chk;

	__skb_queue_tail(&txq->tx_skb, skb);

	skb_tx_timestamp(skb);
	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {

			      "failed to linearize skb with tiny unaligned fragment\n");

		netdev_err(dev, "tx queue full?!\n");

	if (!txq_submit_skb(txq, skb)) {

			netif_tx_stop_queue(nq);
static void txq_kick(struct tx_queue *txq)

	if (hw_desc_ptr != expected_ptr)

	__netif_tx_unlock(nq);
static int txq_reclaim(struct tx_queue *txq, int budget, int force)

	while (reclaimed < budget && txq->tx_desc_count > 0) {

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {

			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;

			skb = __skb_dequeue(&txq->tx_skb);

			netdev_info(mp->dev, "tx error\n");
			mp->dev->stats.tx_errors++;

	__netif_tx_unlock(nq);

	if (reclaimed < budget)
	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
	if (token_rate > 1023)

	mtu = (mp->dev->mtu + 255) >> 8;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
static void txq_set_rate(struct tx_queue *txq, int rate, int burst)

	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
	if (token_rate > 1023)

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;
static void txq_set_fixed_prio_mode(struct tx_queue *txq)

	switch (mp->shared->tx_bw_control) {

	val = rdlp(mp, off);
	val |= 1 << txq->index;
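/*
 * Wait for the SMI unit to become idle, either by polling or by waiting for
 * the SMI-done interrupt when one is available.
 */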
	for (i = 0; !smi_is_done(msp); i++) {

	if (!smi_is_done(msp)) {

		if (!smi_is_done(msp))
static int smi_bus_read(struct mii_bus *bus, int addr, int reg)

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");

	ret = readl(smi_reg);

		pr_warn("SMI bus read not valid\n");

	return ret & 0xffff;
static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");

	       (addr << 16) | (val & 0xffff), smi_reg);

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
	u32 lro_aggregated = 0;
	u32 lro_flushed = 0;
	u32 lro_no_desc = 0;

		lro_aggregated += rxq->lro_mgr.stats.aggregated;
		lro_flushed += rxq->lro_mgr.stats.flushed;
		lro_no_desc += rxq->lro_mgr.stats.no_desc;
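/*
 * The hardware MIB counters are clear-on-read: walking the whole 0x80-byte
 * counter block both snapshots and resets them.
 */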
	for (i = 0; i < 0x80; i += 4)

	p->fc_sent += mib_read(mp, 0x54);
static void mib_counters_timer_wrapper(unsigned long _mp)

	mib_counters_update(mp);
	if (mp->shared->extended_rx_coal_limit)
		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
	else
		temp = (val & 0x003fff00) >> 8;

	return (unsigned int)temp;

	if (mp->shared->extended_rx_coal_limit) {

		val |= (temp & 0x8000) << 10;
		val |= (temp & 0x7fff) << 7;

		val |= (temp & 0x3fff) << 8;

	return (unsigned int)temp;
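/*
 * Each statistics entry maps an ethtool string either to a field in
 * struct net_device_stats or to a field in the driver's private MIB/LRO
 * counters; -1 marks the offset that does not apply.
 */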
#define SSTAT(m) \
	{ #m, FIELD_SIZEOF(struct net_device_stats, m), \
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m) \
	{ #m, FIELD_SIZEOF(struct mib_counters, m), \
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

#define LROSTAT(m) \
	{ #m, FIELD_SIZEOF(struct lro_counters, m), \
	  -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }
	MIBSTAT(good_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	err = phy_read_status(mp->phy);

		ethtool_cmd_speed_set(cmd, SPEED_10);

		return mv643xx_eth_get_settings_phy(mp, cmd);

		return mv643xx_eth_get_settings_phyless(mp, cmd);
static void mv643xx_eth_get_drvinfo(struct net_device *dev,

		sizeof(drvinfo->driver));

static int mv643xx_eth_nway_reset(struct net_device *dev)
	if (netif_running(dev)) {
		mv643xx_eth_stop(dev);
		if (mv643xx_eth_open(dev)) {
			netdev_err(dev,
				   "fatal error on re-opening device after ring param change\n");

	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
static void mv643xx_eth_get_strings(struct net_device *dev,

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {

			mv643xx_eth_stats[i].stat_string,

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	mv643xx_eth_grab_lro_stats(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;

		stat = mv643xx_eth_stats + i;

			p = ((void *)mp) + stat->mp_off;

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings = mv643xx_eth_get_settings,
	.set_settings = mv643xx_eth_set_settings,
	.get_drvinfo = mv643xx_eth_get_drvinfo,
	.nway_reset = mv643xx_eth_nway_reset,
	.get_coalesce = mv643xx_eth_get_coalesce,
	.set_coalesce = mv643xx_eth_set_coalesce,
	.get_ringparam = mv643xx_eth_get_ringparam,
	.set_ringparam = mv643xx_eth_set_ringparam,
	.get_strings = mv643xx_eth_get_strings,
	.get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
	.get_sset_count = mv643xx_eth_get_sset_count,
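/*
 * The port MAC address is stored in two registers: MAC_ADDR_HIGH holds
 * bytes 0-3, MAC_ADDR_LOW holds bytes 4-5.
 */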
	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;

	     (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
	nibbles = 1 << (dev->dev_addr[5] & 0x0f);

		nibbles |= 1 << (ha->addr[5] & 0x0f);

static void mv643xx_eth_program_unicast_filter(struct net_device *dev)

	nibbles = uc_addr_filter_mask(dev);

	for (i = 0; i < 16; i += 4) {
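/*
 * addr_crc() computes the 8-bit hash the hardware uses to index a multicast
 * address into the 256-entry "other" multicast filter table.
 */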
static int addr_crc(unsigned char *addr)

	for (i = 0; i < 6; i++) {

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
static void mv643xx_eth_program_multicast_filter(struct net_device *dev)

		accept = 0x01010101;
		for (i = 0; i < 0x100; i += 4) {

	if (mc_spec == NULL)

	mc_other = mc_spec + (0x100 >> 2);

	memset(mc_spec, 0, 0x100);
	memset(mc_other, 0, 0x100);

		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {

			entry = addr_crc(a);

		table[entry >> 2] |= 1 << (8 * (entry & 3));

	for (i = 0; i < 0x100; i += 4) {
static void mv643xx_eth_set_rx_mode(struct net_device *dev)

	mv643xx_eth_program_unicast_filter(dev);
	mv643xx_eth_program_multicast_filter(dev);

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)

	if (!is_valid_ether_addr(sa->sa_data))

	netif_addr_lock_bh(dev);
	mv643xx_eth_program_unicast_filter(dev);
	netif_addr_unlock_bh(dev);
	struct rx_desc *rx_desc;

	if (index == 0 && size <= mp->rx_desc_sram_size) {

			   "can't allocate rx ring (%d bytes)\n", size);

		netdev_err(mp->dev, "can't allocate rx skb ring\n");

			nexti * sizeof(struct rx_desc);

	rxq->lro_mgr.frag_align_pad = 0;

	rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;

	if (index == 0 && size <= mp->rx_desc_sram_size)

static void rxq_deinit(struct rx_queue *rxq)

			dev_kfree_skb(rxq->rx_skb[i]);

		netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",

	if (rxq->index == 0 &&
	if (index == 0 && size <= mp->tx_desc_sram_size) {

			   "can't allocate tx ring (%d bytes)\n", size);

		struct tx_desc *txd = tx_desc + i;

			nexti * sizeof(struct tx_desc);

	skb_queue_head_init(&txq->tx_skb);

static void txq_deinit(struct tx_queue *txq)

	if (txq->index == 0 &&
	int_cause &= ~INT_EXT;

	if (int_cause_ext) {

static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)

	if (unlikely(!mv643xx_eth_collect_events(mp)))

	napi_schedule(&mp->napi);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {

			netdev_info(dev, "link down\n");

				txq_reset_hw_ptr(txq);

	netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
		    speed, duplex ? "full" : "half", fc ? "en" : "dis");

	if (!netif_carrier_ok(dev))
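/*
 * NAPI poll: events are collected into per-queue work bitmaps; each
 * iteration services the highest-numbered queue that still has work,
 * spending the remaining budget on link events, TX reclaim, RX processing
 * and RX refill.
 */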
	while (work_done < budget) {

			handle_link_event(mp);

			if (mv643xx_eth_collect_events(mp))

		queue = fls(queue_mask) - 1;
		queue_mask = 1 << queue;

		work_tbd = budget - work_done;

			txq_kick(mp->txq + queue);
		} else if (mp->work_tx & queue_mask) {
			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
			txq_maybe_wake(mp->txq + queue);
		} else if (mp->work_rx & queue_mask) {
			work_done += rxq_process(mp->rxq + queue, work_tbd);

			work_done += rxq_refill(mp->rxq + queue, work_tbd);

	if (work_done < budget) {

static inline void oom_timer_wrapper(unsigned long data)

	napi_schedule(&mp->napi);
		mv643xx_eth_get_settings(mp->dev, &cmd);

		mv643xx_eth_set_settings(mp->dev, &cmd);

	tx_set_rate(mp, 1000000000, 16777216);

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);

	mv643xx_eth_set_features(mp->dev, mp->dev->features);

	mv643xx_eth_program_unicast_filter(mp->dev);

	/* 2 + 14 bytes of header, up to 16 bytes of VLAN tags, 4 bytes of FCS */
	skb_size = mp->dev->mtu + 36;

	/* the hardware ignores the low three bits of the rx buffer size */
	mp->skb_size = (skb_size + 7) & ~7;
static int mv643xx_eth_open(struct net_device *dev)

		netdev_err(dev, "can't assign irq\n");

	mv643xx_eth_recalc_skb_size(mp);

	napi_enable(&mp->napi);

		err = rxq_init(mp, i);

				rxq_deinit(mp->rxq + i);

		err = txq_init(mp, i);

				txq_deinit(mp->txq + i);

		rxq_deinit(mp->rxq + i);

		rxq_disable(mp->rxq + i);

		txq_disable(mp->txq + i);
static int mv643xx_eth_stop(struct net_device *dev)

	napi_disable(&mp->napi);

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);

		rxq_deinit(mp->rxq + i);

		txq_deinit(mp->txq + i);
static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)

	if (new_mtu < 64 || new_mtu > 9500)

	mv643xx_eth_recalc_skb_size(mp);
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))

	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		netdev_err(dev,
			   "fatal error on re-opening device after MTU change\n");
	if (netif_running(mp->dev)) {
		netif_tx_stop_all_queues(mp->dev);

		netif_tx_wake_all_queues(mp->dev);

static void mv643xx_eth_tx_timeout(struct net_device *dev)

	netdev_info(dev, "tx timeout\n");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)

	mv643xx_eth_irq(dev->irq, dev);
	for (i = 0; i < 6; i++) {

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
static int mv643xx_eth_version_printed;

	if (!mv643xx_eth_version_printed++)
		pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
			  mv643xx_eth_driver_version);
	msp->smi_bus = mdiobus_alloc();

	msp->smi_bus->name = "mv643xx_eth smi";
	msp->smi_bus->read = smi_bus_read;
	msp->smi_bus->write = smi_bus_write;

	msp->smi_bus->phy_mask = 0xffffffff;

		goto out_free_mii_bus;

		mv643xx_eth_conf_mbus_windows(msp, dram);

	infer_hw_params(msp);

	platform_set_drvdata(pdev, msp);
	.probe = mv643xx_eth_shared_probe,
	.remove = mv643xx_eth_shared_remove,
	data |= (phy_addr & 0x1f) << addr_shift;

	return (data >> (5 * mp->port_num)) & 0x1f;

	if (is_valid_ether_addr(pd->mac_addr))

		start = phy_addr_get(mp) & 0x1f;

		start = phy_addr & 0x1f;

	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;

		if (phydev == NULL) {

			phy_addr_set(mp, addr);

	pscr &= ~SERIAL_PORT_ENABLE;
	.ndo_open = mv643xx_eth_open,
	.ndo_stop = mv643xx_eth_stop,
	.ndo_start_xmit = mv643xx_eth_xmit,
	.ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
	.ndo_set_mac_address = mv643xx_eth_set_mac_address,
	.ndo_do_ioctl = mv643xx_eth_ioctl,
	.ndo_change_mtu = mv643xx_eth_change_mtu,
	.ndo_set_features = mv643xx_eth_set_features,
	.ndo_tx_timeout = mv643xx_eth_tx_timeout,
	.ndo_get_stats = mv643xx_eth_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mv643xx_eth_netpoll,
	pd = pdev->dev.platform_data;

		dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");

		dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->t_clk = 133000000;
#if defined(CONFIG_HAVE_CLK)

	if (!IS_ERR(mp->clk)) {
		clk_prepare_enable(mp->clk);

	netif_set_real_num_rx_queues(dev, mp->rxq_count);

	mib_counters_clear(mp);

	mp->rx_oom.function = oom_timer_wrapper;

	if (mp->shared->win_protect)

	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);

	set_rx_coal(mp, 250);

	netdev_notice(dev, "port %d with MAC address %pM\n",

		netdev_notice(dev, "configured with sram\n");

#if defined(CONFIG_HAVE_CLK)
	if (!IS_ERR(mp->clk)) {
		clk_disable_unprepare(mp->clk);

#if defined(CONFIG_HAVE_CLK)
	if (!IS_ERR(mp->clk)) {
		clk_disable_unprepare(mp->clk);

	platform_set_drvdata(pdev, NULL);

	if (netif_running(mp->dev))
	.probe = mv643xx_eth_probe,
	.remove = mv643xx_eth_remove,
	.shutdown = mv643xx_eth_shutdown,

static int __init mv643xx_eth_init_module(void)

static void __exit mv643xx_eth_cleanup_module(void)

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");