#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <asm/dma-mapping.h>
#include <linux/tcp.h>
#include <linux/prefetch.h>
#define LOCAL_SKB_ALIGN	2

#define LRO_MAX_AGGR 64

#define PE_MAX_MTU	9000
#define PE_DEF_MTU	ETH_DATA_LEN

#define DEFAULT_MSG_ENABLE \

static int debug = -1;
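/* debug == -1 means "use DEFAULT_MSG_ENABLE" as the netif message level. */

/*
 * translation_enabled() reports whether DMA address translation (the IOMMU)
 * is in use: it is forced on when CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE is set,
 * otherwise it follows the FW_FEATURE_LPAR firmware feature.
 */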
static int translation_enabled(void)
{
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
	return 1;
#else
	return firmware_has_feature(FW_FEATURE_LPAR);
#endif
}
static void write_iob_reg(unsigned int reg, unsigned int val)

static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg)

static unsigned int read_dma_reg(unsigned int reg)

static void write_dma_reg(unsigned int reg, unsigned int val)

static inline void prefetch_skb(const struct sk_buff *skb)
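/*
 * mac_to_intf(): scan the DMA capability registers to find which DMA
 * interface index belongs to this MAC's PCI devfn.  Each 32-bit register
 * holds four devfn entries, one per byte, so the search checks them
 * byte by byte.
 */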
static int mac_to_intf(struct pasemi_mac *mac)
{
	int nintf, off, i, j;

	for (i = 0; i < (nintf+3)/4; i++) {
		tmp = read_dma_reg(off+4*i);
		for (j = 0; j < 4; j++) {
			if (((tmp >> (8*j)) & 0xff) == devfn)
				return i*4 + j;
static void pasemi_mac_intf_disable(struct pasemi_mac *mac)

static void pasemi_mac_intf_enable(struct pasemi_mac *mac)

static int pasemi_get_mac_addr(struct pasemi_mac *mac)

		"No device node for mac, not configuring\n");

	if (maddr && len == 6) {

		"no mac address in device tree, not configuring\n");

	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
		   &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
		"can't parse mac address, not configuring\n");
static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)

	unsigned int adr0, adr1;

	if (!is_valid_ether_addr(addr->sa_data))

	pasemi_mac_intf_disable(mac);

	pasemi_mac_intf_enable(mac);
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *data)

	skb_reset_network_header(skb);

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);
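/*
 * pasemi_mac_unmap_tx_skb(): undo the DMA mappings for one transmitted skb
 * (head plus fragments) and return how many TX ring slots it occupied; the
 * count is rounded up to an even number of slots.
 */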
static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
				   const int nfrags, struct sk_buff *skb,
				   const dma_addr_t *dmas)

	for (f = 0; f < nfrags; f++) {

	return (nfrags + 3) & ~1;

		dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n");

	chno = ring->chan.chno;

	if (translation_enabled())
static void pasemi_mac_setup_csrings(struct pasemi_mac *mac)

	mac->cs[0] = pasemi_mac_setup_csring(mac);

	mac->cs[1] = pasemi_mac_setup_csring(mac);

	for (i = 0; i < MAX_CS; i++)
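/*
 * pasemi_mac_setup_rx_resources()/pasemi_mac_setup_tx_resources(): allocate
 * a DMA channel plus its descriptor and buffer rings and program the channel
 * registers; translation_enabled() decides whether the translation-related
 * bits are set in the channel configuration.
 */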
static int pasemi_mac_setup_rx_resources(const struct net_device *dev)

		dev_err(&mac->pdev->dev, "Can't allocate RX channel\n");

	chno = ring->chan.chno;

	if (translation_enabled())

	if (translation_enabled())
pasemi_mac_setup_tx_resources(const struct net_device *dev)

		dev_err(&mac->pdev->dev, "Can't allocate TX channel\n");

	chno = ring->chan.chno;

	if (translation_enabled())
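/*
 * pasemi_mac_free_tx_resources(): walk the TX ring, unmap and free any skbs
 * still pending, then release the ring and the DMA channel.  The loop steps
 * by the slot count returned from pasemi_mac_unmap_tx_skb().
 */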
static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)

	for (i = start; i < limit; i += freed) {

		if (info->dma && info->skb) {
			nfrags = skb_shinfo(info->skb)->nr_frags;
			for (j = 0; j <= nfrags; j++)

			freed = pasemi_mac_unmap_tx_skb(mac, nfrags,
							info->skb, dmas);
static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac)

		if (info->skb && info->dma) {

static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)

	pasemi_mac_free_rx_buffers(mac);
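/*
 * pasemi_mac_replenish_rx_ring(): allocate up to 'limit' fresh receive skbs
 * of mac->bufsz bytes, map them for DMA and queue buffer descriptors
 * starting at next_to_fill.
 */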
static void pasemi_mac_replenish_rx_ring(struct net_device *dev,
					 const int limit)

	const struct pasemi_mac *mac = netdev_priv(dev);

	fill = rx_ring(mac)->next_to_fill;
	for (count = 0; count < limit; count++) {

		skb = netdev_alloc_skb(dev, mac->bufsz);
static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac)

static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac)

static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac,
					const u64 macrx)

	unsigned int rcmdsta, ccmdsta;

	printk(KERN_ERR "pasemi_mac: rx error. macrx %016llx, rx status %llx\n",
	       macrx, *chan->status);

static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac,
					const u64 mactx)

		"tx status 0x%016llx\n", mactx, *chan->status);
	spin_lock(&rx->lock);

	for (count = 0; count < limit; count++) {

			pasemi_mac_rx_error(mac, macrx);

			mac->netdev->stats.rx_errors++;
			mac->netdev->stats.rx_crc_errors++;

		skb_checksum_none_assert(skb);

	if (n > RX_RING_SIZE) {
		n &= (RX_RING_SIZE-1);

	pasemi_mac_replenish_rx_ring(mac->netdev, count);

	mac->netdev->stats.rx_bytes += tot_bytes;

	spin_unlock(&rx_ring(mac)->lock);
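/*
 * TX completions are cleaned in batches of TX_CLEAN_BATCHSIZE packets so the
 * on-stack skb/fragment bookkeeping arrays used by the TX-clean path stay
 * bounded (roughly 128 descriptors' worth of work per pass).
 */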
#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)

	unsigned int start, descr_count, buf_count, batch_limit;
	unsigned int ring_limit;
	unsigned int total_count;

	if (start > ring_limit)

	     descr_count < batch_limit && i < ring_limit;

			pasemi_mac_tx_error(mac, mactx);

		buf_count = 2 + nr_frags;

		for (j = 0; j <= nr_frags; j++)

		skbs[descr_count] = skb;
		nf[descr_count] = nr_frags;

	spin_unlock_irqrestore(&txring->lock, flags);
	netif_wake_queue(mac->netdev);

	for (i = 0; i < descr_count; i++)
		pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]);

	total_count += descr_count;

	if (descr_count == batch_limit)
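/*
 * The RX/TX interrupt handlers only acknowledge the interrupt cause and hand
 * the ring processing to NAPI via napi_schedule(); the timer below also
 * sweeps the TX ring once per TX_CLEAN_INTERVAL so completions are reaped
 * even when interrupts are quiet.
 */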
static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)

	napi_schedule(&mac->napi);

#define TX_CLEAN_INTERVAL HZ

static void pasemi_mac_tx_timer(unsigned long data)

	pasemi_mac_clean_tx(txring);

	pasemi_mac_restart_tx_intr(mac);

static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)

	napi_schedule(&mac->napi);
static void pasemi_adjust_link(struct net_device *dev)

	unsigned int new_flags;

	if (!mac->phydev->link) {

		pasemi_mac_intf_disable(mac);

		pasemi_mac_intf_enable(mac);

	if (!mac->phydev->duplex)

	switch (mac->phydev->speed) {

	msg = mac->link != mac->phydev->link || flags != new_flags;

	if (new_flags != flags)

static int pasemi_mac_phy_init(struct net_device *dev)

	dn = pci_device_to_OF_node(mac->pdev);

	of_node_put(phy_dn);
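/*
 * pasemi_mac_open(): allocate the RX/TX rings (plus checksum rings when
 * needed), fill the RX ring, request the channel IRQs, enable NAPI and arm
 * the TX clean timer.  If PHY probing fails the link is left configured for
 * 1Gbit full duplex, as the message below states.
 */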
static int pasemi_mac_open(struct net_device *dev)

	ret = pasemi_mac_setup_rx_resources(dev);

		goto out_rx_resources;

	mac->tx = pasemi_mac_setup_tx_resources(dev);

		pasemi_mac_setup_csrings(mac);

	for (i = 0; i < 32; i++)

	pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);

	pasemi_mac_restart_rx_intr(mac);
	pasemi_mac_restart_tx_intr(mac);

	ret = pasemi_mac_phy_init(dev);

	pasemi_mac_intf_enable(mac);

		"PHY init failed: %d.\n", ret);
		"Defaulting to 1Gbit full duplex\n");

	netif_start_queue(dev);
	napi_enable(&mac->napi);

		dev_err(&mac->pdev->dev,
			"request_irq of irq %d failed: %d\n",
			mac->tx->chan.irq, ret);

		dev_err(&mac->pdev->dev,
			"request_irq of irq %d failed: %d\n",
			mac->rx->chan.irq, ret);

	mac->tx->clean_timer.function = pasemi_mac_tx_timer;
	mac->tx->clean_timer.data = (unsigned long)mac->tx;
	mac->tx->clean_timer.expires = jiffies+HZ;

	napi_disable(&mac->napi);
	netif_stop_queue(dev);

	pasemi_mac_free_tx_resources(mac);
	pasemi_mac_free_rx_resources(mac);
#define MAX_RETRIES 5000
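/*
 * Channel/interface shutdown helpers: each one clears the enable bit and
 * then polls the corresponding status register up to MAX_RETRIES times,
 * waiting for the active bit to drop, before reporting a failure.
 */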
static void pasemi_mac_pause_txchan(struct pasemi_mac *mac)

	int txch = tx_ring(mac)->chan.chno;

	for (retries = 0; retries < MAX_RETRIES; retries++) {

			"Failed to stop tx channel, tcmdsta %08x\n", sta);

static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac)

	int rxch = rx_ring(mac)->chan.chno;

	for (retries = 0; retries < MAX_RETRIES; retries++) {
1305 "Failed to stop rx channel, ccmdsta 08%x\n", sta);
static void pasemi_mac_pause_rxint(struct pasemi_mac *mac)

	for (retries = 0; retries < MAX_RETRIES; retries++) {

			"Failed to stop rx interface, rcmdsta %08x\n", sta);
static int pasemi_mac_close(struct net_device *dev)

	rxch = rx_ring(mac)->chan.chno;
	txch = tx_ring(mac)->chan.chno;

	netif_stop_queue(dev);
	napi_disable(&mac->napi);

	pasemi_mac_clean_tx(tx_ring(mac));
	pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);

	pasemi_mac_pause_txchan(mac);
	pasemi_mac_pause_rxint(mac);
	pasemi_mac_pause_rxchan(mac);
	pasemi_mac_intf_disable(mac);

	for (i = 0; i < mac->num_cs; i++) {
		pasemi_mac_free_csring(mac->cs[i]);

	pasemi_mac_free_rx_resources(mac);
	pasemi_mac_free_tx_resources(mac);
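/*
 * pasemi_mac_queue_csdesc(): emit descriptors on a checksum-engine ring that
 * compute the TCP or UDP checksum for an outgoing packet and write the result
 * back into the header; cs_dest below is the DMA address of the checksum
 * field (transport header offset + 16 for TCP, + 6 for UDP).
 */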
static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
				    const unsigned int *map_size,

	const int nh_off = skb_network_offset(skb);
	const int nh_len = skb_network_header_len(skb);
	const int nfrags = skb_shinfo(skb)->nr_frags;

		cs_dest = map[0] + skb_transport_offset(skb) + 16;

		cs_dest = map[0] + skb_transport_offset(skb) + 6;

	CS_DESC(csring, fill++) = fund;

	for (i = 1; i <= nfrags; i++)

	cs_size = fill - hdr;
	/* pasemi_mac_start_tx(): map the skb head and fragments, queue
	 * checksum-offload descriptors when needed, then post the TX
	 * descriptors. */
	struct pasemi_mac * const mac = netdev_priv(dev);

	unsigned long flags;

	const int nh_off = skb_network_offset(skb);
	const int nh_len = skb_network_header_len(skb);

	nfrags = skb_shinfo(skb)->nr_frags;

	map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
				PCI_DMA_TODEVICE);
	map_size[0] = skb_headlen(skb);
	if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
		goto out_err_nolock;

	for (i = 0; i < nfrags; i++) {

		map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0,
					      skb_frag_size(frag), DMA_TO_DEVICE);
		map_size[i+1] = skb_frag_size(frag);

			goto out_err_nolock;

	netif_stop_queue(dev);

		pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring);

	TX_DESC(txring, fill) = mactx;

	for (i = 0; i <= nfrags; i++) {

	dev->stats.tx_packets++;

	spin_unlock_irqrestore(&txring->lock, flags);

	spin_unlock_irqrestore(&txring->lock, flags);

	pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
			 PCI_DMA_TODEVICE);
static void pasemi_mac_set_rx_mode(struct net_device *dev)

	const struct pasemi_mac *mac = netdev_priv(dev);

	pasemi_mac_clean_tx(tx_ring(mac));
	pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
	if (pkts < budget) {

		pasemi_mac_restart_rx_intr(mac);
		pasemi_mac_restart_tx_intr(mac);
#ifdef CONFIG_NET_POLL_CONTROLLER

static void pasemi_mac_netpoll(struct net_device *dev)

	const struct pasemi_mac *mac = netdev_priv(dev);

	pasemi_mac_tx_intr(mac->tx->chan.irq, mac->tx);

	pasemi_mac_rx_intr(mac->rx->chan.irq, mac->rx);
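/*
 * Changing the MTU tears down and refills the RX ring with buffers sized for
 * the new MTU; checksum rings are set up lazily the first time the MTU is
 * raised above 1500.  PE_MIN_MTU/PE_MAX_MTU bound the accepted values.
 */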
static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)

	unsigned int rcmdsta = 0;

	if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)

	running = netif_running(dev);

		napi_disable(&mac->napi);
		netif_tx_disable(dev);
		pasemi_mac_intf_disable(mac);

		pasemi_mac_pause_rxint(mac);
		pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
		pasemi_mac_free_rx_buffers(mac);

	if (new_mtu > 1500 && !mac->num_cs) {
		pasemi_mac_setup_csrings(mac);

	rx_ring(mac)->next_to_fill = 0;
	pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1);

	napi_enable(&mac->napi);
	netif_start_queue(dev);
	pasemi_mac_intf_enable(mac);
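/*
 * net_device_ops hookup for the driver; ndo_poll_controller is only compiled
 * in when netpoll support is configured.
 */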
	.ndo_open = pasemi_mac_open,
	.ndo_stop = pasemi_mac_close,
	.ndo_start_xmit = pasemi_mac_start_tx,
	.ndo_set_rx_mode = pasemi_mac_set_rx_mode,
	.ndo_set_mac_address = pasemi_mac_set_mac_addr,
	.ndo_change_mtu = pasemi_mac_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = pasemi_mac_netpoll,
#endif
};
	dev = alloc_etherdev(sizeof(struct pasemi_mac));

		goto out_disable_device;

	pci_set_drvdata(pdev, dev);

	mac = netdev_priv(dev);

	mac->lro_mgr.get_skb_header = get_skb_hdr;

		dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");

		dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n");

	if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {

	ret = mac_to_intf(mac);

		dev_err(&mac->pdev->dev, "Can't map DMA interface\n");

		dev_err(&mac->pdev->dev,
			"register_netdev failed with error %d\n",

		   mac->dma_if, dev->dev_addr);

	struct net_device *netdev = pci_get_drvdata(pdev);

	mac = netdev_priv(netdev);

	pci_set_drvdata(pdev, NULL);
static struct pci_driver pasemi_mac_driver = {
	.name = "pasemi_mac",
	.id_table = pasemi_mac_pci_tbl,
	.probe = pasemi_mac_probe,

static void __exit pasemi_mac_cleanup_module(void)

	return pci_register_driver(&pasemi_mac_driver);