#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define my_VERSION	MPT_LINUX_VERSION_COMMON
#define MYNAM		"mptlan"

#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
#define MPT_LAN_TRANSACTION32_SIZE \
	(sizeof(SGETransaction32_t) - sizeof(u32))
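/*
 * Forward declarations for the MPT base driver callbacks and the
 * net_device handlers defined below.
 */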
static int  lan_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
		      MPT_FRAME_HDR *reply);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
					   int priority);
static int  mpt_lan_receive_post_reply(struct net_device *dev,
				       LANReceivePostReply_t *pRecvRep);
static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
					 struct net_device *dev);
static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;
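/*
 * lan_reply - MPT base driver callback for LAN protocol replies.  Turbo
 * (context-only) replies are dispatched straight to the send/receive
 * turbo handlers; full reply frames are switched on their MPI function.
 */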
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)

	int FreeReqFrame = 0;

			(void) mpt_lan_send_turbo(dev, tmsg);

			mpt_lan_receive_post_turbo(dev, tmsg);

				"that I don't know what to do with\n");

	switch (reply->u.hdr.Function) {

		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);

			mpt_lan_receive_post_reply(dev, pRecvRep);

				  "ReceivePostReply received.\n"));

			"reply that I don't know what to do with\n");
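/*
 * mpt_lan_ioc_reset - IOC reset notification: the transmit queue is
 * stopped before the reset, and receive buckets are reposted (and the
 * queue woken) once the reset has completed.
 */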
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)

		priv = netdev_priv(dev);

			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

	if (reset_phase == MPT_IOC_SETUP_RESET) {
	} else if (reset_phase == MPT_IOC_PRE_RESET) {

		netif_stop_queue(dev);

		mpt_lan_post_receive_buckets(priv);
		netif_wake_queue(dev);
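/*
 * mpt_lan_open - net_device open handler: resets the LAN portion of the
 * IOC, posts the initial receive buckets and starts the transmit queue.
 */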
	if (mpt_lan_reset(dev) != 0) {

			printk ("The ioc is active. Perhaps it needs to be"

			printk ("The ioc is inactive, most likely in the "
				"process of being reset. Please try again in "

	mpt_lan_post_receive_buckets(priv);

		" Notifications. This is a bad thing! We're not going "
		"to go ahead, but I'd be leery of system stability at "

	netif_start_queue(dev);
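/*
 * mpt_lan_close - net_device stop handler: stops the queue, then unmaps
 * and frees any receive and send skbs that are still outstanding.
 */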
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		"since driver was loaded, %d still out\n",

	netif_stop_queue(dev);

				"is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,

			dev_kfree_skb(priv->RcvCtl[i].skb);

			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,

			dev_kfree_skb(priv->SendCtl[i].skb);
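/* mpt_lan_change_mtu - ndo_change_mtu handler for the FC LAN interface. */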
mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
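/*
 * mpt_lan_tx_timeout - netdev watchdog handler: if the IOC is still
 * active, simply wake the transmit queue again.
 */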
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	if (mpt_dev->active) {
		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n",
			   dev->name));
		netif_wake_queue(dev);
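/*
 * mpt_lan_send_turbo - completion of a turbo (context-only) send reply:
 * update the tx counters, unmap and free the transmitted skb, and wake
 * the queue if it had been stopped.
 */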
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	dev->stats.tx_packets++;

	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,

		netif_wake_queue(dev);
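/*
 * mpt_lan_send_reply - completion of a full LAN_SEND reply frame: report
 * IOC errors (e.g. an invalid SGL), unmap and free the completed send
 * buffers, and wake the transmit queue.
 */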
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	int FreeReqFrame = 0;

		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",

		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,

	netif_wake_queue(dev);
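/*
 * mpt_lan_sdu_send - ndo_start_xmit handler: maps the skb for DMA and
 * posts a LAN send request to the IOC, using the destination address
 * from the MAC header to build the FC NAA addressing fields.
 */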
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	const unsigned char *mac;

	u16 cur_naa = 0x1000;

		netif_stop_queue(dev);

		netif_stop_queue(dev);

	skb_reset_mac_header(skb);

	dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);

	mac = skb_mac_header(skb);
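/*
 * mpt_lan_receive_skb - passes a received skb up the stack after setting
 * skb->protocol via mpt_lan_type_trans(), updates the rx counters and
 * schedules a bucket repost when too few receive buckets remain posted.
 */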
	skb->protocol = mpt_lan_type_trans(skb, dev);

		"delivered to upper level.\n",

	dev->stats.rx_packets++;

	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",

		mpt_lan_wake_post_buckets_task(dev, 1);

		"remaining, %d received back since sod\n",
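/*
 * mpt_lan_receive_post_turbo - receive completion for a turbo reply
 * carrying a single bucket: small frames are copied into a freshly
 * allocated skb (sync-for-cpu / copy / sync-for-device), larger ones are
 * unmapped and handed up directly via mpt_lan_receive_skb().
 */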
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		skb = (struct sk_buff *)dev_alloc_skb(len);

			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,

	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,

	return mpt_lan_receive_skb(dev, skb);
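/*
 * mpt_lan_receive_post_free - the IOC is handing back posted buckets
 * without data (e.g. around a reset): unmap and free each returned skb.
 */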
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)

	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		"IOC returned %d buckets, freeing them...\n", count));

	for (i = 0; i < count; i++) {

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,

		"remaining, %d received back since sod.\n",
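/*
 * mpt_lan_receive_post_reply - receive completion for a full reply frame.
 * A packet may span several buckets; the pieces are assembled into one
 * skb, the consumed buckets are unmapped, and a LanReset is requested if
 * the driver's bucket accounting and the firmware's BucketsRemaining
 * count diverge past the configured threshold.
 */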
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)

	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		return mpt_lan_receive_post_free(dev, pRecvRep);

		"ReceivePostReply w/ PacketLength zero!\n",

		skb = (struct sk_buff *)dev_alloc_skb(len);

			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",

				__FILE__, __LINE__);

		for (i = 0; i < count; i++) {

			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,

			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

			pci_dma_sync_single_for_device(mpt_dev->pcidev,

		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb = (struct sk_buff *)dev_alloc_skb(len);

			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",

				__FILE__, __LINE__);

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev,

		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,

		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		"MPT_LAN_MAX_BUCKETS_OUT = %d\n",

			"(priv->buckets_out = %d)\n",
	else if (remaining < 10)
			"(priv->buckets_out = %d)\n",

	if ((remaining < priv->bucketthresh) &&

		       "buckets_out count and fw's BucketsRemaining "
		       "count has crossed the threshold, issuing a "
		       "LanReset to clear the fw's hashtable. You may "
		       "want to check your /var/log/messages for \"CRC "
		       "error\" event notifications.\n");

	mpt_lan_wake_post_buckets_task(dev, 0);

	return mpt_lan_receive_skb(dev, skb);
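/*
 * mpt_lan_post_receive_buckets - allocates receive skbs, maps them for
 * DMA and posts them to the IOC as receive buckets; the _work() wrapper
 * below lets this run from the delayed-work item.
 */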
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)

	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	unsigned long flags;

		__func__, buckets, curr));

			__func__, buckets));

		i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
		mpt_dev->RequestNB[i] = 0;

			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,

				dev_kfree_skb(priv->RcvCtl[ctx].skb);

				skb = dev_alloc_skb(len);

						MYNAM "/%s: Can't alloc skb\n",

					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			if (pSimple == NULL) {

static void
mpt_lan_post_receive_buckets_work(struct work_struct *work)
{
	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
						  post_buckets_task.work));
}
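/* net_device callbacks wired up for every registered mptlan interface. */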
static const struct net_device_ops mpt_netdev_ops = {
	.ndo_open       = mpt_lan_open,
	.ndo_stop       = mpt_lan_close,
	.ndo_start_xmit = mpt_lan_sdu_send,
	.ndo_change_mtu = mpt_lan_change_mtu,
	.ndo_tx_timeout = mpt_lan_tx_timeout,
};
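/*
 * mpt_register_lan_device - allocates and initialises the net_device for
 * one IOC port: sets up the private data and delayed bucket-post work,
 * and derives the MAC address from the adapter's LAN config page.
 */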
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)

	priv = netdev_priv(dev);

			  mpt_lan_post_receive_buckets_work);

	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",

		  mpt_dev->pfacts[0].MaxLanBuckets,

	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

		"and setting initial values\n"));
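/*
 * mptlan_probe - called for each Fusion MPT adapter: walks the IOC's
 * ports, skips any port without the LAN protocol flag and registers a
 * LAN net_device for the rest.
 */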
	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {

			   "ProtocolFlags=%02Xh (%c%c%c%c)\n",
			   ioc->name, ioc->pfacts[i].PortNumber,
			   ioc->pfacts[i].ProtocolFlags,

			   ioc->pfacts[i].ProtocolFlags));

		if (!(ioc->pfacts[i].ProtocolFlags &

				"seems to be disabled on this adapter port!\n",

		dev = mpt_register_lan_device(ioc, i);

				"port%d as a LAN device\n", ioc->name,
				ioc->pfacts[i].PortNumber);

			"registered as '%s'\n", ioc->name, dev->name);
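/* mptlan_remove - tears down and frees the net_device for this adapter. */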
mptlan_remove(struct pci_dev *pdev)

	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
static struct mpt_pci_driver mptlan_driver = {
	.probe  = mptlan_probe,
	.remove = mptlan_remove,
};
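/*
 * mpt_lan_init - module init: registers the LAN reply callback and the
 * IOC reset handler with the MPT base driver (mptbase).
 */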
static int __init mpt_lan_init (void)

		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");

			"handler with mptbase! The world is at an end! "
			"Everything is fading to black! Goodbye.\n");
static void __exit mpt_lan_exit(void)
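/*
 * mpt_lan_type_trans - FC LAN analogue of eth_type_trans(): resets the
 * MAC header pointer and classifies the frame (multicast/broadcast vs.
 * host-directed) from the destination address in the FC LAN header.
 */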
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)

	skb_reset_mac_header(skb);

	if (*fch->daddr & 1) {