Linux Kernel 3.7.1
mptlan.c
/*
 *  linux/drivers/message/fusion/mptlan.c
 *      IP Over Fibre Channel device driver.
 *      For use with LSI Fibre Channel PCI chip/adapters
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
 *  Copyright (c) 2000-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; version 2 of the License.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    NO WARRANTY
    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
    solely responsible for determining the appropriateness of using and
    distributing the Program and assumes all risks associated with its
    exercise of rights under this Agreement, including but not limited to
    the risks and costs of program errors, damage to or loss of data,
    programs or equipment, and unavailability or interruption of operations.

    DISCLAIMER OF LIABILITY
    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Define statements used for debugging
 */
//#define MPT_LAN_IO_DEBUG

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "mptlan.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define my_VERSION	MPT_LINUX_VERSION_COMMON
#define MYNAM		"mptlan"

MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT LAN message sizes without variable part.
 */
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))

#define MPT_LAN_TRANSACTION32_SIZE \
	(sizeof(SGETransaction32_t) - sizeof(u32))

/*
 *  Fusion MPT LAN private structures
 */

struct BufferControl {
	struct sk_buff	*skb;
	dma_addr_t	dma;
	unsigned int	len;
};

struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;
	spinlock_t txfidx_lock;

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;
	spinlock_t rxfidx_lock;

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;
	u32 total_received;

	struct delayed_work post_buckets_task;
	struct net_device *dev;
	unsigned long post_buckets_active;
};
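/*
 * Note on the free-context lists above: mpt_txfidx[] and mpt_rxfidx[] are
 * used as simple stacks of free context numbers, with the matching *_tail
 * field indexing the top entry (-1 when empty). Always under
 * txfidx_lock/rxfidx_lock, the code below claims a context with
 *	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
 * and releases one with
 *	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
 */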

struct mpt_lan_ohdr {
	u16	dtype;
	u8	daddr[FC_ALEN];
	u8	saddr[FC_ALEN];
	u16	stype;
};
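/*
 * mpt_lan_ohdr is the 16-byte pseudo header at the front of each frame:
 * destination type, destination and source FC-mapped MAC addresses
 * (FC_ALEN == 6 bytes each), and source type. mpt_lan_type_trans() below
 * reads dtype/daddr/saddr from it before pulling it off the skb.
 */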

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

/*
 *  Forward protos...
 */
static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
		       MPT_FRAME_HDR *reply);
static int  mpt_lan_open(struct net_device *dev);
static int  mpt_lan_reset(struct net_device *dev);
static int  mpt_lan_close(struct net_device *dev);
static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
					   int priority);
static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_receive_post_reply(struct net_device *dev,
				       LANReceivePostReply_t *pRecvRep);
static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_send_reply(struct net_device *dev,
			       LANSendReply_t *pSendRep);
static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
					 struct net_device *dev);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Fusion MPT LAN private data
 */
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;

static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	lan_reply - Handle all data sent from the hardware.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@mf: Pointer to original MPT request frame
 *	@reply: Pointer to MPT reply frame (NULL if TurboReply)
 *
 *	Returns 1 indicating original alloc'd request frame ptr
 *	should be freed, or 0 if it shouldn't.
 */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = ioc->netdev;
	int FreeReqFrame = 0;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callback here
		//  is now skipped for this case!
#if 0
		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
//				  "MessageContext turbo reply received\n"));
			FreeReqFrame = 1;
			break;
#endif

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}
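/*
 * Reply routing summary: turbo replies (mf == NULL) carry their whole
 * payload in the 32-bit tmsg itself, so there is no request frame to free
 * and the handler returns 0. Full replies return 1 when mptbase may put
 * the original request frame back on its free list, and 0 when it must
 * not, e.g. while continuation replies still reference it.
 */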

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	struct net_device *dev = ioc->netdev;
	struct mpt_lan_priv *priv;

	if (dev == NULL)
		return(1);
	else
		priv = netdev_priv(dev);

	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

	if (priv->mpt_rxfidx == NULL)
		return (1);

	if (reset_phase == MPT_IOC_SETUP_RESET) {
		;
	} else if (reset_phase == MPT_IOC_PRE_RESET) {
		int i;
		unsigned long flags;

		netif_stop_queue(dev);

		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

		atomic_set(&priv->buckets_out, 0);

		/* Reset Rx Free Tail index and re-populate the queue. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx_tail = -1;
		for (i = 0; i < priv->max_buckets_out; i++)
			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
	} else {
		mpt_lan_post_receive_buckets(priv);
		netif_wake_queue(dev);
	}

	return 1;
}
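/*
 * Reset handling in three phases: SETUP_RESET is a no-op here; PRE_RESET
 * stops the Tx queue and marks every Rx context free again (any buckets
 * the IOC was holding are lost in the reset); the post-reset phase
 * re-posts a full set of receive buckets and re-enables transmit.
 */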

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));

	switch (le32_to_cpu(pEvReply->Event)) {
	case MPI_EVENT_NONE:			/* 00 */
	case MPI_EVENT_LOG_DATA:		/* 01 */
	case MPI_EVENT_STATE_CHANGE:		/* 02 */
	case MPI_EVENT_UNIT_ATTENTION:		/* 03 */
	case MPI_EVENT_IOC_BUS_RESET:		/* 04 */
	case MPI_EVENT_EXT_BUS_RESET:		/* 05 */
	case MPI_EVENT_RESCAN:			/* 06 */
		/* Ok, do we need to do anything here? As far as
		   I can tell, this is when a new device gets added
		   to the loop. */
	case MPI_EVENT_LINK_STATUS_CHANGE:	/* 07 */
	case MPI_EVENT_LOOP_STATE_CHANGE:	/* 08 */
	case MPI_EVENT_LOGOUT:			/* 09 */
	case MPI_EVENT_EVENT_CHANGE:		/* 0A */
	default:
		break;
	}

	/*
	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
	 *  Do NOT do it here now!
	 */

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_open(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	int i;

	if (mpt_lan_reset(dev) != 0) {
		MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");

		if (mpt_dev->active)
			printk ("The ioc is active. Perhaps it needs to be"
				" reset?\n");
		else
			printk ("The ioc is inactive, most likely in the "
				"process of being reset. Please try again in "
				"a moment.\n");
	}

	priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
	if (priv->mpt_txfidx == NULL)
		goto out;
	priv->mpt_txfidx_tail = -1;

	priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
				GFP_KERNEL);
	if (priv->SendCtl == NULL)
		goto out_mpt_txfidx;
	for (i = 0; i < priv->tx_max_out; i++)
		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;

	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

	priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
				   GFP_KERNEL);
	if (priv->mpt_rxfidx == NULL)
		goto out_SendCtl;
	priv->mpt_rxfidx_tail = -1;

	priv->RcvCtl = kcalloc(priv->max_buckets_out,
			       sizeof(struct BufferControl),
			       GFP_KERNEL);
	if (priv->RcvCtl == NULL)
		goto out_mpt_rxfidx;
	for (i = 0; i < priv->max_buckets_out; i++)
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;

	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
	for (i = 0; i < priv->tx_max_out; i++)
		dlprintk((" %xh", priv->mpt_txfidx[i]));
	dlprintk(("\n"));

	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

	mpt_lan_post_receive_buckets(priv);
	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
			" Notifications. This is a bad thing! We're not going "
			"to go ahead, but I'd be leery of system stability at "
			"this point.\n");
	}

	netif_start_queue(dev);
	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

	return 0;
out_mpt_rxfidx:
	kfree(priv->mpt_rxfidx);
	priv->mpt_rxfidx = NULL;
out_SendCtl:
	kfree(priv->SendCtl);
	priv->SendCtl = NULL;
out_mpt_txfidx:
	kfree(priv->mpt_txfidx);
	priv->mpt_txfidx = NULL;
out:	return -ENOMEM;
}
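/*
 * The error unwind above follows the usual kernel goto-ladder idiom: each
 * allocation that succeeds adds one label's worth of cleanup below it, so
 * a failure at any step frees exactly what was already allocated and
 * returns -ENOMEM.
 */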

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Send a LanReset message to the FW. This should result in the FW returning
   any buckets it still has. */
static int
mpt_lan_reset(struct net_device *dev)
{
	MPT_FRAME_HDR *mf;
	LANResetRequest_t *pResetReq;
	struct mpt_lan_priv *priv = netdev_priv(dev);

	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);

	if (mf == NULL) {
/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
		"Unable to allocate a request frame.\n"));
*/
		return -1;
	}

	pResetReq = (LANResetRequest_t *) mf;

	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
	pResetReq->ChainOffset	= 0;
	pResetReq->Reserved	= 0;
	pResetReq->PortNumber	= priv->pnum;
	pResetReq->MsgFlags	= 0;
	pResetReq->Reserved2	= 0;

	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted, atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	mpt_lan_reset(dev);

	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	if (mpt_dev->active) {
		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
		netif_wake_queue(dev);
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, sent));

	priv->SendCtl[ctx].skb = NULL;
	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(sent);

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		dev->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		dev->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		goto out;

	default:
		dev->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		dev->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__func__, sent));

		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANSendRequest_t *pSendReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	const unsigned char *mac;
	dma_addr_t dma;
	unsigned long flags;
	int ctx;
	u16 cur_naa = 0x1000;

	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
			__func__, skb));

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	if (priv->mpt_txfidx_tail < 0) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: no tx context available: %u\n",
			__func__, priv->mpt_txfidx_tail);
		return NETDEV_TX_BUSY;
	}

	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
	if (mf == NULL) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: Unable to alloc request frame\n",
			__func__);
		return NETDEV_TX_BUSY;
	}

	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev)));

	pSendReq = (LANSendRequest_t *) mf;

	/* Set the mac.raw pointer, since this apparently isn't getting
	 * done before we get the skb. Pull the data pointer past the mac data.
	 */
	skb_reset_mac_header(skb);
	skb_pull(skb, 12);

	dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);

	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
	priv->SendCtl[ctx].len = skb->len;

	/* Message Header */
	pSendReq->Reserved    = 0;
	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
	pSendReq->ChainOffset = 0;
	pSendReq->Reserved2   = 0;
	pSendReq->MsgFlags    = 0;
	pSendReq->PortNumber  = priv->pnum;

	/* Transaction Context Element */
	pTrans = (SGETransaction32_t *) pSendReq->SG_List;

	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
	pTrans->ContextSize   = sizeof(u32);
	pTrans->DetailsLength = 2 * sizeof(u32);
	pTrans->Flags         = 0;
	pTrans->TransactionContext[0] = cpu_to_le32(ctx);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev),
//			ctx, skb, skb->data));

	mac = skb_mac_header(skb);

	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
						    (mac[0] <<  8) |
						    (mac[1] <<  0));
	pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
						    (mac[3] << 16) |
						    (mac[4] <<  8) |
						    (mac[5] <<  0));

	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

	/* If we ever decide to send more than one Simple SGE per LANSend, then
	   we will need to make sure that LAST_ELEMENT only gets set on the
	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
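	/*
	 * FlagsLength packs the SGE flag bits into the top byte of one
	 * 32-bit word (shifted up by MPI_SGE_FLAGS_SHIFT, i.e. 24) and the
	 * transfer length into the low bits, so a single word tells the
	 * IOC both the buffer size and how to treat this sole, 64-bit
	 * addressed element.
	 */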
	pSimple->FlagsLength = cpu_to_le32(
			((MPI_SGE_FLAGS_LAST_ELEMENT |
			  MPI_SGE_FLAGS_END_OF_BUFFER |
			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
			  MPI_SGE_FLAGS_HOST_TO_IOC |
			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
			skb->len);
	pSimple->Address.Low = cpu_to_le32((u32) dma);
	if (sizeof(dma_addr_t) > sizeof(u32))
		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
	else
		pSimple->Address.High = 0;

	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
	dev->trans_start = jiffies;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			le32_to_cpu(pSimple->FlagsLength)));

	return NETDEV_TX_OK;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static void
mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
/*
 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
 */
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
		if (priority) {
			schedule_delayed_work(&priv->post_buckets_task, 0);
		} else {
			schedule_delayed_work(&priv->post_buckets_task, 1);
			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
				   "timer.\n"));
		}
		dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
	}
}
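/*
 * Bit 0 of post_buckets_active acts as a "work already queued" latch:
 * test_and_set_bit() queues the delayed work only if the bit was clear,
 * and mpt_lan_post_receive_buckets() clears it again when it finishes,
 * so at most one bucket-posting work item is outstanding at a time.
 */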

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	skb->protocol = mpt_lan_type_trans(skb, dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
		 "delivered to upper level.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	skb->dev = dev;
	netif_rx(skb);

	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
		 atomic_read(&priv->buckets_out)));

	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
		mpt_lan_wake_post_buckets_task(dev, 1);

	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
		  "remaining, %d received back since sod\n",
		  atomic_read(&priv->buckets_out), priv->total_received));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 ctx, len;

	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
	skb = priv->RcvCtl[ctx].skb;

	len = GET_LAN_PACKET_LENGTH(tmsg);

	if (len < MPT_LAN_RX_COPYBREAK) {
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		goto out;
	}

	skb_put(skb, len);

	priv->RcvCtl[ctx].skb = NULL;

	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

out:
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_dec(&priv->buckets_out);
	priv->total_received++;

	return mpt_lan_receive_skb(dev, skb);
}
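/*
 * Copybreak strategy: frames shorter than MPT_LAN_RX_COPYBREAK are copied
 * into a freshly allocated skb so the large, pre-mapped bucket skb stays
 * mapped and can be re-posted as-is; longer frames hand the bucket's own
 * skb up the stack, and the bucket is unmapped and later replaced.
 */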

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	count = pRecvRep->NumberOfContexts;

	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		priv->RcvCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
		  count));
*/
	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
		  "remaining, %d received back since sod.\n",
		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

	if (count > 1) {
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
						    priv->RcvCtl[ctx].dma,
						    priv->RcvCtl[ctx].len,
						    PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

			pci_dma_sync_single_for_device(mpt_dev->pcidev,
						       priv->RcvCtl[ctx].dma,
						       priv->RcvCtl[ctx].len,
						       PCI_DMA_FROMDEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {

		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
					    priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len,
					    PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev,
					       priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len,
					       PCI_DMA_FROMDEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb, len);
	}

	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		return -1;
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));

	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

		printk (KERN_WARNING MYNAM " Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");

		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Simple SGE's only at the moment */

static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
	struct net_device *dev = priv->dev;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, buckets, curr));

	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__func__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __func__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
		mpt_dev->RequestNB[i] = 0;
		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				printk (KERN_ERR "%s: Can't alloc context\n",
					__func__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__func__);
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext[0] = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		if (pSimple == NULL) {
			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
				__func__);
			mpt_free_msg_frame(mpt_dev, mf);
			goto out;
		}

		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __func__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
	__func__, priv->total_posted, priv->total_received));

	clear_bit(0, &priv->post_buckets_active);
}
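/*
 * Bucket math: "max" above is how many transaction-plus-SGE pairs fit in
 * the variable part of one request frame, i.e.
 *	(req_sz - fixed LANReceivePostRequest size) /
 *	(per-bucket transaction size + sizeof(SGESimple64_t))
 * so the while loop issues as many LAN_RECEIVE requests as it takes to
 * bring buckets_out back up to max_buckets_out, "max" buckets at a time.
 */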

static void
mpt_lan_post_receive_buckets_work(struct work_struct *work)
{
	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
						  post_buckets_task.work));
}

static const struct net_device_ops mpt_netdev_ops = {
	.ndo_open       = mpt_lan_open,
	.ndo_stop       = mpt_lan_close,
	.ndo_start_xmit = mpt_lan_sdu_send,
	.ndo_change_mtu = mpt_lan_change_mtu,
	.ndo_tx_timeout = mpt_lan_tx_timeout,
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
	struct net_device *dev;
	struct mpt_lan_priv *priv;
	u8 HWaddr[FC_ALEN], *a;

	dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
	if (!dev)
		return NULL;

	dev->mtu = MPT_LAN_MTU;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->mpt_dev = mpt_dev;
	priv->pnum = pnum;

	INIT_DELAYED_WORK(&priv->post_buckets_task,
			  mpt_lan_post_receive_buckets_work);
	priv->post_buckets_active = 0;

	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
			__LINE__, dev->mtu + dev->hard_header_len + 4));

	atomic_set(&priv->buckets_out, 0);
	priv->total_posted = 0;
	priv->total_received = 0;
	priv->max_buckets_out = max_buckets_out;
	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
			__LINE__,
			mpt_dev->pfacts[0].MaxLanBuckets,
			max_buckets_out,
			priv->max_buckets_out));

	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
	spin_lock_init(&priv->txfidx_lock);
	spin_lock_init(&priv->rxfidx_lock);

	/*  Grab pre-fetched LANPage1 stuff. :-)  */
	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

	HWaddr[0] = a[5];
	HWaddr[1] = a[4];
	HWaddr[2] = a[3];
	HWaddr[3] = a[2];
	HWaddr[4] = a[1];
	HWaddr[5] = a[0];

	dev->addr_len = FC_ALEN;
	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
	memset(dev->broadcast, 0xff, FC_ALEN);

	/* The Tx queue is 127 deep on the 909.
	 * Give ourselves some breathing room.
	 */
	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;

	dev->netdev_ops = &mpt_netdev_ops;
	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
		"and setting initial values\n"));

	if (register_netdev(dev) != 0) {
		free_netdev(dev);
		dev = NULL;
	}
	return dev;
}
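/*
 * The interface MAC address is taken from the adapter's hardware address
 * in the pre-fetched LANPage1 config page, byte-reversed into HWaddr[],
 * presumably because the config page stores it in the opposite byte
 * order from what the netdev expects on the wire.
 */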

static int
mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	MPT_ADAPTER		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev;
	int			i;

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
		       ioc->name, ioc->pfacts[i].PortNumber,
		       ioc->pfacts[i].ProtocolFlags,
		       MPT_PROTOCOL_FLAGS_c_c_c_c(
			       ioc->pfacts[i].ProtocolFlags));

		if (!(ioc->pfacts[i].ProtocolFlags &
					MPI_PORTFACTS_PROTOCOL_LAN)) {
			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
			       "seems to be disabled on this adapter port!\n",
			       ioc->name);
			continue;
		}

		dev = mpt_register_lan_device(ioc, i);
		if (!dev) {
			printk(KERN_ERR MYNAM ": %s: Unable to register "
			       "port%d as a LAN device\n", ioc->name,
			       ioc->pfacts[i].PortNumber);
			continue;
		}

		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
		       "registered as '%s'\n", ioc->name, dev->name);
		printk(KERN_INFO MYNAM ": %s/%s: "
		       "LanAddr = %pM\n",
		       IOC_AND_NETDEV_NAMES_s_s(dev),
		       dev->dev_addr);

		ioc->netdev = dev;

		return 0;
	}

	return -ENODEV;
}

static void
mptlan_remove(struct pci_dev *pdev)
{
	MPT_ADAPTER		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev = ioc->netdev;

	if(dev != NULL) {
		unregister_netdev(dev);
		free_netdev(dev);
	}
}

static struct mpt_pci_driver mptlan_driver = {
	.probe		= mptlan_probe,
	.remove		= mptlan_remove,
};

static int __init mpt_lan_init (void)
{
	show_mptmod_attr;

	LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
	    "lan_reply");
	if (LanCtx <= 0) {
		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));

	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
		       "handler with mptbase! The world is at an end! "
		       "Everything is fading to black! Goodbye.\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));

	mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
	return 0;
}

static void __exit mpt_lan_exit(void)
{
	mpt_device_driver_deregister(MPTLAN_DRIVER);
	mpt_reset_deregister(LanCtx);

	if (LanCtx) {
		mpt_deregister(LanCtx);
		LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
	}
}

module_init(mpt_lan_init);
module_exit(mpt_lan_exit);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
	struct fcllc *fcllc;

	skb_reset_mac_header(skb);
	skb_pull(skb, sizeof(struct mpt_lan_ohdr));

	if (fch->dtype == htons(0xffff)) {
		u32 *p = (u32 *) fch;

		swab32s(p + 0);
		swab32s(p + 1);
		swab32s(p + 2);
		swab32s(p + 3);

		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
				NETDEV_PTR_TO_IOC_NAME_s(dev));
		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
				fch->saddr);
	}

	if (*fch->daddr & 1) {
		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
			skb->pkt_type = PACKET_BROADCAST;
		} else {
			skb->pkt_type = PACKET_MULTICAST;
		}
	} else {
		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
			skb->pkt_type = PACKET_OTHERHOST;
		} else {
			skb->pkt_type = PACKET_HOST;
		}
	}

	fcllc = (struct fcllc *)skb->data;

	/* Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */
	if (fcllc->dsap == EXTENDED_SAP &&
		(fcllc->ethertype == htons(ETH_P_IP) ||
		 fcllc->ethertype == htons(ETH_P_ARP))) {
		skb_pull(skb, sizeof(struct fcllc));
		return fcllc->ethertype;
	}

	return htons(ETH_P_802_2);
}
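/*
 * mpt_lan_type_trans() plays the role eth_type_trans() plays for
 * Ethernet: it classifies pkt_type from the destination address (group
 * bit set means broadcast or multicast), works around a firmware
 * byte-swap bug on broadcast frames (dtype reading 0xffff after htons),
 * and returns the protocol, stripping the LLC/SNAP header for IP and ARP
 * so that only genuine 802.2 traffic keeps it.
 */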

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/