Linux Kernel 3.7.1
qib_driver.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>

#include "qib.h"

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */
const char ib_qib_version[] = QIB_DRIVER_VERSION "\n";

DEFINE_SPINLOCK(qib_devs_lock);
LIST_HEAD(qib_dev_list);
DEFINE_MUTEX(qib_mutex);	/* general driver use */

unsigned qib_ibmtu;
module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");

unsigned qib_compat_ddr_negotiate = 1;
module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(compat_ddr_negotiate,
		 "Attempt pre-IBTA 1.2 DDR speed negotiation");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("QLogic <support@qlogic.com>");
MODULE_DESCRIPTION("QLogic IB driver");
MODULE_VERSION(QIB_DRIVER_VERSION);

/*
 * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
 * PIO send buffers.  This is well beyond anything currently
 * defined in the InfiniBand spec.
 */
#define QIB_PIO_MAXIBHDR 128

/*
 * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define QIB_MAX_PKT_RECV 64

struct qlogic_ib_stats qib_stats;

const char *qib_get_unit_name(int unit)
{
	static char iname[16];

	snprintf(iname, sizeof iname, "infinipath%u", unit);
	return iname;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int qib_count_active_units(void)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	unsigned long flags;
	int pidx, nunits_active = 0;

	spin_lock_irqsave(&qib_devs_lock, flags);
	list_for_each_entry(dd, &qib_dev_list, list) {
		if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
					 QIBL_LINKARMED | QIBL_LINKACTIVE))) {
				nunits_active++;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&qib_devs_lock, flags);
	return nunits_active;
}

/*
 * Return count of all units, optionally return in arguments
 * the number of usable (present) units, and the number of
 * ports that are up.
 */
int qib_count_units(int *npresentp, int *nupp)
{
	int nunits = 0, npresent = 0, nup = 0;
	struct qib_devdata *dd;
	unsigned long flags;
	int pidx;
	struct qib_pportdata *ppd;

	spin_lock_irqsave(&qib_devs_lock, flags);

	list_for_each_entry(dd, &qib_dev_list, list) {
		nunits++;
		if ((dd->flags & QIB_PRESENT) && dd->kregbase)
			npresent++;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
					 QIBL_LINKARMED | QIBL_LINKACTIVE)))
				nup++;
		}
	}

	spin_unlock_irqrestore(&qib_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;

	return nunits;
}
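
/*
 * Illustrative use (editorial sketch, not part of the original file):
 * both out-parameters of qib_count_units() are optional (NULL is
 * allowed), so a caller wanting a full summary passes both, e.g. for a
 * diagnostic log line.
 */
static void qib_example_log_unit_counts(void)
{
	int npresent, nup;
	int nunits = qib_count_units(&npresent, &nup);

	pr_info("qib: %d unit(s): %d present, %d port(s) up, %d active\n",
		nunits, npresent, nup, qib_count_active_units());
}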

/**
 * qib_wait_linkstate - wait for an IB link state change to occur
 * @ppd: the qlogic_ib port data
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for an IB link state change to occur.
 * For now, take the easy polling route.
 * Returns 0 if state reached, otherwise -ETIMEDOUT.
 */
int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	if (ppd->state_wanted) {
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		ret = -EBUSY;
		goto bail;
	}
	ppd->state_wanted = state;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wait_event_interruptible_timeout(ppd->state_wait,
					 (ppd->lflags & state),
					 msecs_to_jiffies(msecs));
	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->state_wanted = 0;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	if (!(ppd->lflags & state))
		ret = -ETIMEDOUT;
	else
		ret = 0;
bail:
	return ret;
}

int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
{
	u32 lstate;
	int ret;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	switch (newstate) {
	case QIB_IB_LINKDOWN_ONLY:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_SLEEP:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_DISABLE:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKARM:
		if (ppd->lflags & QIBL_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		/*
		 * Since the port can be ACTIVE when we ask for ARMED,
		 * clear QIBL_LINKV so we can wait for a transition.
		 * If the link isn't ARMED, then something else happened
		 * and there is no point waiting for ARMED.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKV;
		break;

	case QIB_IB_LINKACTIVE:
		if (ppd->lflags & QIBL_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & QIBL_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKACTIVE;
		break;

	default:
		ret = -EINVAL;
		goto bail;
	}
	ret = qib_wait_linkstate(ppd, lstate, 10);

bail:
	return ret;
}
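
/*
 * Illustrative sequence (editorial sketch): the IB port state machine
 * only allows INIT -> ARMED -> ACTIVE, and qib_set_linkstate() enforces
 * this (requesting ACTIVE returns -EINVAL unless the port is ARMED).
 * A caller bringing a link up therefore arms it first; retry and
 * settle-time handling are elided here.
 */
static int qib_example_link_to_active(struct qib_pportdata *ppd)
{
	int ret;

	ret = qib_set_linkstate(ppd, QIB_IB_LINKARM);
	if (ret)
		return ret;
	return qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
}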

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
{
	const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
	const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1);

	return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
}
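
/*
 * Worked example (editorial; the values are illustrative, not chip
 * constants): with 8 buffers per chunk (perchunk_shift = 3) and 4 KB
 * eager buffers (bufsize_shift = 12), eager index 21 decomposes as
 * chunk = 21 >> 3 = 2 and idx = 21 & 7 = 5, i.e. byte offset
 * 5 << 12 = 20480 into the third chunk.
 */
static void qib_example_egrbuf_math(void)
{
	const u32 perchunk_shift = 3;	/* 8 buffers per chunk */
	const u32 bufsize_shift = 12;	/* 4 KB eager buffers */
	u32 etail = 21;
	u32 chunk = etail >> perchunk_shift;		/* 2 */
	u32 idx = etail & ((1U << perchunk_shift) - 1);	/* 5 */
	u32 off = idx << bufsize_shift;			/* 20480 */

	pr_info("egr %u -> chunk %u, byte offset %u\n", etail, chunk, off);
}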

/*
 * Returns 1 if error was a CRC, else 0.
 * Needed for some chips' synthesized error counters.
 */
static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
			  u32 ctxt, u32 eflags, u32 l, u32 etail,
			  __le32 *rhf_addr, struct qib_message_header *rhdr)
{
	u32 ret = 0;

	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
		ret = 1;
	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
		struct qib_other_headers *ohdr = NULL;
		struct qib_ibport *ibp = &ppd->ibport_data;
		struct qib_qp *qp = NULL;
		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
		u16 lid = be16_to_cpu(hdr->lrh[1]);
		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
		u32 qp_num;
		u32 opcode;
		u32 psn;
		int diff;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		if (lid < QIB_MULTICAST_LID_BASE) {
			lid &= ~((1 << ppd->lmc) - 1);
			if (unlikely(lid != ppd->lid))
				goto drop;
		}

		/* Check for GRH */
		if (lnh == QIB_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == QIB_LRH_GRH) {
			u32 vtf;

			ohdr = &hdr->u.l.oth;
			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
		} else
			goto drop;

		/* Get opcode and PSN from packet */
		opcode = be32_to_cpu(ohdr->bth[0]);
		opcode >>= 24;
		psn = be32_to_cpu(ohdr->bth[2]);

		/* Get the destination QP number. */
		qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
		if (qp_num != QIB_MULTICAST_QPN) {
			int ruc_res;

			qp = qib_lookup_qpn(ibp, qp_num);
			if (!qp)
				goto drop;

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock(&qp->r_lock);

			/* Check for valid receive state. */
			if (!(ib_qib_state_ops[qp->state] &
			      QIB_PROCESS_RECV_OK)) {
				ibp->n_pkt_drops++;
				goto unlock;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				ruc_res =
					qib_ruc_check_hdr(
						ibp, hdr,
						lnh == QIB_LRH_GRH,
						qp,
						be32_to_cpu(ohdr->bth[0]));
				if (ruc_res)
					goto unlock;

				/* Only deal with RDMA Writes for now */
				if (opcode <
				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
					diff = qib_cmp24(psn, qp->r_psn);
					if (!qp->r_nak_state && diff >= 0) {
						ibp->n_rc_seqnak++;
						qp->r_nak_state =
							IB_NAK_PSN_ERROR;
						/* Use the expected PSN. */
						qp->r_ack_psn = qp->r_psn;
						/*
						 * Wait to send the sequence
						 * NAK until all packets
						 * in the receive queue have
						 * been processed.
						 * Otherwise, we end up
						 * propagating congestion.
						 */
						if (list_empty(&qp->rspwait)) {
							qp->r_flags |=
								QIB_R_RSP_NAK;
							atomic_inc(
								&qp->refcount);
							list_add_tail(
							 &qp->rspwait,
							 &rcd->qp_wait_list);
						}
					} /* Out of sequence NAK */
				} /* QP Request NAKs */
				break;
			case IB_QPT_SMI:
			case IB_QPT_GSI:
			case IB_QPT_UD:
			case IB_QPT_UC:
			default:
				/* For now don't handle any other QP types */
				break;
			}

unlock:
			spin_unlock(&qp->r_lock);
			/*
			 * Notify qib_destroy_qp() if it is waiting
			 * for us to finish.
			 */
			if (atomic_dec_and_test(&qp->refcount))
				wake_up(&qp->wait);
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

drop:
	return ret;
}

/*
 * qib_kreceive - receive a packet
 * @rcd: the qlogic_ib context
 * @llic: gets count of good packets needed to clear lli,
 *        (used with chips that need to track crcs for lli)
 *
 * Called from interrupt handler for errors or receive interrupt.
 * Returns number of CRC error packets, needed by some chips for
 * local link integrity tracking.  crcs are adjusted down by following
 * good packets, if any, and count of good packets is also tracked.
 */
u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	__le32 *rhf_addr;
	void *ebuf;
	const u32 rsize = dd->rcvhdrentsize;	/* words */
	const u32 maxcnt = dd->rcvhdrcnt * rsize;	/* words */
	u32 etail = -1, l, hdrqtail;
	struct qib_message_header *hdr;
	u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
	int last;
	u64 lval;
	struct qib_qp *qp, *nqp;

	l = rcd->head;
	rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
	if (dd->flags & QIB_NODMA_RTAIL) {
		u32 seq = qib_hdrget_seq(rhf_addr);

		if (seq != rcd->seq_cnt)
			goto bail;
		hdrqtail = 0;
	} else {
		hdrqtail = qib_get_rcvhdrtail(rcd);
		if (l == hdrqtail)
			goto bail;
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
	}

	for (last = 0, i = 1; !last; i += !last) {
		hdr = dd->f_get_msgheader(dd, rhf_addr);
		eflags = qib_hdrget_err_flags(rhf_addr);
		etype = qib_hdrget_rcv_type(rhf_addr);
		/* total length */
		tlen = qib_hdrget_length_in_bytes(rhf_addr);
		ebuf = NULL;
		if ((dd->flags & QIB_NODMA_RTAIL) ?
		    qib_hdrget_use_egr_buf(rhf_addr) :
		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
			etail = qib_hdrget_index(rhf_addr);
			updegr = 1;
			if (tlen > sizeof(*hdr) ||
			    etype >= RCVHQ_RCV_TYPE_NON_KD) {
				ebuf = qib_get_egrbuf(rcd, etail);
				prefetch_range(ebuf, tlen - sizeof(*hdr));
			}
		}
		if (!eflags) {
			u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;

			if (lrh_len != tlen) {
				qib_stats.sps_lenerrs++;
				goto move_along;
			}
		}
		if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
		    ebuf == NULL &&
		    tlen > (dd->rcvhdrentsize - 2 + 1 -
			    qib_hdrget_offset(rhf_addr)) << 2) {
			goto move_along;
		}

		/*
		 * Both tiderr and qibhdrerr are set for all plain IB
		 * packets; only qibhdrerr should be set.
		 */
		if (unlikely(eflags))
			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
					       etail, rhf_addr, hdr);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			qib_ib_rcv(rcd, hdr, ebuf, tlen);
			if (crcs)
				crcs--;
			else if (llic && *llic)
				--*llic;
		}
move_along:
		l += rsize;
		if (l >= maxcnt)
			l = 0;
		if (i == QIB_MAX_PKT_RECV)
			last = 1;

		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
		if (dd->flags & QIB_NODMA_RTAIL) {
			u32 seq = qib_hdrget_seq(rhf_addr);

			if (++rcd->seq_cnt > 13)
				rcd->seq_cnt = 1;
			if (seq != rcd->seq_cnt)
				last = 1;
		} else if (l == hdrqtail)
			last = 1;
		/*
		 * Update head regs etc., every 16 packets, if not last pkt,
		 * to help prevent rcvhdrq overflows, when many packets
		 * are processed and queue is nearly full.
		 * Don't request an interrupt for intermediate updates.
		 */
		lval = l;
		if (!last && !(i & 0xf)) {
			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
			updegr = 0;
		}
	}
	/*
	 * Notify qib_destroy_qp() if it is waiting
	 * for lookaside_qp to finish.
	 */
	if (rcd->lookaside_qp) {
		if (atomic_dec_and_test(&rcd->lookaside_qp->refcount))
			wake_up(&rcd->lookaside_qp->wait);
		rcd->lookaside_qp = NULL;
	}

	rcd->head = l;
	rcd->pkt_count += i;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & QIB_R_RSP_NAK) {
			qp->r_flags &= ~QIB_R_RSP_NAK;
			qib_send_rc_ack(qp);
		}
		if (qp->r_flags & QIB_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~QIB_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_qib_state_ops[qp->state] &
					QIB_PROCESS_OR_FLUSH_SEND)
				qib_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}

bail:
	/* Report number of packets consumed */
	if (npkts)
		*npkts = i;

	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
	return crcs;
}
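
/*
 * Illustrative caller (editorial sketch): qib_kreceive() runs from the
 * receive-interrupt path.  The llic argument may be NULL on chips that
 * do not track CRCs for local link integrity; npkts reports how many
 * packets were consumed.  This handler is an assumption for
 * illustration, not the driver's real IRQ code.
 */
static void qib_example_handle_rcv_intr(struct qib_ctxtdata *rcd)
{
	u32 npkts;
	u32 crcs = qib_kreceive(rcd, NULL, &npkts);

	if (crcs)
		pr_debug("qib: ctxt %u: %u packet(s) with CRC errors\n",
			 rcd->ctxt, crcs);
}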

/**
 * qib_set_mtu - set the MTU
 * @ppd: the per-port data
 * @arg: the new MTU
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link INIT state...
 */
int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
{
	u32 piosize;
	int ret, chk;

	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ret = -EINVAL;
		goto bail;
	}
	chk = ib_mtu_enum_to_int(qib_ibmtu);
	if (chk > 0 && arg > chk) {
		ret = -EINVAL;
		goto bail;
	}

	piosize = ppd->ibmaxlen;
	ppd->ibmtu = arg;

	if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != ppd->init_ibmaxlen) {
			if (arg > piosize && arg <= ppd->init_ibmaxlen)
				piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
			ppd->ibmaxlen = piosize;
		}
	} else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
		piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
		ppd->ibmaxlen = piosize;
	}

	ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);

	ret = 0;

bail:
	return ret;
}
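
/*
 * Illustrative use (editorial sketch): only the five IB MTU values are
 * accepted; anything else, or anything above the ibmtu module-parameter
 * cap, returns -EINVAL.
 */
static int qib_example_use_4k_mtu(struct qib_pportdata *ppd)
{
	return qib_set_mtu(ppd, 4096);	/* 256/512/1024/2048/4096 only */
}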

int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
{
	struct qib_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;

	dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
			 lid | (~((1U << lmc) - 1)) << 16);

	qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
		    dd->unit, ppd->port, lid);

	return 0;
}
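
/*
 * Worked example (editorial): with lmc = 2, the low two LID bits select
 * alternate paths, so the word passed for QIB_IB_CFG_LIDLMC carries the
 * LID in bits 15:0 and the mask ~((1U << 2) - 1) = 0xFFFC in bits 31:16;
 * the chip then accepts any DLID whose upper 14 bits match ours.  For
 * lid = 0x20 the value works out to 0xFFFC0020.
 */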

/*
 * The following deals with the "obviously simple" task of overriding the
 * state of the LEDs, which normally indicate link physical and logical
 * status.  The complications arise in dealing with different hardware
 * mappings and the board-dependent routine being called from interrupts.
 * And then there's the requirement to _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)

static void qib_run_led_override(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = ppd->dd;
	int timeoff;
	int ph_idx;

	if (!(dd->flags & QIB_INITTED))
		return;

	ph_idx = ppd->led_override_phase++ & 1;
	ppd->led_override = ppd->led_override_vals[ph_idx];
	timeoff = ppd->led_override_timeoff;

	dd->f_setextled(ppd, 1);
	/*
	 * don't re-fire the timer if user asked for it to be off; we let
	 * it fire one more time after they turn it off to simplify
	 */
	if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
		mod_timer(&ppd->led_override_timer, jiffies + timeoff);
}

void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
{
	struct qib_devdata *dd = ppd->dd;
	int timeoff, freq;

	if (!(dd->flags & QIB_INITTED))
		return;

	/* First check if we are blinking. If not, use 1 Hz polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink set both phases the same. */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = val & 0xF;
	}
	ppd->led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our request.
	 */
	if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
		/* Need to start timer */
		init_timer(&ppd->led_override_timer);
		ppd->led_override_timer.function = qib_run_led_override;
		ppd->led_override_timer.data = (unsigned long) ppd;
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
	} else {
		if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
			mod_timer(&ppd->led_override_timer, jiffies + 1);
		atomic_dec(&ppd->led_override_timer_active);
	}
}
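
/*
 * Illustrative encoding (editorial sketch): the override value packs a
 * blink frequency into bits 15:8 and the two LED phases into the low
 * two nybbles.  With freq = 16, each phase lasts (HZ << 4)/16 = HZ
 * jiffies, i.e. the LEDs toggle about once per second.  The nybble
 * values 0xF/0x0 ("all on"/"all off") are assumptions here; the actual
 * mapping is board-dependent.
 */
static void qib_example_blink_leds(struct qib_pportdata *ppd)
{
	unsigned int val = (16 << LED_OVER_FREQ_SHIFT) | (0x0 << 4) | 0xF;

	qib_set_led_override(ppd, val);
}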

/**
 * qib_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip
 * resources.
 */
int qib_reset_device(int unit)
{
	int ret, i;
	struct qib_devdata *dd = qib_lookup(unit);
	struct qib_pportdata *ppd;
	unsigned long flags;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
		qib_devinfo(dd->pcidev,
			    "Invalid unit number %u or not initialized or not present\n",
			    unit);
		ret = -ENXIO;
		goto bail;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd)
		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (atomic_read(&ppd->led_override_timer_active)) {
			/* Need to stop LED timer, _then_ shut off LEDs */
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}

		/* Shut off LEDs after we are sure timer is not running */
		ppd->led_override = LED_OVER_BOTH_OFF;
		dd->f_setextled(ppd, 0);
		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);
	}

	ret = dd->f_reset(dd);
	if (ret == 1)
		ret = qib_init(dd, 1);
	else
		ret = -EAGAIN;
	if (ret)
		qib_dev_err(dd,
			    "Reinitialize unit %u after reset failed with %d\n",
			    unit, ret);
	else
		qib_devinfo(dd->pcidev,
			    "Reinitialized unit %u after resetting\n",
			    unit);

bail:
	return ret;
}