fc_exch.c
1 /*
2  * Copyright(c) 2007 Intel Corporation. All rights reserved.
3  * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
4  * Copyright(c) 2008 Mike Christie
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Maintained at www.Open-FCoE.org
20  */
21 
22 /*
23  * Fibre Channel exchange and sequence handling.
24  */
25 
26 #include <linux/timer.h>
27 #include <linux/slab.h>
28 #include <linux/err.h>
29 #include <linux/export.h>
30 
31 #include <scsi/fc/fc_fc2.h>
32 
33 #include <scsi/libfc.h>
34 #include <scsi/fc_encode.h>
35 
36 #include "fc_libfc.h"
37 
38 u16 fc_cpu_mask; /* cpu mask for possible cpus */
39 EXPORT_SYMBOL(fc_cpu_mask);
40 static u16 fc_cpu_order; /* log2 of total possible cpus, rounded up */
41 static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
42 static struct workqueue_struct *fc_exch_workqueue;
43 
44 /*
45  * Structure and function definitions for managing Fibre Channel Exchanges
46  * and Sequences.
47  *
48  * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
49  *
50  * fc_exch_mgr holds the exchange state for an N port.
51  *
52  * fc_exch holds state for one exchange and links to its active sequence.
53  *
54  * fc_seq holds the state for an individual sequence.
55  */
56 
68 struct fc_exch_pool {
69  spinlock_t lock;
70  struct list_head ex_list;
71  u16 next_index;
72  u16 total_exches;
73 
74  /* two caches of free slots in the exch array */
75  u16 left;
76  u16 right;
77 } ____cacheline_aligned_in_smp;
78 
93 struct fc_exch_mgr {
94  struct fc_exch_pool __percpu *pool;
95  mempool_t *ep_pool;
96  enum fc_class class;
97  struct kref kref;
98  u16 min_xid;
99  u16 max_xid;
100  u16 pool_max_index;
101 
102  struct {
103  atomic_t no_free_exch;
104  atomic_t no_free_exch_xid;
105  atomic_t xid_not_found;
106  atomic_t xid_busy;
107  atomic_t seq_not_found;
108  atomic_t non_bls_resp;
109  } stats;
110 };
111 
124 struct fc_exch_mgr_anchor {
125  struct list_head ema_list;
126  struct fc_exch_mgr *mp;
127  bool (*match)(struct fc_frame *);
128 };
129 
130 static void fc_exch_rrq(struct fc_exch *);
131 static void fc_seq_ls_acc(struct fc_frame *);
132 static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
133  enum fc_els_rjt_explan);
134 static void fc_exch_els_rec(struct fc_frame *);
135 static void fc_exch_els_rrq(struct fc_frame *);
136 
137 /*
138  * Internal implementation notes.
139  *
140  * By default libfc has a single exchange manager, but an LLD may
141  * choose to have one per CPU. There is one sequence manager per
142  * exchange manager; the two are currently never separated.
143  *
144  * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
145  * assigned by the Sequence Initiator that shall be unique for a specific
146  * D_ID and S_ID pair while the Sequence is open." Note that it isn't
147  * qualified by exchange ID, which one might think it would be.
148  * In practice this limits the number of open sequences and exchanges to 256
149  * per session. For most targets we could treat this limit as per exchange.
150  *
151  * The exchange and its sequence are freed when the last sequence is received.
152  * It's possible for the remote port to leave an exchange open without
153  * sending any sequences.
154  *
155  * Notes on reference counts:
156  *
157  * Exchanges are reference counted and exchange gets freed when the reference
158  * count becomes zero.
159  *
160  * Timeouts:
161  * Sequences are timed out for E_D_TOV and R_A_TOV.
162  *
163  * Sequence event handling:
164  *
165  * The following events may occur on initiator sequences:
166  *
167  * Send.
168  * For now, the whole thing is sent.
169  * Receive ACK
170  * This applies only to class F.
171  * The sequence is marked complete.
172  * ULP completion.
173  * The upper layer calls fc_exch_done() when done
174  * with exchange and sequence tuple.
175  * RX-inferred completion.
176  * When we receive the next sequence on the same exchange, we can
177  * retire the previous sequence ID. (XXX not implemented).
178  * Timeout.
179  * R_A_TOV frees the sequence ID. If we're waiting for ACK,
180  * E_D_TOV causes abort and calls upper layer response handler
181  * with FC_EX_TIMEOUT error.
182  * Receive RJT
183  * XXX defer.
184  * Send ABTS
185  * On timeout.
186  *
187  * The following events may occur on recipient sequences:
188  *
189  * Receive
190  * Allocate sequence for first frame received.
191  * Hold during receive handler.
192  * Release when final frame received.
193  * Keep status of last N of these for the ELS RES command. XXX TBD.
194  * Receive ABTS
195  * Deallocate sequence
196  * Send RJT
197  * Deallocate
198  *
199  * For now, we neglect conditions where only part of a sequence was
200  * received or transmitted, or where out-of-order receipt is detected.
201  */
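Since SEQ_ID is a single byte, the allocator in fc_seq_start_next_locked() below simply increments ep->seq_id and lets it wrap, which is where the 256-per-session limit above comes from. A trivial user-space sketch of that wrap (illustrative only, not code from this file):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t seq_id = 254;	/* one-byte SEQ_ID, as in FC-FS-2 */
	int i;

	for (i = 0; i < 4; i++)
		printf("seq_id %u\n", seq_id++);	/* 254, 255, 0, 1 */
	return 0;
}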
202 
203 /*
204  * Locking notes:
205  *
206  * The EM code runs in a per-CPU worker thread.
207  *
208  * To protect against concurrency between worker thread code and timers,
209  * sequence allocation and deallocation must be locked.
210  * - exchange refcnt can be done atomically without locks.
211  * - sequence allocation must be locked by exch lock.
212  * - If the EM pool lock and ex_lock must be taken at the same time, then the
213  * EM pool lock must be taken before the ex_lock.
214  */
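A user-space analogue of the ordering rule above may help; in this illustrative sketch, pthread mutexes stand in for the kernel spinlocks, and demo_pool/demo_exch are hypothetical stand-ins for fc_exch_pool and fc_exch:

#include <pthread.h>
#include <stdio.h>

struct demo_pool { pthread_mutex_t lock; int total_exches; };
struct demo_exch { pthread_mutex_t ex_lock; int state; };

/* When both locks are needed, take the pool lock before ex_lock. */
static void demo_remove(struct demo_pool *pool, struct demo_exch *ep)
{
	pthread_mutex_lock(&pool->lock);	/* EM pool lock first */
	pthread_mutex_lock(&ep->ex_lock);	/* then the exchange lock */
	pool->total_exches--;
	ep->state = 0;
	pthread_mutex_unlock(&ep->ex_lock);
	pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
	struct demo_pool pool = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct demo_exch ep = { PTHREAD_MUTEX_INITIALIZER, 1 };

	demo_remove(&pool, &ep);
	printf("total_exches=%d state=%d\n", pool.total_exches, ep.state);
	return 0;
}

This mirrors fc_exch_em_alloc() below, which takes pool->lock and then ep->ex_lock before dropping the pool lock.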
215 
216 /*
217  * opcode names for debugging.
218  */
219 static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
220 
230 static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
231  unsigned int max_index)
232 {
233  const char *name = NULL;
234 
235  if (op < max_index)
236  name = table[op];
237  if (!name)
238  name = "unknown";
239  return name;
240 }
241 
246 static const char *fc_exch_rctl_name(unsigned int op)
247 {
248  return fc_exch_name_lookup(op, fc_exch_rctl_names,
249  ARRAY_SIZE(fc_exch_rctl_names));
250 }
251 
256 static inline void fc_exch_hold(struct fc_exch *ep)
257 {
258  atomic_inc(&ep->ex_refcnt);
259 }
260 
271 static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
272  u32 f_ctl)
273 {
274  struct fc_frame_header *fh = fc_frame_header_get(fp);
275  u16 fill;
276 
277  fr_sof(fp) = ep->class;
278  if (ep->seq.cnt)
279  fr_sof(fp) = fc_sof_normal(ep->class);
280 
281  if (f_ctl & FC_FC_END_SEQ) {
282  fr_eof(fp) = FC_EOF_T;
283  if (fc_sof_needs_ack(ep->class))
284  fr_eof(fp) = FC_EOF_N;
285  /*
286  * From F_CTL.
287  * The number of fill bytes needed to make the length a 4-byte
288  * multiple is carried in the low-order 2 bits of f_ctl.
289  * The fill itself will have been cleared by the frame
290  * allocation.
291  * After this, the length will be even, as expected by
292  * the transport.
293  */
294  fill = fr_len(fp) & 3;
295  if (fill) {
296  fill = 4 - fill;
297  /* TODO, this may be a problem with fragmented skb */
298  skb_put(fp_skb(fp), fill);
299  hton24(fh->fh_f_ctl, f_ctl | fill);
300  }
301  } else {
302  WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */
303  fr_eof(fp) = FC_EOF_N;
304  }
305 
306  /*
307  * Initialize remaining fh fields
308  * from fc_fill_fc_hdr
309  */
310  fh->fh_ox_id = htons(ep->oxid);
311  fh->fh_rx_id = htons(ep->rxid);
312  fh->fh_seq_id = ep->seq.id;
313  fh->fh_seq_cnt = htons(ep->seq.cnt);
314 }
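The padding rule is easy to check in isolation. An illustrative user-space sketch (fill_bytes() is a hypothetical helper mirroring the fr_len(fp) & 3 logic above):

#include <stdio.h>

static unsigned int fill_bytes(unsigned int frame_len)
{
	unsigned int rem = frame_len & 3;	/* bytes past a 4-byte boundary */

	return rem ? 4 - rem : 0;	/* fill needed to pad to a multiple of 4 */
}

int main(void)
{
	unsigned int len;

	for (len = 60; len <= 64; len++)
		printf("len %u -> %u fill byte(s)\n", len, fill_bytes(len));
	return 0;
}

The fill count is what fc_exch_setup_hdr() ORs into the low-order 2 bits of F_CTL.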
315 
323 static void fc_exch_release(struct fc_exch *ep)
324 {
325  struct fc_exch_mgr *mp;
326 
327  if (atomic_dec_and_test(&ep->ex_refcnt)) {
328  mp = ep->em;
329  if (ep->destructor)
330  ep->destructor(&ep->seq, ep->arg);
331  WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
332  mempool_free(ep, mp->ep_pool);
333  }
334 }
335 
340 static inline void fc_exch_timer_cancel(struct fc_exch *ep)
341 {
342  if (cancel_delayed_work(&ep->timeout_work)) {
343  FC_EXCH_DBG(ep, "Exchange timer canceled\n");
344  atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
345  }
346 }
347 
357 static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
358  unsigned int timer_msec)
359 {
360  if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
361  return;
362 
363  FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
364 
365  if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
366  msecs_to_jiffies(timer_msec)))
367  fc_exch_hold(ep); /* hold for timer */
368 }
369 
375 static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
376 {
377  spin_lock_bh(&ep->ex_lock);
378  fc_exch_timer_set_locked(ep, timer_msec);
379  spin_unlock_bh(&ep->ex_lock);
380 }
381 
386 static int fc_exch_done_locked(struct fc_exch *ep)
387 {
388  int rc = 1;
389 
390  /*
391  * We must check for completion in case there are two threads
392  * trying to complete this. But the rrq code will reuse the
393  * ep, and in that case we only clear the resp and set it as
394  * complete, so it can be reused by the timer to send the rrq.
395  */
396  ep->resp = NULL;
397  if (ep->state & FC_EX_DONE)
398  return rc;
399  ep->esb_stat |= ESB_ST_COMPLETE;
400 
401  if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
402  ep->state |= FC_EX_DONE;
403  fc_exch_timer_cancel(ep);
404  rc = 0;
405  }
406  return rc;
407 }
408 
418 static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
419  u16 index)
420 {
421  struct fc_exch **exches = (struct fc_exch **)(pool + 1);
422  return exches[index];
423 }
424 
431 static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
432  struct fc_exch *ep)
433 {
434  ((struct fc_exch **)(pool + 1))[index] = ep;
435 }
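Both helpers rely on the exchange pointer array being laid out immediately after each struct fc_exch_pool, which is how fc_exch_mgr_alloc() sizes the per-CPU allocation further down. An illustrative user-space sketch of the (pool + 1) trick, with hypothetical demo types:

#include <stdio.h>
#include <stdlib.h>

struct demo_pool { unsigned int next_index; };	/* fc_exch_pool stand-in */
struct demo_exch { unsigned int xid; };		/* fc_exch stand-in */

int main(void)
{
	size_t nr = 8;
	/* one allocation: pool header immediately followed by the array */
	struct demo_pool *pool = calloc(1, sizeof(*pool) +
					nr * sizeof(struct demo_exch *));
	struct demo_exch **exches;
	struct demo_exch ep = { 42 };

	if (!pool)
		return 1;
	exches = (struct demo_exch **)(pool + 1);	/* array starts here */
	exches[3] = &ep;				/* ptr_set equivalent */
	printf("slot 3 -> xid %u\n", exches[3]->xid);	/* ptr_get equivalent */
	free(pool);
	return 0;
}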
436 
441 static void fc_exch_delete(struct fc_exch *ep)
442 {
443  struct fc_exch_pool *pool;
444  u16 index;
445 
446  pool = ep->pool;
447  spin_lock_bh(&pool->lock);
448  WARN_ON(pool->total_exches <= 0);
449  pool->total_exches--;
450 
451  /* update cache of free slot */
452  index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
453  if (pool->left == FC_XID_UNKNOWN)
454  pool->left = index;
455  else if (pool->right == FC_XID_UNKNOWN)
456  pool->right = index;
457  else
458  pool->next_index = index;
459 
460  fc_exch_ptr_set(pool, index, NULL);
461  list_del(&ep->ex_list);
462  spin_unlock_bh(&pool->lock);
463  fc_exch_release(ep); /* drop hold for exch in mp */
464 }
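The left/right slot cache that fc_exch_delete() fills is consumed by fc_exch_em_alloc() below. A condensed, illustrative user-space sketch of that two-slot cache (hypothetical helpers; 0xffff stands in for FC_XID_UNKNOWN):

#include <stdio.h>

#define XID_UNKNOWN 0xffffu

static unsigned int left = XID_UNKNOWN, right = XID_UNKNOWN, next_index;

static void free_slot(unsigned int index)	/* as in fc_exch_delete() */
{
	if (left == XID_UNKNOWN)
		left = index;
	else if (right == XID_UNKNOWN)
		right = index;
	else
		next_index = index;
}

static unsigned int get_slot(void)	/* as in fc_exch_em_alloc() */
{
	unsigned int index;

	if (left != XID_UNKNOWN) {
		index = left;
		left = XID_UNKNOWN;
	} else if (right != XID_UNKNOWN) {
		index = right;
		right = XID_UNKNOWN;
	} else {
		index = next_index;	/* where the linear scan would start */
	}
	return index;
}

int main(void)
{
	free_slot(7);
	free_slot(9);
	printf("%u %u\n", get_slot(), get_slot());	/* prints: 7 9 */
	return 0;
}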
465 
472 static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
473  struct fc_frame *fp)
474 {
475  struct fc_exch *ep;
476  struct fc_frame_header *fh = fc_frame_header_get(fp);
477  int error;
478  u32 f_ctl;
479  u8 fh_type = fh->fh_type;
480 
481  ep = fc_seq_exch(sp);
482  WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
483 
484  f_ctl = ntoh24(fh->fh_f_ctl);
485  fc_exch_setup_hdr(ep, fp, f_ctl);
486  fr_encaps(fp) = ep->encaps;
487 
488  /*
489  * update sequence count if this frame is carrying
490  * multiple FC frames when sequence offload is enabled
491  * by LLD.
492  */
493  if (fr_max_payload(fp))
494  sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
495  fr_max_payload(fp));
496  else
497  sp->cnt++;
498 
499  /*
500  * Send the frame.
501  */
502  error = lport->tt.frame_send(lport, fp);
503 
504  if (fh_type == FC_TYPE_BLS)
505  return error;
506 
507  /*
508  * Update the exchange and sequence flags,
509  * assuming all frames for the sequence have been sent.
510  * We can only be called to send once for each sequence.
511  */
512  spin_lock_bh(&ep->ex_lock);
513  ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
514  if (f_ctl & FC_FC_SEQ_INIT)
515  ep->esb_stat &= ~ESB_ST_SEQ_INIT;
516  spin_unlock_bh(&ep->ex_lock);
517  return error;
518 }
519 
529 static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
530 {
531  struct fc_seq *sp;
532 
533  sp = &ep->seq;
534  sp->ssb_stat = 0;
535  sp->cnt = 0;
536  sp->id = seq_id;
537  return sp;
538 }
539 
545 static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
546 {
547  struct fc_exch *ep = fc_seq_exch(sp);
548 
549  sp = fc_seq_alloc(ep, ep->seq_id++);
550  FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
551  ep->f_ctl, sp->id);
552  return sp;
553 }
554 
560 static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
561 {
562  struct fc_exch *ep = fc_seq_exch(sp);
563 
564  spin_lock_bh(&ep->ex_lock);
565  sp = fc_seq_start_next_locked(sp);
566  spin_unlock_bh(&ep->ex_lock);
567 
568  return sp;
569 }
570 
571 /*
572  * Set the response handler for the exchange associated with a sequence.
573  */
574 static void fc_seq_set_resp(struct fc_seq *sp,
575  void (*resp)(struct fc_seq *, struct fc_frame *,
576  void *),
577  void *arg)
578 {
579  struct fc_exch *ep = fc_seq_exch(sp);
580 
581  spin_lock_bh(&ep->ex_lock);
582  ep->resp = resp;
583  ep->arg = arg;
584  spin_unlock_bh(&ep->ex_lock);
585 }
586 
596 static int fc_exch_abort_locked(struct fc_exch *ep,
597  unsigned int timer_msec)
598 {
599  struct fc_seq *sp;
600  struct fc_frame *fp;
601  int error;
602 
603  if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
604  ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
605  return -ENXIO;
606 
607  /*
608  * Send the abort on a new sequence if possible.
609  */
610  sp = fc_seq_start_next_locked(&ep->seq);
611  if (!sp)
612  return -ENOMEM;
613 
614  ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
615  if (timer_msec)
616  fc_exch_timer_set_locked(ep, timer_msec);
617 
618  /*
619  * If not logged into the fabric, don't send ABTS but leave
620  * sequence active until next timeout.
621  */
622  if (!ep->sid)
623  return 0;
624 
625  /*
626  * Send an abort for the sequence that timed out.
627  */
628  fp = fc_frame_alloc(ep->lp, 0);
629  if (fp) {
630  fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
631  FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
632  error = fc_seq_send(ep->lp, sp, fp);
633  } else
634  error = -ENOBUFS;
635  return error;
636 }
637 
647 static int fc_seq_exch_abort(const struct fc_seq *req_sp,
648  unsigned int timer_msec)
649 {
650  struct fc_exch *ep;
651  int error;
652 
653  ep = fc_seq_exch(req_sp);
654  spin_lock_bh(&ep->ex_lock);
655  error = fc_exch_abort_locked(ep, timer_msec);
656  spin_unlock_bh(&ep->ex_lock);
657  return error;
658 }
659 
664 static void fc_exch_timeout(struct work_struct *work)
665 {
666  struct fc_exch *ep = container_of(work, struct fc_exch,
667  timeout_work.work);
668  struct fc_seq *sp = &ep->seq;
669  void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
670  void *arg;
671  u32 e_stat;
672  int rc = 1;
673 
674  FC_EXCH_DBG(ep, "Exchange timed out\n");
675 
676  spin_lock_bh(&ep->ex_lock);
677  if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
678  goto unlock;
679 
680  e_stat = ep->esb_stat;
681  if (e_stat & ESB_ST_COMPLETE) {
682  ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
683  spin_unlock_bh(&ep->ex_lock);
684  if (e_stat & ESB_ST_REC_QUAL)
685  fc_exch_rrq(ep);
686  goto done;
687  } else {
688  resp = ep->resp;
689  arg = ep->arg;
690  ep->resp = NULL;
691  if (e_stat & ESB_ST_ABNORMAL)
692  rc = fc_exch_done_locked(ep);
693  spin_unlock_bh(&ep->ex_lock);
694  if (!rc)
695  fc_exch_delete(ep);
696  if (resp)
697  resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
698  fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
699  goto done;
700  }
701 unlock:
702  spin_unlock_bh(&ep->ex_lock);
703 done:
704  /*
705  * This release matches the hold taken when the timer was set.
706  */
707  fc_exch_release(ep);
708 }
709 
717 static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
718  struct fc_exch_mgr *mp)
719 {
720  struct fc_exch *ep;
721  unsigned int cpu;
722  u16 index;
723  struct fc_exch_pool *pool;
724 
725  /* allocate memory for exchange */
726  ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
727  if (!ep) {
728  atomic_inc(&mp->stats.no_free_exch);
729  goto out;
730  }
731  memset(ep, 0, sizeof(*ep));
732 
733  cpu = get_cpu();
734  pool = per_cpu_ptr(mp->pool, cpu);
735  spin_lock_bh(&pool->lock);
736  put_cpu();
737 
738  /* peek cache of free slot */
739  if (pool->left != FC_XID_UNKNOWN) {
740  index = pool->left;
741  pool->left = FC_XID_UNKNOWN;
742  goto hit;
743  }
744  if (pool->right != FC_XID_UNKNOWN) {
745  index = pool->right;
746  pool->right = FC_XID_UNKNOWN;
747  goto hit;
748  }
749 
750  index = pool->next_index;
751  /* allocate new exch from pool */
752  while (fc_exch_ptr_get(pool, index)) {
753  index = index == mp->pool_max_index ? 0 : index + 1;
754  if (index == pool->next_index)
755  goto err;
756  }
757  pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
758 hit:
759  fc_exch_hold(ep); /* hold for exch in mp */
760  spin_lock_init(&ep->ex_lock);
761  /*
762  * Hold exch lock for caller to prevent fc_exch_reset()
763  * from releasing exch while fc_exch_alloc() caller is
764  * still working on exch.
765  */
766  spin_lock_bh(&ep->ex_lock);
767 
768  fc_exch_ptr_set(pool, index, ep);
769  list_add_tail(&ep->ex_list, &pool->ex_list);
770  fc_seq_alloc(ep, ep->seq_id++);
771  pool->total_exches++;
772  spin_unlock_bh(&pool->lock);
773 
774  /*
775  * update exchange
776  */
777  ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
778  ep->em = mp;
779  ep->pool = pool;
780  ep->lp = lport;
781  ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
782  ep->rxid = FC_XID_UNKNOWN;
783  ep->class = mp->class;
784  INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
785 out:
786  return ep;
787 err:
788  spin_unlock_bh(&pool->lock);
789  atomic_inc(&mp->stats.no_free_exch_xid);
790  mempool_free(ep, mp->ep_pool);
791  return NULL;
792 }
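The XID composed above interleaves the CPU number into the low fc_cpu_order bits; fc_exch_find() and fc_exch_delete() invert that mapping with fc_cpu_mask and a right shift. An illustrative standalone check (the order, min_xid, index, and cpu values are examples only):

#include <stdio.h>

int main(void)
{
	unsigned int order = 3;			/* fc_cpu_order for 8 CPUs */
	unsigned int mask = (1u << order) - 1;	/* fc_cpu_mask */
	unsigned int min_xid = 0x0100, index = 5, cpu = 2;

	/* composition, as in fc_exch_em_alloc() */
	unsigned int xid = ((index << order) | cpu) + min_xid;

	/* decomposition, as in fc_exch_find()/fc_exch_delete() */
	printf("xid 0x%x -> cpu %u, index %u\n",
	       xid, xid & mask, (xid - min_xid) >> order);
	return 0;
}

Note min_xid must have its low fc_cpu_order bits clear, which is exactly what the (min_xid & fc_cpu_mask) check in fc_exch_mgr_alloc() enforces.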
793 
805 static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
806  struct fc_frame *fp)
807 {
808  struct fc_exch_mgr_anchor *ema;
809 
810  list_for_each_entry(ema, &lport->ema_list, ema_list)
811  if (!ema->match || ema->match(fp))
812  return fc_exch_em_alloc(lport, ema->mp);
813  return NULL;
814 }
815 
821 static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
822 {
823  struct fc_exch_pool *pool;
824  struct fc_exch *ep = NULL;
825 
826  if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
827  pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
828  spin_lock_bh(&pool->lock);
829  ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
830  if (ep && ep->xid == xid)
831  fc_exch_hold(ep);
832  spin_unlock_bh(&pool->lock);
833  }
834  return ep;
835 }
836 
837 
843 static void fc_exch_done(struct fc_seq *sp)
844 {
845  struct fc_exch *ep = fc_seq_exch(sp);
846  int rc;
847 
848  spin_lock_bh(&ep->ex_lock);
849  rc = fc_exch_done_locked(ep);
850  spin_unlock_bh(&ep->ex_lock);
851  if (!rc)
852  fc_exch_delete(ep);
853 }
854 
863 static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
864  struct fc_exch_mgr *mp,
865  struct fc_frame *fp)
866 {
867  struct fc_exch *ep;
868  struct fc_frame_header *fh;
869 
870  ep = fc_exch_alloc(lport, fp);
871  if (ep) {
872  ep->class = fc_frame_class(fp);
873 
874  /*
875  * Set EX_CTX indicating we're responding on this exchange.
876  */
877  ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */
878  ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */
879  fh = fc_frame_header_get(fp);
880  ep->sid = ntoh24(fh->fh_d_id);
881  ep->did = ntoh24(fh->fh_s_id);
882  ep->oid = ep->did;
883 
884  /*
885  * Allocated exchange has placed the XID in the
886  * originator field. Move it to the responder field,
887  * and set the originator XID from the frame.
888  */
889  ep->rxid = ep->xid;
890  ep->oxid = ntohs(fh->fh_ox_id);
891  ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
892  if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
893  ep->esb_stat &= ~ESB_ST_SEQ_INIT;
894 
895  fc_exch_hold(ep); /* hold for caller */
896  spin_unlock_bh(&ep->ex_lock); /* lock from fc_exch_alloc */
897  }
898  return ep;
899 }
900 
911 static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
912  struct fc_exch_mgr *mp,
913  struct fc_frame *fp)
914 {
915  struct fc_frame_header *fh = fc_frame_header_get(fp);
916  struct fc_exch *ep = NULL;
917  struct fc_seq *sp = NULL;
918  enum fc_pf_rjt_reason reject = FC_RJT_NONE;
919  u32 f_ctl;
920  u16 xid;
921 
922  f_ctl = ntoh24(fh->fh_f_ctl);
923  WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
924 
925  /*
926  * Lookup or create the exchange if we will be creating the sequence.
927  */
928  if (f_ctl & FC_FC_EX_CTX) {
929  xid = ntohs(fh->fh_ox_id); /* we originated exch */
930  ep = fc_exch_find(mp, xid);
931  if (!ep) {
932  atomic_inc(&mp->stats.xid_not_found);
933  reject = FC_RJT_OX_ID;
934  goto out;
935  }
936  if (ep->rxid == FC_XID_UNKNOWN)
937  ep->rxid = ntohs(fh->fh_rx_id);
938  else if (ep->rxid != ntohs(fh->fh_rx_id)) {
939  reject = FC_RJT_OX_ID;
940  goto rel;
941  }
942  } else {
943  xid = ntohs(fh->fh_rx_id); /* we are the responder */
944 
945  /*
946  * Special case for MDS issuing an ELS TEST with a
947  * bad rxid of 0.
948  * XXX take this out once we do the proper reject.
949  */
950  if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
951  fc_frame_payload_op(fp) == ELS_TEST) {
952  fh->fh_rx_id = htons(FC_XID_UNKNOWN);
953  xid = FC_XID_UNKNOWN;
954  }
955 
956  /*
957  * new sequence - find the exchange
958  */
959  ep = fc_exch_find(mp, xid);
960  if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
961  if (ep) {
962  atomic_inc(&mp->stats.xid_busy);
963  reject = FC_RJT_RX_ID;
964  goto rel;
965  }
966  ep = fc_exch_resp(lport, mp, fp);
967  if (!ep) {
968  reject = FC_RJT_EXCH_EST; /* XXX */
969  goto out;
970  }
971  xid = ep->xid; /* get our XID */
972  } else if (!ep) {
973  atomic_inc(&mp->stats.xid_not_found);
974  reject = FC_RJT_RX_ID; /* XID not found */
975  goto out;
976  }
977  }
978 
979  /*
980  * At this point, we have the exchange held.
981  * Find or create the sequence.
982  */
983  if (fc_sof_is_init(fr_sof(fp))) {
984  sp = &ep->seq;
985  sp->ssb_stat |= SSB_ST_RESP;
986  sp->id = fh->fh_seq_id;
987  } else {
988  sp = &ep->seq;
989  if (sp->id != fh->fh_seq_id) {
990  atomic_inc(&mp->stats.seq_not_found);
991  if (f_ctl & FC_FC_END_SEQ) {
992  /*
993  * Update the sequence ID from the incoming
994  * last frame of the sequence. This is needed
995  * for an FC target using DDP, where the
996  * stack is notified only of the last frame's
997  * header (the payload header). The seq_id in
998  * that frame header was assigned by the
999  * initiator, and differs from the seq_id
1000  * assigned when the target sent XFER_RDY.
1001  * Without this update a false mismatch would
1002  * keep the RSP from being sent, and the
1003  * write request on the other end would
1004  * never finish.
1005  */
1006  spin_lock_bh(&ep->ex_lock);
1007  sp->ssb_stat |= SSB_ST_RESP;
1008  sp->id = fh->fh_seq_id;
1009  spin_unlock_bh(&ep->ex_lock);
1010  } else {
1011  /* sequence/exch should exist */
1012  reject = FC_RJT_SEQ_ID;
1013  goto rel;
1014  }
1015  }
1016  }
1017  WARN_ON(ep != fc_seq_exch(sp));
1018 
1019  if (f_ctl & FC_FC_SEQ_INIT)
1020  ep->esb_stat |= ESB_ST_SEQ_INIT;
1021 
1022  fr_seq(fp) = sp;
1023 out:
1024  return reject;
1025 rel:
1026  fc_exch_done(&ep->seq);
1027  fc_exch_release(ep); /* hold from fc_exch_find/fc_exch_resp */
1028  return reject;
1029 }
1030 
1039 static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
1040  struct fc_frame *fp)
1041 {
1042  struct fc_frame_header *fh = fc_frame_header_get(fp);
1043  struct fc_exch *ep;
1044  struct fc_seq *sp = NULL;
1045  u32 f_ctl;
1046  u16 xid;
1047 
1048  f_ctl = ntoh24(fh->fh_f_ctl);
1049  WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
1050  xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
1051  ep = fc_exch_find(mp, xid);
1052  if (!ep)
1053  return NULL;
1054  if (ep->seq.id == fh->fh_seq_id) {
1055  /*
1056  * Save the RX_ID if we didn't previously know it.
1057  */
1058  sp = &ep->seq;
1059  if ((f_ctl & FC_FC_EX_CTX) != 0 &&
1060  ep->rxid == FC_XID_UNKNOWN) {
1061  ep->rxid = ntohs(fh->fh_rx_id);
1062  }
1063  }
1064  fc_exch_release(ep);
1065  return sp;
1066 }
1067 
1076 static void fc_exch_set_addr(struct fc_exch *ep,
1077  u32 orig_id, u32 resp_id)
1078 {
1079  ep->oid = orig_id;
1080  if (ep->esb_stat & ESB_ST_RESP) {
1081  ep->sid = resp_id;
1082  ep->did = orig_id;
1083  } else {
1084  ep->sid = orig_id;
1085  ep->did = resp_id;
1086  }
1087 }
1088 
1098 static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
1099  struct fc_seq_els_data *els_data)
1100 {
1101  switch (els_cmd) {
1102  case ELS_LS_RJT:
1103  fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
1104  break;
1105  case ELS_LS_ACC:
1106  fc_seq_ls_acc(fp);
1107  break;
1108  case ELS_RRQ:
1109  fc_exch_els_rrq(fp);
1110  break;
1111  case ELS_REC:
1112  fc_exch_els_rec(fp);
1113  break;
1114  default:
1115  FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
1116  }
1117 }
1118 
1126 static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
1127  enum fc_rctl rctl, enum fc_fh_type fh_type)
1128 {
1129  u32 f_ctl;
1130  struct fc_exch *ep = fc_seq_exch(sp);
1131 
1132  f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1133  f_ctl |= ep->f_ctl;
1134  fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
1135  fc_seq_send(ep->lp, sp, fp);
1136 }
1137 
1145 static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
1146 {
1147  struct fc_frame *fp;
1148  struct fc_frame_header *rx_fh;
1149  struct fc_frame_header *fh;
1150  struct fc_exch *ep = fc_seq_exch(sp);
1151  struct fc_lport *lport = ep->lp;
1152  unsigned int f_ctl;
1153 
1154  /*
1155  * Don't send ACKs for class 3.
1156  */
1157  if (fc_sof_needs_ack(fr_sof(rx_fp))) {
1158  fp = fc_frame_alloc(lport, 0);
1159  if (!fp)
1160  return;
1161 
1162  fh = fc_frame_header_get(fp);
1163  fh->fh_r_ctl = FC_RCTL_ACK_1;
1164  fh->fh_type = FC_TYPE_BLS;
1165 
1166  /*
1167  * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1168  * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1169  * Bits 9-8 are meaningful (retransmitted or unidirectional).
1170  * Last ACK uses bits 7-6 (continue sequence),
1171  * bits 5-4 are meaningful (what kind of ACK to use).
1172  */
1173  rx_fh = fc_frame_header_get(rx_fp);
1174  f_ctl = ntoh24(rx_fh->fh_f_ctl);
1175  f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1176  FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
1177  FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
1178  FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1179  f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1180  hton24(fh->fh_f_ctl, f_ctl);
1181 
1182  fc_exch_setup_hdr(ep, fp, f_ctl);
1183  fh->fh_seq_id = rx_fh->fh_seq_id;
1184  fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1185  fh->fh_parm_offset = htonl(1); /* ack single frame */
1186 
1187  fr_sof(fp) = fr_sof(rx_fp);
1188  if (f_ctl & FC_FC_END_SEQ)
1189  fr_eof(fp) = FC_EOF_T;
1190  else
1191  fr_eof(fp) = FC_EOF_N;
1192 
1193  lport->tt.frame_send(lport, fp);
1194  }
1195 }
1196 
1205 static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
1206  enum fc_ba_rjt_reason reason,
1207  enum fc_ba_rjt_explan explan)
1208 {
1209  struct fc_frame *fp;
1210  struct fc_frame_header *rx_fh;
1211  struct fc_frame_header *fh;
1212  struct fc_ba_rjt *rp;
1213  struct fc_lport *lport;
1214  unsigned int f_ctl;
1215 
1216  lport = fr_dev(rx_fp);
1217  fp = fc_frame_alloc(lport, sizeof(*rp));
1218  if (!fp)
1219  return;
1220  fh = fc_frame_header_get(fp);
1221  rx_fh = fc_frame_header_get(rx_fp);
1222 
1223  memset(fh, 0, sizeof(*fh) + sizeof(*rp));
1224 
1225  rp = fc_frame_payload_get(fp, sizeof(*rp));
1226  rp->br_reason = reason;
1227  rp->br_explan = explan;
1228 
1229  /*
1230  * seq_id, cs_ctl, df_ctl and param/offset are zero.
1231  */
1232  memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
1233  memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
1234  fh->fh_ox_id = rx_fh->fh_ox_id;
1235  fh->fh_rx_id = rx_fh->fh_rx_id;
1236  fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1237  fh->fh_r_ctl = FC_RCTL_BA_RJT;
1238  fh->fh_type = FC_TYPE_BLS;
1239 
1240  /*
1241  * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1242  * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1243  * Bits 9-8 are meaningful (retransmitted or unidirectional).
1244  * Last ACK uses bits 7-6 (continue sequence),
1245  * bits 5-4 are meaningful (what kind of ACK to use).
1246  * Always set LAST_SEQ, END_SEQ.
1247  */
1248  f_ctl = ntoh24(rx_fh->fh_f_ctl);
1249  f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1250  FC_FC_END_CONN | FC_FC_SEQ_INIT |
1251  FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1252  f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1253  f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1254  f_ctl &= ~FC_FC_FIRST_SEQ;
1255  hton24(fh->fh_f_ctl, f_ctl);
1256 
1257  fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
1258  fr_eof(fp) = FC_EOF_T;
1259  if (fc_sof_needs_ack(fr_sof(fp)))
1260  fr_eof(fp) = FC_EOF_N;
1261 
1262  lport->tt.frame_send(lport, fp);
1263 }
1264 
1274 static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1275 {
1276  struct fc_frame *fp;
1277  struct fc_ba_acc *ap;
1278  struct fc_frame_header *fh;
1279  struct fc_seq *sp;
1280 
1281  if (!ep)
1282  goto reject;
1283  spin_lock_bh(&ep->ex_lock);
1284  if (ep->esb_stat & ESB_ST_COMPLETE) {
1285  spin_unlock_bh(&ep->ex_lock);
1286  goto reject;
1287  }
1288  if (!(ep->esb_stat & ESB_ST_REC_QUAL))
1289  fc_exch_hold(ep); /* hold for REC_QUAL */
1290  ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
1291  fc_exch_timer_set_locked(ep, ep->r_a_tov);
1292 
1293  fp = fc_frame_alloc(ep->lp, sizeof(*ap));
1294  if (!fp) {
1295  spin_unlock_bh(&ep->ex_lock);
1296  goto free;
1297  }
1298  fh = fc_frame_header_get(fp);
1299  ap = fc_frame_payload_get(fp, sizeof(*ap));
1300  memset(ap, 0, sizeof(*ap));
1301  sp = &ep->seq;
1302  ap->ba_high_seq_cnt = htons(0xffff);
1303  if (sp->ssb_stat & SSB_ST_RESP) {
1304  ap->ba_seq_id = sp->id;
1305  ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
1306  ap->ba_high_seq_cnt = fh->fh_seq_cnt;
1307  ap->ba_low_seq_cnt = htons(sp->cnt);
1308  }
1309  sp = fc_seq_start_next_locked(sp);
1310  spin_unlock_bh(&ep->ex_lock);
1311  fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1312  fc_frame_free(rx_fp);
1313  return;
1314 
1315 reject:
1316  fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
1317 free:
1318  fc_frame_free(rx_fp);
1319 }
1320 
1330 static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
1331 {
1332  struct fc_exch_mgr_anchor *ema;
1333 
1334  WARN_ON(lport != fr_dev(fp));
1335  WARN_ON(fr_seq(fp));
1336  fr_seq(fp) = NULL;
1337 
1338  list_for_each_entry(ema, &lport->ema_list, ema_list)
1339  if ((!ema->match || ema->match(fp)) &&
1340  fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
1341  break;
1342  return fr_seq(fp);
1343 }
1344 
1349 static void fc_seq_release(struct fc_seq *sp)
1350 {
1351  fc_exch_release(fc_seq_exch(sp));
1352 }
1353 
1363 static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
1364  struct fc_frame *fp)
1365 {
1366  struct fc_frame_header *fh = fc_frame_header_get(fp);
1367  struct fc_seq *sp = NULL;
1368  struct fc_exch *ep = NULL;
1369  enum fc_pf_rjt_reason reject;
1370 
1371  /* We can have the wrong fc_lport at this point with NPIV, which is a
1372  * problem now that we know a new exchange needs to be allocated
1373  */
1374  lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
1375  if (!lport) {
1376  fc_frame_free(fp);
1377  return;
1378  }
1379  fr_dev(fp) = lport;
1380 
1381  BUG_ON(fr_seq(fp)); /* XXX remove later */
1382 
1383  /*
1384  * If the RX_ID is 0xffff, don't allocate an exchange.
1385  * The upper-level protocol may request one later, if needed.
1386  */
1387  if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
1388  return lport->tt.lport_recv(lport, fp);
1389 
1390  reject = fc_seq_lookup_recip(lport, mp, fp);
1391  if (reject == FC_RJT_NONE) {
1392  sp = fr_seq(fp); /* sequence will be held */
1393  ep = fc_seq_exch(sp);
1394  fc_seq_send_ack(sp, fp);
1395  ep->encaps = fr_encaps(fp);
1396 
1397  /*
1398  * Call the receive function.
1399  *
1400  * The receive function may allocate a new sequence
1401  * over the old one, so we shouldn't change the
1402  * sequence after this.
1403  *
1404  * The frame will be freed by the receive function.
1405  * If new exch resp handler is valid then call that
1406  * first.
1407  */
1408  if (ep->resp)
1409  ep->resp(sp, fp, ep->arg);
1410  else
1411  lport->tt.lport_recv(lport, fp);
1412  fc_exch_release(ep); /* release from lookup */
1413  } else {
1414  FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
1415  reject);
1416  fc_frame_free(fp);
1417  }
1418 }
1419 
1427 static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1428 {
1429  struct fc_frame_header *fh = fc_frame_header_get(fp);
1430  struct fc_seq *sp;
1431  struct fc_exch *ep;
1432  enum fc_sof sof;
1433  u32 f_ctl;
1434  void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1435  void *ex_resp_arg;
1436  int rc;
1437 
1438  ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1439  if (!ep) {
1440  atomic_inc(&mp->stats.xid_not_found);
1441  goto out;
1442  }
1443  if (ep->esb_stat & ESB_ST_COMPLETE) {
1444  atomic_inc(&mp->stats.xid_not_found);
1445  goto rel;
1446  }
1447  if (ep->rxid == FC_XID_UNKNOWN)
1448  ep->rxid = ntohs(fh->fh_rx_id);
1449  if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
1450  atomic_inc(&mp->stats.xid_not_found);
1451  goto rel;
1452  }
1453  if (ep->did != ntoh24(fh->fh_s_id) &&
1454  ep->did != FC_FID_FLOGI) {
1455  atomic_inc(&mp->stats.xid_not_found);
1456  goto rel;
1457  }
1458  sof = fr_sof(fp);
1459  sp = &ep->seq;
1460  if (fc_sof_is_init(sof)) {
1461  sp->ssb_stat |= SSB_ST_RESP;
1462  sp->id = fh->fh_seq_id;
1463  } else if (sp->id != fh->fh_seq_id) {
1464  atomic_inc(&mp->stats.seq_not_found);
1465  goto rel;
1466  }
1467 
1468  f_ctl = ntoh24(fh->fh_f_ctl);
1469  fr_seq(fp) = sp;
1470  if (f_ctl & FC_FC_SEQ_INIT)
1471  ep->esb_stat |= ESB_ST_SEQ_INIT;
1472 
1473  if (fc_sof_needs_ack(sof))
1474  fc_seq_send_ack(sp, fp);
1475  resp = ep->resp;
1476  ex_resp_arg = ep->arg;
1477 
1478  if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
1479  (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1480  (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1481  spin_lock_bh(&ep->ex_lock);
1482  resp = ep->resp;
1483  rc = fc_exch_done_locked(ep);
1484  WARN_ON(fc_seq_exch(sp) != ep);
1485  spin_unlock_bh(&ep->ex_lock);
1486  if (!rc)
1487  fc_exch_delete(ep);
1488  }
1489 
1490  /*
1491  * Call the receive function.
1492  * The sequence is held (has a refcnt) for us,
1493  * but not for the receive function.
1494  *
1495  * The receive function may allocate a new sequence
1496  * over the old one, so we shouldn't change the
1497  * sequence after this.
1498  *
1499  * The frame will be freed by the receive function.
1500  * If new exch resp handler is valid then call that
1501  * first.
1502  */
1503  if (resp)
1504  resp(sp, fp, ex_resp_arg);
1505  else
1506  fc_frame_free(fp);
1507  fc_exch_release(ep);
1508  return;
1509 rel:
1510  fc_exch_release(ep);
1511 out:
1512  fc_frame_free(fp);
1513 }
1514 
1521 static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1522 {
1523  struct fc_seq *sp;
1524 
1525  sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
1526 
1527  if (!sp)
1528  atomic_inc(&mp->stats.xid_not_found);
1529  else
1530  atomic_inc(&mp->stats.non_bls_resp);
1531 
1532  fc_frame_free(fp);
1533 }
1534 
1543 static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1544 {
1545  void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1546  void *ex_resp_arg;
1547  struct fc_frame_header *fh;
1548  struct fc_ba_acc *ap;
1549  struct fc_seq *sp;
1550  u16 low;
1551  u16 high;
1552  int rc = 1, has_rec = 0;
1553 
1554  fh = fc_frame_header_get(fp);
1555  FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
1556  fc_exch_rctl_name(fh->fh_r_ctl));
1557 
1558  if (cancel_delayed_work_sync(&ep->timeout_work)) {
1559  FC_EXCH_DBG(ep, "Exchange timer canceled\n");
1560  fc_exch_release(ep); /* release from pending timer hold */
1561  }
1562 
1563  spin_lock_bh(&ep->ex_lock);
1564  switch (fh->fh_r_ctl) {
1565  case FC_RCTL_BA_ACC:
1566  ap = fc_frame_payload_get(fp, sizeof(*ap));
1567  if (!ap)
1568  break;
1569 
1570  /*
1571  * Decide whether to establish a Recovery Qualifier.
1572  * We do this if there is a non-empty SEQ_CNT range and
1573  * SEQ_ID is the same as the one we aborted.
1574  */
1575  low = ntohs(ap->ba_low_seq_cnt);
1576  high = ntohs(ap->ba_high_seq_cnt);
1577  if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
1578  (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
1579  ap->ba_seq_id == ep->seq_id) && low != high) {
1580  ep->esb_stat |= ESB_ST_REC_QUAL;
1581  fc_exch_hold(ep); /* hold for recovery qualifier */
1582  has_rec = 1;
1583  }
1584  break;
1585  case FC_RCTL_BA_RJT:
1586  break;
1587  default:
1588  break;
1589  }
1590 
1591  resp = ep->resp;
1592  ex_resp_arg = ep->arg;
1593 
1594  /* do we need to do some other checks here. Can we reuse more of
1595  * fc_exch_recv_seq_resp
1596  */
1597  sp = &ep->seq;
1598  /*
1599  * do we want to check END_SEQ as well as LAST_SEQ here?
1600  */
1601  if (ep->fh_type != FC_TYPE_FCP &&
1602  ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
1603  rc = fc_exch_done_locked(ep);
1604  spin_unlock_bh(&ep->ex_lock);
1605  if (!rc)
1606  fc_exch_delete(ep);
1607 
1608  if (resp)
1609  resp(sp, fp, ex_resp_arg);
1610  else
1611  fc_frame_free(fp);
1612 
1613  if (has_rec)
1614  fc_exch_timer_set(ep, ep->r_a_tov);
1615 
1616 }
1617 
1626 static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1627 {
1628  struct fc_frame_header *fh;
1629  struct fc_exch *ep;
1630  u32 f_ctl;
1631 
1632  fh = fc_frame_header_get(fp);
1633  f_ctl = ntoh24(fh->fh_f_ctl);
1634  fr_seq(fp) = NULL;
1635 
1636  ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
1637  ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
1638  if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
1639  spin_lock_bh(&ep->ex_lock);
1640  ep->esb_stat |= ESB_ST_SEQ_INIT;
1641  spin_unlock_bh(&ep->ex_lock);
1642  }
1643  if (f_ctl & FC_FC_SEQ_CTX) {
1644  /*
1645  * A response to a sequence we initiated.
1646  * This should only be ACKs for class 2 or F.
1647  */
1648  switch (fh->fh_r_ctl) {
1649  case FC_RCTL_ACK_1:
1650  case FC_RCTL_ACK_0:
1651  break;
1652  default:
1653  if (ep)
1654  FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
1655  fh->fh_r_ctl,
1656  fc_exch_rctl_name(fh->fh_r_ctl));
1657  break;
1658  }
1659  fc_frame_free(fp);
1660  } else {
1661  switch (fh->fh_r_ctl) {
1662  case FC_RCTL_BA_RJT:
1663  case FC_RCTL_BA_ACC:
1664  if (ep)
1665  fc_exch_abts_resp(ep, fp);
1666  else
1667  fc_frame_free(fp);
1668  break;
1669  case FC_RCTL_BA_ABTS:
1670  fc_exch_recv_abts(ep, fp);
1671  break;
1672  default: /* ignore junk */
1673  fc_frame_free(fp);
1674  break;
1675  }
1676  }
1677  if (ep)
1678  fc_exch_release(ep); /* release hold taken by fc_exch_find */
1679 }
1680 
1688 static void fc_seq_ls_acc(struct fc_frame *rx_fp)
1689 {
1690  struct fc_lport *lport;
1691  struct fc_els_ls_acc *acc;
1692  struct fc_frame *fp;
1693 
1694  lport = fr_dev(rx_fp);
1695  fp = fc_frame_alloc(lport, sizeof(*acc));
1696  if (!fp)
1697  return;
1698  acc = fc_frame_payload_get(fp, sizeof(*acc));
1699  memset(acc, 0, sizeof(*acc));
1700  acc->la_cmd = ELS_LS_ACC;
1701  fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1702  lport->tt.frame_send(lport, fp);
1703 }
1704 
1714 static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
1715  enum fc_els_rjt_explan explan)
1716 {
1717  struct fc_lport *lport;
1718  struct fc_els_ls_rjt *rjt;
1719  struct fc_frame *fp;
1720 
1721  lport = fr_dev(rx_fp);
1722  fp = fc_frame_alloc(lport, sizeof(*rjt));
1723  if (!fp)
1724  return;
1725  rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1726  memset(rjt, 0, sizeof(*rjt));
1727  rjt->er_cmd = ELS_LS_RJT;
1728  rjt->er_reason = reason;
1729  rjt->er_explan = explan;
1730  fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1731  lport->tt.frame_send(lport, fp);
1732 }
1733 
1738 static void fc_exch_reset(struct fc_exch *ep)
1739 {
1740  struct fc_seq *sp;
1741  void (*resp)(struct fc_seq *, struct fc_frame *, void *);
1742  void *arg;
1743  int rc = 1;
1744 
1745  spin_lock_bh(&ep->ex_lock);
1746  fc_exch_abort_locked(ep, 0);
1747  ep->state |= FC_EX_RST_CLEANUP;
1748  fc_exch_timer_cancel(ep);
1749  resp = ep->resp;
1750  ep->resp = NULL;
1751  if (ep->esb_stat & ESB_ST_REC_QUAL)
1752  atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
1753  ep->esb_stat &= ~ESB_ST_REC_QUAL;
1754  arg = ep->arg;
1755  sp = &ep->seq;
1756  rc = fc_exch_done_locked(ep);
1757  spin_unlock_bh(&ep->ex_lock);
1758  if (!rc)
1759  fc_exch_delete(ep);
1760 
1761  if (resp)
1762  resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
1763 }
1764 
1777 static void fc_exch_pool_reset(struct fc_lport *lport,
1778  struct fc_exch_pool *pool,
1779  u32 sid, u32 did)
1780 {
1781  struct fc_exch *ep;
1782  struct fc_exch *next;
1783 
1784  spin_lock_bh(&pool->lock);
1785 restart:
1786  list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
1787  if ((lport == ep->lp) &&
1788  (sid == 0 || sid == ep->sid) &&
1789  (did == 0 || did == ep->did)) {
1790  fc_exch_hold(ep);
1791  spin_unlock_bh(&pool->lock);
1792 
1793  fc_exch_reset(ep);
1794 
1795  fc_exch_release(ep);
1796  spin_lock_bh(&pool->lock);
1797 
1798  /*
1799  * must restart the loop: while the lock was
1800  * dropped, multiple eps may have been released.
1801  */
1802  goto restart;
1803  }
1804  }
1805  pool->next_index = 0;
1806  pool->left = FC_XID_UNKNOWN;
1807  pool->right = FC_XID_UNKNOWN;
1808  spin_unlock_bh(&pool->lock);
1809 }
1810 
1822 void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
1823 {
1824  struct fc_exch_mgr_anchor *ema;
1825  unsigned int cpu;
1826 
1827  list_for_each_entry(ema, &lport->ema_list, ema_list) {
1828  for_each_possible_cpu(cpu)
1829  fc_exch_pool_reset(lport,
1830  per_cpu_ptr(ema->mp->pool, cpu),
1831  sid, did);
1832  }
1833 }
1834 EXPORT_SYMBOL(fc_exch_mgr_reset);
1835 
1843 static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
1844 {
1845  struct fc_exch_mgr_anchor *ema;
1846 
1847  list_for_each_entry(ema, &lport->ema_list, ema_list)
1848  if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
1849  return fc_exch_find(ema->mp, xid);
1850  return NULL;
1851 }
1852 
1859 static void fc_exch_els_rec(struct fc_frame *rfp)
1860 {
1861  struct fc_lport *lport;
1862  struct fc_frame *fp;
1863  struct fc_exch *ep;
1864  struct fc_els_rec *rp;
1865  struct fc_els_rec_acc *acc;
1866  enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
1867  enum fc_els_rjt_explan explan;
1868  u32 sid;
1869  u16 rxid;
1870  u16 oxid;
1871 
1872  lport = fr_dev(rfp);
1873  rp = fc_frame_payload_get(rfp, sizeof(*rp));
1874  explan = ELS_EXPL_INV_LEN;
1875  if (!rp)
1876  goto reject;
1877  sid = ntoh24(rp->rec_s_id);
1878  rxid = ntohs(rp->rec_rx_id);
1879  oxid = ntohs(rp->rec_ox_id);
1880 
1881  ep = fc_exch_lookup(lport,
1882  sid == fc_host_port_id(lport->host) ? oxid : rxid);
1883  explan = ELS_EXPL_OXID_RXID;
1884  if (!ep)
1885  goto reject;
1886  if (ep->oid != sid || oxid != ep->oxid)
1887  goto rel;
1888  if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
1889  goto rel;
1890  fp = fc_frame_alloc(lport, sizeof(*acc));
1891  if (!fp)
1892  goto out;
1893 
1894  acc = fc_frame_payload_get(fp, sizeof(*acc));
1895  memset(acc, 0, sizeof(*acc));
1896  acc->reca_cmd = ELS_LS_ACC;
1897  acc->reca_ox_id = rp->rec_ox_id;
1898  memcpy(acc->reca_ofid, rp->rec_s_id, 3);
1899  acc->reca_rx_id = htons(ep->rxid);
1900  if (ep->sid == ep->oid)
1901  hton24(acc->reca_rfid, ep->did);
1902  else
1903  hton24(acc->reca_rfid, ep->sid);
1904  acc->reca_fc4value = htonl(ep->seq.rec_data);
1905  acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
1906  ESB_ST_SEQ_INIT |
1907  ESB_ST_COMPLETE));
1908  fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
1909  lport->tt.frame_send(lport, fp);
1910 out:
1911  fc_exch_release(ep);
1912  return;
1913 
1914 rel:
1915  fc_exch_release(ep);
1916 reject:
1917  fc_seq_ls_rjt(rfp, reason, explan);
1918 }
1919 
1928 static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1929 {
1930  struct fc_exch *aborted_ep = arg;
1931  unsigned int op;
1932 
1933  if (IS_ERR(fp)) {
1934  int err = PTR_ERR(fp);
1935 
1936  if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
1937  goto cleanup;
1938  FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
1939  "frame error %d\n", err);
1940  return;
1941  }
1942 
1943  op = fc_frame_payload_op(fp);
1944  fc_frame_free(fp);
1945 
1946  switch (op) {
1947  case ELS_LS_RJT:
1948  FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
1949  /* fall through */
1950  case ELS_LS_ACC:
1951  goto cleanup;
1952  default:
1953  FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
1954  "for RRQ", op);
1955  return;
1956  }
1957 
1958 cleanup:
1959  fc_exch_done(&aborted_ep->seq);
1960  /* drop hold for rec qual */
1961  fc_exch_release(aborted_ep);
1962 }
1963 
1964 
1984 static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
1985  struct fc_frame *fp,
1986  void (*resp)(struct fc_seq *,
1987  struct fc_frame *fp,
1988  void *arg),
1989  void (*destructor)(struct fc_seq *,
1990  void *),
1991  void *arg, u32 timer_msec)
1992 {
1993  struct fc_exch *ep;
1994  struct fc_seq *sp = NULL;
1995  struct fc_frame_header *fh;
1996  struct fc_fcp_pkt *fsp = NULL;
1997  int rc = 1;
1998 
1999  ep = fc_exch_alloc(lport, fp);
2000  if (!ep) {
2001  fc_frame_free(fp);
2002  return NULL;
2003  }
2004  ep->esb_stat |= ESB_ST_SEQ_INIT;
2005  fh = fc_frame_header_get(fp);
2006  fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
2007  ep->resp = resp;
2008  ep->destructor = destructor;
2009  ep->arg = arg;
2010  ep->r_a_tov = FC_DEF_R_A_TOV;
2011  ep->lp = lport;
2012  sp = &ep->seq;
2013 
2014  ep->fh_type = fh->fh_type; /* save for possible timeout handling */
2015  ep->f_ctl = ntoh24(fh->fh_f_ctl);
2016  fc_exch_setup_hdr(ep, fp, ep->f_ctl);
2017  sp->cnt++;
2018 
2019  if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
2020  fsp = fr_fsp(fp);
2021  fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
2022  }
2023 
2024  if (unlikely(lport->tt.frame_send(lport, fp)))
2025  goto err;
2026 
2027  if (timer_msec)
2028  fc_exch_timer_set_locked(ep, timer_msec);
2029  ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
2030 
2031  if (ep->f_ctl & FC_FC_SEQ_INIT)
2032  ep->esb_stat &= ~ESB_ST_SEQ_INIT;
2033  spin_unlock_bh(&ep->ex_lock);
2034  return sp;
2035 err:
2036  if (fsp)
2037  fc_fcp_ddp_done(fsp);
2038  rc = fc_exch_done_locked(ep);
2039  spin_unlock_bh(&ep->ex_lock);
2040  if (!rc)
2041  fc_exch_delete(ep);
2042  return NULL;
2043 }
2044 
2052 static void fc_exch_rrq(struct fc_exch *ep)
2053 {
2054  struct fc_lport *lport;
2055  struct fc_els_rrq *rrq;
2056  struct fc_frame *fp;
2057  u32 did;
2058 
2059  lport = ep->lp;
2060 
2061  fp = fc_frame_alloc(lport, sizeof(*rrq));
2062  if (!fp)
2063  goto retry;
2064 
2065  rrq = fc_frame_payload_get(fp, sizeof(*rrq));
2066  memset(rrq, 0, sizeof(*rrq));
2067  rrq->rrq_cmd = ELS_RRQ;
2068  hton24(rrq->rrq_s_id, ep->sid);
2069  rrq->rrq_ox_id = htons(ep->oxid);
2070  rrq->rrq_rx_id = htons(ep->rxid);
2071 
2072  did = ep->did;
2073  if (ep->esb_stat & ESB_ST_RESP)
2074  did = ep->sid;
2075 
2076  fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
2077  lport->port_id, FC_TYPE_ELS,
2078  FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
2079 
2080  if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
2081  lport->e_d_tov))
2082  return;
2083 
2084 retry:
2085  spin_lock_bh(&ep->ex_lock);
2086  if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
2087  spin_unlock_bh(&ep->ex_lock);
2088  /* drop hold for rec qual */
2089  fc_exch_release(ep);
2090  return;
2091  }
2092  ep->esb_stat |= ESB_ST_REC_QUAL;
2093  fc_exch_timer_set_locked(ep, ep->r_a_tov);
2094  spin_unlock_bh(&ep->ex_lock);
2095 }
2096 
2101 static void fc_exch_els_rrq(struct fc_frame *fp)
2102 {
2103  struct fc_lport *lport;
2104  struct fc_exch *ep = NULL; /* request or subject exchange */
2105  struct fc_els_rrq *rp;
2106  u32 sid;
2107  u16 xid;
2108  enum fc_els_rjt_explan explan;
2109 
2110  lport = fr_dev(fp);
2111  rp = fc_frame_payload_get(fp, sizeof(*rp));
2112  explan = ELS_EXPL_INV_LEN;
2113  if (!rp)
2114  goto reject;
2115 
2116  /*
2117  * lookup subject exchange.
2118  */
2119  sid = ntoh24(rp->rrq_s_id); /* subject source */
2120  xid = fc_host_port_id(lport->host) == sid ?
2121  ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
2122  ep = fc_exch_lookup(lport, xid);
2123  explan = ELS_EXPL_OXID_RXID;
2124  if (!ep)
2125  goto reject;
2126  spin_lock_bh(&ep->ex_lock);
2127  if (ep->oxid != ntohs(rp->rrq_ox_id))
2128  goto unlock_reject;
2129  if (ep->rxid != ntohs(rp->rrq_rx_id) &&
2130  ep->rxid != FC_XID_UNKNOWN)
2131  goto unlock_reject;
2132  explan = ELS_EXPL_SID;
2133  if (ep->sid != sid)
2134  goto unlock_reject;
2135 
2136  /*
2137  * Clear Recovery Qualifier state, and cancel timer if complete.
2138  */
2139  if (ep->esb_stat & ESB_ST_REC_QUAL) {
2140  ep->esb_stat &= ~ESB_ST_REC_QUAL;
2141  atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
2142  }
2143  if (ep->esb_stat & ESB_ST_COMPLETE)
2144  fc_exch_timer_cancel(ep);
2145 
2146  spin_unlock_bh(&ep->ex_lock);
2147 
2148  /*
2149  * Send LS_ACC.
2150  */
2151  fc_seq_ls_acc(fp);
2152  goto out;
2153 
2154 unlock_reject:
2155  spin_unlock_bh(&ep->ex_lock);
2156 reject:
2157  fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
2158 out:
2159  if (ep)
2160  fc_exch_release(ep); /* drop hold from fc_exch_find */
2161 }
2162 
2167 void fc_exch_update_stats(struct fc_lport *lport)
2168 {
2169  struct fc_host_statistics *st;
2170  struct fc_exch_mgr_anchor *ema;
2171  struct fc_exch_mgr *mp;
2172 
2173  st = &lport->host_stats;
2174 
2175  list_for_each_entry(ema, &lport->ema_list, ema_list) {
2176  mp = ema->mp;
2177  st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
2178  st->fc_no_free_exch_xid +=
2179  atomic_read(&mp->stats.no_free_exch_xid);
2180  st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
2181  st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
2182  st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
2183  st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
2184  }
2185 }
2186 EXPORT_SYMBOL(fc_exch_update_stats);
2187 
2194 struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
2195  struct fc_exch_mgr *mp,
2196  bool (*match)(struct fc_frame *))
2197 {
2198  struct fc_exch_mgr_anchor *ema;
2199 
2200  ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
2201  if (!ema)
2202  return ema;
2203 
2204  ema->mp = mp;
2205  ema->match = match;
2206  /* add EM anchor to EM anchors list */
2207  list_add_tail(&ema->ema_list, &lport->ema_list);
2208  kref_get(&mp->kref);
2209  return ema;
2210 }
2211 EXPORT_SYMBOL(fc_exch_mgr_add);
2212 
2217 static void fc_exch_mgr_destroy(struct kref *kref)
2218 {
2219  struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
2220 
2221  mempool_destroy(mp->ep_pool);
2222  free_percpu(mp->pool);
2223  kfree(mp);
2224 }
2225 
2230 void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
2231 {
2232  /* remove EM anchor from EM anchors list */
2233  list_del(&ema->ema_list);
2234  kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
2235  kfree(ema);
2236 }
2237 EXPORT_SYMBOL(fc_exch_mgr_del);
2238 
2244 int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
2245 {
2246  struct fc_exch_mgr_anchor *ema, *tmp;
2247 
2248  list_for_each_entry(ema, &src->ema_list, ema_list) {
2249  if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
2250  goto err;
2251  }
2252  return 0;
2253 err:
2254  list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
2255  fc_exch_mgr_del(ema);
2256  return -ENOMEM;
2257 }
2258 EXPORT_SYMBOL(fc_exch_mgr_list_clone);
2259 
2268 struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
2269  enum fc_class class,
2270  u16 min_xid, u16 max_xid,
2271  bool (*match)(struct fc_frame *))
2272 {
2273  struct fc_exch_mgr *mp;
2274  u16 pool_exch_range;
2275  size_t pool_size;
2276  unsigned int cpu;
2277  struct fc_exch_pool *pool;
2278 
2279  if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
2280  (min_xid & fc_cpu_mask) != 0) {
2281  FC_LPORT_DBG(lport, "Invalid min_xid 0x%x and max_xid 0x%x\n",
2282  min_xid, max_xid);
2283  return NULL;
2284  }
2285 
2286  /*
2287  * allocate memory for EM
2288  */
2289  mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
2290  if (!mp)
2291  return NULL;
2292 
2293  mp->class = class;
2294  /* adjust em exch xid range for offload */
2295  mp->min_xid = min_xid;
2296 
2297  /* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
2298  pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
2299  sizeof(struct fc_exch *);
2300  if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
2301  mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
2302  min_xid - 1;
2303  } else {
2304  mp->max_xid = max_xid;
2305  pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
2306  (fc_cpu_mask + 1);
2307  }
2308 
2309  mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
2310  if (!mp->ep_pool)
2311  goto free_mp;
2312 
2313  /*
2314  * Setup per cpu exch pool with entire exchange id range equally
2315  * divided across all cpus. The exch pointers array memory is
2316  * allocated for exch range per pool.
2317  */
2318  mp->pool_max_index = pool_exch_range - 1;
2319 
2320  /*
2321  * Allocate and initialize per cpu exch pool
2322  */
2323  pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
2324  mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
2325  if (!mp->pool)
2326  goto free_mempool;
2327  for_each_possible_cpu(cpu) {
2328  pool = per_cpu_ptr(mp->pool, cpu);
2329  pool->next_index = 0;
2330  pool->left = FC_XID_UNKNOWN;
2331  pool->right = FC_XID_UNKNOWN;
2332  spin_lock_init(&pool->lock);
2333  INIT_LIST_HEAD(&pool->ex_list);
2334  }
2335 
2336  kref_init(&mp->kref);
2337  if (!fc_exch_mgr_add(lport, mp, match)) {
2338  free_percpu(mp->pool);
2339  goto free_mempool;
2340  }
2341 
2342  /*
2343  * Above kref_init() sets mp->kref to 1 and then
2344  * call to fc_exch_mgr_add incremented mp->kref again,
2345  * so adjust that extra increment.
2346  */
2347  kref_put(&mp->kref, fc_exch_mgr_destroy);
2348  return mp;
2349 
2350 free_mempool:
2351  mempool_destroy(mp->ep_pool);
2352 free_mp:
2353  kfree(mp);
2354  return NULL;
2355 }
2356 EXPORT_SYMBOL(fc_exch_mgr_alloc);
2357 
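The range-versus-pool-size trade-off in fc_exch_mgr_alloc() can be worked through numerically. An illustrative sketch, where all inputs are example stand-ins (32 KiB for PCPU_MIN_UNIT_SIZE, 64 bytes for sizeof(struct fc_exch_pool), 8 CPUs, a 64-bit build):

#include <stdio.h>

int main(void)
{
	unsigned int pcpu_min_unit_size = 32 * 1024;	/* example value */
	unsigned int pool_hdr_size = 64;		/* example value */
	unsigned int ptr_size = sizeof(void *);
	unsigned int fc_cpu_mask = 7;			/* 8 CPUs */
	unsigned int min_xid = 0x0000, max_xid = 0xfffe;

	unsigned int pool_exch_range =
		(pcpu_min_unit_size - pool_hdr_size) / ptr_size;

	if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range)
		/* shrink the EM's XID range, as fc_exch_mgr_alloc() does */
		max_xid = pool_exch_range * (fc_cpu_mask + 1) + min_xid - 1;
	else
		pool_exch_range = (max_xid - min_xid + 1) / (fc_cpu_mask + 1);

	/* with these inputs: pool_exch_range 4088, max_xid 0x7fbf */
	printf("pool_exch_range %u, max_xid 0x%x\n", pool_exch_range, max_xid);
	return 0;
}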
2362 void fc_exch_mgr_free(struct fc_lport *lport)
2363 {
2364  struct fc_exch_mgr_anchor *ema, *next;
2365 
2366  flush_workqueue(fc_exch_workqueue);
2367  list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
2368  fc_exch_mgr_del(ema);
2369 }
2370 EXPORT_SYMBOL(fc_exch_mgr_free);
2371 
2379 static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
2380  struct fc_lport *lport,
2381  struct fc_frame_header *fh)
2382 {
2383  struct fc_exch_mgr_anchor *ema;
2384  u16 xid;
2385 
2386  if (f_ctl & FC_FC_EX_CTX)
2387  xid = ntohs(fh->fh_ox_id);
2388  else {
2389  xid = ntohs(fh->fh_rx_id);
2390  if (xid == FC_XID_UNKNOWN)
2391  return list_entry(lport->ema_list.prev,
2392  typeof(*ema), ema_list);
2393  }
2394 
2395  list_for_each_entry(ema, &lport->ema_list, ema_list) {
2396  if ((xid >= ema->mp->min_xid) &&
2397  (xid <= ema->mp->max_xid))
2398  return ema;
2399  }
2400  return NULL;
2401 }
2407 void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
2408 {
2409  struct fc_frame_header *fh = fc_frame_header_get(fp);
2410  struct fc_exch_mgr_anchor *ema;
2411  u32 f_ctl;
2412 
2413  /* lport lock ? */
2414  if (!lport || lport->state == LPORT_ST_DISABLED) {
2415  FC_LPORT_DBG(lport, "Receiving frames for an lport that "
2416  "has not been initialized correctly\n");
2417  fc_frame_free(fp);
2418  return;
2419  }
2420 
2421  f_ctl = ntoh24(fh->fh_f_ctl);
2422  ema = fc_find_ema(f_ctl, lport, fh);
2423  if (!ema) {
2424  FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor, "
2425  "fc_ctl <0x%x>, xid <0x%x>\n",
2426  f_ctl,
2427  (f_ctl & FC_FC_EX_CTX) ?
2428  ntohs(fh->fh_ox_id) :
2429  ntohs(fh->fh_rx_id));
2430  fc_frame_free(fp);
2431  return;
2432  }
2433 
2434  /*
2435  * If frame is marked invalid, just drop it.
2436  */
2437  switch (fr_eof(fp)) {
2438  case FC_EOF_T:
2439  if (f_ctl & FC_FC_END_SEQ)
2440  skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
2441  /* fall through */
2442  case FC_EOF_N:
2443  if (fh->fh_type == FC_TYPE_BLS)
2444  fc_exch_recv_bls(ema->mp, fp);
2445  else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
2446  FC_FC_EX_CTX)
2447  fc_exch_recv_seq_resp(ema->mp, fp);
2448  else if (f_ctl & FC_FC_SEQ_CTX)
2449  fc_exch_recv_resp(ema->mp, fp);
2450  else /* no EX_CTX and no SEQ_CTX */
2451  fc_exch_recv_req(lport, ema->mp, fp);
2452  break;
2453  default:
2454  FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)",
2455  fr_eof(fp));
2456  fc_frame_free(fp);
2457  }
2458 }
2459 EXPORT_SYMBOL(fc_exch_recv);
2460 
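For non-BLS frames, the dispatch in fc_exch_recv() reduces to a truth table on the two F_CTL context bits. An illustrative user-space sketch (bit positions as defined for F_CTL in fc_fs.h):

#include <stdio.h>

#define FC_FC_EX_CTX	(1 << 23)	/* exchange responder sent the frame */
#define FC_FC_SEQ_CTX	(1 << 22)	/* sequence recipient sent the frame */

static const char *demux(unsigned int f_ctl)
{
	if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX)
		return "fc_exch_recv_seq_resp";	/* response to our exchange */
	if (f_ctl & FC_FC_SEQ_CTX)
		return "fc_exch_recv_resp";	/* response to our sequence */
	return "fc_exch_recv_req";	/* no EX_CTX and no SEQ_CTX: request */
}

int main(void)
{
	printf("%s\n", demux(FC_FC_EX_CTX));
	printf("%s\n", demux(FC_FC_EX_CTX | FC_FC_SEQ_CTX));
	printf("%s\n", demux(0));
	return 0;
}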
2465 int fc_exch_init(struct fc_lport *lport)
2466 {
2467  if (!lport->tt.seq_start_next)
2468  lport->tt.seq_start_next = fc_seq_start_next;
2469 
2470  if (!lport->tt.seq_set_resp)
2471  lport->tt.seq_set_resp = fc_seq_set_resp;
2472 
2473  if (!lport->tt.exch_seq_send)
2474  lport->tt.exch_seq_send = fc_exch_seq_send;
2475 
2476  if (!lport->tt.seq_send)
2477  lport->tt.seq_send = fc_seq_send;
2478 
2479  if (!lport->tt.seq_els_rsp_send)
2480  lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
2481 
2482  if (!lport->tt.exch_done)
2483  lport->tt.exch_done = fc_exch_done;
2484 
2485  if (!lport->tt.exch_mgr_reset)
2486  lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
2487 
2488  if (!lport->tt.seq_exch_abort)
2489  lport->tt.seq_exch_abort = fc_seq_exch_abort;
2490 
2491  if (!lport->tt.seq_assign)
2492  lport->tt.seq_assign = fc_seq_assign;
2493 
2494  if (!lport->tt.seq_release)
2495  lport->tt.seq_release = fc_seq_release;
2496 
2497  return 0;
2498 }
2499 EXPORT_SYMBOL(fc_exch_init);
2500 
2504 int fc_setup_exch_mgr(void)
2505 {
2506  fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
2507  0, SLAB_HWCACHE_ALIGN, NULL);
2508  if (!fc_em_cachep)
2509  return -ENOMEM;
2510 
2511  /*
2512  * Initialize fc_cpu_mask and fc_cpu_order. fc_cpu_mask
2513  * is nr_cpu_ids rounded up to the next power of 2,
2514  * minus one; the exponent of that power is stored in
2515  * fc_cpu_order. Both are later needed to map between
2516  * an exch id and an exch array index in the per-cpu
2517  * exch pool.
2518  *
2519  * The round-up aligns fc_cpu_mask with the exchange
2520  * id's lower bits, so that all incoming frames of an
2521  * exchange are delivered to the cpu on which the
2522  * exchange originated, by a simple bitwise AND of
2523  * fc_cpu_mask and the exchange id.
2524  */
2525  fc_cpu_mask = 1;
2526  fc_cpu_order = 0;
2527  while (fc_cpu_mask < nr_cpu_ids) {
2528  fc_cpu_mask <<= 1;
2529  fc_cpu_order++;
2530  }
2531  fc_cpu_mask--;
2532 
2533  fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
2534  if (!fc_exch_workqueue)
2535  goto err;
2536  return 0;
2537 err:
2538  kmem_cache_destroy(fc_em_cachep);
2539  return -ENOMEM;
2540 }
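The round-up in fc_setup_exch_mgr() is easy to verify standalone; in this illustrative sketch nr_cpu_ids is a sample value:

#include <stdio.h>

int main(void)
{
	unsigned int nr_cpu_ids = 6;	/* sample value */
	unsigned int fc_cpu_mask = 1, fc_cpu_order = 0;

	/* same loop as fc_setup_exch_mgr() above */
	while (fc_cpu_mask < nr_cpu_ids) {
		fc_cpu_mask <<= 1;
		fc_cpu_order++;
	}
	fc_cpu_mask--;

	/* 6 CPUs round up to 8: mask 0x7, order 3 */
	printf("fc_cpu_mask 0x%x, fc_cpu_order %u\n", fc_cpu_mask, fc_cpu_order);
	return 0;
}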
2541 
2545 void fc_destroy_exch_mgr(void)
2546 {
2547  destroy_workqueue(fc_exch_workqueue);
2548  kmem_cache_destroy(fc_em_cachep);
2549 }