Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
fc_disc.c
Go to the documentation of this file.
1 /*
2  * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 /*
21  * Target Discovery
22  *
23  * This block discovers all FC-4 remote ports, including FCP initiators. It
24  * also handles RSCN events and re-discovery if necessary.
25  */
26 
27 /*
28  * DISC LOCKING
29  *
30  * The disc mutex can be locked when acquiring rport locks, but may not
31  * be held when acquiring the lport lock. Refer to fc_lport.c for more
32  * details.
33  */
34 
35 #include <linux/timer.h>
36 #include <linux/slab.h>
37 #include <linux/err.h>
38 #include <linux/export.h>
39 #include <asm/unaligned.h>
40 
41 #include <scsi/fc/fc_gs.h>
42 
43 #include <scsi/libfc.h>
44 
45 #include "fc_libfc.h"
46 
47 #define FC_DISC_RETRY_LIMIT 3 /* max retries */
48 #define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
49 
50 static void fc_disc_gpn_ft_req(struct fc_disc *);
51 static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
52 static void fc_disc_done(struct fc_disc *, enum fc_disc_event);
53 static void fc_disc_timeout(struct work_struct *);
54 static int fc_disc_single(struct fc_lport *, struct fc_disc_port *);
55 static void fc_disc_restart(struct fc_disc *);
56 
64 static void fc_disc_stop_rports(struct fc_disc *disc)
65 {
66  struct fc_lport *lport;
67  struct fc_rport_priv *rdata;
68 
69  lport = fc_disc_lport(disc);
70 
71  mutex_lock(&disc->disc_mutex);
72  list_for_each_entry_rcu(rdata, &disc->rports, peers)
73  lport->tt.rport_logoff(rdata);
74  mutex_unlock(&disc->disc_mutex);
75 }
76 
85 static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
86 {
87  struct fc_lport *lport;
88  struct fc_els_rscn *rp;
89  struct fc_els_rscn_page *pp;
90  struct fc_seq_els_data rjt_data;
91  unsigned int len;
92  int redisc = 0;
93  enum fc_els_rscn_ev_qual ev_qual;
95  LIST_HEAD(disc_ports);
96  struct fc_disc_port *dp, *next;
97 
98  lport = fc_disc_lport(disc);
99 
100  FC_DISC_DBG(disc, "Received an RSCN event\n");
101 
102  /* make sure the frame contains an RSCN message */
103  rp = fc_frame_payload_get(fp, sizeof(*rp));
104  if (!rp)
105  goto reject;
106  /* make sure the page length is as expected (4 bytes) */
107  if (rp->rscn_page_len != sizeof(*pp))
108  goto reject;
109  /* get the RSCN payload length */
110  len = ntohs(rp->rscn_plen);
111  if (len < sizeof(*rp))
112  goto reject;
113  /* make sure the frame contains the expected payload */
114  rp = fc_frame_payload_get(fp, len);
115  if (!rp)
116  goto reject;
117  /* payload must be a multiple of the RSCN page size */
118  len -= sizeof(*rp);
119  if (len % sizeof(*pp))
120  goto reject;
121 
122  for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
123  ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
124  ev_qual &= ELS_RSCN_EV_QUAL_MASK;
126  fmt &= ELS_RSCN_ADDR_FMT_MASK;
127  /*
128  * if we get an address format other than port
129  * (area, domain, fabric), then do a full discovery
130  */
131  switch (fmt) {
132  case ELS_ADDR_FMT_PORT:
133  FC_DISC_DBG(disc, "Port address format for port "
134  "(%6.6x)\n", ntoh24(pp->rscn_fid));
135  dp = kzalloc(sizeof(*dp), GFP_KERNEL);
136  if (!dp) {
137  redisc = 1;
138  break;
139  }
140  dp->lp = lport;
141  dp->port_id = ntoh24(pp->rscn_fid);
142  list_add_tail(&dp->peers, &disc_ports);
143  break;
144  case ELS_ADDR_FMT_AREA:
145  case ELS_ADDR_FMT_DOM:
146  case ELS_ADDR_FMT_FAB:
147  default:
148  FC_DISC_DBG(disc, "Address format is (%d)\n", fmt);
149  redisc = 1;
150  break;
151  }
152  }
153  lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
154 
155  /*
156  * If not doing a complete rediscovery, do GPN_ID on
157  * the individual ports mentioned in the list.
158  * If any of these get an error, do a full rediscovery.
159  * In any case, go through the list and free the entries.
160  */
161  list_for_each_entry_safe(dp, next, &disc_ports, peers) {
162  list_del(&dp->peers);
163  if (!redisc)
164  redisc = fc_disc_single(lport, dp);
165  kfree(dp);
166  }
167  if (redisc) {
168  FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
169  fc_disc_restart(disc);
170  } else {
171  FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
172  "redisc %d state %d in_prog %d\n",
173  redisc, lport->state, disc->pending);
174  }
175  fc_frame_free(fp);
176  return;
177 reject:
178  FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
179  rjt_data.reason = ELS_RJT_LOGIC;
180  rjt_data.explan = ELS_EXPL_NONE;
181  lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
182  fc_frame_free(fp);
183 }
184 
194 static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
195 {
196  u8 op;
197  struct fc_disc *disc = &lport->disc;
198 
199  op = fc_frame_payload_op(fp);
200  switch (op) {
201  case ELS_RSCN:
202  mutex_lock(&disc->disc_mutex);
203  fc_disc_recv_rscn_req(disc, fp);
204  mutex_unlock(&disc->disc_mutex);
205  break;
206  default:
207  FC_DISC_DBG(disc, "Received an unsupported request, "
208  "the opcode is (%x)\n", op);
209  fc_frame_free(fp);
210  break;
211  }
212 }
213 
/**
 * fc_disc_restart() - Restart discovery
 * @disc: The discovery object to be restarted
 *
 * Locking Note: every call site in this file holds disc_mutex when
 * calling here (fc_disc_start, fc_disc_recv_req via the RSCN path,
 * fc_disc_done, fc_disc_gpn_id_resp).
 */
static void fc_disc_restart(struct fc_disc *disc)
{
	/* No callback registered yet: discovery has never been started. */
	if (!disc->disc_callback)
		return;

	FC_DISC_DBG(disc, "Restarting discovery\n");

	/*
	 * If a pass is already in flight, just record that another one is
	 * wanted; fc_disc_done() restarts when the current pass completes.
	 */
	disc->requested = 1;
	if (disc->pending)
		return;

	/*
	 * Advance disc_id. This is an arbitrary non-zero number that will
	 * match the value in the fc_rport_priv after discovery for all
	 * freshly-discovered remote ports. Avoid wrapping to zero.
	 */
	disc->disc_id = (disc->disc_id + 2) | 1;
	disc->retry_count = 0;
	fc_disc_gpn_ft_req(disc);
}
241 
247 static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
248  enum fc_disc_event),
249  struct fc_lport *lport)
250 {
251  struct fc_disc *disc = &lport->disc;
252 
253  /*
254  * At this point we may have a new disc job or an existing
255  * one. Either way, let's lock when we make changes to it
256  * and send the GPN_FT request.
257  */
258  mutex_lock(&disc->disc_mutex);
260  fc_disc_restart(disc);
261  mutex_unlock(&disc->disc_mutex);
262 }
263 
/**
 * fc_disc_done() - Discovery has been completed
 * @disc:  The discovery context
 * @event: The discovery completion status
 *
 * Logs in ports found by the just-finished pass, logs out previously
 * known ports that were not re-found, then invokes the registered
 * completion callback. Runs with disc_mutex held; the mutex is dropped
 * around the callback invocation (per the DISC LOCKING note at the top
 * of this file, the lport lock must not be acquired under disc_mutex).
 */
static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
{
	struct fc_lport *lport = fc_disc_lport(disc);
	struct fc_rport_priv *rdata;

	FC_DISC_DBG(disc, "Discovery complete\n");

	disc->pending = 0;
	/* Another discovery was requested while this one ran: restart now. */
	if (disc->requested) {
		fc_disc_restart(disc);
		return;
	}

	/*
	 * Go through all remote ports. If they were found in the latest
	 * discovery, reverify or log them in. Otherwise, log them out.
	 * Skip ports which were never discovered. These are the dNS port
	 * and ports which were created by PLOGI.
	 */
	list_for_each_entry_rcu(rdata, &disc->rports, peers) {
		if (!rdata->disc_id)
			continue;
		if (rdata->disc_id == disc->disc_id)
			lport->tt.rport_login(rdata);
		else
			lport->tt.rport_logoff(rdata);
	}

	/* Drop the mutex across the callback; reacquire for the caller. */
	mutex_unlock(&disc->disc_mutex);
	disc->disc_callback(lport, event);
	mutex_lock(&disc->disc_mutex);
}
305 
311 static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
312 {
313  struct fc_lport *lport = fc_disc_lport(disc);
314  unsigned long delay = 0;
315 
316  FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
317  PTR_ERR(fp), disc->retry_count,
319 
320  if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
321  /*
322  * Memory allocation failure, or the exchange timed out,
323  * retry after delay.
324  */
325  if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
326  /* go ahead and retry */
327  if (!fp)
329  else {
330  delay = msecs_to_jiffies(lport->e_d_tov);
331 
332  /* timeout faster first time */
333  if (!disc->retry_count)
334  delay /= 4;
335  }
336  disc->retry_count++;
337  schedule_delayed_work(&disc->disc_work, delay);
338  } else
339  fc_disc_done(disc, DISC_EV_FAILED);
340  } else if (PTR_ERR(fp) == -FC_EX_CLOSED) {
341  /*
342  * if discovery fails due to lport reset, clear
343  * pending flag so that subsequent discovery can
344  * continue
345  */
346  disc->pending = 0;
347  }
348 }
349 
/**
 * fc_disc_gpn_ft_req() - Send a Get Port Names by FC-4 type (GPN_FT) request
 * @disc: The discovery context
 *
 * Marks discovery pending, resets the multi-frame response reassembly
 * state, and sends the GPN_FT via the elsct template operation. Any
 * allocation or send failure is routed through fc_disc_error(), which
 * schedules a retry or fails the discovery.
 */
static void fc_disc_gpn_ft_req(struct fc_disc *disc)
{
	struct fc_frame *fp;
	struct fc_lport *lport = fc_disc_lport(disc);

	WARN_ON(!fc_lport_test_ready(lport));

	disc->pending = 1;
	disc->requested = 0;

	/* Reset response-reassembly state for a fresh pass. */
	disc->buf_len = 0;
	disc->seq_count = 0;
	fp = fc_frame_alloc(lport,
			    sizeof(struct fc_ct_hdr) +
			    sizeof(struct fc_ns_gid_ft));
	if (!fp)
		goto err;

	if (lport->tt.elsct_send(lport, 0, fp,
				 FC_NS_GPN_FT,
				 fc_disc_gpn_ft_resp,
				 disc, 3 * lport->r_a_tov))
		return;
err:
	/* NULL tells fc_disc_error() this was an allocation/send failure. */
	fc_disc_error(disc, NULL);
}
383 
/**
 * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response
 * @disc: The discovery context
 * @buf:  The response payload (array of struct fc_gpn_ft_resp entries)
 * @len:  The size of the response payload in bytes
 *
 * Creates an rport for each name record that does not refer to the local
 * port. A record may be split across two frames; the leading fragment is
 * saved in disc->partial_buf / disc->buf_len and completed on the next
 * call. Seeing FC_NS_FID_LAST completes discovery with DISC_EV_SUCCESS.
 *
 * Returns 0 on success or -ENOMEM if an rport could not be allocated.
 */
static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
{
	struct fc_lport *lport;
	struct fc_gpn_ft_resp *np;	/* current name record */
	char *bp;			/* byte cursor into buf */
	size_t plen;			/* bytes in the current record */
	size_t tlen;			/* bytes carried over from last frame */
	int error = 0;
	struct fc_rport_identifiers ids;
	struct fc_rport_priv *rdata;

	lport = fc_disc_lport(disc);
	disc->seq_count++;

	/*
	 * Handle partial name record left over from previous call.
	 */
	bp = buf;
	plen = len;
	np = (struct fc_gpn_ft_resp *)bp;
	tlen = disc->buf_len;
	disc->buf_len = 0;
	if (tlen) {
		WARN_ON(tlen >= sizeof(*np));
		plen = sizeof(*np) - tlen;	/* bytes still missing */
		WARN_ON(plen <= 0);
		WARN_ON(plen >= sizeof(*np));
		if (plen > len)
			plen = len;
		/* Complete the saved fragment in the side buffer. */
		np = &disc->partial_buf;
		memcpy((char *)np + tlen, bp, plen);

		/*
		 * Set bp so that the loop below will advance it to the
		 * first valid full name element.
		 */
		bp -= tlen;
		len += tlen;
		plen += tlen;
		disc->buf_len = (unsigned char) plen;
		if (plen == sizeof(*np))
			disc->buf_len = 0;
	}

	/*
	 * Handle full name records, including the one filled from above.
	 * Normally, np == bp and plen == len, but from the partial case above,
	 * bp, len describe the overall buffer, and np, plen describe the
	 * partial buffer, which if would usually be full now.
	 * After the first time through the loop, things return to "normal".
	 */
	while (plen >= sizeof(*np)) {
		ids.port_id = ntoh24(np->fp_fid);
		ids.port_name = ntohll(np->fp_wwpn);

		/* Skip records matching the local port's FID or WWPN. */
		if (ids.port_id != lport->port_id &&
		    ids.port_name != lport->wwpn) {
			rdata = lport->tt.rport_create(lport, ids.port_id);
			if (rdata) {
				rdata->ids.port_name = ids.port_name;
				/* Tag as found by this discovery pass. */
				rdata->disc_id = disc->disc_id;
			} else {
				printk(KERN_WARNING "libfc: Failed to allocate "
				       "memory for the newly discovered port "
				       "(%6.6x)\n", ids.port_id);
				error = -ENOMEM;
			}
		}

		if (np->fp_flags & FC_NS_FID_LAST) {
			/* Final record of the whole response. */
			fc_disc_done(disc, DISC_EV_SUCCESS);
			len = 0;
			break;
		}
		len -= sizeof(*np);
		bp += sizeof(*np);
		np = (struct fc_gpn_ft_resp *)bp;
		plen = len;
	}

	/*
	 * Save any partial record at the end of the buffer for next time.
	 */
	if (error == 0 && len > 0 && len < sizeof(*np)) {
		if (np != &disc->partial_buf) {
			FC_DISC_DBG(disc, "Partial buffer remains "
				    "for discovery\n");
			memcpy(&disc->partial_buf, np, len);
		}
		disc->buf_len = (unsigned char) len;
	}
	return error;
}
485 
/**
 * fc_disc_timeout() - Handler for discovery retry timeouts
 * @work: The delayed work embedded in the discovery context
 *
 * Runs as the disc_work handler scheduled by fc_disc_error(); re-issues
 * the GPN_FT request under the disc mutex.
 */
static void fc_disc_timeout(struct work_struct *work)
{
	struct fc_disc *disc = container_of(work,
					    struct fc_disc,
					    disc_work.work);
	mutex_lock(&disc->disc_mutex);
	fc_disc_gpn_ft_req(disc);
	mutex_unlock(&disc->disc_mutex);
}
499 
/**
 * fc_disc_gpn_ft_resp() - Handle a response frame from GPN_FT
 * @sp:       The sequence the GPN_FT response was received on
 * @fp:       The response frame, or an ERR_PTR-encoded error
 * @disc_arg: The discovery context
 *
 * The response may span multiple frames: the first frame (SOF initiate,
 * sequence count 0) carries the CT response header; later frames are
 * matched against disc->seq_count and contain payload only. Payload is
 * fed to fc_disc_gpn_ft_parse(); errors go through fc_disc_error().
 */
static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *disc_arg)
{
	struct fc_disc *disc = disc_arg;
	struct fc_ct_hdr *cp;
	struct fc_frame_header *fh;
	enum fc_disc_event event = DISC_EV_NONE;
	unsigned int seq_cnt;
	unsigned int len;
	int error = 0;

	mutex_lock(&disc->disc_mutex);
	FC_DISC_DBG(disc, "Received a GPN_FT response\n");

	/* Exchange-level error (timeout, closed, ...) — retry or fail. */
	if (IS_ERR(fp)) {
		fc_disc_error(disc, fp);
		mutex_unlock(&disc->disc_mutex);
		return;
	}

	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	seq_cnt = ntohs(fh->fh_seq_cnt);
	if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && disc->seq_count == 0) {
		/* First frame: must start with the CT response header. */
		cp = fc_frame_payload_get(fp, sizeof(*cp));
		if (!cp) {
			FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
				    fr_len(fp));
			event = DISC_EV_FAILED;
		} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {

			/* Accepted, parse the response. */
			len -= sizeof(*cp);
			error = fc_disc_gpn_ft_parse(disc, cp + 1, len);
		} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
			FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
				    "(check zoning)\n", cp->ct_reason,
				    cp->ct_explan);
			event = DISC_EV_FAILED;
			/*
			 * "Unable / FC-4 type not registered" means there is
			 * simply nothing to discover: treat it as an empty
			 * but successful discovery.
			 */
			if (cp->ct_reason == FC_FS_RJT_UNABL &&
			    cp->ct_explan == FC_FS_EXP_FTNR)
				event = DISC_EV_SUCCESS;
		} else {
			FC_DISC_DBG(disc, "GPN_FT unexpected response code "
				    "%x\n", ntohs(cp->ct_cmd));
			event = DISC_EV_FAILED;
		}
	} else if (fr_sof(fp) == FC_SOF_N3 && seq_cnt == disc->seq_count) {
		/* In-order continuation frame: payload only, no CT header. */
		error = fc_disc_gpn_ft_parse(disc, fh + 1, len);
	} else {
		FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
			    "seq_cnt %x expected %x sof %x eof %x\n",
			    seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
		event = DISC_EV_FAILED;
	}
	if (error)
		fc_disc_error(disc, fp);
	else if (event != DISC_EV_NONE)
		fc_disc_done(disc, event);
	fc_frame_free(fp);
	mutex_unlock(&disc->disc_mutex);
}
572 
/**
 * fc_disc_gpn_id_resp() - Handle a response frame from GPN_ID
 * @sp:        The sequence the GPN_ID response was received on
 * @fp:        The response frame, or an ERR_PTR-encoded error
 * @rdata_arg: The remote port the GPN_ID request was issued for
 *
 * On accept, adopts or verifies the reported WWPN and logs the rport in;
 * a changed WWPN causes a logoff and re-creation of the rport. Rejects
 * log the rport off; anything malformed falls back to a full
 * rediscovery. Always releases the rdata reference that was taken by
 * fc_disc_gpn_id_req() before returning.
 */
static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_rport_priv *new_rdata;
	struct fc_lport *lport;
	struct fc_disc *disc;
	struct fc_ct_hdr *cp;
	struct fc_ns_gid_pn *pn;
	u64 port_name;

	lport = rdata->local_port;
	disc = &lport->disc;

	mutex_lock(&disc->disc_mutex);
	/* Exchange closed (e.g. lport reset): just drop the reference. */
	if (PTR_ERR(fp) == -FC_EX_CLOSED)
		goto out;
	if (IS_ERR(fp))
		goto redisc;

	cp = fc_frame_payload_get(fp, sizeof(*cp));
	if (!cp)
		goto redisc;
	if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
		/* Frame must hold a full GID_PN payload after the header. */
		if (fr_len(fp) < sizeof(struct fc_frame_header) +
		    sizeof(*cp) + sizeof(*pn))
			goto redisc;
		pn = (struct fc_ns_gid_pn *)(cp + 1);
		port_name = get_unaligned_be64(&pn->fn_wwpn);
		/* -1 appears to mark a not-yet-known WWPN — adopt the
		 * reported one; TODO confirm against libfc WWN conventions. */
		if (rdata->ids.port_name == -1)
			rdata->ids.port_name = port_name;
		else if (rdata->ids.port_name != port_name) {
			/* WWPN changed: the port was replaced; re-create. */
			FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
				    "Port-id %6.6x wwpn %16.16llx\n",
				    rdata->ids.port_id, port_name);
			lport->tt.rport_logoff(rdata);

			new_rdata = lport->tt.rport_create(lport,
							   rdata->ids.port_id);
			if (new_rdata) {
				new_rdata->disc_id = disc->disc_id;
				lport->tt.rport_login(new_rdata);
			}
			goto out;
		}
		rdata->disc_id = disc->disc_id;
		lport->tt.rport_login(rdata);
	} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
		FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
			    cp->ct_reason, cp->ct_explan);
		lport->tt.rport_logoff(rdata);
	} else {
		FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
			    ntohs(cp->ct_cmd));
redisc:
		/* Anything unexpected: fall back to a full rediscovery. */
		fc_disc_restart(disc);
	}
out:
	mutex_unlock(&disc->disc_mutex);
	kref_put(&rdata->kref, lport->tt.rport_destroy);
}
642 
/**
 * fc_disc_gpn_id_req() - Send a Get Port Names by ID (GPN_ID) request
 * @lport: The local port to send the request from
 * @rdata: The remote port to query
 *
 * On success a reference is taken on @rdata; fc_disc_gpn_id_resp()
 * drops it when the response (or error) arrives.
 *
 * Returns 0 on success or -ENOMEM on allocation/send failure.
 */
static int fc_disc_gpn_id_req(struct fc_lport *lport,
			      struct fc_rport_priv *rdata)
{
	struct fc_frame *fp;

	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
			    sizeof(struct fc_ns_fid));
	if (!fp)
		return -ENOMEM;
	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID,
				  fc_disc_gpn_id_resp, rdata,
				  3 * lport->r_a_tov))
		return -ENOMEM;
	/* Reference is released by the response handler. */
	kref_get(&rdata->kref);
	return 0;
}
668 
677 static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
678 {
679  struct fc_rport_priv *rdata;
680 
681  rdata = lport->tt.rport_create(lport, dp->port_id);
682  if (!rdata)
683  return -ENOMEM;
684  rdata->disc_id = 0;
685  return fc_disc_gpn_id_req(lport, rdata);
686 }
687 
692 static void fc_disc_stop(struct fc_lport *lport)
693 {
694  struct fc_disc *disc = &lport->disc;
695 
696  if (disc->pending)
698  fc_disc_stop_rports(disc);
699 }
700 
/**
 * fc_disc_stop_final() - Stop discovery permanently for a given lport
 * @lport: The lport that discovery should stop on
 *
 * Stops discovery and then flushes the rport work queue so no rport
 * work remains outstanding when the lport goes away.
 */
static void fc_disc_stop_final(struct fc_lport *lport)
{
	fc_disc_stop(lport);
	lport->tt.rport_flush_queue();
}
713 
718 int fc_disc_init(struct fc_lport *lport)
719 {
720  struct fc_disc *disc;
721 
722  if (!lport->tt.disc_start)
723  lport->tt.disc_start = fc_disc_start;
724 
725  if (!lport->tt.disc_stop)
726  lport->tt.disc_stop = fc_disc_stop;
727 
728  if (!lport->tt.disc_stop_final)
729  lport->tt.disc_stop_final = fc_disc_stop_final;
730 
731  if (!lport->tt.disc_recv_req)
732  lport->tt.disc_recv_req = fc_disc_recv_req;
733 
734  disc = &lport->disc;
735  INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
736  mutex_init(&disc->disc_mutex);
737  INIT_LIST_HEAD(&disc->rports);
738 
739  disc->priv = lport;
740 
741  return 0;
742 }