Linux Kernel 3.7.1
bnx2fc_tgt.c
/* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * Handles operations such as session offload/upload etc, and manages
 * session resources such as connection id and qp resources.
 *
 * Copyright (c) 2008 - 2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi ([email protected])
 */

#include "bnx2fc.h"
static void bnx2fc_upld_timer(unsigned long data);
static void bnx2fc_ofld_timer(unsigned long data);
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
			   struct fcoe_port *port,
			   struct fc_rport_priv *rdata);
static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
				struct bnx2fc_rport *tgt);
static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
				     struct bnx2fc_rport *tgt);
static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
				     struct bnx2fc_rport *tgt);
static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);

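/*
 * Locking note (summarized from the comments below): session offload and
 * upload are serialized by hba->hba_mutex; the conn_id table
 * (tgt_ofld_list) is additionally guarded by hba->hba_lock, and readers
 * must hold at least one of the two (see bnx2fc_alloc_conn_id()).
 */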
static void bnx2fc_upld_timer(unsigned long data)
{

	struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;

	BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
	/* fake upload completion */
	set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
	clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
	wake_up_interruptible(&tgt->upld_wait);
}

static void bnx2fc_ofld_timer(unsigned long data)
{

	struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;

	BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
	/* NOTE: This function should never be called, as
	 * offload should never timeout
	 */
	/*
	 * If the timer has expired, this session is dead.
	 * Clear the offloaded flag and log out of this device.
	 * Since the OFFLOADED flag is cleared, this case
	 * will be considered as an offload error and the
	 * port will be logged off, and conn_id, session
	 * resources are freed up in bnx2fc_offload_session
	 */
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

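/*
 * Offload/upload completion handshake: the driver posts a KWQE to the
 * firmware, arms a one-shot timer (BNX2FC_FW_TIMEOUT), and sleeps on a
 * wait queue until either the completion handler or the expired timer
 * (which fakes a completion, see above) sets the corresponding
 * *_REQ_COMPL flag and wakes it up.
 */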
static void bnx2fc_offload_session(struct fcoe_port *port,
				   struct bnx2fc_rport *tgt,
				   struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport *rport = rdata->rport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	int rval;
	int i = 0;

	/* Initialize bnx2fc_rport */
	/* NOTE: tgt is already bzero'd */
	rval = bnx2fc_init_tgt(tgt, port, rdata);
	if (rval) {
		printk(KERN_ERR PFX "Failed to allocate conn id for "
			"port_id (%6x)\n", rport->port_id);
		goto tgt_init_err;
	}

	/* Allocate session resources */
	rval = bnx2fc_alloc_session_resc(hba, tgt);
	if (rval) {
		printk(KERN_ERR PFX "Failed to allocate resources\n");
		goto ofld_err;
	}

	/*
	 * Initialize FCoE session offload process.
	 * Upon completion of the offload process, add
	 * the rport to the list of rports.
	 */
retry_ofld:
	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	rval = bnx2fc_send_session_ofld_req(port, tgt);
	if (rval) {
		printk(KERN_ERR PFX "ofld_req failed\n");
		goto ofld_err;
	}

	/*
	 * Wait until the session is offloaded and enabled. 3 seconds
	 * should be ample time for this process to complete.
	 */
	setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
	mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);

	wait_event_interruptible(tgt->ofld_wait,
				 (test_bit(
				  BNX2FC_FLAG_OFLD_REQ_CMPL,
				  &tgt->flags)));
	if (signal_pending(current))
		flush_signals(current);

	del_timer_sync(&tgt->ofld_timer);

	if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
		if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
				       &tgt->flags)) {
			BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
				"retry ofld..%d\n", i++);
			msleep_interruptible(1000);
			if (i > 3) {
				i = 0;
				goto ofld_err;
			}
			goto retry_ofld;
		}
		goto ofld_err;
	}
	if (bnx2fc_map_doorbell(tgt)) {
		printk(KERN_ERR PFX "map doorbell failed - no mem\n");
		/* upload will take care of cleaning up sess resc */
		lport->tt.rport_logoff(rdata);
	}
	return;

ofld_err:
	/* couldn't offload the session. log off from this rport */
	BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
	/* Free session resources */
	bnx2fc_free_session_resc(hba, tgt);
tgt_init_err:
	if (tgt->fcoe_conn_id != -1)
		bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
	lport->tt.rport_logoff(rdata);
}

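/*
 * bnx2fc_flush_active_ios - drain this target's active, TMF, ELS and
 * retire queues, completing or force-cleaning each outstanding request
 * so the session can be uploaded with no I/O in flight.
 */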
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
{
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_cmd *tmp;
	int rc;
	int i = 0;
	BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
		       tgt->num_active_ios.counter);

	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 1;

	list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) {
		i++;
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");

		if (cancel_delayed_work(&io_req->timeout_work)) {
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
						&io_req->req_flags)) {
				/* Handle eh_abort timeout */
				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
					      "cleaned up\n");
				complete(&io_req->tm_done);
			}
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		}

		set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
		set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);

		/* Do not issue cleanup when disable request failed */
		if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
			bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
		else {
			rc = bnx2fc_initiate_cleanup(io_req);
			BUG_ON(rc);
		}
	}

	list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) {
		i++;
		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
		BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
		if (io_req->wait_for_comp)
			complete(&io_req->tm_done);
	}

	list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) {
		i++;
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;

		BNX2FC_IO_DBG(io_req, "els_queue cleanup\n");

		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

		if ((io_req->cb_func) && (io_req->cb_arg)) {
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}

		/* Do not issue cleanup when disable request failed */
		if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
			bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
		else {
			rc = bnx2fc_initiate_cleanup(io_req);
			BUG_ON(rc);
		}
	}

	list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) {
		i++;
		list_del_init(&io_req->link);

		BNX2FC_IO_DBG(io_req, "retire_queue flush\n");

		if (cancel_delayed_work(&io_req->timeout_work)) {
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
						&io_req->req_flags)) {
				/* Handle eh_abort timeout */
				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
					      "in retire_q\n");
				if (io_req->wait_for_comp)
					complete(&io_req->tm_done);
			}
			kref_put(&io_req->refcount, bnx2fc_cmd_release);
		}

		clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}

	BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
	i = 0;
	spin_unlock_bh(&tgt->tgt_lock);
	/* wait for active_ios to go to 0 */
	while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
		msleep(25);
	if (tgt->num_active_ios.counter != 0)
		printk(KERN_ERR PFX "CLEANUP on port 0x%x:"
		       " active_ios = %d\n",
		       tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 0;
	spin_unlock_bh(&tgt->tgt_lock);
}

static void bnx2fc_upload_session(struct fcoe_port *port,
				  struct bnx2fc_rport *tgt)
{
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
		       tgt->num_active_ios.counter);

	/*
	 * Called with hba->hba_mutex held.
	 * This is a blocking call
	 */
	clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
	bnx2fc_send_session_disable_req(port, tgt);

	/*
	 * Wait for the upload to complete. 3 seconds
	 * should be sufficient time for this process to complete.
	 */
	setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
	mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);

	BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
	wait_event_interruptible(tgt->upld_wait,
				 (test_bit(
				  BNX2FC_FLAG_UPLD_REQ_COMPL,
				  &tgt->flags)));

	if (signal_pending(current))
		flush_signals(current);

	del_timer_sync(&tgt->upld_timer);

	/*
	 * Traverse through the active_q and tmf_q and clean up
	 * IOs in these lists
	 */
	BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
		       tgt->flags);
	bnx2fc_flush_active_ios(tgt);

	/* Issue destroy KWQE */
	if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
		BNX2FC_TGT_DBG(tgt, "send destroy req\n");
		clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		bnx2fc_send_session_destroy_req(hba, tgt);

		/* wait for destroy to complete */
		setup_timer(&tgt->upld_timer,
			    bnx2fc_upld_timer, (unsigned long)tgt);
		mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);

		wait_event_interruptible(tgt->upld_wait,
					 (test_bit(
					  BNX2FC_FLAG_UPLD_REQ_COMPL,
					  &tgt->flags)));

		if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
			printk(KERN_ERR PFX "ERROR!! destroy timed out\n");

		BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
			tgt->flags);
		if (signal_pending(current))
			flush_signals(current);

		del_timer_sync(&tgt->upld_timer);

	} else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
		printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
				" not sent to FW\n");
	} else {
		printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
				" not sent to FW\n");
	}

	/* Free session resources */
	bnx2fc_free_session_resc(hba, tgt);
	bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
}

static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
			   struct fcoe_port *port,
			   struct fc_rport_priv *rdata)
{

	struct fc_rport *rport = rdata->rport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;

	tgt->rport = rport;
	tgt->rdata = rdata;
	tgt->port = port;

	if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
		BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
		tgt->fcoe_conn_id = -1;
		return -1;
	}

	tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
	if (tgt->fcoe_conn_id == -1)
		return -1;

	BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);

	tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
	tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
	tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
	atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX);

	/* Initialize the toggle bit */
	tgt->sq_curr_toggle_bit = 1;
	tgt->cq_curr_toggle_bit = 1;
	tgt->sq_prod_idx = 0;
	tgt->cq_cons_idx = 0;
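	/*
	 * The RQ producer index starts at 0x8000 rather than 0; the same
	 * initial value is programmed into conn_db->rq_prod in
	 * bnx2fc_alloc_session_resc() below.
	 */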
	tgt->rq_prod_idx = 0x8000;
	tgt->rq_cons_idx = 0;
	atomic_set(&tgt->num_active_ios, 0);

	if (rdata->flags & FC_RP_FLAGS_RETRY) {
		tgt->dev_type = TYPE_TAPE;
		tgt->io_timeout = 0; /* use default ULP timeout */
	} else {
		tgt->dev_type = TYPE_DISK;
		tgt->io_timeout = BNX2FC_IO_TIMEOUT;
	}

	/* initialize sq doorbell */
	sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
	sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
					B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
	/* initialize rx doorbell */
	rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) |
			  (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) |
			  (B577XX_FCOE_CONNECTION_TYPE <<
				B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT));
	rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) |
		     (0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT);

	spin_lock_init(&tgt->tgt_lock);
	spin_lock_init(&tgt->cq_lock);

	/* Initialize active_cmd_queue list */
	INIT_LIST_HEAD(&tgt->active_cmd_queue);

	/* Initialize IO retire queue */
	INIT_LIST_HEAD(&tgt->io_retire_queue);

	INIT_LIST_HEAD(&tgt->els_queue);

	/* Initialize active_tm_queue list */
	INIT_LIST_HEAD(&tgt->active_tm_queue);

	init_waitqueue_head(&tgt->ofld_wait);
	init_waitqueue_head(&tgt->upld_wait);

	return 0;
}

/**
 * This event_callback is called after successful completion of libfc
 * initiated target login. bnx2fc can proceed with initiating the session
 * establishment.
 */
void bnx2fc_rport_event_handler(struct fc_lport *lport,
				struct fc_rport_priv *rdata,
				enum fc_rport_event event)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_rport *rport = rdata->rport;
	struct fc_rport_libfc_priv *rp;
	struct bnx2fc_rport *tgt;
	u32 port_id;

	BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
		event, rdata->ids.port_id);
	switch (event) {
	case RPORT_EV_READY:
		if (!rport) {
			printk(KERN_ERR PFX "rport is NULL: ERROR!\n");
			break;
		}

		rp = rport->dd_data;
		if (rport->port_id == FC_FID_DIR_SERV) {
			/*
			 * bnx2fc_rport structure doesn't exist for
			 * directory server.
			 * We should not come here, as lport will
			 * take care of fabric login
			 */
			printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n",
				rdata->ids.port_id);
			break;
		}

		if (rdata->spp_type != FC_TYPE_FCP) {
			BNX2FC_HBA_DBG(lport, "not FCP type target."
				       " not offloading\n");
			break;
		}
		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
			BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
				       " not offloading\n");
			break;
		}

		/*
		 * Offload process is protected with hba mutex.
		 * Use the same mutex_lock for upload process too
		 */
		mutex_lock(&hba->hba_mutex);
		tgt = (struct bnx2fc_rport *)&rp[1];

		/* This can happen when ADISC finds the same target */
		if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
			BNX2FC_TGT_DBG(tgt, "already offloaded\n");
			mutex_unlock(&hba->hba_mutex);
			return;
		}

		/*
		 * Offload the session. This is a blocking call, and will
		 * wait until the session is offloaded.
		 */
		bnx2fc_offload_session(port, tgt, rdata);

		BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
			hba->num_ofld_sess);

		if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
			/*
			 * Session is offloaded and enabled. Map
			 * doorbell register for this target
			 */
			BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
			/* This counter is protected with hba mutex */
			hba->num_ofld_sess++;

			set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
		} else {
			/*
			 * Offload or enable would have failed.
			 * In offload/enable completion path, the
			 * rport would have already been removed
			 */
			BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
				       "offloaded flag not set\n");
		}
		mutex_unlock(&hba->hba_mutex);
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		if (port_id == FC_FID_DIR_SERV)
			break;

		if (!rport) {
			printk(KERN_INFO PFX "%x - rport not created Yet!!\n",
				port_id);
			break;
		}
		rp = rport->dd_data;
		mutex_lock(&hba->hba_mutex);
		/*
		 * Perform session upload. Note that rdata->peers is already
		 * removed from disc->rports list before we get this event.
		 */
		tgt = (struct bnx2fc_rport *)&rp[1];

		if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
			mutex_unlock(&hba->hba_mutex);
			break;
		}
		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);

		bnx2fc_upload_session(port, tgt);
		hba->num_ofld_sess--;
		BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
			hba->num_ofld_sess);
		/*
		 * Try to wake up the linkdown wait thread. If num_ofld_sess
		 * is 0, the waiting thread wakes up
		 */
		if ((hba->wait_for_link_down) &&
		    (hba->num_ofld_sess == 0)) {
			wake_up_interruptible(&hba->shutdown_wait);
		}
		if (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags)) {
			printk(KERN_ERR PFX "Relogin to the tgt\n");
			mutex_lock(&lport->disc.disc_mutex);
			lport->tt.rport_login(rdata);
			mutex_unlock(&lport->disc.disc_mutex);
		}
		mutex_unlock(&hba->hba_mutex);

		break;

	case RPORT_EV_NONE:
		break;
	}
}

/**
 * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
 *
 * @port:  fcoe_port struct to lookup the target port on
 * @port_id: The remote port ID to look up
 */
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
				       u32 port_id)
{
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct bnx2fc_rport *tgt;
	struct fc_rport_priv *rdata;
	int i;

	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		tgt = hba->tgt_ofld_list[i];
		if ((tgt) && (tgt->port == port)) {
			rdata = tgt->rdata;
			if (rdata->ids.port_id == port_id) {
				if (rdata->rp_state != RPORT_ST_DELETE) {
					BNX2FC_TGT_DBG(tgt, "rport "
						"obtained\n");
					return tgt;
				} else {
					BNX2FC_TGT_DBG(tgt, "rport 0x%x "
						"is in DELETED state\n",
						rdata->ids.port_id);
					return NULL;
				}
			}
		}
	}
	return NULL;
}

/**
 * bnx2fc_alloc_conn_id - allocates FCOE Connection id
 *
 * @hba:	pointer to adapter structure
 * @tgt:	pointer to bnx2fc_rport structure
 */
static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
				struct bnx2fc_rport *tgt)
{
	u32 conn_id, next;

	/* called with hba mutex held */

	/*
	 * tgt_ofld_list access is synchronized using
	 * both hba mutex and hba lock. At least the hba mutex
	 * or the hba lock needs to be held for read access.
	 */

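	/*
	 * Round-robin allocation: start at next_conn_id and scan
	 * tgt_ofld_list for a free slot, wrapping at BNX2FC_NUM_MAX_SESS;
	 * arriving back at the starting point means every conn_id is
	 * currently in use.
	 */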
	spin_lock_bh(&hba->hba_lock);
	next = hba->next_conn_id;
	conn_id = hba->next_conn_id++;
	if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS)
		hba->next_conn_id = 0;

	while (hba->tgt_ofld_list[conn_id] != NULL) {
		conn_id++;
		if (conn_id == BNX2FC_NUM_MAX_SESS)
			conn_id = 0;

		if (conn_id == next) {
			/* No free conn_ids are available */
			spin_unlock_bh(&hba->hba_lock);
			return -1;
		}
	}
	hba->tgt_ofld_list[conn_id] = tgt;
	tgt->fcoe_conn_id = conn_id;
	spin_unlock_bh(&hba->hba_lock);
	return conn_id;
}

static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
{
	/* called with hba mutex held */
	spin_lock_bh(&hba->hba_lock);
	hba->tgt_ofld_list[conn_id] = NULL;
	spin_unlock_bh(&hba->hba_lock);
}

/**
 * bnx2fc_alloc_session_resc - Allocate qp resources for the session
 *
 */
static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
				     struct bnx2fc_rport *tgt)
{
	dma_addr_t page;
	int num_pages;
	u32 *pbl;

	/* Allocate and map SQ */
	tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
	tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
				     &tgt->sq_dma, GFP_KERNEL);
	if (!tgt->sq) {
		printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
			tgt->sq_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->sq, 0, tgt->sq_mem_size);

	/* Allocate and map CQ */
	tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
	tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
				     &tgt->cq_dma, GFP_KERNEL);
	if (!tgt->cq) {
		printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
			tgt->cq_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->cq, 0, tgt->cq_mem_size);

	/* Allocate and map RQ and RQ PBL */
	tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
	tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
				     &tgt->rq_dma, GFP_KERNEL);
	if (!tgt->rq) {
		printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
			tgt->rq_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->rq, 0, tgt->rq_mem_size);

	tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
	tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
					 &tgt->rq_pbl_dma, GFP_KERNEL);
	if (!tgt->rq_pbl) {
		printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
			tgt->rq_pbl_size);
		goto mem_alloc_failure;
	}

	memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
	num_pages = tgt->rq_mem_size / PAGE_SIZE;
	page = tgt->rq_dma;
	pbl = (u32 *)tgt->rq_pbl;
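	/*
	 * Each PBL entry is the 64-bit DMA address of one queue page,
	 * written as two consecutive 32-bit words: the low half first,
	 * then the high half.
	 */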
	while (num_pages--) {
		*pbl = (u32)page;
		pbl++;
		*pbl = (u32)((u64)page >> 32);
		pbl++;
		page += PAGE_SIZE;
	}

	/* Allocate and map XFERQ */
	tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
	tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
			       PAGE_MASK;

	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
					&tgt->xferq_dma, GFP_KERNEL);
	if (!tgt->xferq) {
		printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
			tgt->xferq_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->xferq, 0, tgt->xferq_mem_size);

	/* Allocate and map CONFQ & CONFQ PBL */
	tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
	tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
			       PAGE_MASK;

	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
					&tgt->confq_dma, GFP_KERNEL);
	if (!tgt->confq) {
		printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
			tgt->confq_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->confq, 0, tgt->confq_mem_size);

	tgt->confq_pbl_size =
		(tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
	tgt->confq_pbl_size =
		(tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					    tgt->confq_pbl_size,
					    &tgt->confq_pbl_dma, GFP_KERNEL);
	if (!tgt->confq_pbl) {
		printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
			tgt->confq_pbl_size);
		goto mem_alloc_failure;
	}

	memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
	num_pages = tgt->confq_mem_size / PAGE_SIZE;
	page = tgt->confq_dma;
	pbl = (u32 *)tgt->confq_pbl;
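	/* Populate the CONFQ PBL with the same low-word/high-word entry
	 * layout as the RQ PBL above. */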
	while (num_pages--) {
		*pbl = (u32)page;
		pbl++;
		*pbl = (u32)((u64)page >> 32);
		pbl++;
		page += PAGE_SIZE;
	}

	/* Allocate and map ConnDB */
	tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);

	tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
					  tgt->conn_db_mem_size,
					  &tgt->conn_db_dma, GFP_KERNEL);
	if (!tgt->conn_db) {
		printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
			tgt->conn_db_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->conn_db, 0, tgt->conn_db_mem_size);


	/* Allocate and map LCQ */
	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
	tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
			     PAGE_MASK;

	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
				      &tgt->lcq_dma, GFP_KERNEL);

	if (!tgt->lcq) {
		printk(KERN_ERR PFX "unable to allocate lcq %d\n",
			tgt->lcq_mem_size);
		goto mem_alloc_failure;
	}
	memset(tgt->lcq, 0, tgt->lcq_mem_size);

	tgt->conn_db->rq_prod = 0x8000;

	return 0;

mem_alloc_failure:
	return -ENOMEM;
}

/**
 * bnx2fc_free_session_resc - free qp resources for the session
 *
 * @hba:	adapter structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 *
 * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL
 */
static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
				     struct bnx2fc_rport *tgt)
{
	void __iomem *ctx_base_ptr;

	BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");

	spin_lock_bh(&tgt->cq_lock);
	ctx_base_ptr = tgt->ctx_base;
	tgt->ctx_base = NULL;

	/* Free LCQ */
	if (tgt->lcq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
				  tgt->lcq, tgt->lcq_dma);
		tgt->lcq = NULL;
	}
	/* Free connDB */
	if (tgt->conn_db) {
		dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
				  tgt->conn_db, tgt->conn_db_dma);
		tgt->conn_db = NULL;
	}
	/* Free confq and confq pbl */
	if (tgt->confq_pbl) {
		dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
				  tgt->confq_pbl, tgt->confq_pbl_dma);
		tgt->confq_pbl = NULL;
	}
	if (tgt->confq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
				  tgt->confq, tgt->confq_dma);
		tgt->confq = NULL;
	}
	/* Free XFERQ */
	if (tgt->xferq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
				  tgt->xferq, tgt->xferq_dma);
		tgt->xferq = NULL;
	}
	/* Free RQ PBL and RQ */
	if (tgt->rq_pbl) {
		dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
				  tgt->rq_pbl, tgt->rq_pbl_dma);
		tgt->rq_pbl = NULL;
	}
	if (tgt->rq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
				  tgt->rq, tgt->rq_dma);
		tgt->rq = NULL;
	}
	/* Free CQ */
	if (tgt->cq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
				  tgt->cq, tgt->cq_dma);
		tgt->cq = NULL;
	}
	/* Free SQ */
	if (tgt->sq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
				  tgt->sq, tgt->sq_dma);
		tgt->sq = NULL;
	}
	spin_unlock_bh(&tgt->cq_lock);

	if (ctx_base_ptr)
		iounmap(ctx_base_ptr);
}
899 }