Linux Kernel 3.7.1
mptbase.c
1 /*
2  * linux/drivers/message/fusion/mptbase.c
3  * This is the Fusion MPT base driver which supports multiple
4  * (SCSI + LAN) specialized protocol drivers.
5  * For use with LSI PCI chip/adapter(s)
6  * running LSI Fusion MPT (Message Passing Technology) firmware.
7  *
8  * Copyright (c) 1999-2008 LSI Corporation
9  * (mailto:[email protected])
10  *
11  */
12 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
13 /*
14  This program is free software; you can redistribute it and/or modify
15  it under the terms of the GNU General Public License as published by
16  the Free Software Foundation; version 2 of the License.
17 
18  This program is distributed in the hope that it will be useful,
19  but WITHOUT ANY WARRANTY; without even the implied warranty of
20  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21  GNU General Public License for more details.
22 
23  NO WARRANTY
24  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
25  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
26  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
27  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
28  solely responsible for determining the appropriateness of using and
29  distributing the Program and assumes all risks associated with its
30  exercise of rights under this Agreement, including but not limited to
31  the risks and costs of program errors, damage to or loss of data,
32  programs or equipment, and unavailability or interruption of operations.
33 
34  DISCLAIMER OF LIABILITY
35  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
36  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
38  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
39  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
40  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
41  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
42 
43  You should have received a copy of the GNU General Public License
44  along with this program; if not, write to the Free Software
45  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 */
47 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
48 
49 #include <linux/kernel.h>
50 #include <linux/module.h>
51 #include <linux/errno.h>
52 #include <linux/init.h>
53 #include <linux/seq_file.h>
54 #include <linux/slab.h>
55 #include <linux/types.h>
56 #include <linux/pci.h>
57 #include <linux/kdev_t.h>
58 #include <linux/blkdev.h>
59 #include <linux/delay.h>
60 #include <linux/interrupt.h> /* needed for in_interrupt() proto */
61 #include <linux/dma-mapping.h>
62 #include <asm/io.h>
63 #ifdef CONFIG_MTRR
64 #include <asm/mtrr.h>
65 #endif
66 #include <linux/kthread.h>
67 #include <scsi/scsi_host.h>
68 
69 #include "mptbase.h"
70 #include "lsi/mpi_log_fc.h"
71 
72 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
73 #define my_NAME "Fusion MPT base driver"
74 #define my_VERSION MPT_LINUX_VERSION_COMMON
75 #define MYNAM "mptbase"
76 
77 MODULE_AUTHOR(MODULEAUTHOR);
78 MODULE_DESCRIPTION(my_NAME);
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(my_VERSION);
81 
82 /*
83  * cmd line parameters
84  */
85 
86 static int mpt_msi_enable_spi;
87 module_param(mpt_msi_enable_spi, int, 0);
88 MODULE_PARM_DESC(mpt_msi_enable_spi,
89  " Enable MSI Support for SPI controllers (default=0)");
90 
91 static int mpt_msi_enable_fc;
92 module_param(mpt_msi_enable_fc, int, 0);
93 MODULE_PARM_DESC(mpt_msi_enable_fc,
94  " Enable MSI Support for FC controllers (default=0)");
95 
96 static int mpt_msi_enable_sas;
97 module_param(mpt_msi_enable_sas, int, 0);
98 MODULE_PARM_DESC(mpt_msi_enable_sas,
99  " Enable MSI Support for SAS controllers (default=0)");
100 
101 static int mpt_channel_mapping;
102 module_param(mpt_channel_mapping, int, 0);
103 MODULE_PARM_DESC(mpt_channel_mapping, " Mapping id's to channels (default=0)");
104 
105 static int mpt_debug_level;
106 static int mpt_set_debug_level(const char *val, struct kernel_param *kp);
107 module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int,
108  &mpt_debug_level, 0600);
109 MODULE_PARM_DESC(mpt_debug_level,
110  " debug level - refer to mptdebug.h - (default=0)");
111 
112 int mpt_fwfault_debug;
113 EXPORT_SYMBOL(mpt_fwfault_debug);
114 module_param(mpt_fwfault_debug, int, 0600);
115 MODULE_PARM_DESC(mpt_fwfault_debug,
116  "Enable detection of Firmware fault and halt Firmware on fault - (default=0)");
117 
118 static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS]
119  [MPT_MAX_CALLBACKNAME_LEN+1];
120 
121 #ifdef MFCNT
122 static int mfcounter = 0;
123 #define PRINT_MF_COUNT 20000
124 #endif
125 
126 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
127 /*
128  * Public data...
129  */
130 
131 #define WHOINIT_UNKNOWN 0xAA
132 
133 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
134 /*
135  * Private data...
136  */
137  /* Adapter link list */
138 LIST_HEAD(ioc_list);
139  /* Callback lookup table */
140 static MPT_CALLBACK MptCallbacks[MPT_MAX_PROTOCOL_DRIVERS];
141  /* Protocol driver class lookup table */
142 static int MptDriverClass[MPT_MAX_PROTOCOL_DRIVERS];
143  /* Event handler lookup table */
144 static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
145  /* Reset handler lookup table */
146 static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
147 static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
148 
149 #ifdef CONFIG_PROC_FS
150 static struct proc_dir_entry *mpt_proc_root_dir;
151 #endif
152 
153 /*
154  * Driver Callback Index's
155  */
156 static u8 mpt_base_index = MPT_MAX_PROTOCOL_DRIVERS;
157 static u8 last_drv_idx;
158 
159 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
160 /*
161  * Forward protos...
162  */
163 static irqreturn_t mpt_interrupt(int irq, void *bus_id);
164 static int mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
165  MPT_FRAME_HDR *reply);
166 static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
167  u32 *req, int replyBytes, u16 *u16reply, int maxwait,
168  int sleepFlag);
169 static int mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag);
170 static void mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev);
171 static void mpt_adapter_disable(MPT_ADAPTER *ioc);
172 static void mpt_adapter_dispose(MPT_ADAPTER *ioc);
173 
174 static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
175 static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
176 static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
177 static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
178 static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
179 static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
180 static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
181 static int mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag);
182 static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
183 static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
184 static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
185 static int PrimeIocFifos(MPT_ADAPTER *ioc);
186 static int WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
187 static int WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
188 static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
189 static int GetLanConfigPages(MPT_ADAPTER *ioc);
190 static int GetIoUnitPage2(MPT_ADAPTER *ioc);
191 int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
192 static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
193 static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
194 static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
195 static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
196 static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
197 static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch,
198  int sleepFlag);
199 static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
200 static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
201 static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
202 
203 #ifdef CONFIG_PROC_FS
204 static const struct file_operations mpt_summary_proc_fops;
205 static const struct file_operations mpt_version_proc_fops;
206 static const struct file_operations mpt_iocinfo_proc_fops;
207 #endif
208 static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
209 
210 static int ProcessEventNotification(MPT_ADAPTER *ioc,
211  EventNotificationReply_t *evReply, int *evHandlers);
212 static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
213 static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
214 static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
215 static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info , u8 cb_idx);
216 static int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
217 static void mpt_inactive_raid_list_free(MPT_ADAPTER *ioc);
218 
219 /* module entry point */
220 static int __init fusion_init (void);
221 static void __exit fusion_exit (void);
222 
223 #define CHIPREG_READ32(addr) readl_relaxed(addr)
224 #define CHIPREG_READ32_dmasync(addr) readl(addr)
225 #define CHIPREG_WRITE32(addr,val) writel(val, addr)
226 #define CHIPREG_PIO_WRITE32(addr,val) outl(val, (unsigned long)addr)
227 #define CHIPREG_PIO_READ32(addr) inl((unsigned long)addr)
228 
229 static void
230 pci_disable_io_access(struct pci_dev *pdev)
231 {
232  u16 command_reg;
233 
234  pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
235  command_reg &= ~1;
236  pci_write_config_word(pdev, PCI_COMMAND, command_reg);
237 }
238 
239 static void
240 pci_enable_io_access(struct pci_dev *pdev)
241 {
242  u16 command_reg;
243 
244  pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
245  command_reg |= 1;
246  pci_write_config_word(pdev, PCI_COMMAND, command_reg);
247 }
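/*
 * Editorial note (not part of mptbase.c): bit 0 of PCI_COMMAND is the
 * I/O Space Enable bit, defined as PCI_COMMAND_IO in <linux/pci_regs.h>,
 * so the two helpers above amount to clearing or setting that flag:
 *
 *     command_reg &= ~PCI_COMMAND_IO;     disable PIO decoding
 *     command_reg |=  PCI_COMMAND_IO;     enable PIO decoding
 *
 * They back the errata_flag_1064 workaround set up in mpt_attach().
 */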
248 
249 static int mpt_set_debug_level(const char *val, struct kernel_param *kp)
250 {
251  int ret = param_set_int(val, kp);
252  MPT_ADAPTER *ioc;
253 
254  if (ret)
255  return ret;
256 
257  list_for_each_entry(ioc, &ioc_list, list)
258  ioc->debug_level = mpt_debug_level;
259  return 0;
260 }
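/*
 * Editorial note (not part of mptbase.c): because the parameter is
 * registered with module_param_call() and mode 0600, the debug level can
 * be changed at run time through sysfs, and the setter above immediately
 * copies the new value into every adapter on ioc_list.  For example
 * (the value shown is hypothetical):
 *
 *     echo 8 > /sys/module/mptbase/parameters/mpt_debug_level
 */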
261 
268 static u8
269 mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
270 {
271  u8 cb_idx;
272 
273  for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--)
274  if (MptDriverClass[cb_idx] == dclass)
275  return cb_idx;
276  return 0;
277 }
278 
285 static int
286 mpt_is_discovery_complete(MPT_ADAPTER *ioc)
287 {
288  ConfigExtendedPageHeader_t hdr;
289  CONFIGPARMS cfg;
290  SasIOUnitPage0_t *buffer;
291  dma_addr_t dma_handle;
292  int rc = 0;
293 
294  memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
295  memset(&cfg, 0, sizeof(CONFIGPARMS));
296  hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
297  hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
298  hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
299  cfg.cfghdr.ehdr = &hdr;
300  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
301 
302  if ((mpt_config(ioc, &cfg)))
303  goto out;
304  if (!hdr.ExtPageLength)
305  goto out;
306 
307  buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
308  &dma_handle);
309  if (!buffer)
310  goto out;
311 
312  cfg.physAddr = dma_handle;
313  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
314 
315  if ((mpt_config(ioc, &cfg)))
316  goto out_free_consistent;
317 
318  if (!(buffer->PhyData[0].PortFlags &
319  MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS))
320  rc = 1;
321 
322  out_free_consistent:
323  pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
324  buffer, dma_handle);
325  out:
326  return rc;
327 }
328 
329 
337 static int mpt_remove_dead_ioc_func(void *arg)
338 {
339  MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
340  struct pci_dev *pdev;
341 
342  if ((ioc == NULL))
343  return -1;
344 
345  pdev = ioc->pcidev;
346  if ((pdev == NULL))
347  return -1;
348 
349  pci_stop_and_remove_bus_device(pdev);
350  return 0;
351 }
352 
353 
354 
360 static void
361 mpt_fault_reset_work(struct work_struct *work)
362 {
363  MPT_ADAPTER *ioc =
364  container_of(work, MPT_ADAPTER, fault_reset_work.work);
365  u32 ioc_raw_state;
366  int rc;
367  unsigned long flags;
368  MPT_SCSI_HOST *hd;
369  struct task_struct *p;
370 
371  if (ioc->ioc_reset_in_progress || !ioc->active)
372  goto out;
373 
374 
375  ioc_raw_state = mpt_GetIocState(ioc, 0);
376  if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_MASK) {
377  printk(MYIOC_s_INFO_FMT "%s: IOC is non-operational !!!!\n",
378  ioc->name, __func__);
379 
380  /*
381  * Call mptscsih_flush_pending_cmds callback so that we
382  * flush all pending commands back to OS.
383  This call is required to avoid deadlock at block layer.
384  * Dead IOC will fail to do diag reset,and this call is safe
385  * since dead ioc will never return any command back from HW.
386  */
387  hd = shost_priv(ioc->sh);
388  ioc->schedule_dead_ioc_flush_running_cmds(hd);
389 
390  /*Remove the Dead Host */
391  p = kthread_run(mpt_remove_dead_ioc_func, ioc,
392  "mpt_dead_ioc_%d", ioc->id);
393  if (IS_ERR(p)) {
394  printk(MYIOC_s_ERR_FMT
395  "%s: Running mpt_dead_ioc thread failed !\n",
396  ioc->name, __func__);
397  } else {
398  printk(MYIOC_s_WARN_FMT
399  "%s: Running mpt_dead_ioc thread success !\n",
400  ioc->name, __func__);
401  }
402  return; /* don't rearm timer */
403  }
404 
405  if ((ioc_raw_state & MPI_IOC_STATE_MASK)
406  == MPI_IOC_STATE_FAULT) {
407  printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
408  ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
409  printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
410  ioc->name, __func__);
411  rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
412  printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
413  __func__, (rc == 0) ? "success" : "failed");
414  ioc_raw_state = mpt_GetIocState(ioc, 0);
415  if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
416  printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
417  "reset (%04xh)\n", ioc->name, ioc_raw_state &
418  MPI_DOORBELL_DATA_MASK);
419  } else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) {
420  if ((mpt_is_discovery_complete(ioc))) {
421  devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing "
422  "discovery_quiesce_io flag\n", ioc->name));
423  ioc->sas_discovery_quiesce_io = 0;
424  }
425  }
426 
427  out:
428  /*
429  * Take turns polling alternate controller
430  */
431  if (ioc->alt_ioc)
432  ioc = ioc->alt_ioc;
433 
434  /* rearm the timer */
435  spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
436  if (ioc->reset_work_q)
437  queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
438  msecs_to_jiffies(MPT_POLLING_INTERVAL));
439  spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
440 }
441 
442 
443 /*
444  * Process turbo (context) reply...
445  */
446 static void
447 mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
448 {
449  MPT_FRAME_HDR *mf = NULL;
450  MPT_FRAME_HDR *mr = NULL;
451  u16 req_idx = 0;
452  u8 cb_idx;
453 
454  dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got TURBO reply req_idx=%08x\n",
455  ioc->name, pa));
456 
457  switch (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT) {
458  case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
459  req_idx = pa & 0x0000FFFF;
460  cb_idx = (pa & 0x00FF0000) >> 16;
461  mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
462  break;
463  case MPI_CONTEXT_REPLY_TYPE_LAN:
464  cb_idx = mpt_get_cb_idx(MPTLAN_DRIVER);
465  /*
466  * Blind set of mf to NULL here was fatal
467  * after lan_reply says "freeme"
468  * Fix sort of combined with an optimization here;
469  * added explicit check for case where lan_reply
470  * was just returning 1 and doing nothing else.
471  * For this case skip the callback, but set up
472  * proper mf value first here:-)
473  */
474  if ((pa & 0x58000000) == 0x58000000) {
475  req_idx = pa & 0x0000FFFF;
476  mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
477  mpt_free_msg_frame(ioc, mf);
478  mb();
479  return;
480  break;
481  }
482  mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
483  break;
484  case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
485  cb_idx = mpt_get_cb_idx(MPTSTM_DRIVER);
486  mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
487  break;
488  default:
489  cb_idx = 0;
490  BUG();
491  }
492 
493  /* Check for (valid) IO callback! */
494  if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
495  MptCallbacks[cb_idx] == NULL) {
496  printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
497  __func__, ioc->name, cb_idx);
498  goto out;
499  }
500 
501  if (MptCallbacks[cb_idx](ioc, mf, mr))
502  mpt_free_msg_frame(ioc, mf);
503  out:
504  mb();
505 }
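/*
 * Editorial note (not part of mptbase.c): for the SCSI-initiator context
 * type a turbo reply is the 32-bit word itself, with the request index in
 * bits 0-15 and the callback index in bits 16-23.  A hypothetical word
 * 0x00030042 therefore decodes to cb_idx = 0x03 and req_idx = 0x0042, and
 * the original request frame is recovered with
 * MPT_INDEX_2_MFPTR(ioc, req_idx).
 */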
506 
507 static void
508 mpt_reply(MPT_ADAPTER *ioc, u32 pa)
509 {
510  MPT_FRAME_HDR *mf;
511  MPT_FRAME_HDR *mr;
512  u16 req_idx;
513  u8 cb_idx;
514  int freeme;
515 
516  u32 reply_dma_low;
517  u16 ioc_stat;
518 
519  /* non-TURBO reply! Hmmm, something may be up...
520  * Newest turbo reply mechanism; get address
521  * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
522  */
523 
524  /* Map DMA address of reply header to cpu address.
525  * pa is 32 bits - but the dma address may be 32 or 64 bits
526  get offset based only on the low addresses
527  */
528 
529  reply_dma_low = (pa <<= 1);
530  mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
531  (reply_dma_low - ioc->reply_frames_low_dma));
532 
533  req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
534  cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
535  mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
536 
537  dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
538  ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
539  DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mr);
540 
541  /* Check/log IOC log info
542  */
543  ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
544  if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
545  u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
546  if (ioc->bus_type == FC)
547  mpt_fc_log_info(ioc, log_info);
548  else if (ioc->bus_type == SPI)
549  mpt_spi_log_info(ioc, log_info);
550  else if (ioc->bus_type == SAS)
551  mpt_sas_log_info(ioc, log_info, cb_idx);
552  }
553 
554  if (ioc_stat & MPI_IOCSTATUS_MASK)
555  mpt_iocstatus_info(ioc, (u32)ioc_stat, mf);
556 
557  /* Check for (valid) IO callback! */
558  if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
559  MptCallbacks[cb_idx] == NULL) {
560  printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
561  __func__, ioc->name, cb_idx);
562  freeme = 0;
563  goto out;
564  }
565 
566  freeme = MptCallbacks[cb_idx](ioc, mf, mr);
567 
568  out:
569  /* Flush (non-TURBO) reply with a WRITE! */
570  CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
571 
572  if (freeme)
573  mpt_free_msg_frame(ioc, mf);
574  mb();
575 }
576 
577 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
594 static irqreturn_t
595 mpt_interrupt(int irq, void *bus_id)
596 {
597  MPT_ADAPTER *ioc = bus_id;
598  u32 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
599 
600  if (pa == 0xFFFFFFFF)
601  return IRQ_NONE;
602 
603  /*
604  * Drain the reply FIFO!
605  */
606  do {
607  if (pa & MPI_ADDRESS_REPLY_A_BIT)
608  mpt_reply(ioc, pa);
609  else
610  mpt_turbo_reply(ioc, pa);
611  pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
612  } while (pa != 0xFFFFFFFF);
613 
614  return IRQ_HANDLED;
615 }
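/*
 * Editorial note (not part of mptbase.c): MPI_ADDRESS_REPLY_A_BIT is the
 * top bit of the ReplyFifo word.  When it is set, the word, shifted left
 * by one to drop the marker, yields the low 32 bits of the reply frame's
 * DMA address and is handled by mpt_reply(); when it is clear, the word
 * is a "turbo" context reply handled by mpt_turbo_reply().  A value of
 * 0xFFFFFFFF means the FIFO is empty and the interrupt is not ours.
 */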
616 
617 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
631 static int
632 mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
633 {
634  EventNotificationReply_t *pEventReply;
635  u8 event;
636  int evHandlers;
637  int freereq = 1;
638 
639  switch (reply->u.hdr.Function) {
640  case MPI_FUNCTION_EVENT_NOTIFICATION:
641  pEventReply = (EventNotificationReply_t *)reply;
642  evHandlers = 0;
643  ProcessEventNotification(ioc, pEventReply, &evHandlers);
644  event = le32_to_cpu(pEventReply->Event) & 0xFF;
645  if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
646  freereq = 0;
647  if (event != MPI_EVENT_EVENT_CHANGE)
648  break;
649  case MPI_FUNCTION_CONFIG:
650  case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
651  ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
652  if (reply) {
653  ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
654  memcpy(ioc->mptbase_cmds.reply, reply,
655  min(MPT_DEFAULT_FRAME_SIZE,
656  4 * reply->u.reply.MsgLength));
657  }
658  if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
659  ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
660  complete(&ioc->mptbase_cmds.done);
661  } else
662  freereq = 0;
663  if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF)
664  freereq = 1;
665  break;
666  case MPI_FUNCTION_EVENT_ACK:
667  devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
668  "EventAck reply received\n", ioc->name));
669  break;
670  default:
671  printk(MYIOC_s_ERR_FMT
672  "Unexpected msg function (=%02Xh) reply received!\n",
673  ioc->name, reply->u.hdr.Function);
674  break;
675  }
676 
677  /*
678  * Conditionally tell caller to free the original
679  * EventNotification/EventAck/unexpected request frame!
680  */
681  return freereq;
682 }
683 
684 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
705 u8
706 mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
707 {
708  u8 cb_idx;
709  last_drv_idx = MPT_MAX_PROTOCOL_DRIVERS;
710 
711  /*
712  * Search for empty callback slot in this order: {N,...,7,6,5,...,1}
713  * (slot/handle 0 is reserved!)
714  */
715  for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
716  if (MptCallbacks[cb_idx] == NULL) {
717  MptCallbacks[cb_idx] = cbfunc;
718  MptDriverClass[cb_idx] = dclass;
719  MptEvHandlers[cb_idx] = NULL;
720  last_drv_idx = cb_idx;
721  strlcpy(MptCallbacksName[cb_idx], func_name,
722  MPT_MAX_CALLBACKNAME_LEN+1);
723  break;
724  }
725  }
726 
727  return last_drv_idx;
728 }
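/*
 * Editorial sketch (not part of mptbase.c): how a protocol driver would
 * hook into the callback table above.  The handler name, driver class and
 * module hooks below are placeholders, not code from the fusion drivers.
 */
static int example_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
			 MPT_FRAME_HDR *reply)
{
	/* returning non-zero asks mptbase to free the request frame */
	return 1;
}

static u8 example_cb_idx;

static int __init example_init(void)
{
	example_cb_idx = mpt_register(example_reply, MPTUNKNOWN_DRIVER,
				      "example_reply");
	if (example_cb_idx == MPT_MAX_PROTOCOL_DRIVERS)
		return -EBUSY;		/* no free callback slot */
	return 0;
}

static void __exit example_exit(void)
{
	mpt_deregister(example_cb_idx);
}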
729 
730 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
738 void
739 mpt_deregister(u8 cb_idx)
740 {
741  if (cb_idx && (cb_idx < MPT_MAX_PROTOCOL_DRIVERS)) {
742  MptCallbacks[cb_idx] = NULL;
743  MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER;
744  MptEvHandlers[cb_idx] = NULL;
745 
746  last_drv_idx++;
747  }
748 }
749 
750 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
761 int
762 mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc)
763 {
764  if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
765  return -1;
766 
767  MptEvHandlers[cb_idx] = ev_cbfunc;
768  return 0;
769 }
770 
771 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
780 void
781 mpt_event_deregister(u8 cb_idx)
782 {
783  if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
784  return;
785 
786  MptEvHandlers[cb_idx] = NULL;
787 }
788 
789 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
800 int
801 mpt_reset_register(u8 cb_idx, MPT_RESETHANDLER reset_func)
802 {
803  if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
804  return -1;
805 
806  MptResetHandlers[cb_idx] = reset_func;
807  return 0;
808 }
809 
810 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
819 void
820 mpt_reset_deregister(u8 cb_idx)
821 {
822  if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
823  return;
824 
825  MptResetHandlers[cb_idx] = NULL;
826 }
827 
828 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
834 int
835 mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, u8 cb_idx)
836 {
837  MPT_ADAPTER *ioc;
838  const struct pci_device_id *id;
839 
840  if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
841  return -EINVAL;
842 
843  MptDeviceDriverHandlers[cb_idx] = dd_cbfunc;
844 
845  /* call per pci device probe entry point */
846  list_for_each_entry(ioc, &ioc_list, list) {
847  id = ioc->pcidev->driver ?
848  ioc->pcidev->driver->id_table : NULL;
849  if (dd_cbfunc->probe)
850  dd_cbfunc->probe(ioc->pcidev, id);
851  }
852 
853  return 0;
854 }
855 
856 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
861 void
862 mpt_device_driver_deregister(u8 cb_idx)
863 {
864  struct mpt_pci_driver *dd_cbfunc;
865  MPT_ADAPTER *ioc;
866 
867  if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
868  return;
869 
870  dd_cbfunc = MptDeviceDriverHandlers[cb_idx];
871 
872  list_for_each_entry(ioc, &ioc_list, list) {
873  if (dd_cbfunc->remove)
874  dd_cbfunc->remove(ioc->pcidev);
875  }
876 
877  MptDeviceDriverHandlers[cb_idx] = NULL;
878 }
879 
880 
881 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
893 MPT_FRAME_HDR*
894 mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc)
895 {
896  MPT_FRAME_HDR *mf;
897  unsigned long flags;
898  u16 req_idx; /* Request index */
899 
900  /* validate handle and ioc identifier */
901 
902 #ifdef MFCNT
903  if (!ioc->active)
904  printk(MYIOC_s_WARN_FMT "IOC Not Active! mpt_get_msg_frame "
905  "returning NULL!\n", ioc->name);
906 #endif
907 
908  /* If interrupts are not attached, do not return a request frame */
909  if (!ioc->active)
910  return NULL;
911 
912  spin_lock_irqsave(&ioc->FreeQlock, flags);
913  if (!list_empty(&ioc->FreeQ)) {
914  int req_offset;
915 
916  mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
917  u.frame.linkage.list);
918  list_del(&mf->u.frame.linkage.list);
919  mf->u.frame.linkage.arg1 = 0;
920  mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
921  req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
922  /* u16! */
923  req_idx = req_offset / ioc->req_sz;
924  mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
925  mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
926  /* Default, will be changed if necessary in SG generation */
927  ioc->RequestNB[req_idx] = ioc->NB_for_64_byte_frame;
928 #ifdef MFCNT
929  ioc->mfcnt++;
930 #endif
931  }
932  else
933  mf = NULL;
934  spin_unlock_irqrestore(&ioc->FreeQlock, flags);
935 
936 #ifdef MFCNT
937  if (mf == NULL)
938  printk(MYIOC_s_WARN_FMT "IOC Active. No free Msg Frames! "
939  "Count 0x%x Max 0x%x\n", ioc->name, ioc->mfcnt,
940  ioc->req_depth);
941  mfcounter++;
942  if (mfcounter == PRINT_MF_COUNT)
943  printk(MYIOC_s_INFO_FMT "MF Count 0x%x Max 0x%x \n", ioc->name,
944  ioc->mfcnt, ioc->req_depth);
945 #endif
946 
947  dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_get_msg_frame(%d,%d), got mf=%p\n",
948  ioc->name, cb_idx, ioc->id, mf));
949  return mf;
950 }
951 
952 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
962 void
963 mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
964 {
965  u32 mf_dma_addr;
966  int req_offset;
967  u16 req_idx; /* Request index */
968 
969  /* ensure values are reset properly! */
970  mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
971  req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
972  /* u16! */
973  req_idx = req_offset / ioc->req_sz;
974  mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
975  mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
976 
977  DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);
978 
979  mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx];
980  dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d "
981  "RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx,
982  ioc->RequestNB[req_idx]));
983  CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr);
984 }
985 
998 void
999 mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
1000 {
1001  u32 mf_dma_addr;
1002  int req_offset;
1003  u16 req_idx; /* Request index */
1004 
1005  /* ensure values are reset properly! */
1006  mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
1007  req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
1008  req_idx = req_offset / ioc->req_sz;
1009  mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
1010  mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
1011 
1012  DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);
1013 
1014  mf_dma_addr = (ioc->req_frames_low_dma + req_offset);
1015  dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d\n",
1016  ioc->name, mf_dma_addr, req_idx));
1017  CHIPREG_WRITE32(&ioc->chip->RequestHiPriFifo, mf_dma_addr);
1018 }
1019 
1020 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1029 void
1030 mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
1031 {
1032  unsigned long flags;
1033 
1034  /* Put Request back on FreeQ! */
1035  spin_lock_irqsave(&ioc->FreeQlock, flags);
1036  if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf)
1037  goto out;
1038  /* signature to know if this mf is freed */
1039  mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
1040  list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
1041 #ifdef MFCNT
1042  ioc->mfcnt--;
1043 #endif
1044  out:
1045  spin_unlock_irqrestore(&ioc->FreeQlock, flags);
1046 }
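/*
 * Editorial sketch (not part of mptbase.c): the request frame life cycle a
 * protocol driver is expected to follow with the helpers above.  The
 * function name is a placeholder; example_cb_idx stands for the index
 * returned earlier by mpt_register().
 */
static void example_send_request(MPT_ADAPTER *ioc, u8 example_cb_idx)
{
	MPT_FRAME_HDR *mf;

	mf = mpt_get_msg_frame(example_cb_idx, ioc);
	if (mf == NULL)
		return;		/* FreeQ exhausted or IOC not active */

	/* ... build the MPI request in the frame here ... */

	mpt_put_msg_frame(example_cb_idx, ioc, mf);

	/*
	 * When the reply arrives, the registered callback's return value
	 * decides whether mptbase calls mpt_free_msg_frame() to put the
	 * frame back on the FreeQ.
	 */
}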
1047 
1048 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1058 static void
1059 mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1060 {
1061  SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
1062  pSge->FlagsLength = cpu_to_le32(flagslength);
1063  pSge->Address = cpu_to_le32(dma_addr);
1064 }
1065 
1075 static void
1076 mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1077 {
1078  SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
1079  pSge->Address.Low = cpu_to_le32
1080  (lower_32_bits(dma_addr));
1081  pSge->Address.High = cpu_to_le32
1082  (upper_32_bits(dma_addr));
1083  pSge->FlagsLength = cpu_to_le32
1084  ((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
1085 }
1086 
1096 static void
1097 mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1098 {
1099  SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
1100  u32 tmp;
1101 
1102  pSge->Address.Low = cpu_to_le32
1103  (lower_32_bits(dma_addr));
1104  tmp = (u32)(upper_32_bits(dma_addr));
1105 
1106  /*
1107  * 1078 errata workaround for the 36GB limitation
1108  */
1109  if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32) == 9) {
1110  flagslength |=
1111  MPT_SGE_FLAGS_LOCAL_ADDRESS;
1112  tmp |= (1<<31);
1113  if (mpt_debug_level & MPT_DEBUG_36GB_MEM)
1114  printk(KERN_DEBUG "1078 P0M2 addressing for "
1115  "addr = 0x%llx len = %d\n",
1116  (unsigned long long)dma_addr,
1117  MPI_SGE_LENGTH(flagslength));
1118  }
1119 
1120  pSge->Address.High = cpu_to_le32(tmp);
1121  pSge->FlagsLength = cpu_to_le32(
1122  (flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
1123 }
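/*
 * Editorial note (not part of mptbase.c): the 1078 test above fires when an
 * SGE ends inside the 36GB-40GB window, i.e. when
 *
 *     ((u64)dma_addr + length) >> 32 == 9
 *
 * For a hypothetical buffer at dma_addr = 0x8FFFFF000 with length 0x2000
 * the end address is 0x900001000, so the code sets bit 31 of Address.High
 * and OR's the local-address flag into FlagsLength (reported by the debug
 * print as "1078 P0M2 addressing").
 */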
1124 
1125 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1134 static void
1135 mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
1136 {
1137  SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
1138  pChain->Length = cpu_to_le16(length);
1139  pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1140  pChain->NextChainOffset = next;
1141  pChain->Address = cpu_to_le32(dma_addr);
1142 }
1143 
1144 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1153 static void
1154 mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
1155 {
1156  SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
1157  u32 tmp = dma_addr & 0xFFFFFFFF;
1158 
1159  pChain->Length = cpu_to_le16(length);
1160  pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
1161  MPI_SGE_FLAGS_64_BIT_ADDRESSING);
1162 
1163  pChain->NextChainOffset = next;
1164 
1165  pChain->Address.Low = cpu_to_le32(tmp);
1166  tmp = (u32)(upper_32_bits(dma_addr));
1167  pChain->Address.High = cpu_to_le32(tmp);
1168 }
1169 
1170 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1187 int
1188 mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag)
1189 {
1190  int r = 0;
1191  u8 *req_as_bytes;
1192  int ii;
1193 
1194  /* State is known to be good upon entering
1195  * this function so issue the bus reset
1196  * request.
1197  */
1198 
1199  /*
1200  * Emulate what mpt_put_msg_frame() does /wrt to sanity
1201  * setting cb_idx/req_idx. But ONLY if this request
1202  * is in proper (pre-alloc'd) request buffer range...
1203  */
1204  ii = MFPTR_2_MPT_INDEX(ioc,(MPT_FRAME_HDR*)req);
1205  if (reqBytes >= 12 && ii >= 0 && ii < ioc->req_depth) {
1206  MPT_FRAME_HDR *mf = (MPT_FRAME_HDR*)req;
1207  mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(ii);
1208  mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
1209  }
1210 
1211  /* Make sure there are no doorbells */
1212  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1213 
1214  CHIPREG_WRITE32(&ioc->chip->Doorbell,
1215  ((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
1216  ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
1217 
1218  /* Wait for IOC doorbell int */
1219  if ((ii = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) {
1220  return ii;
1221  }
1222 
1223  /* Read doorbell and check for active bit */
1224  if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
1225  return -5;
1226 
1227  dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_send_handshake_request start, WaitCnt=%d\n",
1228  ioc->name, ii));
1229 
1230  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1231 
1232  if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
1233  return -2;
1234  }
1235 
1236  /* Send request via doorbell handshake */
1237  req_as_bytes = (u8 *) req;
1238  for (ii = 0; ii < reqBytes/4; ii++) {
1239  u32 word;
1240 
1241  word = ((req_as_bytes[(ii*4) + 0] << 0) |
1242  (req_as_bytes[(ii*4) + 1] << 8) |
1243  (req_as_bytes[(ii*4) + 2] << 16) |
1244  (req_as_bytes[(ii*4) + 3] << 24));
1245  CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
1246  if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
1247  r = -3;
1248  break;
1249  }
1250  }
1251 
1252  if (r >= 0 && WaitForDoorbellInt(ioc, 10, sleepFlag) >= 0)
1253  r = 0;
1254  else
1255  r = -4;
1256 
1257  /* Make sure there are no doorbells */
1258  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1259 
1260  return r;
1261 }
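/*
 * Editorial note (not part of mptbase.c): the handshake path bypasses the
 * request FIFO entirely.  Each 32-bit word of the request is assembled
 * little-endian from the frame bytes, written to the Doorbell register,
 * and acknowledged by the IOC before the next word is sent, which is why
 * WaitForDoorbellAck() is called once per dword.
 */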
1262 
1263 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1282 static int
1283 mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
1284 {
1285  int r = 0;
1286 
1287  /* return if in use */
1288  if (CHIPREG_READ32(&ioc->chip->Doorbell)
1289  & MPI_DOORBELL_ACTIVE)
1290  return -1;
1291 
1292  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1293 
1294  CHIPREG_WRITE32(&ioc->chip->Doorbell,
1295  ((MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL
1296  <<MPI_DOORBELL_FUNCTION_SHIFT) |
1297  (access_control_value<<12)));
1298 
1299  /* Wait for IOC to clear Doorbell Status bit */
1300  if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
1301  return -2;
1302  }else
1303  return 0;
1304 }
1305 
1306 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1315 static int
1316 mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
1317 {
1318  char *psge;
1319  int flags_length;
1320  u32 host_page_buffer_sz=0;
1321 
1322  if(!ioc->HostPageBuffer) {
1323 
1324  host_page_buffer_sz =
1325  le32_to_cpu(ioc->facts.HostPageBufferSGE.FlagsLength) & 0xFFFFFF;
1326 
1327  if(!host_page_buffer_sz)
1328  return 0; /* fw doesn't need any host buffers */
1329 
1330  /* spin till we get enough memory */
1331  while(host_page_buffer_sz > 0) {
1332 
1333  if((ioc->HostPageBuffer = pci_alloc_consistent(
1334  ioc->pcidev,
1335  host_page_buffer_sz,
1336  &ioc->HostPageBuffer_dma)) != NULL) {
1337 
1338  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1339  "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
1340  ioc->name, ioc->HostPageBuffer,
1341  (u32)ioc->HostPageBuffer_dma,
1342  host_page_buffer_sz));
1343  ioc->alloc_total += host_page_buffer_sz;
1344  ioc->HostPageBuffer_sz = host_page_buffer_sz;
1345  break;
1346  }
1347 
1348  host_page_buffer_sz -= (4*1024);
1349  }
1350  }
1351 
1352  if(!ioc->HostPageBuffer) {
1353  printk(MYIOC_s_ERR_FMT
1354  "Failed to alloc memory for host_page_buffer!\n",
1355  ioc->name);
1356  return -999;
1357  }
1358 
1359  psge = (char *)&ioc_init->HostPageBufferSGE;
1360  flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1361  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
1362  MPI_SGE_FLAGS_HOST_TO_IOC |
1363  MPI_SGE_FLAGS_END_OF_BUFFER;
1364  flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
1365  flags_length |= ioc->HostPageBuffer_sz;
1366  ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
1367  ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
1368 
1369 return 0;
1370 }
1371 
1372 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1384 int
1385 mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
1386 {
1387  MPT_ADAPTER *ioc;
1388 
1389  list_for_each_entry(ioc,&ioc_list,list) {
1390  if (ioc->id == iocid) {
1391  *iocpp =ioc;
1392  return iocid;
1393  }
1394  }
1395 
1396  *iocpp = NULL;
1397  return -1;
1398 }
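/*
 * Editorial sketch (not part of mptbase.c): resolving an adapter from its
 * numeric id, e.g. on an ioctl path.  The function name is a placeholder.
 */
static MPT_ADAPTER *example_lookup_ioc(int iocid)
{
	MPT_ADAPTER *ioc = NULL;

	/* returns iocid on success, -1 if no adapter has that id */
	if (mpt_verify_adapter(iocid, &ioc) < 0)
		return NULL;
	return ioc;
}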
1399 
1411 static void
1412 mpt_get_product_name(u16 vendor, u16 device, u8 revision, char *prod_name)
1413 {
1414  char *product_str = NULL;
1415 
1416  if (vendor == PCI_VENDOR_ID_BROCADE) {
1417  switch (device)
1418  {
1419  case MPI_MANUFACTPAGE_DEVICEID_FC949E:
1420  switch (revision)
1421  {
1422  case 0x00:
1423  product_str = "BRE040 A0";
1424  break;
1425  case 0x01:
1426  product_str = "BRE040 A1";
1427  break;
1428  default:
1429  product_str = "BRE040";
1430  break;
1431  }
1432  break;
1433  }
1434  goto out;
1435  }
1436 
1437  switch (device)
1438  {
1439  case MPI_MANUFACTPAGE_DEVICEID_FC909:
1440  product_str = "LSIFC909 B1";
1441  break;
1442  case MPI_MANUFACTPAGE_DEVICEID_FC919:
1443  product_str = "LSIFC919 B0";
1444  break;
1445  case MPI_MANUFACTPAGE_DEVICEID_FC929:
1446  product_str = "LSIFC929 B0";
1447  break;
1448  case MPI_MANUFACTPAGE_DEVICEID_FC919X:
1449  if (revision < 0x80)
1450  product_str = "LSIFC919X A0";
1451  else
1452  product_str = "LSIFC919XL A1";
1453  break;
1454  case MPI_MANUFACTPAGE_DEVICEID_FC929X:
1455  if (revision < 0x80)
1456  product_str = "LSIFC929X A0";
1457  else
1458  product_str = "LSIFC929XL A1";
1459  break;
1460  case MPI_MANUFACTPAGE_DEVICEID_FC939X:
1461  product_str = "LSIFC939X A1";
1462  break;
1463  case MPI_MANUFACTPAGE_DEVICEID_FC949X:
1464  product_str = "LSIFC949X A1";
1465  break;
1466  case MPI_MANUFACTPAGE_DEVICEID_FC949E:
1467  switch (revision)
1468  {
1469  case 0x00:
1470  product_str = "LSIFC949E A0";
1471  break;
1472  case 0x01:
1473  product_str = "LSIFC949E A1";
1474  break;
1475  default:
1476  product_str = "LSIFC949E";
1477  break;
1478  }
1479  break;
1480  case MPI_MANUFACTPAGE_DEVID_53C1030:
1481  switch (revision)
1482  {
1483  case 0x00:
1484  product_str = "LSI53C1030 A0";
1485  break;
1486  case 0x01:
1487  product_str = "LSI53C1030 B0";
1488  break;
1489  case 0x03:
1490  product_str = "LSI53C1030 B1";
1491  break;
1492  case 0x07:
1493  product_str = "LSI53C1030 B2";
1494  break;
1495  case 0x08:
1496  product_str = "LSI53C1030 C0";
1497  break;
1498  case 0x80:
1499  product_str = "LSI53C1030T A0";
1500  break;
1501  case 0x83:
1502  product_str = "LSI53C1030T A2";
1503  break;
1504  case 0x87:
1505  product_str = "LSI53C1030T A3";
1506  break;
1507  case 0xc1:
1508  product_str = "LSI53C1020A A1";
1509  break;
1510  default:
1511  product_str = "LSI53C1030";
1512  break;
1513  }
1514  break;
1515  case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
1516  switch (revision)
1517  {
1518  case 0x03:
1519  product_str = "LSI53C1035 A2";
1520  break;
1521  case 0x04:
1522  product_str = "LSI53C1035 B0";
1523  break;
1524  default:
1525  product_str = "LSI53C1035";
1526  break;
1527  }
1528  break;
1529  case MPI_MANUFACTPAGE_DEVID_SAS1064:
1530  switch (revision)
1531  {
1532  case 0x00:
1533  product_str = "LSISAS1064 A1";
1534  break;
1535  case 0x01:
1536  product_str = "LSISAS1064 A2";
1537  break;
1538  case 0x02:
1539  product_str = "LSISAS1064 A3";
1540  break;
1541  case 0x03:
1542  product_str = "LSISAS1064 A4";
1543  break;
1544  default:
1545  product_str = "LSISAS1064";
1546  break;
1547  }
1548  break;
1549  case MPI_MANUFACTPAGE_DEVID_SAS1064E:
1550  switch (revision)
1551  {
1552  case 0x00:
1553  product_str = "LSISAS1064E A0";
1554  break;
1555  case 0x01:
1556  product_str = "LSISAS1064E B0";
1557  break;
1558  case 0x02:
1559  product_str = "LSISAS1064E B1";
1560  break;
1561  case 0x04:
1562  product_str = "LSISAS1064E B2";
1563  break;
1564  case 0x08:
1565  product_str = "LSISAS1064E B3";
1566  break;
1567  default:
1568  product_str = "LSISAS1064E";
1569  break;
1570  }
1571  break;
1572  case MPI_MANUFACTPAGE_DEVID_SAS1068:
1573  switch (revision)
1574  {
1575  case 0x00:
1576  product_str = "LSISAS1068 A0";
1577  break;
1578  case 0x01:
1579  product_str = "LSISAS1068 B0";
1580  break;
1581  case 0x02:
1582  product_str = "LSISAS1068 B1";
1583  break;
1584  default:
1585  product_str = "LSISAS1068";
1586  break;
1587  }
1588  break;
1589  case MPI_MANUFACTPAGE_DEVID_SAS1068E:
1590  switch (revision)
1591  {
1592  case 0x00:
1593  product_str = "LSISAS1068E A0";
1594  break;
1595  case 0x01:
1596  product_str = "LSISAS1068E B0";
1597  break;
1598  case 0x02:
1599  product_str = "LSISAS1068E B1";
1600  break;
1601  case 0x04:
1602  product_str = "LSISAS1068E B2";
1603  break;
1604  case 0x08:
1605  product_str = "LSISAS1068E B3";
1606  break;
1607  default:
1608  product_str = "LSISAS1068E";
1609  break;
1610  }
1611  break;
1612  case MPI_MANUFACTPAGE_DEVID_SAS1078:
1613  switch (revision)
1614  {
1615  case 0x00:
1616  product_str = "LSISAS1078 A0";
1617  break;
1618  case 0x01:
1619  product_str = "LSISAS1078 B0";
1620  break;
1621  case 0x02:
1622  product_str = "LSISAS1078 C0";
1623  break;
1624  case 0x03:
1625  product_str = "LSISAS1078 C1";
1626  break;
1627  case 0x04:
1628  product_str = "LSISAS1078 C2";
1629  break;
1630  default:
1631  product_str = "LSISAS1078";
1632  break;
1633  }
1634  break;
1635  }
1636 
1637  out:
1638  if (product_str)
1639  sprintf(prod_name, "%s", product_str);
1640 }
1641 
1647 static int
1648 mpt_mapresources(MPT_ADAPTER *ioc)
1649 {
1650  u8 __iomem *mem;
1651  int ii;
1652  resource_size_t mem_phys;
1653  unsigned long port;
1654  u32 msize;
1655  u32 psize;
1656  int r = -ENODEV;
1657  struct pci_dev *pdev;
1658 
1659  pdev = ioc->pcidev;
1660  ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1661  if (pci_enable_device_mem(pdev)) {
1662  printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() "
1663  "failed\n", ioc->name);
1664  return r;
1665  }
1666  if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
1667  printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
1668  "MEM failed\n", ioc->name);
1669  goto out_pci_disable_device;
1670  }
1671 
1672  if (sizeof(dma_addr_t) > 4) {
1673  const uint64_t required_mask = dma_get_required_mask
1674  (&pdev->dev);
1675  if (required_mask > DMA_BIT_MASK(32)
1676  && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
1677  && !pci_set_consistent_dma_mask(pdev,
1678  DMA_BIT_MASK(64))) {
1679  ioc->dma_mask = DMA_BIT_MASK(64);
1680  dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1681  ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1682  ioc->name));
1683  } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1684  && !pci_set_consistent_dma_mask(pdev,
1685  DMA_BIT_MASK(32))) {
1686  ioc->dma_mask = DMA_BIT_MASK(32);
1687  dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1688  ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1689  ioc->name));
1690  } else {
1691  printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
1692  ioc->name, pci_name(pdev));
1693  goto out_pci_release_region;
1694  }
1695  } else {
1696  if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1697  && !pci_set_consistent_dma_mask(pdev,
1698  DMA_BIT_MASK(32))) {
1699  ioc->dma_mask = DMA_BIT_MASK(32);
1700  dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1701  ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1702  ioc->name));
1703  } else {
1704  printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
1705  ioc->name, pci_name(pdev));
1706  goto out_pci_release_region;
1707  }
1708  }
1709 
1710  mem_phys = msize = 0;
1711  port = psize = 0;
1712  for (ii = 0; ii < DEVICE_COUNT_RESOURCE; ii++) {
1713  if (pci_resource_flags(pdev, ii) & PCI_BASE_ADDRESS_SPACE_IO) {
1714  if (psize)
1715  continue;
1716  /* Get I/O space! */
1717  port = pci_resource_start(pdev, ii);
1718  psize = pci_resource_len(pdev, ii);
1719  } else {
1720  if (msize)
1721  continue;
1722  /* Get memmap */
1723  mem_phys = pci_resource_start(pdev, ii);
1724  msize = pci_resource_len(pdev, ii);
1725  }
1726  }
1727  ioc->mem_size = msize;
1728 
1729  mem = NULL;
1730  /* Get logical ptr for PciMem0 space */
1731  /*mem = ioremap(mem_phys, msize);*/
1732  mem = ioremap(mem_phys, msize);
1733  if (mem == NULL) {
1734  printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter"
1735  " memory!\n", ioc->name);
1736  r = -EINVAL;
1737  goto out_pci_release_region;
1738  }
1739  ioc->memmap = mem;
1740  dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
1741  ioc->name, mem, (unsigned long long)mem_phys));
1742 
1743  ioc->mem_phys = mem_phys;
1744  ioc->chip = (SYSIF_REGS __iomem *)mem;
1745 
1746  /* Save Port IO values in case we need to do downloadboot */
1747  ioc->pio_mem_phys = port;
1748  ioc->pio_chip = (SYSIF_REGS __iomem *)port;
1749 
1750  return 0;
1751 
1752 out_pci_release_region:
1753  pci_release_selected_regions(pdev, ioc->bars);
1754 out_pci_disable_device:
1755  pci_disable_device(pdev);
1756  return r;
1757 }
1758 
1759 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1777 int
1778 mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1779 {
1780  MPT_ADAPTER *ioc;
1781  u8 cb_idx;
1782  int r = -ENODEV;
1783  u8 pcixcmd;
1784  static int mpt_ids = 0;
1785 #ifdef CONFIG_PROC_FS
1786  struct proc_dir_entry *dent;
1787 #endif
1788 
1789  ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC);
1790  if (ioc == NULL) {
1791  printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
1792  return -ENOMEM;
1793  }
1794 
1795  ioc->id = mpt_ids++;
1796  sprintf(ioc->name, "ioc%d", ioc->id);
1797  dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n"));
1798 
1799  /*
1800  * set initial debug level
1801  * (refer to mptdebug.h)
1802  *
1803  */
1804  ioc->debug_level = mpt_debug_level;
1805  if (mpt_debug_level)
1806  printk(KERN_INFO "mpt_debug_level=%xh\n", mpt_debug_level);
1807 
1808  dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name));
1809 
1810  ioc->pcidev = pdev;
1811  if (mpt_mapresources(ioc)) {
1812  kfree(ioc);
1813  return r;
1814  }
1815 
1816  /*
1817  * Setting up proper handlers for scatter gather handling
1818  */
1819  if (ioc->dma_mask == DMA_BIT_MASK(64)) {
1820  if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
1821  ioc->add_sge = &mpt_add_sge_64bit_1078;
1822  else
1823  ioc->add_sge = &mpt_add_sge_64bit;
1824  ioc->add_chain = &mpt_add_chain_64bit;
1825  ioc->sg_addr_size = 8;
1826  } else {
1827  ioc->add_sge = &mpt_add_sge;
1828  ioc->add_chain = &mpt_add_chain;
1829  ioc->sg_addr_size = 4;
1830  }
1831  ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
1832 
1833  ioc->alloc_total = sizeof(MPT_ADAPTER);
1834  ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
1835  ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
1836 
1837 
1838  spin_lock_init(&ioc->taskmgmt_lock);
1839  mutex_init(&ioc->internal_cmds.mutex);
1840  init_completion(&ioc->internal_cmds.done);
1841  mutex_init(&ioc->mptbase_cmds.mutex);
1842  init_completion(&ioc->mptbase_cmds.done);
1843  mutex_init(&ioc->taskmgmt_cmds.mutex);
1844  init_completion(&ioc->taskmgmt_cmds.done);
1845 
1846  /* Initialize the event logging.
1847  */
1848  ioc->eventTypes = 0; /* None */
1849  ioc->eventContext = 0;
1850  ioc->eventLogSize = 0;
1851  ioc->events = NULL;
1852 
1853 #ifdef MFCNT
1854  ioc->mfcnt = 0;
1855 #endif
1856 
1857  ioc->sh = NULL;
1858  ioc->cached_fw = NULL;
1859 
1860  /* Initialize SCSI Config Data structure
1861  */
1862  memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
1863 
1864  /* Initialize the fc rport list head.
1865  */
1866  INIT_LIST_HEAD(&ioc->fc_rports);
1867 
1868  /* Find lookup slot. */
1869  INIT_LIST_HEAD(&ioc->list);
1870 
1871 
1872  /* Initialize workqueue */
1873  INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
1874 
1875  snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
1876  "mpt_poll_%d", ioc->id);
1877  ioc->reset_work_q =
1878  create_singlethread_workqueue(ioc->reset_work_q_name);
1879  if (!ioc->reset_work_q) {
1880  printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
1881  ioc->name);
1882  pci_release_selected_regions(pdev, ioc->bars);
1883  kfree(ioc);
1884  return -ENOMEM;
1885  }
1886 
1887  dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
1888  ioc->name, &ioc->facts, &ioc->pfacts[0]));
1889 
1890  mpt_get_product_name(pdev->vendor, pdev->device, pdev->revision,
1891  ioc->prod_name);
1892 
1893  switch (pdev->device)
1894  {
1895  case MPI_MANUFACTPAGE_DEVICEID_FC939X:
1896  case MPI_MANUFACTPAGE_DEVICEID_FC949X:
1897  ioc->errata_flag_1064 = 1;
1898  case MPI_MANUFACTPAGE_DEVICEID_FC909:
1899  case MPI_MANUFACTPAGE_DEVICEID_FC929:
1900  case MPI_MANUFACTPAGE_DEVICEID_FC919:
1901  case MPI_MANUFACTPAGE_DEVICEID_FC949E:
1902  ioc->bus_type = FC;
1903  break;
1904 
1905  case MPI_MANUFACTPAGE_DEVICEID_FC929X:
1906  if (pdev->revision < XL_929) {
1907  /* 929X Chip Fix. Set Split transactions level
1908  * for PCIX. Set MOST bits to zero.
1909  */
1910  pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1911  pcixcmd &= 0x8F;
1912  pci_write_config_byte(pdev, 0x6a, pcixcmd);
1913  } else {
1914  /* 929XL Chip Fix. Set MMRBC to 0x08.
1915  */
1916  pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1917  pcixcmd |= 0x08;
1918  pci_write_config_byte(pdev, 0x6a, pcixcmd);
1919  }
1920  ioc->bus_type = FC;
1921  break;
1922 
1923  case MPI_MANUFACTPAGE_DEVICEID_FC919X:
1924  /* 919X Chip Fix. Set Split transactions level
1925  * for PCIX. Set MOST bits to zero.
1926  */
1927  pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1928  pcixcmd &= 0x8F;
1929  pci_write_config_byte(pdev, 0x6a, pcixcmd);
1930  ioc->bus_type = FC;
1931  break;
1932 
1933  case MPI_MANUFACTPAGE_DEVID_53C1030:
1934  /* 1030 Chip Fix. Disable Split transactions
1935  * for PCIX. Set MOST bits to zero if Rev < C0( = 8).
1936  */
1937  if (pdev->revision < C0_1030) {
1938  pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1939  pcixcmd &= 0x8F;
1940  pci_write_config_byte(pdev, 0x6a, pcixcmd);
1941  }
1942 
1943  case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
1944  ioc->bus_type = SPI;
1945  break;
1946 
1947  case MPI_MANUFACTPAGE_DEVID_SAS1064:
1948  case MPI_MANUFACTPAGE_DEVID_SAS1068:
1949  ioc->errata_flag_1064 = 1;
1950  ioc->bus_type = SAS;
1951  break;
1952 
1953  case MPI_MANUFACTPAGE_DEVID_SAS1064E:
1954  case MPI_MANUFACTPAGE_DEVID_SAS1068E:
1955  case MPI_MANUFACTPAGE_DEVID_SAS1078:
1956  ioc->bus_type = SAS;
1957  break;
1958  }
1959 
1960 
1961  switch (ioc->bus_type) {
1962 
1963  case SAS:
1964  ioc->msi_enable = mpt_msi_enable_sas;
1965  break;
1966 
1967  case SPI:
1968  ioc->msi_enable = mpt_msi_enable_spi;
1969  break;
1970 
1971  case FC:
1972  ioc->msi_enable = mpt_msi_enable_fc;
1973  break;
1974 
1975  default:
1976  ioc->msi_enable = 0;
1977  break;
1978  }
1979 
1980  ioc->fw_events_off = 1;
1981 
1982  if (ioc->errata_flag_1064)
1983  pci_disable_io_access(pdev);
1984 
1985  spin_lock_init(&ioc->FreeQlock);
1986 
1987  /* Disable all! */
1988  CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
1989  ioc->active = 0;
1990  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1991 
1992  /* Set IOC ptr in the pcidev's driver data. */
1993  pci_set_drvdata(ioc->pcidev, ioc);
1994 
1995  /* Set lookup ptr. */
1996  list_add_tail(&ioc->list, &ioc_list);
1997 
1998  /* Check for "bound ports" (929, 929X, 1030, 1035) to reduce redundant resets.
1999  */
2000  mpt_detect_bound_ports(ioc, pdev);
2001 
2002  INIT_LIST_HEAD(&ioc->fw_event_list);
2003  spin_lock_init(&ioc->fw_event_lock);
2004  snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
2005  ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
2006 
2007  if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
2008  CAN_SLEEP)) != 0){
2009  printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
2010  ioc->name, r);
2011 
2012  list_del(&ioc->list);
2013  if (ioc->alt_ioc)
2014  ioc->alt_ioc->alt_ioc = NULL;
2015  iounmap(ioc->memmap);
2016  if (r != -5)
2017  pci_release_selected_regions(pdev, ioc->bars);
2018 
2019  destroy_workqueue(ioc->reset_work_q);
2020  ioc->reset_work_q = NULL;
2021 
2022  kfree(ioc);
2023  pci_set_drvdata(pdev, NULL);
2024  return r;
2025  }
2026 
2027  /* call per device driver probe entry point */
2028  for(cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
2029  if(MptDeviceDriverHandlers[cb_idx] &&
2030  MptDeviceDriverHandlers[cb_idx]->probe) {
2031  MptDeviceDriverHandlers[cb_idx]->probe(pdev,id);
2032  }
2033  }
2034 
2035 #ifdef CONFIG_PROC_FS
2036  /*
2037  * Create "/proc/mpt/iocN" subdirectory entry for each MPT adapter.
2038  */
2039  dent = proc_mkdir(ioc->name, mpt_proc_root_dir);
2040  if (dent) {
2041  proc_create_data("info", S_IRUGO, dent, &mpt_iocinfo_proc_fops, ioc);
2042  proc_create_data("summary", S_IRUGO, dent, &mpt_summary_proc_fops, ioc);
2043  }
2044 #endif
2045 
2046  if (!ioc->alt_ioc)
2047  queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
2048  msecs_to_jiffies(MPT_POLLING_INTERVAL));
2049 
2050  return 0;
2051 }
2052 
2053 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2059 void
2060 mpt_detach(struct pci_dev *pdev)
2061 {
2062  MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2063  char pname[32];
2064  u8 cb_idx;
2065  unsigned long flags;
2066  struct workqueue_struct *wq;
2067 
2068  /*
2069  * Stop polling ioc for fault condition
2070  */
2071  spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
2072  wq = ioc->reset_work_q;
2073  ioc->reset_work_q = NULL;
2074  spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
2075  cancel_delayed_work(&ioc->fault_reset_work);
2076  destroy_workqueue(wq);
2077 
2078  spin_lock_irqsave(&ioc->fw_event_lock, flags);
2079  wq = ioc->fw_event_q;
2080  ioc->fw_event_q = NULL;
2081  spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
2082  destroy_workqueue(wq);
2083 
2084  sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
2085  remove_proc_entry(pname, NULL);
2086  sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/info", ioc->name);
2087  remove_proc_entry(pname, NULL);
2088  sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name);
2089  remove_proc_entry(pname, NULL);
2090 
2091  /* call per device driver remove entry point */
2092  for(cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
2093  if(MptDeviceDriverHandlers[cb_idx] &&
2094  MptDeviceDriverHandlers[cb_idx]->remove) {
2095  MptDeviceDriverHandlers[cb_idx]->remove(pdev);
2096  }
2097  }
2098 
2099  /* Disable interrupts! */
2100  CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
2101 
2102  ioc->active = 0;
2103  synchronize_irq(pdev->irq);
2104 
2105  /* Clear any lingering interrupt */
2106  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
2107 
2108  CHIPREG_READ32(&ioc->chip->IntStatus);
2109 
2110  mpt_adapter_dispose(ioc);
2111 
2112 }
2113 
2114 /**************************************************************************
2115  * Power Management
2116  */
2117 #ifdef CONFIG_PM
2118 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2124 int
2125 mpt_suspend(struct pci_dev *pdev, pm_message_t state)
2126 {
2127  u32 device_state;
2128  MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2129 
2130  device_state = pci_choose_state(pdev, state);
2131  printk(MYIOC_s_INFO_FMT "pci-suspend: pdev=0x%p, slot=%s, Entering "
2132  "operating state [D%d]\n", ioc->name, pdev, pci_name(pdev),
2133  device_state);
2134 
2135  /* put ioc into READY_STATE */
2136  if(SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
2137  printk(MYIOC_s_ERR_FMT
2138  "pci-suspend: IOC msg unit reset failed!\n", ioc->name);
2139  }
2140 
2141  /* disable interrupts */
2142  CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
2143  ioc->active = 0;
2144 
2145  /* Clear any lingering interrupt */
2146  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
2147 
2148  free_irq(ioc->pci_irq, ioc);
2149  if (ioc->msi_enable)
2150  pci_disable_msi(ioc->pcidev);
2151  ioc->pci_irq = -1;
2152  pci_save_state(pdev);
2153  pci_disable_device(pdev);
2154  pci_release_selected_regions(pdev, ioc->bars);
2155  pci_set_power_state(pdev, device_state);
2156  return 0;
2157 }
2158 
2159 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2164 int
2165 mpt_resume(struct pci_dev *pdev)
2166 {
2167  MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2168  u32 device_state = pdev->current_state;
2169  int recovery_state;
2170  int err;
2171 
2172  printk(MYIOC_s_INFO_FMT "pci-resume: pdev=0x%p, slot=%s, Previous "
2173  "operating state [D%d]\n", ioc->name, pdev, pci_name(pdev),
2174  device_state);
2175 
2176  pci_set_power_state(pdev, PCI_D0);
2177  pci_enable_wake(pdev, PCI_D0, 0);
2178  pci_restore_state(pdev);
2179  ioc->pcidev = pdev;
2180  err = mpt_mapresources(ioc);
2181  if (err)
2182  return err;
2183 
2184  if (ioc->dma_mask == DMA_BIT_MASK(64)) {
2185  if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
2186  ioc->add_sge = &mpt_add_sge_64bit_1078;
2187  else
2188  ioc->add_sge = &mpt_add_sge_64bit;
2189  ioc->add_chain = &mpt_add_chain_64bit;
2190  ioc->sg_addr_size = 8;
2191  } else {
2192 
2193  ioc->add_sge = &mpt_add_sge;
2194  ioc->add_chain = &mpt_add_chain;
2195  ioc->sg_addr_size = 4;
2196  }
2197  ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
2198 
2199  printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
2200  ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
2201  CHIPREG_READ32(&ioc->chip->Doorbell));
2202 
2203  /*
2204  * Errata workaround for SAS pci express:
2205  * Upon returning to the D0 state, the contents of the doorbell will be
2206  * stale data, and this will incorrectly signal to the host driver that
2207  * the firmware is ready to process mpt commands. The workaround is
2208  * to issue a diagnostic reset.
2209  */
2210  if (ioc->bus_type == SAS && (pdev->device ==
2211  MPI_MANUFACTPAGE_DEVID_SAS1068E || pdev->device ==
2212  MPI_MANUFACTPAGE_DEVID_SAS1064E)) {
2213  if (KickStart(ioc, 1, CAN_SLEEP) < 0) {
2214  printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover\n",
2215  ioc->name);
2216  goto out;
2217  }
2218  }
2219 
2220  /* bring ioc to operational state */
2221  printk(MYIOC_s_INFO_FMT "Sending mpt_do_ioc_recovery\n", ioc->name);
2222  recovery_state = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
2223  CAN_SLEEP);
2224  if (recovery_state != 0)
2225  printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover, "
2226  "error:[%x]\n", ioc->name, recovery_state);
2227  else
2228  printk(MYIOC_s_INFO_FMT
2229  "pci-resume: success\n", ioc->name);
2230  out:
2231  return 0;
2232 
2233 }
2234 #endif
2235 
2236 static int
2237 mpt_signal_reset(u8 index, MPT_ADAPTER *ioc, int reset_phase)
2238 {
2239  if ((MptDriverClass[index] == MPTSPI_DRIVER &&
2240  ioc->bus_type != SPI) ||
2241  (MptDriverClass[index] == MPTFC_DRIVER &&
2242  ioc->bus_type != FC) ||
2243  (MptDriverClass[index] == MPTSAS_DRIVER &&
2244  ioc->bus_type != SAS))
2245  /* make sure we only call the relevant reset handler
2246  * for the bus */
2247  return 0;
2248  return (MptResetHandlers[index])(ioc, reset_phase);
2249 }
2250 
2251 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2273 static int
2274 mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2275 {
2276  int hard_reset_done = 0;
2277  int alt_ioc_ready = 0;
2278  int hard;
2279  int rc=0;
2280  int ii;
2281  int ret = 0;
2282  int reset_alt_ioc_active = 0;
2283  int irq_allocated = 0;
2284  u8 *a;
2285 
2286  printk(MYIOC_s_INFO_FMT "Initiating %s\n", ioc->name,
2287  reason == MPT_HOSTEVENT_IOC_BRINGUP ? "bringup" : "recovery");
2288 
2289  /* Disable reply interrupts (also blocks FreeQ) */
2290  CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
2291  ioc->active = 0;
2292 
2293  if (ioc->alt_ioc) {
2294  if (ioc->alt_ioc->active ||
2295  reason == MPT_HOSTEVENT_IOC_RECOVER) {
2296  reset_alt_ioc_active = 1;
2297  /* Disable alt-IOC's reply interrupts
2298  * (and FreeQ) for a bit
2299  **/
2300  CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
2301  0xFFFFFFFF);
2302  ioc->alt_ioc->active = 0;
2303  }
2304  }
2305 
2306  hard = 1;
2307  if (reason == MPT_HOSTEVENT_IOC_BRINGUP)
2308  hard = 0;
2309 
2310  if ((hard_reset_done = MakeIocReady(ioc, hard, sleepFlag)) < 0) {
2311  if (hard_reset_done == -4) {
2312  printk(MYIOC_s_WARN_FMT "Owned by PEER..skipping!\n",
2313  ioc->name);
2314 
2315  if (reset_alt_ioc_active && ioc->alt_ioc) {
2316  /* (re)Enable alt-IOC! (reply interrupt, FreeQ) */
2317  dprintk(ioc, printk(MYIOC_s_INFO_FMT
2318  "alt_ioc reply irq re-enabled\n", ioc->alt_ioc->name));
2319  CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM);
2320  ioc->alt_ioc->active = 1;
2321  }
2322 
2323  } else {
2324  printk(MYIOC_s_WARN_FMT
2325  "NOT READY WARNING!\n", ioc->name);
2326  }
2327  ret = -1;
2328  goto out;
2329  }
2330 
2331  /* hard_reset_done = 0 if a soft reset was performed
2332  * and 1 if a hard reset was performed.
2333  */
2334  if (hard_reset_done && reset_alt_ioc_active && ioc->alt_ioc) {
2335  if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
2336  alt_ioc_ready = 1;
2337  else
2338  printk(MYIOC_s_WARN_FMT
2339  ": alt-ioc Not ready WARNING!\n",
2340  ioc->alt_ioc->name);
2341  }
2342 
2343  for (ii=0; ii<5; ii++) {
2344  /* Get IOC facts! Allow 5 retries */
2345  if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0)
2346  break;
2347  }
2348 
2349 
2350  if (ii == 5) {
2351  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2352  "Retry IocFacts failed rc=%x\n", ioc->name, rc));
2353  ret = -2;
2354  } else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
2355  MptDisplayIocCapabilities(ioc);
2356  }
2357 
2358  if (alt_ioc_ready) {
2359  if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
2360  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2361  "Initial Alt IocFacts failed rc=%x\n",
2362  ioc->name, rc));
2363  /* Retry - alt IOC was initialized once
2364  */
2365  rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
2366  }
2367  if (rc) {
2368  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2369  "Retry Alt IocFacts failed rc=%x\n", ioc->name, rc));
2370  alt_ioc_ready = 0;
2371  reset_alt_ioc_active = 0;
2372  } else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
2373  MptDisplayIocCapabilities(ioc->alt_ioc);
2374  }
2375  }
2376 
2377  if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP) &&
2378  (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)) {
2379  pci_release_selected_regions(ioc->pcidev, ioc->bars);
2380  ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM |
2381  IORESOURCE_IO);
2382  if (pci_enable_device(ioc->pcidev))
2383  return -5;
2384  if (pci_request_selected_regions(ioc->pcidev, ioc->bars,
2385  "mpt"))
2386  return -5;
2387  }
2388 
2389  /*
2390  * Device is reset now. It must have de-asserted the interrupt line
2391  * (if it was asserted) and it should be safe to register for the
2392  * interrupt now.
2393  */
2394  if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
2395  ioc->pci_irq = -1;
2396  if (ioc->pcidev->irq) {
2397  if (ioc->msi_enable && !pci_enable_msi(ioc->pcidev))
2398  printk(MYIOC_s_INFO_FMT "PCI-MSI enabled\n",
2399  ioc->name);
2400  else
2401  ioc->msi_enable = 0;
2402  rc = request_irq(ioc->pcidev->irq, mpt_interrupt,
2403  IRQF_SHARED, ioc->name, ioc);
2404  if (rc < 0) {
2405  printk(MYIOC_s_ERR_FMT "Unable to allocate "
2406  "interrupt %d!\n",
2407  ioc->name, ioc->pcidev->irq);
2408  if (ioc->msi_enable)
2409  pci_disable_msi(ioc->pcidev);
2410  ret = -EBUSY;
2411  goto out;
2412  }
2413  irq_allocated = 1;
2414  ioc->pci_irq = ioc->pcidev->irq;
2415  pci_set_master(ioc->pcidev); /* ?? */
2416  pci_set_drvdata(ioc->pcidev, ioc);
2417  dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2418  "installed at interrupt %d\n", ioc->name,
2419  ioc->pcidev->irq));
2420  }
2421  }
2422 
2423  /* Prime reply & request queues!
2424  * (mucho alloc's) Must be done prior to
2425  * init as upper addresses are needed for init.
2426  * If fails, continue with alt-ioc processing
2427  */
2428  dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n",
2429  ioc->name));
2430  if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
2431  ret = -3;
2432 
2433  /* May need to check/upload firmware & data here!
2434  * If fails, continue with alt-ioc processing
2435  */
2436  dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n",
2437  ioc->name));
2438  if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
2439  ret = -4;
2440 // NEW!
2441  if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
2442  printk(MYIOC_s_WARN_FMT
2443  ": alt-ioc (%d) FIFO mgmt alloc WARNING!\n",
2444  ioc->alt_ioc->name, rc);
2445  alt_ioc_ready = 0;
2446  reset_alt_ioc_active = 0;
2447  }
2448 
2449  if (alt_ioc_ready) {
2450  if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
2451  alt_ioc_ready = 0;
2452  reset_alt_ioc_active = 0;
2453  printk(MYIOC_s_WARN_FMT
2454  ": alt-ioc: (%d) init failure WARNING!\n",
2455  ioc->alt_ioc->name, rc);
2456  }
2457  }
2458 
2459  if (reason == MPT_HOSTEVENT_IOC_BRINGUP){
2460  if (ioc->upload_fw) {
2461  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2462  "firmware upload required!\n", ioc->name));
2463 
2464  /* Controller is not operational, cannot do upload
2465  */
2466  if (ret == 0) {
2467  rc = mpt_do_upload(ioc, sleepFlag);
2468  if (rc == 0) {
2469  if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
2470  /*
2471  * Maintain only one pointer to FW memory
2472  * so there will not be two attempts to
2473  * downloadboot onboard dual function
2474  * chips (mpt_adapter_disable,
2475  * mpt_diag_reset)
2476  */
2477  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2478  "mpt_upload: alt_%s has cached_fw=%p \n",
2479  ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
2480  ioc->cached_fw = NULL;
2481  }
2482  } else {
2483  printk(MYIOC_s_WARN_FMT
2484  "firmware upload failure!\n", ioc->name);
2485  ret = -6;
2486  }
2487  }
2488  }
2489  }
2490 
2491  /* Enable MPT base driver management of EventNotification
2492  * and EventAck handling.
2493  */
2494  if ((ret == 0) && (!ioc->facts.EventState)) {
2495  dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2496  "SendEventNotification\n",
2497  ioc->name));
2498  ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */
2499  }
2500 
2501  if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
2502  rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag);
2503 
2504  if (ret == 0) {
2505  /* Enable! (reply interrupt) */
2506  CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
2507  ioc->active = 1;
2508  }
2509  if (rc == 0) { /* alt ioc */
2510  if (reset_alt_ioc_active && ioc->alt_ioc) {
2511  /* (re)Enable alt-IOC! (reply interrupt) */
2512  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc"
2513  "reply irq re-enabled\n",
2514  ioc->alt_ioc->name));
2515  CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
2516  MPI_HIM_DIM);
2517  ioc->alt_ioc->active = 1;
2518  }
2519  }
2520 
2521 
2522  /* Add additional "reason" check before call to GetLanConfigPages
2523  * (combined with GetIoUnitPage2 call). This prevents a somewhat
2524  * recursive scenario; GetLanConfigPages times out, timer expired
2525  * routine calls HardResetHandler, which calls into here again,
2526  * and we try GetLanConfigPages again...
2527  */
2528  if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
2529 
2530  /*
2531  * Initialize link list for inactive raid volumes.
2532  */
2533  mutex_init(&ioc->raid_data.inactive_list_mutex);
2534  INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
2535 
2536  switch (ioc->bus_type) {
2537 
2538  case SAS:
2539  /* clear persistency table */
2540  if(ioc->facts.IOCExceptions &
2541  MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
2542  ret = mptbase_sas_persist_operation(ioc,
2543  MPI_SAS_OP_CLEAR_NOT_PRESENT);
2544  if(ret != 0)
2545  goto out;
2546  }
2547 
2548  /* Find IM volumes
2549  */
2550  mpt_findImVolumes(ioc);
2551 
2552  /* Check, and possibly reset, the coalescing value
2553  */
2554  mpt_read_ioc_pg_1(ioc);
2555 
2556  break;
2557 
2558  case FC:
2559  if ((ioc->pfacts[0].ProtocolFlags &
2560  MPI_PORTFACTS_PROTOCOL_LAN) &&
2561  (ioc->lan_cnfg_page0.Header.PageLength == 0)) {
2562  /*
2563  * Pre-fetch the ports LAN MAC address!
2564  * (LANPage1_t stuff)
2565  */
2566  (void) GetLanConfigPages(ioc);
2567  a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
2568  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2569  "LanAddr = %02X:%02X:%02X"
2570  ":%02X:%02X:%02X\n",
2571  ioc->name, a[5], a[4],
2572  a[3], a[2], a[1], a[0]));
2573  }
2574  break;
2575 
2576  case SPI:
2577  /* Get NVRAM and adapter maximums from SPP 0 and 2
2578  */
2579  mpt_GetScsiPortSettings(ioc, 0);
2580 
2581  /* Get version and length of SDP 1
2582  */
2583  mpt_readScsiDevicePageHeaders(ioc, 0);
2584 
2585  /* Find IM volumes
2586  */
2587  if (ioc->facts.MsgVersion >= MPI_VERSION_01_02)
2588  mpt_findImVolumes(ioc);
2589 
2590  /* Check, and possibly reset, the coalescing value
2591  */
2592  mpt_read_ioc_pg_1(ioc);
2593 
2594  mpt_read_ioc_pg_4(ioc);
2595 
2596  break;
2597  }
2598 
2599  GetIoUnitPage2(ioc);
2600  mpt_get_manufacturing_pg_0(ioc);
2601  }
2602 
2603  out:
2604  if ((ret != 0) && irq_allocated) {
2605  free_irq(ioc->pci_irq, ioc);
2606  if (ioc->msi_enable)
2607  pci_disable_msi(ioc->pcidev);
2608  }
2609  return ret;
2610 }
2611 
2612 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2625 static void
2626 mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
2627 {
2628  struct pci_dev *peer=NULL;
2629  unsigned int slot = PCI_SLOT(pdev->devfn);
2630  unsigned int func = PCI_FUNC(pdev->devfn);
2631  MPT_ADAPTER *ioc_srch;
2632 
2633  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PCI device %s devfn=%x/%x,"
2634  " searching for devfn match on %x or %x\n",
2635  ioc->name, pci_name(pdev), pdev->bus->number,
2636  pdev->devfn, func-1, func+1));
2637 
2638  peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func-1));
2639  if (!peer) {
2640  peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func+1));
2641  if (!peer)
2642  return;
2643  }
2644 
2645  list_for_each_entry(ioc_srch, &ioc_list, list) {
2646  struct pci_dev *_pcidev = ioc_srch->pcidev;
2647  if (_pcidev == peer) {
2648  /* Paranoia checks */
2649  if (ioc->alt_ioc != NULL) {
2650  printk(MYIOC_s_WARN_FMT
2651  "Oops, already bound (%s <==> %s)!\n",
2652  ioc->name, ioc->name, ioc->alt_ioc->name);
2653  break;
2654  } else if (ioc_srch->alt_ioc != NULL) {
2655  printk(MYIOC_s_WARN_FMT
2656  "Oops, already bound (%s <==> %s)!\n",
2657  ioc_srch->name, ioc_srch->name,
2658  ioc_srch->alt_ioc->name);
2659  break;
2660  }
2661  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2662  "FOUND! binding %s <==> %s\n",
2663  ioc->name, ioc->name, ioc_srch->name));
2664  ioc_srch->alt_ioc = ioc;
2665  ioc->alt_ioc = ioc_srch;
2666  }
2667  }
2668  pci_dev_put(peer);
2669 }
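
/*
 * Worked example (illustrative): a dual-function 929/1030-class adapter
 * appears as two PCI functions in the same slot, e.g. devfn 0x08 and 0x09
 * (slot 1, functions 0 and 1).  For the function-0 IOC the lookup above
 * probes PCI_DEVFN(slot, func - 1) first (which finds nothing), then
 * PCI_DEVFN(slot, func + 1), locates the peer, and cross-links the two
 * MPT_ADAPTER structures through their alt_ioc pointers.
 */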
2670 
2671 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2676 static void
2677 mpt_adapter_disable(MPT_ADAPTER *ioc)
2678 {
2679  int sz;
2680  int ret;
2681 
2682  if (ioc->cached_fw != NULL) {
2683  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2684  "%s: Pushing FW onto adapter\n", __func__, ioc->name));
2685  if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
2686  ioc->cached_fw, CAN_SLEEP)) < 0) {
2687  printk(MYIOC_s_WARN_FMT
2688  ": firmware downloadboot failure (%d)!\n",
2689  ioc->name, ret);
2690  }
2691  }
2692 
2693  /*
2694  * Put the controller into ready state (if it's not already)
2695  */
2696  if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) {
2697  if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET,
2698  CAN_SLEEP)) {
2699  if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY)
2700  printk(MYIOC_s_ERR_FMT "%s: IOC msg unit "
2701  "reset failed to put ioc in ready state!\n",
2702  ioc->name, __func__);
2703  } else
2704  printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset "
2705  "failed!\n", ioc->name, __func__);
2706  }
2707 
2708 
2709  /* Disable adapter interrupts! */
2710  synchronize_irq(ioc->pcidev->irq);
2711  CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
2712  ioc->active = 0;
2713 
2714  /* Clear any lingering interrupt */
2715  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
2716  CHIPREG_READ32(&ioc->chip->IntStatus);
2717 
2718  if (ioc->alloc != NULL) {
2719  sz = ioc->alloc_sz;
2720  dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free @ %p, sz=%d bytes\n",
2721  ioc->name, ioc->alloc, ioc->alloc_sz));
2722  pci_free_consistent(ioc->pcidev, sz,
2723  ioc->alloc, ioc->alloc_dma);
2724  ioc->reply_frames = NULL;
2725  ioc->req_frames = NULL;
2726  ioc->alloc = NULL;
2727  ioc->alloc_total -= sz;
2728  }
2729 
2730  if (ioc->sense_buf_pool != NULL) {
2731  sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
2732  pci_free_consistent(ioc->pcidev, sz,
2733  ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
2734  ioc->sense_buf_pool = NULL;
2735  ioc->alloc_total -= sz;
2736  }
2737 
2738  if (ioc->events != NULL){
2739  sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
2740  kfree(ioc->events);
2741  ioc->events = NULL;
2742  ioc->alloc_total -= sz;
2743  }
2744 
2745  mpt_free_fw_memory(ioc);
2746 
2747  kfree(ioc->spi_data.nvram);
2748  mpt_inactive_raid_list_free(ioc);
2749  kfree(ioc->raid_data.pIocPg2);
2750  kfree(ioc->raid_data.pIocPg3);
2751  ioc->spi_data.nvram = NULL;
2752  ioc->raid_data.pIocPg3 = NULL;
2753 
2754  if (ioc->spi_data.pIocPg4 != NULL) {
2755  sz = ioc->spi_data.IocPg4Sz;
2756  pci_free_consistent(ioc->pcidev, sz,
2757  ioc->spi_data.pIocPg4,
2758  ioc->spi_data.IocPg4_dma);
2759  ioc->spi_data.pIocPg4 = NULL;
2760  ioc->alloc_total -= sz;
2761  }
2762 
2763  if (ioc->ReqToChain != NULL) {
2764  kfree(ioc->ReqToChain);
2765  kfree(ioc->RequestNB);
2766  ioc->ReqToChain = NULL;
2767  }
2768 
2769  kfree(ioc->ChainToChain);
2770  ioc->ChainToChain = NULL;
2771 
2772  if (ioc->HostPageBuffer != NULL) {
2773  if((ret = mpt_host_page_access_control(ioc,
2774  MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != MPT_HOST_PAGE_ACCESS_OK) {
2775  printk(MYIOC_s_ERR_FMT
2776  ": %s: host page buffers free failed (%d)!\n",
2777  ioc->name, __func__, ret);
2778  }
2779  dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2780  "HostPageBuffer free @ %p, sz=%d bytes\n",
2781  ioc->name, ioc->HostPageBuffer,
2782  ioc->HostPageBuffer_sz));
2783  pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
2784  ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
2785  ioc->HostPageBuffer = NULL;
2786  ioc->HostPageBuffer_sz = 0;
2787  ioc->alloc_total -= ioc->HostPageBuffer_sz;
2788  }
2789 
2790  pci_set_drvdata(ioc->pcidev, NULL);
2791 }
2792 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2800 static void
2801 mpt_adapter_dispose(MPT_ADAPTER *ioc)
2802 {
2803  int sz_first, sz_last;
2804 
2805  if (ioc == NULL)
2806  return;
2807 
2808  sz_first = ioc->alloc_total;
2809 
2810  mpt_adapter_disable(ioc);
2811 
2812  if (ioc->pci_irq != -1) {
2813  free_irq(ioc->pci_irq, ioc);
2814  if (ioc->msi_enable)
2815  pci_disable_msi(ioc->pcidev);
2816  ioc->pci_irq = -1;
2817  }
2818 
2819  if (ioc->memmap != NULL) {
2820  iounmap(ioc->memmap);
2821  ioc->memmap = NULL;
2822  }
2823 
2824  pci_disable_device(ioc->pcidev);
2825  pci_release_selected_regions(ioc->pcidev, ioc->bars);
2826 
2827 #if defined(CONFIG_MTRR) && 0
2828  if (ioc->mtrr_reg > 0) {
2829  mtrr_del(ioc->mtrr_reg, 0, 0);
2830  dprintk(ioc, printk(MYIOC_s_INFO_FMT "MTRR region de-registered\n", ioc->name));
2831  }
2832 #endif
2833 
2834  /* Zap the adapter lookup ptr! */
2835  list_del(&ioc->list);
2836 
2837  sz_last = ioc->alloc_total;
2838  dprintk(ioc, printk(MYIOC_s_INFO_FMT "free'd %d of %d bytes\n",
2839  ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first));
2840 
2841  if (ioc->alt_ioc)
2842  ioc->alt_ioc->alt_ioc = NULL;
2843 
2844  kfree(ioc);
2845 }
2846 
2847 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2852 static void
2853 MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
2854 {
2855  int i = 0;
2856 
2857  printk(KERN_INFO "%s: ", ioc->name);
2858  if (ioc->prod_name)
2859  printk("%s: ", ioc->prod_name);
2860  printk("Capabilities={");
2861 
2862  if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
2863  printk("Initiator");
2864  i++;
2865  }
2866 
2867  if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2868  printk("%sTarget", i ? "," : "");
2869  i++;
2870  }
2871 
2872  if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
2873  printk("%sLAN", i ? "," : "");
2874  i++;
2875  }
2876 
2877 #if 0
2878  /*
2879  * This would probably evoke more questions than it's worth
2880  */
2881  if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2882  printk("%sLogBusAddr", i ? "," : "");
2883  i++;
2884  }
2885 #endif
2886 
2887  printk("}\n");
2888 }
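
/*
 * Sample console output from the routine above (format only; the adapter
 * name, product string and protocol flags will of course vary):
 *
 *	ioc0: LSI53C1030: Capabilities={Initiator,Target}
 */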
2889 
2890 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2905 static int
2906 MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2907 {
2908  u32 ioc_state;
2909  int statefault = 0;
2910  int cntdn;
2911  int hard_reset_done = 0;
2912  int r;
2913  int ii;
2914  int whoinit;
2915 
2916  /* Get current [raw] IOC state */
2917  ioc_state = mpt_GetIocState(ioc, 0);
2918  dhsprintk(ioc, printk(MYIOC_s_INFO_FMT "MakeIocReady [raw] state=%08x\n", ioc->name, ioc_state));
2919 
2920  /*
2921  * Check to see if IOC got left/stuck in doorbell handshake
2922  * grip of death. If so, hard reset the IOC.
2923  */
2924  if (ioc_state & MPI_DOORBELL_ACTIVE) {
2925  statefault = 1;
2926  printk(MYIOC_s_WARN_FMT "Unexpected doorbell active!\n",
2927  ioc->name);
2928  }
2929 
2930  /* Is it already READY? */
2931  if (!statefault &&
2932  ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) {
2933  dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2934  "IOC is in READY state\n", ioc->name));
2935  return 0;
2936  }
2937 
2938  /*
2939  * Check to see if IOC is in FAULT state.
2940  */
2941  if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
2942  statefault = 2;
2943  printk(MYIOC_s_WARN_FMT "IOC is in FAULT state!!!\n",
2944  ioc->name);
2945  printk(MYIOC_s_WARN_FMT " FAULT code = %04xh\n",
2946  ioc->name, ioc_state & MPI_DOORBELL_DATA_MASK);
2947  }
2948 
2949  /*
2950  * Hmmm... Did it get left operational?
2951  */
2952  if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) {
2953  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC operational unexpected\n",
2954  ioc->name));
2955 
2956  /* Check WhoInit.
2957  * If PCI Peer, exit.
2958  * Else, if no fault conditions are present, issue a MessageUnitReset
2959  * Else, fall through to KickStart case
2960  */
2961  whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT;
2962  dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2963  "whoinit 0x%x statefault %d force %d\n",
2964  ioc->name, whoinit, statefault, force));
2965  if (whoinit == MPI_WHOINIT_PCI_PEER)
2966  return -4;
2967  else {
2968  if ((statefault == 0 ) && (force == 0)) {
2969  if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) == 0)
2970  return 0;
2971  }
2972  statefault = 3;
2973  }
2974  }
2975 
2976  hard_reset_done = KickStart(ioc, statefault||force, sleepFlag);
2977  if (hard_reset_done < 0)
2978  return -1;
2979 
2980  /*
2981  * Loop here waiting for IOC to come READY.
2982  */
2983  ii = 0;
2984  cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 5; /* 5 seconds */
2985 
2986  while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
2987  if (ioc_state == MPI_IOC_STATE_OPERATIONAL) {
2988  /*
2989  * BIOS or previous driver load left IOC in OP state.
2990  * Reset messaging FIFOs.
2991  */
2992  if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) != 0) {
2993  printk(MYIOC_s_ERR_FMT "IOC msg unit reset failed!\n", ioc->name);
2994  return -2;
2995  }
2996  } else if (ioc_state == MPI_IOC_STATE_RESET) {
2997  /*
2998  * Something is wrong. Try to get IOC back
2999  * to a known state.
3000  */
3001  if ((r = SendIocReset(ioc, MPI_FUNCTION_IO_UNIT_RESET, sleepFlag)) != 0) {
3002  printk(MYIOC_s_ERR_FMT "IO unit reset failed!\n", ioc->name);
3003  return -3;
3004  }
3005  }
3006 
3007  ii++; cntdn--;
3008  if (!cntdn) {
3009  printk(MYIOC_s_ERR_FMT
3010  "Wait IOC_READY state (0x%x) timeout(%d)!\n",
3011  ioc->name, ioc_state, (int)((ii+5)/HZ));
3012  return -ETIME;
3013  }
3014 
3015  if (sleepFlag == CAN_SLEEP) {
3016  msleep(1);
3017  } else {
3018  mdelay (1); /* 1 msec delay */
3019  }
3020 
3021  }
3022 
3023  if (statefault < 3) {
3024  printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name,
3025  statefault == 1 ? "stuck handshake" : "IOC FAULT");
3026  }
3027 
3028  return hard_reset_done;
3029 }
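
/*
 * Caller's view (sketch, mirroring mpt_do_ioc_recovery() above): the return
 * value tells the caller how READY state was reached --
 *
 *	rc = MakeIocReady(ioc, 0, CAN_SLEEP);
 *	if (rc < 0)        // failure; -4 means the IOC is owned by a PCI peer
 *		...bail out...
 *	else if (rc == 1)  // a hard (diagnostic) reset was performed
 *		...the alt_ioc must also be made ready again...
 *	else               // rc == 0: soft path, IOC was already recoverable
 *		...continue bringup...
 */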
3030 
3031 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3040 u32
3041 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked)
3042 {
3043  u32 s, sc;
3044 
3045  /* Get! */
3046  s = CHIPREG_READ32(&ioc->chip->Doorbell);
3047  sc = s & MPI_IOC_STATE_MASK;
3048 
3049  /* Save! */
3050  ioc->last_state = sc;
3051 
3052  return cooked ? sc : s;
3053 }
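
/*
 * Usage sketch: with cooked != 0 the return value is just the
 * MPI_IOC_STATE_* field, e.g.
 *
 *	if (mpt_GetIocState(ioc, 1) == MPI_IOC_STATE_OPERATIONAL)
 *		...;
 *
 * whereas the raw (cooked == 0) form keeps the whole Doorbell register,
 * including the fault code in MPI_DOORBELL_DATA_MASK that MakeIocReady()
 * reports when the IOC is in the FAULT state.
 */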
3054 
3055 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3064 static int
3065 GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
3066 {
3067  IOCFacts_t get_facts;
3068  IOCFactsReply_t *facts;
3069  int r;
3070  int req_sz;
3071  int reply_sz;
3072  int sz;
3073  u32 status, vv;
3074  u8 shiftFactor=1;
3075 
3076  /* IOC *must* NOT be in RESET state! */
3077  if (ioc->last_state == MPI_IOC_STATE_RESET) {
3078  printk(KERN_ERR MYNAM
3079  ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
3080  ioc->name, ioc->last_state);
3081  return -44;
3082  }
3083 
3084  facts = &ioc->facts;
3085 
3086  /* Destination (reply area)... */
3087  reply_sz = sizeof(*facts);
3088  memset(facts, 0, reply_sz);
3089 
3090  /* Request area (get_facts on the stack right now!) */
3091  req_sz = sizeof(get_facts);
3092  memset(&get_facts, 0, req_sz);
3093 
3094  get_facts.Function = MPI_FUNCTION_IOC_FACTS;
3095  /* Assert: All other get_facts fields are zero! */
3096 
3097  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3098  "Sending get IocFacts request req_sz=%d reply_sz=%d\n",
3099  ioc->name, req_sz, reply_sz));
3100 
3101  /* No non-zero fields in the get_facts request are greater than
3102  * 1 byte in size, so we can just fire it off as is.
3103  */
3104  r = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_facts,
3105  reply_sz, (u16*)facts, 5 /*seconds*/, sleepFlag);
3106  if (r != 0)
3107  return r;
3108 
3109  /*
3110  * Now byte swap (GRRR) the necessary fields before any further
3111  * inspection of reply contents.
3112  *
3113  * But need to do some sanity checks on MsgLength (byte) field
3114  * to make sure we don't zero IOC's req_sz!
3115  */
3116  /* Did we get a valid reply? */
3117  if (facts->MsgLength > offsetof(IOCFactsReply_t, RequestFrameSize)/sizeof(u32)) {
3118  if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
3119  /*
3120  * If not been here, done that, save off first WhoInit value
3121  */
3122  if (ioc->FirstWhoInit == WHOINIT_UNKNOWN)
3123  ioc->FirstWhoInit = facts->WhoInit;
3124  }
3125 
3126  facts->MsgVersion = le16_to_cpu(facts->MsgVersion);
3127  facts->MsgContext = le32_to_cpu(facts->MsgContext);
3128  facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions);
3129  facts->IOCStatus = le16_to_cpu(facts->IOCStatus);
3130  facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo);
3131  status = le16_to_cpu(facts->IOCStatus) & MPI_IOCSTATUS_MASK;
3132  /* CHECKME! IOCStatus, IOCLogInfo */
3133 
3134  facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth);
3135  facts->RequestFrameSize = le16_to_cpu(facts->RequestFrameSize);
3136 
3137  /*
3138  * FC f/w version changed between 1.1 and 1.2
3139  * Old: u16{Major(4),Minor(4),SubMinor(8)}
3140  * New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
3141  */
3142  if (facts->MsgVersion < MPI_VERSION_01_02) {
3143  /*
3144  * Handle old FC f/w style, convert to new...
3145  */
3146  u16 oldv = le16_to_cpu(facts->Reserved_0101_FWVersion);
3147  facts->FWVersion.Word =
3148  ((oldv<<12) & 0xFF000000) |
3149  ((oldv<<8) & 0x000FFF00);
3150  } else
3151  facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);
3152 
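 /*
  * Worked example of the conversion above (illustrative values): an
  * old-style FC word of 0x1234 (Major=1, Minor=2, SubMinor=0x34) becomes
  *   ((0x1234 << 12) & 0xFF000000) | ((0x1234 << 8) & 0x000FFF00)
  *     = 0x01000000 | 0x00023400 = 0x01023400,
  * i.e. Major=0x01, Minor=0x02, Unit=0x34, Dev=0x00 in the new u32 layout.
  */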
3153  facts->ProductID = le16_to_cpu(facts->ProductID);
3154 
3155  if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
3156  > MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
3157  ioc->ir_firmware = 1;
3158 
3159  facts->CurrentHostMfaHighAddr =
3160  le32_to_cpu(facts->CurrentHostMfaHighAddr);
3161  facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
3162  facts->CurrentSenseBufferHighAddr =
3163  le32_to_cpu(facts->CurrentSenseBufferHighAddr);
3164  facts->CurReplyFrameSize =
3165  le16_to_cpu(facts->CurReplyFrameSize);
3166  facts->IOCCapabilities = le32_to_cpu(facts->IOCCapabilities);
3167 
3168  /*
3169  * Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx
3170  * Older MPI-1.00.xx struct had 13 dwords, and enlarged
3171  * to 14 in MPI-1.01.0x.
3172  */
3173  if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
3174  facts->MsgVersion > MPI_VERSION_01_00) {
3175  facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
3176  }
3177 
3178  sz = facts->FWImageSize;
3179  if ( sz & 0x01 )
3180  sz += 1;
3181  if ( sz & 0x02 )
3182  sz += 2;
3183  facts->FWImageSize = sz;
3184 
3185  if (!facts->RequestFrameSize) {
3186  /* Something is wrong! */
3187  printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n",
3188  ioc->name);
3189  return -55;
3190  }
3191 
3192  r = sz = facts->BlockSize;
3193  vv = ((63 / (sz * 4)) + 1) & 0x03;
3194  ioc->NB_for_64_byte_frame = vv;
3195  while ( sz )
3196  {
3197  shiftFactor++;
3198  sz = sz >> 1;
3199  }
3200  ioc->NBShiftFactor = shiftFactor;
3201  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3202  "NB_for_64_byte_frame=%x NBShiftFactor=%x BlockSize=%x\n",
3203  ioc->name, vv, shiftFactor, r));
3204 
3205  if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
3206  /*
3207  * Set values for this IOC's request & reply frame sizes,
3208  * and request & reply queue depths...
3209  */
3210  ioc->req_sz = min(MPT_DEFAULT_FRAME_SIZE, facts->RequestFrameSize * 4);
3211  ioc->req_depth = min_t(int, MPT_MAX_REQ_DEPTH, facts->GlobalCredits);
3212  ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
3213  ioc->reply_depth = min_t(int, MPT_DEFAULT_REPLY_DEPTH, facts->ReplyQueueDepth);
3214 
3215  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "reply_sz=%3d, reply_depth=%4d\n",
3216  ioc->name, ioc->reply_sz, ioc->reply_depth));
3217  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "req_sz =%3d, req_depth =%4d\n",
3218  ioc->name, ioc->req_sz, ioc->req_depth));
3219 
3220  /* Get port facts! */
3221  if ( (r = GetPortFacts(ioc, 0, sleepFlag)) != 0 )
3222  return r;
3223  }
3224  } else {
3225  printk(MYIOC_s_ERR_FMT
3226  "Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n",
3227  ioc->name, facts->MsgLength, (offsetof(IOCFactsReply_t,
3228  RequestFrameSize)/sizeof(u32)));
3229  return -66;
3230  }
3231 
3232  return 0;
3233 }
3234 
3235 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3244 static int
3245 GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
3246 {
3247  PortFacts_t get_pfacts;
3248  PortFactsReply_t *pfacts;
3249  int ii;
3250  int req_sz;
3251  int reply_sz;
3252  int max_id;
3253 
3254  /* IOC *must* NOT be in RESET state! */
3255  if (ioc->last_state == MPI_IOC_STATE_RESET) {
3256  printk(MYIOC_s_ERR_FMT "Can't get PortFacts NOT READY! (%08x)\n",
3257  ioc->name, ioc->last_state );
3258  return -4;
3259  }
3260 
3261  pfacts = &ioc->pfacts[portnum];
3262 
3263  /* Destination (reply area)... */
3264  reply_sz = sizeof(*pfacts);
3265  memset(pfacts, 0, reply_sz);
3266 
3267  /* Request area (get_pfacts on the stack right now!) */
3268  req_sz = sizeof(get_pfacts);
3269  memset(&get_pfacts, 0, req_sz);
3270 
3271  get_pfacts.Function = MPI_FUNCTION_PORT_FACTS;
3272  get_pfacts.PortNumber = portnum;
3273  /* Assert: All other get_pfacts fields are zero! */
3274 
3275  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending get PortFacts(%d) request\n",
3276  ioc->name, portnum));
3277 
3278  /* No non-zero fields in the get_pfacts request are greater than
3279  * 1 byte in size, so we can just fire it off as is.
3280  */
3281  ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_pfacts,
3282  reply_sz, (u16*)pfacts, 5 /*seconds*/, sleepFlag);
3283  if (ii != 0)
3284  return ii;
3285 
3286  /* Did we get a valid reply? */
3287 
3288  /* Now byte swap the necessary fields in the response. */
3289  pfacts->MsgContext = le32_to_cpu(pfacts->MsgContext);
3290  pfacts->IOCStatus = le16_to_cpu(pfacts->IOCStatus);
3291  pfacts->IOCLogInfo = le32_to_cpu(pfacts->IOCLogInfo);
3292  pfacts->MaxDevices = le16_to_cpu(pfacts->MaxDevices);
3293  pfacts->PortSCSIID = le16_to_cpu(pfacts->PortSCSIID);
3294  pfacts->ProtocolFlags = le16_to_cpu(pfacts->ProtocolFlags);
3295  pfacts->MaxPostedCmdBuffers = le16_to_cpu(pfacts->MaxPostedCmdBuffers);
3296  pfacts->MaxPersistentIDs = le16_to_cpu(pfacts->MaxPersistentIDs);
3297  pfacts->MaxLanBuckets = le16_to_cpu(pfacts->MaxLanBuckets);
3298 
3299  max_id = (ioc->bus_type == SAS) ? pfacts->PortSCSIID :
3300  pfacts->MaxDevices;
3301  ioc->devices_per_bus = (max_id > 255) ? 256 : max_id;
3302  ioc->number_of_buses = (ioc->devices_per_bus < 256) ? 1 : max_id/256;
3303 
3304  /*
3305  * Place all the devices on channels
3306  *
3307  * (for debugging)
3308  */
3309  if (mpt_channel_mapping) {
3310  ioc->devices_per_bus = 1;
3311  ioc->number_of_buses = (max_id > 255) ? 255 : max_id;
3312  }
3313 
3314  return 0;
3315 }
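
/*
 * Worked example of the bus/device fan-out above (illustrative): with
 * mpt_channel_mapping off and a non-SAS port reporting MaxDevices = 512,
 * max_id = 512, so devices_per_bus = 256 and number_of_buses = 512/256 = 2;
 * a port reporting MaxDevices = 16 simply gets 16 devices on a single bus.
 */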
3316 
3317 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3327 static int
3328 SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
3329 {
3330  IOCInit_t ioc_init;
3331  MPIDefaultReply_t init_reply;
3332  u32 state;
3333  int r;
3334  int count;
3335  int cntdn;
3336 
3337  memset(&ioc_init, 0, sizeof(ioc_init));
3338  memset(&init_reply, 0, sizeof(init_reply));
3339 
3340  ioc_init.WhoInit = MPI_WHOINIT_HOST_DRIVER;
3341  ioc_init.Function = MPI_FUNCTION_IOC_INIT;
3342 
3343  /* If we are in a recovery mode and we uploaded the FW image,
3344  * then this pointer is not NULL. Skip the upload a second time.
3345  * Set this flag if cached_fw set for either IOC.
3346  */
3347  if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
3348  ioc->upload_fw = 1;
3349  else
3350  ioc->upload_fw = 0;
3351  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "upload_fw %d facts.Flags=%x\n",
3352  ioc->name, ioc->upload_fw, ioc->facts.Flags));
3353 
3354  ioc_init.MaxDevices = (U8)ioc->devices_per_bus;
3355  ioc_init.MaxBuses = (U8)ioc->number_of_buses;
3356 
3357  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n",
3358  ioc->name, ioc->facts.MsgVersion));
3359  if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
3360  // set MsgVersion and HeaderVersion host driver was built with
3361  ioc_init.MsgVersion = cpu_to_le16(MPI_VERSION);
3362  ioc_init.HeaderVersion = cpu_to_le16(MPI_HEADER_VERSION);
3363 
3364  if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT) {
3365  ioc_init.HostPageBufferSGE = ioc->facts.HostPageBufferSGE;
3366  } else if(mpt_host_page_alloc(ioc, &ioc_init))
3367  return -99;
3368  }
3369  ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
3370 
3371  if (ioc->sg_addr_size == sizeof(u64)) {
3372  /* Save the upper 32-bits of the request
3373  * (reply) and sense buffers.
3374  */
3375  ioc_init.HostMfaHighAddr = cpu_to_le32((u32)((u64)ioc->alloc_dma >> 32));
3376  ioc_init.SenseBufferHighAddr = cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32));
3377  } else {
3378  /* Force 32-bit addressing */
3379  ioc_init.HostMfaHighAddr = cpu_to_le32(0);
3380  ioc_init.SenseBufferHighAddr = cpu_to_le32(0);
3381  }
3382 
3383  ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr;
3384  ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr;
3385  ioc->facts.MaxDevices = ioc_init.MaxDevices;
3386  ioc->facts.MaxBuses = ioc_init.MaxBuses;
3387 
3388  dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending IOCInit (req @ %p)\n",
3389  ioc->name, &ioc_init));
3390 
3391  r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init,
3392  sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag);
3393  if (r != 0) {
3394  printk(MYIOC_s_ERR_FMT "Sending IOCInit failed(%d)!\n",ioc->name, r);
3395  return r;
3396  }
3397 
3398  /* No need to byte swap the multibyte fields in the reply
3399  * since we don't even look at its contents.
3400  */
3401 
3402  dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending PortEnable (req @ %p)\n",
3403  ioc->name, &ioc_init));
3404 
3405  if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0) {
3406  printk(MYIOC_s_ERR_FMT "Sending PortEnable failed(%d)!\n",ioc->name, r);
3407  return r;
3408  }
3409 
3410  /* YIKES! SUPER IMPORTANT!!!
3411  * Poll IocState until _OPERATIONAL while IOC is doing
3412  * LoopInit and TargetDiscovery!
3413  */
3414  count = 0;
3415  cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 60; /* 60 seconds */
3416  state = mpt_GetIocState(ioc, 1);
3417  while (state != MPI_IOC_STATE_OPERATIONAL && --cntdn) {
3418  if (sleepFlag == CAN_SLEEP) {
3419  msleep(1);
3420  } else {
3421  mdelay(1);
3422  }
3423 
3424  if (!cntdn) {
3425  printk(MYIOC_s_ERR_FMT "Wait IOC_OP state timeout(%d)!\n",
3426  ioc->name, (int)((count+5)/HZ));
3427  return -9;
3428  }
3429 
3430  state = mpt_GetIocState(ioc, 1);
3431  count++;
3432  }
3433  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Wait IOC_OPERATIONAL state (cnt=%d)\n",
3434  ioc->name, count));
3435 
3436  ioc->aen_event_read_flag=0;
3437  return r;
3438 }
3439 
3440 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3451 static int
3452 SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
3453 {
3454  PortEnable_t port_enable;
3455  MPIDefaultReply_t reply_buf;
3456  int rc;
3457  int req_sz;
3458  int reply_sz;
3459 
3460  /* Destination... */
3461  reply_sz = sizeof(MPIDefaultReply_t);
3462  memset(&reply_buf, 0, reply_sz);
3463 
3464  req_sz = sizeof(PortEnable_t);
3465  memset(&port_enable, 0, req_sz);
3466 
3467  port_enable.Function = MPI_FUNCTION_PORT_ENABLE;
3468  port_enable.PortNumber = portnum;
3469 /* port_enable.ChainOffset = 0; */
3470 /* port_enable.MsgFlags = 0; */
3471 /* port_enable.MsgContext = 0; */
3472 
3473  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Port(%d)Enable (req @ %p)\n",
3474  ioc->name, portnum, &port_enable));
3475 
3476  /* RAID FW may take a long time to enable
3477  */
3478  if (ioc->ir_firmware || ioc->bus_type == SAS) {
3479  rc = mpt_handshake_req_reply_wait(ioc, req_sz,
3480  (u32*)&port_enable, reply_sz, (u16*)&reply_buf,
3481  300 /*seconds*/, sleepFlag);
3482  } else {
3483  rc = mpt_handshake_req_reply_wait(ioc, req_sz,
3484  (u32*)&port_enable, reply_sz, (u16*)&reply_buf,
3485  30 /*seconds*/, sleepFlag);
3486  }
3487  return rc;
3488 }
3489 
3500 int
3501 mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
3502 {
3503  int rc;
3504 
3505  if (ioc->cached_fw) {
3506  rc = 0; /* use already allocated memory */
3507  goto out;
3508  }
3509  else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
3510  ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */
3511  ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma;
3512  rc = 0;
3513  goto out;
3514  }
3515  ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma);
3516  if (!ioc->cached_fw) {
3517  printk(MYIOC_s_ERR_FMT "Unable to allocate memory for the cached firmware image!\n",
3518  ioc->name);
3519  rc = -1;
3520  } else {
3521  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Image @ %p[%p], sz=%d[%x] bytes\n",
3522  ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, size, size));
3523  ioc->alloc_total += size;
3524  rc = 0;
3525  }
3526  out:
3527  return rc;
3528 }
3529 
3537 void
3538 mpt_free_fw_memory(MPT_ADAPTER *ioc)
3539 {
3540  int sz;
3541 
3542  if (!ioc->cached_fw)
3543  return;
3544 
3545  sz = ioc->facts.FWImageSize;
3546  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
3547  ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
3548  pci_free_consistent(ioc->pcidev, sz, ioc->cached_fw, ioc->cached_fw_dma);
3549  ioc->alloc_total -= sz;
3550  ioc->cached_fw = NULL;
3551 }
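
/*
 * Pairing sketch (mirrors mpt_do_upload() and mpt_diag_reset() below): the
 * two helpers are meant to bracket the lifetime of the cached image, e.g.
 *
 *	if (mpt_alloc_fw_memory(ioc, ioc->facts.FWImageSize) == 0) {
 *		...fill or consume ioc->cached_fw...
 *		mpt_free_fw_memory(ioc);
 *	}
 *
 * When the buffer is borrowed from alt_ioc, only one of the two adapters
 * should keep a non-NULL cached_fw pointer (see the comment in
 * mpt_do_ioc_recovery() about maintaining a single pointer to FW memory).
 */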
3552 
3553 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3567 static int
3568 mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
3569 {
3570  u8 reply[sizeof(FWUploadReply_t)];
3571  FWUpload_t *prequest;
3572  FWUploadReply_t *preply;
3573  FWUploadTCSGE_t *ptcsge;
3574  u32 flagsLength;
3575  int ii, sz, reply_sz;
3576  int cmdStatus;
3577  int request_size;
3578  /* If the image size is 0, we are done.
3579  */
3580  if ((sz = ioc->facts.FWImageSize) == 0)
3581  return 0;
3582 
3583  if (mpt_alloc_fw_memory(ioc, ioc->facts.FWImageSize) != 0)
3584  return -ENOMEM;
3585 
3586  dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Image @ %p[%p], sz=%d[%x] bytes\n",
3587  ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
3588 
3589  prequest = (sleepFlag == NO_SLEEP) ? kzalloc(ioc->req_sz, GFP_ATOMIC) :
3590  kzalloc(ioc->req_sz, GFP_KERNEL);
3591  if (!prequest) {
3592  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed "
3593  "while allocating memory \n", ioc->name));
3594  mpt_free_fw_memory(ioc);
3595  return -ENOMEM;
3596  }
3597 
3598  preply = (FWUploadReply_t *)&reply;
3599 
3600  reply_sz = sizeof(reply);
3601  memset(preply, 0, reply_sz);
3602 
3603  prequest->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
3604  prequest->Function = MPI_FUNCTION_FW_UPLOAD;
3605 
3606  ptcsge = (FWUploadTCSGE_t *) &prequest->SGL;
3607  ptcsge->DetailsLength = 12;
3608  ptcsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
3609  ptcsge->ImageSize = cpu_to_le32(sz);
3610  ptcsge++;
3611 
3612  flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz;
3613  ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma);
3614  request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) +
3615  ioc->SGE_size;
3616  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload "
3617  " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest,
3618  ioc->facts.FWImageSize, request_size));
3619  DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest);
3620 
3621  ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest,
3622  reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag);
3623 
3624  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed "
3625  "rc=%x \n", ioc->name, ii));
3626 
3627  cmdStatus = -EFAULT;
3628  if (ii == 0) {
3629  /* Handshake transfer was complete and successful.
3630  * Check the Reply Frame.
3631  */
3632  int status;
3633  status = le16_to_cpu(preply->IOCStatus) &
3634  MPI_IOCSTATUS_MASK;
3635  if (status == MPI_IOCSTATUS_SUCCESS &&
3636  ioc->facts.FWImageSize ==
3637  le32_to_cpu(preply->ActualImageSize))
3638  cmdStatus = 0;
3639  }
3640  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n",
3641  ioc->name, cmdStatus));
3642 
3643 
3644  if (cmdStatus) {
3645  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, "
3646  "freeing image \n", ioc->name));
3647  mpt_free_fw_memory(ioc);
3648  }
3649  kfree(prequest);
3650 
3651  return cmdStatus;
3652 }
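
/*
 * Size arithmetic for the request built above (illustrative, assuming the
 * 32-bit SGE case where ioc->SGE_size = 4 + 4 = 8 bytes): the frame handed to
 * mpt_handshake_req_reply_wait() is offsetof(FWUpload_t, SGL) +
 * sizeof(FWUploadTCSGE_t) + 8 bytes -- the fixed header, the
 * transaction-context element carrying the image size, and one simple SGE
 * pointing at ioc->cached_fw_dma.
 */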
3653 
3654 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3668 static int
3669 mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
3670 {
3671  MpiExtImageHeader_t *pExtImage;
3672  u32 fwSize;
3673  u32 diag0val;
3674  int count;
3675  u32 *ptrFw;
3676  u32 diagRwData;
3677  u32 nextImage;
3678  u32 load_addr;
3679  u32 ioc_state=0;
3680 
3681  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot: fw size 0x%x (%d), FW Ptr %p\n",
3682  ioc->name, pFwHeader->ImageSize, pFwHeader->ImageSize, pFwHeader));
3683 
3684  CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
3685  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
3686  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
3687  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
3688  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
3689  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
3690 
3691  CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM));
3692 
3693  /* wait 1 msec */
3694  if (sleepFlag == CAN_SLEEP) {
3695  msleep(1);
3696  } else {
3697  mdelay (1);
3698  }
3699 
3700  diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3701  CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER);
3702 
3703  for (count = 0; count < 30; count ++) {
3704  diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3705  if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
3706  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RESET_ADAPTER cleared, count=%d\n",
3707  ioc->name, count));
3708  break;
3709  }
3710  /* wait .1 sec */
3711  if (sleepFlag == CAN_SLEEP) {
3712  msleep (100);
3713  } else {
3714  mdelay (100);
3715  }
3716  }
3717 
3718  if ( count == 30 ) {
3719  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot failed! "
3720  "Unable to get MPI_DIAG_DRWE mode, diag0val=%x\n",
3721  ioc->name, diag0val));
3722  return -3;
3723  }
3724 
3725  CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
3726  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
3727  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
3728  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
3729  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
3730  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
3731 
3732  /* Set the DiagRwEn and Disable ARM bits */
3733  CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM));
3734 
3735  fwSize = (pFwHeader->ImageSize + 3)/4;
3736  ptrFw = (u32 *) pFwHeader;
3737 
3738  /* Write the LoadStartAddress to the DiagRw Address Register
3739  * using Programmed IO
3740  */
3741  if (ioc->errata_flag_1064)
3742  pci_enable_io_access(ioc->pcidev);
3743 
3744  CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->LoadStartAddress);
3745  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "LoadStart addr written 0x%x \n",
3746  ioc->name, pFwHeader->LoadStartAddress));
3747 
3748  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write FW Image: 0x%x bytes @ %p\n",
3749  ioc->name, fwSize*4, ptrFw));
3750  while (fwSize--) {
3751  CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);
3752  }
3753 
3754  nextImage = pFwHeader->NextImageHeaderOffset;
3755  while (nextImage) {
3756  pExtImage = (MpiExtImageHeader_t *) ((char *)pFwHeader + nextImage);
3757 
3758  load_addr = pExtImage->LoadStartAddress;
3759 
3760  fwSize = (pExtImage->ImageSize + 3) >> 2;
3761  ptrFw = (u32 *)pExtImage;
3762 
3763  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write Ext Image: 0x%x (%d) bytes @ %p load_addr=%x\n",
3764  ioc->name, fwSize*4, fwSize*4, ptrFw, load_addr));
3765  CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, load_addr);
3766 
3767  while (fwSize--) {
3768  CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);
3769  }
3770  nextImage = pExtImage->NextImageHeaderOffset;
3771  }
3772 
3773  /* Write the IopResetVectorRegAddr */
3774  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Addr=%x! \n", ioc->name, pFwHeader->IopResetRegAddr));
3775  CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->IopResetRegAddr);
3776 
3777  /* Write the IopResetVectorValue */
3778  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Value=%x! \n", ioc->name, pFwHeader->IopResetVectorValue));
3779  CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, pFwHeader->IopResetVectorValue);
3780 
3781  /* Clear the internal flash bad bit - autoincrementing register,
3782  * so must do two writes.
3783  */
3784  if (ioc->bus_type == SPI) {
3785  /*
3786  * 1030 and 1035 H/W errata, workaround to access
3787  * the ClearFlashBadSignatureBit
3788  */
3789  CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
3790  diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData);
3791  diagRwData |= 0x40000000;
3792  CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
3793  CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData);
3794 
3795  } else /* if((ioc->bus_type == SAS) || (ioc->bus_type == FC)) */ {
3796  diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3797  CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val |
3798  MPI_DIAG_CLEAR_FLASH_BAD_SIG);
3799 
3800  /* wait 1 msec */
3801  if (sleepFlag == CAN_SLEEP) {
3802  msleep (1);
3803  } else {
3804  mdelay (1);
3805  }
3806  }
3807 
3808  if (ioc->errata_flag_1064)
3809  pci_disable_io_access(ioc->pcidev);
3810 
3811  diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3812  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot diag0val=%x, "
3813  "turning off PREVENT_IOC_BOOT, DISABLE_ARM, RW_ENABLE\n",
3814  ioc->name, diag0val));
3815  diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM | MPI_DIAG_RW_ENABLE);
3816  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot now diag0val=%x\n",
3817  ioc->name, diag0val));
3818  CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
3819 
3820  /* Write 0xFF to reset the sequencer */
3821  CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
3822 
3823  if (ioc->bus_type == SAS) {
3824  ioc_state = mpt_GetIocState(ioc, 0);
3825  if ( (GetIocFacts(ioc, sleepFlag,
3826  MPT_HOSTEVENT_IOC_BRINGUP)) != 0 ) {
3827  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "GetIocFacts failed: IocState=%x\n",
3828  ioc->name, ioc_state));
3829  return -EFAULT;
3830  }
3831  }
3832 
3833  for (count=0; count<HZ*20; count++) {
3834  if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) {
3835  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3836  "downloadboot successful! (count=%d) IocState=%x\n",
3837  ioc->name, count, ioc_state));
3838  if (ioc->bus_type == SAS) {
3839  return 0;
3840  }
3841  if ((SendIocInit(ioc, sleepFlag)) != 0) {
3842  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3843  "downloadboot: SendIocInit failed\n",
3844  ioc->name));
3845  return -EFAULT;
3846  }
3847  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3848  "downloadboot: SendIocInit successful\n",
3849  ioc->name));
3850  return 0;
3851  }
3852  if (sleepFlag == CAN_SLEEP) {
3853  msleep (10);
3854  } else {
3855  mdelay (10);
3856  }
3857  }
3858  ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3859  "downloadboot failed! IocState=%x\n",ioc->name, ioc_state));
3860  return -EFAULT;
3861 }
3862 
3863 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3889 static int
3890 KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
3891 {
3892  int hard_reset_done = 0;
3893  u32 ioc_state=0;
3894  int cnt,cntdn;
3895 
3896  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "KickStarting!\n", ioc->name));
3897  if (ioc->bus_type == SPI) {
3898  /* Always issue a Msg Unit Reset first. This will clear some
3899  * SCSI bus hang conditions.
3900  */
3901  SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
3902 
3903  if (sleepFlag == CAN_SLEEP) {
3904  msleep (1000);
3905  } else {
3906  mdelay (1000);
3907  }
3908  }
3909 
3910  hard_reset_done = mpt_diag_reset(ioc, force, sleepFlag);
3911  if (hard_reset_done < 0)
3912  return hard_reset_done;
3913 
3914  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Diagnostic reset successful!\n",
3915  ioc->name));
3916 
3917  cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 2; /* 2 seconds */
3918  for (cnt=0; cnt<cntdn; cnt++) {
3919  ioc_state = mpt_GetIocState(ioc, 1);
3920  if ((ioc_state == MPI_IOC_STATE_READY) || (ioc_state == MPI_IOC_STATE_OPERATIONAL)) {
3921  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "KickStart successful! (cnt=%d)\n",
3922  ioc->name, cnt));
3923  return hard_reset_done;
3924  }
3925  if (sleepFlag == CAN_SLEEP) {
3926  msleep (10);
3927  } else {
3928  mdelay (10);
3929  }
3930  }
3931 
3932  dinitprintk(ioc, printk(MYIOC_s_ERR_FMT "Failed to come READY after reset! IocState=%x\n",
3933  ioc->name, mpt_GetIocState(ioc, 0)));
3934  return -1;
3935 }
3936 
3937 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3956 static int
3957 mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3958 {
3959  u32 diag0val;
3960  u32 doorbell;
3961  int hard_reset_done = 0;
3962  int count = 0;
3963  u32 diag1val = 0;
3964  MpiFwHeader_t *cached_fw; /* Pointer to FW */
3965  u8 cb_idx;
3966 
3967  /* Clear any existing interrupts */
3968  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
3969 
3970  if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
3971 
3972  if (!ignore)
3973  return 0;
3974 
3975  drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
3976  "address=%p\n", ioc->name, __func__,
3977  &ioc->chip->Doorbell, &ioc->chip->Reset_1078));
3978  CHIPREG_WRITE32(&ioc->chip->Reset_1078, 0x07);
3979  if (sleepFlag == CAN_SLEEP)
3980  msleep(1);
3981  else
3982  mdelay(1);
3983 
3984  /*
3985  * Call each currently registered protocol IOC reset handler
3986  * with pre-reset indication.
3987  * NOTE: If we're doing _IOC_BRINGUP, there can be no
3988  * MptResetHandlers[] registered yet.
3989  */
3990  for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
3991  if (MptResetHandlers[cb_idx])
3992  (*(MptResetHandlers[cb_idx]))(ioc,
3993  MPT_IOC_PRE_RESET);
3994  }
3995 
3996  for (count = 0; count < 60; count ++) {
3997  doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
3998  doorbell &= MPI_IOC_STATE_MASK;
3999 
4000  drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4001  "looking for READY STATE: doorbell=%x"
4002  " count=%d\n",
4003  ioc->name, doorbell, count));
4004 
4005  if (doorbell == MPI_IOC_STATE_READY) {
4006  return 1;
4007  }
4008 
4009  /* wait 1 sec */
4010  if (sleepFlag == CAN_SLEEP)
4011  msleep(1000);
4012  else
4013  mdelay(1000);
4014  }
4015  return -1;
4016  }
4017 
4018  /* Use "Diagnostic reset" method! (only thing available!) */
4019  diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
4020 
4021  if (ioc->debug_level & MPT_DEBUG) {
4022  if (ioc->alt_ioc)
4023  diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
4024  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG1: diag0=%08x, diag1=%08x\n",
4025  ioc->name, diag0val, diag1val));
4026  }
4027 
4028  /* Do the reset if we are told to ignore the reset history
4029  * or if the reset history is 0
4030  */
4031  if (ignore || !(diag0val & MPI_DIAG_RESET_HISTORY)) {
4032  while ((diag0val & MPI_DIAG_DRWE) == 0) {
4033  /* Write magic sequence to WriteSequence register
4034  * Loop until in diagnostic mode
4035  */
4036  CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
4037  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
4038  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
4039  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
4040  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
4041  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
4042 
4043  /* wait 100 msec */
4044  if (sleepFlag == CAN_SLEEP) {
4045  msleep (100);
4046  } else {
4047  mdelay (100);
4048  }
4049 
4050  count++;
4051  if (count > 20) {
4052  printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n",
4053  ioc->name, diag0val);
4054  return -2;
4055 
4056  }
4057 
4058  diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
4059 
4060  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Wrote magic DiagWriteEn sequence (%x)\n",
4061  ioc->name, diag0val));
4062  }
4063 
4064  if (ioc->debug_level & MPT_DEBUG) {
4065  if (ioc->alt_ioc)
4066  diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
4067  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG2: diag0=%08x, diag1=%08x\n",
4068  ioc->name, diag0val, diag1val));
4069  }
4070  /*
4071  * Disable the ARM (Bug fix)
4072  *
4073  */
4074  CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM);
4075  mdelay(1);
4076 
4077  /*
4078  * Now hit the reset bit in the Diagnostic register
4079  * (THE BIG HAMMER!) (Clears DRWE bit).
4080  */
4081  CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER);
4082  hard_reset_done = 1;
4083  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Diagnostic reset performed\n",
4084  ioc->name));
4085 
4086  /*
4087  * Call each currently registered protocol IOC reset handler
4088  * with pre-reset indication.
4089  * NOTE: If we're doing _IOC_BRINGUP, there can be no
4090  * MptResetHandlers[] registered yet.
4091  */
4092  for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
4093  if (MptResetHandlers[cb_idx]) {
4094  mpt_signal_reset(cb_idx,
4095  ioc, MPT_IOC_PRE_RESET);
4096  if (ioc->alt_ioc) {
4097  mpt_signal_reset(cb_idx,
4098  ioc->alt_ioc, MPT_IOC_PRE_RESET);
4099  }
4100  }
4101  }
4102 
4103  if (ioc->cached_fw)
4104  cached_fw = (MpiFwHeader_t *)ioc->cached_fw;
4105  else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
4106  cached_fw = (MpiFwHeader_t *)ioc->alt_ioc->cached_fw;
4107  else
4108  cached_fw = NULL;
4109  if (cached_fw) {
4110  /* If the DownloadBoot operation fails, the
4111  * IOC will be left unusable. This is a fatal error
4112  * case. _diag_reset will return < 0
4113  */
4114  for (count = 0; count < 30; count ++) {
4115  diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
4116  if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
4117  break;
4118  }
4119 
4120  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "cached_fw: diag0val=%x count=%d\n",
4121  ioc->name, diag0val, count));
4122  /* wait 1 sec */
4123  if (sleepFlag == CAN_SLEEP) {
4124  msleep (1000);
4125  } else {
4126  mdelay (1000);
4127  }
4128  }
4129  if ((count = mpt_downloadboot(ioc, cached_fw, sleepFlag)) < 0) {
4130  printk(MYIOC_s_WARN_FMT
4131  "firmware downloadboot failure (%d)!\n", ioc->name, count);
4132  }
4133 
4134  } else {
4135  /* Wait for FW to reload and for board
4136  * to go to the READY state.
4137  * Maximum wait is 60 seconds.
4138  * If it fails, no error is returned here; the
4139  * calling program checks the IOC state again.
4140  */
4141  for (count = 0; count < 60; count ++) {
4142  doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
4143  doorbell &= MPI_IOC_STATE_MASK;
4144 
4145  drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4146  "looking for READY STATE: doorbell=%x"
4147  " count=%d\n", ioc->name, doorbell, count));
4148 
4149  if (doorbell == MPI_IOC_STATE_READY) {
4150  break;
4151  }
4152 
4153  /* wait 1 sec */
4154  if (sleepFlag == CAN_SLEEP) {
4155  msleep (1000);
4156  } else {
4157  mdelay (1000);
4158  }
4159  }
4160 
4161  if (doorbell != MPI_IOC_STATE_READY)
4162  printk(MYIOC_s_ERR_FMT "Failed to come READY "
4163  "after reset! IocState=%x", ioc->name,
4164  doorbell);
4165  }
4166  }
4167 
4168  diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
4169  if (ioc->debug_level & MPT_DEBUG) {
4170  if (ioc->alt_ioc)
4171  diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
4172  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG3: diag0=%08x, diag1=%08x\n",
4173  ioc->name, diag0val, diag1val));
4174  }
4175 
4176  /* Clear RESET_HISTORY bit! Place board in the
4177  * diagnostic mode to update the diag register.
4178  */
4179  diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
4180  count = 0;
4181  while ((diag0val & MPI_DIAG_DRWE) == 0) {
4182  /* Write magic sequence to WriteSequence register
4183  * Loop until in diagnostic mode
4184  */
4185  CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
4186  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
4187  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
4188  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
4189  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
4190  CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
4191 
4192  /* wait 100 msec */
4193  if (sleepFlag == CAN_SLEEP) {
4194  msleep (100);
4195  } else {
4196  mdelay (100);
4197  }
4198 
4199  count++;
4200  if (count > 20) {
4201  printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n",
4202  ioc->name, diag0val);
4203  break;
4204  }
4205  diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
4206  }
4207  diag0val &= ~MPI_DIAG_RESET_HISTORY;
4208  CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
4209  diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
4210  if (diag0val & MPI_DIAG_RESET_HISTORY) {
4211  printk(MYIOC_s_WARN_FMT "ResetHistory bit failed to clear!\n",
4212  ioc->name);
4213  }
4214 
4215  /* Disable Diagnostic Mode
4216  */
4217  CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFFFFFFFF);
4218 
4219  /* Check FW reload status flags.
4220  */
4221  diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
4222  if (diag0val & (MPI_DIAG_FLASH_BAD_SIG | MPI_DIAG_RESET_ADAPTER | MPI_DIAG_DISABLE_ARM)) {
4223  printk(MYIOC_s_ERR_FMT "Diagnostic reset FAILED! (%02xh)\n",
4224  ioc->name, diag0val);
4225  return -3;
4226  }
4227 
4228  if (ioc->debug_level & MPT_DEBUG) {
4229  if (ioc->alt_ioc)
4230  diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
4231  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG4: diag0=%08x, diag1=%08x\n",
4232  ioc->name, diag0val, diag1val));
4233  }
4234 
4235  /*
4236  * Reset flag that says we've enabled event notification
4237  */
4238  ioc->facts.EventState = 0;
4239 
4240  if (ioc->alt_ioc)
4241  ioc->alt_ioc->facts.EventState = 0;
4242 
4243  return hard_reset_done;
4244 }
4245 
4246 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4258 static int
4259 SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
4260 {
4261  int r;
4262  u32 state;
4263  int cntdn, count;
4264 
4265  drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending IOC reset(0x%02x)!\n",
4266  ioc->name, reset_type));
4267  CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<<MPI_DOORBELL_FUNCTION_SHIFT);
4268  if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
4269  return r;
4270 
4271  /* FW ACK'd request, wait for READY state
4272  */
4273  count = 0;
4274  cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15; /* 15 seconds */
4275 
4276  while ((state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
4277  cntdn--;
4278  count++;
4279  if (!cntdn) {
4280  if (sleepFlag != CAN_SLEEP)
4281  count *= 10;
4282 
4283  printk(MYIOC_s_ERR_FMT
4284  "Wait IOC_READY state (0x%x) timeout(%d)!\n",
4285  ioc->name, state, (int)((count+5)/HZ));
4286  return -ETIME;
4287  }
4288 
4289  if (sleepFlag == CAN_SLEEP) {
4290  msleep(1);
4291  } else {
4292  mdelay (1); /* 1 msec delay */
4293  }
4294  }
4295 
4296  /* TODO!
4297  * Cleanup all event stuff for this IOC; re-issue EventNotification
4298  * request if needed.
4299  */
4300  if (ioc->facts.Function)
4301  ioc->facts.EventState = 0;
4302 
4303  return 0;
4304 }
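 /*
  * Usage sketch (annotation, not from the original source): SendIocReset is
  * invoked with one of the MPI reset message functions, for example
  *
  *	rc = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP);
  *
  * The reset type is shifted into the Doorbell FUNCTION field; the routine
  * then polls mpt_GetIocState() in 1 ms steps until MPI_IOC_STATE_READY is
  * reached or the 15 second timeout expires (-ETIME).
  */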
4305 
4306 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4314 static int
4315 initChainBuffers(MPT_ADAPTER *ioc)
4316 {
4317  u8 *mem;
4318  int sz, ii, num_chain;
4319  int scale, num_sge, numSGE;
4320 
4321  /* ReqToChain size must equal the req_depth
4322  * index = req_idx
4323  */
4324  if (ioc->ReqToChain == NULL) {
4325  sz = ioc->req_depth * sizeof(int);
4326  mem = kmalloc(sz, GFP_ATOMIC);
4327  if (mem == NULL)
4328  return -1;
4329 
4330  ioc->ReqToChain = (int *) mem;
4331  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReqToChain alloc @ %p, sz=%d bytes\n",
4332  ioc->name, mem, sz));
4333  mem = kmalloc(sz, GFP_ATOMIC);
4334  if (mem == NULL)
4335  return -1;
4336 
4337  ioc->RequestNB = (int *) mem;
4338  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestNB alloc @ %p, sz=%d bytes\n",
4339  ioc->name, mem, sz));
4340  }
4341  for (ii = 0; ii < ioc->req_depth; ii++) {
4342  ioc->ReqToChain[ii] = MPT_HOST_NO_CHAIN;
4343  }
4344 
4345  /* ChainToChain size must equal the total number
4346  * of chain buffers to be allocated.
4347  * index = chain_idx
4348  *
4349  * Calculate the number of chain buffers needed(plus 1) per I/O
4350  * then multiply the maximum number of simultaneous cmds
4351  *
4352  * num_sge = num sge in request frame + last chain buffer
4353  * scale = num sge per chain buffer if no chain element
4354  */
4355  scale = ioc->req_sz / ioc->SGE_size;
4356  if (ioc->sg_addr_size == sizeof(u64))
4357  num_sge = scale + (ioc->req_sz - 60) / ioc->SGE_size;
4358  else
4359  num_sge = 1 + scale + (ioc->req_sz - 64) / ioc->SGE_size;
4360 
4361  if (ioc->sg_addr_size == sizeof(u64)) {
4362  numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
4363  (ioc->req_sz - 60) / ioc->SGE_size;
4364  } else {
4365  numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) +
4366  scale + (ioc->req_sz - 64) / ioc->SGE_size;
4367  }
4368  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n",
4369  ioc->name, num_sge, numSGE));
4370 
4371  if (ioc->bus_type == FC) {
4372  if (numSGE > MPT_SCSI_FC_SG_DEPTH)
4373  numSGE = MPT_SCSI_FC_SG_DEPTH;
4374  } else {
4375  if (numSGE > MPT_SCSI_SG_DEPTH)
4376  numSGE = MPT_SCSI_SG_DEPTH;
4377  }
4378 
4379  num_chain = 1;
4380  while (numSGE - num_sge > 0) {
4381  num_chain++;
4382  num_sge += (scale - 1);
4383  }
4384  num_chain++;
4385 
4386  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Now numSGE=%d num_sge=%d num_chain=%d\n",
4387  ioc->name, numSGE, num_sge, num_chain));
4388 
4389  if (ioc->bus_type == SPI)
4390  num_chain *= MPT_SCSI_CAN_QUEUE;
4391  else if (ioc->bus_type == SAS)
4392  num_chain *= MPT_SAS_CAN_QUEUE;
4393  else
4394  num_chain *= MPT_FC_CAN_QUEUE;
4395 
4396  ioc->num_chain = num_chain;
4397 
4398  sz = num_chain * sizeof(int);
4399  if (ioc->ChainToChain == NULL) {
4400  mem = kmalloc(sz, GFP_ATOMIC);
4401  if (mem == NULL)
4402  return -1;
4403 
4404  ioc->ChainToChain = (int *) mem;
4405  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainToChain alloc @ %p, sz=%d bytes\n",
4406  ioc->name, mem, sz));
4407  } else {
4408  mem = (u8 *) ioc->ChainToChain;
4409  }
4410  memset(mem, 0xFF, sz);
4411  return num_chain;
4412 }
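 /*
  * Worked example (annotation; the numbers are assumptions, not read from
  * hardware): with 64-bit SGEs of 12 bytes and req_sz = 128,
  * scale = 128 / 12 = 10 and num_sge = 10 + (128 - 60) / 12 = 15, i.e. 15
  * scatter-gather entries fit in the request frame plus its last chain
  * buffer.  Each additional chain buffer adds (scale - 1) entries until
  * numSGE (capped at MPT_SCSI_SG_DEPTH, or MPT_SCSI_FC_SG_DEPTH on FC) is
  * covered; the per-command chain count is then multiplied by the queue
  * depth for the bus type to size the chain buffer pool.
  */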
4413 
4414 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4425 static int
4426 PrimeIocFifos(MPT_ADAPTER *ioc)
4427 {
4428  MPT_FRAME_HDR *mf;
4429  unsigned long flags;
4430  dma_addr_t alloc_dma;
4431  u8 *mem;
4432  int i, reply_sz, sz, total_size, num_chain;
4433  u64 dma_mask;
4434 
4435  dma_mask = 0;
4436 
4437  /* Prime reply FIFO... */
4438 
4439  if (ioc->reply_frames == NULL) {
4440  if ( (num_chain = initChainBuffers(ioc)) < 0)
4441  return -1;
4442  /*
4443  * 1078 errata workaround for the 36GB limitation
4444  */
4445  if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
4446  ioc->dma_mask > DMA_BIT_MASK(35)) {
4447  if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32))
4448  && !pci_set_consistent_dma_mask(ioc->pcidev,
4449  DMA_BIT_MASK(32))) {
4450  dma_mask = DMA_BIT_MASK(35);
4451  d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4452  "setting 35 bit addressing for "
4453  "Request/Reply/Chain and Sense Buffers\n",
4454  ioc->name));
4455  } else {
4456  /* Resetting DMA mask to 64 bit */
4457  pci_set_dma_mask(ioc->pcidev,
4458  DMA_BIT_MASK(64));
4459  pci_set_consistent_dma_mask(ioc->pcidev,
4460  DMA_BIT_MASK(64));
4461 
4462  printk(MYIOC_s_ERR_FMT
4463  "failed setting 35 bit addressing for "
4464  "Request/Reply/Chain and Sense Buffers\n",
4465  ioc->name);
4466  return -1;
4467  }
4468  }
4469 
4470  total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth);
4471  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n",
4472  ioc->name, ioc->reply_sz, ioc->reply_depth));
4473  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d[%x] bytes\n",
4474  ioc->name, reply_sz, reply_sz));
4475 
4476  sz = (ioc->req_sz * ioc->req_depth);
4477  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffer sz=%d bytes, RequestDepth=%d\n",
4478  ioc->name, ioc->req_sz, ioc->req_depth));
4479  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffer sz=%d[%x] bytes\n",
4480  ioc->name, sz, sz));
4481  total_size += sz;
4482 
4483  sz = num_chain * ioc->req_sz; /* chain buffer pool size */
4484  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffer sz=%d bytes, ChainDepth=%d\n",
4485  ioc->name, ioc->req_sz, num_chain));
4486  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffer sz=%d[%x] bytes num_chain=%d\n",
4487  ioc->name, sz, sz, num_chain));
4488 
4489  total_size += sz;
4490  mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma);
4491  if (mem == NULL) {
4492  printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n",
4493  ioc->name);
4494  goto out_fail;
4495  }
4496 
4497  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Total alloc @ %p[%p], sz=%d[%x] bytes\n",
4498  ioc->name, mem, (void *)(ulong)alloc_dma, total_size, total_size));
4499 
4500  memset(mem, 0, total_size);
4501  ioc->alloc_total += total_size;
4502  ioc->alloc = mem;
4503  ioc->alloc_dma = alloc_dma;
4504  ioc->alloc_sz = total_size;
4505  ioc->reply_frames = (MPT_FRAME_HDR *) mem;
4506  ioc->reply_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
4507 
4508  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffers @ %p[%p]\n",
4509  ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma));
4510 
4511  alloc_dma += reply_sz;
4512  mem += reply_sz;
4513 
4514  /* Request FIFO - WE manage this! */
4515 
4516  ioc->req_frames = (MPT_FRAME_HDR *) mem;
4517  ioc->req_frames_dma = alloc_dma;
4518 
4519  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffers @ %p[%p]\n",
4520  ioc->name, mem, (void *)(ulong)alloc_dma));
4521 
4522  ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
4523 
4524 #if defined(CONFIG_MTRR) && 0
4525  /*
4526  * Enable Write Combining MTRR for IOC's memory region.
4527  * (at least as much as we can; "size and base must be
4528  * multiples of 4 kiB")
4529  */
4530  ioc->mtrr_reg = mtrr_add(ioc->req_frames_dma,
4531  sz,
4532  MTRR_TYPE_WRCOMB, 1);
4533  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MTRR region registered (base:size=%08x:%x)\n",
4534  ioc->name, ioc->req_frames_dma, sz));
4535 #endif
4536 
4537  for (i = 0; i < ioc->req_depth; i++) {
4538  alloc_dma += ioc->req_sz;
4539  mem += ioc->req_sz;
4540  }
4541 
4542  ioc->ChainBuffer = mem;
4543  ioc->ChainBufferDMA = alloc_dma;
4544 
4545  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffers @ %p(%p)\n",
4546  ioc->name, ioc->ChainBuffer, (void *)(ulong)ioc->ChainBufferDMA));
4547 
4548  /* Initialize the free chain Q.
4549  */
4550 
4551  INIT_LIST_HEAD(&ioc->FreeChainQ);
4552 
4553  /* Post the chain buffers to the FreeChainQ.
4554  */
4555  mem = (u8 *)ioc->ChainBuffer;
4556  for (i=0; i < num_chain; i++) {
4557  mf = (MPT_FRAME_HDR *) mem;
4558  list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeChainQ);
4559  mem += ioc->req_sz;
4560  }
4561 
4562  /* Initialize Request frames linked list
4563  */
4564  alloc_dma = ioc->req_frames_dma;
4565  mem = (u8 *) ioc->req_frames;
4566 
4567  spin_lock_irqsave(&ioc->FreeQlock, flags);
4568  INIT_LIST_HEAD(&ioc->FreeQ);
4569  for (i = 0; i < ioc->req_depth; i++) {
4570  mf = (MPT_FRAME_HDR *) mem;
4571 
4572  /* Queue REQUESTs *internally*! */
4573  list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
4574 
4575  mem += ioc->req_sz;
4576  }
4577  spin_unlock_irqrestore(&ioc->FreeQlock, flags);
4578 
4579  sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
4580  ioc->sense_buf_pool =
4581  pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma);
4582  if (ioc->sense_buf_pool == NULL) {
4583  printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n",
4584  ioc->name);
4585  goto out_fail;
4586  }
4587 
4588  ioc->sense_buf_low_dma = (u32) (ioc->sense_buf_pool_dma & 0xFFFFFFFF);
4589  ioc->alloc_total += sz;
4590  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SenseBuffers @ %p[%p]\n",
4591  ioc->name, ioc->sense_buf_pool, (void *)(ulong)ioc->sense_buf_pool_dma));
4592 
4593  }
4594 
4595  /* Post Reply frames to FIFO
4596  */
4597  alloc_dma = ioc->alloc_dma;
4598  dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffers @ %p[%p]\n",
4599  ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma));
4600 
4601  for (i = 0; i < ioc->reply_depth; i++) {
4602  /* Write each address to the IOC! */
4603  CHIPREG_WRITE32(&ioc->chip->ReplyFifo, alloc_dma);
4604  alloc_dma += ioc->reply_sz;
4605  }
4606 
4607  if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
4608  ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev,
4609  ioc->dma_mask))
4610  d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4611  "restoring 64 bit addressing\n", ioc->name));
4612 
4613  return 0;
4614 
4615 out_fail:
4616 
4617  if (ioc->alloc != NULL) {
4618  sz = ioc->alloc_sz;
4619  pci_free_consistent(ioc->pcidev,
4620  sz,
4621  ioc->alloc, ioc->alloc_dma);
4622  ioc->reply_frames = NULL;
4623  ioc->req_frames = NULL;
4624  ioc->alloc_total -= sz;
4625  }
4626  if (ioc->sense_buf_pool != NULL) {
4627  sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
4628  pci_free_consistent(ioc->pcidev,
4629  sz,
4630  ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
4631  ioc->sense_buf_pool = NULL;
4632  }
4633 
4634  if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
4635  DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev,
4636  DMA_BIT_MASK(64)))
4637  d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4638  "restoring 64 bit addressing\n", ioc->name));
4639 
4640  return -1;
4641 }
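 /*
  * Layout note (annotation): the single pci_alloc_consistent() region built
  * above is carved into three consecutive areas:
  *
  *	alloc_dma + 0                          reply frames (reply_sz * reply_depth)
  *	          + reply_sz * reply_depth     request frames (req_sz * req_depth)
  *	          + req_sz * req_depth         chain buffer pool (req_sz * num_chain)
  *
  * The sense buffers live in a separate consistent allocation of
  * req_depth * MPT_SENSE_BUFFER_ALLOC bytes, and each reply frame address is
  * posted to the ReplyFifo register so the IOC can start using them.
  */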
4642 
4643 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4662 static int
4663 mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
4664  int replyBytes, u16 *u16reply, int maxwait, int sleepFlag)
4665 {
4666  MPIDefaultReply_t *mptReply;
4667  int failcnt = 0;
4668  int t;
4669 
4670  /*
4671  * Get ready to cache a handshake reply
4672  */
4673  ioc->hs_reply_idx = 0;
4674  mptReply = (MPIDefaultReply_t *) ioc->hs_reply;
4675  mptReply->MsgLength = 0;
4676 
4677  /*
4678  * Make sure there are no doorbells (WRITE 0 to IntStatus reg),
4679  * then tell IOC that we want to handshake a request of N words.
4680  * (WRITE u32val to Doorbell reg).
4681  */
4682  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
4683  CHIPREG_WRITE32(&ioc->chip->Doorbell,
4684  ((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
4685  ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
4686 
4687  /*
4688  * Wait for IOC's doorbell handshake int
4689  */
4690  if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
4691  failcnt++;
4692 
4693  dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake request start reqBytes=%d, WaitCnt=%d%s\n",
4694  ioc->name, reqBytes, t, failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
4695 
4696  /* Read doorbell and check for active bit */
4697  if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
4698  return -1;
4699 
4700  /*
4701  * Clear doorbell int (WRITE 0 to IntStatus reg),
4702  * then wait for IOC to ACKnowledge that it's ready for
4703  * our handshake request.
4704  */
4705  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
4706  if (!failcnt && (t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
4707  failcnt++;
4708 
4709  if (!failcnt) {
4710  int ii;
4711  u8 *req_as_bytes = (u8 *) req;
4712 
4713  /*
4714  * Stuff request words via doorbell handshake,
4715  * with ACK from IOC for each.
4716  */
4717  for (ii = 0; !failcnt && ii < reqBytes/4; ii++) {
4718  u32 word = ((req_as_bytes[(ii*4) + 0] << 0) |
4719  (req_as_bytes[(ii*4) + 1] << 8) |
4720  (req_as_bytes[(ii*4) + 2] << 16) |
4721  (req_as_bytes[(ii*4) + 3] << 24));
4722 
4723  CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
4724  if ((t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
4725  failcnt++;
4726  }
4727 
4728  dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handshake request frame (@%p) header\n", ioc->name, req));
4729  DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)req);
4730 
4731  dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake request post done, WaitCnt=%d%s\n",
4732  ioc->name, t, failcnt ? " - MISSING DOORBELL ACK!" : ""));
4733 
4734  /*
4735  * Wait for completion of doorbell handshake reply from the IOC
4736  */
4737  if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0)
4738  failcnt++;
4739 
4740  dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake reply count=%d%s\n",
4741  ioc->name, t, failcnt ? " - MISSING DOORBELL REPLY!" : ""));
4742 
4743  /*
4744  * Copy out the cached reply...
4745  */
4746  for (ii=0; ii < min(replyBytes/2,mptReply->MsgLength*2); ii++)
4747  u16reply[ii] = ioc->hs_reply[ii];
4748  } else {
4749  return -99;
4750  }
4751 
4752  return -failcnt;
4753 }
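 /*
  * Protocol summary (annotation): a doorbell handshake proceeds as
  *  1. write MPI_FUNCTION_HANDSHAKE plus the request length in dwords to
  *     the Doorbell register,
  *  2. wait for the doorbell interrupt and check the ACTIVE bit,
  *  3. push the request one 32-bit word at a time, each word acknowledged
  *     by the IOC (WaitForDoorbellAck),
  *  4. collect the reply 16 bits at a time (WaitForDoorbellReply).
  * A negative return value encodes how many of these steps went missing.
  */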
4754 
4755 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4768 static int
4769 WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
4770 {
4771  int cntdn;
4772  int count = 0;
4773  u32 intstat=0;
4774 
4775  cntdn = 1000 * howlong;
4776 
4777  if (sleepFlag == CAN_SLEEP) {
4778  while (--cntdn) {
4779  msleep (1);
4780  intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
4781  if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
4782  break;
4783  count++;
4784  }
4785  } else {
4786  while (--cntdn) {
4787  udelay (1000);
4788  intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
4789  if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
4790  break;
4791  count++;
4792  }
4793  }
4794 
4795  if (cntdn) {
4796  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell ACK (count=%d)\n",
4797  ioc->name, count));
4798  return count;
4799  }
4800 
4801  printk(MYIOC_s_ERR_FMT "Doorbell ACK timeout (count=%d), IntStatus=%x!\n",
4802  ioc->name, count, intstat);
4803  return -1;
4804 }
4805 
4806 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4818 static int
4819 WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
4820 {
4821  int cntdn;
4822  int count = 0;
4823  u32 intstat=0;
4824 
4825  cntdn = 1000 * howlong;
4826  if (sleepFlag == CAN_SLEEP) {
4827  while (--cntdn) {
4828  intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
4829  if (intstat & MPI_HIS_DOORBELL_INTERRUPT)
4830  break;
4831  msleep(1);
4832  count++;
4833  }
4834  } else {
4835  while (--cntdn) {
4836  intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
4837  if (intstat & MPI_HIS_DOORBELL_INTERRUPT)
4838  break;
4839  udelay (1000);
4840  count++;
4841  }
4842  }
4843 
4844  if (cntdn) {
4845  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell INT (cnt=%d) howlong=%d\n",
4846  ioc->name, count, howlong));
4847  return count;
4848  }
4849 
4850  printk(MYIOC_s_ERR_FMT "Doorbell INT timeout (count=%d), IntStatus=%x!\n",
4851  ioc->name, count, intstat);
4852  return -1;
4853 }
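 /*
  * Annotation: WaitForDoorbellAck() polls for MPI_HIS_IOP_DOORBELL_STATUS to
  * clear (the IOC has consumed the word we just wrote), while
  * WaitForDoorbellInt() polls for MPI_HIS_DOORBELL_INTERRUPT to be raised
  * (the IOC has data for us to read).  Both poll in 1 ms steps, sleeping
  * with msleep() when sleepFlag == CAN_SLEEP and busy-waiting otherwise.
  */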
4854 
4855 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4868 static int
4869 WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
4870 {
4871  int u16cnt = 0;
4872  int failcnt = 0;
4873  int t;
4874  u16 *hs_reply = ioc->hs_reply;
4875  volatile MPIDefaultReply_t *mptReply = (MPIDefaultReply_t *) ioc->hs_reply;
4876  u16 hword;
4877 
4878  hs_reply[0] = hs_reply[1] = hs_reply[7] = 0;
4879 
4880  /*
4881  * Get first two u16's so we can look at IOC's intended reply MsgLength
4882  */
4883  u16cnt=0;
4884  if ((t = WaitForDoorbellInt(ioc, howlong, sleepFlag)) < 0) {
4885  failcnt++;
4886  } else {
4887  hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
4888  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
4889  if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
4890  failcnt++;
4891  else {
4892  hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
4893  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
4894  }
4895  }
4896 
4897  dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitCnt=%d First handshake reply word=%08x%s\n",
4898  ioc->name, t, le32_to_cpu(*(u32 *)hs_reply),
4899  failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
4900 
4901  /*
4902  * If no error (and IOC said MsgLength is > 0), piece together
4903  * reply 16 bits at a time.
4904  */
4905  for (u16cnt=2; !failcnt && u16cnt < (2 * mptReply->MsgLength); u16cnt++) {
4906  if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
4907  failcnt++;
4908  hword = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
4909  /* don't overflow our IOC hs_reply[] buffer! */
4910  if (u16cnt < ARRAY_SIZE(ioc->hs_reply))
4911  hs_reply[u16cnt] = hword;
4912  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
4913  }
4914 
4915  if (!failcnt && (t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
4916  failcnt++;
4917  CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
4918 
4919  if (failcnt) {
4920  printk(MYIOC_s_ERR_FMT "Handshake reply failure!\n",
4921  ioc->name);
4922  return -failcnt;
4923  }
4924 #if 0
4925  else if (u16cnt != (2 * mptReply->MsgLength)) {
4926  return -101;
4927  }
4928  else if ((mptReply->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4929  return -102;
4930  }
4931 #endif
4932 
4933  dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got Handshake reply:\n", ioc->name));
4934  DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mptReply);
4935 
4936  dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell REPLY WaitCnt=%d (sz=%d)\n",
4937  ioc->name, t, u16cnt/2));
4938  return u16cnt/2;
4939 }
4940 
4941 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4952 static int
4953 GetLanConfigPages(MPT_ADAPTER *ioc)
4954 {
4955  ConfigPageHeader_t hdr;
4956  CONFIGPARMS cfg;
4957  LANPage0_t *ppage0_alloc;
4958  dma_addr_t page0_dma;
4959  LANPage1_t *ppage1_alloc;
4960  dma_addr_t page1_dma;
4961  int rc = 0;
4962  int data_sz;
4963  int copy_sz;
4964 
4965  /* Get LAN Page 0 header */
4966  hdr.PageVersion = 0;
4967  hdr.PageLength = 0;
4968  hdr.PageNumber = 0;
4969  hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
4970  cfg.cfghdr.hdr = &hdr;
4971  cfg.physAddr = -1;
4972  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
4973  cfg.dir = 0;
4974  cfg.pageAddr = 0;
4975  cfg.timeout = 0;
4976 
4977  if ((rc = mpt_config(ioc, &cfg)) != 0)
4978  return rc;
4979 
4980  if (hdr.PageLength > 0) {
4981  data_sz = hdr.PageLength * 4;
4982  ppage0_alloc = (LANPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
4983  rc = -ENOMEM;
4984  if (ppage0_alloc) {
4985  memset((u8 *)ppage0_alloc, 0, data_sz);
4986  cfg.physAddr = page0_dma;
4987  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
4988 
4989  if ((rc = mpt_config(ioc, &cfg)) == 0) {
4990  /* save the data */
4991  copy_sz = min_t(int, sizeof(LANPage0_t), data_sz);
4992  memcpy(&ioc->lan_cnfg_page0, ppage0_alloc, copy_sz);
4993 
4994  }
4995 
4996  pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);
4997 
4998  /* FIXME!
4999  * Normalize endianness of structure data,
5000  * by byte-swapping all > 1 byte fields!
5001  */
5002 
5003  }
5004 
5005  if (rc)
5006  return rc;
5007  }
5008 
5009  /* Get LAN Page 1 header */
5010  hdr.PageVersion = 0;
5011  hdr.PageLength = 0;
5012  hdr.PageNumber = 1;
5013  hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
5014  cfg.cfghdr.hdr = &hdr;
5015  cfg.physAddr = -1;
5016  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5017  cfg.dir = 0;
5018  cfg.pageAddr = 0;
5019 
5020  if ((rc = mpt_config(ioc, &cfg)) != 0)
5021  return rc;
5022 
5023  if (hdr.PageLength == 0)
5024  return 0;
5025 
5026  data_sz = hdr.PageLength * 4;
5027  rc = -ENOMEM;
5028  ppage1_alloc = (LANPage1_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
5029  if (ppage1_alloc) {
5030  memset((u8 *)ppage1_alloc, 0, data_sz);
5031  cfg.physAddr = page1_dma;
5032  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5033 
5034  if ((rc = mpt_config(ioc, &cfg)) == 0) {
5035  /* save the data */
5036  copy_sz = min_t(int, sizeof(LANPage1_t), data_sz);
5037  memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz);
5038  }
5039 
5040  pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage1_alloc, page1_dma);
5041 
5042  /* FIXME!
5043  * Normalize endianness of structure data,
5044  * by byte-swapping all > 1 byte fields!
5045  */
5046 
5047  }
5048 
5049  return rc;
5050 }
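 /*
  * Pattern note (annotation): every config page accessor in this file uses
  * the same two-step sequence, sketched here with hypothetical locals:
  *
  *	hdr.PageNumber = 0;
  *	hdr.PageType   = MPI_CONFIG_PAGETYPE_LAN;
  *	cfg.cfghdr.hdr = &hdr;
  *	cfg.action     = MPI_CONFIG_ACTION_PAGE_HEADER;
  *	if (mpt_config(ioc, &cfg) == 0 && hdr.PageLength) {
  *		buf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &dma);
  *		cfg.physAddr = dma;
  *		cfg.action   = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
  *		mpt_config(ioc, &cfg);
  *		... copy and byte-swap the interesting fields ...
  *	}
  *
  * i.e. fetch the header first to learn the page length, then read the page
  * itself into DMA-able memory.
  */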
5051 
5052 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5067 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5068 int
5069 mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
5070 {
5071  SasIoUnitControlRequest_t *sasIoUnitCntrReq;
5072  SasIoUnitControlReply_t *sasIoUnitCntrReply;
5073  MPT_FRAME_HDR *mf = NULL;
5074  MPIHeader_t *mpi_hdr;
5075  int ret = 0;
5076  unsigned long timeleft;
5077 
5078  mutex_lock(&ioc->mptbase_cmds.mutex);
5079 
5080  /* init the internal cmd struct */
5081  memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
5082  INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
5083 
5084  /* ensure garbage is not sent to fw */
5085  switch(persist_opcode) {
5086 
5087  case MPI_SAS_OP_CLEAR_NOT_PRESENT:
5088  case MPI_SAS_OP_CLEAR_ALL_PERSISTENT:
5089  break;
5090 
5091  default:
5092  ret = -1;
5093  goto out;
5094  }
5095 
5096  printk(KERN_DEBUG "%s: persist_opcode=%x\n",
5097  __func__, persist_opcode);
5098 
5099  /* Get a MF for this command.
5100  */
5101  if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5102  printk(KERN_DEBUG "%s: no msg frames!\n", __func__);
5103  ret = -1;
5104  goto out;
5105  }
5106 
5107  mpi_hdr = (MPIHeader_t *) mf;
5108  sasIoUnitCntrReq = (SasIoUnitControlRequest_t *)mf;
5109  memset(sasIoUnitCntrReq,0,sizeof(SasIoUnitControlRequest_t));
5110  sasIoUnitCntrReq->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
5111  sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
5112  sasIoUnitCntrReq->Operation = persist_opcode;
5113 
5114  mpt_put_msg_frame(mpt_base_index, ioc, mf);
5115  timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 10*HZ);
5116  if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
5117  ret = -ETIME;
5118  printk(KERN_DEBUG "%s: failed\n", __func__);
5119  if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
5120  goto out;
5121  if (!timeleft) {
5122  printk(MYIOC_s_WARN_FMT
5123  "Issuing Reset from %s!!, doorbell=0x%08x\n",
5124  ioc->name, __func__, mpt_GetIocState(ioc, 0));
5125  mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
5126  mpt_free_msg_frame(ioc, mf);
5127  }
5128  goto out;
5129  }
5130 
5131  if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
5132  ret = -1;
5133  goto out;
5134  }
5135 
5136  sasIoUnitCntrReply =
5137  (SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply;
5138  if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
5139  printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
5140  __func__, sasIoUnitCntrReply->IOCStatus,
5141  sasIoUnitCntrReply->IOCLogInfo);
5142  printk(KERN_DEBUG "%s: failed\n", __func__);
5143  ret = -1;
5144  } else
5145  printk(KERN_DEBUG "%s: success\n", __func__);
5146  out:
5147 
5148  CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
5149  mutex_unlock(&ioc->mptbase_cmds.mutex);
5150  return ret;
5151 }
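 /*
  * Usage sketch (annotation, assuming one of the MPI_SAS_OP_* opcodes that
  * the switch above accepts):
  *
  *	if (mptbase_sas_persist_operation(ioc,
  *			MPI_SAS_OP_CLEAR_ALL_PERSISTENT) != 0)
  *		printk(KERN_WARNING "clearing persistent mappings failed\n");
  *
  * The routine takes a mutex and waits for completion, so it must only be
  * called from process context.
  */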
5152 
5153 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5154 
5155 static void
5156 mptbase_raid_process_event_data(MPT_ADAPTER *ioc,
5157  MpiEventDataRaid_t * pRaidEventData)
5158 {
5159  int volume;
5160  int reason;
5161  int disk;
5162  int status;
5163  int flags;
5164  int state;
5165 
5166  volume = pRaidEventData->VolumeID;
5167  reason = pRaidEventData->ReasonCode;
5168  disk = pRaidEventData->PhysDiskNum;
5169  status = le32_to_cpu(pRaidEventData->SettingsStatus);
5170  flags = (status >> 0) & 0xff;
5171  state = (status >> 8) & 0xff;
5172 
5173  if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
5174  return;
5175  }
5176 
5177  if ((reason >= MPI_EVENT_RAID_RC_PHYSDISK_CREATED &&
5178  reason <= MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED) ||
5179  (reason == MPI_EVENT_RAID_RC_SMART_DATA)) {
5180  printk(MYIOC_s_INFO_FMT "RAID STATUS CHANGE for PhysDisk %d id=%d\n",
5181  ioc->name, disk, volume);
5182  } else {
5183  printk(MYIOC_s_INFO_FMT "RAID STATUS CHANGE for VolumeID %d\n",
5184  ioc->name, volume);
5185  }
5186 
5187  switch(reason) {
5189  printk(MYIOC_s_INFO_FMT " volume has been created\n",
5190  ioc->name);
5191  break;
5192 
5194 
5195  printk(MYIOC_s_INFO_FMT " volume has been deleted\n",
5196  ioc->name);
5197  break;
5198 
5200  printk(MYIOC_s_INFO_FMT " volume settings have been changed\n",
5201  ioc->name);
5202  break;
5203 
5205  printk(MYIOC_s_INFO_FMT " volume is now %s%s%s%s\n",
5206  ioc->name,
5208  ? "optimal"
5210  ? "degraded"
5212  ? "failed"
5213  : "state unknown",
5215  ? ", enabled" : "",
5217  ? ", quiesced" : "",
5219  ? ", resync in progress" : "" );
5220  break;
5221 
5223  printk(MYIOC_s_INFO_FMT " volume membership of PhysDisk %d has changed\n",
5224  ioc->name, disk);
5225  break;
5226 
5228  printk(MYIOC_s_INFO_FMT " PhysDisk has been created\n",
5229  ioc->name);
5230  break;
5231 
5233  printk(MYIOC_s_INFO_FMT " PhysDisk has been deleted\n",
5234  ioc->name);
5235  break;
5236 
5238  printk(MYIOC_s_INFO_FMT " PhysDisk settings have been changed\n",
5239  ioc->name);
5240  break;
5241 
5243  printk(MYIOC_s_INFO_FMT " PhysDisk is now %s%s%s\n",
5244  ioc->name,
5246  ? "online"
5247  : state == MPI_PHYSDISK0_STATUS_MISSING
5248  ? "missing"
5250  ? "not compatible"
5251  : state == MPI_PHYSDISK0_STATUS_FAILED
5252  ? "failed"
5254  ? "initializing"
5256  ? "offline requested"
5258  ? "failed requested"
5260  ? "offline"
5261  : "state unknown",
5263  ? ", out of sync" : "",
5265  ? ", quiesced" : "" );
5266  break;
5267 
5269  printk(MYIOC_s_INFO_FMT " Domain Validation needed for PhysDisk %d\n",
5270  ioc->name, disk);
5271  break;
5272 
5274  printk(MYIOC_s_INFO_FMT " SMART data received, ASC/ASCQ = %02xh/%02xh\n",
5275  ioc->name, pRaidEventData->ASC, pRaidEventData->ASCQ);
5276  break;
5277 
5279  printk(MYIOC_s_INFO_FMT " replacement of PhysDisk %d has started\n",
5280  ioc->name, disk);
5281  break;
5282  }
5283 }
5284 
5285 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5296 static int
5297 GetIoUnitPage2(MPT_ADAPTER *ioc)
5298 {
5299  ConfigPageHeader_t hdr;
5300  CONFIGPARMS cfg;
5301  IOUnitPage2_t *ppage_alloc;
5302  dma_addr_t page_dma;
5303  int data_sz;
5304  int rc;
5305 
5306  /* Get the page header */
5307  hdr.PageVersion = 0;
5308  hdr.PageLength = 0;
5309  hdr.PageNumber = 2;
5310  hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT;
5311  cfg.cfghdr.hdr = &hdr;
5312  cfg.physAddr = -1;
5313  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5314  cfg.dir = 0;
5315  cfg.pageAddr = 0;
5316  cfg.timeout = 0;
5317 
5318  if ((rc = mpt_config(ioc, &cfg)) != 0)
5319  return rc;
5320 
5321  if (hdr.PageLength == 0)
5322  return 0;
5323 
5324  /* Read the config page */
5325  data_sz = hdr.PageLength * 4;
5326  rc = -ENOMEM;
5327  ppage_alloc = (IOUnitPage2_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
5328  if (ppage_alloc) {
5329  memset((u8 *)ppage_alloc, 0, data_sz);
5330  cfg.physAddr = page_dma;
5331  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5332 
5333  /* If Good, save data */
5334  if ((rc = mpt_config(ioc, &cfg)) == 0)
5335  ioc->biosVersion = le32_to_cpu(ppage_alloc->BiosVersion);
5336 
5337  pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage_alloc, page_dma);
5338  }
5339 
5340  return rc;
5341 }
5342 
5343 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5364 static int
5365 mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
5366 {
5367  u8 *pbuf;
5368  dma_addr_t buf_dma;
5369  CONFIGPARMS cfg;
5370  ConfigPageHeader_t header;
5371  int ii;
5372  int data, rc = 0;
5373 
5374  /* Allocate memory
5375  */
5376  if (!ioc->spi_data.nvram) {
5377  int sz;
5378  u8 *mem;
5379  sz = MPT_MAX_SCSI_DEVICES * sizeof(int);
5380  mem = kmalloc(sz, GFP_ATOMIC);
5381  if (mem == NULL)
5382  return -EFAULT;
5383 
5384  ioc->spi_data.nvram = (int *) mem;
5385 
5386  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SCSI device NVRAM settings @ %p, sz=%d\n",
5387  ioc->name, ioc->spi_data.nvram, sz));
5388  }
5389 
5390  /* Invalidate NVRAM information
5391  */
5392  for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
5393  ioc->spi_data.nvram[ii] = MPT_HOST_NVRAM_INVALID;
5394  }
5395 
5396  /* Read SPP0 header, allocate memory, then read page.
5397  */
5398  header.PageVersion = 0;
5399  header.PageLength = 0;
5400  header.PageNumber = 0;
5401  header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
5402  cfg.cfghdr.hdr = &header;
5403  cfg.physAddr = -1;
5404  cfg.pageAddr = portnum;
5405  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5406  cfg.dir = 0;
5407  cfg.timeout = 0; /* use default */
5408  if (mpt_config(ioc, &cfg) != 0)
5409  return -EFAULT;
5410 
5411  if (header.PageLength > 0) {
5412  pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
5413  if (pbuf) {
5414  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5415  cfg.physAddr = buf_dma;
5416  if (mpt_config(ioc, &cfg) != 0) {
5417  ioc->spi_data.maxBusWidth = MPT_NARROW;
5418  ioc->spi_data.maxSyncOffset = 0;
5419  ioc->spi_data.minSyncFactor = MPT_ASYNC;
5420  ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN;
5421  rc = 1;
5422  ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
5423  "Unable to read PortPage0 minSyncFactor=%x\n",
5424  ioc->name, ioc->spi_data.minSyncFactor));
5425  } else {
5426  /* Save the Port Page 0 data
5427  */
5428  SCSIPortPage0_t *pPP0 = (SCSIPortPage0_t *) pbuf;
5429  pPP0->Capabilities = le32_to_cpu(pPP0->Capabilities);
5430  pPP0->PhysicalInterface = le32_to_cpu(pPP0->PhysicalInterface);
5431 
5432  if ( (pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_QAS) == 0 ) {
5433  ioc->spi_data.noQas |= MPT_TARGET_NO_NEGO_QAS;
5434  ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
5435  "noQas due to Capabilities=%x\n",
5436  ioc->name, pPP0->Capabilities));
5437  }
5438  ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 1 : 0;
5439  data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK;
5440  if (data) {
5441  ioc->spi_data.maxSyncOffset = (u8) (data >> 16);
5442  data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK;
5443  ioc->spi_data.minSyncFactor = (u8) (data >> 8);
5444  ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
5445  "PortPage0 minSyncFactor=%x\n",
5446  ioc->name, ioc->spi_data.minSyncFactor));
5447  } else {
5448  ioc->spi_data.maxSyncOffset = 0;
5449  ioc->spi_data.minSyncFactor = MPT_ASYNC;
5450  }
5451 
5452  ioc->spi_data.busType = pPP0->PhysicalInterface & MPI_SCSIPORTPAGE0_PHY_SIGNAL_TYPE_MASK;
5453 
5454  /* Update the minSyncFactor based on bus type.
5455  */
5456  if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) ||
5457  (ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE)) {
5458 
5459  if (ioc->spi_data.minSyncFactor < MPT_ULTRA) {
5460  ioc->spi_data.minSyncFactor = MPT_ULTRA;
5461  ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
5462  "HVD or SE detected, minSyncFactor=%x\n",
5463  ioc->name, ioc->spi_data.minSyncFactor));
5464  }
5465  }
5466  }
5467  if (pbuf) {
5468  pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
5469  }
5470  }
5471  }
5472 
5473  /* SCSI Port Page 2 - Read the header then the page.
5474  */
5475  header.PageVersion = 0;
5476  header.PageLength = 0;
5477  header.PageNumber = 2;
5478  header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
5479  cfg.cfghdr.hdr = &header;
5480  cfg.physAddr = -1;
5481  cfg.pageAddr = portnum;
5482  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5483  cfg.dir = 0;
5484  if (mpt_config(ioc, &cfg) != 0)
5485  return -EFAULT;
5486 
5487  if (header.PageLength > 0) {
5488  /* Allocate memory and read SCSI Port Page 2
5489  */
5490  pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
5491  if (pbuf) {
5492  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM;
5493  cfg.physAddr = buf_dma;
5494  if (mpt_config(ioc, &cfg) != 0) {
5495  /* Nvram data is left with INVALID mark
5496  */
5497  rc = 1;
5498  } else if (ioc->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
5499 
5500  /* This is an ATTO adapter, read Page2 accordingly
5501  */
5502  ATTO_SCSIPortPage2_t *pPP2 = (ATTO_SCSIPortPage2_t *) pbuf;
5503  ATTODeviceInfo_t *pdevice = NULL;
5504  u16 ATTOFlags;
5505 
5506  /* Save the Port Page 2 data
5507  * (reformat into a 32bit quantity)
5508  */
5509  for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
5510  pdevice = &pPP2->DeviceSettings[ii];
5511  ATTOFlags = le16_to_cpu(pdevice->ATTOFlags);
5512  data = 0;
5513 
5514  /* Translate ATTO device flags to LSI format
5515  */
5516  if (ATTOFlags & ATTOFLAG_DISC)
5517  data |= (MPI_SCSIPORTPAGE2_DEVICE_DISCONNECT_ENABLE);
5518  if (ATTOFlags & ATTOFLAG_ID_ENB)
5519  data |= (MPI_SCSIPORTPAGE2_DEVICE_ID_SCAN_ENABLE);
5520  if (ATTOFlags & ATTOFLAG_LUN_ENB)
5521  data |= (MPI_SCSIPORTPAGE2_DEVICE_LUN_SCAN_ENABLE);
5522  if (ATTOFlags & ATTOFLAG_TAGGED)
5523  data |= (MPI_SCSIPORTPAGE2_DEVICE_TAG_QUEUE_ENABLE);
5524  if (!(ATTOFlags & ATTOFLAG_WIDE_ENB))
5525  data |= (MPI_SCSIPORTPAGE2_DEVICE_WIDE_DISABLE);
5526 
5527  data = (data << 16) | (pdevice->Period << 8) | 10;
5528  ioc->spi_data.nvram[ii] = data;
5529  }
5530  } else {
5531  SCSIPortPage2_t *pPP2 = (SCSIPortPage2_t *) pbuf;
5532  MpiDeviceInfo_t *pdevice = NULL;
5533 
5534  /*
5535  * Save "Set to Avoid SCSI Bus Resets" flag
5536  */
5537  ioc->spi_data.bus_reset =
5538  (le32_to_cpu(pPP2->PortFlags) &
5539  MPI_SCSIPORTPAGE2_PORT_FLAGS_AVOID_SCSI_RESET) ?
5540  0 : 1 ;
5541 
5542  /* Save the Port Page 2 data
5543  * (reformat into a 32bit quantity)
5544  */
5545  data = le32_to_cpu(pPP2->PortFlags) & MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
5546  ioc->spi_data.PortFlags = data;
5547  for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
5548  pdevice = &pPP2->DeviceSettings[ii];
5549  data = (le16_to_cpu(pdevice->DeviceFlags) << 16) |
5550  (pdevice->SyncFactor << 8) | pdevice->Timeout;
5551  ioc->spi_data.nvram[ii] = data;
5552  }
5553  }
5554 
5555  pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
5556  }
5557  }
5558 
5559  /* Update Adapter limits with those from NVRAM
5560  * Comment: Don't need to do this. Target performance
5561  * parameters will never exceed the adapters limits.
5562  */
5563 
5564  return rc;
5565 }
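 /*
  * Encoding note (annotation): each ioc->spi_data.nvram[] entry packs the
  * per-target negotiation parameters into one 32-bit word:
  *
  *	bits 31-16  device flags (DeviceFlags, or the translated ATTO flags)
  *	bits 15-8   sync factor (SyncFactor, or Period on ATTO adapters)
  *	bits  7-0   timeout
  *
  * Entries still holding MPT_HOST_NVRAM_INVALID mean no NVRAM data was
  * available for that target.
  */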
5566 
5567 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5576 static int
5577 mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
5578 {
5579  CONFIGPARMS cfg;
5580  ConfigPageHeader_t header;
5581 
5582  /* Read the SCSI Device Page 1 header
5583  */
5584  header.PageVersion = 0;
5585  header.PageLength = 0;
5586  header.PageNumber = 1;
5587  header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
5588  cfg.cfghdr.hdr = &header;
5589  cfg.physAddr = -1;
5590  cfg.pageAddr = portnum;
5591  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5592  cfg.dir = 0;
5593  cfg.timeout = 0;
5594  if (mpt_config(ioc, &cfg) != 0)
5595  return -EFAULT;
5596 
5597  ioc->spi_data.sdp1version = cfg.cfghdr.hdr->PageVersion;
5598  ioc->spi_data.sdp1length = cfg.cfghdr.hdr->PageLength;
5599 
5600  header.PageVersion = 0;
5601  header.PageLength = 0;
5602  header.PageNumber = 0;
5603  header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
5604  if (mpt_config(ioc, &cfg) != 0)
5605  return -EFAULT;
5606 
5607  ioc->spi_data.sdp0version = cfg.cfghdr.hdr->PageVersion;
5608  ioc->spi_data.sdp0length = cfg.cfghdr.hdr->PageLength;
5609 
5610  dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Headers: 0: version %d length %d\n",
5611  ioc->name, ioc->spi_data.sdp0version, ioc->spi_data.sdp0length));
5612 
5613  dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Headers: 1: version %d length %d\n",
5614  ioc->name, ioc->spi_data.sdp1version, ioc->spi_data.sdp1length));
5615  return 0;
5616 }
5617 
5622 static void
5623 mpt_inactive_raid_list_free(MPT_ADAPTER *ioc)
5624 {
5625  struct inactive_raid_component_info *component_info, *pNext;
5626 
5627  if (list_empty(&ioc->raid_data.inactive_list))
5628  return;
5629 
5630  mutex_lock(&ioc->raid_data.inactive_list_mutex);
5631  list_for_each_entry_safe(component_info, pNext,
5632  &ioc->raid_data.inactive_list, list) {
5633  list_del(&component_info->list);
5634  kfree(component_info);
5635  }
5636  mutex_unlock(&ioc->raid_data.inactive_list_mutex);
5637 }
5638 
5646 static void
5647 mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
5648 {
5649  CONFIGPARMS cfg;
5650  ConfigPageHeader_t hdr;
5651  dma_addr_t dma_handle;
5652  pRaidVolumePage0_t buffer = NULL;
5653  int i;
5654  RaidPhysDiskPage0_t phys_disk;
5655  struct inactive_raid_component_info *component_info;
5656  int handle_inactive_volumes;
5657 
5658  memset(&cfg, 0 , sizeof(CONFIGPARMS));
5659  memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5660  hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
5661  cfg.pageAddr = (channel << 8) + id;
5662  cfg.cfghdr.hdr = &hdr;
5663  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5664 
5665  if (mpt_config(ioc, &cfg) != 0)
5666  goto out;
5667 
5668  if (!hdr.PageLength)
5669  goto out;
5670 
5671  buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
5672  &dma_handle);
5673 
5674  if (!buffer)
5675  goto out;
5676 
5677  cfg.physAddr = dma_handle;
5678  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5679 
5680  if (mpt_config(ioc, &cfg) != 0)
5681  goto out;
5682 
5683  if (!buffer->NumPhysDisks)
5684  goto out;
5685 
5686  handle_inactive_volumes =
5687  (buffer->VolumeStatus.Flags &
5688  MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE ||
5689  (buffer->VolumeStatus.Flags &
5690  MPI_RAIDVOL0_STATUS_FLAG_ENABLED) == 0) ? 1 : 0;
5691 
5692  if (!handle_inactive_volumes)
5693  goto out;
5694 
5695  mutex_lock(&ioc->raid_data.inactive_list_mutex);
5696  for (i = 0; i < buffer->NumPhysDisks; i++) {
5697  if(mpt_raid_phys_disk_pg0(ioc,
5698  buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
5699  continue;
5700 
5701  if ((component_info = kmalloc(sizeof (*component_info),
5702  GFP_KERNEL)) == NULL)
5703  continue;
5704 
5705  component_info->volumeID = id;
5706  component_info->volumeBus = channel;
5707  component_info->d.PhysDiskNum = phys_disk.PhysDiskNum;
5708  component_info->d.PhysDiskBus = phys_disk.PhysDiskBus;
5709  component_info->d.PhysDiskID = phys_disk.PhysDiskID;
5710  component_info->d.PhysDiskIOC = phys_disk.PhysDiskIOC;
5711 
5712  list_add_tail(&component_info->list,
5713  &ioc->raid_data.inactive_list);
5714  }
5715  mutex_unlock(&ioc->raid_data.inactive_list_mutex);
5716 
5717  out:
5718  if (buffer)
5719  pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
5720  dma_handle);
5721 }
5722 
5734 int
5735 mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
5736  RaidPhysDiskPage0_t *phys_disk)
5737 {
5738  CONFIGPARMS cfg;
5739  ConfigPageHeader_t hdr;
5740  dma_addr_t dma_handle;
5741  pRaidPhysDiskPage0_t buffer = NULL;
5742  int rc;
5743 
5744  memset(&cfg, 0 , sizeof(CONFIGPARMS));
5745  memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5746  memset(phys_disk, 0, sizeof(RaidPhysDiskPage0_t));
5747 
5748  hdr.PageVersion = MPI_RAIDPHYSDISKPAGE0_PAGEVERSION;
5749  hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5750  cfg.cfghdr.hdr = &hdr;
5751  cfg.physAddr = -1;
5752  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5753 
5754  if (mpt_config(ioc, &cfg) != 0) {
5755  rc = -EFAULT;
5756  goto out;
5757  }
5758 
5759  if (!hdr.PageLength) {
5760  rc = -EFAULT;
5761  goto out;
5762  }
5763 
5764  buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
5765  &dma_handle);
5766 
5767  if (!buffer) {
5768  rc = -ENOMEM;
5769  goto out;
5770  }
5771 
5772  cfg.physAddr = dma_handle;
5773  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5774  cfg.pageAddr = phys_disk_num;
5775 
5776  if (mpt_config(ioc, &cfg) != 0) {
5777  rc = -EFAULT;
5778  goto out;
5779  }
5780 
5781  rc = 0;
5782  memcpy(phys_disk, buffer, sizeof(*buffer));
5783  phys_disk->MaxLBA = le32_to_cpu(buffer->MaxLBA);
5784 
5785  out:
5786 
5787  if (buffer)
5788  pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
5789  dma_handle);
5790 
5791  return rc;
5792 }
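 /*
  * Usage sketch (annotation): callers typically walk a volume's member disks
  * and fetch RAID Physical Disk Page 0 for each one, e.g.
  *
  *	RaidPhysDiskPage0_t pd;
  *
  *	if (mpt_raid_phys_disk_pg0(ioc, phys_disk_num, &pd) == 0)
  *		printk(KERN_INFO "disk %d: bus %d id %d\n",
  *		       pd.PhysDiskNum, pd.PhysDiskBus, pd.PhysDiskID);
  *
  * The page is copied wholesale; only MaxLBA is byte-swapped here.
  */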
5793 
5802 int
5803 mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
5804 {
5805  CONFIGPARMS cfg;
5806  ConfigPageHeader_t hdr;
5807  dma_addr_t dma_handle;
5808  pRaidPhysDiskPage1_t buffer = NULL;
5809  int rc;
5810 
5811  memset(&cfg, 0 , sizeof(CONFIGPARMS));
5812  memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5813 
5814  hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
5815  hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5816  hdr.PageNumber = 1;
5817  cfg.cfghdr.hdr = &hdr;
5818  cfg.physAddr = -1;
5819  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5820 
5821  if (mpt_config(ioc, &cfg) != 0) {
5822  rc = 0;
5823  goto out;
5824  }
5825 
5826  if (!hdr.PageLength) {
5827  rc = 0;
5828  goto out;
5829  }
5830 
5831  buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
5832  &dma_handle);
5833 
5834  if (!buffer) {
5835  rc = 0;
5836  goto out;
5837  }
5838 
5839  cfg.physAddr = dma_handle;
5840  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5841  cfg.pageAddr = phys_disk_num;
5842 
5843  if (mpt_config(ioc, &cfg) != 0) {
5844  rc = 0;
5845  goto out;
5846  }
5847 
5848  rc = buffer->NumPhysDiskPaths;
5849  out:
5850 
5851  if (buffer)
5852  pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
5853  dma_handle);
5854 
5855  return rc;
5856 }
5858 
5870 int
5871 mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
5872  RaidPhysDiskPage1_t *phys_disk)
5873 {
5874  CONFIGPARMS cfg;
5875  ConfigPageHeader_t hdr;
5876  dma_addr_t dma_handle;
5877  pRaidPhysDiskPage1_t buffer = NULL;
5878  int rc;
5879  int i;
5880  __le64 sas_address;
5881 
5882  memset(&cfg, 0 , sizeof(CONFIGPARMS));
5883  memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5884  rc = 0;
5885 
5886  hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
5887  hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5888  hdr.PageNumber = 1;
5889  cfg.cfghdr.hdr = &hdr;
5890  cfg.physAddr = -1;
5891  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5892 
5893  if (mpt_config(ioc, &cfg) != 0) {
5894  rc = -EFAULT;
5895  goto out;
5896  }
5897 
5898  if (!hdr.PageLength) {
5899  rc = -EFAULT;
5900  goto out;
5901  }
5902 
5903  buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
5904  &dma_handle);
5905 
5906  if (!buffer) {
5907  rc = -ENOMEM;
5908  goto out;
5909  }
5910 
5911  cfg.physAddr = dma_handle;
5912  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5913  cfg.pageAddr = phys_disk_num;
5914 
5915  if (mpt_config(ioc, &cfg) != 0) {
5916  rc = -EFAULT;
5917  goto out;
5918  }
5919 
5920  phys_disk->NumPhysDiskPaths = buffer->NumPhysDiskPaths;
5921  phys_disk->PhysDiskNum = phys_disk_num;
5922  for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) {
5923  phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID;
5924  phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus;
5925  phys_disk->Path[i].OwnerIdentifier =
5926  buffer->Path[i].OwnerIdentifier;
5927  phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags);
5928  memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64));
5929  sas_address = le64_to_cpu(sas_address);
5930  memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64));
5931  memcpy(&sas_address,
5932  &buffer->Path[i].OwnerWWID, sizeof(__le64));
5933  sas_address = le64_to_cpu(sas_address);
5934  memcpy(&phys_disk->Path[i].OwnerWWID,
5935  &sas_address, sizeof(__le64));
5936  }
5937 
5938  out:
5939 
5940  if (buffer)
5941  pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
5942  dma_handle);
5943 
5944  return rc;
5945 }
5947 
5948 
5958 int
5959 mpt_findImVolumes(MPT_ADAPTER *ioc)
5960 {
5961  IOCPage2_t *pIoc2;
5962  u8 *mem;
5963  dma_addr_t ioc2_dma;
5964  CONFIGPARMS cfg;
5965  ConfigPageHeader_t header;
5966  int rc = 0;
5967  int iocpage2sz;
5968  int i;
5969 
5970  if (!ioc->ir_firmware)
5971  return 0;
5972 
5973  /* Free the old page
5974  */
5975  kfree(ioc->raid_data.pIocPg2);
5976  ioc->raid_data.pIocPg2 = NULL;
5977  mpt_inactive_raid_list_free(ioc);
5978 
5979  /* Read IOCP2 header then the page.
5980  */
5981  header.PageVersion = 0;
5982  header.PageLength = 0;
5983  header.PageNumber = 2;
5984  header.PageType = MPI_CONFIG_PAGETYPE_IOC;
5985  cfg.cfghdr.hdr = &header;
5986  cfg.physAddr = -1;
5987  cfg.pageAddr = 0;
5988  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5989  cfg.dir = 0;
5990  cfg.timeout = 0;
5991  if (mpt_config(ioc, &cfg) != 0)
5992  return -EFAULT;
5993 
5994  if (header.PageLength == 0)
5995  return -EFAULT;
5996 
5997  iocpage2sz = header.PageLength * 4;
5998  pIoc2 = pci_alloc_consistent(ioc->pcidev, iocpage2sz, &ioc2_dma);
5999  if (!pIoc2)
6000  return -ENOMEM;
6001 
6002  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
6003  cfg.physAddr = ioc2_dma;
6004  if (mpt_config(ioc, &cfg) != 0)
6005  goto out;
6006 
6007  mem = kmalloc(iocpage2sz, GFP_KERNEL);
6008  if (!mem) {
6009  rc = -ENOMEM;
6010  goto out;
6011  }
6012 
6013  memcpy(mem, (u8 *)pIoc2, iocpage2sz);
6014  ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;
6015 
6016  mpt_read_ioc_pg_3(ioc);
6017 
6018  for (i = 0; i < pIoc2->NumActiveVolumes ; i++)
6019  mpt_inactive_raid_volumes(ioc,
6020  pIoc2->RaidVolume[i].VolumeBus,
6021  pIoc2->RaidVolume[i].VolumeID);
6022 
6023  out:
6024  pci_free_consistent(ioc->pcidev, iocpage2sz, pIoc2, ioc2_dma);
6025 
6026  return rc;
6027 }
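 /*
  * Annotation: IOC Page 2 enumerates the active RAID volumes and is cached
  * in ioc->raid_data.pIocPg2.  mpt_read_ioc_pg_3() (below) caches the
  * physical-disk list, while mpt_inactive_raid_volumes() records components
  * of inactive or disabled volumes so the SCSI layer can hide them.
  */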
6028 
6029 static int
6030 mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
6031 {
6032  IOCPage3_t *pIoc3;
6033  u8 *mem;
6034  CONFIGPARMS cfg;
6035  ConfigPageHeader_t header;
6036  dma_addr_t ioc3_dma;
6037  int iocpage3sz = 0;
6038 
6039  /* Free the old page
6040  */
6041  kfree(ioc->raid_data.pIocPg3);
6042  ioc->raid_data.pIocPg3 = NULL;
6043 
6044  /* There is at least one physical disk.
6045  * Read and save IOC Page 3
6046  */
6047  header.PageVersion = 0;
6048  header.PageLength = 0;
6049  header.PageNumber = 3;
6050  header.PageType = MPI_CONFIG_PAGETYPE_IOC;
6051  cfg.cfghdr.hdr = &header;
6052  cfg.physAddr = -1;
6053  cfg.pageAddr = 0;
6054  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
6055  cfg.dir = 0;
6056  cfg.timeout = 0;
6057  if (mpt_config(ioc, &cfg) != 0)
6058  return 0;
6059 
6060  if (header.PageLength == 0)
6061  return 0;
6062 
6063  /* Read Header good, alloc memory
6064  */
6065  iocpage3sz = header.PageLength * 4;
6066  pIoc3 = pci_alloc_consistent(ioc->pcidev, iocpage3sz, &ioc3_dma);
6067  if (!pIoc3)
6068  return 0;
6069 
6070  /* Read the Page and save the data
6071  * into malloc'd memory.
6072  */
6073  cfg.physAddr = ioc3_dma;
6074  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
6075  if (mpt_config(ioc, &cfg) == 0) {
6076  mem = kmalloc(iocpage3sz, GFP_KERNEL);
6077  if (mem) {
6078  memcpy(mem, (u8 *)pIoc3, iocpage3sz);
6079  ioc->raid_data.pIocPg3 = (IOCPage3_t *) mem;
6080  }
6081  }
6082 
6083  pci_free_consistent(ioc->pcidev, iocpage3sz, pIoc3, ioc3_dma);
6084 
6085  return 0;
6086 }
6087 
6088 static void
6089 mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
6090 {
6091  IOCPage4_t *pIoc4;
6092  CONFIGPARMS cfg;
6093  ConfigPageHeader_t header;
6094  dma_addr_t ioc4_dma;
6095  int iocpage4sz;
6096 
6097  /* Read and save IOC Page 4
6098  */
6099  header.PageVersion = 0;
6100  header.PageLength = 0;
6101  header.PageNumber = 4;
6102  header.PageType = MPI_CONFIG_PAGETYPE_IOC;
6103  cfg.cfghdr.hdr = &header;
6104  cfg.physAddr = -1;
6105  cfg.pageAddr = 0;
6106  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
6107  cfg.dir = 0;
6108  cfg.timeout = 0;
6109  if (mpt_config(ioc, &cfg) != 0)
6110  return;
6111 
6112  if (header.PageLength == 0)
6113  return;
6114 
6115  if ( (pIoc4 = ioc->spi_data.pIocPg4) == NULL ) {
6116  iocpage4sz = (header.PageLength + 4) * 4; /* Allow 4 additional SEP's */
6117  pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma);
6118  if (!pIoc4)
6119  return;
6120  ioc->alloc_total += iocpage4sz;
6121  } else {
6122  ioc4_dma = ioc->spi_data.IocPg4_dma;
6123  iocpage4sz = ioc->spi_data.IocPg4Sz;
6124  }
6125 
6126  /* Read the Page into dma memory.
6127  */
6128  cfg.physAddr = ioc4_dma;
6129  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
6130  if (mpt_config(ioc, &cfg) == 0) {
6131  ioc->spi_data.pIocPg4 = (IOCPage4_t *) pIoc4;
6132  ioc->spi_data.IocPg4_dma = ioc4_dma;
6133  ioc->spi_data.IocPg4Sz = iocpage4sz;
6134  } else {
6135  pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma);
6136  ioc->spi_data.pIocPg4 = NULL;
6137  ioc->alloc_total -= iocpage4sz;
6138  }
6139 }
6140 
6141 static void
6142 mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
6143 {
6144  IOCPage1_t *pIoc1;
6145  CONFIGPARMS cfg;
6146  ConfigPageHeader_t header;
6147  dma_addr_t ioc1_dma;
6148  int iocpage1sz = 0;
6149  u32 tmp;
6150 
6151  /* Check the Coalescing Timeout in IOC Page 1
6152  */
6153  header.PageVersion = 0;
6154  header.PageLength = 0;
6155  header.PageNumber = 1;
6156  header.PageType = MPI_CONFIG_PAGETYPE_IOC;
6157  cfg.cfghdr.hdr = &header;
6158  cfg.physAddr = -1;
6159  cfg.pageAddr = 0;
6160  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
6161  cfg.dir = 0;
6162  cfg.timeout = 0;
6163  if (mpt_config(ioc, &cfg) != 0)
6164  return;
6165 
6166  if (header.PageLength == 0)
6167  return;
6168 
6169  /* Read Header good, alloc memory
6170  */
6171  iocpage1sz = header.PageLength * 4;
6172  pIoc1 = pci_alloc_consistent(ioc->pcidev, iocpage1sz, &ioc1_dma);
6173  if (!pIoc1)
6174  return;
6175 
6176  /* Read the Page and check coalescing timeout
6177  */
6178  cfg.physAddr = ioc1_dma;
6179  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
6180  if (mpt_config(ioc, &cfg) == 0) {
6181 
6182  tmp = le32_to_cpu(pIoc1->Flags) & MPI_IOCPAGE1_REPLY_COALESCING;
6183  if (tmp == MPI_IOCPAGE1_REPLY_COALESCING) {
6184  tmp = le32_to_cpu(pIoc1->CoalescingTimeout);
6185 
6186  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Coalescing Enabled Timeout = %d\n",
6187  ioc->name, tmp));
6188 
6189  if (tmp > MPT_COALESCING_TIMEOUT) {
6190  pIoc1->CoalescingTimeout = cpu_to_le32(MPT_COALESCING_TIMEOUT);
6191 
6192  /* Write NVRAM and current
6193  */
6194  cfg.dir = 1;
6195  cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
6196  if (mpt_config(ioc, &cfg) == 0) {
6197  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Reset Current Coalescing Timeout to = %d\n",
6198  ioc->name, MPT_COALESCING_TIMEOUT));
6199 
6200  cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM;
6201  if (mpt_config(ioc, &cfg) == 0) {
6202  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6203  "Reset NVRAM Coalescing Timeout to = %d\n",
6204  ioc->name, MPT_COALESCING_TIMEOUT));
6205  } else {
6206  dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6207  "Reset NVRAM Coalescing Timeout Failed\n",
6208  ioc->name));
6209  }
6210 
6211  } else {
6212  dprintk(ioc, printk(MYIOC_s_WARN_FMT
6213  "Reset of Current Coalescing Timeout Failed!\n",
6214  ioc->name));
6215  }
6216  }
6217 
6218  } else {
6219  dprintk(ioc, printk(MYIOC_s_WARN_FMT "Coalescing Disabled\n", ioc->name));
6220  }
6221  }
6222 
6223  pci_free_consistent(ioc->pcidev, iocpage1sz, pIoc1, ioc1_dma);
6224 
6225  return;
6226 }
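 /*
  * Annotation: if IOC Page 1 reports reply coalescing enabled with a timeout
  * larger than MPT_COALESCING_TIMEOUT, the routine above rewrites the page
  * (current and NVRAM copies) to clamp the timeout, trading a slightly
  * higher interrupt rate for lower reply latency.
  */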
6227 
6228 static void
6229 mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
6230 {
6231  CONFIGPARMS cfg;
6232  ConfigPageHeader_t hdr;
6233  dma_addr_t buf_dma;
6234  ManufacturingPage0_t *pbuf = NULL;
6235 
6236  memset(&cfg, 0 , sizeof(CONFIGPARMS));
6237  memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
6238 
6239  hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
6240  cfg.cfghdr.hdr = &hdr;
6241  cfg.physAddr = -1;
6242  cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
6243  cfg.timeout = 10;
6244 
6245  if (mpt_config(ioc, &cfg) != 0)
6246  goto out;
6247 
6248  if (!cfg.cfghdr.hdr->PageLength)
6249  goto out;
6250 
6251  cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
6252  pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
6253  if (!pbuf)
6254  goto out;
6255 
6256  cfg.physAddr = buf_dma;
6257 
6258  if (mpt_config(ioc, &cfg) != 0)
6259  goto out;
6260 
6261  memcpy(ioc->board_name, pbuf->BoardName, sizeof(ioc->board_name));
6262  memcpy(ioc->board_assembly, pbuf->BoardAssembly, sizeof(ioc->board_assembly));
6263  memcpy(ioc->board_tracer, pbuf->BoardTracerNumber, sizeof(ioc->board_tracer));
6264 
6265  out:
6266 
6267  if (pbuf)
6268  pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
6269 }
6270 
6271 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6278 static int
6279 SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag)
6280 {
6281  EventNotification_t evn;
6282  MPIDefaultReply_t reply_buf;
6283 
6284  memset(&evn, 0, sizeof(EventNotification_t));
6285  memset(&reply_buf, 0, sizeof(MPIDefaultReply_t));
6286 
6287  evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION;
6288  evn.Switch = EvSwitch;
6289  evn.MsgContext = cpu_to_le32(mpt_base_index << 16);
6290 
6291  devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6292  "Sending EventNotification (%d) request %p\n",
6293  ioc->name, EvSwitch, &evn));
6294 
6295  return mpt_handshake_req_reply_wait(ioc, sizeof(EventNotification_t),
6296  (u32 *)&evn, sizeof(MPIDefaultReply_t), (u16 *)&reply_buf, 30,
6297  sleepFlag);
6298 }
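 /*
  * Annotation: event notification is armed over the doorbell handshake path
  * (mpt_handshake_req_reply_wait) rather than through a message frame,
  * because it is issued while the IOC is still being brought up.
  */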
6299 
6300 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6306 static int
6307 SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
6308 {
6309  EventAck_t *pAck;
6310 
6311  if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
6312  dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
6313  ioc->name, __func__));
6314  return -1;
6315  }
6316 
6317  devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending EventAck\n", ioc->name));
6318 
6319  pAck->Function = MPI_FUNCTION_EVENT_ACK;
6320  pAck->ChainOffset = 0;
6321  pAck->Reserved[0] = pAck->Reserved[1] = 0;
6322  pAck->MsgFlags = 0;
6323  pAck->Reserved1[0] = pAck->Reserved1[1] = pAck->Reserved1[2] = 0;
6324  pAck->Event = evnp->Event;
6325  pAck->EventContext = evnp->EventContext;
6326 
6327  mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)pAck);
6328 
6329  return 0;
6330 }
6331 
6332 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6346 int
6347 mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
6348 {
6349  Config_t *pReq;
6350  ConfigReply_t *pReply;
6351  ConfigExtendedPageHeader_t *pExtHdr = NULL;
6352  MPT_FRAME_HDR *mf;
6353  int ii;
6354  int flagsLength;
6355  long timeout;
6356  int ret;
6357  u8 page_type = 0, extend_page;
6358  unsigned long timeleft;
6359  unsigned long flags;
6360  int in_isr;
6361  u8 issue_hard_reset = 0;
6362  u8 retry_count = 0;
6363 
6364  /* Prevent calling wait_event() (below), if caller happens
6365  * to be in ISR context, because that is fatal!
6366  */
6367  in_isr = in_interrupt();
6368  if (in_isr) {
6369  dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n",
6370  ioc->name));
6371  return -EPERM;
6372  }
6373 
6374  /* don't send a config page during diag reset */
6375  spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6376  if (ioc->ioc_reset_in_progress) {
6377  dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6378  "%s: busy with host reset\n", ioc->name, __func__));
6379  spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6380  return -EBUSY;
6381  }
6382  spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6383 
6384  /* don't send if no chance of success */
6385  if (!ioc->active ||
6386  mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL) {
6387  dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6388  "%s: ioc not operational, %d, %xh\n",
6389  ioc->name, __func__, ioc->active,
6390  mpt_GetIocState(ioc, 0)));
6391  return -EFAULT;
6392  }
6393 
6394  retry_config:
6395  mutex_lock(&ioc->mptbase_cmds.mutex);
6396  /* init the internal cmd struct */
6397  memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
6398  INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
6399 
6400  /* Get and Populate a free Frame
6401  */
6402  if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
6403  dcprintk(ioc, printk(MYIOC_s_WARN_FMT
6404  "mpt_config: no msg frames!\n", ioc->name));
6405  ret = -EAGAIN;
6406  goto out;
6407  }
6408 
6409  pReq = (Config_t *)mf;
6410  pReq->Action = pCfg->action;
6411  pReq->Reserved = 0;
6412  pReq->ChainOffset = 0;
6413  pReq->Function = MPI_FUNCTION_CONFIG;
6414 
6415  /* Assume page type is not extended and clear "reserved" fields. */
6416  pReq->ExtPageLength = 0;
6417  pReq->ExtPageType = 0;
6418  pReq->MsgFlags = 0;
6419 
6420  for (ii=0; ii < 8; ii++)
6421  pReq->Reserved2[ii] = 0;
6422 
6423  pReq->Header.PageVersion = pCfg->cfghdr.hdr->PageVersion;
6424  pReq->Header.PageLength = pCfg->cfghdr.hdr->PageLength;
6425  pReq->Header.PageNumber = pCfg->cfghdr.hdr->PageNumber;
6426  pReq->Header.PageType = (pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
6427 
6428  if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) {
6429  pExtHdr = (ConfigExtendedPageHeader_t *)pCfg->cfghdr.ehdr;
6430  pReq->ExtPageLength = cpu_to_le16(pExtHdr->ExtPageLength);
6431  pReq->ExtPageType = pExtHdr->ExtPageType;
6432  pReq->MsgFlags = MPI_CONFIG_MSGFLAGS_EXTENDED_PAGE_TYPE;
6433 
6434  /* Page Length must be treated as a reserved field for the
6435  * extended header.
6436  */
6437  pReq->Header.PageLength = 0;
6438  }
6439 
6440  pReq->PageAddress = cpu_to_le32(pCfg->pageAddr);
6441 
6442  /* Add a SGE to the config request.
6443  */
6444  if (pCfg->dir)
6445  flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
6446  else
6447  flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
6448 
6449  if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
6450  MPI_CONFIG_PAGETYPE_EXTENDED) {
6451  flagsLength |= pExtHdr->ExtPageLength * 4;
6452  page_type = pReq->ExtPageType;
6453  extend_page = 1;
6454  } else {
6455  flagsLength |= pCfg->cfghdr.hdr->PageLength * 4;
6456  page_type = pReq->Header.PageType;
6457  extend_page = 0;
6458  }
6459 
6460  dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6461  "Sending Config request type 0x%x, page 0x%x and action %d\n",
6462  ioc->name, page_type, pReq->Header.PageNumber, pReq->Action));
6463 
6464  ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
6465  timeout = (pCfg->timeout < 15) ? HZ*15 : HZ*pCfg->timeout;
6466  mpt_put_msg_frame(mpt_base_index, ioc, mf);
6467  timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done,
6468  timeout);
6469  if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
6470  ret = -ETIME;
6471  dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6472  "Failed Sending Config request type 0x%x, page 0x%x,"
6473  " action %d, status %xh, time left %ld\n\n",
6474  ioc->name, page_type, pReq->Header.PageNumber,
6475  pReq->Action, ioc->mptbase_cmds.status, timeleft));
6476  if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
6477  goto out;
6478  if (!timeleft) {
6479  spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6480  if (ioc->ioc_reset_in_progress) {
6481  spin_unlock_irqrestore(&ioc->taskmgmt_lock,
6482  flags);
6483  printk(MYIOC_s_INFO_FMT "%s: host reset in"
6484  " progress, mpt_config timed out!!\n",
6485  ioc->name, __func__);
6486  mutex_unlock(&ioc->mptbase_cmds.mutex);
6487  return -EFAULT;
6488  }
6489  spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6490  issue_hard_reset = 1;
6491  }
6492  goto out;
6493  }
6494 
6495  if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
6496  ret = -1;
6497  goto out;
6498  }
6499  pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply;
6500  ret = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
6501  if (ret == MPI_IOCSTATUS_SUCCESS) {
6502  if (extend_page) {
6503  pCfg->cfghdr.ehdr->ExtPageLength =
6504  le16_to_cpu(pReply->ExtPageLength);
6505  pCfg->cfghdr.ehdr->ExtPageType =
6506  pReply->ExtPageType;
6507  }
6508  pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
6509  pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
6510  pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
6511  pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
6512 
6513  }
6514 
6515  if (retry_count)
6516  printk(MYIOC_s_INFO_FMT "Retry completed "
6517  "ret=0x%x timeleft=%ld\n",
6518  ioc->name, ret, timeleft);
6519 
6520  dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n",
6521  ret, le32_to_cpu(pReply->IOCLogInfo)));
6522 
6523 out:
6524 
6525  CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
6526  mutex_unlock(&ioc->mptbase_cmds.mutex);
6527  if (issue_hard_reset) {
6528  issue_hard_reset = 0;
6529  printk(MYIOC_s_WARN_FMT
6530  "Issuing Reset from %s!!, doorbell=0x%08x\n",
6531  ioc->name, __func__, mpt_GetIocState(ioc, 0));
6532  if (retry_count == 0) {
6533  if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0)
6534  retry_count++;
6535  } else
6536  mpt_HardResetHandler(ioc, CAN_SLEEP);
6537 
6538  mpt_free_msg_frame(ioc, mf);
6539  /* attempt one retry for a timed out command */
6540  if (retry_count < 2) {
6541  printk(MYIOC_s_INFO_FMT
6542  "Attempting Retry Config request"
6543  " type 0x%x, page 0x%x,"
6544  " action %d\n", ioc->name, page_type,
6545  pCfg->cfghdr.hdr->PageNumber, pCfg->action);
6546  retry_count++;
6547  goto retry_config;
6548  }
6549  }
6550  return ret;
6551 
6552 }
6553 
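/*
 * Minimal usage sketch for mpt_config(), patterned after other config-page
 * readers in this driver (e.g. the IO Unit Page 2 reader).  The helper name
 * mpt_config_usage_sketch and the choice of page below are illustrative
 * assumptions, not part of the original mptbase.c; the two-step
 * header-then-read sequence and the CONFIGPARMS fields mirror the function
 * above.
 */
static int
mpt_config_usage_sketch(MPT_ADAPTER *ioc)
{
	ConfigPageHeader_t	 header;
	CONFIGPARMS		 cfg;
	IOUnitPage2_t		*page;
	dma_addr_t		 page_dma;
	int			 rc;

	/* Step 1: fetch the page header so we learn the page length. */
	header.PageVersion = 0;
	header.PageLength = 0;
	header.PageNumber = 2;
	header.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT;
	cfg.cfghdr.hdr = &header;
	cfg.physAddr = -1;		/* no buffer for the header action */
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;			/* read */
	cfg.pageAddr = 0;
	cfg.timeout = 0;		/* take the 15 second default */
	rc = mpt_config(ioc, &cfg);
	if (rc != 0 || header.PageLength == 0)
		return rc ? rc : -ENODEV;

	/* Step 2: read the current page into a DMA-able buffer. */
	page = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4,
				    &page_dma);
	if (!page)
		return -ENOMEM;
	cfg.physAddr = page_dma;
	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	rc = mpt_config(ioc, &cfg);
	/* a real caller would consume *page here before freeing it */
	pci_free_consistent(ioc->pcidev, header.PageLength * 4,
			    page, page_dma);
	return rc;
}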
6554 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
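6555 /**
6556  * mpt_ioc_reset - Base cleanup for hard reset
6557  * @ioc: Pointer to the adapter structure
6558  * @reset_phase: Indicates pre- or post-reset functionality
6559  *
6560  * Remark: Frees resources with internally generated commands.
6561  */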
6562 static int
6563 mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
6564 {
6565  switch (reset_phase) {
6566  case MPT_IOC_SETUP_RESET:
6567  ioc->taskmgmt_quiesce_io = 1;
6568  dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6569  "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
6570  break;
6571  case MPT_IOC_PRE_RESET:
6572  dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6573  "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
6574  break;
6575  case MPT_IOC_POST_RESET:
6576  dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6577  "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
6578 /* wake up mptbase_cmds */
6579  if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
6580  ioc->mptbase_cmds.status |=
6581  MPT_MGMT_STATUS_DID_IOCRESET;
6582  complete(&ioc->mptbase_cmds.done);
6583  }
6584 /* wake up taskmgmt_cmds */
6585  if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
6586  ioc->taskmgmt_cmds.status |=
6587  MPT_MGMT_STATUS_DID_IOCRESET;
6588  complete(&ioc->taskmgmt_cmds.done);
6589  }
6590  break;
6591  default:
6592  break;
6593  }
6594 
6595  return 1; /* currently means nothing really */
6596 }
6597 
6598 
6599 #ifdef CONFIG_PROC_FS /* { */
6600 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6601 /*
6602  * procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff...
6603  */
6604 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
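6605 /**
6606  * procmpt_create - Create %MPT_PROCFS_MPTBASEDIR entries.
6607  *
6608  * Returns 0 for success, non-zero for failure.
6609  */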
6610 static int
6611 procmpt_create(void)
6612 {
6613  mpt_proc_root_dir = proc_mkdir(MPT_PROCFS_MPTBASEDIR, NULL);
6614  if (mpt_proc_root_dir == NULL)
6615  return -ENOTDIR;
6616 
6617  proc_create("summary", S_IRUGO, mpt_proc_root_dir, &mpt_summary_proc_fops);
6618  proc_create("version", S_IRUGO, mpt_proc_root_dir, &mpt_version_proc_fops);
6619  return 0;
6620 }
6621 
6622 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
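6623 /**
6624  * procmpt_destroy - Tear down %MPT_PROCFS_MPTBASEDIR entries.
6625  *
6626  * Returns 0 for success, non-zero for failure.
6627  */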
6628 static void
6629 procmpt_destroy(void)
6630 {
6631  remove_proc_entry("version", mpt_proc_root_dir);
6632  remove_proc_entry("summary", mpt_proc_root_dir);
6633  remove_proc_entry(MPT_PROCFS_MPTBASEDIR, NULL);
6634 }
6635 
6636 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6637 /*
6638  * Handles read request from /proc/mpt/summary or /proc/mpt/iocN/summary.
6639  */
6640 static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan);
6641 
6642 static int mpt_summary_proc_show(struct seq_file *m, void *v)
6643 {
6644  MPT_ADAPTER *ioc = m->private;
6645 
6646  if (ioc) {
6647  seq_mpt_print_ioc_summary(ioc, m, 1);
6648  } else {
6649  list_for_each_entry(ioc, &ioc_list, list) {
6650  seq_mpt_print_ioc_summary(ioc, m, 1);
6651  }
6652  }
6653 
6654  return 0;
6655 }
6656 
6657 static int mpt_summary_proc_open(struct inode *inode, struct file *file)
6658 {
6659  return single_open(file, mpt_summary_proc_show, PDE(inode)->data);
6660 }
6661 
6662 static const struct file_operations mpt_summary_proc_fops = {
6663  .owner = THIS_MODULE,
6664  .open = mpt_summary_proc_open,
6665  .read = seq_read,
6666  .llseek =