1 /*
2  3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
3 
4  Written By: Adam Radford <[email protected]>
5  Modifications By: Tom Couch <[email protected]>
6 
7  Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8  Copyright (C) 2010 LSI Corporation.
9 
10  This program is free software; you can redistribute it and/or modify
11  it under the terms of the GNU General Public License as published by
12  the Free Software Foundation; version 2 of the License.
13 
14  This program is distributed in the hope that it will be useful,
15  but WITHOUT ANY WARRANTY; without even the implied warranty of
16  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  GNU General Public License for more details.
18 
19  NO WARRANTY
20  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  solely responsible for determining the appropriateness of using and
25  distributing the Program and assumes all risks associated with its
26  exercise of rights under this Agreement, including but not limited to
27  the risks and costs of program errors, damage to or loss of data,
28  programs or equipment, and unavailability or interruption of operations.
29 
30  DISCLAIMER OF LIABILITY
31  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39  You should have received a copy of the GNU General Public License
40  along with this program; if not, write to the Free Software
41  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
42 
43  Bugs/Comments/Suggestions should be mailed to:
45 
46  For more information, go to:
47  http://www.lsi.com
48 
49  Note: This version of the driver does not contain a bundled firmware
50  image.
51 
52  History
53  -------
54  2.26.02.000 - Driver cleanup for kernel submission.
55  2.26.02.001 - Replace schedule_timeout() calls with msleep().
56  2.26.02.002 - Add support for PAE mode.
57  Add lun support.
58  Fix twa_remove() to free irq handler/unregister_chrdev()
59  before shutting down card.
60  Change to new 'change_queue_depth' api.
61  Fix 'handled=1' ISR usage, remove bogus IRQ check.
62  Remove un-needed eh_abort handler.
63  Add support for embedded firmware error strings.
64  2.26.02.003 - Correctly handle single sgl's with use_sg=1.
65  2.26.02.004 - Add support for 9550SX controllers.
66  2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
67  2.26.02.006 - Fix 9550SX pchip reset timeout.
68  Add big endian support.
69  2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
70  2.26.02.008 - Free irq handler in __twa_shutdown().
71  Serialize reset code.
72  Add support for 9650SE controllers.
73  2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
74  2.26.02.010 - Add support for 9690SA controllers.
75  2.26.02.011 - Increase max AENs drained to 256.
76  Add MSI support and "use_msi" module parameter.
77  Fix bug in twa_get_param() on 4GB+.
78  Use pci_resource_len() for ioremap().
79  2.26.02.012 - Add power management support.
80  2.26.02.013 - Fix bug in twa_load_sgl().
81  2.26.02.014 - Force 60 second timeout default.
82 */
83 
84 #include <linux/module.h>
85 #include <linux/reboot.h>
86 #include <linux/spinlock.h>
87 #include <linux/interrupt.h>
88 #include <linux/moduleparam.h>
89 #include <linux/errno.h>
90 #include <linux/types.h>
91 #include <linux/delay.h>
92 #include <linux/pci.h>
93 #include <linux/time.h>
94 #include <linux/mutex.h>
95 #include <linux/slab.h>
96 #include <asm/io.h>
97 #include <asm/irq.h>
98 #include <asm/uaccess.h>
99 #include <scsi/scsi.h>
100 #include <scsi/scsi_host.h>
101 #include <scsi/scsi_tcq.h>
102 #include <scsi/scsi_cmnd.h>
103 #include "3w-9xxx.h"
104 
105 /* Globals */
106 #define TW_DRIVER_VERSION "2.26.02.014"
107 static DEFINE_MUTEX(twa_chrdev_mutex);
108 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
109 static unsigned int twa_device_extension_count;
110 static int twa_major = -1;
111 extern struct timezone sys_tz;
112 
113 /* Module parameters */
114 MODULE_AUTHOR ("LSI");
115 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
116 MODULE_LICENSE("GPL");
117 MODULE_VERSION(TW_DRIVER_VERSION);
118 
119 static int use_msi = 0;
120 module_param(use_msi, int, S_IRUGO);
121 MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
122 
123 /* Function prototypes */
124 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
125 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
126 static char *twa_aen_severity_lookup(unsigned char severity_code);
127 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
128 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
129 static int twa_chrdev_open(struct inode *inode, struct file *file);
130 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
131 static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
132 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
133 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
134  u32 set_features, unsigned short current_fw_srl,
135  unsigned short current_fw_arch_id,
136  unsigned short current_fw_branch,
137  unsigned short current_fw_build,
138  unsigned short *fw_on_ctlr_srl,
139  unsigned short *fw_on_ctlr_arch_id,
140  unsigned short *fw_on_ctlr_branch,
141  unsigned short *fw_on_ctlr_build,
142  u32 *init_connect_result);
143 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
144 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
145 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
146 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
147 static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
148 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
149 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
150 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
151 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
152 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
153 
154 /* Functions */
155 
156 /* Show some statistics about the card */
157 static ssize_t twa_show_stats(struct device *dev,
158  struct device_attribute *attr, char *buf)
159 {
160  struct Scsi_Host *host = class_to_shost(dev);
161  TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
162  unsigned long flags = 0;
163  ssize_t len;
164 
165  spin_lock_irqsave(tw_dev->host->host_lock, flags);
166  len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
167  "Current commands posted: %4d\n"
168  "Max commands posted: %4d\n"
169  "Current pending commands: %4d\n"
170  "Max pending commands: %4d\n"
171  "Last sgl length: %4d\n"
172  "Max sgl length: %4d\n"
173  "Last sector count: %4d\n"
174  "Max sector count: %4d\n"
175  "SCSI Host Resets: %4d\n"
176  "AEN's: %4d\n",
177  TW_DRIVER_VERSION,
178  tw_dev->posted_request_count,
179  tw_dev->max_posted_request_count,
180  tw_dev->pending_request_count,
181  tw_dev->max_pending_request_count,
182  tw_dev->sgl_entries,
183  tw_dev->max_sgl_entries,
184  tw_dev->sector_count,
185  tw_dev->max_sector_count,
186  tw_dev->num_resets,
187  tw_dev->aen_count);
188  spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
189  return len;
190 } /* End twa_show_stats() */
191 
192 /* This function will set a device's queue depth */
193 static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
194  int reason)
195 {
196  if (reason != SCSI_QDEPTH_DEFAULT)
197  return -EOPNOTSUPP;
198 
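 /* The clamp below leaves headroom under TW_Q_LENGTH, presumably so request ids stay free for internal driver commands (AEN reads, char-dev ioctls). */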
199  if (queue_depth > TW_Q_LENGTH-2)
200  queue_depth = TW_Q_LENGTH-2;
201  scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
202  return queue_depth;
203 } /* End twa_change_queue_depth() */
204 
205 /* Create sysfs 'stats' entry */
206 static struct device_attribute twa_host_stats_attr = {
207  .attr = {
208  .name = "stats",
209  .mode = S_IRUGO,
210  },
211  .show = twa_show_stats
212 };
213 
214 /* Host attributes initializer */
215 static struct device_attribute *twa_host_attrs[] = {
216  &twa_host_stats_attr,
217  NULL,
218 };
219 
220 /* File operations struct for character device */
221 static const struct file_operations twa_fops = {
222  .owner = THIS_MODULE,
223  .unlocked_ioctl = twa_chrdev_ioctl,
224  .open = twa_chrdev_open,
225  .release = NULL,
226  .llseek = noop_llseek,
227 };
228 
229 /* This function will complete an aen request from the isr */
230 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
231 {
232  TW_Command_Full *full_command_packet;
233  TW_Command *command_packet;
234  TW_Command_Apache_Header *header;
235  unsigned short aen;
236  int retval = 1;
237 
238  header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
239  tw_dev->posted_request_count--;
240  aen = le16_to_cpu(header->status_block.error);
241  full_command_packet = tw_dev->command_packet_virt[request_id];
242  command_packet = &full_command_packet->command.oldcommand;
243 
244  /* First check for internal completion of set param for time sync */
245  if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
246  /* Keep reading the queue in case there are more aen's */
247  if (twa_aen_read_queue(tw_dev, request_id))
248  goto out2;
249  else {
250  retval = 0;
251  goto out;
252  }
253  }
254 
255  switch (aen) {
256  case TW_AEN_QUEUE_EMPTY:
257  /* Quit reading the queue if this is the last one */
258  break;
259  case TW_AEN_SYNC_TIME_WITH_HOST:
260  twa_aen_sync_time(tw_dev, request_id);
261  retval = 0;
262  goto out;
263  default:
264  twa_aen_queue_event(tw_dev, header);
265 
266  /* If there are more aen's, keep reading the queue */
267  if (twa_aen_read_queue(tw_dev, request_id))
268  goto out2;
269  else {
270  retval = 0;
271  goto out;
272  }
273  }
274  retval = 0;
275 out2:
276  tw_dev->state[request_id] = TW_S_COMPLETED;
277  twa_free_request_id(tw_dev, request_id);
278  clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
279 out:
280  return retval;
281 } /* End twa_aen_complete() */
282 
283 /* This function will drain aen queue */
284 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
285 {
286  int request_id = 0;
287  char cdb[TW_MAX_CDB_LEN];
288  TW_SG_Entry sglist[1];
289  int finished = 0, count = 0;
290  TW_Command_Full *full_command_packet;
291  TW_Command_Apache_Header *header;
292  unsigned short aen;
293  int first_reset = 0, queue = 0, retval = 1;
294 
295  if (no_check_reset)
296  first_reset = 0;
297  else
298  first_reset = 1;
299 
300  full_command_packet = tw_dev->command_packet_virt[request_id];
301  memset(full_command_packet, 0, sizeof(TW_Command_Full));
302 
303  /* Initialize cdb */
304  memset(&cdb, 0, TW_MAX_CDB_LEN);
305  cdb[0] = REQUEST_SENSE; /* opcode */
306  cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
307 
308  /* Initialize sglist */
309  memset(&sglist, 0, sizeof(TW_SG_Entry));
310  sglist[0].length = TW_SECTOR_SIZE;
311  sglist[0].address = tw_dev->generic_buffer_phys[request_id];
312 
313  if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
314  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
315  goto out;
316  }
317 
318  /* Mark internal command */
319  tw_dev->srb[request_id] = NULL;
320 
321  do {
322  /* Send command to the board */
323  if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
324  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
325  goto out;
326  }
327 
328  /* Now poll for completion */
329  if (twa_poll_response(tw_dev, request_id, 30)) {
330  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
331  tw_dev->posted_request_count--;
332  goto out;
333  }
334 
335  tw_dev->posted_request_count--;
336  header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
337  aen = le16_to_cpu(header->status_block.error);
338  queue = 0;
339  count++;
340 
341  switch (aen) {
342  case TW_AEN_QUEUE_EMPTY:
343  if (first_reset != 1)
344  goto out;
345  else
346  finished = 1;
347  break;
348  case TW_AEN_SOFT_RESET:
349  if (first_reset == 0)
350  first_reset = 1;
351  else
352  queue = 1;
353  break;
354  case TW_AEN_SYNC_TIME_WITH_HOST:
355  break;
356  default:
357  queue = 1;
358  }
359 
360  /* Now queue an event info */
361  if (queue)
362  twa_aen_queue_event(tw_dev, header);
363  } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
364 
365  if (count == TW_MAX_AEN_DRAIN)
366  goto out;
367 
368  retval = 0;
369 out:
370  tw_dev->state[request_id] = TW_S_INITIAL;
371  return retval;
372 } /* End twa_aen_drain_queue() */
373 
374 /* This function will queue an event */
375 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
376 {
377  u32 local_time;
378  struct timeval time;
379  TW_Event *event;
380  unsigned short aen;
381  char host[16];
382  char *error_str;
383 
384  tw_dev->aen_count++;
385 
386  /* Fill out event info */
387  event = tw_dev->event_queue[tw_dev->error_index];
388 
389  /* Check for clobber */
390  host[0] = '\0';
391  if (tw_dev->host) {
392  sprintf(host, " scsi%d:", tw_dev->host->host_no);
393  if (event->retrieved == TW_AEN_NOT_RETRIEVED)
394  tw_dev->aen_clobber = 1;
395  }
396 
397  aen = le16_to_cpu(header->status_block.error);
398  memset(event, 0, sizeof(TW_Event));
399 
400  event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
401  do_gettimeofday(&time);
402  local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
403  event->time_stamp_sec = local_time;
404  event->aen_code = aen;
405  event->retrieved = TW_AEN_NOT_RETRIEVED;
406  event->sequence_id = tw_dev->error_sequence_id;
407  tw_dev->error_sequence_id++;
408 
409  /* Check for embedded error string */
410  error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
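 /* error_str points just past the terminating NUL of err_specific_desc; the firmware may place a second, human-readable string there, and an empty first byte means no embedded string is present. */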
411 
412  header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
413  event->parameter_len = strlen(header->err_specific_desc);
414  memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
415  if (event->severity != TW_AEN_SEVERITY_DEBUG)
416  printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
417  host,
418  twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
419  TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
420  error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
421  header->err_specific_desc);
422  else
423  tw_dev->aen_count--;
424 
425  if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
426  tw_dev->event_queue_wrapped = 1;
427  tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
428 } /* End twa_aen_queue_event() */
429 
430 /* This function will read the aen queue from the isr */
431 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
432 {
433  char cdb[TW_MAX_CDB_LEN];
434  TW_SG_Entry sglist[1];
435  TW_Command_Full *full_command_packet;
436  int retval = 1;
437 
438  full_command_packet = tw_dev->command_packet_virt[request_id];
439  memset(full_command_packet, 0, sizeof(TW_Command_Full));
440 
441  /* Initialize cdb */
442  memset(&cdb, 0, TW_MAX_CDB_LEN);
443  cdb[0] = REQUEST_SENSE; /* opcode */
444  cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
445 
446  /* Initialize sglist */
447  memset(&sglist, 0, sizeof(TW_SG_Entry));
448  sglist[0].length = TW_SECTOR_SIZE;
449  sglist[0].address = tw_dev->generic_buffer_phys[request_id];
450 
451  /* Mark internal command */
452  tw_dev->srb[request_id] = NULL;
453 
454  /* Now post the command packet */
455  if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
456  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
457  goto out;
458  }
459  retval = 0;
460 out:
461  return retval;
462 } /* End twa_aen_read_queue() */
463 
464 /* This function will look up an AEN severity string */
465 static char *twa_aen_severity_lookup(unsigned char severity_code)
466 {
467  char *retval = NULL;
468 
469  if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
470  (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
471  goto out;
472 
473  retval = twa_aen_severity_table[severity_code];
474 out:
475  return retval;
476 } /* End twa_aen_severity_lookup() */
477 
478 /* This function will sync firmware time with the host time */
479 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
480 {
481  u32 schedulertime;
482  struct timeval utc;
483  TW_Command_Full *full_command_packet;
484  TW_Command *command_packet;
485  TW_Param_Apache *param;
486  u32 local_time;
487 
488  /* Fill out the command packet */
489  full_command_packet = tw_dev->command_packet_virt[request_id];
490  memset(full_command_packet, 0, sizeof(TW_Command_Full));
491  command_packet = &full_command_packet->command.oldcommand;
492  command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
493  command_packet->request_id = request_id;
494  command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
495  command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
496  command_packet->size = TW_COMMAND_SIZE;
497  command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
498 
499  /* Setup the param */
500  param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
501  memset(param, 0, TW_SECTOR_SIZE);
502  param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
503  param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
504  param->parameter_size_bytes = cpu_to_le16(4);
505 
506  /* Convert system time in UTC to local time seconds since last
507  Sunday 12:00AM */
508  do_gettimeofday(&utc);
509  local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
510  schedulertime = local_time - (3 * 86400);
511  schedulertime = cpu_to_le32(schedulertime % 604800);
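 /* The Unix epoch fell on a Thursday, so subtracting 3 days before taking the value modulo 604800 (one week) yields seconds since the most recent Sunday midnight, local time. */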
512 
513  memcpy(param->data, &schedulertime, sizeof(u32));
514 
515  /* Mark internal command */
516  tw_dev->srb[request_id] = NULL;
517 
518  /* Now post the command */
519  twa_post_command_packet(tw_dev, request_id, 1);
520 } /* End twa_aen_sync_time() */
521 
522 /* This function will allocate memory and check if it is correctly aligned */
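 /* 'which' selects the buffer pool: 0 = per-request command packets, 1 = generic buffers (AEN drain, parameter and sense data). */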
523 static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
524 {
525  int i;
526  dma_addr_t dma_handle;
527  unsigned long *cpu_addr;
528  int retval = 1;
529 
530  cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
531  if (!cpu_addr) {
532  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
533  goto out;
534  }
535 
536  if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
537  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
538  pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
539  goto out;
540  }
541 
542  memset(cpu_addr, 0, size*TW_Q_LENGTH);
543 
544  for (i = 0; i < TW_Q_LENGTH; i++) {
545  switch(which) {
546  case 0:
547  tw_dev->command_packet_phys[i] = dma_handle+(i*size);
548  tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
549  break;
550  case 1:
551  tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
552  tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
553  break;
554  }
555  }
556  retval = 0;
557 out:
558  return retval;
559 } /* End twa_allocate_memory() */
560 
561 /* This function will check the status register for unexpected bits */
562 static int twa_check_bits(u32 status_reg_value)
563 {
564  int retval = 1;
565 
566  if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
567  goto out;
568  if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
569  goto out;
570 
571  retval = 0;
572 out:
573  return retval;
574 } /* End twa_check_bits() */
575 
576 /* This function will check the srl and decide if we are compatible */
577 static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
578 {
579  int retval = 1;
580  unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
581  unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
582  u32 init_connect_result = 0;
583 
584  if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
585  TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
586  TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
587  TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
588  &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
589  &fw_on_ctlr_build, &init_connect_result)) {
590  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
591  goto out;
592  }
593 
594  tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
595  tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
596  tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
597 
598  /* Try base mode compatibility */
599  if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
600  if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
601  TW_EXTENDED_INIT_CONNECT, TW_BASE_FW_SRL,
602  TW_9000_ARCH_ID, TW_BASE_FW_BRANCH,
603  TW_BASE_FW_BUILD,
604  &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
605  &fw_on_ctlr_branch, &fw_on_ctlr_build,
606  &init_connect_result)) {
607  TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
608  goto out;
609  }
610  if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
611  if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
612  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
613  } else {
614  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
615  }
616  goto out;
617  }
618  tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
619  tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
620  tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
621  }
622 
623  /* Load rest of compatibility struct */
624  strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
625  tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
626  tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
627  tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
628  tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
629  tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
630  tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
631  tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
632  tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
633  tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
634 
635  retval = 0;
636 out:
637  return retval;
638 } /* End twa_check_srl() */
639 
640 /* This function handles ioctl for the character device */
641 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
642 {
643  struct inode *inode = file->f_path.dentry->d_inode;
644  long timeout;
645  unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
646  dma_addr_t dma_handle;
647  int request_id = 0;
648  unsigned int sequence_id = 0;
649  unsigned char event_index, start_index;
650  TW_Ioctl_Driver_Command driver_command;
651  TW_Ioctl_Buf_Apache *tw_ioctl;
652  TW_Lock *tw_lock;
653  TW_Command_Full *full_command_packet;
654  TW_Compatibility_Info *tw_compat_info;
655  TW_Event *event;
656  struct timeval current_time;
657  u32 current_time_ms;
658  TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
659  int retval = TW_IOCTL_ERROR_OS_EFAULT;
660  void __user *argp = (void __user *)arg;
661 
662  mutex_lock(&twa_chrdev_mutex);
663 
664  /* Only let one of these through at a time */
665  if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
666  retval = TW_IOCTL_ERROR_OS_EINTR;
667  goto out;
668  }
669 
670  /* First copy down the driver command */
671  if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
672  goto out2;
673 
674  /* Check data buffer size */
675  if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
676  retval = TW_IOCTL_ERROR_OS_EINVAL;
677  goto out2;
678  }
679 
680  /* Hardware can only do multiple of 512 byte transfers */
681  data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
682 
683  /* Now allocate ioctl buf memory */
684  cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
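 /* Allocation size: the 512-byte-rounded data buffer plus the TW_Ioctl_Buf_Apache header, minus 1 because the header ends in a one-byte data_buffer placeholder (an assumption inferred from the repeated "- 1" sizing here and in the copies below). */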
685  if (!cpu_addr) {
686  retval = TW_IOCTL_ERROR_OS_ENOMEM;
687  goto out2;
688  }
689 
690  tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
691 
692  /* Now copy down the entire ioctl */
693  if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
694  goto out3;
695 
696  /* See which ioctl we are doing */
697  switch (cmd) {
698  case TW_IOCTL_FIRMWARE_PASS_THROUGH:
699  spin_lock_irqsave(tw_dev->host->host_lock, flags);
700  twa_get_request_id(tw_dev, &request_id);
701 
702  /* Flag internal command */
703  tw_dev->srb[request_id] = NULL;
704 
705  /* Flag chrdev ioctl */
706  tw_dev->chrdev_request_id = request_id;
707 
708  full_command_packet = &tw_ioctl->firmware_command;
709 
710  /* Load request id and sglist for both command types */
711  twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
712 
713  memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
714 
715  /* Now post the command packet to the controller */
716  twa_post_command_packet(tw_dev, request_id, 1);
717  spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
718 
719  timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
720 
721  /* Now wait for command to complete */
722  timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
723 
724  /* We timed out, and didn't get an interrupt */
725  if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
726  /* Now we need to reset the board */
727  printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
728  tw_dev->host->host_no, TW_DRIVER, 0x37,
729  cmd);
730  retval = TW_IOCTL_ERROR_OS_EIO;
731  twa_reset_device_extension(tw_dev);
732  goto out3;
733  }
734 
735  /* Now copy in the command packet response */
736  memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
737 
738  /* Now complete the io */
739  spin_lock_irqsave(tw_dev->host->host_lock, flags);
740  tw_dev->posted_request_count--;
741  tw_dev->state[request_id] = TW_S_COMPLETED;
742  twa_free_request_id(tw_dev, request_id);
743  spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
744  break;
745  case TW_IOCTL_GET_COMPATIBILITY_INFO:
746  tw_ioctl->driver_command.status = 0;
747  /* Copy compatibility struct into ioctl data buffer */
748  tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
749  memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
750  break;
751  case TW_IOCTL_GET_LAST_EVENT:
752  if (tw_dev->event_queue_wrapped) {
753  if (tw_dev->aen_clobber) {
754  tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
755  tw_dev->aen_clobber = 0;
756  } else
757  tw_ioctl->driver_command.status = 0;
758  } else {
759  if (!tw_dev->error_index) {
760  tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
761  break;
762  }
763  tw_ioctl->driver_command.status = 0;
764  }
765  event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
766  memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
767  tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
768  break;
769  case TW_IOCTL_GET_FIRST_EVENT:
770  if (tw_dev->event_queue_wrapped) {
771  if (tw_dev->aen_clobber) {
772  tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
773  tw_dev->aen_clobber = 0;
774  } else
775  tw_ioctl->driver_command.status = 0;
776  event_index = tw_dev->error_index;
777  } else {
778  if (!tw_dev->error_index) {
779  tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
780  break;
781  }
782  tw_ioctl->driver_command.status = 0;
783  event_index = 0;
784  }
785  memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
786  tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
787  break;
788  case TW_IOCTL_GET_NEXT_EVENT:
789  event = (TW_Event *)tw_ioctl->data_buffer;
790  sequence_id = event->sequence_id;
791  tw_ioctl->driver_command.status = 0;
792 
793  if (tw_dev->event_queue_wrapped) {
794  if (tw_dev->aen_clobber) {
795  tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
796  tw_dev->aen_clobber = 0;
797  }
798  start_index = tw_dev->error_index;
799  } else {
800  if (!tw_dev->error_index) {
801  tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
802  break;
803  }
804  start_index = 0;
805  }
806  event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
807 
808  if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
809  if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
810  tw_dev->aen_clobber = 1;
811  tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
812  break;
813  }
814  memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
815  tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
816  break;
817  case TW_IOCTL_GET_PREVIOUS_EVENT:
818  event = (TW_Event *)tw_ioctl->data_buffer;
819  sequence_id = event->sequence_id;
820  tw_ioctl->driver_command.status = 0;
821 
822  if (tw_dev->event_queue_wrapped) {
823  if (tw_dev->aen_clobber) {
824  tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
825  tw_dev->aen_clobber = 0;
826  }
827  start_index = tw_dev->error_index;
828  } else {
829  if (!tw_dev->error_index) {
830  tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
831  break;
832  }
833  start_index = 0;
834  }
835  event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
836 
837  if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
838  if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
839  tw_dev->aen_clobber = 1;
840  tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
841  break;
842  }
843  memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
844  tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
845  break;
846  case TW_IOCTL_GET_LOCK:
847  tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
848  do_gettimeofday(&current_time);
849  current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
850 
851  if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
852  tw_dev->ioctl_sem_lock = 1;
853  tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
854  tw_ioctl->driver_command.status = 0;
855  tw_lock->time_remaining_msec = tw_lock->timeout_msec;
856  } else {
857  tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
858  tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
859  }
860  break;
861  case TW_IOCTL_RELEASE_LOCK:
862  if (tw_dev->ioctl_sem_lock == 1) {
863  tw_dev->ioctl_sem_lock = 0;
864  tw_ioctl->driver_command.status = 0;
865  } else {
866  tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
867  }
868  break;
869  default:
870  retval = TW_IOCTL_ERROR_OS_ENOTTY;
871  goto out3;
872  }
873 
874  /* Now copy the entire response to userspace */
875  if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
876  retval = 0;
877 out3:
878  /* Now free ioctl buf memory */
879  dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
880 out2:
881  mutex_unlock(&tw_dev->ioctl_lock);
882 out:
883  mutex_unlock(&twa_chrdev_mutex);
884  return retval;
885 } /* End twa_chrdev_ioctl() */
886 
887 /* This function handles open for the character device */
888 /* NOTE that this function will race with remove. */
889 static int twa_chrdev_open(struct inode *inode, struct file *file)
890 {
891  unsigned int minor_number;
892  int retval = TW_IOCTL_ERROR_OS_ENODEV;
893 
894  minor_number = iminor(inode);
895  if (minor_number >= twa_device_extension_count)
896  goto out;
897  retval = 0;
898 out:
899  return retval;
900 } /* End twa_chrdev_open() */
901 
902 /* This function will print readable messages from status register errors */
903 static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
904 {
905  int retval = 1;
906 
907  /* Check for various error conditions and handle them appropriately */
908  if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
909  TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
910  writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
911  }
912 
913  if (status_reg_value & TW_STATUS_PCI_ABORT) {
914  TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
915  writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
916  pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
917  }
918 
919  if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
920  if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
921  (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
922  (!test_bit(TW_IN_RESET, &tw_dev->flags)))
923  TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
924  writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
925  }
926 
927  if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
928  if (tw_dev->reset_print == 0) {
929  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
930  tw_dev->reset_print = 1;
931  }
932  goto out;
933  }
934  retval = 0;
935 out:
936  return retval;
937 } /* End twa_decode_bits() */
938 
939 /* This function will empty the response queue */
940 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
941 {
942  u32 status_reg_value, response_que_value;
943  int count = 0, retval = 1;
944 
945  status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
946 
947  while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
948  response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
949  status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
950  count++;
951  }
952  if (count == TW_MAX_RESPONSE_DRAIN)
953  goto out;
954 
955  retval = 0;
956 out:
957  return retval;
958 } /* End twa_empty_response_queue() */
959 
960 /* This function will clear the pchip/response queue on 9550SX */
961 static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
962 {
963  u32 response_que_value = 0;
964  unsigned long before;
965  int retval = 1;
966 
967  if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
968  before = jiffies;
969  while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
970  response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
971  msleep(1);
972  if (time_after(jiffies, before + HZ * 30))
973  goto out;
974  }
975  /* P-chip settle time */
976  msleep(500);
977  retval = 0;
978  } else
979  retval = 0;
980 out:
981  return retval;
982 } /* End twa_empty_response_queue_large() */
983 
984 /* This function passes sense keys from firmware to scsi layer */
985 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
986 {
987  TW_Command_Full *full_command_packet;
988  unsigned short error;
989  int retval = 1;
990  char *error_str;
991 
992  full_command_packet = tw_dev->command_packet_virt[request_id];
993 
994  /* Check for embedded error string */
995  error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
996 
997  /* Don't print error for Logical unit not supported during rollcall */
998  error = le16_to_cpu(full_command_packet->header.status_block.error);
999  if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
1000  if (print_host)
1001  printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1002  tw_dev->host->host_no,
1003  TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1004  full_command_packet->header.status_block.error,
1005  error_str[0] == '\0' ?
1006  twa_string_lookup(twa_error_table,
1007  full_command_packet->header.status_block.error) : error_str,
1008  full_command_packet->header.err_specific_desc);
1009  else
1010  printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1011  TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1012  full_command_packet->header.status_block.error,
1013  error_str[0] == '\0' ?
1014  twa_string_lookup(twa_error_table,
1015  full_command_packet->header.status_block.error) : error_str,
1016  full_command_packet->header.err_specific_desc);
1017  }
1018 
1019  if (copy_sense) {
1020  memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1021  tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1022  retval = TW_ISR_DONT_RESULT;
1023  goto out;
1024  }
1025  retval = 0;
1026 out:
1027  return retval;
1028 } /* End twa_fill_sense() */
1029 
1030 /* This function will free up device extension resources */
1031 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1032 {
1033  if (tw_dev->command_packet_virt[0])
1034  pci_free_consistent(tw_dev->tw_pci_dev,
1035  sizeof(TW_Command_Full)*TW_Q_LENGTH,
1036  tw_dev->command_packet_virt[0],
1037  tw_dev->command_packet_phys[0]);
1038 
1039  if (tw_dev->generic_buffer_virt[0])
1040  pci_free_consistent(tw_dev->tw_pci_dev,
1041  TW_SECTOR_SIZE*TW_Q_LENGTH,
1042  tw_dev->generic_buffer_virt[0],
1043  tw_dev->generic_buffer_phys[0]);
1044 
1045  kfree(tw_dev->event_queue[0]);
1046 } /* End twa_free_device_extension() */
1047 
1048 /* This function will free a request id */
1049 static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1050 {
1051  tw_dev->free_queue[tw_dev->free_tail] = request_id;
1052  tw_dev->state[request_id] = TW_S_FINISHED;
1053  tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1054 } /* End twa_free_request_id() */
1055 
1056 /* This function will get parameter table entries from the firmware */
1057 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1058 {
1059  TW_Command_Full *full_command_packet;
1060  TW_Command *command_packet;
1061  TW_Param_Apache *param;
1062  void *retval = NULL;
1063 
1064  /* Setup the command packet */
1065  full_command_packet = tw_dev->command_packet_virt[request_id];
1066  memset(full_command_packet, 0, sizeof(TW_Command_Full));
1067  command_packet = &full_command_packet->command.oldcommand;
1068 
1069  command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1070  command_packet->size = TW_COMMAND_SIZE;
1071  command_packet->request_id = request_id;
1072  command_packet->byte6_offset.block_count = cpu_to_le16(1);
1073 
1074  /* Now setup the param */
1075  param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1076  memset(param, 0, TW_SECTOR_SIZE);
1077  param->table_id = cpu_to_le16(table_id | 0x8000);
1078  param->parameter_id = cpu_to_le16(parameter_id);
1079  param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1080 
1081  command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1082  command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1083 
1084  /* Post the command packet to the board */
1085  twa_post_command_packet(tw_dev, request_id, 1);
1086 
1087  /* Poll for completion */
1088  if (twa_poll_response(tw_dev, request_id, 30))
1089  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1090  else
1091  retval = (void *)&(param->data[0]);
1092 
1093  tw_dev->posted_request_count--;
1094  tw_dev->state[request_id] = TW_S_INITIAL;
1095 
1096  return retval;
1097 } /* End twa_get_param() */
1098 
1099 /* This function will assign an available request id */
1100 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1101 {
1102  *request_id = tw_dev->free_queue[tw_dev->free_head];
1103  tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1104  tw_dev->state[*request_id] = TW_S_STARTED;
1105 } /* End twa_get_request_id() */
1106 
1107 /* This function will send an initconnection command to controller */
1108 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1109  u32 set_features, unsigned short current_fw_srl,
1110  unsigned short current_fw_arch_id,
1111  unsigned short current_fw_branch,
1112  unsigned short current_fw_build,
1113  unsigned short *fw_on_ctlr_srl,
1114  unsigned short *fw_on_ctlr_arch_id,
1115  unsigned short *fw_on_ctlr_branch,
1116  unsigned short *fw_on_ctlr_build,
1117  u32 *init_connect_result)
1118 {
1119  TW_Command_Full *full_command_packet;
1120  TW_Initconnect *tw_initconnect;
1121  int request_id = 0, retval = 1;
1122 
1123  /* Initialize InitConnection command packet */
1124  full_command_packet = tw_dev->command_packet_virt[request_id];
1125  memset(full_command_packet, 0, sizeof(TW_Command_Full));
1126  full_command_packet->header.header_desc.size_header = 128;
1127 
1128  tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1129  tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1130  tw_initconnect->request_id = request_id;
1131  tw_initconnect->message_credits = cpu_to_le16(message_credits);
1132  tw_initconnect->features = set_features;
1133 
1134  /* Turn on 64-bit sgl support if we need to */
1135  tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1136 
1137  tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1138 
1139  if (set_features & TW_EXTENDED_INIT_CONNECT) {
1140  tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1141  tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1142  tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1143  tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1144  tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1145  } else
1146  tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1147 
1148  /* Send command packet to the board */
1149  twa_post_command_packet(tw_dev, request_id, 1);
1150 
1151  /* Poll for completion */
1152  if (twa_poll_response(tw_dev, request_id, 30)) {
1153  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1154  } else {
1155  if (set_features & TW_EXTENDED_INIT_CONNECT) {
1156  *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1157  *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1158  *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1159  *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1160  *init_connect_result = le32_to_cpu(tw_initconnect->result);
1161  }
1162  retval = 0;
1163  }
1164 
1165  tw_dev->posted_request_count--;
1166  tw_dev->state[request_id] = TW_S_INITIAL;
1167 
1168  return retval;
1169 } /* End twa_initconnection() */
1170 
1171 /* This function will initialize the fields of a device extension */
1172 static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1173 {
1174  int i, retval = 1;
1175 
1176  /* Initialize command packet buffers */
1177  if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1178  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1179  goto out;
1180  }
1181 
1182  /* Initialize generic buffer */
1183  if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1184  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1185  goto out;
1186  }
1187 
1188  /* Allocate event info space */
1189  tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1190  if (!tw_dev->event_queue[0]) {
1191  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1192  goto out;
1193  }
1194 
1195 
1196  for (i = 0; i < TW_Q_LENGTH; i++) {
1197  tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1198  tw_dev->free_queue[i] = i;
1199  tw_dev->state[i] = TW_S_INITIAL;
1200  }
1201 
1202  tw_dev->pending_head = TW_Q_START;
1203  tw_dev->pending_tail = TW_Q_START;
1204  tw_dev->free_head = TW_Q_START;
1205  tw_dev->free_tail = TW_Q_START;
1206  tw_dev->error_sequence_id = 1;
1207  tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1208 
1209  mutex_init(&tw_dev->ioctl_lock);
1210  init_waitqueue_head(&tw_dev->ioctl_wqueue);
1211 
1212  retval = 0;
1213 out:
1214  return retval;
1215 } /* End twa_initialize_device_extension() */
1216 
1217 /* This function is the interrupt service routine */
1218 static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1219 {
1220  int request_id, error = 0;
1221  u32 status_reg_value;
1222  TW_Response_Queue response_que;
1223  TW_Command_Full *full_command_packet;
1224  TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1225  int handled = 0;
1226 
1227  /* Get the per adapter lock */
1228  spin_lock(tw_dev->host->host_lock);
1229 
1230  /* Read the registers */
1231  status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1232 
1233  /* Check if this is our interrupt, otherwise bail */
1234  if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1235  goto twa_interrupt_bail;
1236 
1237  handled = 1;
1238 
1239  /* If we are resetting, bail */
1240  if (test_bit(TW_IN_RESET, &tw_dev->flags))
1241  goto twa_interrupt_bail;
1242 
1243  /* Check controller for errors */
1244  if (twa_check_bits(status_reg_value)) {
1245  if (twa_decode_bits(tw_dev, status_reg_value)) {
1246  TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1247  goto twa_interrupt_bail;
1248  }
1249  }
1250 
1251  /* Handle host interrupt */
1252  if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1253  TW_CLEAR_HOST_INTERRUPT(tw_dev);
1254 
1255  /* Handle attention interrupt */
1256  if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1257  TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1258  if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1259  twa_get_request_id(tw_dev, &request_id);
1260 
1261  error = twa_aen_read_queue(tw_dev, request_id);
1262  if (error) {
1263  tw_dev->state[request_id] = TW_S_COMPLETED;
1264  twa_free_request_id(tw_dev, request_id);
1265  clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1266  }
1267  }
1268  }
1269 
1270  /* Handle command interrupt */
1271  if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1272  TW_MASK_COMMAND_INTERRUPT(tw_dev);
1273  /* Drain as many pending commands as we can */
1274  while (tw_dev->pending_request_count > 0) {
1275  request_id = tw_dev->pending_queue[tw_dev->pending_head];
1276  if (tw_dev->state[request_id] != TW_S_PENDING) {
1277  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1278  TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1279  goto twa_interrupt_bail;
1280  }
1281  if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1282  tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1283  tw_dev->pending_request_count--;
1284  } else {
1285  /* If we get here, we will continue re-posting on the next command interrupt */
1286  break;
1287  }
1288  }
1289  }
1290 
1291  /* Handle response interrupt */
1292  if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1293 
1294  /* Drain the response queue from the board */
1295  while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1296  /* Complete the response */
1297  response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1298  request_id = TW_RESID_OUT(response_que.response_id);
1299  full_command_packet = tw_dev->command_packet_virt[request_id];
1300  error = 0;
1301  /* Check for command packet errors */
1302  if (full_command_packet->command.newcommand.status != 0) {
1303  if (tw_dev->srb[request_id] != NULL) {
1304  error = twa_fill_sense(tw_dev, request_id, 1, 1);
1305  } else {
1306  /* Skip ioctl error prints */
1307  if (request_id != tw_dev->chrdev_request_id) {
1308  error = twa_fill_sense(tw_dev, request_id, 0, 1);
1309  }
1310  }
1311  }
1312 
1313  /* Check for correct state */
1314  if (tw_dev->state[request_id] != TW_S_POSTED) {
1315  if (tw_dev->srb[request_id] != NULL) {
1316  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1317  TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1318  goto twa_interrupt_bail;
1319  }
1320  }
1321 
1322  /* Check for internal command completion */
1323  if (tw_dev->srb[request_id] == NULL) {
1324  if (request_id != tw_dev->chrdev_request_id) {
1325  if (twa_aen_complete(tw_dev, request_id))
1326  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1327  } else {
1328  tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1329  wake_up(&tw_dev->ioctl_wqueue);
1330  }
1331  } else {
1332  struct scsi_cmnd *cmd;
1333 
1334  cmd = tw_dev->srb[request_id];
1335 
1336  twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1337  /* If no error, the command was a success */
1338  if (error == 0) {
1339  cmd->result = (DID_OK << 16);
1340  }
1341 
1342  /* If error, command failed */
1343  if (error == 1) {
1344  /* Ask for a host reset */
1345  cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1346  }
1347 
1348  /* Report residual bytes for single sgl */
1349  if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1350  if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1351  scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1352  }
1353 
1354  /* Now complete the io */
1355  tw_dev->state[request_id] = TW_S_COMPLETED;
1356  twa_free_request_id(tw_dev, request_id);
1357  tw_dev->posted_request_count--;
1358  tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1359  twa_unmap_scsi_data(tw_dev, request_id);
1360  }
1361 
1362  /* Check for valid status after each drain */
1363  status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1364  if (twa_check_bits(status_reg_value)) {
1365  if (twa_decode_bits(tw_dev, status_reg_value)) {
1366  TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1367  goto twa_interrupt_bail;
1368  }
1369  }
1370  }
1371  }
1372 
1373 twa_interrupt_bail:
1374  spin_unlock(tw_dev->host->host_lock);
1375  return IRQ_RETVAL(handled);
1376 } /* End twa_interrupt() */
1377 
1378 /* This function will load the request id and various sgls for ioctls */
1379 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1380 {
1381  TW_Command *oldcommand;
1382  TW_Command_Apache *newcommand;
1383  TW_SG_Entry *sgl;
1384  unsigned int pae = 0;
1385 
1386  if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1387  pae = 1;
1388 
1389  if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1390  newcommand = &full_command_packet->command.newcommand;
1391  newcommand->request_id__lunl =
1392  cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1393  if (length) {
1394  newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1395  newcommand->sg_list[0].length = cpu_to_le32(length);
1396  }
1397  newcommand->sgl_entries__lunh =
1398  cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1399  } else {
1400  oldcommand = &full_command_packet->command.oldcommand;
1401  oldcommand->request_id = request_id;
1402 
1403  if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1404  /* Load the sg list */
1405  if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1406  sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1407  else
1408  sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1409  sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1410  sgl->length = cpu_to_le32(length);
1411 
1412  oldcommand->size += pae;
1413  }
1414  }
1415 } /* End twa_load_sgl() */
1416 
1417 /* This function will perform a pci-dma mapping for a scatter gather list */
1418 static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
1419 {
1420  int use_sg;
1421  struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1422 
1423  use_sg = scsi_dma_map(cmd);
1424  if (!use_sg)
1425  return 0;
1426  else if (use_sg < 0) {
1427  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
1428  return 0;
1429  }
1430 
1431  cmd->SCp.phase = TW_PHASE_SGLIST;
1432  cmd->SCp.have_data_in = use_sg;
1433 
1434  return use_sg;
1435 } /* End twa_map_scsi_sg_data() */
1436 
1437 /* This function will poll for a response interrupt of a request */
1438 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1439 {
1440  int retval = 1, found = 0, response_request_id;
1441  TW_Response_Queue response_queue;
1442  TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1443 
1444  if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1445  response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1446  response_request_id = TW_RESID_OUT(response_queue.response_id);
1447  if (request_id != response_request_id) {
1448  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1449  goto out;
1450  }
1451  if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1452  if (full_command_packet->command.newcommand.status != 0) {
1453  /* bad response */
1454  twa_fill_sense(tw_dev, request_id, 0, 0);
1455  goto out;
1456  }
1457  found = 1;
1458  } else {
1459  if (full_command_packet->command.oldcommand.status != 0) {
1460  /* bad response */
1461  twa_fill_sense(tw_dev, request_id, 0, 0);
1462  goto out;
1463  }
1464  found = 1;
1465  }
1466  }
1467 
1468  if (found)
1469  retval = 0;
1470 out:
1471  return retval;
1472 } /* End twa_poll_response() */
1473 
1474 /* This function will poll the status register for a flag */
1475 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1476 {
1477  u32 status_reg_value;
1478  unsigned long before;
1479  int retval = 1;
1480 
1481  status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1482  before = jiffies;
1483 
1484  if (twa_check_bits(status_reg_value))
1485  twa_decode_bits(tw_dev, status_reg_value);
1486 
1487  while ((status_reg_value & flag) != flag) {
1488  status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1489 
1490  if (twa_check_bits(status_reg_value))
1491  twa_decode_bits(tw_dev, status_reg_value);
1492 
1493  if (time_after(jiffies, before + HZ * seconds))
1494  goto out;
1495 
1496  msleep(50);
1497  }
1498  retval = 0;
1499 out:
1500  return retval;
1501 } /* End twa_poll_status() */
1502 
1503 /* This function will poll the status register for disappearance of a flag */
1504 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1505 {
1506  u32 status_reg_value;
1507  unsigned long before;
1508  int retval = 1;
1509 
1510  status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1511  before = jiffies;
1512 
1513  if (twa_check_bits(status_reg_value))
1514  twa_decode_bits(tw_dev, status_reg_value);
1515 
1516  while ((status_reg_value & flag) != 0) {
1517  status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1518  if (twa_check_bits(status_reg_value))
1519  twa_decode_bits(tw_dev, status_reg_value);
1520 
1521  if (time_after(jiffies, before + HZ * seconds))
1522  goto out;
1523 
1524  msleep(50);
1525  }
1526  retval = 0;
1527 out:
1528  return retval;
1529 } /* End twa_poll_status_gone() */
1530 
1531 /* This function will attempt to post a command packet to the board */
1532 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1533 {
1534  u32 status_reg_value;
1535  dma_addr_t command_que_value;
1536  int retval = 1;
1537 
1538  command_que_value = tw_dev->command_packet_phys[request_id];
1539 
1540  /* For 9650SE write low 4 bytes first */
1541  if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1542  (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1543  command_que_value += TW_COMMAND_OFFSET;
1544  writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1545  }
1546 
1547  status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1548 
1549  if (twa_check_bits(status_reg_value))
1550  twa_decode_bits(tw_dev, status_reg_value);
1551 
1552  if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1553 
1554  /* Only pend internal driver commands */
1555  if (!internal) {
1556  retval = SCSI_MLQUEUE_HOST_BUSY;
1557  goto out;
1558  }
1559 
1560  /* Couldn't post the command packet, so we do it later */
1561  if (tw_dev->state[request_id] != TW_S_PENDING) {
1562  tw_dev->state[request_id] = TW_S_PENDING;
1563  tw_dev->pending_request_count++;
1564  if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1565  tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1566  }
1567  tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1568  tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1569  }
1570  TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1571  goto out;
1572  } else {
1573  if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1574  (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1575  /* Now write upper 4 bytes */
1576  writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1577  } else {
1578  if (sizeof(dma_addr_t) > 4) {
1579  command_que_value += TW_COMMAND_OFFSET;
1580  writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1581  writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1582  } else {
1583  writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1584  }
1585  }
1586  tw_dev->state[request_id] = TW_S_POSTED;
1587  tw_dev->posted_request_count++;
1588  if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1589  tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1590  }
1591  }
1592  retval = 0;
1593 out:
1594  return retval;
1595 } /* End twa_post_command_packet() */
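/*
 * A minimal sketch of the 64-bit posting split used above, assuming a
 * 64-bit dma_addr_t: the command packet address is written as two 32-bit
 * MMIO stores, low dword first, e.g.
 *
 *     writel(lower_32_bits(command_que_value), reg);
 *     writel(upper_32_bits(command_que_value), reg + 0x4);
 *
 * (lower_32_bits()/upper_32_bits() appear here purely for illustration;
 * the function above open-codes the casts and shift.)  On 9650SE/9690SA
 * the low half goes to the "large" queue register before the queue-full
 * check and the high half after it; when the queue is full, SCSI-layer
 * commands get SCSI_MLQUEUE_HOST_BUSY while internal driver commands are
 * placed on the pending queue instead.
 */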
1596 
1597 /* This function will reset a device extension */
1598 static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1599 {
1600  int i = 0;
1601  int retval = 1;
1602  unsigned long flags = 0;
1603 
1604  set_bit(TW_IN_RESET, &tw_dev->flags);
1605  TW_DISABLE_INTERRUPTS(tw_dev);
1606  TW_MASK_COMMAND_INTERRUPT(tw_dev);
1607  spin_lock_irqsave(tw_dev->host->host_lock, flags);
1608 
1609  /* Abort all requests that are in progress */
1610  for (i = 0; i < TW_Q_LENGTH; i++) {
1611  if ((tw_dev->state[i] != TW_S_FINISHED) &&
1612  (tw_dev->state[i] != TW_S_INITIAL) &&
1613  (tw_dev->state[i] != TW_S_COMPLETED)) {
1614  if (tw_dev->srb[i]) {
1615  tw_dev->srb[i]->result = (DID_RESET << 16);
1616  tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
1617  twa_unmap_scsi_data(tw_dev, i);
1618  }
1619  }
1620  }
1621 
1622  /* Reset queues and counts */
1623  for (i = 0; i < TW_Q_LENGTH; i++) {
1624  tw_dev->free_queue[i] = i;
1625  tw_dev->state[i] = TW_S_INITIAL;
1626  }
1627  tw_dev->free_head = TW_Q_START;
1628  tw_dev->free_tail = TW_Q_START;
1629  tw_dev->posted_request_count = 0;
1630  tw_dev->pending_request_count = 0;
1631  tw_dev->pending_head = TW_Q_START;
1632  tw_dev->pending_tail = TW_Q_START;
1633  tw_dev->reset_print = 0;
1634 
1635  spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1636 
1637  if (twa_reset_sequence(tw_dev, 1))
1638  goto out;
1639 
1640  TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1641  clear_bit(TW_IN_RESET, &tw_dev->flags);
1642  tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1643 
1644  retval = 0;
1645 out:
1646  return retval;
1647 } /* End twa_reset_device_extension() */
1648 
1649 /* This function will reset a controller */
1650 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1651 {
1652  int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1653 
1654  while (tries < TW_MAX_RESET_TRIES) {
1655  if (do_soft_reset) {
1656  TW_SOFT_RESET(tw_dev);
1657  /* Clear pchip/response queue on 9550SX */
1658  if (twa_empty_response_queue_large(tw_dev)) {
1659  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1660  do_soft_reset = 1;
1661  tries++;
1662  continue;
1663  }
1664  }
1665 
1666  /* Make sure controller is in a good state */
1667  if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1668  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1669  do_soft_reset = 1;
1670  tries++;
1671  continue;
1672  }
1673 
1674  /* Empty response queue */
1675  if (twa_empty_response_queue(tw_dev)) {
1676  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1677  do_soft_reset = 1;
1678  tries++;
1679  continue;
1680  }
1681 
1682  flashed = 0;
1683 
1684  /* Check for compatibility/flash */
1685  if (twa_check_srl(tw_dev, &flashed)) {
1686  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1687  do_soft_reset = 1;
1688  tries++;
1689  continue;
1690  } else {
1691  if (flashed) {
1692  tries++;
1693  continue;
1694  }
1695  }
1696 
1697  /* Drain the AEN queue */
1698  if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1699  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1700  do_soft_reset = 1;
1701  tries++;
1702  continue;
1703  }
1704 
1705  /* If we got here, controller is in a good state */
1706  retval = 0;
1707  goto out;
1708  }
1709 out:
1710  return retval;
1711 } /* End twa_reset_sequence() */
1712 
1713 /* This function returns unit geometry in cylinders/heads/sectors */
1714 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1715 {
1716  int heads, sectors, cylinders;
1717  TW_Device_Extension *tw_dev;
1718 
1719  tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1720 
1721  if (capacity >= 0x200000) {
1722  heads = 255;
1723  sectors = 63;
1724  cylinders = sector_div(capacity, heads * sectors);
1725  } else {
1726  heads = 64;
1727  sectors = 32;
1728  cylinders = sector_div(capacity, heads * sectors);
1729  }
1730 
1731  geom[0] = heads;
1732  geom[1] = sectors;
1733  geom[2] = cylinders;
1734 
1735  return 0;
1736 } /* End twa_scsi_biosparam() */
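/*
 * Worked example for the geometry above (illustrative): a unit of
 * 4194304 sectors (2 GiB at 512 bytes/sector) is >= 0x200000, so the
 * reported geometry uses 255 heads and 63 sectors/track, and the
 * intended cylinder count is capacity / (255 * 63) = 4194304 / 16065,
 * i.e. about 261 cylinders; smaller units fall back to a 64/32 layout.
 */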
1737 
1738 /* This is the new scsi eh reset function */
1739 static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1740 {
1741  TW_Device_Extension *tw_dev = NULL;
1742  int retval = FAILED;
1743 
1744  tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1745 
1746  tw_dev->num_resets++;
1747 
1748  sdev_printk(KERN_WARNING, SCpnt->device,
1749  "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1750  TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1751 
1752  /* Make sure we are not issuing an ioctl or resetting from ioctl */
1753  mutex_lock(&tw_dev->ioctl_lock);
1754 
1755  /* Now reset the card and some of the device extension data */
1756  if (twa_reset_device_extension(tw_dev)) {
1757  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1758  goto out;
1759  }
1760 
1761  retval = SUCCESS;
1762 out:
1763  mutex_unlock(&tw_dev->ioctl_lock);
1764  return retval;
1765 } /* End twa_scsi_eh_reset() */
1766 
1767 /* This is the main scsi queue function to handle scsi opcodes */
1768 static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1769 {
1770  int request_id, retval;
1771  TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1772 
1773  /* If we are resetting due to timed out ioctl, report as busy */
1774  if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1775  retval = SCSI_MLQUEUE_HOST_BUSY;
1776  goto out;
1777  }
1778 
1779  /* Check if this FW supports luns */
1780  if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1781  SCpnt->result = (DID_BAD_TARGET << 16);
1782  done(SCpnt);
1783  retval = 0;
1784  goto out;
1785  }
1786 
1787  /* Save done function into scsi_cmnd struct */
1788  SCpnt->scsi_done = done;
1789 
1790  /* Get a free request id */
1791  twa_get_request_id(tw_dev, &request_id);
1792 
1793  /* Save the scsi command for use by the ISR */
1794  tw_dev->srb[request_id] = SCpnt;
1795 
1796  /* Initialize phase to zero */
1797  SCpnt->SCp.phase = TW_PHASE_INITIAL;
1798 
1799  retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1800  switch (retval) {
1801  case SCSI_MLQUEUE_HOST_BUSY:
1802  twa_free_request_id(tw_dev, request_id);
1803  twa_unmap_scsi_data(tw_dev, request_id);
1804  break;
1805  case 1:
1806  tw_dev->state[request_id] = TW_S_COMPLETED;
1807  twa_free_request_id(tw_dev, request_id);
1808  twa_unmap_scsi_data(tw_dev, request_id);
1809  SCpnt->result = (DID_ERROR << 16);
1810  done(SCpnt);
1811  retval = 0;
1812  }
1813 out:
1814  return retval;
1815 } /* End twa_scsi_queue() */
1816 
1817 static DEF_SCSI_QCMD(twa_scsi_queue)
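/*
 * DEF_SCSI_QCMD(twa_scsi_queue) generates the twa_scsi_queue() wrapper
 * that the SCSI midlayer actually calls: it takes the host lock with
 * spin_lock_irqsave() and then invokes twa_scsi_queue_lck() above.  That
 * wrapper is what driver_template.queuecommand points at further below.
 */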
1818 
1819 /* This function hands scsi cdb's to the firmware */
1820 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1821 {
1822  TW_Command_Full *full_command_packet;
1823  TW_Command_Apache *command_packet;
1824  u32 num_sectors = 0x0;
1825  int i, sg_count;
1826  struct scsi_cmnd *srb = NULL;
1827  struct scatterlist *sglist = NULL, *sg;
1828  int retval = 1;
1829 
1830  if (tw_dev->srb[request_id]) {
1831  srb = tw_dev->srb[request_id];
1832  if (scsi_sglist(srb))
1833  sglist = scsi_sglist(srb);
1834  }
1835 
1836  /* Initialize command packet */
1837  full_command_packet = tw_dev->command_packet_virt[request_id];
1838  full_command_packet->header.header_desc.size_header = 128;
1839  full_command_packet->header.status_block.error = 0;
1840  full_command_packet->header.status_block.severity__reserved = 0;
1841 
1842  command_packet = &full_command_packet->command.newcommand;
1843  command_packet->status = 0;
1844  command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1845 
1846  /* We forced 16 byte cdb use earlier */
1847  if (!cdb)
1848  memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1849  else
1850  memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1851 
1852  if (srb) {
1853  command_packet->unit = srb->device->id;
1854  command_packet->request_id__lunl =
1855  cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1856  } else {
1857  command_packet->request_id__lunl =
1858  cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1859  command_packet->unit = 0;
1860  }
1861 
1862  command_packet->sgl_offset = 16;
1863 
1864  if (!sglistarg) {
1865  /* Map sglist from scsi layer to cmd packet */
1866 
1867  if (scsi_sg_count(srb)) {
1868  if ((scsi_sg_count(srb) == 1) &&
1869  (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
1870  if (srb->sc_data_direction == DMA_TO_DEVICE ||
1871  srb->sc_data_direction == DMA_BIDIRECTIONAL)
1872  scsi_sg_copy_to_buffer(srb,
1873  tw_dev->generic_buffer_virt[request_id],
1874  TW_SECTOR_SIZE);
1875  command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1876  command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1877  } else {
1878  sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
1879  if (sg_count == 0)
1880  goto out;
1881 
1882  scsi_for_each_sg(srb, sg, sg_count, i) {
1883  command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1884  command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1885  if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1886  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1887  goto out;
1888  }
1889  }
1890  }
1891  command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1892  }
1893  } else {
1894  /* Internal cdb post */
1895  for (i = 0; i < use_sg; i++) {
1896  command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1897  command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1898  if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1899  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1900  goto out;
1901  }
1902  }
1903  command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1904  }
1905 
1906  if (srb) {
1907  if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1908  num_sectors = (u32)srb->cmnd[4];
1909 
1910  if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1911  num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1912  }
1913 
1914  /* Update sector statistic */
1915  tw_dev->sector_count = num_sectors;
1916  if (tw_dev->sector_count > tw_dev->max_sector_count)
1917  tw_dev->max_sector_count = tw_dev->sector_count;
1918 
1919  /* Update SG statistics */
1920  if (srb) {
1921  tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1922  if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1923  tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1924  }
1925 
1926  /* Now post the command to the board */
1927  if (srb) {
1928  retval = twa_post_command_packet(tw_dev, request_id, 0);
1929  } else {
1930  twa_post_command_packet(tw_dev, request_id, 1);
1931  retval = 0;
1932  }
1933 out:
1934  return retval;
1935 } /* End twa_scsiop_execute_scsi() */
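/*
 * Data-path summary for twa_scsiop_execute_scsi() above (descriptive of
 * the code as written): internal posts supply their own pre-built
 * sglistarg; SCSI-layer requests with a single scatterlist entry smaller
 * than TW_MIN_SGL_LENGTH are bounced through the per-request
 * generic_buffer and their length is reported as TW_MIN_SGL_LENGTH;
 * everything else is DMA-mapped with twa_map_scsi_sg_data() and each
 * entry is rejected if it violates TW_ALIGNMENT_9000_SGL.  Internal
 * requests are posted with internal=1 so a full queue pends them rather
 * than returning host-busy.
 */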
1936 
1937 /* This function completes an execute scsi operation */
1938 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1939 {
1940  struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1941 
1942  if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
1943  (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1944  cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1945  if (scsi_sg_count(cmd) == 1) {
1946  void *buf = tw_dev->generic_buffer_virt[request_id];
1947 
1948  scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1949  }
1950  }
1951 } /* End twa_scsiop_execute_scsi_complete() */
1952 
1953 /* This function tells the controller to shut down */
1954 static void __twa_shutdown(TW_Device_Extension *tw_dev)
1955 {
1956  /* Disable interrupts */
1957  TW_DISABLE_INTERRUPTS(tw_dev);
1958 
1959  /* Free up the IRQ */
1960  free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1961 
1962  printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1963 
1964  /* Tell the card we are shutting down */
1965  if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1966  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1967  } else {
1968  printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1969  }
1970 
1971  /* Clear all interrupts just before exit */
1972  TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1973 } /* End __twa_shutdown() */
1974 
1975 /* Wrapper for __twa_shutdown */
1976 static void twa_shutdown(struct pci_dev *pdev)
1977 {
1978  struct Scsi_Host *host = pci_get_drvdata(pdev);
1979  TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1980 
1981  __twa_shutdown(tw_dev);
1982 } /* End twa_shutdown() */
1983 
1984 /* This function will look up a string */
1985 static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1986 {
1987  int index;
1988 
1989  for (index = 0; ((code != table[index].code) &&
1990  (table[index].text != (char *)0)); index++);
1991  return(table[index].text);
1992 } /* End twa_string_lookup() */
1993 
1994 /* This function will perform a pci-dma unmap */
1995 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1996 {
1997  struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1998 
1999  if (cmd->SCp.phase == TW_PHASE_SGLIST)
2000  scsi_dma_unmap(cmd);
2001 } /* End twa_unmap_scsi_data() */
2002 
2003 /* This function gets called when a disk is coming on-line */
2004 static int twa_slave_configure(struct scsi_device *sdev)
2005 {
2006  /* Force 60 second timeout */
2007  blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
2008 
2009  return 0;
2010 } /* End twa_slave_configure() */
2011 
2012 /* scsi_host_template initializer */
2013 static struct scsi_host_template driver_template = {
2014  .module = THIS_MODULE,
2015  .name = "3ware 9000 Storage Controller",
2016  .queuecommand = twa_scsi_queue,
2017  .eh_host_reset_handler = twa_scsi_eh_reset,
2018  .bios_param = twa_scsi_biosparam,
2019  .change_queue_depth = twa_change_queue_depth,
2020  .can_queue = TW_Q_LENGTH-2,
2021  .slave_configure = twa_slave_configure,
2022  .this_id = -1,
2023  .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
2024  .max_sectors = TW_MAX_SECTORS,
2025  .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
2026  .use_clustering = ENABLE_CLUSTERING,
2027  .shost_attrs = twa_host_attrs,
2028  .emulated = 1
2029 };
2030 
2031 /* This function will probe and initialize a card */
2032 static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2033 {
2034  struct Scsi_Host *host = NULL;
2035  TW_Device_Extension *tw_dev;
2036  unsigned long mem_addr, mem_len;
2037  int retval = -ENODEV;
2038 
2039  retval = pci_enable_device(pdev);
2040  if (retval) {
2041  TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2042  goto out_disable_device;
2043  }
2044 
2045  pci_set_master(pdev);
2046  pci_try_set_mwi(pdev);
2047 
2048  if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2049  || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2050  if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2051  || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2052  TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2053  retval = -ENODEV;
2054  goto out_disable_device;
2055  }
2056 
2057  host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2058  if (!host) {
2059  TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2060  retval = -ENOMEM;
2061  goto out_disable_device;
2062  }
2063  tw_dev = (TW_Device_Extension *)host->hostdata;
2064 
2065  /* Save values to device extension */
2066  tw_dev->host = host;
2067  tw_dev->tw_pci_dev = pdev;
2068 
2069  if (twa_initialize_device_extension(tw_dev)) {
2070  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2071  goto out_free_device_extension;
2072  }
2073 
2074  /* Request IO regions */
2075  retval = pci_request_regions(pdev, "3w-9xxx");
2076  if (retval) {
2077  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2078  goto out_free_device_extension;
2079  }
2080 
2081  if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2082  mem_addr = pci_resource_start(pdev, 1);
2083  mem_len = pci_resource_len(pdev, 1);
2084  } else {
2085  mem_addr = pci_resource_start(pdev, 2);
2086  mem_len = pci_resource_len(pdev, 2);
2087  }
2088 
2089  /* Save base address */
2090  tw_dev->base_addr = ioremap(mem_addr, mem_len);
2091  if (!tw_dev->base_addr) {
2092  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2093  goto out_release_mem_region;
2094  }
2095 
2096  /* Disable interrupts on the card */
2097  TW_DISABLE_INTERRUPTS(tw_dev);
2098 
2099  /* Initialize the card */
2100  if (twa_reset_sequence(tw_dev, 0))
2101  goto out_iounmap;
2102 
2103  /* Set host specific parameters */
2104  if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2105  (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2106  host->max_id = TW_MAX_UNITS_9650SE;
2107  else
2108  host->max_id = TW_MAX_UNITS;
2109 
2110  host->max_cmd_len = TW_MAX_CDB_LEN;
2111 
2112  /* Channels aren't supported by adapter */
2113  host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2114  host->max_channel = 0;
2115 
2116  /* Register the card with the kernel SCSI layer */
2117  retval = scsi_add_host(host, &pdev->dev);
2118  if (retval) {
2119  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2120  goto out_iounmap;
2121  }
2122 
2123  pci_set_drvdata(pdev, host);
2124 
2125  printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2126  host->host_no, mem_addr, pdev->irq);
2127  printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2128  host->host_no,
2129  (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2130  TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2131  (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2132  TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2133  le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2134  TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2135 
2136  /* Try to enable MSI */
2137  if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2138  !pci_enable_msi(pdev))
2139  set_bit(TW_USING_MSI, &tw_dev->flags);
2140 
2141  /* Now setup the interrupt handler */
2142  retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2143  if (retval) {
2144  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2145  goto out_remove_host;
2146  }
2147 
2148  twa_device_extension_list[twa_device_extension_count] = tw_dev;
2149  twa_device_extension_count++;
2150 
2151  /* Re-enable interrupts on the card */
2152  TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2153 
2154  /* Finally, scan the host */
2155  scsi_scan_host(host);
2156 
2157  if (twa_major == -1) {
2158  if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2159  TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2160  }
2161  return 0;
2162 
2163 out_remove_host:
2164  if (test_bit(TW_USING_MSI, &tw_dev->flags))
2165  pci_disable_msi(pdev);
2166  scsi_remove_host(host);
2167 out_iounmap:
2168  iounmap(tw_dev->base_addr);
2169 out_release_mem_region:
2170  pci_release_regions(pdev);
2171 out_free_device_extension:
2172  twa_free_device_extension(tw_dev);
2173  scsi_host_put(host);
2174 out_disable_device:
2175  pci_disable_device(pdev);
2176 
2177  return retval;
2178 } /* End twa_probe() */
2179 
2180 /* This function is called to remove a device */
2181 static void twa_remove(struct pci_dev *pdev)
2182 {
2183  struct Scsi_Host *host = pci_get_drvdata(pdev);
2184  TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2185 
2186  scsi_remove_host(tw_dev->host);
2187 
2188  /* Unregister character device */
2189  if (twa_major >= 0) {
2190  unregister_chrdev(twa_major, "twa");
2191  twa_major = -1;
2192  }
2193 
2194  /* Shutdown the card */
2195  __twa_shutdown(tw_dev);
2196 
2197  /* Disable MSI if enabled */
2198  if (test_bit(TW_USING_MSI, &tw_dev->flags))
2199  pci_disable_msi(pdev);
2200 
2201  /* Free IO remapping */
2202  iounmap(tw_dev->base_addr);
2203 
2204  /* Free up the mem region */
2205  pci_release_regions(pdev);
2206 
2207  /* Free up device extension resources */
2208  twa_free_device_extension(tw_dev);
2209 
2210  scsi_host_put(tw_dev->host);
2211  pci_disable_device(pdev);
2212  twa_device_extension_count--;
2213 } /* End twa_remove() */
2214 
2215 #ifdef CONFIG_PM
2216 /* This function is called on PCI suspend */
2217 static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2218 {
2219  struct Scsi_Host *host = pci_get_drvdata(pdev);
2220  TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2221 
2222  printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2223 
2224  TW_DISABLE_INTERRUPTS(tw_dev);
2225  free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2226 
2227  if (test_bit(TW_USING_MSI, &tw_dev->flags))
2228  pci_disable_msi(pdev);
2229 
2230  /* Tell the card we are shutting down */
2231  if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2232  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2233  } else {
2234  printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2235  }
2236  TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2237 
2238  pci_save_state(pdev);
2239  pci_disable_device(pdev);
2240  pci_set_power_state(pdev, pci_choose_state(pdev, state));
2241 
2242  return 0;
2243 } /* End twa_suspend() */
2244 
2245 /* This function is called on PCI resume */
2246 static int twa_resume(struct pci_dev *pdev)
2247 {
2248  int retval = 0;
2249  struct Scsi_Host *host = pci_get_drvdata(pdev);
2250  TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2251 
2252  printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2253  pci_set_power_state(pdev, PCI_D0);
2254  pci_enable_wake(pdev, PCI_D0, 0);
2255  pci_restore_state(pdev);
2256 
2257  retval = pci_enable_device(pdev);
2258  if (retval) {
2259  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2260  return retval;
2261  }
2262 
2263  pci_set_master(pdev);
2264  pci_try_set_mwi(pdev);
2265 
2266  if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2267  || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2268  if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2269  || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2270  TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2271  retval = -ENODEV;
2272  goto out_disable_device;
2273  }
2274 
2275  /* Initialize the card */
2276  if (twa_reset_sequence(tw_dev, 0)) {
2277  retval = -ENODEV;
2278  goto out_disable_device;
2279  }
2280 
2281  /* Now setup the interrupt handler */
2282  retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2283  if (retval) {
2284  TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2285  retval = -ENODEV;
2286  goto out_disable_device;
2287  }
2288 
2289  /* Now enable MSI if enabled */
2290  if (test_bit(TW_USING_MSI, &tw_dev->flags))
2291  pci_enable_msi(pdev);
2292 
2293  /* Re-enable interrupts on the card */
2294  TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2295 
2296  printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2297  return 0;
2298 
2299 out_disable_device:
2300  scsi_remove_host(host);
2301  pci_disable_device(pdev);
2302 
2303  return retval;
2304 } /* End twa_resume() */
2305 #endif
2306 
2307 /* PCI Devices supported by this driver */
2308 static struct pci_device_id twa_pci_tbl[] __devinitdata = {
2309  { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2310  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2311  { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2312  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2313  { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2314  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2315  { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2316  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2317  { }
2318 };
2319 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
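/*
 * Note on the ID table above: each entry wildcards the subsystem IDs with
 * PCI_ANY_ID, so any board exposing one of the four 9000-series device
 * IDs is claimed.  MODULE_DEVICE_TABLE(pci, ...) exports the table in the
 * module's alias information, which lets udev/modprobe auto-load 3w-9xxx
 * when a matching PCI device is discovered.
 */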
2320 
2321 /* pci_driver initializer */
2322 static struct pci_driver twa_driver = {
2323  .name = "3w-9xxx",
2324  .id_table = twa_pci_tbl,
2325  .probe = twa_probe,
2326  .remove = twa_remove,
2327 #ifdef CONFIG_PM
2328  .suspend = twa_suspend,
2329  .resume = twa_resume,
2330 #endif
2331  .shutdown = twa_shutdown
2332 };
2333 
2334 /* This function is called on driver initialization */
2335 static int __init twa_init(void)
2336 {
2337  printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2338 
2339  return pci_register_driver(&twa_driver);
2340 } /* End twa_init() */
2341 
2342 /* This function is called on driver exit */
2343 static void __exit twa_exit(void)
2344 {
2345  pci_unregister_driver(&twa_driver);
2346 } /* End twa_exit() */
2347 
2348 module_init(twa_init);
2349 module_exit(twa_exit);
2350