Linux Kernel  3.7.1
ipr.c
1 /*
2  * ipr.c -- driver for IBM Power Linux RAID adapters
3  *
4  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) 2003, 2004 IBM Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21  *
22  */
23 
24 /*
25  * Notes:
26  *
27  * This driver is used to control the following SCSI adapters:
28  *
29  * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30  *
31  * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32  * PCI-X Dual Channel Ultra 320 SCSI Adapter
33  * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34  * Embedded SCSI adapter on p615 and p655 systems
35  *
36  * Supported Hardware Features:
37  * - Ultra 320 SCSI controller
38  * - PCI-X host interface
39  * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40  * - Non-Volatile Write Cache
41  * - Supports attachment of non-RAID disks, tape, and optical devices
42  * - RAID Levels 0, 5, 10
43  * - Hot spare
44  * - Background Parity Checking
45  * - Background Data Scrubbing
46  * - Ability to increase the capacity of an existing RAID 5 disk array
47  * by adding disks
48  *
49  * Driver Features:
50  * - Tagged command queuing
51  * - Adapter microcode download
52  * - PCI hot plug
53  * - SCSI device hot plug
54  *
55  */
56 
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
79 #include <asm/io.h>
80 #include <asm/irq.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
87 #include "ipr.h"
88 
89 /*
90  * Global Data
91  */
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static DEFINE_SPINLOCK(ipr_driver_lock);
102 
103 /* This table describes the differences between DMA controller chips */
104 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
105  { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
106  .mailbox = 0x0042C,
107  .max_cmds = 100,
108  .cache_line_size = 0x20,
109  .clear_isr = 1,
110  {
111  .set_interrupt_mask_reg = 0x0022C,
112  .clr_interrupt_mask_reg = 0x00230,
113  .clr_interrupt_mask_reg32 = 0x00230,
114  .sense_interrupt_mask_reg = 0x0022C,
115  .sense_interrupt_mask_reg32 = 0x0022C,
116  .clr_interrupt_reg = 0x00228,
117  .clr_interrupt_reg32 = 0x00228,
118  .sense_interrupt_reg = 0x00224,
119  .sense_interrupt_reg32 = 0x00224,
120  .ioarrin_reg = 0x00404,
121  .sense_uproc_interrupt_reg = 0x00214,
122  .sense_uproc_interrupt_reg32 = 0x00214,
123  .set_uproc_interrupt_reg = 0x00214,
124  .set_uproc_interrupt_reg32 = 0x00214,
125  .clr_uproc_interrupt_reg = 0x00218,
126  .clr_uproc_interrupt_reg32 = 0x00218
127  }
128  },
129  { /* Snipe and Scamp */
130  .mailbox = 0x0052C,
131  .max_cmds = 100,
132  .cache_line_size = 0x20,
133  .clear_isr = 1,
134  {
135  .set_interrupt_mask_reg = 0x00288,
136  .clr_interrupt_mask_reg = 0x0028C,
137  .clr_interrupt_mask_reg32 = 0x0028C,
138  .sense_interrupt_mask_reg = 0x00288,
139  .sense_interrupt_mask_reg32 = 0x00288,
140  .clr_interrupt_reg = 0x00284,
141  .clr_interrupt_reg32 = 0x00284,
142  .sense_interrupt_reg = 0x00280,
143  .sense_interrupt_reg32 = 0x00280,
144  .ioarrin_reg = 0x00504,
145  .sense_uproc_interrupt_reg = 0x00290,
146  .sense_uproc_interrupt_reg32 = 0x00290,
147  .set_uproc_interrupt_reg = 0x00290,
148  .set_uproc_interrupt_reg32 = 0x00290,
149  .clr_uproc_interrupt_reg = 0x00294,
150  .clr_uproc_interrupt_reg32 = 0x00294
151  }
152  },
153  { /* CRoC */
154  .mailbox = 0x00044,
155  .max_cmds = 1000,
156  .cache_line_size = 0x20,
157  .clear_isr = 0,
158  {
159  .set_interrupt_mask_reg = 0x00010,
160  .clr_interrupt_mask_reg = 0x00018,
161  .clr_interrupt_mask_reg32 = 0x0001C,
162  .sense_interrupt_mask_reg = 0x00010,
163  .sense_interrupt_mask_reg32 = 0x00014,
164  .clr_interrupt_reg = 0x00008,
165  .clr_interrupt_reg32 = 0x0000C,
166  .sense_interrupt_reg = 0x00000,
167  .sense_interrupt_reg32 = 0x00004,
168  .ioarrin_reg = 0x00070,
169  .sense_uproc_interrupt_reg = 0x00020,
170  .sense_uproc_interrupt_reg32 = 0x00024,
171  .set_uproc_interrupt_reg = 0x00020,
172  .set_uproc_interrupt_reg32 = 0x00024,
173  .clr_uproc_interrupt_reg = 0x00028,
174  .clr_uproc_interrupt_reg32 = 0x0002C,
175  .init_feedback_reg = 0x0005C,
176  .dump_addr_reg = 0x00064,
177  .dump_data_reg = 0x00068,
178  .endian_swap_reg = 0x00084
179  }
180  },
181 };
182 
183 static const struct ipr_chip_t ipr_chip[] = {
193 };
194 
195 static int ipr_max_bus_speeds[] = {
196  IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
197 };
198 
199 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
200 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
201 module_param_named(max_speed, ipr_max_speed, uint, 0);
202 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
203 module_param_named(log_level, ipr_log_level, uint, 0);
204 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
205 module_param_named(testmode, ipr_testmode, int, 0);
206 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
207 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
208 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
209 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
210 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
211 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
212 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
213 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
214 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
215 module_param_named(max_devs, ipr_max_devs, int, 0);
216 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
217  "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
218 MODULE_LICENSE("GPL");
219 MODULE_VERSION(IPR_DRIVER_VERSION);
220 
221 /* A constant array of IOASCs/URCs/Error Messages */
222 static const
223 struct ipr_error_table_t ipr_error_table[] = {
224  {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
225  "8155: An unknown error was received"},
226  {0x00330000, 0, 0,
227  "Soft underlength error"},
228  {0x005A0000, 0, 0,
229  "Command to be cancelled not found"},
230  {0x00808000, 0, 0,
231  "Qualified success"},
232  {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
233  "FFFE: Soft device bus error recovered by the IOA"},
234  {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
235  "4101: Soft device bus fabric error"},
236  {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
237  "FFFC: Logical block guard error recovered by the device"},
238  {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
239  "FFFC: Logical block reference tag error recovered by the device"},
240  {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
241  "4171: Recovered scatter list tag / sequence number error"},
242  {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
243  "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
244  {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
245  "4171: Recovered logical block sequence number error on IOA to Host transfer"},
246  {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
247  "FFFD: Recovered logical block reference tag error detected by the IOA"},
248  {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
249  "FFFD: Logical block guard error recovered by the IOA"},
250  {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
251  "FFF9: Device sector reassign successful"},
252  {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
253  "FFF7: Media error recovered by device rewrite procedures"},
254  {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
255  "7001: IOA sector reassignment successful"},
256  {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
257  "FFF9: Soft media error. Sector reassignment recommended"},
258  {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
259  "FFF7: Media error recovered by IOA rewrite procedures"},
260  {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
261  "FF3D: Soft PCI bus error recovered by the IOA"},
262  {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
263  "FFF6: Device hardware error recovered by the IOA"},
264  {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
265  "FFF6: Device hardware error recovered by the device"},
266  {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
267  "FF3D: Soft IOA error recovered by the IOA"},
268  {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
269  "FFFA: Undefined device response recovered by the IOA"},
270  {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
271  "FFF6: Device bus error, message or command phase"},
272  {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
273  "FFFE: Task Management Function failed"},
274  {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
275  "FFF6: Failure prediction threshold exceeded"},
276  {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
277  "8009: Impending cache battery pack failure"},
278  {0x02040400, 0, 0,
279  "34FF: Disk device format in progress"},
280  {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
281  "9070: IOA requested reset"},
282  {0x023F0000, 0, 0,
283  "Synchronization required"},
284  {0x024E0000, 0, 0,
285  "No ready, IOA shutdown"},
286  {0x025A0000, 0, 0,
287  "Not ready, IOA has been shutdown"},
288  {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
289  "3020: Storage subsystem configuration error"},
290  {0x03110B00, 0, 0,
291  "FFF5: Medium error, data unreadable, recommend reassign"},
292  {0x03110C00, 0, 0,
293  "7000: Medium error, data unreadable, do not reassign"},
294  {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
295  "FFF3: Disk media format bad"},
296  {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
297  "3002: Addressed device failed to respond to selection"},
298  {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
299  "3100: Device bus error"},
300  {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
301  "3109: IOA timed out a device command"},
302  {0x04088000, 0, 0,
303  "3120: SCSI bus is not operational"},
304  {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
305  "4100: Hard device bus fabric error"},
306  {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
307  "310C: Logical block guard error detected by the device"},
308  {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
309  "310C: Logical block reference tag error detected by the device"},
310  {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
311  "4170: Scatter list tag / sequence number error"},
312  {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
313  "8150: Logical block CRC error on IOA to Host transfer"},
314  {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
315  "4170: Logical block sequence number error on IOA to Host transfer"},
316  {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
317  "310D: Logical block reference tag error detected by the IOA"},
318  {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
319  "310D: Logical block guard error detected by the IOA"},
320  {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
321  "9000: IOA reserved area data check"},
322  {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
323  "9001: IOA reserved area invalid data pattern"},
324  {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
325  "9002: IOA reserved area LRC error"},
326  {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
327  "Hardware Error, IOA metadata access error"},
328  {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
329  "102E: Out of alternate sectors for disk storage"},
330  {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
331  "FFF4: Data transfer underlength error"},
332  {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
333  "FFF4: Data transfer overlength error"},
334  {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
335  "3400: Logical unit failure"},
336  {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
337  "FFF4: Device microcode is corrupt"},
338  {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
339  "8150: PCI bus error"},
340  {0x04430000, 1, 0,
341  "Unsupported device bus message received"},
342  {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
343  "FFF4: Disk device problem"},
344  {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
345  "8150: Permanent IOA failure"},
346  {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
347  "3010: Disk device returned wrong response to IOA"},
348  {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
349  "8151: IOA microcode error"},
350  {0x04448500, 0, 0,
351  "Device bus status error"},
352  {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
353  "8157: IOA error requiring IOA reset to recover"},
354  {0x04448700, 0, 0,
355  "ATA device status error"},
356  {0x04490000, 0, 0,
357  "Message reject received from the device"},
358  {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
359  "8008: A permanent cache battery pack failure occurred"},
360  {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
361  "9090: Disk unit has been modified after the last known status"},
362  {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
363  "9081: IOA detected device error"},
364  {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
365  "9082: IOA detected device error"},
366  {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
367  "3110: Device bus error, message or command phase"},
368  {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
369  "3110: SAS Command / Task Management Function failed"},
370  {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
371  "9091: Incorrect hardware configuration change has been detected"},
372  {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
373  "9073: Invalid multi-adapter configuration"},
374  {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
375  "4010: Incorrect connection between cascaded expanders"},
376  {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
377  "4020: Connections exceed IOA design limits"},
378  {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
379  "4030: Incorrect multipath connection"},
380  {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
381  "4110: Unsupported enclosure function"},
382  {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
383  "FFF4: Command to logical unit failed"},
384  {0x05240000, 1, 0,
385  "Illegal request, invalid request type or request packet"},
386  {0x05250000, 0, 0,
387  "Illegal request, invalid resource handle"},
388  {0x05258000, 0, 0,
389  "Illegal request, commands not allowed to this device"},
390  {0x05258100, 0, 0,
391  "Illegal request, command not allowed to a secondary adapter"},
392  {0x05258200, 0, 0,
393  "Illegal request, command not allowed to a non-optimized resource"},
394  {0x05260000, 0, 0,
395  "Illegal request, invalid field in parameter list"},
396  {0x05260100, 0, 0,
397  "Illegal request, parameter not supported"},
398  {0x05260200, 0, 0,
399  "Illegal request, parameter value invalid"},
400  {0x052C0000, 0, 0,
401  "Illegal request, command sequence error"},
402  {0x052C8000, 1, 0,
403  "Illegal request, dual adapter support not enabled"},
404  {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
405  "9031: Array protection temporarily suspended, protection resuming"},
406  {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
407  "9040: Array protection temporarily suspended, protection resuming"},
408  {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
409  "3140: Device bus not ready to ready transition"},
410  {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
411  "FFFB: SCSI bus was reset"},
412  {0x06290500, 0, 0,
413  "FFFE: SCSI bus transition to single ended"},
414  {0x06290600, 0, 0,
415  "FFFE: SCSI bus transition to LVD"},
416  {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
417  "FFFB: SCSI bus was reset by another initiator"},
418  {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
419  "3029: A device replacement has occurred"},
420  {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
421  "9051: IOA cache data exists for a missing or failed device"},
422  {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
423  "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
424  {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
425  "9025: Disk unit is not supported at its physical location"},
426  {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
427  "3020: IOA detected a SCSI bus configuration error"},
428  {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
429  "3150: SCSI bus configuration error"},
430  {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
431  "9074: Asymmetric advanced function disk configuration"},
432  {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
433  "4040: Incomplete multipath connection between IOA and enclosure"},
434  {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
435  "4041: Incomplete multipath connection between enclosure and device"},
436  {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
437  "9075: Incomplete multipath connection between IOA and remote IOA"},
438  {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
439  "9076: Configuration error, missing remote IOA"},
440  {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
441  "4050: Enclosure does not support a required multipath function"},
442  {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
443  "4070: Logically bad block written on device"},
444  {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
445  "9041: Array protection temporarily suspended"},
446  {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
447  "9042: Corrupt array parity detected on specified device"},
448  {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
449  "9030: Array no longer protected due to missing or failed disk unit"},
450  {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
451  "9071: Link operational transition"},
452  {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
453  "9072: Link not operational transition"},
454  {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
455  "9032: Array exposed but still protected"},
456  {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
457  "70DD: Device forced failed by disrupt device command"},
458  {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
459  "4061: Multipath redundancy level got better"},
460  {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
461  "4060: Multipath redundancy level got worse"},
462  {0x07270000, 0, 0,
463  "Failure due to other device"},
464  {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
465  "9008: IOA does not support functions expected by devices"},
466  {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
467  "9010: Cache data associated with attached devices cannot be found"},
468  {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
469  "9011: Cache data belongs to devices other than those attached"},
470  {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
471  "9020: Array missing 2 or more devices with only 1 device present"},
472  {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
473  "9021: Array missing 2 or more devices with 2 or more devices present"},
474  {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
475  "9022: Exposed array is missing a required device"},
476  {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
477  "9023: Array member(s) not at required physical locations"},
478  {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
479  "9024: Array not functional due to present hardware configuration"},
480  {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
481  "9026: Array not functional due to present hardware configuration"},
482  {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
483  "9027: Array is missing a device and parity is out of sync"},
484  {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
485  "9028: Maximum number of arrays already exist"},
486  {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
487  "9050: Required cache data cannot be located for a disk unit"},
488  {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
489  "9052: Cache data exists for a device that has been modified"},
490  {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
491  "9054: IOA resources not available due to previous problems"},
492  {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
493  "9092: Disk unit requires initialization before use"},
494  {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
495  "9029: Incorrect hardware configuration change has been detected"},
496  {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
497  "9060: One or more disk pairs are missing from an array"},
498  {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
499  "9061: One or more disks are missing from an array"},
500  {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
501  "9062: One or more disks are missing from an array"},
502  {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
503  "9063: Maximum number of functional arrays has been exceeded"},
504  {0x0B260000, 0, 0,
505  "Aborted command, invalid descriptor"},
506  {0x0B5A0000, 0, 0,
507  "Command terminated by host"}
508 };
509 
510 static const struct ipr_ses_table_entry ipr_ses_table[] = {
511  { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
512  { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
513  { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
514  { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
515  { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
516  { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
517  { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
518  { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
519  { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
520  { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
521  { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
522  { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
523  { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
524 };
525 
526 /*
527  * Function Prototypes
528  */
529 static int ipr_reset_alert(struct ipr_cmnd *);
530 static void ipr_process_ccn(struct ipr_cmnd *);
531 static void ipr_process_error(struct ipr_cmnd *);
532 static void ipr_reset_ioa_job(struct ipr_cmnd *);
533 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
534  enum ipr_shutdown_type);
535 
536 #ifdef CONFIG_SCSI_IPR_TRACE
537 
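538 /**
539  * ipr_trc_hook - Add a trace entry to the driver trace
540  * @ipr_cmd: ipr command struct
541  * @type: trace type
542  * @add_data: additional data
543  *
544  * Return value: none
545  **/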
546 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
547  u8 type, u32 add_data)
548 {
549  struct ipr_trace_entry *trace_entry;
550  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
551 
552  trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
553  trace_entry->time = jiffies;
554  trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
555  trace_entry->type = type;
556  if (ipr_cmd->ioa_cfg->sis64)
557  trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
558  else
559  trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
560  trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
561  trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
562  trace_entry->u.add_data = add_data;
563 }
564 #else
565 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
566 #endif
567 
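568 /**
569  * ipr_lock_and_done - Call the command's done function with the host lock held
570  * @ipr_cmd: ipr command struct
571  *
572  * Return value: none
573  **/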
575 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
576 {
577  unsigned long lock_flags;
578  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
579 
580  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
581  ipr_cmd->done(ipr_cmd);
582  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
583 }
584 
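585 /**
586  * ipr_reinit_ipr_cmnd - Re-initialize an ipr command struct for reuse
587  * @ipr_cmd: ipr command struct
588  *
589  * Return value: none
590  **/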
592 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
593 {
594  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
595  struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
596  struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
597  dma_addr_t dma_addr = ipr_cmd->dma_addr;
598 
599  memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
600  ioarcb->data_transfer_length = 0;
601  ioarcb->read_data_transfer_length = 0;
602  ioarcb->ioadl_len = 0;
603  ioarcb->read_ioadl_len = 0;
604 
605  if (ipr_cmd->ioa_cfg->sis64) {
606  ioarcb->u.sis64_addr_data.data_ioadl_addr =
607  cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
608  ioasa64->u.gata.status = 0;
609  } else {
610  ioarcb->write_ioadl_addr =
611  cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
612  ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
613  ioasa->u.gata.status = 0;
614  }
615 
616  ioasa->hdr.ioasc = 0;
617  ioasa->hdr.residual_data_len = 0;
618  ipr_cmd->scsi_cmd = NULL;
619  ipr_cmd->qc = NULL;
620  ipr_cmd->sense_buffer[0] = 0;
621  ipr_cmd->dma_use_sg = 0;
622 }
623 
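624 /**
625  * ipr_init_ipr_cmnd - Initialize an ipr command struct
626  * @ipr_cmd: ipr command struct
627  * @fast_done: fast done function to be invoked on completion
628  *
629  * Return value: none
630  **/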
631 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
632  void (*fast_done) (struct ipr_cmnd *))
633 {
634  ipr_reinit_ipr_cmnd(ipr_cmd);
635  ipr_cmd->u.scratch = 0;
636  ipr_cmd->sibling = NULL;
637  ipr_cmd->fast_done = fast_done;
638  init_timer(&ipr_cmd->timer);
639 }
640 
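641 /**
642  * __ipr_get_free_ipr_cmnd - Get a free ipr command struct from the free queue
643  * @ioa_cfg: ioa config struct
644  *
645  * Return value: pointer to ipr command struct
646  **/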
648 static
649 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
650 {
651  struct ipr_cmnd *ipr_cmd;
652 
653  ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
654  list_del(&ipr_cmd->queue);
655 
656  return ipr_cmd;
657 }
658 
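659 /**
660  * ipr_get_free_ipr_cmnd - Get a free, initialized ipr command struct
661  * @ioa_cfg: ioa config struct
662  *
663  * Return value: pointer to ipr command struct
664  **/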
666 static
667 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
668 {
669  struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
670  ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
671  return ipr_cmd;
672 }
673 
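674 /**
675  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
676  * @ioa_cfg: ioa config struct
677  * @clr_ints: interrupts to clear
678  *
679  * This function masks all interrupts on the adapter, clears the
680  * interrupts specified in the mask, then performs a read of the
681  * sense interrupt register to push the writes to the adapter.
682  *
683  * Return value: none
684  **/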
685 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
686  u32 clr_ints)
687 {
688  volatile u32 int_reg;
689 
690  /* Stop new interrupts */
691  ioa_cfg->allow_interrupts = 0;
692 
693  /* Set interrupt mask to stop all new interrupts */
694  if (ioa_cfg->sis64)
695  writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
696  else
697  writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
698 
699  /* Clear any pending interrupts */
700  if (ioa_cfg->sis64)
701  writel(~0, ioa_cfg->regs.clr_interrupt_reg);
702  writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
703  int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
704 }
705 
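706 /**
707  * ipr_save_pcix_cmd_reg - Save PCI-X command register
708  * @ioa_cfg: ioa config struct
709  *
710  * Return value: 0 on success / -EIO on failure
711  **/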
713 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
714 {
715  int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
716 
717  if (pcix_cmd_reg == 0)
718  return 0;
719 
720  if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
721  &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
722  dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
723  return -EIO;
724  }
725 
726  ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
727  return 0;
728 }
729 
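730 /**
731  * ipr_set_pcix_cmd_reg - Setup PCI-X command register
732  * @ioa_cfg: ioa config struct
733  *
734  * Return value: 0 on success / -EIO on failure
735  **/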
737 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
738 {
739  int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
740 
741  if (pcix_cmd_reg) {
742  if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
743  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
744  dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
745  return -EIO;
746  }
747  }
748 
749  return 0;
750 }
751 
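752 /**
753  * ipr_sata_eh_done - done function for aborted SATA commands
754  * @ipr_cmd: ipr command struct
755  *
756  * This function is invoked for ops generated to SATA
757  * devices which are being aborted.
758  *
759  * Return value: none
760  **/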
762 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
763 {
764  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
765  struct ata_queued_cmd *qc = ipr_cmd->qc;
766  struct ipr_sata_port *sata_port = qc->ap->private_data;
767 
768  qc->err_mask |= AC_ERR_OTHER;
769  sata_port->ioasa.status |= ATA_BUSY;
770  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
771  ata_qc_complete(qc);
772 }
773 
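774 /**
775  * ipr_scsi_eh_done - mid-layer done function for aborted ops
776  * @ipr_cmd: ipr command struct
777  *
778  * This function is invoked by the interrupt handler for
779  * ops generated by the SCSI mid-layer which are being aborted.
780  *
781  * Return value: none
782  **/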
784 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
785 {
786  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
787  struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
788 
789  scsi_cmd->result |= (DID_ERROR << 16);
790 
791  scsi_dma_unmap(ipr_cmd->scsi_cmd);
792  scsi_cmd->scsi_done(scsi_cmd);
793  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
794 }
795 
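796 /**
797  * ipr_fail_all_ops - Fail all outstanding ops
798  * @ioa_cfg: ioa config struct
799  *
800  * This function fails all outstanding ops on the pending queue.
801  *
802  * Return value: none
803  **/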
805 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
806 {
807  struct ipr_cmnd *ipr_cmd, *temp;
808 
809  ENTER;
810  list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
811  list_del(&ipr_cmd->queue);
812 
813  ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
814  ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
815 
816  if (ipr_cmd->scsi_cmd)
817  ipr_cmd->done = ipr_scsi_eh_done;
818  else if (ipr_cmd->qc)
819  ipr_cmd->done = ipr_sata_eh_done;
820 
822  del_timer(&ipr_cmd->timer);
823  ipr_cmd->done(ipr_cmd);
824  }
825 
826  LEAVE;
827 }
828 
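829 /**
830  * ipr_send_command - Send an IOARCB to the adapter
831  * @ipr_cmd: ipr command struct
832  *
833  * This function writes the IOARCB address to the IOARRIN register.
834  * On sis64 adapters, the low-order bits of the address also tell the
835  * adapter whether a 256 byte or 512 byte IOARCB should be fetched.
836  *
837  * Return value: none
838  **/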
840 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
841 {
842  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
843  dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
844 
845  if (ioa_cfg->sis64) {
846  /* The default size is 256 bytes */
847  send_dma_addr |= 0x1;
848 
849  /* If the number of ioadls * size of ioadl > 128 bytes,
850  then use a 512 byte ioarcb */
851  if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
852  send_dma_addr |= 0x4;
853  writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
854  } else
855  writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
856 }
857 
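858 /**
859  * ipr_do_req - Send driver initiated requests
860  * @ipr_cmd: ipr command struct
861  * @done: done function
862  * @timeout_func: timeout function
863  * @timeout: timeout value
864  *
865  * This function adds the command to the pending queue, starts the
866  * command timer with the given timeout handler, traces the start of
867  * the op, and sends the command to the adapter.
868  *
869  * Return value: none
870  **/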
871 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
872  void (*done) (struct ipr_cmnd *),
873  void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
874 {
875  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
876 
877  list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
878 
879  ipr_cmd->done = done;
880 
881  ipr_cmd->timer.data = (unsigned long) ipr_cmd;
882  ipr_cmd->timer.expires = jiffies + timeout;
883  ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
884 
885  add_timer(&ipr_cmd->timer);
886 
887  ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
888 
889  ipr_send_command(ipr_cmd);
890 }
891 
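892 /**
893  * ipr_internal_cmd_done - Op done function for an internally generated op
894  * @ipr_cmd: ipr command struct
895  *
896  * This function is the op done function for an internally generated,
897  * blocking op. It simply wakes the sleeping thread.
898  *
899  * Return value: none
900  **/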
902 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
903 {
904  if (ipr_cmd->sibling)
905  ipr_cmd->sibling = NULL;
906  else
907  complete(&ipr_cmd->completion);
908 }
909 
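910 /**
911  * ipr_init_ioadl - Initialize the ioadl for the command
912  * @ipr_cmd: ipr command struct
913  * @dma_addr: dma address
914  * @len: transfer length
915  * @flags: ioadl flag value
916  *
917  * This function initializes an ioadl in the case where there is only
918  * a single scatter-gather element, filling in the address, length and
919  * flags and updating the transfer lengths in the IOARCB.
920  *
921  * Return value: none
922  **/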
923 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
924  u32 len, int flags)
925 {
926  struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
927  struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
928 
929  ipr_cmd->dma_use_sg = 1;
930 
931  if (ipr_cmd->ioa_cfg->sis64) {
932  ioadl64->flags = cpu_to_be32(flags);
933  ioadl64->data_len = cpu_to_be32(len);
934  ioadl64->address = cpu_to_be64(dma_addr);
935 
936  ipr_cmd->ioarcb.ioadl_len =
937  cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
938  ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
939  } else {
940  ioadl->flags_and_data_len = cpu_to_be32(flags | len);
941  ioadl->address = cpu_to_be32(dma_addr);
942 
943  if (flags == IPR_IOADL_FLAGS_READ_LAST) {
944  ipr_cmd->ioarcb.read_ioadl_len =
945  cpu_to_be32(sizeof(struct ipr_ioadl_desc));
946  ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
947  } else {
948  ipr_cmd->ioarcb.ioadl_len =
949  cpu_to_be32(sizeof(struct ipr_ioadl_desc));
950  ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
951  }
952  }
953 }
954 
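955 /**
956  * ipr_send_blocking_cmd - Send command and sleep on its completion.
957  * @ipr_cmd: ipr command struct
958  * @timeout_func: function to invoke if command times out
959  * @timeout: timeout value
960  *
961  * Return value: none
962  **/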
964 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
965  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
966  u32 timeout)
967 {
968  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
969 
970  init_completion(&ipr_cmd->completion);
971  ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
972 
973  spin_unlock_irq(ioa_cfg->host->host_lock);
974  wait_for_completion(&ipr_cmd->completion);
975  spin_lock_irq(ioa_cfg->host->host_lock);
976 }
977 
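978 /**
979  * ipr_send_hcam - Send an HCAM to the adapter.
980  * @ioa_cfg: ioa config struct
981  * @type: HCAM type
982  * @hostrcb: hostrcb struct
983  *
984  * This function will send a Host Controlled Async command to the adapter.
985  * If HCAMs are currently not allowed to be issued to the adapter, it
986  * places the hostrcb on the free queue instead.
987  *
988  * Return value: none
989  **/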
991 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
992  struct ipr_hostrcb *hostrcb)
993 {
994  struct ipr_cmnd *ipr_cmd;
995  struct ipr_ioarcb *ioarcb;
996 
997  if (ioa_cfg->allow_cmds) {
998  ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
999  list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
1000  list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1001 
1002  ipr_cmd->u.hostrcb = hostrcb;
1003  ioarcb = &ipr_cmd->ioarcb;
1004 
1006  ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1007  ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1008  ioarcb->cmd_pkt.cdb[1] = type;
1009  ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1010  ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1011 
1012  ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1013  sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1014 
1015  if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1016  ipr_cmd->done = ipr_process_ccn;
1017  else
1018  ipr_cmd->done = ipr_process_error;
1019 
1021 
1022  ipr_send_command(ipr_cmd);
1023  } else {
1024  list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1025  }
1026 }
1027 
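1028 /**
1029  * ipr_update_ata_class - Update the ata class in the resource struct
1030  * @res: resource struct
1031  * @proto: cfgte device bus protocol value
1032  *
1033  * Return value: none
1034  **/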
1036 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1037 {
1038  switch (proto) {
1039  case IPR_PROTO_SATA:
1040  case IPR_PROTO_SAS_STP:
1041  res->ata_class = ATA_DEV_ATA;
1042  break;
1043  case IPR_PROTO_SATA_ATAPI:
1044  case IPR_PROTO_SAS_STP_ATAPI:
1045  res->ata_class = ATA_DEV_ATAPI;
1046  break;
1047  default:
1048  res->ata_class = ATA_DEV_UNKNOWN;
1049  break;
1050  };
1051 }
1052 
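1053 /**
1054  * ipr_init_res_entry - Initialize a resource entry struct.
1055  * @res: resource entry struct
1056  * @cfgtew: config table entry wrapper struct
1057  *
1058  * Return value: none
1059  **/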
1061 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1062  struct ipr_config_table_entry_wrapper *cfgtew)
1063 {
1064  int found = 0;
1065  unsigned int proto;
1066  struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1067  struct ipr_resource_entry *gscsi_res = NULL;
1068 
1069  res->needs_sync_complete = 0;
1070  res->in_erp = 0;
1071  res->add_to_ml = 0;
1072  res->del_from_ml = 0;
1073  res->resetting_device = 0;
1074  res->sdev = NULL;
1075  res->sata_port = NULL;
1076 
1077  if (ioa_cfg->sis64) {
1078  proto = cfgtew->u.cfgte64->proto;
1079  res->res_flags = cfgtew->u.cfgte64->res_flags;
1080  res->qmodel = IPR_QUEUEING_MODEL64(res);
1081  res->type = cfgtew->u.cfgte64->res_type;
1082 
1083  memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1084  sizeof(res->res_path));
1085 
1086  res->bus = 0;
1087  memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1088  sizeof(res->dev_lun.scsi_lun));
1089  res->lun = scsilun_to_int(&res->dev_lun);
1090 
1091  if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1092  list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1093  if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1094  found = 1;
1095  res->target = gscsi_res->target;
1096  break;
1097  }
1098  }
1099  if (!found) {
1100  res->target = find_first_zero_bit(ioa_cfg->target_ids,
1101  ioa_cfg->max_devs_supported);
1102  set_bit(res->target, ioa_cfg->target_ids);
1103  }
1104  } else if (res->type == IPR_RES_TYPE_IOAFP) {
1105  res->bus = IPR_IOAFP_VIRTUAL_BUS;
1106  res->target = 0;
1107  } else if (res->type == IPR_RES_TYPE_ARRAY) {
1108  res->bus = IPR_ARRAY_VIRTUAL_BUS;
1109  res->target = find_first_zero_bit(ioa_cfg->array_ids,
1110  ioa_cfg->max_devs_supported);
1111  set_bit(res->target, ioa_cfg->array_ids);
1112  } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1113  res->bus = IPR_VSET_VIRTUAL_BUS;
1114  res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1115  ioa_cfg->max_devs_supported);
1116  set_bit(res->target, ioa_cfg->vset_ids);
1117  } else {
1118  res->target = find_first_zero_bit(ioa_cfg->target_ids,
1119  ioa_cfg->max_devs_supported);
1120  set_bit(res->target, ioa_cfg->target_ids);
1121  }
1122  } else {
1123  proto = cfgtew->u.cfgte->proto;
1124  res->qmodel = IPR_QUEUEING_MODEL(res);
1125  res->flags = cfgtew->u.cfgte->flags;
1126  if (res->flags & IPR_IS_IOA_RESOURCE)
1127  res->type = IPR_RES_TYPE_IOAFP;
1128  else
1129  res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1130 
1131  res->bus = cfgtew->u.cfgte->res_addr.bus;
1132  res->target = cfgtew->u.cfgte->res_addr.target;
1133  res->lun = cfgtew->u.cfgte->res_addr.lun;
1134  res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1135  }
1136 
1137  ipr_update_ata_class(res, proto);
1138 }
1139 
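1140 /**
1141  * ipr_is_same_device - Determine if two devices are the same.
1142  * @res: resource entry struct
1143  * @cfgtew: config table entry wrapper struct
1144  *
1145  * Return value: 1 if the devices are the same / 0 otherwise
1146  **/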
1148 static int ipr_is_same_device(struct ipr_resource_entry *res,
1149  struct ipr_config_table_entry_wrapper *cfgtew)
1150 {
1151  if (res->ioa_cfg->sis64) {
1152  if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1153  sizeof(cfgtew->u.cfgte64->dev_id)) &&
1154  !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1155  sizeof(cfgtew->u.cfgte64->lun))) {
1156  return 1;
1157  }
1158  } else {
1159  if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1160  res->target == cfgtew->u.cfgte->res_addr.target &&
1161  res->lun == cfgtew->u.cfgte->res_addr.lun)
1162  return 1;
1163  }
1164 
1165  return 0;
1166 }
1167 
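1168 /**
1169  * ipr_format_res_path - Format the resource path for printing.
1170  * @res_path: resource path
1171  * @buffer: buffer to hold the formatted string
1172  * @len: length of buffer provided
1173  *
1174  * Return value: pointer to buffer
1175  **/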
1176 static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
1177 {
1178  int i;
1179  char *p = buffer;
1180 
1181  *p = '\0';
1182  p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1183  for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1184  p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1185 
1186  return buffer;
1187 }
1188 
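1189 /**
1190  * ipr_update_res_entry - Update the resource entry from a config table entry.
1191  * @res: resource entry struct
1192  * @cfgtew: config table entry wrapper struct
1193  *
1194  * Return value: none
1195  **/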
1197 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1198  struct ipr_config_table_entry_wrapper *cfgtew)
1199 {
1200  char buffer[IPR_MAX_RES_PATH_LENGTH];
1201  unsigned int proto;
1202  int new_path = 0;
1203 
1204  if (res->ioa_cfg->sis64) {
1205  res->flags = cfgtew->u.cfgte64->flags;
1206  res->res_flags = cfgtew->u.cfgte64->res_flags;
1207  res->type = cfgtew->u.cfgte64->res_type;
1208 
1209  memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1210  sizeof(struct ipr_std_inq_data));
1211 
1212  res->qmodel = IPR_QUEUEING_MODEL64(res);
1213  proto = cfgtew->u.cfgte64->proto;
1214  res->res_handle = cfgtew->u.cfgte64->res_handle;
1215  res->dev_id = cfgtew->u.cfgte64->dev_id;
1216 
1217  memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1218  sizeof(res->dev_lun.scsi_lun));
1219 
1220  if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1221  sizeof(res->res_path))) {
1222  memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1223  sizeof(res->res_path));
1224  new_path = 1;
1225  }
1226 
1227  if (res->sdev && new_path)
1228  sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1229  ipr_format_res_path(res->res_path, buffer,
1230  sizeof(buffer)));
1231  } else {
1232  res->flags = cfgtew->u.cfgte->flags;
1233  if (res->flags & IPR_IS_IOA_RESOURCE)
1234  res->type = IPR_RES_TYPE_IOAFP;
1235  else
1236  res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1237 
1238  memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1239  sizeof(struct ipr_std_inq_data));
1240 
1241  res->qmodel = IPR_QUEUEING_MODEL(res);
1242  proto = cfgtew->u.cfgte->proto;
1243  res->res_handle = cfgtew->u.cfgte->res_handle;
1244  }
1245 
1246  ipr_update_ata_class(res, proto);
1247 }
1248 
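1249 /**
1250  * ipr_clear_res_target - Clear the bit in the bit map representing the
1251  *                        target id for the resource (sis64 only).
1252  * @res: resource entry struct
1253  *
1254  * Return value: none
1255  **/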
1258 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1259 {
1260  struct ipr_resource_entry *gscsi_res = NULL;
1261  struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1262 
1263  if (!ioa_cfg->sis64)
1264  return;
1265 
1266  if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1267  clear_bit(res->target, ioa_cfg->array_ids);
1268  else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1269  clear_bit(res->target, ioa_cfg->vset_ids);
1270  else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1271  list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1272  if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1273  return;
1274  clear_bit(res->target, ioa_cfg->target_ids);
1275 
1276  } else if (res->bus == 0)
1277  clear_bit(res->target, ioa_cfg->target_ids);
1278 }
1279 
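1280 /**
1281  * ipr_handle_config_change - Handle a config change from the adapter
1282  * @ioa_cfg: ioa config struct
1283  * @hostrcb: hostrcb struct
1284  *
1285  * Return value: none
1286  **/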
1288 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1289  struct ipr_hostrcb *hostrcb)
1290 {
1291  struct ipr_resource_entry *res = NULL;
1292  struct ipr_config_table_entry_wrapper cfgtew;
1293  __be32 cc_res_handle;
1294 
1295  u32 is_ndn = 1;
1296 
1297  if (ioa_cfg->sis64) {
1298  cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1299  cc_res_handle = cfgtew.u.cfgte64->res_handle;
1300  } else {
1301  cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1302  cc_res_handle = cfgtew.u.cfgte->res_handle;
1303  }
1304 
1305  list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1306  if (res->res_handle == cc_res_handle) {
1307  is_ndn = 0;
1308  break;
1309  }
1310  }
1311 
1312  if (is_ndn) {
1313  if (list_empty(&ioa_cfg->free_res_q)) {
1314  ipr_send_hcam(ioa_cfg,
1315  IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1316  hostrcb);
1317  return;
1318  }
1319 
1320  res = list_entry(ioa_cfg->free_res_q.next,
1321  struct ipr_resource_entry, queue);
1322 
1323  list_del(&res->queue);
1324  ipr_init_res_entry(res, &cfgtew);
1325  list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1326  }
1327 
1328  ipr_update_res_entry(res, &cfgtew);
1329 
1330  if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1331  if (res->sdev) {
1332  res->del_from_ml = 1;
1334  if (ioa_cfg->allow_ml_add_del)
1335  schedule_work(&ioa_cfg->work_q);
1336  } else {
1337  ipr_clear_res_target(res);
1338  list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1339  }
1340  } else if (!res->sdev || res->del_from_ml) {
1341  res->add_to_ml = 1;
1342  if (ioa_cfg->allow_ml_add_del)
1343  schedule_work(&ioa_cfg->work_q);
1344  }
1345 
1346  ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1347 }
1348 
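1349 /**
1350  * ipr_process_ccn - Op done function for a CCN.
1351  * @ipr_cmd: ipr command struct
1352  *
1353  * This function is the op done function for a configuration
1354  * change notification host controlled async from the adapter.
1355  *
1356  * Return value: none
1357  **/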
1359 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1360 {
1361  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1362  struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1363  u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1364 
1365  list_del(&hostrcb->queue);
1366  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1367 
1368  if (ioasc) {
1369  if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1370  dev_err(&ioa_cfg->pdev->dev,
1371  "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1372 
1373  ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1374  } else {
1375  ipr_handle_config_change(ioa_cfg, hostrcb);
1376  }
1377 }
1378 
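1379 /**
1380  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1381  * @i: index into buffer
1382  * @buf: string to modify
1383  *
1384  * This function will strip all trailing whitespace, pad the end of
1385  * the string with a single space, and NULL terminate the string.
1386  *
1387  * Return value: new length of string
1388  **/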
1390 static int strip_and_pad_whitespace(int i, char *buf)
1391 {
1392  while (i && buf[i] == ' ')
1393  i--;
1394  buf[i+1] = ' ';
1395  buf[i+2] = '\0';
1396  return i + 2;
1397 }
1398 
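1399 /**
1400  * ipr_log_vpd_compact - Log vendor/product ID and serial number compactly
1401  * @prefix: string to print at start of printk
1402  * @hostrcb: hostrcb struct
1403  * @vpd: vendor/product id/sn struct
1404  *
1405  * Return value: none
1406  **/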
1408 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1409  struct ipr_vpd *vpd)
1410 {
1411  char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1412  int i = 0;
1413 
1414  memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1415  i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1416 
1417  memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1418  i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1419 
1420  memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1421  buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1422 
1423  ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1424 }
1425 
1433 static void ipr_log_vpd(struct ipr_vpd *vpd)
1434 {
1435  char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1436  + IPR_SERIAL_NUM_LEN];
1437 
1438  memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1439  memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1440  IPR_PROD_ID_LEN);
1441  buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1442  ipr_err("Vendor/Product ID: %s\n", buffer);
1443 
1444  memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1445  buffer[IPR_SERIAL_NUM_LEN] = '\0';
1446  ipr_err(" Serial Number: %s\n", buffer);
1447 }
1448 
1458 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1459  struct ipr_ext_vpd *vpd)
1460 {
1461  ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1462  ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1463  be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1464 }
1465 
1473 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1474 {
1475  ipr_log_vpd(&vpd->vpd);
1476  ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1477  be32_to_cpu(vpd->wwid[1]));
1478 }
1479 
1488 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1489  struct ipr_hostrcb *hostrcb)
1490 {
1491  struct ipr_hostrcb_type_12_error *error;
1492 
1493  if (ioa_cfg->sis64)
1494  error = &hostrcb->hcam.u.error64.u.type_12_error;
1495  else
1496  error = &hostrcb->hcam.u.error.u.type_12_error;
1497 
1498  ipr_err("-----Current Configuration-----\n");
1499  ipr_err("Cache Directory Card Information:\n");
1500  ipr_log_ext_vpd(&error->ioa_vpd);
1501  ipr_err("Adapter Card Information:\n");
1502  ipr_log_ext_vpd(&error->cfc_vpd);
1503 
1504  ipr_err("-----Expected Configuration-----\n");
1505  ipr_err("Cache Directory Card Information:\n");
1506  ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1507  ipr_err("Adapter Card Information:\n");
1508  ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1509 
1510  ipr_err("Additional IOA Data: %08X %08X %08X\n",
1511  be32_to_cpu(error->ioa_data[0]),
1512  be32_to_cpu(error->ioa_data[1]),
1513  be32_to_cpu(error->ioa_data[2]));
1514 }
1515 
1524 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1525  struct ipr_hostrcb *hostrcb)
1526 {
1527  struct ipr_hostrcb_type_02_error *error =
1528  &hostrcb->hcam.u.error.u.type_02_error;
1529 
1530  ipr_err("-----Current Configuration-----\n");
1531  ipr_err("Cache Directory Card Information:\n");
1532  ipr_log_vpd(&error->ioa_vpd);
1533  ipr_err("Adapter Card Information:\n");
1534  ipr_log_vpd(&error->cfc_vpd);
1535 
1536  ipr_err("-----Expected Configuration-----\n");
1537  ipr_err("Cache Directory Card Information:\n");
1538  ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1539  ipr_err("Adapter Card Information:\n");
1540  ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1541 
1542  ipr_err("Additional IOA Data: %08X %08X %08X\n",
1543  be32_to_cpu(error->ioa_data[0]),
1544  be32_to_cpu(error->ioa_data[1]),
1545  be32_to_cpu(error->ioa_data[2]));
1546 }
1547 
1556 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1557  struct ipr_hostrcb *hostrcb)
1558 {
1559  int errors_logged, i;
1560  struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1561  struct ipr_hostrcb_type_13_error *error;
1562 
1563  error = &hostrcb->hcam.u.error.u.type_13_error;
1564  errors_logged = be32_to_cpu(error->errors_logged);
1565 
1566  ipr_err("Device Errors Detected/Logged: %d/%d\n",
1567  be32_to_cpu(error->errors_detected), errors_logged);
1568 
1569  dev_entry = error->dev;
1570 
1571  for (i = 0; i < errors_logged; i++, dev_entry++) {
1573 
1574  ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1575  ipr_log_ext_vpd(&dev_entry->vpd);
1576 
1577  ipr_err("-----New Device Information-----\n");
1578  ipr_log_ext_vpd(&dev_entry->new_vpd);
1579 
1580  ipr_err("Cache Directory Card Information:\n");
1581  ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1582 
1583  ipr_err("Adapter Card Information:\n");
1584  ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1585  }
1586 }
1587 
1596 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1597  struct ipr_hostrcb *hostrcb)
1598 {
1599  int errors_logged, i;
1600  struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1601  struct ipr_hostrcb_type_23_error *error;
1602  char buffer[IPR_MAX_RES_PATH_LENGTH];
1603 
1604  error = &hostrcb->hcam.u.error64.u.type_23_error;
1605  errors_logged = be32_to_cpu(error->errors_logged);
1606 
1607  ipr_err("Device Errors Detected/Logged: %d/%d\n",
1608  be32_to_cpu(error->errors_detected), errors_logged);
1609 
1610  dev_entry = error->dev;
1611 
1612  for (i = 0; i < errors_logged; i++, dev_entry++) {
1614 
1615  ipr_err("Device %d : %s", i + 1,
1616  ipr_format_res_path(dev_entry->res_path, buffer,
1617  sizeof(buffer)));
1618  ipr_log_ext_vpd(&dev_entry->vpd);
1619 
1620  ipr_err("-----New Device Information-----\n");
1621  ipr_log_ext_vpd(&dev_entry->new_vpd);
1622 
1623  ipr_err("Cache Directory Card Information:\n");
1624  ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1625 
1626  ipr_err("Adapter Card Information:\n");
1627  ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1628  }
1629 }
1630 
1639 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1640  struct ipr_hostrcb *hostrcb)
1641 {
1642  int errors_logged, i;
1643  struct ipr_hostrcb_device_data_entry *dev_entry;
1644  struct ipr_hostrcb_type_03_error *error;
1645 
1646  error = &hostrcb->hcam.u.error.u.type_03_error;
1647  errors_logged = be32_to_cpu(error->errors_logged);
1648 
1649  ipr_err("Device Errors Detected/Logged: %d/%d\n",
1650  be32_to_cpu(error->errors_detected), errors_logged);
1651 
1652  dev_entry = error->dev;
1653 
1654  for (i = 0; i < errors_logged; i++, dev_entry++) {
1656 
1657  ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1658  ipr_log_vpd(&dev_entry->vpd);
1659 
1660  ipr_err("-----New Device Information-----\n");
1661  ipr_log_vpd(&dev_entry->new_vpd);
1662 
1663  ipr_err("Cache Directory Card Information:\n");
1664  ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1665 
1666  ipr_err("Adapter Card Information:\n");
1667  ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1668 
1669  ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1670  be32_to_cpu(dev_entry->ioa_data[0]),
1671  be32_to_cpu(dev_entry->ioa_data[1]),
1672  be32_to_cpu(dev_entry->ioa_data[2]),
1673  be32_to_cpu(dev_entry->ioa_data[3]),
1674  be32_to_cpu(dev_entry->ioa_data[4]));
1675  }
1676 }
1677 
1686 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1687  struct ipr_hostrcb *hostrcb)
1688 {
1689  int i, num_entries;
1690  struct ipr_hostrcb_type_14_error *error;
1691  struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1692  const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1693 
1694  error = &hostrcb->hcam.u.error.u.type_14_error;
1695 
1697 
1698  ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1699  error->protection_level,
1700  ioa_cfg->host->host_no,
1701  error->last_func_vset_res_addr.bus,
1702  error->last_func_vset_res_addr.target,
1703  error->last_func_vset_res_addr.lun);
1704 
1706 
1707  array_entry = error->array_member;
1708  num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1709  ARRAY_SIZE(error->array_member));
1710 
1711  for (i = 0; i < num_entries; i++, array_entry++) {
1712  if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1713  continue;
1714 
1715  if (be32_to_cpu(error->exposed_mode_adn) == i)
1716  ipr_err("Exposed Array Member %d:\n", i);
1717  else
1718  ipr_err("Array Member %d:\n", i);
1719 
1720  ipr_log_ext_vpd(&array_entry->vpd);
1721  ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1722  ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1723  "Expected Location");
1724 
1726  }
1727 }
1728 
1737 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1738  struct ipr_hostrcb *hostrcb)
1739 {
1740  int i;
1741  struct ipr_hostrcb_type_04_error *error;
1742  struct ipr_hostrcb_array_data_entry *array_entry;
1743  const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1744 
1745  error = &hostrcb->hcam.u.error.u.type_04_error;
1746 
1748 
1749  ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1750  error->protection_level,
1751  ioa_cfg->host->host_no,
1752  error->last_func_vset_res_addr.bus,
1753  error->last_func_vset_res_addr.target,
1754  error->last_func_vset_res_addr.lun);
1755 
1757 
1758  array_entry = error->array_member;
1759 
1760  for (i = 0; i < 18; i++) {
1761  if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1762  continue;
1763 
1764  if (be32_to_cpu(error->exposed_mode_adn) == i)
1765  ipr_err("Exposed Array Member %d:\n", i);
1766  else
1767  ipr_err("Array Member %d:\n", i);
1768 
1769  ipr_log_vpd(&array_entry->vpd);
1770 
1771  ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1772  ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1773  "Expected Location");
1774 
1776 
1777  if (i == 9)
1778  array_entry = error->array_member2;
1779  else
1780  array_entry++;
1781  }
1782 }
1783 
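1784 /**
1785  * ipr_log_hex_data - Log additional hex IOA error data.
1786  * @ioa_cfg: ioa config struct
1787  * @data: IOA error data
1788  * @len: data length
1789  *
1790  * Return value: none
1791  **/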
1793 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1794 {
1795  int i;
1796 
1797  if (len == 0)
1798  return;
1799 
1800  if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1801  len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1802 
1803  for (i = 0; i < len / 4; i += 4) {
1804  ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1805  be32_to_cpu(data[i]),
1806  be32_to_cpu(data[i+1]),
1807  be32_to_cpu(data[i+2]),
1808  be32_to_cpu(data[i+3]));
1809  }
1810 }
1811 
1820 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1821  struct ipr_hostrcb *hostrcb)
1822 {
1823  struct ipr_hostrcb_type_17_error *error;
1824 
1825  if (ioa_cfg->sis64)
1826  error = &hostrcb->hcam.u.error64.u.type_17_error;
1827  else
1828  error = &hostrcb->hcam.u.error.u.type_17_error;
1829 
1830  error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1831  strim(error->failure_reason);
1832 
1833  ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1834  be32_to_cpu(hostrcb->hcam.u.error.prc));
1835  ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1836  ipr_log_hex_data(ioa_cfg, error->data,
1837  be32_to_cpu(hostrcb->hcam.length) -
1838  (offsetof(struct ipr_hostrcb_error, u) +
1839  offsetof(struct ipr_hostrcb_type_17_error, data)));
1840 }
1841 
1850 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1851  struct ipr_hostrcb *hostrcb)
1852 {
1853  struct ipr_hostrcb_type_07_error *error;
1854 
1855  error = &hostrcb->hcam.u.error.u.type_07_error;
1856  error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1857  strim(error->failure_reason);
1858 
1859  ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1860  be32_to_cpu(hostrcb->hcam.u.error.prc));
1861  ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1862  ipr_log_hex_data(ioa_cfg, error->data,
1863  be32_to_cpu(hostrcb->hcam.length) -
1864  (offsetof(struct ipr_hostrcb_error, u) +
1865  offsetof(struct ipr_hostrcb_type_07_error, data)));
1866 }
1867 
1868 static const struct {
1869  u8 active;
1870  char *desc;
1871 } path_active_desc[] = {
1872  { IPR_PATH_NO_INFO, "Path" },
1873  { IPR_PATH_ACTIVE, "Active path" },
1874  { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1875 };
1876 
1877 static const struct {
1878  u8 state;
1879  char *desc;
1880 } path_state_desc[] = {
1881  { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1882  { IPR_PATH_HEALTHY, "is healthy" },
1883  { IPR_PATH_DEGRADED, "is degraded" },
1884  { IPR_PATH_FAILED, "is failed" }
1885 };
1886 
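1887 /**
1888  * ipr_log_fabric_path - Log a fabric path error
1889  * @hostrcb: hostrcb struct
1890  * @fabric: fabric descriptor
1891  *
1892  * Return value: none
1893  **/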
1895 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1896  struct ipr_hostrcb_fabric_desc *fabric)
1897 {
1898  int i, j;
1899  u8 path_state = fabric->path_state;
1900  u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1901  u8 state = path_state & IPR_PATH_STATE_MASK;
1902 
1903  for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1904  if (path_active_desc[i].active != active)
1905  continue;
1906 
1907  for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1908  if (path_state_desc[j].state != state)
1909  continue;
1910 
1911  if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1912  ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1913  path_active_desc[i].desc, path_state_desc[j].desc,
1914  fabric->ioa_port);
1915  } else if (fabric->cascaded_expander == 0xff) {
1916  ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1917  path_active_desc[i].desc, path_state_desc[j].desc,
1918  fabric->ioa_port, fabric->phy);
1919  } else if (fabric->phy == 0xff) {
1920  ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1921  path_active_desc[i].desc, path_state_desc[j].desc,
1922  fabric->ioa_port, fabric->cascaded_expander);
1923  } else {
1924  ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1925  path_active_desc[i].desc, path_state_desc[j].desc,
1926  fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1927  }
1928  return;
1929  }
1930  }
1931 
1932  ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1933  fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1934 }
1935 
1944 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1945  struct ipr_hostrcb64_fabric_desc *fabric)
1946 {
1947  int i, j;
1948  u8 path_state = fabric->path_state;
1949  u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1950  u8 state = path_state & IPR_PATH_STATE_MASK;
1951  char buffer[IPR_MAX_RES_PATH_LENGTH];
1952 
1953  for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1954  if (path_active_desc[i].active != active)
1955  continue;
1956 
1957  for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1958  if (path_state_desc[j].state != state)
1959  continue;
1960 
1961  ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1962  path_active_desc[i].desc, path_state_desc[j].desc,
1963  ipr_format_res_path(fabric->res_path, buffer,
1964  sizeof(buffer)));
1965  return;
1966  }
1967  }
1968 
1969  ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1970  ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
1971 }
1972 
1973 static const struct {
1974  u8 type;
1975  char *desc;
1976 } path_type_desc[] = {
1977  { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1978  { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1979  { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1980  { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1981 };
1982 
1983 static const struct {
1984  u8 status;
1985  char *desc;
1986 } path_status_desc[] = {
1987  { IPR_PATH_CFG_NO_PROB, "Functional" },
1988  { IPR_PATH_CFG_DEGRADED, "Degraded" },
1989  { IPR_PATH_CFG_FAILED, "Failed" },
1990  { IPR_PATH_CFG_SUSPECT, "Suspect" },
1991  { IPR_PATH_NOT_DETECTED, "Missing" },
1992  { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1993 };
1994 
1995 static const char *link_rate[] = {
1996  "unknown",
1997  "disabled",
1998  "phy reset problem",
1999  "spinup hold",
2000  "port selector",
2001  "unknown",
2002  "unknown",
2003  "unknown",
2004  "1.5Gbps",
2005  "3.0Gbps",
2006  "unknown",
2007  "unknown",
2008  "unknown",
2009  "unknown",
2010  "unknown",
2011  "unknown"
2012 };
2013 
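2014 /**
2015  * ipr_log_path_elem - Log a fabric path element.
2016  * @hostrcb:	hostrcb struct
2017  * @cfg:	fabric path element struct
2018  *
2019  * Return value:
2020  * 	none
2021  **/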
2022 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2023 				struct ipr_hostrcb_config_element *cfg)
2024 {
2025  int i, j;
2026 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2027 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2028 
2029  if (type == IPR_PATH_CFG_NOT_EXIST)
2030  return;
2031 
2032  for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2033  if (path_type_desc[i].type != type)
2034  continue;
2035 
2036  for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2037  if (path_status_desc[j].status != status)
2038  continue;
2039 
2040  if (type == IPR_PATH_CFG_IOA_PORT) {
2041  ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2042  path_status_desc[j].desc, path_type_desc[i].desc,
2043  cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2044  be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2045  } else {
2046  if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2047  ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2048  path_status_desc[j].desc, path_type_desc[i].desc,
2049  link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2050  be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2051  } else if (cfg->cascaded_expander == 0xff) {
2052  ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2053  "WWN=%08X%08X\n", path_status_desc[j].desc,
2054  path_type_desc[i].desc, cfg->phy,
2055  link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2056  be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2057  } else if (cfg->phy == 0xff) {
2058  ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2059  "WWN=%08X%08X\n", path_status_desc[j].desc,
2060  path_type_desc[i].desc, cfg->cascaded_expander,
2061  link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2062  be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2063  } else {
2064  ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2065  "WWN=%08X%08X\n", path_status_desc[j].desc,
2066  path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2067  link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2068  be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2069  }
2070  }
2071  return;
2072  }
2073  }
2074 
2075  ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2076  "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2077  link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2078  be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2079 }
2080 
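2081 /**
2082  * ipr_log64_path_elem - Log a fabric path element.
2083  * @hostrcb:	hostrcb struct
2084  * @cfg:	fabric path element struct
2085  *
2086  * Return value:
2087  * 	none
2088  **/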
2089 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2090  struct ipr_hostrcb64_config_element *cfg)
2091 {
2092 	int i, j;
2093 	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_ID_MASK;
2094 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2095  u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2096  char buffer[IPR_MAX_RES_PATH_LENGTH];
2097 
2098  if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2099  return;
2100 
2101  for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2102  if (path_type_desc[i].type != type)
2103  continue;
2104 
2105  for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2106  if (path_status_desc[j].status != status)
2107  continue;
2108 
2109  ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2110  path_status_desc[j].desc, path_type_desc[i].desc,
2111  ipr_format_res_path(cfg->res_path, buffer,
2112  sizeof(buffer)),
2113  link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2114  be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2115  return;
2116  }
2117  }
2118  ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2119  "WWN=%08X%08X\n", cfg->type_status,
2120  ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
2121  link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2122  be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2123 }
2124 
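2125 /**
2126  * ipr_log_fabric_error - Log a fabric error.
2127  * @ioa_cfg:	ioa config struct
2128  * @hostrcb:	hostrcb struct
2129  *
2130  * Return value:
2131  * 	none
2132  **/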
2133 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2134  struct ipr_hostrcb *hostrcb)
2135 {
2136 	struct ipr_hostrcb_type_20_error *error;
2137 	struct ipr_hostrcb_fabric_desc *fabric;
2138 	struct ipr_hostrcb_config_element *cfg;
2139 	int i, add_len;
2140 
2141  error = &hostrcb->hcam.u.error.u.type_20_error;
2142  error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2143  ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2144 
2145  add_len = be32_to_cpu(hostrcb->hcam.length) -
2146  (offsetof(struct ipr_hostrcb_error, u) +
2147  offsetof(struct ipr_hostrcb_type_20_error, desc));
2148 
2149  for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2150  ipr_log_fabric_path(hostrcb, fabric);
2151  for_each_fabric_cfg(fabric, cfg)
2152  ipr_log_path_elem(hostrcb, cfg);
2153 
2154  add_len -= be16_to_cpu(fabric->length);
2155  fabric = (struct ipr_hostrcb_fabric_desc *)
2156  ((unsigned long)fabric + be16_to_cpu(fabric->length));
2157  }
2158 
2159  ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2160 }
2161 
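2162 /**
2163  * ipr_log_sis64_array_error - Log a sis64 array error.
2164  * @ioa_cfg:	ioa config struct
2165  * @hostrcb:	hostrcb struct
2166  *
2167  * Return value:
2168  * 	none
2169  **/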
2170 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2171  struct ipr_hostrcb *hostrcb)
2172 {
2173 	int i, num_entries;
2174 	struct ipr_hostrcb_type_24_error *error;
2175 	struct ipr_hostrcb64_array_data_entry *array_entry;
2176  char buffer[IPR_MAX_RES_PATH_LENGTH];
2177  const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2178 
2179  error = &hostrcb->hcam.u.error64.u.type_24_error;
2180 
2181 	ipr_err_separator;
2182 
2183  ipr_err("RAID %s Array Configuration: %s\n",
2184  error->protection_level,
2185  ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
2186 
2187 	ipr_err_separator;
2188 
2189  array_entry = error->array_member;
2190  num_entries = min_t(u32, error->num_entries,
2191  ARRAY_SIZE(error->array_member));
2192 
2193  for (i = 0; i < num_entries; i++, array_entry++) {
2194 
2195  if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2196  continue;
2197 
2198  if (error->exposed_mode_adn == i)
2199  ipr_err("Exposed Array Member %d:\n", i);
2200  else
2201  ipr_err("Array Member %d:\n", i);
2202 
2203  ipr_err("Array Member %d:\n", i);
2204  ipr_log_ext_vpd(&array_entry->vpd);
2205  ipr_err("Current Location: %s\n",
2206  ipr_format_res_path(array_entry->res_path, buffer,
2207  sizeof(buffer)));
2208  ipr_err("Expected Location: %s\n",
2209  ipr_format_res_path(array_entry->expected_res_path,
2210  buffer, sizeof(buffer)));
2211 
2212 		ipr_err_separator;
2213 	}
2214 }
2215 
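2216 /**
2217  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2218  * @ioa_cfg:	ioa config struct
2219  * @hostrcb:	hostrcb struct
2220  *
2221  * Return value:
2222  * 	none
2223  **/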
2224 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2225  struct ipr_hostrcb *hostrcb)
2226 {
2227 	struct ipr_hostrcb_type_30_error *error;
2228 	struct ipr_hostrcb64_fabric_desc *fabric;
2229 	struct ipr_hostrcb64_config_element *cfg;
2230 	int i, add_len;
2231 
2232  error = &hostrcb->hcam.u.error64.u.type_30_error;
2233 
2234  error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2235  ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2236 
2237  add_len = be32_to_cpu(hostrcb->hcam.length) -
2238  (offsetof(struct ipr_hostrcb64_error, u) +
2239  offsetof(struct ipr_hostrcb_type_30_error, desc));
2240 
2241  for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2242  ipr_log64_fabric_path(hostrcb, fabric);
2243  for_each_fabric_cfg(fabric, cfg)
2244  ipr_log64_path_elem(hostrcb, cfg);
2245 
2246  add_len -= be16_to_cpu(fabric->length);
2247  fabric = (struct ipr_hostrcb64_fabric_desc *)
2248  ((unsigned long)fabric + be16_to_cpu(fabric->length));
2249  }
2250 
2251  ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2252 }
2253 
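2254 /**
2255  * ipr_log_generic_error - Log an adapter error.
2256  * @ioa_cfg:	ioa config struct
2257  * @hostrcb:	hostrcb struct
2258  *
2259  * Return value:
2260  * 	none
2261  **/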
2262 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2263  struct ipr_hostrcb *hostrcb)
2264 {
2265  ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2266  be32_to_cpu(hostrcb->hcam.length));
2267 }
2268 
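2269 /**
2270  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2271  * @ioasc:	IOASC
2272  *
2273  * This function returns the index into the ipr_error_table
2274  * for the specified IOASC. If the IOASC is not in the table,
2275  * 0 is returned, which points to the entry used for unknown errors.
2276  *
2277  * Return value:
2278  * 	index into the ipr_error_table
2279  **/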
2280 static u32 ipr_get_error(u32 ioasc)
2281 {
2282  int i;
2283 
2284  for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2285  if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2286  return i;
2287 
2288  return 0;
2289 }
2290 
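2291 /**
2292  * ipr_handle_log_data - Log an adapter error.
2293  * @ioa_cfg:	ioa config struct
2294  * @hostrcb:	hostrcb struct
2295  *
2296  * This function logs an adapter error to the system.
2297  *
2298  * Return value:
2299  * 	none
2300  **/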
2301 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2302  struct ipr_hostrcb *hostrcb)
2303 {
2304  u32 ioasc;
2305  int error_index;
2306 
2307  if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2308  return;
2309 
2310  if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2311  dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2312 
2313  if (ioa_cfg->sis64)
2314  ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2315  else
2316  ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2317 
2318 	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2319 	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2320 		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
2321  scsi_report_bus_reset(ioa_cfg->host,
2322  hostrcb->hcam.u.error.fd_res_addr.bus);
2323  }
2324 
2325  error_index = ipr_get_error(ioasc);
2326 
2327  if (!ipr_error_table[error_index].log_hcam)
2328  return;
2329 
2330  ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2331 
2332  /* Set indication we have logged an error */
2333  ioa_cfg->errors_logged++;
2334 
2335  if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2336  return;
2337  if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2338  hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2339 
2340 	switch (hostrcb->hcam.overlay_id) {
2341 	case IPR_HOST_RCB_OVERLAY_ID_2:
2342 		ipr_log_cache_error(ioa_cfg, hostrcb);
2343 		break;
2344 	case IPR_HOST_RCB_OVERLAY_ID_3:
2345 		ipr_log_config_error(ioa_cfg, hostrcb);
2346 		break;
2347 	case IPR_HOST_RCB_OVERLAY_ID_4:
2348 	case IPR_HOST_RCB_OVERLAY_ID_6:
2349 		ipr_log_array_error(ioa_cfg, hostrcb);
2350 		break;
2351 	case IPR_HOST_RCB_OVERLAY_ID_7:
2352 		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2353 		break;
2354 	case IPR_HOST_RCB_OVERLAY_ID_12:
2355 		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2356 		break;
2357 	case IPR_HOST_RCB_OVERLAY_ID_13:
2358 		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2359 		break;
2360 	case IPR_HOST_RCB_OVERLAY_ID_14:
2361 	case IPR_HOST_RCB_OVERLAY_ID_16:
2362 		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2363 		break;
2364 	case IPR_HOST_RCB_OVERLAY_ID_17:
2365 		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2366 		break;
2367 	case IPR_HOST_RCB_OVERLAY_ID_20:
2368 		ipr_log_fabric_error(ioa_cfg, hostrcb);
2369 		break;
2370 	case IPR_HOST_RCB_OVERLAY_ID_23:
2371 		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2372 		break;
2373 	case IPR_HOST_RCB_OVERLAY_ID_24:
2374 	case IPR_HOST_RCB_OVERLAY_ID_26:
2375 		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2376 		break;
2377 	case IPR_HOST_RCB_OVERLAY_ID_30:
2378 		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2379 		break;
2380 	case IPR_HOST_RCB_OVERLAY_ID_1:
2381 	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2382 	default:
2383  ipr_log_generic_error(ioa_cfg, hostrcb);
2384  break;
2385  }
2386 }
2387 
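2388 /**
2389  * ipr_process_error - Op done function for an adapter error log.
2390  * @ipr_cmd:	ipr command struct
2391  *
2392  * This function is the op done function for an error log host
2393  * controlled async from the adapter. It will log the error and
2394  * send the HCAM back to the adapter.
2395  *
2396  * Return value:
2397  * 	none
2398  **/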
2399 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2400 {
2401  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2402  struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2403  u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2404  u32 fd_ioasc;
2405 
2406  if (ioa_cfg->sis64)
2407  fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2408  else
2409  fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2410 
2411  list_del(&hostrcb->queue);
2412  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2413 
2414  if (!ioasc) {
2415  ipr_handle_log_data(ioa_cfg, hostrcb);
2416  if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2417  ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2418  } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2419  dev_err(&ioa_cfg->pdev->dev,
2420  "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2421  }
2422 
2423  ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2424 }
2425 
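2426 /**
2427  * ipr_timeout - An internally generated op has timed out.
2428  * @ipr_cmd:	ipr command struct
2429  *
2430  * This function blocks host requests and initiates an
2431  * adapter reset.
2432  *
2433  * Return value:
2434  * 	none
2435  **/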
2436 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2437 {
2438  unsigned long lock_flags = 0;
2439  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2440 
2441  ENTER;
2442  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2443 
2444  ioa_cfg->errors_logged++;
2445  dev_err(&ioa_cfg->pdev->dev,
2446  "Adapter being reset due to command timeout.\n");
2447 
2448  if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2449  ioa_cfg->sdt_state = GET_DUMP;
2450 
2451  if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2452  ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2453 
2454  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2455  LEAVE;
2456 }
2457 
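2458 /**
2459  * ipr_oper_timeout - Adapter timed out transitioning to operational
2460  * @ipr_cmd:	ipr command struct
2461  *
2462  * This function blocks host requests and initiates an
2463  * adapter reset.
2464  *
2465  * Return value:
2466  * 	none
2467  **/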
2468 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2469 {
2470  unsigned long lock_flags = 0;
2471  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2472 
2473  ENTER;
2474  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2475 
2476  ioa_cfg->errors_logged++;
2477  dev_err(&ioa_cfg->pdev->dev,
2478  "Adapter timed out transitioning to operational.\n");
2479 
2480  if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2481  ioa_cfg->sdt_state = GET_DUMP;
2482 
2483  if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2484 		if (ipr_fastfail)
2485 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2486 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2487  }
2488 
2489  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2490  LEAVE;
2491 }
2492 
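2493 /**
2494  * ipr_reset_reload - Reset/Reload the IOA
2495  * @ioa_cfg:		ioa config struct
2496  * @shutdown_type:	shutdown type
2497  *
2498  * This function resets the adapter and waits for the reset to complete.
2499  *
2500  * Return value:
2501  * 	SUCCESS / FAILED
2502  **/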
2503 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2504  enum ipr_shutdown_type shutdown_type)
2505 {
2506  if (!ioa_cfg->in_reset_reload)
2507  ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2508 
2509  spin_unlock_irq(ioa_cfg->host->host_lock);
2510  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2511  spin_lock_irq(ioa_cfg->host->host_lock);
2512 
2513 	/* If we got hit with a host reset while we were already resetting
2514 	   the adapter for some reason, the reset may have failed. */
2515  if (ioa_cfg->ioa_is_dead) {
2516  ipr_trace;
2517  return FAILED;
2518  }
2519 
2520  return SUCCESS;
2521 }
2522 
2530 static const struct ipr_ses_table_entry *
2531 ipr_find_ses_entry(struct ipr_resource_entry *res)
2532 {
2533  int i, j, matches;
2534  struct ipr_std_inq_vpids *vpids;
2535  const struct ipr_ses_table_entry *ste = ipr_ses_table;
2536 
2537  for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2538  for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2539  if (ste->compare_product_id_byte[j] == 'X') {
2540  vpids = &res->std_inq_data.vpids;
2541  if (vpids->product_id[j] == ste->product_id[j])
2542  matches++;
2543  else
2544  break;
2545  } else
2546  matches++;
2547  }
2548 
2549  if (matches == IPR_PROD_ID_LEN)
2550  return ste;
2551  }
2552 
2553  return NULL;
2554 }
2555 
2568 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2569 {
2570  struct ipr_resource_entry *res;
2571  const struct ipr_ses_table_entry *ste;
2572  u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2573 
2574  /* Loop through each config table entry in the config table buffer */
2575  list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2576  if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2577  continue;
2578 
2579  if (bus != res->bus)
2580  continue;
2581 
2582  if (!(ste = ipr_find_ses_entry(res)))
2583  continue;
2584 
2585  max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2586  }
2587 
2588  return max_xfer_rate;
2589 }
2590 
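2591 /**
2592  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2593  * @ioa_cfg:	ioa config struct
2594  * @max_delay:	max delay in micro-seconds to wait
2595  *
2596  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2597  *
2598  * Return value:
2599  * 	0 on success / -EIO on failure
2600  **/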
2601 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2602 {
2603  volatile u32 pcii_reg;
2604  int delay = 1;
2605 
2606  /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2607  while (delay < max_delay) {
2608  pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2609 
2610  if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2611  return 0;
2612 
2613  /* udelay cannot be used if delay is more than a few milliseconds */
2614  if ((delay / 1000) > MAX_UDELAY_MS)
2615  mdelay(delay / 1000);
2616  else
2617  udelay(delay);
2618 
2619  delay += delay;
2620  }
2621  return -EIO;
2622 }
2623 
2634 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2635  u32 start_addr,
2636  __be32 *dest, u32 length_in_words)
2637 {
2638  int i;
2639 
2640  for (i = 0; i < length_in_words; i++) {
2641  writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2642  *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2643  dest++;
2644  }
2645 
2646  return 0;
2647 }
2648 
2659 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2660  u32 start_addr,
2661  __be32 *dest, u32 length_in_words)
2662 {
2663  volatile u32 temp_pcii_reg;
2664  int i, delay = 0;
2665 
2666  if (ioa_cfg->sis64)
2667  return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2668  dest, length_in_words);
2669 
2670 	/* Write IOA interrupt reg starting LDUMP state */
2671 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2672 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2673 
2674  /* Wait for IO debug acknowledge */
2675 	if (ipr_wait_iodbg_ack(ioa_cfg,
2676 			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2677 		dev_err(&ioa_cfg->pdev->dev,
2678  "IOA dump long data transfer timeout\n");
2679  return -EIO;
2680  }
2681 
2682  /* Signal LDUMP interlocked - clear IO debug ack */
2683  writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2684  ioa_cfg->regs.clr_interrupt_reg);
2685 
2686  /* Write Mailbox with starting address */
2687  writel(start_addr, ioa_cfg->ioa_mailbox);
2688 
2689 	/* Signal address valid - clear IOA Reset alert */
2690 	writel(IPR_UPROCI_RESET_ALERT,
2691 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2692 
2693  for (i = 0; i < length_in_words; i++) {
2694  /* Wait for IO debug acknowledge */
2695 		if (ipr_wait_iodbg_ack(ioa_cfg,
2696 				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2697 			dev_err(&ioa_cfg->pdev->dev,
2698  "IOA dump short data transfer timeout\n");
2699  return -EIO;
2700  }
2701 
2702  /* Read data from mailbox and increment destination pointer */
2703  *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2704  dest++;
2705 
2706  /* For all but the last word of data, signal data received */
2707  if (i < (length_in_words - 1)) {
2708  /* Signal dump data received - Clear IO debug Ack */
2709  writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2710  ioa_cfg->regs.clr_interrupt_reg);
2711  }
2712  }
2713 
2714 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
2715 	writel(IPR_UPROCI_RESET_ALERT,
2716 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2717 
2718 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2719 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2720 
2721  /* Signal dump data received - Clear IO debug Ack */
2722  writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2723  ioa_cfg->regs.clr_interrupt_reg);
2724 
2725  /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2726  while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2727  temp_pcii_reg =
2728  readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2729 
2730  if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2731  return 0;
2732 
2733  udelay(10);
2734  delay += 10;
2735  }
2736 
2737  return 0;
2738 }
2739 
2740 #ifdef CONFIG_SCSI_IPR_DUMP
2741 
2752 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2753  unsigned long pci_address, u32 length)
2754 {
2755  int bytes_copied = 0;
2756  int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2757  __be32 *page;
2758  unsigned long lock_flags = 0;
2759  struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2760 
2761  if (ioa_cfg->sis64)
2762  max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2763  else
2764  max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2765 
2766  while (bytes_copied < length &&
2767  (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2768  if (ioa_dump->page_offset >= PAGE_SIZE ||
2769  ioa_dump->page_offset == 0) {
2770  page = (__be32 *)__get_free_page(GFP_ATOMIC);
2771 
2772  if (!page) {
2773  ipr_trace;
2774  return bytes_copied;
2775  }
2776 
2777  ioa_dump->page_offset = 0;
2778  ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2779  ioa_dump->next_page_index++;
2780  } else
2781  page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2782 
2783  rem_len = length - bytes_copied;
2784  rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2785  cur_len = min(rem_len, rem_page_len);
2786 
2787  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2788  if (ioa_cfg->sdt_state == ABORT_DUMP) {
2789  rc = -EIO;
2790  } else {
2791  rc = ipr_get_ldump_data_section(ioa_cfg,
2792  pci_address + bytes_copied,
2793  &page[ioa_dump->page_offset / 4],
2794  (cur_len / sizeof(u32)));
2795  }
2796  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2797 
2798  if (!rc) {
2799  ioa_dump->page_offset += cur_len;
2800  bytes_copied += cur_len;
2801  } else {
2802  ipr_trace;
2803  break;
2804  }
2805  schedule();
2806  }
2807 
2808  return bytes_copied;
2809 }
2810 
2818 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2819 {
2820 	hdr->eye_catcher = IPR_DUMP_ENTRY_EYE_CATCHER;
2821 	hdr->num_elems = 1;
2822 	hdr->offset = sizeof(*hdr);
2823 	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2824 }
2825 
2834 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2835  struct ipr_driver_dump *driver_dump)
2836 {
2837  struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2838 
2839  ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2840  driver_dump->ioa_type_entry.hdr.len =
2841 		sizeof(struct ipr_dump_ioa_type_entry) -
2842 		sizeof(struct ipr_dump_entry_header);
2843 	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2844  driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2845  driver_dump->ioa_type_entry.type = ioa_cfg->type;
2846  driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2847  (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2848  ucode_vpd->minor_release[1];
2849  driver_dump->hdr.num_entries++;
2850 }
2851 
2860 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2861  struct ipr_driver_dump *driver_dump)
2862 {
2863  ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2864  driver_dump->version_entry.hdr.len =
2865 		sizeof(struct ipr_dump_version_entry) -
2866 		sizeof(struct ipr_dump_entry_header);
2867 	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2868  driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2869  strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2870  driver_dump->hdr.num_entries++;
2871 }
2872 
2881 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2882  struct ipr_driver_dump *driver_dump)
2883 {
2884  ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2885  driver_dump->trace_entry.hdr.len =
2886 		sizeof(struct ipr_dump_trace_entry) -
2887 		sizeof(struct ipr_dump_entry_header);
2888 	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2889  driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2890  memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2891  driver_dump->hdr.num_entries++;
2892 }
2893 
2902 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2903  struct ipr_driver_dump *driver_dump)
2904 {
2905  ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2906  driver_dump->location_entry.hdr.len =
2907 		sizeof(struct ipr_dump_location_entry) -
2908 		sizeof(struct ipr_dump_entry_header);
2909 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2910  driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2911  strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2912  driver_dump->hdr.num_entries++;
2913 }
2914 
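2915 /**
2916  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2917  * @ioa_cfg:	ioa config struct
2918  * @dump:	dump struct
2919  *
2920  * Return value:
2921  * 	none
2922  **/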
2923 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2924 {
2925  unsigned long start_addr, sdt_word;
2926  unsigned long lock_flags = 0;
2927  struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2928  struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2929  u32 num_entries, max_num_entries, start_off, end_off;
2930  u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
2931  struct ipr_sdt *sdt;
2932  int valid = 1;
2933  int i;
2934 
2935  ENTER;
2936 
2937  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2938 
2939  if (ioa_cfg->sdt_state != READ_DUMP) {
2940  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2941  return;
2942  }
2943 
2944  if (ioa_cfg->sis64) {
2945  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2946  ssleep(IPR_DUMP_DELAY_SECONDS);
2947  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2948  }
2949 
2950  start_addr = readl(ioa_cfg->ioa_mailbox);
2951 
2952  if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2953  dev_err(&ioa_cfg->pdev->dev,
2954  "Invalid dump table format: %lx\n", start_addr);
2955  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2956  return;
2957  }
2958 
2959  dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2960 
2961  driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2962 
2963  /* Initialize the overall dump header */
2964  driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2965  driver_dump->hdr.num_entries = 1;
2966  driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2967  driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2968  driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2969  driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2970 
2971  ipr_dump_version_data(ioa_cfg, driver_dump);
2972  ipr_dump_location_data(ioa_cfg, driver_dump);
2973  ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2974  ipr_dump_trace_data(ioa_cfg, driver_dump);
2975 
2976  /* Update dump_header */
2977  driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2978 
2979  /* IOA Dump entry */
2980  ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2981  ioa_dump->hdr.len = 0;
2982  ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2983  ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2984 
2985  /* First entries in sdt are actually a list of dump addresses and
2986  lengths to gather the real dump data. sdt represents the pointer
2987  to the ioa generated dump table. Dump data will be extracted based
2988  on entries in this table */
2989  sdt = &ioa_dump->sdt;
2990 
2991  if (ioa_cfg->sis64) {
2992  max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
2993  max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2994  } else {
2995  max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
2996  max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2997  }
2998 
2999  bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3000  (max_num_entries * sizeof(struct ipr_sdt_entry));
3001  rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3002  bytes_to_copy / sizeof(__be32));
3003 
3004  /* Smart Dump table is ready to use and the first entry is valid */
3005  if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3006  (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3007  dev_err(&ioa_cfg->pdev->dev,
3008  "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3009  rc, be32_to_cpu(sdt->hdr.state));
3010  driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3011  ioa_cfg->sdt_state = DUMP_OBTAINED;
3012  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3013  return;
3014  }
3015 
3016  num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3017 
3018  if (num_entries > max_num_entries)
3019  num_entries = max_num_entries;
3020 
3021  /* Update dump length to the actual data to be copied */
3022  dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3023  if (ioa_cfg->sis64)
3024  dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3025  else
3026  dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3027 
3028  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3029 
3030  for (i = 0; i < num_entries; i++) {
3031  if (ioa_dump->hdr.len > max_dump_size) {
3032  driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3033  break;
3034  }
3035 
3036  if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3037  sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3038  if (ioa_cfg->sis64)
3039  bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3040  else {
3041  start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3042  end_off = be32_to_cpu(sdt->entry[i].end_token);
3043 
3044  if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3045  bytes_to_copy = end_off - start_off;
3046  else
3047  valid = 0;
3048  }
3049  if (valid) {
3050  if (bytes_to_copy > max_dump_size) {
3051  sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3052  continue;
3053  }
3054 
3055  /* Copy data from adapter to driver buffers */
3056  bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3057  bytes_to_copy);
3058 
3059  ioa_dump->hdr.len += bytes_copied;
3060 
3061  if (bytes_copied != bytes_to_copy) {
3062  driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3063  break;
3064  }
3065  }
3066  }
3067  }
3068 
3069  dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3070 
3071  /* Update dump_header */
3072  driver_dump->hdr.len += ioa_dump->hdr.len;
3073  wmb();
3074  ioa_cfg->sdt_state = DUMP_OBTAINED;
3075  LEAVE;
3076 }
3077 
3078 #else
3079 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3080 #endif
3081 
3089 static void ipr_release_dump(struct kref *kref)
3090 {
3091  struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3092  struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3093  unsigned long lock_flags = 0;
3094  int i;
3095 
3096  ENTER;
3097  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3098  ioa_cfg->dump = NULL;
3099  ioa_cfg->sdt_state = INACTIVE;
3100  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3101 
3102  for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3103  free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3104 
3105  vfree(dump->ioa_dump.ioa_data);
3106  kfree(dump);
3107  LEAVE;
3108 }
3109 
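3110 /**
3111  * ipr_worker_thread - Worker thread
3112  * @work:	ioa config struct
3113  *
3114  * Called at task level from a work thread. This function takes care
3115  * of adding and removing devices from the mid-layer as configuration
3116  * changes are detected by the adapter.
3117  *
3118  * Return value:
3119  * 	nothing
3120  **/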
3121 static void ipr_worker_thread(struct work_struct *work)
3122 {
3123  unsigned long lock_flags;
3124  struct ipr_resource_entry *res;
3125  struct scsi_device *sdev;
3126  struct ipr_dump *dump;
3127  struct ipr_ioa_cfg *ioa_cfg =
3128  container_of(work, struct ipr_ioa_cfg, work_q);
3129  u8 bus, target, lun;
3130  int did_work;
3131 
3132  ENTER;
3133  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3134 
3135  if (ioa_cfg->sdt_state == READ_DUMP) {
3136  dump = ioa_cfg->dump;
3137  if (!dump) {
3138  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3139  return;
3140  }
3141  kref_get(&dump->kref);
3142  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3143  ipr_get_ioa_dump(ioa_cfg, dump);
3144  kref_put(&dump->kref, ipr_release_dump);
3145 
3146  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3147  if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3148  ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3149  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3150  return;
3151  }
3152 
3153 restart:
3154  do {
3155  did_work = 0;
3156  if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3157  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3158  return;
3159  }
3160 
3161  list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3162  if (res->del_from_ml && res->sdev) {
3163  did_work = 1;
3164  sdev = res->sdev;
3165  if (!scsi_device_get(sdev)) {
3166  if (!res->add_to_ml)
3167  list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3168  else
3169  res->del_from_ml = 0;
3170  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3171  scsi_remove_device(sdev);
3172  scsi_device_put(sdev);
3173  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3174  }
3175  break;
3176  }
3177  }
3178  } while (did_work);
3179 
3180  list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3181  if (res->add_to_ml) {
3182  bus = res->bus;
3183  target = res->target;
3184  lun = res->lun;
3185  res->add_to_ml = 0;
3186  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3187  scsi_add_device(ioa_cfg->host, bus, target, lun);
3188  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3189  goto restart;
3190  }
3191  }
3192 
3193  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3194  kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3195  LEAVE;
3196 }
3197 
3198 #ifdef CONFIG_SCSI_IPR_TRACE
3199 
3211 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3212  struct bin_attribute *bin_attr,
3213  char *buf, loff_t off, size_t count)
3214 {
3215  struct device *dev = container_of(kobj, struct device, kobj);
3216  struct Scsi_Host *shost = class_to_shost(dev);
3217  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3218  unsigned long lock_flags = 0;
3219  ssize_t ret;
3220 
3221  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3222  ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3223  IPR_TRACE_SIZE);
3224  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3225 
3226  return ret;
3227 }
3228 
3229 static struct bin_attribute ipr_trace_attr = {
3230  .attr = {
3231  .name = "trace",
3232  .mode = S_IRUGO,
3233  },
3234  .size = 0,
3235  .read = ipr_read_trace,
3236 };
3237 #endif
3238 
3247 static ssize_t ipr_show_fw_version(struct device *dev,
3248  struct device_attribute *attr, char *buf)
3249 {
3250  struct Scsi_Host *shost = class_to_shost(dev);
3251  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3252  struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3253  unsigned long lock_flags = 0;
3254  int len;
3255 
3256  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3257  len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3258  ucode_vpd->major_release, ucode_vpd->card_type,
3259  ucode_vpd->minor_release[0],
3260  ucode_vpd->minor_release[1]);
3261  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3262  return len;
3263 }
3264 
3265 static struct device_attribute ipr_fw_version_attr = {
3266  .attr = {
3267  .name = "fw_version",
3268  .mode = S_IRUGO,
3269  },
3270  .show = ipr_show_fw_version,
3271 };
3272 
3281 static ssize_t ipr_show_log_level(struct device *dev,
3282  struct device_attribute *attr, char *buf)
3283 {
3284  struct Scsi_Host *shost = class_to_shost(dev);
3285  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3286  unsigned long lock_flags = 0;
3287  int len;
3288 
3289  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3290  len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3291  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3292  return len;
3293 }
3294 
3303 static ssize_t ipr_store_log_level(struct device *dev,
3304  struct device_attribute *attr,
3305  const char *buf, size_t count)
3306 {
3307  struct Scsi_Host *shost = class_to_shost(dev);
3308  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3309  unsigned long lock_flags = 0;
3310 
3311  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3312  ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3313  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3314  return strlen(buf);
3315 }
3316 
3317 static struct device_attribute ipr_log_level_attr = {
3318  .attr = {
3319  .name = "log_level",
3320  .mode = S_IRUGO | S_IWUSR,
3321  },
3322  .show = ipr_show_log_level,
3323  .store = ipr_store_log_level
3324 };
3325 
3338 static ssize_t ipr_store_diagnostics(struct device *dev,
3339  struct device_attribute *attr,
3340  const char *buf, size_t count)
3341 {
3342  struct Scsi_Host *shost = class_to_shost(dev);
3343  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3344  unsigned long lock_flags = 0;
3345  int rc = count;
3346 
3347 	if (!capable(CAP_SYS_ADMIN))
3348 		return -EACCES;
3349 
3350  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3351  while (ioa_cfg->in_reset_reload) {
3352  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3353  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3354  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3355  }
3356 
3357  ioa_cfg->errors_logged = 0;
3358  ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3359 
3360  if (ioa_cfg->in_reset_reload) {
3361  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3362  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3363 
3364  /* Wait for a second for any errors to be logged */
3365  msleep(1000);
3366  } else {
3367  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3368  return -EIO;
3369  }
3370 
3371  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3372  if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3373  rc = -EIO;
3374  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3375 
3376  return rc;
3377 }
3378 
3379 static struct device_attribute ipr_diagnostics_attr = {
3380  .attr = {
3381  .name = "run_diagnostics",
3382  .mode = S_IWUSR,
3383  },
3384  .store = ipr_store_diagnostics
3385 };
3386 
3395 static ssize_t ipr_show_adapter_state(struct device *dev,
3396  struct device_attribute *attr, char *buf)
3397 {
3398  struct Scsi_Host *shost = class_to_shost(dev);
3399  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3400  unsigned long lock_flags = 0;
3401  int len;
3402 
3403  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3404  if (ioa_cfg->ioa_is_dead)
3405  len = snprintf(buf, PAGE_SIZE, "offline\n");
3406  else
3407  len = snprintf(buf, PAGE_SIZE, "online\n");
3408  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3409  return len;
3410 }
3411 
3423 static ssize_t ipr_store_adapter_state(struct device *dev,
3424  struct device_attribute *attr,
3425  const char *buf, size_t count)
3426 {
3427  struct Scsi_Host *shost = class_to_shost(dev);
3428  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3429  unsigned long lock_flags;
3430  int result = count;
3431 
3432 	if (!capable(CAP_SYS_ADMIN))
3433 		return -EACCES;
3434 
3435  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3436  if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3437  ioa_cfg->ioa_is_dead = 0;
3438  ioa_cfg->reset_retries = 0;
3439  ioa_cfg->in_ioa_bringdown = 0;
3440  ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3441  }
3442  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3443  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3444 
3445  return result;
3446 }
3447 
3448 static struct device_attribute ipr_ioa_state_attr = {
3449  .attr = {
3450  .name = "online_state",
3451  .mode = S_IRUGO | S_IWUSR,
3452  },
3453  .show = ipr_show_adapter_state,
3454  .store = ipr_store_adapter_state
3455 };
3456 
3468 static ssize_t ipr_store_reset_adapter(struct device *dev,
3469  struct device_attribute *attr,
3470  const char *buf, size_t count)
3471 {
3472  struct Scsi_Host *shost = class_to_shost(dev);
3473  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3474  unsigned long lock_flags;
3475  int result = count;
3476 
3477 	if (!capable(CAP_SYS_ADMIN))
3478 		return -EACCES;
3479 
3480  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3481  if (!ioa_cfg->in_reset_reload)
3482  ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3483  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3484  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3485 
3486  return result;
3487 }
3488 
3489 static struct device_attribute ipr_ioa_reset_attr = {
3490  .attr = {
3491  .name = "reset_host",
3492  .mode = S_IWUSR,
3493  },
3494  .store = ipr_store_reset_adapter
3495 };
3496 
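3497 /**
3498  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3499  * @buf_len:	length of buffer to allocate
3500  *
3501  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3502  * list to use for microcode download.
3503  *
3504  * Return value:
3505  * 	pointer to sglist / NULL on failure
3506  **/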
3507 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3508 {
3509  int sg_size, order, bsize_elem, num_elem, i, j;
3510  struct ipr_sglist *sglist;
3511  struct scatterlist *scatterlist;
3512  struct page *page;
3513 
3514  /* Get the minimum size per scatter/gather element */
3515  sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3516 
3517  /* Get the actual size per element */
3518  order = get_order(sg_size);
3519 
3520  /* Determine the actual number of bytes per element */
3521  bsize_elem = PAGE_SIZE * (1 << order);
3522 
3523  /* Determine the actual number of sg entries needed */
3524  if (buf_len % bsize_elem)
3525  num_elem = (buf_len / bsize_elem) + 1;
3526  else
3527  num_elem = buf_len / bsize_elem;
3528 
3529  /* Allocate a scatter/gather list for the DMA */
3530  sglist = kzalloc(sizeof(struct ipr_sglist) +
3531  (sizeof(struct scatterlist) * (num_elem - 1)),
3532  GFP_KERNEL);
3533 
3534  if (sglist == NULL) {
3535  ipr_trace;
3536  return NULL;
3537  }
3538 
3539  scatterlist = sglist->scatterlist;
3540  sg_init_table(scatterlist, num_elem);
3541 
3542  sglist->order = order;
3543  sglist->num_sg = num_elem;
3544 
3545  /* Allocate a bunch of sg elements */
3546  for (i = 0; i < num_elem; i++) {
3547  page = alloc_pages(GFP_KERNEL, order);
3548  if (!page) {
3549  ipr_trace;
3550 
3551  /* Free up what we already allocated */
3552  for (j = i - 1; j >= 0; j--)
3553  __free_pages(sg_page(&scatterlist[j]), order);
3554  kfree(sglist);
3555  return NULL;
3556  }
3557 
3558  sg_set_page(&scatterlist[i], page, 0, 0);
3559  }
3560 
3561  return sglist;
3562 }
3563 
3574 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3575 {
3576  int i;
3577 
3578  for (i = 0; i < sglist->num_sg; i++)
3579  __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3580 
3581  kfree(sglist);
3582 }
3583 
3596 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3597  u8 *buffer, u32 len)
3598 {
3599  int bsize_elem, i, result = 0;
3600  struct scatterlist *scatterlist;
3601  void *kaddr;
3602 
3603  /* Determine the actual number of bytes per element */
3604  bsize_elem = PAGE_SIZE * (1 << sglist->order);
3605 
3606  scatterlist = sglist->scatterlist;
3607 
3608  for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3609  struct page *page = sg_page(&scatterlist[i]);
3610 
3611  kaddr = kmap(page);
3612  memcpy(kaddr, buffer, bsize_elem);
3613  kunmap(page);
3614 
3615  scatterlist[i].length = bsize_elem;
3616 
3617  if (result != 0) {
3618  ipr_trace;
3619  return result;
3620  }
3621  }
3622 
3623  if (len % bsize_elem) {
3624  struct page *page = sg_page(&scatterlist[i]);
3625 
3626  kaddr = kmap(page);
3627  memcpy(kaddr, buffer, len % bsize_elem);
3628  kunmap(page);
3629 
3630  scatterlist[i].length = len % bsize_elem;
3631  }
3632 
3633  sglist->buffer_len = len;
3634  return result;
3635 }
3636 
3645 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3646  struct ipr_sglist *sglist)
3647 {
3648  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3649  struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3650  struct scatterlist *scatterlist = sglist->scatterlist;
3651  int i;
3652 
3653  ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3654  ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3655  ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3656 
3657  ioarcb->ioadl_len =
3658  cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3659 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3660 		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3661 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3662  ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3663  }
3664 
3665  ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3666 }
3667 
3676 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3677  struct ipr_sglist *sglist)
3678 {
3679  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3680  struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3681  struct scatterlist *scatterlist = sglist->scatterlist;
3682  int i;
3683 
3684  ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3685  ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3686  ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3687 
3688  ioarcb->ioadl_len =
3689  cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3690 
3691  for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3692  ioadl[i].flags_and_data_len =
3693  cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3694  ioadl[i].address =
3695  cpu_to_be32(sg_dma_address(&scatterlist[i]));
3696  }
3697 
3698 	ioadl[i-1].flags_and_data_len |=
3699 		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3700 }
3701 
3712 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3713  struct ipr_sglist *sglist)
3714 {
3715  unsigned long lock_flags;
3716 
3717  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3718  while (ioa_cfg->in_reset_reload) {
3719  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3720  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3721  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3722  }
3723 
3724  if (ioa_cfg->ucode_sglist) {
3725  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3726  dev_err(&ioa_cfg->pdev->dev,
3727  "Microcode download already in progress\n");
3728  return -EIO;
3729  }
3730 
3731  sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3732  sglist->num_sg, DMA_TO_DEVICE);
3733 
3734  if (!sglist->num_dma_sg) {
3735  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3736  dev_err(&ioa_cfg->pdev->dev,
3737  "Failed to map microcode download buffer!\n");
3738  return -EIO;
3739  }
3740 
3741  ioa_cfg->ucode_sglist = sglist;
3742  ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3743  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3744  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3745 
3746  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3747  ioa_cfg->ucode_sglist = NULL;
3748  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3749  return 0;
3750 }
3751 
3763 static ssize_t ipr_store_update_fw(struct device *dev,
3764  struct device_attribute *attr,
3765  const char *buf, size_t count)
3766 {
3767  struct Scsi_Host *shost = class_to_shost(dev);
3768 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3769 	struct ipr_ucode_image_header *image_hdr;
3770 	const struct firmware *fw_entry;
3771  struct ipr_sglist *sglist;
3772  char fname[100];
3773  char *src;
3774  int len, result, dnld_size;
3775 
3776 	if (!capable(CAP_SYS_ADMIN))
3777 		return -EACCES;
3778 
3779  len = snprintf(fname, 99, "%s", buf);
3780  fname[len-1] = '\0';
3781 
3782  if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3783  dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3784  return -EIO;
3785  }
3786 
3787  image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3788 
3789  src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3790  dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3791  sglist = ipr_alloc_ucode_buffer(dnld_size);
3792 
3793  if (!sglist) {
3794  dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3795  release_firmware(fw_entry);
3796  return -ENOMEM;
3797  }
3798 
3799  result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3800 
3801  if (result) {
3802  dev_err(&ioa_cfg->pdev->dev,
3803  "Microcode buffer copy to DMA buffer failed\n");
3804  goto out;
3805  }
3806 
3807  ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
3808 
3809  result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3810 
3811  if (!result)
3812  result = count;
3813 out:
3814  ipr_free_ucode_buffer(sglist);
3815  release_firmware(fw_entry);
3816  return result;
3817 }
3818 
3819 static struct device_attribute ipr_update_fw_attr = {
3820  .attr = {
3821  .name = "update_fw",
3822  .mode = S_IWUSR,
3823  },
3824  .store = ipr_store_update_fw
3825 };
3826 
3835 static ssize_t ipr_show_fw_type(struct device *dev,
3836  struct device_attribute *attr, char *buf)
3837 {
3838  struct Scsi_Host *shost = class_to_shost(dev);
3839  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3840  unsigned long lock_flags = 0;
3841  int len;
3842 
3843  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3844  len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3845  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3846  return len;
3847 }
3848 
3849 static struct device_attribute ipr_ioa_fw_type_attr = {
3850  .attr = {
3851  .name = "fw_type",
3852  .mode = S_IRUGO,
3853  },
3854  .show = ipr_show_fw_type
3855 };
3856 
3857 static struct device_attribute *ipr_ioa_attrs[] = {
3858  &ipr_fw_version_attr,
3859  &ipr_log_level_attr,
3860  &ipr_diagnostics_attr,
3861  &ipr_ioa_state_attr,
3862  &ipr_ioa_reset_attr,
3863  &ipr_update_fw_attr,
3864  &ipr_ioa_fw_type_attr,
3865  NULL,
3866 };
3867 
3868 #ifdef CONFIG_SCSI_IPR_DUMP
3869 
3881 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3882  struct bin_attribute *bin_attr,
3883  char *buf, loff_t off, size_t count)
3884 {
3885  struct device *cdev = container_of(kobj, struct device, kobj);
3886  struct Scsi_Host *shost = class_to_shost(cdev);
3887  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3888  struct ipr_dump *dump;
3889  unsigned long lock_flags = 0;
3890  char *src;
3891  int len, sdt_end;
3892  size_t rc = count;
3893 
3894 	if (!capable(CAP_SYS_ADMIN))
3895 		return -EACCES;
3896 
3897  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3898  dump = ioa_cfg->dump;
3899 
3900  if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3901  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3902  return 0;
3903  }
3904  kref_get(&dump->kref);
3905  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3906 
3907  if (off > dump->driver_dump.hdr.len) {
3908  kref_put(&dump->kref, ipr_release_dump);
3909  return 0;
3910  }
3911 
3912  if (off + count > dump->driver_dump.hdr.len) {
3913  count = dump->driver_dump.hdr.len - off;
3914  rc = count;
3915  }
3916 
3917  if (count && off < sizeof(dump->driver_dump)) {
3918  if (off + count > sizeof(dump->driver_dump))
3919  len = sizeof(dump->driver_dump) - off;
3920  else
3921  len = count;
3922  src = (u8 *)&dump->driver_dump + off;
3923  memcpy(buf, src, len);
3924  buf += len;
3925  off += len;
3926  count -= len;
3927  }
3928 
3929  off -= sizeof(dump->driver_dump);
3930 
3931  if (ioa_cfg->sis64)
3932  sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3933  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
3934  sizeof(struct ipr_sdt_entry));
3935  else
3936  sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3937  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
3938 
3939  if (count && off < sdt_end) {
3940  if (off + count > sdt_end)
3941  len = sdt_end - off;
3942  else
3943  len = count;
3944  src = (u8 *)&dump->ioa_dump + off;
3945  memcpy(buf, src, len);
3946  buf += len;
3947  off += len;
3948  count -= len;
3949  }
3950 
3951  off -= sdt_end;
3952 
3953  while (count) {
3954  if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3955  len = PAGE_ALIGN(off) - off;
3956  else
3957  len = count;
3958  src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3959  src += off & ~PAGE_MASK;
3960  memcpy(buf, src, len);
3961  buf += len;
3962  off += len;
3963  count -= len;
3964  }
3965 
3966  kref_put(&dump->kref, ipr_release_dump);
3967  return rc;
3968 }
3969 
3977 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3978 {
3979  struct ipr_dump *dump;
3980  __be32 **ioa_data;
3981  unsigned long lock_flags = 0;
3982 
3983  dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3984 
3985  if (!dump) {
3986  ipr_err("Dump memory allocation failed\n");
3987  return -ENOMEM;
3988  }
3989 
3990  if (ioa_cfg->sis64)
3991  ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3992  else
3993  ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3994 
3995  if (!ioa_data) {
3996  ipr_err("Dump memory allocation failed\n");
3997  kfree(dump);
3998  return -ENOMEM;
3999  }
4000 
4001  dump->ioa_dump.ioa_data = ioa_data;
4002 
4003  kref_init(&dump->kref);
4004  dump->ioa_cfg = ioa_cfg;
4005 
4006  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4007 
4008  if (INACTIVE != ioa_cfg->sdt_state) {
4009  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4010  vfree(dump->ioa_dump.ioa_data);
4011  kfree(dump);
4012  return 0;
4013  }
4014 
4015  ioa_cfg->dump = dump;
4016  ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4017  if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
4018  ioa_cfg->dump_taken = 1;
4019  schedule_work(&ioa_cfg->work_q);
4020  }
4021  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4022 
4023  return 0;
4024 }
4025 
4033 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4034 {
4035  struct ipr_dump *dump;
4036  unsigned long lock_flags = 0;
4037 
4038  ENTER;
4039 
4040  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4041  dump = ioa_cfg->dump;
4042  if (!dump) {
4043  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4044  return 0;
4045  }
4046 
4047  ioa_cfg->dump = NULL;
4048  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4049 
4050  kref_put(&dump->kref, ipr_release_dump);
4051 
4052  LEAVE;
4053  return 0;
4054 }
4055 
4068 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4069  struct bin_attribute *bin_attr,
4070  char *buf, loff_t off, size_t count)
4071 {
4072  struct device *cdev = container_of(kobj, struct device, kobj);
4073  struct Scsi_Host *shost = class_to_shost(cdev);
4074  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4075  int rc;
4076 
4077 	if (!capable(CAP_SYS_ADMIN))
4078 		return -EACCES;
4079 
4080  if (buf[0] == '1')
4081  rc = ipr_alloc_dump(ioa_cfg);
4082  else if (buf[0] == '0')
4083  rc = ipr_free_dump(ioa_cfg);
4084  else
4085  return -EINVAL;
4086 
4087  if (rc)
4088  return rc;
4089  else
4090  return count;
4091 }
4092 
4093 static struct bin_attribute ipr_dump_attr = {
4094  .attr = {
4095  .name = "dump",
4096  .mode = S_IRUSR | S_IWUSR,
4097  },
4098  .size = 0,
4099  .read = ipr_read_dump,
4100  .write = ipr_write_dump
4101 };
4102 #else
4103 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4104 #endif
4105 
4115 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4116  int reason)
4117 {
4118  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4119  struct ipr_resource_entry *res;
4120  unsigned long lock_flags = 0;
4121 
4122  if (reason != SCSI_QDEPTH_DEFAULT)
4123  return -EOPNOTSUPP;
4124 
4125  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4126  res = (struct ipr_resource_entry *)sdev->hostdata;
4127 
4128  if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4129  qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4130  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4131 
4132  scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4133  return sdev->queue_depth;
4134 }
4135 
4144 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4145 {
4146  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4147  struct ipr_resource_entry *res;
4148  unsigned long lock_flags = 0;
4149 
4150  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4151  res = (struct ipr_resource_entry *)sdev->hostdata;
4152 
4153  if (res) {
4154  if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4155  /*
4156  * We don't bother quiescing the device here since the
4157  * adapter firmware does it for us.
4158  */
4159  scsi_set_tag_type(sdev, tag_type);
4160 
4161  if (tag_type)
4162  scsi_activate_tcq(sdev, sdev->queue_depth);
4163  else
4164  scsi_deactivate_tcq(sdev, sdev->queue_depth);
4165  } else
4166  tag_type = 0;
4167  } else
4168  tag_type = 0;
4169 
4170  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4171  return tag_type;
4172 }
4173 
4183 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4184 {
4185  struct scsi_device *sdev = to_scsi_device(dev);
4186  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4187  struct ipr_resource_entry *res;
4188  unsigned long lock_flags = 0;
4189  ssize_t len = -ENXIO;
4190 
4191  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4192  res = (struct ipr_resource_entry *)sdev->hostdata;
4193  if (res)
4194  len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4195  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4196  return len;
4197 }
4198 
4199 static struct device_attribute ipr_adapter_handle_attr = {
4200  .attr = {
4201  .name = "adapter_handle",
4202  .mode = S_IRUSR,
4203  },
4204  .show = ipr_show_adapter_handle
4205 };
4206 
4217 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4218 {
4219  struct scsi_device *sdev = to_scsi_device(dev);
4220  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4221  struct ipr_resource_entry *res;
4222  unsigned long lock_flags = 0;
4223  ssize_t len = -ENXIO;
4224  char buffer[IPR_MAX_RES_PATH_LENGTH];
4225 
4226  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4227  res = (struct ipr_resource_entry *)sdev->hostdata;
4228  if (res && ioa_cfg->sis64)
4229  len = snprintf(buf, PAGE_SIZE, "%s\n",
4230  ipr_format_res_path(res->res_path, buffer,
4231  sizeof(buffer)));
4232  else if (res)
4233  len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4234  res->bus, res->target, res->lun);
4235 
4236  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4237  return len;
4238 }
4239 
4240 static struct device_attribute ipr_resource_path_attr = {
4241  .attr = {
4242  .name = "resource_path",
4243  .mode = S_IRUGO,
4244  },
4245  .show = ipr_show_resource_path
4246 };
4247 
4257 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4258 {
4259  struct scsi_device *sdev = to_scsi_device(dev);
4260  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4261  struct ipr_resource_entry *res;
4262  unsigned long lock_flags = 0;
4263  ssize_t len = -ENXIO;
4264 
4265  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4266  res = (struct ipr_resource_entry *)sdev->hostdata;
4267  if (res && ioa_cfg->sis64)
4268  len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4269  else if (res)
4270  len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4271 
4272  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4273  return len;
4274 }
4275 
4276 static struct device_attribute ipr_device_id_attr = {
4277  .attr = {
4278  .name = "device_id",
4279  .mode = S_IRUGO,
4280  },
4281  .show = ipr_show_device_id
4282 };
4283 
4293 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4294 {
4295  struct scsi_device *sdev = to_scsi_device(dev);
4296  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4297  struct ipr_resource_entry *res;
4298  unsigned long lock_flags = 0;
4299  ssize_t len = -ENXIO;
4300 
4301  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4302  res = (struct ipr_resource_entry *)sdev->hostdata;
4303 
4304  if (res)
4305  len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4306 
4307  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4308  return len;
4309 }
4310 
4311 static struct device_attribute ipr_resource_type_attr = {
4312  .attr = {
4313  .name = "resource_type",
4314  .mode = S_IRUGO,
4315  },
4316  .show = ipr_show_resource_type
4317 };
4318 
4319 static struct device_attribute *ipr_dev_attrs[] = {
4320  &ipr_adapter_handle_attr,
4321  &ipr_resource_path_attr,
4322  &ipr_device_id_attr,
4323  &ipr_resource_type_attr,
4324  NULL,
4325 };
4326 
4341 static int ipr_biosparam(struct scsi_device *sdev,
4342  struct block_device *block_device,
4343  sector_t capacity, int *parm)
4344 {
4345  int heads, sectors;
4346  sector_t cylinders;
4347 
4348  heads = 128;
4349  sectors = 32;
4350 
4351  cylinders = capacity;
4352  sector_div(cylinders, (128 * 32));
4353 
4354  /* return result */
4355  parm[0] = heads;
4356  parm[1] = sectors;
4357  parm[2] = cylinders;
4358 
4359  return 0;
4360 }
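The geometry reported above is fixed at 128 heads and 32 sectors per track, so the cylinder count is just the capacity divided by 4096 sectors; sector_div() is used only because capacity is a 64-bit sector_t. A minimal user-space sketch of the same arithmetic (helper names invented for illustration, not part of ipr.c):

/* Illustrative sketch, not part of ipr.c: the fixed 128-head,
 * 32-sector geometry math, with plain 64-bit division standing in
 * for the kernel's sector_div() helper.
 */
#include <stdio.h>
#include <stdint.h>

static void fake_geometry(uint64_t capacity_in_sectors,
			  int *heads, int *sectors, uint64_t *cylinders)
{
	*heads = 128;
	*sectors = 32;
	/* 128 heads * 32 sectors = 4096 sectors per cylinder */
	*cylinders = capacity_in_sectors / (128 * 32);
}

int main(void)
{
	int heads, sectors;
	uint64_t cylinders;

	/* e.g. roughly a 70 GB disk in 512-byte sectors */
	fake_geometry(143374744ULL, &heads, &sectors, &cylinders);
	printf("C/H/S = %llu/%d/%d\n",
	       (unsigned long long)cylinders, heads, sectors);
	return 0;
}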
4361 
4369 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4370 {
4371  struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4372  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4373  struct ipr_resource_entry *res;
4374 
4375  list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4376  if ((res->bus == starget->channel) &&
4377  (res->target == starget->id)) {
4378  return res;
4379  }
4380  }
4381 
4382  return NULL;
4383 }
4384 
4385 static struct ata_port_info sata_port_info;
4386 
4397 static int ipr_target_alloc(struct scsi_target *starget)
4398 {
4399  struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4400  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4401  struct ipr_sata_port *sata_port;
4402  struct ata_port *ap;
4403  struct ipr_resource_entry *res;
4404  unsigned long lock_flags;
4405 
4406  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4407  res = ipr_find_starget(starget);
4408  starget->hostdata = NULL;
4409 
4410  if (res && ipr_is_gata(res)) {
4411  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4412  sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4413  if (!sata_port)
4414  return -ENOMEM;
4415 
4416  ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4417  if (ap) {
4418  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4419  sata_port->ioa_cfg = ioa_cfg;
4420  sata_port->ap = ap;
4421  sata_port->res = res;
4422 
4423  res->sata_port = sata_port;
4424  ap->private_data = sata_port;
4425  starget->hostdata = sata_port;
4426  } else {
4427  kfree(sata_port);
4428  return -ENOMEM;
4429  }
4430  }
4431  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4432 
4433  return 0;
4434 }
4435 
4444 static void ipr_target_destroy(struct scsi_target *starget)
4445 {
4446  struct ipr_sata_port *sata_port = starget->hostdata;
4447  struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4448  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4449 
4450  if (ioa_cfg->sis64) {
4451  if (!ipr_find_starget(starget)) {
4452  if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4453  clear_bit(starget->id, ioa_cfg->array_ids);
4454  else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4455  clear_bit(starget->id, ioa_cfg->vset_ids);
4456  else if (starget->channel == 0)
4457  clear_bit(starget->id, ioa_cfg->target_ids);
4458  }
4459  }
4460 
4461  if (sata_port) {
4462  starget->hostdata = NULL;
4463  ata_sas_port_destroy(sata_port->ap);
4464  kfree(sata_port);
4465  }
4466 }
4467 
4475 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4476 {
4477  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4478  struct ipr_resource_entry *res;
4479 
4480  list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4481  if ((res->bus == sdev->channel) &&
4482  (res->target == sdev->id) &&
4483  (res->lun == sdev->lun))
4484  return res;
4485  }
4486 
4487  return NULL;
4488 }
4489 
4497 static void ipr_slave_destroy(struct scsi_device *sdev)
4498 {
4499  struct ipr_resource_entry *res;
4500  struct ipr_ioa_cfg *ioa_cfg;
4501  unsigned long lock_flags = 0;
4502 
4503  ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4504 
4505  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4506  res = (struct ipr_resource_entry *) sdev->hostdata;
4507  if (res) {
4508  if (res->sata_port)
4509  res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4510  sdev->hostdata = NULL;
4511  res->sdev = NULL;
4512  res->sata_port = NULL;
4513  }
4514  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4515 }
4516 
4526 static int ipr_slave_configure(struct scsi_device *sdev)
4527 {
4528  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4529  struct ipr_resource_entry *res;
4530  struct ata_port *ap = NULL;
4531  unsigned long lock_flags = 0;
4532  char buffer[IPR_MAX_RES_PATH_LENGTH];
4533 
4534  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4535  res = sdev->hostdata;
4536  if (res) {
4537  if (ipr_is_af_dasd_device(res))
4538  sdev->type = TYPE_RAID;
4539  if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4540  sdev->scsi_level = 4;
4541  sdev->no_uld_attach = 1;
4542  }
4543  if (ipr_is_vset_device(res)) {
4544  blk_queue_rq_timeout(sdev->request_queue,
4545  IPR_VSET_RW_TIMEOUT);
4546  blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4547  }
4548  if (ipr_is_gata(res) && res->sata_port)
4549  ap = res->sata_port->ap;
4550  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4551 
4552  if (ap) {
4553  scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4554  ata_sas_slave_configure(sdev, ap);
4555  } else
4556  scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4557  if (ioa_cfg->sis64)
4558  sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4559  ipr_format_res_path(res->res_path, buffer,
4560  sizeof(buffer)));
4561  return 0;
4562  }
4563  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4564  return 0;
4565 }
4566 
4577 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4578 {
4579  struct ipr_sata_port *sata_port = NULL;
4580  int rc = -ENXIO;
4581 
4582  ENTER;
4583  if (sdev->sdev_target)
4584  sata_port = sdev->sdev_target->hostdata;
4585  if (sata_port) {
4586  rc = ata_sas_port_init(sata_port->ap);
4587  if (rc == 0)
4588  rc = ata_sas_sync_probe(sata_port->ap);
4589  }
4590 
4591  if (rc)
4592  ipr_slave_destroy(sdev);
4593 
4594  LEAVE;
4595  return rc;
4596 }
4597 
4610 static int ipr_slave_alloc(struct scsi_device *sdev)
4611 {
4612  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4613  struct ipr_resource_entry *res;
4614  unsigned long lock_flags;
4615  int rc = -ENXIO;
4616 
4617  sdev->hostdata = NULL;
4618 
4619  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4620 
4621  res = ipr_find_sdev(sdev);
4622  if (res) {
4623  res->sdev = sdev;
4624  res->add_to_ml = 0;
4625  res->in_erp = 0;
4626  sdev->hostdata = res;
4627  if (!ipr_is_naca_model(res))
4628  res->needs_sync_complete = 1;
4629  rc = 0;
4630  if (ipr_is_gata(res)) {
4631  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4632  return ipr_ata_slave_alloc(sdev);
4633  }
4634  }
4635 
4636  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4637 
4638  return rc;
4639 }
4640 
4648 static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
4649 {
4650  struct ipr_ioa_cfg *ioa_cfg;
4651  int rc;
4652 
4653  ENTER;
4654  ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4655 
4656  if (!ioa_cfg->in_reset_reload) {
4657  dev_err(&ioa_cfg->pdev->dev,
4658  "Adapter being reset as a result of error recovery.\n");
4659 
4660  if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4661  ioa_cfg->sdt_state = GET_DUMP;
4662  }
4663 
4664  rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4665 
4666  LEAVE;
4667  return rc;
4668 }
4669 
4670 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4671 {
4672  int rc;
4673 
4674  spin_lock_irq(cmd->device->host->host_lock);
4675  rc = __ipr_eh_host_reset(cmd);
4676  spin_unlock_irq(cmd->device->host->host_lock);
4677 
4678  return rc;
4679 }
4680 
4695 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4696  struct ipr_resource_entry *res)
4697 {
4698  struct ipr_cmnd *ipr_cmd;
4699  struct ipr_ioarcb *ioarcb;
4700  struct ipr_cmd_pkt *cmd_pkt;
4701  struct ipr_ioarcb_ata_regs *regs;
4702  u32 ioasc;
4703 
4704  ENTER;
4705  ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4706  ioarcb = &ipr_cmd->ioarcb;
4707  cmd_pkt = &ioarcb->cmd_pkt;
4708 
4709  if (ipr_cmd->ioa_cfg->sis64) {
4710  regs = &ipr_cmd->i.ata_ioadl.regs;
4711  ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4712  } else
4713  regs = &ioarcb->u.add_data.u.regs;
4714 
4715  ioarcb->res_handle = res->res_handle;
4716  cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4717  cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4718  if (ipr_is_gata(res)) {
4719  cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4720  ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4721  regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4722  }
4723 
4724  ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4725  ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4726  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4727  if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4728  if (ipr_cmd->ioa_cfg->sis64)
4729  memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4730  sizeof(struct ipr_ioasa_gata));
4731  else
4732  memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4733  sizeof(struct ipr_ioasa_gata));
4734  }
4735 
4736  LEAVE;
4737  return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4738 }
4739 
4750 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4751  unsigned long deadline)
4752 {
4753  struct ipr_sata_port *sata_port = link->ap->private_data;
4754  struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4755  struct ipr_resource_entry *res;
4756  unsigned long lock_flags = 0;
4757  int rc = -ENXIO;
4758 
4759  ENTER;
4760  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4761  while (ioa_cfg->in_reset_reload) {
4762  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4763  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4764  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4765  }
4766 
4767  res = sata_port->res;
4768  if (res) {
4769  rc = ipr_device_reset(ioa_cfg, res);
4770  *classes = res->ata_class;
4771  }
4772 
4773  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4774  LEAVE;
4775  return rc;
4776 }
4777 
4789 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4790 {
4791  struct ipr_cmnd *ipr_cmd;
4792  struct ipr_ioa_cfg *ioa_cfg;
4793  struct ipr_resource_entry *res;
4794  struct ata_port *ap;
4795  int rc = 0;
4796 
4797  ENTER;
4798  ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4799  res = scsi_cmd->device->hostdata;
4800 
4801  if (!res)
4802  return FAILED;
4803 
4804  /*
4805  * If we are currently going through reset/reload, return failed. This will force the
4806  * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4807  * reset to complete
4808  */
4809  if (ioa_cfg->in_reset_reload)
4810  return FAILED;
4811  if (ioa_cfg->ioa_is_dead)
4812  return FAILED;
4813 
4814  list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4815  if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4816  if (ipr_cmd->scsi_cmd)
4817  ipr_cmd->done = ipr_scsi_eh_done;
4818  if (ipr_cmd->qc)
4819  ipr_cmd->done = ipr_sata_eh_done;
4820  if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4821  ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4822  ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4823  }
4824  }
4825  }
4826 
4827  res->resetting_device = 1;
4828  scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4829 
4830  if (ipr_is_gata(res) && res->sata_port) {
4831  ap = res->sata_port->ap;
4832  spin_unlock_irq(scsi_cmd->device->host->host_lock);
4833  ata_std_error_handler(ap);
4834  spin_lock_irq(scsi_cmd->device->host->host_lock);
4835 
4836  list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4837  if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4838  rc = -EIO;
4839  break;
4840  }
4841  }
4842  } else
4843  rc = ipr_device_reset(ioa_cfg, res);
4844  res->resetting_device = 0;
4845 
4846  LEAVE;
4847  return rc ? FAILED : SUCCESS;
4848 }
4849 
4850 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
4851 {
4852  int rc;
4853 
4854  spin_lock_irq(cmd->device->host->host_lock);
4855  rc = __ipr_eh_dev_reset(cmd);
4856  spin_unlock_irq(cmd->device->host->host_lock);
4857 
4858  return rc;
4859 }
4860 
4870 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4871 {
4872  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4873  struct ipr_resource_entry *res;
4874 
4875  ENTER;
4876  if (!ioa_cfg->sis64)
4877  list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4878  if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4879  scsi_report_bus_reset(ioa_cfg->host, res->bus);
4880  break;
4881  }
4882  }
4883 
4884  /*
4885  * If abort has not completed, indicate the reset has, else call the
4886  * abort's done function to wake the sleeping eh thread
4887  */
4888  if (ipr_cmd->sibling->sibling)
4889  ipr_cmd->sibling->sibling = NULL;
4890  else
4891  ipr_cmd->sibling->done(ipr_cmd->sibling);
4892 
4893  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4894  LEAVE;
4895 }
4896 
4908 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4909 {
4910  struct ipr_cmnd *reset_cmd;
4911  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4912  struct ipr_cmd_pkt *cmd_pkt;
4913  unsigned long lock_flags = 0;
4914 
4915  ENTER;
4916  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4917  if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4918  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4919  return;
4920  }
4921 
4922  sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4923  reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4924  ipr_cmd->sibling = reset_cmd;
4925  reset_cmd->sibling = ipr_cmd;
4926  reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4927  cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4928  cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4929  cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4930  cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4931 
4932  ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4933  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4934  LEAVE;
4935 }
4936 
4946 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
4947 {
4948  struct ipr_cmnd *ipr_cmd;
4949  struct ipr_ioa_cfg *ioa_cfg;
4950  struct ipr_resource_entry *res;
4951  struct ipr_cmd_pkt *cmd_pkt;
4952  u32 ioasc, int_reg;
4953  int op_found = 0;
4954 
4955  ENTER;
4956  ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4957  res = scsi_cmd->device->hostdata;
4958 
4959  /* If we are currently going through reset/reload, return failed.
4960  * This will force the mid-layer to call ipr_eh_host_reset,
4961  * which will then go to sleep and wait for the reset to complete
4962  */
4963  if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4964  return FAILED;
4965  if (!res)
4966  return FAILED;
4967 
4968  /*
4969  * If we are aborting a timed out op, chances are that the timeout was caused
4970  * by a still not detected EEH error. In such cases, reading a register will
4971  * trigger the EEH recovery infrastructure.
4972  */
4973  int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4974 
4975  if (!ipr_is_gscsi(res))
4976  return FAILED;
4977 
4978  list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4979  if (ipr_cmd->scsi_cmd == scsi_cmd) {
4980  ipr_cmd->done = ipr_scsi_eh_done;
4981  op_found = 1;
4982  break;
4983  }
4984  }
4985 
4986  if (!op_found)
4987  return SUCCESS;
4988 
4989  ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4990  ipr_cmd->ioarcb.res_handle = res->res_handle;
4991  cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4992  cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4993  cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4994  ipr_cmd->u.sdev = scsi_cmd->device;
4995 
4996  scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4997  scsi_cmd->cmnd[0]);
4998  ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4999  ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5000 
5001  /*
5002  * If the abort task timed out and we sent a bus reset, we will get
5003  * one of the following responses to the abort
5004  */
5005  if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5006  ioasc = 0;
5007  ipr_trace;
5008  }
5009 
5010  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5011  if (!ipr_is_naca_model(res))
5012  res->needs_sync_complete = 1;
5013 
5014  LEAVE;
5015  return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5016 }
5017 
5025 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5026 {
5027  unsigned long flags;
5028  int rc;
5029 
5030  ENTER;
5031 
5032  spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5033  rc = ipr_cancel_op(scsi_cmd);
5034  spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5035 
5036  LEAVE;
5037  return rc;
5038 }
5039 
5048 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5049  u32 int_reg)
5050 {
5051  irqreturn_t rc = IRQ_HANDLED;
5052  u32 int_mask_reg;
5053 
5054  int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5055  int_reg &= ~int_mask_reg;
5056 
5057  /* If an interrupt on the adapter did not occur, ignore it.
5058  * Or in the case of SIS 64, check for a stage change interrupt.
5059  */
5060  if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5061  if (ioa_cfg->sis64) {
5062  int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5063  int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5064  if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5065 
5066  /* clear stage change */
5067  writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5068  int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5069  list_del(&ioa_cfg->reset_cmd->queue);
5070  del_timer(&ioa_cfg->reset_cmd->timer);
5071  ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5072  return IRQ_HANDLED;
5073  }
5074  }
5075 
5076  return IRQ_NONE;
5077  }
5078 
5079  if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5080  /* Mask the interrupt */
5081  writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5082 
5083  /* Clear the interrupt */
5084  writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5085  int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5086 
5087  list_del(&ioa_cfg->reset_cmd->queue);
5088  del_timer(&ioa_cfg->reset_cmd->timer);
5089  ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5090  } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5091  if (ioa_cfg->clear_isr) {
5092  if (ipr_debug && printk_ratelimit())
5093  dev_err(&ioa_cfg->pdev->dev,
5094  "Spurious interrupt detected. 0x%08X\n", int_reg);
5095  writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5096  int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5097  return IRQ_NONE;
5098  }
5099  } else {
5100  if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5101  ioa_cfg->ioa_unit_checked = 1;
5102  else
5103  dev_err(&ioa_cfg->pdev->dev,
5104  "Permanent IOA failure. 0x%08X\n", int_reg);
5105 
5106  if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5107  ioa_cfg->sdt_state = GET_DUMP;
5108 
5109  ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5110  ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5111  }
5112 
5113  return rc;
5114 }
5115 
5124 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
5125 {
5126  ioa_cfg->errors_logged++;
5127  dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
5128 
5129  if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5130  ioa_cfg->sdt_state = GET_DUMP;
5131 
5132  ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5133 }
5134 
5143 static irqreturn_t ipr_isr(int irq, void *devp)
5144 {
5145  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
5146  unsigned long lock_flags = 0;
5147  u32 int_reg = 0;
5148  u32 ioasc;
5149  u16 cmd_index;
5150  int num_hrrq = 0;
5151  int irq_none = 0;
5152  struct ipr_cmnd *ipr_cmd, *temp;
5153  irqreturn_t rc = IRQ_NONE;
5154  LIST_HEAD(doneq);
5155 
5156  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5157 
5158  /* If interrupts are disabled, ignore the interrupt */
5159  if (!ioa_cfg->allow_interrupts) {
5160  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5161  return IRQ_NONE;
5162  }
5163 
5164  while (1) {
5165  ipr_cmd = NULL;
5166 
5167  while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5168  ioa_cfg->toggle_bit) {
5169 
5170  cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
5171  IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5172 
5173  if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
5174  ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
5175  rc = IRQ_HANDLED;
5176  goto unlock_out;
5177  }
5178 
5179  ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5180 
5181  ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5182 
5183  ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5184 
5185  list_move_tail(&ipr_cmd->queue, &doneq);
5186 
5187  rc = IRQ_HANDLED;
5188 
5189  if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
5190  ioa_cfg->hrrq_curr++;
5191  } else {
5192  ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5193  ioa_cfg->toggle_bit ^= 1u;
5194  }
5195  }
5196 
5197  if (ipr_cmd && !ioa_cfg->clear_isr)
5198  break;
5199 
5200  if (ipr_cmd != NULL) {
5201  /* Clear the PCI interrupt */
5202  num_hrrq = 0;
5203  do {
5204  writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5205  int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5206  } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5207  num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5208 
5209  } else if (rc == IRQ_NONE && irq_none == 0) {
5210  int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5211  irq_none++;
5212  } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5213  int_reg & IPR_PCII_HRRQ_UPDATED) {
5214  ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5215  rc = IRQ_HANDLED;
5216  goto unlock_out;
5217  } else
5218  break;
5219  }
5220 
5221  if (unlikely(rc == IRQ_NONE))
5222  rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5223 
5224 unlock_out:
5225  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5226  list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5227  list_del(&ipr_cmd->queue);
5228  del_timer(&ipr_cmd->timer);
5229  ipr_cmd->fast_done(ipr_cmd);
5230  }
5231 
5232  return rc;
5233 }
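ipr_isr() drains the host RRQ as a circular queue: each 32-bit response entry carries a toggle bit that the adapter flips on every pass through the queue, so an entry is new only while its toggle bit matches the value the driver expects, and on wrap-around the driver flips its expected value. A simplified user-space sketch of that consumption pattern (entry layout and names are invented for the example; only the wrap/toggle logic mirrors the driver):

/* Illustrative sketch, not part of ipr.c: toggle-bit circular-queue
 * consumption in the style of the HRRQ loop in ipr_isr().
 */
#include <stdio.h>
#include <stdint.h>

#define QUEUE_SIZE	8
#define TOGGLE_BIT	0x1u	/* assumption: low bit marks the producer pass */

static uint32_t queue[QUEUE_SIZE];
static uint32_t *curr = queue;
static uint32_t toggle = 1;	/* consumer starts expecting the first pass */

/* Drain every entry whose toggle bit matches what we expect. */
static void drain_queue(void (*handle)(uint32_t entry))
{
	while ((*curr & TOGGLE_BIT) == toggle) {
		handle(*curr);
		if (curr < &queue[QUEUE_SIZE - 1]) {
			curr++;
		} else {
			curr = queue;	/* wrap to the start ... */
			toggle ^= 1u;	/* ... and expect the other pass */
		}
	}
}

static void show(uint32_t entry)
{
	printf("consumed entry 0x%08x\n", entry);
}

int main(void)
{
	/* Producer wrote two entries on its first pass (toggle bit = 1). */
	queue[0] = 0x100 | 1;
	queue[1] = 0x200 | 1;
	drain_queue(show);
	return 0;
}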
5234 
5243 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5244  struct ipr_cmnd *ipr_cmd)
5245 {
5246  int i, nseg;
5247  struct scatterlist *sg;
5248  u32 length;
5249  u32 ioadl_flags = 0;
5250  struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5251  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5252  struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5253 
5254  length = scsi_bufflen(scsi_cmd);
5255  if (!length)
5256  return 0;
5257 
5258  nseg = scsi_dma_map(scsi_cmd);
5259  if (nseg < 0) {
5260  if (printk_ratelimit())
5261  dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5262  return -1;
5263  }
5264 
5265  ipr_cmd->dma_use_sg = nseg;
5266 
5267  ioarcb->data_transfer_length = cpu_to_be32(length);
5268  ioarcb->ioadl_len =
5269  cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5270 
5271  if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5272  ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5273  ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5274  } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5275  ioadl_flags = IPR_IOADL_FLAGS_READ;
5276 
5277  scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5278  ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5279  ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5280  ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5281  }
5282 
5283  ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5284  return 0;
5285 }
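Both IOADL builders walk the mapped scatter/gather list, copy each segment's DMA address and length into a descriptor, and mark only the final descriptor with a LAST flag so the adapter knows where the list ends. A self-contained sketch of that flattening step (structure names and flag values are placeholders, not the driver's):

/* Illustrative sketch, not part of ipr.c: flatten an S/G list into
 * fixed-size descriptors and flag the final one, in the shape of
 * ipr_build_ioadl64().
 */
#include <stdio.h>
#include <stdint.h>

#define DESC_FLAG_READ	0x48000000u	/* placeholder flag values */
#define DESC_FLAG_LAST	0x80000000u

struct sg_entry { uint64_t addr; uint32_t len; };
struct desc { uint32_t flags; uint32_t len; uint64_t addr; };

static int build_descs(const struct sg_entry *sg, int nseg,
		       struct desc *out, uint32_t flags)
{
	int i;

	if (nseg <= 0)
		return -1;
	for (i = 0; i < nseg; i++) {
		out[i].flags = flags;
		out[i].len = sg[i].len;
		out[i].addr = sg[i].addr;
	}
	out[nseg - 1].flags |= DESC_FLAG_LAST;	/* mark end of the list */
	return 0;
}

int main(void)
{
	struct sg_entry sg[2] = { { 0x1000, 4096 }, { 0x9000, 512 } };
	struct desc d[2];

	build_descs(sg, 2, d, DESC_FLAG_READ);
	printf("last desc flags 0x%08x len %u\n", d[1].flags, d[1].len);
	return 0;
}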
5286 
5295 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5296  struct ipr_cmnd *ipr_cmd)
5297 {
5298  int i, nseg;
5299  struct scatterlist *sg;
5300  u32 length;
5301  u32 ioadl_flags = 0;
5302  struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5303  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5304  struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5305 
5306  length = scsi_bufflen(scsi_cmd);
5307  if (!length)
5308  return 0;
5309 
5310  nseg = scsi_dma_map(scsi_cmd);
5311  if (nseg < 0) {
5312  dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5313  return -1;
5314  }
5315 
5316  ipr_cmd->dma_use_sg = nseg;
5317 
5318  if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5319  ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5320  ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5321  ioarcb->data_transfer_length = cpu_to_be32(length);
5322  ioarcb->ioadl_len =
5323  cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5324  } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5325  ioadl_flags = IPR_IOADL_FLAGS_READ;
5326  ioarcb->read_data_transfer_length = cpu_to_be32(length);
5327  ioarcb->read_ioadl_len =
5328  cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5329  }
5330 
5331  if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5332  ioadl = ioarcb->u.add_data.u.ioadl;
5333  ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5334  offsetof(struct ipr_ioarcb, u.add_data));
5335  ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5336  }
5337 
5338  scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5339  ioadl[i].flags_and_data_len =
5340  cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5341  ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5342  }
5343 
5344  ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5345  return 0;
5346 }
5347 
5355 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5356 {
5357  u8 tag[2];
5358  u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5359 
5360  if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5361  switch (tag[0]) {
5362  case MSG_SIMPLE_TAG:
5363  rc = IPR_FLAGS_LO_SIMPLE_TASK;
5364  break;
5365  case MSG_HEAD_TAG:
5366  rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5367  break;
5368  case MSG_ORDERED_TAG:
5369  rc = IPR_FLAGS_LO_ORDERED_TASK;
5370  break;
5371  };
5372  }
5373 
5374  return rc;
5375 }
5376 
5387 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5388 {
5389  struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5390  struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5391  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5392  u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5393 
5394  if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5395  scsi_cmd->result |= (DID_ERROR << 16);
5396  scmd_printk(KERN_ERR, scsi_cmd,
5397  "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5398  } else {
5399  memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5400  SCSI_SENSE_BUFFERSIZE);
5401  }
5402 
5403  if (res) {
5404  if (!ipr_is_naca_model(res))
5405  res->needs_sync_complete = 1;
5406  res->in_erp = 0;
5407  }
5408  scsi_dma_unmap(ipr_cmd->scsi_cmd);
5409  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5410  scsi_cmd->scsi_done(scsi_cmd);
5411 }
5412 
5420 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5421 {
5422  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5423  struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5424  dma_addr_t dma_addr = ipr_cmd->dma_addr;
5425 
5426  memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5427  ioarcb->data_transfer_length = 0;
5428  ioarcb->read_data_transfer_length = 0;
5429  ioarcb->ioadl_len = 0;
5430  ioarcb->read_ioadl_len = 0;
5431  ioasa->hdr.ioasc = 0;
5432  ioasa->hdr.residual_data_len = 0;
5433 
5434  if (ipr_cmd->ioa_cfg->sis64)
5435  ioarcb->u.sis64_addr_data.data_ioadl_addr =
5436  cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5437  else {
5438  ioarcb->write_ioadl_addr =
5439  cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5440  ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5441  }
5442 }
5443 
5454 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5455 {
5456  struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5457  u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5458 
5459  if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5460  ipr_erp_done(ipr_cmd);
5461  return;
5462  }
5463 
5464  ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5465 
5466  cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5467  cmd_pkt->cdb[0] = REQUEST_SENSE;
5468  cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5469  cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5470  cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5471  cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5472 
5473  ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5474  SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5475 
5476  ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5477  IPR_REQUEST_SENSE_TIMEOUT * 2);
5478 }
5479 
5492 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5493 {
5494  struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5495  struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5496  struct ipr_cmd_pkt *cmd_pkt;
5497 
5498  res->in_erp = 1;
5499 
5500  ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5501 
5502  if (!scsi_get_tag_type(scsi_cmd->device)) {
5503  ipr_erp_request_sense(ipr_cmd);
5504  return;
5505  }
5506 
5507  cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5508  cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5509  cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5510 
5511  ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5512  IPR_CANCEL_ALL_TIMEOUT);
5513 }
5514 
5528 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5529  struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5530 {
5531  int i;
5532  u16 data_len;
5533  u32 ioasc, fd_ioasc;
5534  struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5535  __be32 *ioasa_data = (__be32 *)ioasa;
5536  int error_index;
5537 
5538  ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5539  fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5540 
5541  if (0 == ioasc)
5542  return;
5543 
5544  if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5545  return;
5546 
5547  if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5548  error_index = ipr_get_error(fd_ioasc);
5549  else
5550  error_index = ipr_get_error(ioasc);
5551 
5552  if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5553  /* Don't log an error if the IOA already logged one */
5554  if (ioasa->hdr.ilid != 0)
5555  return;
5556 
5557  if (!ipr_is_gscsi(res))
5558  return;
5559 
5560  if (ipr_error_table[error_index].log_ioasa == 0)
5561  return;
5562  }
5563 
5564  ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5565 
5566  data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5567  if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5568  data_len = sizeof(struct ipr_ioasa64);
5569  else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5570  data_len = sizeof(struct ipr_ioasa);
5571 
5572  ipr_err("IOASA Dump:\n");
5573 
5574  for (i = 0; i < data_len / 4; i += 4) {
5575  ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5576  be32_to_cpu(ioasa_data[i]),
5577  be32_to_cpu(ioasa_data[i+1]),
5578  be32_to_cpu(ioasa_data[i+2]),
5579  be32_to_cpu(ioasa_data[i+3]));
5580  }
5581 }
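The dump above prints the IOASA as rows of four big-endian 32-bit words. A stand-alone sketch of that style of dump (simplified bounds handling, with ntohl() standing in for be32_to_cpu()):

/* Illustrative sketch, not part of ipr.c: print a status buffer as
 * rows of four 32-bit words converted from big-endian.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() */

static void dump_words(const uint32_t *data, unsigned int nwords)
{
	unsigned int i;

	for (i = 0; i + 4 <= nwords; i += 4)
		printf("%08X: %08X %08X %08X %08X\n", i * 4,
		       ntohl(data[i]), ntohl(data[i + 1]),
		       ntohl(data[i + 2]), ntohl(data[i + 3]));
}

int main(void)
{
	uint32_t buf[8] = { 0x01000000, 0x02000000, 0, 0, 0, 0, 0, 0x0A000000 };

	dump_words(buf, 8);
	return 0;
}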
5582 
5591 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5592 {
5593  u32 failing_lba;
5594  u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5595  struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5596  struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5597  u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5598 
5599  memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5600 
5601  if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5602  return;
5603 
5604  ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5605 
5606  if (ipr_is_vset_device(res) &&
5607  ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5608  ioasa->u.vset.failing_lba_hi != 0) {
5609  sense_buf[0] = 0x72;
5610  sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5611  sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5612  sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5613 
5614  sense_buf[7] = 12;
5615  sense_buf[8] = 0;
5616  sense_buf[9] = 0x0A;
5617  sense_buf[10] = 0x80;
5618 
5619  failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5620 
5621  sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5622  sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5623  sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5624  sense_buf[15] = failing_lba & 0x000000ff;
5625 
5626  failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5627 
5628  sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5629  sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5630  sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5631  sense_buf[19] = failing_lba & 0x000000ff;
5632  } else {
5633  sense_buf[0] = 0x70;
5634  sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5635  sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5636  sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5637 
5638  /* Illegal request */
5639  if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5640  (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5641  sense_buf[7] = 10; /* additional length */
5642 
5643  /* IOARCB was in error */
5644  if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5645  sense_buf[15] = 0xC0;
5646  else /* Parameter data was invalid */
5647  sense_buf[15] = 0x80;
5648 
5649  sense_buf[16] =
5650  ((IPR_FIELD_POINTER_MASK &
5651  be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5652  sense_buf[17] =
5653  (IPR_FIELD_POINTER_MASK &
5654  be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5655  } else {
5656  if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5657  if (ipr_is_vset_device(res))
5658  failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5659  else
5660  failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5661 
5662  sense_buf[0] |= 0x80; /* Or in the Valid bit */
5663  sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5664  sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5665  sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5666  sense_buf[6] = failing_lba & 0x000000ff;
5667  }
5668 
5669  sense_buf[7] = 6; /* additional length */
5670  }
5671  }
5672 }
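For vset devices the failing LBA comes back in two 32-bit halves and is packed big-endian into bytes 12..19 of the 0x72 descriptor-format sense data built above. A small stand-alone sketch of that byte packing (not part of ipr.c):

/* Illustrative sketch, not part of ipr.c: pack a 64-bit failing LBA
 * big-endian into descriptor-format sense bytes 12..19.
 */
#include <stdio.h>
#include <stdint.h>

static void pack_failing_lba(uint8_t *sense_buf, uint64_t lba)
{
	uint32_t hi = (uint32_t)(lba >> 32);
	uint32_t lo = (uint32_t)lba;

	sense_buf[12] = (hi >> 24) & 0xff;
	sense_buf[13] = (hi >> 16) & 0xff;
	sense_buf[14] = (hi >> 8) & 0xff;
	sense_buf[15] = hi & 0xff;
	sense_buf[16] = (lo >> 24) & 0xff;
	sense_buf[17] = (lo >> 16) & 0xff;
	sense_buf[18] = (lo >> 8) & 0xff;
	sense_buf[19] = lo & 0xff;
}

int main(void)
{
	uint8_t sense[32] = { 0x72 };	/* descriptor-format response code */
	int i;

	pack_failing_lba(sense, 0x123456789AULL);
	for (i = 12; i < 20; i++)
		printf("%02X ", sense[i]);
	printf("\n");
	return 0;
}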
5673 
5684 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5685 {
5686  struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5687  struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5688 
5689  if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5690  return 0;
5691 
5692  if (ipr_cmd->ioa_cfg->sis64)
5693  memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5694  min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5695  SCSI_SENSE_BUFFERSIZE));
5696  else
5697  memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5698  min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5699  SCSI_SENSE_BUFFERSIZE));
5700  return 1;
5701 }
5702 
5714 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5715  struct ipr_cmnd *ipr_cmd)
5716 {
5717  struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5718  struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5719  u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5720  u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5721 
5722  if (!res) {
5723  ipr_scsi_eh_done(ipr_cmd);
5724  return;
5725  }
5726 
5727  if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5728  ipr_gen_sense(ipr_cmd);
5729 
5730  ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5731 
5732  switch (masked_ioasc) {
5733  case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5734  if (ipr_is_naca_model(res))
5735  scsi_cmd->result |= (DID_ABORT << 16);
5736  else
5737  scsi_cmd->result |= (DID_IMM_RETRY << 16);
5738  break;
5739  case IPR_IOASC_IR_RESOURCE_HANDLE:
5740  case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5741  scsi_cmd->result |= (DID_NO_CONNECT << 16);
5742  break;
5743  case IPR_IOASC_HW_SEL_TIMEOUT:
5744  scsi_cmd->result |= (DID_NO_CONNECT << 16);
5745  if (!ipr_is_naca_model(res))
5746  res->needs_sync_complete = 1;
5747  break;
5748  case IPR_IOASC_SYNC_REQUIRED:
5749  if (!res->in_erp)
5750  res->needs_sync_complete = 1;
5751  scsi_cmd->result |= (DID_IMM_RETRY << 16);
5752  break;
5753  case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5754  case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5755  scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5756  break;
5757  case IPR_IOASC_BUS_WAS_RESET:
5758  case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5759  /*
5760  * Report the bus reset and ask for a retry. The device
5761  * will give CC/UA the next command.
5762  */
5763  if (!res->resetting_device)
5764  scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5765  scsi_cmd->result |= (DID_ERROR << 16);
5766  if (!ipr_is_naca_model(res))
5767  res->needs_sync_complete = 1;
5768  break;
5769  case IPR_IOASC_HW_DEV_BUS_STATUS:
5770  scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5771  if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5772  if (!ipr_get_autosense(ipr_cmd)) {
5773  if (!ipr_is_naca_model(res)) {
5774  ipr_erp_cancel_all(ipr_cmd);
5775  return;
5776  }
5777  }
5778  }
5779  if (!ipr_is_naca_model(res))
5780  res->needs_sync_complete = 1;
5781  break;
5782  case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5783  break;
5784  default:
5785  if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5786  scsi_cmd->result |= (DID_ERROR << 16);
5787  if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5788  res->needs_sync_complete = 1;
5789  break;
5790  }
5791 
5792  scsi_dma_unmap(ipr_cmd->scsi_cmd);
5793  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5794  scsi_cmd->scsi_done(scsi_cmd);
5795 }
5796 
5807 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5808 {
5809  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5810  struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5811  u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5812  unsigned long lock_flags;
5813 
5814  scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5815 
5816  if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5817  scsi_dma_unmap(scsi_cmd);
5818 
5819  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5820  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5821  scsi_cmd->scsi_done(scsi_cmd);
5822  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5823  } else {
5824  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5825  ipr_erp_start(ioa_cfg, ipr_cmd);
5826  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5827  }
5828 }
5829 
5842 static int ipr_queuecommand(struct Scsi_Host *shost,
5843  struct scsi_cmnd *scsi_cmd)
5844 {
5845  struct ipr_ioa_cfg *ioa_cfg;
5846  struct ipr_resource_entry *res;
5847  struct ipr_ioarcb *ioarcb;
5848  struct ipr_cmnd *ipr_cmd;
5849  unsigned long lock_flags;
5850  int rc;
5851 
5852  ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
5853 
5854  spin_lock_irqsave(shost->host_lock, lock_flags);
5855  scsi_cmd->result = (DID_OK << 16);
5856  res = scsi_cmd->device->hostdata;
5857 
5858  /*
5859  * We are currently blocking all devices due to a host reset.
5860  * We have told the host to stop giving us new requests, but
5861  * ERP ops don't count. FIXME
5862  */
5863  if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
5864  spin_unlock_irqrestore(shost->host_lock, lock_flags);
5865  return SCSI_MLQUEUE_HOST_BUSY;
5866  }
5867 
5868  /*
5869  * FIXME - Create scsi_set_host_offline interface
5870  * and the ioa_is_dead check can be removed
5871  */
5872  if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5873  spin_unlock_irqrestore(shost->host_lock, lock_flags);
5874  goto err_nodev;
5875  }
5876 
5877  if (ipr_is_gata(res) && res->sata_port) {
5878  rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
5879  spin_unlock_irqrestore(shost->host_lock, lock_flags);
5880  return rc;
5881  }
5882 
5883  ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
5884  spin_unlock_irqrestore(shost->host_lock, lock_flags);
5885 
5886  ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
5887  ioarcb = &ipr_cmd->ioarcb;
5888 
5889  memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5890  ipr_cmd->scsi_cmd = scsi_cmd;
5891  ipr_cmd->done = ipr_scsi_eh_done;
5892 
5893  if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5894  if (scsi_cmd->underflow == 0)
5895  ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5896 
5897  ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5898  if (ipr_is_gscsi(res))
5899  ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5900  ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5901  ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5902  }
5903 
5904  if (scsi_cmd->cmnd[0] >= 0xC0 &&
5905  (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5906  ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5907 
5908  if (ioa_cfg->sis64)
5909  rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5910  else
5911  rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5912 
5913  spin_lock_irqsave(shost->host_lock, lock_flags);
5914  if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
5915  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5916  spin_unlock_irqrestore(shost->host_lock, lock_flags);
5917  if (!rc)
5918  scsi_dma_unmap(scsi_cmd);
5919  return SCSI_MLQUEUE_HOST_BUSY;
5920  }
5921 
5922  if (unlikely(ioa_cfg->ioa_is_dead)) {
5923  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5924  spin_unlock_irqrestore(shost->host_lock, lock_flags);
5925  scsi_dma_unmap(scsi_cmd);
5926  goto err_nodev;
5927  }
5928 
5929  ioarcb->res_handle = res->res_handle;
5930  if (res->needs_sync_complete) {
5931  ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5932  res->needs_sync_complete = 0;
5933  }
5934  list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5935  ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5936  ipr_send_command(ipr_cmd);
5937  spin_unlock_irqrestore(shost->host_lock, lock_flags);
5938  return 0;
5939 
5940 err_nodev:
5941  spin_lock_irqsave(shost->host_lock, lock_flags);
5942  memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5943  scsi_cmd->result = (DID_NO_CONNECT << 16);
5944  scsi_cmd->scsi_done(scsi_cmd);
5945  spin_unlock_irqrestore(shost->host_lock, lock_flags);
5946  return 0;
5947 }
5948 
5958 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5959 {
5960  struct ipr_resource_entry *res;
5961 
5962  res = (struct ipr_resource_entry *)sdev->hostdata;
5963  if (res && ipr_is_gata(res)) {
5964  if (cmd == HDIO_GET_IDENTITY)
5965  return -ENOTTY;
5966  return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5967  }
5968 
5969  return -EINVAL;
5970 }
5971 
5979 static const char *ipr_ioa_info(struct Scsi_Host *host)
5980 {
5981  static char buffer[512];
5982  struct ipr_ioa_cfg *ioa_cfg;
5983  unsigned long lock_flags = 0;
5984 
5985  ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5986 
5987  spin_lock_irqsave(host->host_lock, lock_flags);
5988  sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5989  spin_unlock_irqrestore(host->host_lock, lock_flags);
5990 
5991  return buffer;
5992 }
5993 
5994 static struct scsi_host_template driver_template = {
5995  .module = THIS_MODULE,
5996  .name = "IPR",
5997  .info = ipr_ioa_info,
5998  .ioctl = ipr_ioctl,
5999  .queuecommand = ipr_queuecommand,
6000  .eh_abort_handler = ipr_eh_abort,
6001  .eh_device_reset_handler = ipr_eh_dev_reset,
6002  .eh_host_reset_handler = ipr_eh_host_reset,
6003  .slave_alloc = ipr_slave_alloc,
6004  .slave_configure = ipr_slave_configure,
6005  .slave_destroy = ipr_slave_destroy,
6006  .target_alloc = ipr_target_alloc,
6007  .target_destroy = ipr_target_destroy,
6008  .change_queue_depth = ipr_change_queue_depth,
6009  .change_queue_type = ipr_change_queue_type,
6010  .bios_param = ipr_biosparam,
6011  .can_queue = IPR_MAX_COMMANDS,
6012  .this_id = -1,
6013  .sg_tablesize = IPR_MAX_SGLIST,
6014  .max_sectors = IPR_IOA_MAX_SECTORS,
6015  .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6016  .use_clustering = ENABLE_CLUSTERING,
6017  .shost_attrs = ipr_ioa_attrs,
6018  .sdev_attrs = ipr_dev_attrs,
6019  .proc_name = IPR_NAME
6020 };
6021 
6027 static void ipr_ata_phy_reset(struct ata_port *ap)
6028 {
6029  unsigned long flags;
6030  struct ipr_sata_port *sata_port = ap->private_data;
6031  struct ipr_resource_entry *res = sata_port->res;
6032  struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6033  int rc;
6034 
6035  ENTER;
6036  spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6037  while (ioa_cfg->in_reset_reload) {
6038  spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6039  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6040  spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6041  }
6042 
6043  if (!ioa_cfg->allow_cmds)
6044  goto out_unlock;
6045 
6046  rc = ipr_device_reset(ioa_cfg, res);
6047 
6048  if (rc) {
6049  ap->link.device[0].class = ATA_DEV_NONE;
6050  goto out_unlock;
6051  }
6052 
6053  ap->link.device[0].class = res->ata_class;
6054  if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6055  ap->link.device[0].class = ATA_DEV_NONE;
6056 
6057 out_unlock:
6058  spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6059  LEAVE;
6060 }
6061 
6069 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6070 {
6071  struct ipr_sata_port *sata_port = qc->ap->private_data;
6072  struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6073  struct ipr_cmnd *ipr_cmd;
6074  unsigned long flags;
6075 
6076  spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6077  while (ioa_cfg->in_reset_reload) {
6078  spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6079  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6080  spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6081  }
6082 
6083  list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
6084  if (ipr_cmd->qc == qc) {
6085  ipr_device_reset(ioa_cfg, sata_port->res);
6086  break;
6087  }
6088  }
6089  spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6090 }
6091 
6100 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6101  struct ata_taskfile *tf)
6102 {
6103  regs->feature = tf->feature;
6104  regs->nsect = tf->nsect;
6105  regs->lbal = tf->lbal;
6106  regs->lbam = tf->lbam;
6107  regs->lbah = tf->lbah;
6108  regs->device = tf->device;
6109  regs->command = tf->command;
6110  regs->hob_feature = tf->hob_feature;
6111  regs->hob_nsect = tf->hob_nsect;
6112  regs->hob_lbal = tf->hob_lbal;
6113  regs->hob_lbam = tf->hob_lbam;
6114  regs->hob_lbah = tf->hob_lbah;
6115  regs->ctl = tf->ctl;
6116 }
6117 
6128 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6129 {
6130  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6131  struct ata_queued_cmd *qc = ipr_cmd->qc;
6132  struct ipr_sata_port *sata_port = qc->ap->private_data;
6133  struct ipr_resource_entry *res = sata_port->res;
6134  u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6135 
6136  if (ipr_cmd->ioa_cfg->sis64)
6137  memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6138  sizeof(struct ipr_ioasa_gata));
6139  else
6140  memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6141  sizeof(struct ipr_ioasa_gata));
6142  ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6143 
6144  if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6145  scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6146 
6147  if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6148  qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6149  else
6150  qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6151  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6152  ata_qc_complete(qc);
6153 }
6154 
6161 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6162  struct ata_queued_cmd *qc)
6163 {
6164  u32 ioadl_flags = 0;
6165  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6166  struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
6167  struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6168  int len = qc->nbytes;
6169  struct scatterlist *sg;
6170  unsigned int si;
6171  dma_addr_t dma_addr = ipr_cmd->dma_addr;
6172 
6173  if (len == 0)
6174  return;
6175 
6176  if (qc->dma_dir == DMA_TO_DEVICE) {
6177  ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6178  ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6179  } else if (qc->dma_dir == DMA_FROM_DEVICE)
6180  ioadl_flags = IPR_IOADL_FLAGS_READ;
6181 
6182  ioarcb->data_transfer_length = cpu_to_be32(len);
6183  ioarcb->ioadl_len =
6184  cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6185  ioarcb->u.sis64_addr_data.data_ioadl_addr =
6186  cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
6187 
6188  for_each_sg(qc->sg, sg, qc->n_elem, si) {
6189  ioadl64->flags = cpu_to_be32(ioadl_flags);
6190  ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6191  ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6192 
6193  last_ioadl64 = ioadl64;
6194  ioadl64++;
6195  }
6196 
6197  if (likely(last_ioadl64))
6198  last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6199 }
6200 
6207 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6208  struct ata_queued_cmd *qc)
6209 {
6210  u32 ioadl_flags = 0;
6211  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6212  struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6213  struct ipr_ioadl_desc *last_ioadl = NULL;
6214  int len = qc->nbytes;
6215  struct scatterlist *sg;
6216  unsigned int si;
6217 
6218  if (len == 0)
6219  return;
6220 
6221  if (qc->dma_dir == DMA_TO_DEVICE) {
6222  ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6223  ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6224  ioarcb->data_transfer_length = cpu_to_be32(len);
6225  ioarcb->ioadl_len =
6226  cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6227  } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6228  ioadl_flags = IPR_IOADL_FLAGS_READ;
6229  ioarcb->read_data_transfer_length = cpu_to_be32(len);
6230  ioarcb->read_ioadl_len =
6231  cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6232  }
6233 
6234  for_each_sg(qc->sg, sg, qc->n_elem, si) {
6235  ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6236  ioadl->address = cpu_to_be32(sg_dma_address(sg));
6237 
6238  last_ioadl = ioadl;
6239  ioadl++;
6240  }
6241 
6242  if (likely(last_ioadl))
6243  last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6244 }
6245 
6253 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6254 {
6255  struct ata_port *ap = qc->ap;
6256  struct ipr_sata_port *sata_port = ap->private_data;
6257  struct ipr_resource_entry *res = sata_port->res;
6258  struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6259  struct ipr_cmnd *ipr_cmd;
6260  struct ipr_ioarcb *ioarcb;
6261  struct ipr_ioarcb_ata_regs *regs;
6262 
6263  if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
6264  return AC_ERR_SYSTEM;
6265 
6266  ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6267  ioarcb = &ipr_cmd->ioarcb;
6268 
6269  if (ioa_cfg->sis64) {
6270  regs = &ipr_cmd->i.ata_ioadl.regs;
6271  ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6272  } else
6273  regs = &ioarcb->u.add_data.u.regs;
6274 
6275  memset(regs, 0, sizeof(*regs));
6276  ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6277 
6278  list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6279  ipr_cmd->qc = qc;
6280  ipr_cmd->done = ipr_sata_done;
6281  ipr_cmd->ioarcb.res_handle = res->res_handle;
6282  ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6283  ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6284  ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6285  ipr_cmd->dma_use_sg = qc->n_elem;
6286 
6287  if (ioa_cfg->sis64)
6288  ipr_build_ata_ioadl64(ipr_cmd, qc);
6289  else
6290  ipr_build_ata_ioadl(ipr_cmd, qc);
6291 
6292  regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6293  ipr_copy_sata_tf(regs, &qc->tf);
6294  memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6295  ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6296 
6297  switch (qc->tf.protocol) {
6298  case ATA_PROT_NODATA:
6299  case ATA_PROT_PIO:
6300  break;
6301 
6302  case ATA_PROT_DMA:
6303  regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6304  break;
6305 
6306  case ATAPI_PROT_PIO:
6307  case ATAPI_PROT_NODATA:
6308  regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6309  break;
6310 
6311  case ATAPI_PROT_DMA:
6312  regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6313  regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6314  break;
6315 
6316  default:
6317  WARN_ON(1);
6318  return AC_ERR_INVALID;
6319  }
6320 
6321  ipr_send_command(ipr_cmd);
6322 
6323  return 0;
6324 }
6325 
6333 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6334 {
6335  struct ipr_sata_port *sata_port = qc->ap->private_data;
6336  struct ipr_ioasa_gata *g = &sata_port->ioasa;
6337  struct ata_taskfile *tf = &qc->result_tf;
6338 
6339  tf->feature = g->error;
6340  tf->nsect = g->nsect;
6341  tf->lbal = g->lbal;
6342  tf->lbam = g->lbam;
6343  tf->lbah = g->lbah;
6344  tf->device = g->device;
6345  tf->command = g->status;
6346  tf->hob_nsect = g->hob_nsect;
6347  tf->hob_lbal = g->hob_lbal;
6348  tf->hob_lbam = g->hob_lbam;
6349  tf->hob_lbah = g->hob_lbah;
6350  tf->ctl = g->alt_status;
6351 
6352  return true;
6353 }
6354 
6355 static struct ata_port_operations ipr_sata_ops = {
6356  .phy_reset = ipr_ata_phy_reset,
6357  .hardreset = ipr_sata_reset,
6358  .post_internal_cmd = ipr_ata_post_internal,
6359  .qc_prep = ata_noop_qc_prep,
6360  .qc_issue = ipr_qc_issue,
6361  .qc_fill_rtf = ipr_qc_fill_rtf,
6362  .port_start = ata_sas_port_start,
6363  .port_stop = ata_sas_port_stop
6364 };
6365 
6366 static struct ata_port_info sata_port_info = {
6367  .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6368  .pio_mask = ATA_PIO4_ONLY,
6369  .mwdma_mask = ATA_MWDMA2,
6370  .udma_mask = ATA_UDMA6,
6371  .port_ops = &ipr_sata_ops
6372 };
6373 
6374 #ifdef CONFIG_PPC_PSERIES
6375 static const u16 ipr_blocked_processors[] = {
6376  PVR_NORTHSTAR,
6377  PVR_PULSAR,
6378  PVR_POWER4,
6379  PVR_ICESTAR,
6380  PVR_SSTAR,
6381  PVR_POWER4p,
6382  PVR_630,
6383  PVR_630p
6384 };
6385 
6397 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6398 {
6399  int i;
6400 
6401  if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6402  for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6403  if (pvr_version_is(ipr_blocked_processors[i]))
6404  return 1;
6405  }
6406  }
6407  return 0;
6408 }
6409 #else
6410 #define ipr_invalid_adapter(ioa_cfg) 0
6411 #endif
6412 
6423 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6424 {
6425  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6426 
6427  ENTER;
6428  ioa_cfg->in_reset_reload = 0;
6429  ioa_cfg->reset_retries = 0;
6430  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6431  wake_up_all(&ioa_cfg->reset_wait_q);
6432 
6433  spin_unlock_irq(ioa_cfg->host->host_lock);
6434  scsi_unblock_requests(ioa_cfg->host);
6435  spin_lock_irq(ioa_cfg->host->host_lock);
6436  LEAVE;
6437 
6438  return IPR_RC_JOB_RETURN;
6439 }
6440 
6452 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6453 {
6454  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6455  struct ipr_resource_entry *res;
6456  struct ipr_hostrcb *hostrcb, *temp;
6457  int i = 0;
6458 
6459  ENTER;
6460  ioa_cfg->in_reset_reload = 0;
6461  ioa_cfg->allow_cmds = 1;
6462  ioa_cfg->reset_cmd = NULL;
6463  ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6464 
6465  list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6466  if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6467  ipr_trace;
6468  break;
6469  }
6470  }
6471  schedule_work(&ioa_cfg->work_q);
6472 
6473  list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6474  list_del(&hostrcb->queue);
6475  if (i++ < IPR_NUM_LOG_HCAMS)
6476  ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6477  else
6478  ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6479  }
6480 
6481  scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6482  dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6483 
6484  ioa_cfg->reset_retries = 0;
6485  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6486  wake_up_all(&ioa_cfg->reset_wait_q);
6487 
6488  spin_unlock(ioa_cfg->host->host_lock);
6489  scsi_unblock_requests(ioa_cfg->host);
6490  spin_lock(ioa_cfg->host->host_lock);
6491 
6492  if (!ioa_cfg->allow_cmds)
6493  scsi_block_requests(ioa_cfg->host);
6494 
6495  LEAVE;
6496  return IPR_RC_JOB_RETURN;
6497 }
6498 
6507 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6508  struct ipr_std_inq_vpids *vpids)
6509 {
6510  memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6511  memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6512  supported_dev->num_records = 1;
6513  supported_dev->data_length =
6514  cpu_to_be16(sizeof(struct ipr_supported_device));
6515  supported_dev->reserved = 0;
6516 }
6517 
6527 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6528 {
6529  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6530  struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6531  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6532  struct ipr_resource_entry *res = ipr_cmd->u.res;
6533 
6534  ipr_cmd->job_step = ipr_ioa_reset_done;
6535 
6536  list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6537  if (!ipr_is_scsi_disk(res))
6538  continue;
6539 
6540  ipr_cmd->u.res = res;
6541  ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6542 
6543  ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6544  ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6545  ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6546 
6547  ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6548  ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6549  ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6550  ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6551 
6552  ipr_init_ioadl(ipr_cmd,
6553  ioa_cfg->vpd_cbs_dma +
6554  offsetof(struct ipr_misc_cbs, supp_dev),
6555  sizeof(struct ipr_supported_device),
6556  IPR_IOADL_FLAGS_WRITE_LAST);
6557 
6558  ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6559  IPR_SET_SUP_DEVICE_TIMEOUT);
6560 
6561  if (!ioa_cfg->sis64)
6562  ipr_cmd->job_step = ipr_set_supported_devs;
6563  return IPR_RC_JOB_RETURN;
6564  }
6565 
6566  return IPR_RC_JOB_CONTINUE;
6567 }
6568 
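6569 /**
6570  * ipr_get_mode_page - Locate specified mode page
6571  * @mode_pages: mode page buffer
6572  * @page_code: page code of requested mode page
6573  * @len: minimum required length for mode page
6574  *
6575  * Return value:
6576  * pointer to mode page / NULL on failure
6577  **/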
6578 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6579  u32 page_code, u32 len)
6580 {
6581  struct ipr_mode_page_hdr *mode_hdr;
6582  u32 page_length;
6583  u32 length;
6584 
6585  if (!mode_pages || (mode_pages->hdr.length == 0))
6586  return NULL;
6587 
6588  length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6589  mode_hdr = (struct ipr_mode_page_hdr *)
6590  (mode_pages->data + mode_pages->hdr.block_desc_len);
6591 
6592  while (length) {
6593  if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6594  if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6595  return mode_hdr;
6596  break;
6597  } else {
6598  page_length = (sizeof(struct ipr_mode_page_hdr) +
6599  mode_hdr->page_length);
6600  length -= page_length;
6601  mode_hdr = (struct ipr_mode_page_hdr *)
6602  ((unsigned long)mode_hdr + page_length);
6603  }
6604  }
6605  return NULL;
6606 }
6607 
6618 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6619  struct ipr_mode_pages *mode_pages)
6620 {
6621  int i;
6622  int entry_length;
6623  struct ipr_dev_bus_entry *bus;
6624  struct ipr_mode_page28 *mode_page;
6625 
6626  mode_page = ipr_get_mode_page(mode_pages, 0x28,
6627  sizeof(struct ipr_mode_page28));
6628 
6629  entry_length = mode_page->entry_length;
6630 
6631  bus = mode_page->bus;
6632 
6633  for (i = 0; i < mode_page->num_entries; i++) {
6634  if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6635  dev_err(&ioa_cfg->pdev->dev,
6636  "Term power is absent on scsi bus %d\n",
6637  bus->res_addr.bus);
6638  }
6639 
6640  bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6641  }
6642 }
6643 
6655 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6656 {
6657  u32 max_xfer_rate;
6658  int i;
6659 
6660  for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6661  max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6662  ioa_cfg->bus_attr[i].bus_width);
6663 
6664  if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6665  ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6666  }
6667 }
6668 
6679 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6680  struct ipr_mode_pages *mode_pages)
6681 {
6682  int i, entry_length;
6683  struct ipr_dev_bus_entry *bus;
6684  struct ipr_bus_attributes *bus_attr;
6685  struct ipr_mode_page28 *mode_page;
6686 
6687  mode_page = ipr_get_mode_page(mode_pages, 0x28,
6688  sizeof(struct ipr_mode_page28));
6689 
6690  entry_length = mode_page->entry_length;
6691 
6692  /* Loop for each device bus entry */
6693  for (i = 0, bus = mode_page->bus;
6694  i < mode_page->num_entries;
6695  i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6696  if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6697  dev_err(&ioa_cfg->pdev->dev,
6698  "Invalid resource address reported: 0x%08X\n",
6699  IPR_GET_PHYS_LOC(bus->res_addr));
6700  continue;
6701  }
6702 
6703  bus_attr = &ioa_cfg->bus_attr[i];
6704  bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6705  bus->bus_width = bus_attr->bus_width;
6706  bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6707  bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6708  if (bus_attr->qas_enabled)
6709  bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6710  else
6711  bus->flags &= ~IPR_SCSI_ATTR_ENABLE_QAS;
6712  }
6713 }
6714 
6726 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6727  __be32 res_handle, u8 parm,
6728  dma_addr_t dma_addr, u8 xfer_len)
6729 {
6730  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6731 
6732  ioarcb->res_handle = res_handle;
6733  ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6734  ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6735  ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6736  ioarcb->cmd_pkt.cdb[1] = parm;
6737  ioarcb->cmd_pkt.cdb[4] = xfer_len;
6738 
6739  ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6740 }
6741 
6752 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6753 {
6754  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6755  struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6756  int length;
6757 
6758  ENTER;
6759  ipr_scsi_bus_speed_limit(ioa_cfg);
6760  ipr_check_term_power(ioa_cfg, mode_pages);
6761  ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6762  length = mode_pages->hdr.length + 1;
6763  mode_pages->hdr.length = 0;
6764 
6765  ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6766  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6767  length);
6768 
6769  ipr_cmd->job_step = ipr_set_supported_devs;
6770  ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6771  struct ipr_resource_entry, queue);
6772  ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6773 
6774  LEAVE;
6775  return IPR_RC_JOB_RETURN;
6776 }
6777 
6789 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6790  __be32 res_handle,
6791  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6792 {
6793  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6794 
6795  ioarcb->res_handle = res_handle;
6796  ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6797  ioarcb->cmd_pkt.cdb[2] = parm;
6798  ioarcb->cmd_pkt.cdb[4] = xfer_len;
6799  ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6800 
6801  ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6802 }
6803 
6813 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6814 {
6815  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6816  u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6817 
6818  dev_err(&ioa_cfg->pdev->dev,
6819  "0x%02X failed with IOASC: 0x%08X\n",
6820  ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6821 
6822  ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6823  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6824  return IPR_RC_JOB_RETURN;
6825 }
6826 
6837 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6838 {
6839  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6840  u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6841 
6842  if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6843  ipr_cmd->job_step = ipr_set_supported_devs;
6844  ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6845  struct ipr_resource_entry, queue);
6846  return IPR_RC_JOB_CONTINUE;
6847  }
6848 
6849  return ipr_reset_cmd_failed(ipr_cmd);
6850 }
6851 
6862 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6863 {
6864  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6865 
6866  ENTER;
6867  ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6868  0x28, ioa_cfg->vpd_cbs_dma +
6869  offsetof(struct ipr_misc_cbs, mode_pages),
6870  sizeof(struct ipr_mode_pages));
6871 
6872  ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6873  ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6874 
6875  ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6876 
6877  LEAVE;
6878  return IPR_RC_JOB_RETURN;
6879 }
6880 
6890 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6891 {
6892  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6893  struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6894  struct ipr_mode_page24 *mode_page;
6895  int length;
6896 
6897  ENTER;
6898  mode_page = ipr_get_mode_page(mode_pages, 0x24,
6899  sizeof(struct ipr_mode_page24));
6900 
6901  if (mode_page)
6902  mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6903 
6904  length = mode_pages->hdr.length + 1;
6905  mode_pages->hdr.length = 0;
6906 
6907  ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6908  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6909  length);
6910 
6911  ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6912  ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6913 
6914  LEAVE;
6915  return IPR_RC_JOB_RETURN;
6916 }
6917 
6928 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6929 {
6930  u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6931 
6932  if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6933  ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6934  return IPR_RC_JOB_CONTINUE;
6935  }
6936 
6937  return ipr_reset_cmd_failed(ipr_cmd);
6938 }
6939 
6950 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6951 {
6952  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6953 
6954  ENTER;
6955  ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6956  0x24, ioa_cfg->vpd_cbs_dma +
6957  offsetof(struct ipr_misc_cbs, mode_pages),
6958  sizeof(struct ipr_mode_pages));
6959 
6960  ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6961  ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6962 
6963  ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6964 
6965  LEAVE;
6966  return IPR_RC_JOB_RETURN;
6967 }
6968 
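6969 /**
6970  * ipr_init_res_table - Initialize the resource table
6971  * @ipr_cmd: ipr command struct
6972  *
6973  * This function looks through the existing resource table, comparing
6974  * it with the config table. This function will take care of old/new
6975  * devices and schedule adding/removing them from the mid-layer
6976  * as appropriate.
6977  *
6978  * Return value:
6979  * IPR_RC_JOB_CONTINUE
6980  **/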
6981 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6982 {
6983  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6984  struct ipr_resource_entry *res, *temp;
6985  struct ipr_config_table_entry_wrapper cfgtew;
6986  int entries, found, flag, i;
6987  LIST_HEAD(old_res);
6988 
6989  ENTER;
6990  if (ioa_cfg->sis64)
6991  flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6992  else
6993  flag = ioa_cfg->u.cfg_table->hdr.flags;
6994 
6995  if (flag & IPR_UCODE_DOWNLOAD_REQ)
6996  dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6997 
6998  list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6999  list_move_tail(&res->queue, &old_res);
7000 
7001  if (ioa_cfg->sis64)
7002  entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7003  else
7004  entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7005 
7006  for (i = 0; i < entries; i++) {
7007  if (ioa_cfg->sis64)
7008  cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7009  else
7010  cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7011  found = 0;
7012 
7013  list_for_each_entry_safe(res, temp, &old_res, queue) {
7014  if (ipr_is_same_device(res, &cfgtew)) {
7015  list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7016  found = 1;
7017  break;
7018  }
7019  }
7020 
7021  if (!found) {
7022  if (list_empty(&ioa_cfg->free_res_q)) {
7023  dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7024  break;
7025  }
7026 
7027  found = 1;
7028  res = list_entry(ioa_cfg->free_res_q.next,
7029  struct ipr_resource_entry, queue);
7030  list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7031  ipr_init_res_entry(res, &cfgtew);
7032  res->add_to_ml = 1;
7033  } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7034  res->sdev->allow_restart = 1;
7035 
7036  if (found)
7037  ipr_update_res_entry(res, &cfgtew);
7038  }
7039 
7040  list_for_each_entry_safe(res, temp, &old_res, queue) {
7041  if (res->sdev) {
7042  res->del_from_ml = 1;
7043  res->res_handle = IPR_INVALID_RES_HANDLE;
7044  list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7045  }
7046  }
7047 
7048  list_for_each_entry_safe(res, temp, &old_res, queue) {
7049  ipr_clear_res_target(res);
7050  list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7051  }
7052 
7053  if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7054  ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7055  else
7056  ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7057 
7058  LEAVE;
7059  return IPR_RC_JOB_CONTINUE;
7060 }
7061 
7072 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7073 {
7074  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7075  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7076  struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7077  struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7078 
7079  ENTER;
7080  if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7081  ioa_cfg->dual_raid = 1;
7082  dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7083  ucode_vpd->major_release, ucode_vpd->card_type,
7084  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7085  ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7086  ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7087 
7088  ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7089  ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7090  ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7091  ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7092 
7093  ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7094  IPR_IOADL_FLAGS_READ_LAST);
7095 
7096  ipr_cmd->job_step = ipr_init_res_table;
7097 
7098  ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7099 
7100  LEAVE;
7101  return IPR_RC_JOB_RETURN;
7102 }
7103 
7113 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7114  dma_addr_t dma_addr, u8 xfer_len)
7115 {
7116  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7117 
7118  ENTER;
7119  ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7120  ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7121 
7122  ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7123  ioarcb->cmd_pkt.cdb[1] = flags;
7124  ioarcb->cmd_pkt.cdb[2] = page;
7125  ioarcb->cmd_pkt.cdb[4] = xfer_len;
7126 
7127  ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7128 
7129  ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7130  LEAVE;
7131 }
7132 
7143 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7144 {
7145  int i;
7146 
7147  for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7148  if (page0->page[i] == page)
7149  return 1;
7150 
7151  return 0;
7152 }
7153 
7164 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7165 {
7166  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7167  struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7168  struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7169 
7170  ENTER;
7171  ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7172  memset(cap, 0, sizeof(*cap));
7173 
7174  if (ipr_inquiry_page_supported(page0, 0xD0)) {
7175  ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7176  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7177  sizeof(struct ipr_inquiry_cap));
7178  return IPR_RC_JOB_RETURN;
7179  }
7180 
7181  LEAVE;
7182  return IPR_RC_JOB_CONTINUE;
7183 }
7184 
7195 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7196 {
7197  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7198 
7199  ENTER;
7200 
7201  ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7202 
7203  ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7204  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7205  sizeof(struct ipr_inquiry_page3));
7206 
7207  LEAVE;
7208  return IPR_RC_JOB_RETURN;
7209 }
7210 
7221 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7222 {
7223  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7224  char type[5];
7225 
7226  ENTER;
7227 
7228  /* Grab the type out of the VPD and store it away */
7229  memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7230  type[4] = '\0';
7231  ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7232 
7233  ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7234 
7235  ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7236  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7237  sizeof(struct ipr_inquiry_page0));
7238 
7239  LEAVE;
7240  return IPR_RC_JOB_RETURN;
7241 }
7242 
7252 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7253 {
7254  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7255 
7256  ENTER;
7257  ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7258 
7259  ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7260  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7261  sizeof(struct ipr_ioa_vpd));
7262 
7263  LEAVE;
7264  return IPR_RC_JOB_RETURN;
7265 }
7266 
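7267 /**
7268  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ
7269  * @ipr_cmd: ipr command struct
7270  *
7271  * This function sends an Identify Host Request Response Queue
7272  * command to establish the HRRQ with the adapter.
7273  *
7274  * Return value:
7275  * IPR_RC_JOB_RETURN
7276  **/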
7277 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7278 {
7279  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7280  struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7281 
7282  ENTER;
7283  dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7284 
7285  ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7286  ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7287 
7288  ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7289  if (ioa_cfg->sis64)
7290  ioarcb->cmd_pkt.cdb[1] = 0x1;
7291  ioarcb->cmd_pkt.cdb[2] =
7292  ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7293  ioarcb->cmd_pkt.cdb[3] =
7294  ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7295  ioarcb->cmd_pkt.cdb[4] =
7296  ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7297  ioarcb->cmd_pkt.cdb[5] =
7298  ((u64) ioa_cfg->host_rrq_dma) & 0xff;
7299  ioarcb->cmd_pkt.cdb[7] =
7300  ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7301  ioarcb->cmd_pkt.cdb[8] =
7302  (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7303 
7304  if (ioa_cfg->sis64) {
7305  ioarcb->cmd_pkt.cdb[10] =
7306  ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7307  ioarcb->cmd_pkt.cdb[11] =
7308  ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7309  ioarcb->cmd_pkt.cdb[12] =
7310  ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7311  ioarcb->cmd_pkt.cdb[13] =
7312  ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7313  }
7314 
7315  ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7316 
7317  ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7318 
7319  LEAVE;
7320  return IPR_RC_JOB_RETURN;
7321 }
7322 
7336 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7337 {
7338  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7339  unsigned long lock_flags = 0;
7340 
7341  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7342 
7343  if (ioa_cfg->reset_cmd == ipr_cmd) {
7344  list_del(&ipr_cmd->queue);
7345  ipr_cmd->done(ipr_cmd);
7346  }
7347 
7348  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7349 }
7350 
7365 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7366  unsigned long timeout)
7367 {
7368  list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7369  ipr_cmd->done = ipr_reset_ioa_job;
7370 
7371  ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7372  ipr_cmd->timer.expires = jiffies + timeout;
7373  ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7374  add_timer(&ipr_cmd->timer);
7375 }
7376 
7384 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7385 {
7386  memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7387 
7388  /* Initialize Host RRQ pointers */
7389  ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7390  ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7391  ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7392  ioa_cfg->toggle_bit = 1;
7393 
7394  /* Zero out config table */
7395  memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7396 }
7397 
7405 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7406 {
7407  unsigned long stage, stage_time;
7408  u32 feedback;
7409  volatile u32 int_reg;
7410  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7411  u64 maskval = 0;
7412 
7413  feedback = readl(ioa_cfg->regs.init_feedback_reg);
7414  stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7415  stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7416 
7417  ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7418 
7419  /* sanity check the stage_time value */
7420  if (stage_time == 0)
7421  stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7422  else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7423  stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7424  else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7425  stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7426 
7427  if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7428  writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7429  int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7430  stage_time = ioa_cfg->transop_timeout;
7431  ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7432  } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7433  int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7434  if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7435  ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7436  maskval = IPR_PCII_IPL_STAGE_CHANGE;
7437  maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7438  writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7439  int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7440  return IPR_RC_JOB_CONTINUE;
7441  }
7442  }
7443 
7444  ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7445  ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7446  ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7447  ipr_cmd->done = ipr_reset_ioa_job;
7448  add_timer(&ipr_cmd->timer);
7449  list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7450 
7451  return IPR_RC_JOB_RETURN;
7452 }
7453 
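7454 /**
7455  * ipr_reset_enable_ioa - Enable the IOA following a reset
7456  * @ipr_cmd: ipr command struct
7457  *
7458  * This function reinitializes some control blocks and
7459  * enables destructive diagnostics on the adapter.
7460  *
7461  * Return value:
7462  * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
7463  **/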
7464 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7465 {
7466  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7467  volatile u32 int_reg;
7468  volatile u64 maskval;
7469 
7470  ENTER;
7471  ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7472  ipr_init_ioa_mem(ioa_cfg);
7473 
7474  ioa_cfg->allow_interrupts = 1;
7475  if (ioa_cfg->sis64) {
7476  /* Set the adapter to the correct endian mode. */
7477  writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7478  int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7479  }
7480 
7481  int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7482 
7483  if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7484  writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7485  ioa_cfg->regs.clr_interrupt_mask_reg32);
7486  int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7487  return IPR_RC_JOB_CONTINUE;
7488  }
7489 
7490  /* Enable destructive diagnostics on IOA */
7491  writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7492 
7493  if (ioa_cfg->sis64) {
7494  maskval = IPR_PCII_IPL_STAGE_CHANGE;
7495  maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7496  writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7497  } else
7498  writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7499 
7500  int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7501 
7502  dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7503 
7504  if (ioa_cfg->sis64) {
7505  ipr_cmd->job_step = ipr_reset_next_stage;
7506  return IPR_RC_JOB_CONTINUE;
7507  }
7508 
7509  ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7510  ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7511  ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7512  ipr_cmd->done = ipr_reset_ioa_job;
7513  add_timer(&ipr_cmd->timer);
7514  list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7515 
7516  LEAVE;
7517  return IPR_RC_JOB_RETURN;
7518 }
7519 
7530 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7531 {
7532  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7533 
7534  if (ioa_cfg->sdt_state == GET_DUMP)
7535  ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7536  else if (ioa_cfg->sdt_state == READ_DUMP)
7537  ioa_cfg->sdt_state = ABORT_DUMP;
7538 
7539  ioa_cfg->dump_timeout = 1;
7540  ipr_cmd->job_step = ipr_reset_alert;
7541 
7542  return IPR_RC_JOB_CONTINUE;
7543 }
7544 
7555 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7556 {
7557  ioa_cfg->errors_logged++;
7558  dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7559 }
7560 
7571 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7572 {
7573  unsigned long mailbox;
7574  struct ipr_hostrcb *hostrcb;
7575  struct ipr_uc_sdt sdt;
7576  int rc, length;
7577  u32 ioasc;
7578 
7579  mailbox = readl(ioa_cfg->ioa_mailbox);
7580 
7581  if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7582  ipr_unit_check_no_data(ioa_cfg);
7583  return;
7584  }
7585 
7586  memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7587  rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7588  (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7589 
7590  if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7591  ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7592  (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7593  ipr_unit_check_no_data(ioa_cfg);
7594  return;
7595  }
7596 
7597  /* Find length of the first sdt entry (UC buffer) */
7598  if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7599  length = be32_to_cpu(sdt.entry[0].end_token);
7600  else
7601  length = (be32_to_cpu(sdt.entry[0].end_token) -
7602  be32_to_cpu(sdt.entry[0].start_token)) &
7603  IPR_FMT2_MBX_ADDR_MASK;
7604 
7605  hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7606  struct ipr_hostrcb, queue);
7607  list_del(&hostrcb->queue);
7608  memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7609 
7610  rc = ipr_get_ldump_data_section(ioa_cfg,
7611  be32_to_cpu(sdt.entry[0].start_token),
7612  (__be32 *)&hostrcb->hcam,
7613  min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7614 
7615  if (!rc) {
7616  ipr_handle_log_data(ioa_cfg, hostrcb);
7617  ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7618  if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7619  ioa_cfg->sdt_state == GET_DUMP)
7620  ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7621  } else
7622  ipr_unit_check_no_data(ioa_cfg);
7623 
7624  list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7625 }
7626 
7636 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7637 {
7638  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7639 
7640  ENTER;
7641  ioa_cfg->ioa_unit_checked = 0;
7642  ipr_get_unit_check_buffer(ioa_cfg);
7643  ipr_cmd->job_step = ipr_reset_alert;
7644  ipr_reset_start_timer(ipr_cmd, 0);
7645 
7646  LEAVE;
7647  return IPR_RC_JOB_RETURN;
7648 }
7649 
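7650 /**
7651  * ipr_reset_restore_cfg_space - Restore PCI config space
7652  * @ipr_cmd: ipr command struct
7653  *
7654  * Description: This function restores the saved PCI config space of
7655  * the adapter, fails all outstanding ops back to the callers, and
7656  * fetches the dump/unit check if applicable to this reset.
7657  *
7658  * Return value:
7659  * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7660  **/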
7661 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7662 {
7663  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7664  u32 int_reg;
7665 
7666  ENTER;
7667  ioa_cfg->pdev->state_saved = true;
7668  pci_restore_state(ioa_cfg->pdev);
7669 
7670  if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7671  ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7672  return IPR_RC_JOB_CONTINUE;
7673  }
7674 
7675  ipr_fail_all_ops(ioa_cfg);
7676 
7677  if (ioa_cfg->sis64) {
7678  /* Set the adapter to the correct endian mode. */
7679  writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7680  int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7681  }
7682 
7683  if (ioa_cfg->ioa_unit_checked) {
7684  if (ioa_cfg->sis64) {
7685  ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7686  ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7687  return IPR_RC_JOB_RETURN;
7688  } else {
7689  ioa_cfg->ioa_unit_checked = 0;
7690  ipr_get_unit_check_buffer(ioa_cfg);
7691  ipr_cmd->job_step = ipr_reset_alert;
7692  ipr_reset_start_timer(ipr_cmd, 0);
7693  return IPR_RC_JOB_RETURN;
7694  }
7695  }
7696 
7697  if (ioa_cfg->in_ioa_bringdown) {
7698  ipr_cmd->job_step = ipr_ioa_bringdown_done;
7699  } else {
7700  ipr_cmd->job_step = ipr_reset_enable_ioa;
7701 
7702  if (GET_DUMP == ioa_cfg->sdt_state) {
7703  ioa_cfg->sdt_state = READ_DUMP;
7704  ioa_cfg->dump_timeout = 0;
7705  if (ioa_cfg->sis64)
7706  ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
7707  else
7708  ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
7709  ipr_cmd->job_step = ipr_reset_wait_for_dump;
7710  schedule_work(&ioa_cfg->work_q);
7711  return IPR_RC_JOB_RETURN;
7712  }
7713  }
7714 
7715  LEAVE;
7716  return IPR_RC_JOB_CONTINUE;
7717 }
7718 
7728 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7729 {
7730  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7731 
7732  ENTER;
7733  if (ioa_cfg->cfg_locked)
7734  pci_cfg_access_unlock(ioa_cfg->pdev);
7735  ioa_cfg->cfg_locked = 0;
7736  ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7737  LEAVE;
7738  return IPR_RC_JOB_CONTINUE;
7739 }
7740 
7750 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7751 {
7752  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7753  int rc = PCIBIOS_SUCCESSFUL;
7754 
7755  ENTER;
7756  if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7757  writel(IPR_UPROCI_SIS64_START_BIST,
7758  ioa_cfg->regs.set_uproc_interrupt_reg32);
7759  else
7760  rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7761 
7762  if (rc == PCIBIOS_SUCCESSFUL) {
7763  ipr_cmd->job_step = ipr_reset_bist_done;
7764  ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7765  rc = IPR_RC_JOB_RETURN;
7766  } else {
7767  if (ioa_cfg->cfg_locked)
7768  pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
7769  ioa_cfg->cfg_locked = 0;
7770  ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7771  rc = IPR_RC_JOB_CONTINUE;
7772  }
7773 
7774  LEAVE;
7775  return rc;
7776 }
7777 
7787 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7788 {
7789  ENTER;
7790  pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7791  ipr_cmd->job_step = ipr_reset_bist_done;
7792  ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7793  LEAVE;
7794  return IPR_RC_JOB_RETURN;
7795 }
7796 
7806 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7807 {
7808  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7809  struct pci_dev *pdev = ioa_cfg->pdev;
7810 
7811  ENTER;
7812  pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7813  ipr_cmd->job_step = ipr_reset_slot_reset_done;
7814  ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7815  LEAVE;
7816  return IPR_RC_JOB_RETURN;
7817 }
7818 
7828 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
7829 {
7830  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7831  int rc = IPR_RC_JOB_CONTINUE;
7832 
7833  if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
7834  ioa_cfg->cfg_locked = 1;
7835  ipr_cmd->job_step = ioa_cfg->reset;
7836  } else {
7837  if (ipr_cmd->u.time_left) {
7838  rc = IPR_RC_JOB_RETURN;
7839  ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7840  ipr_reset_start_timer(ipr_cmd,
7841  IPR_CHECK_FOR_RESET_TIMEOUT);
7842  } else {
7843  ipr_cmd->job_step = ioa_cfg->reset;
7844  dev_err(&ioa_cfg->pdev->dev,
7845  "Timed out waiting to lock config access. Resetting anyway.\n");
7846  }
7847  }
7848 
7849  return rc;
7850 }
7851 
7861 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
7862 {
7863  ipr_cmd->ioa_cfg->cfg_locked = 0;
7864  ipr_cmd->job_step = ipr_reset_block_config_access_wait;
7865  ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7866  return IPR_RC_JOB_CONTINUE;
7867 }
7868 
7876 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7877 {
7878  volatile u32 temp_reg;
7879 
7880  temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7881  return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7882 }
7883 
7899 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7900 {
7901  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7902  int rc = IPR_RC_JOB_RETURN;
7903 
7904  if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7905  ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7906  ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7907  } else {
7908  ipr_cmd->job_step = ipr_reset_block_config_access;
7909  rc = IPR_RC_JOB_CONTINUE;
7910  }
7911 
7912  return rc;
7913 }
7914 
7927 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7928 {
7929  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7930  u16 cmd_reg;
7931  int rc;
7932 
7933  ENTER;
7934  rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7935 
7936  if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7937  ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7938  writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7939  ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7940  } else {
7941  ipr_cmd->job_step = ipr_reset_block_config_access;
7942  }
7943 
7944  ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7945  ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7946 
7947  LEAVE;
7948  return IPR_RC_JOB_RETURN;
7949 }
7950 
7960 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7961 {
7962  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7963  struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7964 
7965  pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7966  sglist->num_sg, DMA_TO_DEVICE);
7967 
7968  ipr_cmd->job_step = ipr_reset_alert;
7969  return IPR_RC_JOB_CONTINUE;
7970 }
7971 
7982 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7983 {
7984  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7985  struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7986 
7987  ENTER;
7988  ipr_cmd->job_step = ipr_reset_alert;
7989 
7990  if (!sglist)
7991  return IPR_RC_JOB_CONTINUE;
7992 
7993  ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7994  ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7995  ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7996  ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7997  ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7998  ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7999  ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8000 
8001  if (ioa_cfg->sis64)
8002  ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8003  else
8004  ipr_build_ucode_ioadl(ipr_cmd, sglist);
8005  ipr_cmd->job_step = ipr_reset_ucode_download_done;
8006 
8007  ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8008  IPR_WRITE_BUFFER_TIMEOUT);
8009 
8010  LEAVE;
8011  return IPR_RC_JOB_RETURN;
8012 }
8013 
8025 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8026 {
8027  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8028  enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8029  unsigned long timeout;
8030  int rc = IPR_RC_JOB_CONTINUE;
8031 
8032  ENTER;
8033  if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
8034  ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8035  ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8036  ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8037  ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8038 
8039  if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8040  timeout = IPR_SHUTDOWN_TIMEOUT;
8041  else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8042  timeout = IPR_INTERNAL_TIMEOUT;
8043  else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8044  timeout = IPR_DUAL_IOA_ABBREVIATED_SHUTDOWN_TIMEOUT;
8045  else
8046  timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8047 
8048  ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8049 
8050  rc = IPR_RC_JOB_RETURN;
8051  ipr_cmd->job_step = ipr_reset_ucode_download;
8052  } else
8053  ipr_cmd->job_step = ipr_reset_alert;
8054 
8055  LEAVE;
8056  return rc;
8057 }
8058 
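8059 /**
8060  * ipr_reset_ioa_job - Adapter reset job
8061  * @ipr_cmd: ipr command struct
8062  *
8063  * Description: This function is the job router for the adapter reset job,
8064  * looping through the reset job steps until one returns IPR_RC_JOB_RETURN.
8065  *
8066  * Return value: none
8067  **/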
8068 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8069 {
8070  u32 rc, ioasc;
8071  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8072 
8073  do {
8074  ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8075 
8076  if (ioa_cfg->reset_cmd != ipr_cmd) {
8077  /*
8078  * We are doing nested adapter resets and this is
8079  * not the current reset job.
8080  */
8081  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8082  return;
8083  }
8084 
8085  if (IPR_IOASC_SENSE_KEY(ioasc)) {
8086  rc = ipr_cmd->job_step_failed(ipr_cmd);
8087  if (rc == IPR_RC_JOB_RETURN)
8088  return;
8089  }
8090 
8091  ipr_reinit_ipr_cmnd(ipr_cmd);
8092  ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8093  rc = ipr_cmd->job_step(ipr_cmd);
8094  } while (rc == IPR_RC_JOB_CONTINUE);
8095 }
8096 
8111 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8112  int (*job_step) (struct ipr_cmnd *),
8113  enum ipr_shutdown_type shutdown_type)
8114 {
8115  struct ipr_cmnd *ipr_cmd;
8116 
8117  ioa_cfg->in_reset_reload = 1;
8118  ioa_cfg->allow_cmds = 0;
8119  scsi_block_requests(ioa_cfg->host);
8120 
8121  ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8122  ioa_cfg->reset_cmd = ipr_cmd;
8123  ipr_cmd->job_step = job_step;
8124  ipr_cmd->u.shutdown_type = shutdown_type;
8125 
8126  ipr_reset_ioa_job(ipr_cmd);
8127 }
8128 
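8129 /**
8130  * ipr_initiate_ioa_reset - Initiate an adapter reset
8131  * @ioa_cfg: ioa config struct
8132  * @shutdown_type: shutdown type
8133  *
8134  * Description: This function will initiate the reset of the given adapter.
8135  * If the caller has exceeded the retry limit, the adapter will be
8136  * taken offline instead.
8137  *
8138  * Return value:
8139  * none
8140  **/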
8141 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8142  enum ipr_shutdown_type shutdown_type)
8143 {
8144  if (ioa_cfg->ioa_is_dead)
8145  return;
8146 
8147  if (ioa_cfg->in_reset_reload) {
8148  if (ioa_cfg->sdt_state == GET_DUMP)
8149  ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8150  else if (ioa_cfg->sdt_state == READ_DUMP)
8151  ioa_cfg->sdt_state = ABORT_DUMP;
8152  }
8153 
8154  if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8155  dev_err(&ioa_cfg->pdev->dev,
8156  "IOA taken offline - error recovery failed\n");
8157 
8158  ioa_cfg->reset_retries = 0;
8159  ioa_cfg->ioa_is_dead = 1;
8160 
8161  if (ioa_cfg->in_ioa_bringdown) {
8162  ioa_cfg->reset_cmd = NULL;
8163  ioa_cfg->in_reset_reload = 0;
8164  ipr_fail_all_ops(ioa_cfg);
8165  wake_up_all(&ioa_cfg->reset_wait_q);
8166 
8167  spin_unlock_irq(ioa_cfg->host->host_lock);
8168  scsi_unblock_requests(ioa_cfg->host);
8169  spin_lock_irq(ioa_cfg->host->host_lock);
8170  return;
8171  } else {
8172  ioa_cfg->in_ioa_bringdown = 1;
8173  shutdown_type = IPR_SHUTDOWN_NONE;
8174  }
8175  }
8176 
8177  _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8178  shutdown_type);
8179 }
8180 
8189 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8190 {
8191  /* Disallow new interrupts, avoid loop */
8192  ipr_cmd->ioa_cfg->allow_interrupts = 0;
8193  list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
8194  ipr_cmd->done = ipr_reset_ioa_job;
8195  return IPR_RC_JOB_RETURN;
8196 }
8197 
8206 static void ipr_pci_frozen(struct pci_dev *pdev)
8207 {
8208  unsigned long flags = 0;
8209  struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8210 
8211  spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8212  _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8213  spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8214 }
8215 
8224 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8225 {
8226  unsigned long flags = 0;
8227  struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8228 
8229  spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8230  if (ioa_cfg->needs_warm_reset)
8231  ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8232  else
8233  _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8234  IPR_SHUTDOWN_NONE);
8235  spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8236  return PCI_ERS_RESULT_RECOVERED;
8237 }
8238 
8246 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8247 {
8248  unsigned long flags = 0;
8249  struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8250 
8251  spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8252  if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8253  ioa_cfg->sdt_state = ABORT_DUMP;
8254  ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
8255  ioa_cfg->in_ioa_bringdown = 1;
8256  ioa_cfg->allow_cmds = 0;
8257  ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8258  spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8259 }
8260 
8271 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8272  pci_channel_state_t state)
8273 {
8274  switch (state) {
8275  case pci_channel_io_frozen:
8276  ipr_pci_frozen(pdev);
8277  return PCI_ERS_RESULT_NEED_RESET;
8278  case pci_channel_io_perm_failure:
8279  ipr_pci_perm_failure(pdev);
8280  return PCI_ERS_RESULT_DISCONNECT;
8281  break;
8282  default:
8283  break;
8284  }
8285  return PCI_ERS_RESULT_NEED_RESET;
8286 }
8287 
8299 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8300 {
8301  int rc = 0;
8302  unsigned long host_lock_flags = 0;
8303 
8304  ENTER;
8305  spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8306  dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8307  if (ioa_cfg->needs_hard_reset) {
8308  ioa_cfg->needs_hard_reset = 0;
8309  ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8310  } else
8311  _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8312  IPR_SHUTDOWN_NONE);
8313 
8314  spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8315  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8316  spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8317 
8318  if (ioa_cfg->ioa_is_dead) {
8319  rc = -EIO;
8320  } else if (ipr_invalid_adapter(ioa_cfg)) {
8321  if (!ipr_testmode)
8322  rc = -EIO;
8323 
8324  dev_err(&ioa_cfg->pdev->dev,
8325  "Adapter not supported in this hardware configuration.\n");
8326  }
8327 
8328  spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8329 
8330  LEAVE;
8331  return rc;
8332 }
8333 
8341 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8342 {
8343  int i;
8344 
8345  for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8346  if (ioa_cfg->ipr_cmnd_list[i])
8347  pci_pool_free(ioa_cfg->ipr_cmd_pool,
8348  ioa_cfg->ipr_cmnd_list[i],
8349  ioa_cfg->ipr_cmnd_list_dma[i]);
8350 
8351  ioa_cfg->ipr_cmnd_list[i] = NULL;
8352  }
8353 
8354  if (ioa_cfg->ipr_cmd_pool)
8355  pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8356 
8357  kfree(ioa_cfg->ipr_cmnd_list);
8358  kfree(ioa_cfg->ipr_cmnd_list_dma);
8359  ioa_cfg->ipr_cmnd_list = NULL;
8360  ioa_cfg->ipr_cmnd_list_dma = NULL;
8361  ioa_cfg->ipr_cmd_pool = NULL;
8362 }
8363 
8371 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8372 {
8373  int i;
8374 
8375  kfree(ioa_cfg->res_entries);
8376  pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8377  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8378  ipr_free_cmd_blks(ioa_cfg);
8379  pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8380  ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8381  pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8382  ioa_cfg->u.cfg_table,
8383  ioa_cfg->cfg_table_dma);
8384 
8385  for (i = 0; i < IPR_NUM_HCAMS; i++) {
8386  pci_free_consistent(ioa_cfg->pdev,
8387  sizeof(struct ipr_hostrcb),
8388  ioa_cfg->hostrcb[i],
8389  ioa_cfg->hostrcb_dma[i]);
8390  }
8391 
8392  ipr_free_dump(ioa_cfg);
8393  kfree(ioa_cfg->trace);
8394 }
8395 
8406 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8407 {
8408  struct pci_dev *pdev = ioa_cfg->pdev;
8409 
8410  ENTER;
8411  free_irq(pdev->irq, ioa_cfg);
8412  pci_disable_msi(pdev);
8413  iounmap(ioa_cfg->hdw_dma_regs);
8414  pci_release_regions(pdev);
8415  ipr_free_mem(ioa_cfg);
8416  scsi_host_put(ioa_cfg->host);
8417  pci_disable_device(pdev);
8418  LEAVE;
8419 }
8420 
8428 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8429 {
8430  struct ipr_cmnd *ipr_cmd;
8431  struct ipr_ioarcb *ioarcb;
8432  dma_addr_t dma_addr;
8433  int i;
8434 
8435  ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8436  sizeof(struct ipr_cmnd), 512, 0);
8437 
8438  if (!ioa_cfg->ipr_cmd_pool)
8439  return -ENOMEM;
8440 
8441  ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8442  ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8443 
8444  if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8445  ipr_free_cmd_blks(ioa_cfg);
8446  return -ENOMEM;
8447  }
8448 
8449  for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8450  ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8451 
8452  if (!ipr_cmd) {
8453  ipr_free_cmd_blks(ioa_cfg);
8454  return -ENOMEM;
8455  }
8456 
8457  memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8458  ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8459  ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8460 
8461  ioarcb = &ipr_cmd->ioarcb;
8462  ipr_cmd->dma_addr = dma_addr;
8463  if (ioa_cfg->sis64)
8464  ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8465  else
8466  ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8467 
8468  ioarcb->host_response_handle = cpu_to_be32(i << 2);
8469  if (ioa_cfg->sis64) {
8470  ioarcb->u.sis64_addr_data.data_ioadl_addr =
8471  cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8472  ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8473  cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8474  } else {
8475  ioarcb->write_ioadl_addr =
8476  cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8477  ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8478  ioarcb->ioasa_host_pci_addr =
8479  cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8480  }
8481  ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8482  ipr_cmd->cmd_index = i;
8483  ipr_cmd->ioa_cfg = ioa_cfg;
8484  ipr_cmd->sense_buffer_dma = dma_addr +
8485  offsetof(struct ipr_cmnd, sense_buffer);
8486 
8487  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8488  }
8489 
8490  return 0;
8491 }
8492 
8500 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8501 {
8502  struct pci_dev *pdev = ioa_cfg->pdev;
8503  int i, rc = -ENOMEM;
8504 
8505  ENTER;
8506  ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8507  ioa_cfg->max_devs_supported, GFP_KERNEL);
8508 
8509  if (!ioa_cfg->res_entries)
8510  goto out;
8511 
8512  if (ioa_cfg->sis64) {
8513  ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8514  BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8515  ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8516  BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8517  ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8518  BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8519  }
8520 
8521  for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8522  list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8523  ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8524  }
8525 
8526  ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8527  sizeof(struct ipr_misc_cbs),
8528  &ioa_cfg->vpd_cbs_dma);
8529 
8530  if (!ioa_cfg->vpd_cbs)
8531  goto out_free_res_entries;
8532 
8533  if (ipr_alloc_cmd_blks(ioa_cfg))
8534  goto out_free_vpd_cbs;
8535 
8536  ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8537  sizeof(u32) * IPR_NUM_CMD_BLKS,
8538  &ioa_cfg->host_rrq_dma);
8539 
8540  if (!ioa_cfg->host_rrq)
8541  goto out_ipr_free_cmd_blocks;
8542 
8543  ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8544  ioa_cfg->cfg_table_size,
8545  &ioa_cfg->cfg_table_dma);
8546 
8547  if (!ioa_cfg->u.cfg_table)
8548  goto out_free_host_rrq;
8549 
8550  for (i = 0; i < IPR_NUM_HCAMS; i++) {
8551  ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8552  sizeof(struct ipr_hostrcb),
8553  &ioa_cfg->hostrcb_dma[i]);
8554 
8555  if (!ioa_cfg->hostrcb[i])
8556  goto out_free_hostrcb_dma;
8557 
8558  ioa_cfg->hostrcb[i]->hostrcb_dma =
8559  ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8560  ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8561  list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8562  }
8563 
8564  ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8565  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8566 
8567  if (!ioa_cfg->trace)
8568  goto out_free_hostrcb_dma;
8569 
8570  rc = 0;
8571 out:
8572  LEAVE;
8573  return rc;
8574 
8575 out_free_hostrcb_dma:
8576  while (i-- > 0) {
8577  pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8578  ioa_cfg->hostrcb[i],
8579  ioa_cfg->hostrcb_dma[i]);
8580  }
8581  pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8582  ioa_cfg->u.cfg_table,
8583  ioa_cfg->cfg_table_dma);
8584 out_free_host_rrq:
8585  pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8586  ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8587 out_ipr_free_cmd_blocks:
8588  ipr_free_cmd_blks(ioa_cfg);
8589 out_free_vpd_cbs:
8590  pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8591  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8592 out_free_res_entries:
8593  kfree(ioa_cfg->res_entries);
8594  goto out;
8595 }
8596 
8604 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8605 {
8606  int i;
8607 
8608  for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8609  ioa_cfg->bus_attr[i].bus = i;
8610  ioa_cfg->bus_attr[i].qas_enabled = 0;
8611  ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8612  if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8613  ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8614  else
8615  ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8616  }
8617 }
8618 
8628 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8629  struct Scsi_Host *host, struct pci_dev *pdev)
8630 {
8631  const struct ipr_interrupt_offsets *p;
8632  struct ipr_interrupts *t;
8633  void __iomem *base;
8634 
8635  ioa_cfg->host = host;
8636  ioa_cfg->pdev = pdev;
8637  ioa_cfg->log_level = ipr_log_level;
8638  ioa_cfg->doorbell = IPR_DOORBELL;
8639  sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8646  sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8647 
8648  INIT_LIST_HEAD(&ioa_cfg->free_q);
8649  INIT_LIST_HEAD(&ioa_cfg->pending_q);
8650  INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8651  INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8652  INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8653  INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8654  INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8655  init_waitqueue_head(&ioa_cfg->reset_wait_q);
8656  init_waitqueue_head(&ioa_cfg->msi_wait_q);
8657  ioa_cfg->sdt_state = INACTIVE;
8658 
8659  ipr_initialize_bus_attr(ioa_cfg);
8660  ioa_cfg->max_devs_supported = ipr_max_devs;
8661 
8662  if (ioa_cfg->sis64) {
8663  host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8664  host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8665  if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8666  ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8667  } else {
8668  host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8669  host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8670  if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8671  ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8672  }
8673  host->max_channel = IPR_MAX_BUS_TO_SCAN;
8674  host->unique_id = host->host_no;
8675  host->max_cmd_len = IPR_MAX_CDB_LEN;
8676  host->can_queue = ioa_cfg->max_cmds;
8677  pci_set_drvdata(pdev, ioa_cfg);
8678 
8679  p = &ioa_cfg->chip_cfg->regs;
8680  t = &ioa_cfg->regs;
8681  base = ioa_cfg->hdw_dma_regs;
8682 
8683  t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8684  t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8685  t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
8686  t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8687  t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
8688  t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8689  t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
8690  t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8691  t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
8692  t->ioarrin_reg = base + p->ioarrin_reg;
8693  t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8694  t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
8695  t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8696  t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
8697  t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8698  t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8699 
8700  if (ioa_cfg->sis64) {
8701  t->init_feedback_reg = base + p->init_feedback_reg;
8702  t->dump_addr_reg = base + p->dump_addr_reg;
8703  t->dump_data_reg = base + p->dump_data_reg;
8704  t->endian_swap_reg = base + p->endian_swap_reg;
8705  }
8706 }
8707 
8715 static const struct ipr_chip_t * __devinit
8716 ipr_get_chip_info(const struct pci_device_id *dev_id)
8717 {
8718  int i;
8719 
8720  for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8721  if (ipr_chip[i].vendor == dev_id->vendor &&
8722  ipr_chip[i].device == dev_id->device)
8723  return &ipr_chip[i];
8724  return NULL;
8725 }
8726 
8737 static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8738 {
8739  struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8740  unsigned long lock_flags = 0;
8741  irqreturn_t rc = IRQ_HANDLED;
8742 
8743  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8744 
8745  ioa_cfg->msi_received = 1;
8746  wake_up(&ioa_cfg->msi_wait_q);
8747 
8748  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8749  return rc;
8750 }
8751 
8764 static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8765  struct pci_dev *pdev)
8766 {
8767  int rc;
8768  volatile u32 int_reg;
8769  unsigned long lock_flags = 0;
8770 
8771  ENTER;
8772 
8773  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8774  init_waitqueue_head(&ioa_cfg->msi_wait_q);
8775  ioa_cfg->msi_received = 0;
8776  ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8777  writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8778  int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8779  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8780 
8781  rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8782  if (rc) {
8783  dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8784  return rc;
8785  } else if (ipr_debug)
8786  dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8787 
8788  writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8789  int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8790  wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8791  ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8792 
8793  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8794  if (!ioa_cfg->msi_received) {
8795  /* MSI test failed */
8796  dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
8797  rc = -EOPNOTSUPP;
8798  } else if (ipr_debug)
8799  dev_info(&pdev->dev, "MSI test succeeded.\n");
8800 
8801  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8802 
8803  free_irq(pdev->irq, ioa_cfg);
8804 
8805  LEAVE;
8806 
8807  return rc;
8808 }
8809 
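8810 /**
8811  * ipr_probe_ioa - Allocate and initialize an adapter
8812  * @pdev: PCI device struct
8813  * @dev_id: PCI device id struct
8814  *
8815  * Return value:
8816  * 0 on success / non-zero on failure
8817  **/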
8818 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8819  const struct pci_device_id *dev_id)
8820 {
8821  struct ipr_ioa_cfg *ioa_cfg;
8822  struct Scsi_Host *host;
8823  unsigned long ipr_regs_pci;
8824  void __iomem *ipr_regs;
8825  int rc = PCIBIOS_SUCCESSFUL;
8826  volatile u32 mask, uproc, interrupts;
8827 
8828  ENTER;
8829 
8830  if ((rc = pci_enable_device(pdev))) {
8831  dev_err(&pdev->dev, "Cannot enable adapter\n");
8832  goto out;
8833  }
8834 
8835  dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8836 
8837  host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8838 
8839  if (!host) {
8840  dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8841  rc = -ENOMEM;
8842  goto out_disable;
8843  }
8844 
8845  ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8846  memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8847  ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
8848 
8849  ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8850 
8851  if (!ioa_cfg->ipr_chip) {
8852  dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8853  dev_id->vendor, dev_id->device);
8854  goto out_scsi_host_put;
8855  }
8856 
8857  /* set SIS 32 or SIS 64 */
8858  ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8859  ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8860  ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
8861  ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
8862 
8863  if (ipr_transop_timeout)
8864  ioa_cfg->transop_timeout = ipr_transop_timeout;
8865  else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8866  ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8867  else
8868  ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8869 
8870  ioa_cfg->revid = pdev->revision;
8871 
8872  ipr_regs_pci = pci_resource_start(pdev, 0);
8873 
8874  rc = pci_request_regions(pdev, IPR_NAME);
8875  if (rc < 0) {
8876  dev_err(&pdev->dev,
8877  "Couldn't register memory range of registers\n");
8878  goto out_scsi_host_put;
8879  }
8880 
8881  ipr_regs = pci_ioremap_bar(pdev, 0);
8882 
8883  if (!ipr_regs) {
8884  dev_err(&pdev->dev,
8885  "Couldn't map memory range of registers\n");
8886  rc = -ENOMEM;
8887  goto out_release_regions;
8888  }
8889 
8890  ioa_cfg->hdw_dma_regs = ipr_regs;
8891  ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8892  ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8893 
8894  ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8895 
8896  pci_set_master(pdev);
8897 
8898  if (ioa_cfg->sis64) {
8899  rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8900  if (rc < 0) {
8901  dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8902  rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8903  }
8904 
8905  } else
8906  rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8907 
8908  if (rc < 0) {
8909  dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8910  goto cleanup_nomem;
8911  }
8912 
8913  rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8914  ioa_cfg->chip_cfg->cache_line_size);
8915 
8916  if (rc != PCIBIOS_SUCCESSFUL) {
8917  dev_err(&pdev->dev, "Write of cache line size failed\n");
8918  rc = -EIO;
8919  goto cleanup_nomem;
8920  }
8921 
8922  /* Enable MSI style interrupts if they are supported. */
8923  if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8924  rc = ipr_test_msi(ioa_cfg, pdev);
8925  if (rc == -EOPNOTSUPP)
8926  pci_disable_msi(pdev);
8927  else if (rc)
8928  goto out_msi_disable;
8929  else
8930  dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8931  } else if (ipr_debug)
8932  dev_info(&pdev->dev, "Cannot enable MSI.\n");
8933 
8934  /* Save away PCI config space for use following IOA reset */
8935  rc = pci_save_state(pdev);
8936 
8937  if (rc != PCIBIOS_SUCCESSFUL) {
8938  dev_err(&pdev->dev, "Failed to save PCI config space\n");
8939  rc = -EIO;
8940  goto out_msi_disable;
8941  }
8942 
8943  if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8944  goto out_msi_disable;
8945 
8946  if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8947  goto out_msi_disable;
8948 
8949  if (ioa_cfg->sis64)
8950  ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8951  + ((sizeof(struct ipr_config_table_entry64)
8952  * ioa_cfg->max_devs_supported)));
8953  else
8954  ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8955  + ((sizeof(struct ipr_config_table_entry)
8956  * ioa_cfg->max_devs_supported)));
8957 
8958  rc = ipr_alloc_mem(ioa_cfg);
8959  if (rc < 0) {
8960  dev_err(&pdev->dev,
8961  "Couldn't allocate enough memory for device driver!\n");
8962  goto out_msi_disable;
8963  }
8964 
8965  /*
8966  * If HRRQ updated interrupt is not masked, or reset alert is set,
8967  * the card is in an unknown state and needs a hard reset
8968  */
8969  mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8970  interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8971  uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8972  if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8973  ioa_cfg->needs_hard_reset = 1;
8974  if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
8975  ioa_cfg->needs_hard_reset = 1;
8976  if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8977  ioa_cfg->ioa_unit_checked = 1;
8978 
8979  ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8980  rc = request_irq(pdev->irq, ipr_isr,
8981  ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8982  IPR_NAME, ioa_cfg);
8983 
8984  if (rc) {
8985  dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8986  pdev->irq, rc);
8987  goto cleanup_nolog;
8988  }
8989 
8990  if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8991  (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8992  ioa_cfg->needs_warm_reset = 1;
8993  ioa_cfg->reset = ipr_reset_slot_reset;
8994  } else
8995  ioa_cfg->reset = ipr_reset_start_bist;
8996 
8997  spin_lock(&ipr_driver_lock);
8998  list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8999  spin_unlock(&ipr_driver_lock);
9000 
9001  LEAVE;
9002 out:
9003  return rc;
9004 
9005 cleanup_nolog:
9006  ipr_free_mem(ioa_cfg);
9007 out_msi_disable:
9008  pci_disable_msi(pdev);
9009 cleanup_nomem:
9010  iounmap(ipr_regs);
9011 out_release_regions:
9012  pci_release_regions(pdev);
9013 out_scsi_host_put:
9014  scsi_host_put(host);
9015 out_disable:
9016  pci_disable_device(pdev);
9017  goto out;
9018 }
9019 
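 /**
  * ipr_scan_vsets - Scans for VSET devices
  * @ioa_cfg:	ioa config struct
  *
  * Description: Since the VSET resources do not follow SAM in that we can have
  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
  *
  * Return value:
  * 	none
  **/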
9030 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9031 {
9032  int target, lun;
9033 
9034  for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9035  for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9036  scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9037 }
9038 
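 /**
  * ipr_initiate_ioa_bringdown - Bring down an adapter
  * @ioa_cfg:		ioa config struct
  * @shutdown_type:	shutdown type
  *
  * Description: This function will initiate bringing down the adapter.
  * This consists of issuing an IOA shutdown to the adapter
  * to flush the cache, and running BIST.
  * If the caller needs to wait on the completion of the reset,
  * the caller must sleep on the reset_wait_q.
  *
  * Return value:
  * 	none
  **/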
9053 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9054  enum ipr_shutdown_type shutdown_type)
9055 {
9056  ENTER;
9057  if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9058  ioa_cfg->sdt_state = ABORT_DUMP;
9059  ioa_cfg->reset_retries = 0;
9060  ioa_cfg->in_ioa_bringdown = 1;
9061  ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9062  LEAVE;
9063 }
9064 
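 /**
  * __ipr_remove - Remove a single adapter
  * @pdev:	pci device struct
  *
  * Adapter hot plug remove entry point.
  *
  * Return value:
  * 	none
  **/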
9074 static void __ipr_remove(struct pci_dev *pdev)
9075 {
9076  unsigned long host_lock_flags = 0;
9077  struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9078  ENTER;
9079 
9080  spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9081  while (ioa_cfg->in_reset_reload) {
9082  spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9083  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9084  spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9085  }
9086 
9087  ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9088 
9089  spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9090  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9091  flush_work(&ioa_cfg->work_q);
9092  spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9093 
9094  spin_lock(&ipr_driver_lock);
9095  list_del(&ioa_cfg->queue);
9096  spin_unlock(&ipr_driver_lock);
9097 
9098  if (ioa_cfg->sdt_state == ABORT_DUMP)
9099  ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9100  spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9101 
9102  ipr_free_all_resources(ioa_cfg);
9103 
9104  LEAVE;
9105 }
9106 
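 /**
  * ipr_remove - IOA hot plug remove entry point
  * @pdev:	pci device struct
  *
  * Adapter hot plug remove entry point.
  *
  * Return value:
  * 	none
  **/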
9116 static void __devexit ipr_remove(struct pci_dev *pdev)
9117 {
9118  struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9119 
9120  ENTER;
9121 
9122  ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9123  &ipr_trace_attr);
9124  ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9125  &ipr_dump_attr);
9126  scsi_remove_host(ioa_cfg->host);
9127 
9128  __ipr_remove(pdev);
9129 
9130  LEAVE;
9131 }
9132 
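 /**
  * ipr_probe - Adapter hot plug add entry point
  *
  * Return value:
  * 	0 on success / non-zero on failure
  **/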
9139 static int __devinit ipr_probe(struct pci_dev *pdev,
9140  const struct pci_device_id *dev_id)
9141 {
9142  struct ipr_ioa_cfg *ioa_cfg;
9143  int rc;
9144 
9145  rc = ipr_probe_ioa(pdev, dev_id);
9146 
9147  if (rc)
9148  return rc;
9149 
9150  ioa_cfg = pci_get_drvdata(pdev);
9151  rc = ipr_probe_ioa_part2(ioa_cfg);
9152 
9153  if (rc) {
9154  __ipr_remove(pdev);
9155  return rc;
9156  }
9157 
9158  rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9159 
9160  if (rc) {
9161  __ipr_remove(pdev);
9162  return rc;
9163  }
9164 
9165  rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9166  &ipr_trace_attr);
9167 
9168  if (rc) {
9169  scsi_remove_host(ioa_cfg->host);
9170  __ipr_remove(pdev);
9171  return rc;
9172  }
9173 
9174  rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9175  &ipr_dump_attr);
9176 
9177  if (rc) {
9178  ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9179  &ipr_trace_attr);
9180  scsi_remove_host(ioa_cfg->host);
9181  __ipr_remove(pdev);
9182  return rc;
9183  }
9184 
9185  scsi_scan_host(ioa_cfg->host);
9186  ipr_scan_vsets(ioa_cfg);
9187  scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9188  ioa_cfg->allow_ml_add_del = 1;
9189  ioa_cfg->host->max_channel = IPR_VSET_BUS;
9190  schedule_work(&ioa_cfg->work_q);
9191  return 0;
9192 }
9193 
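 /**
  * ipr_shutdown - Shutdown handler.
  * @pdev:	pci device struct
  *
  * This function is invoked upon system shutdown/reboot. It will issue
  * an adapter shutdown to the adapter to flush the write cache.
  *
  * Return value:
  * 	none
  **/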
9204 static void ipr_shutdown(struct pci_dev *pdev)
9205 {
9206  struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9207  unsigned long lock_flags = 0;
9208 
9209  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9210  while (ioa_cfg->in_reset_reload) {
9211  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9212  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9213  spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9214  }
9215 
9216  ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9217  spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9218  wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9219 }
9220 
9221 static struct pci_device_id ipr_pci_table[] __devinitdata = {
9295  { }
9296 };
9297 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
9298 
9299 static const struct pci_error_handlers ipr_err_handler = {
9300  .error_detected = ipr_pci_error_detected,
9301  .slot_reset = ipr_pci_slot_reset,
9302 };
9303 
9304 static struct pci_driver ipr_driver = {
9305  .name = IPR_NAME,
9306  .id_table = ipr_pci_table,
9307  .probe = ipr_probe,
9308  .remove = __devexit_p(ipr_remove),
9309  .shutdown = ipr_shutdown,
9310  .err_handler = &ipr_err_handler,
9311 };
9312 
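 /**
  * ipr_halt_done - Shutdown prepare completion
  *
  * Return value:
  * 	none
  **/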
9319 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
9320 {
9321  struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9322 
9323  list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
9324 }
9325 
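 /**
  * ipr_halt - Issue shutdown prepare to all adapters
  *
  * Return value:
  * 	NOTIFY_OK on success / NOTIFY_DONE on failure
  **/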
9332 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
9333 {
9334  struct ipr_cmnd *ipr_cmd;
9335  struct ipr_ioa_cfg *ioa_cfg;
9336  unsigned long flags = 0;
9337 
9338  if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
9339  return NOTIFY_DONE;
9340 
9341  spin_lock(&ipr_driver_lock);
9342 
9343  list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
9344  spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9345  if (!ioa_cfg->allow_cmds) {
9346  spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9347  continue;
9348  }
9349 
9350  ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9351  ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9352  ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9353  ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9354  ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
9355 
9356  ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
9357  spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9358  }
9359  spin_unlock(&ipr_driver_lock);
9360 
9361  return NOTIFY_OK;
9362 }
9363 
9364 static struct notifier_block ipr_notifier = {
9365  ipr_halt, NULL, 0
9366 };
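 /*
  * The positional initializer above fills the first three fields of
  * struct notifier_block; the equivalent designated-initializer form is:
  *
  *	static struct notifier_block ipr_notifier = {
  *		.notifier_call	= ipr_halt,
  *		.next		= NULL,
  *		.priority	= 0,
  *	};
  */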
9367 
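 /**
  * ipr_init - Module entry point
  *
  * Return value:
  * 	0 on success / negative value on failure
  **/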
9374 static int __init ipr_init(void)
9375 {
9376  ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
9377  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
9378 
9379  register_reboot_notifier(&ipr_notifier);
9380  return pci_register_driver(&ipr_driver);
9381 }
9382 
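 /**
  * ipr_exit - Module unload
  *
  * Module unload entry point.
  *
  * Return value:
  * 	none
  **/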
9391 static void __exit ipr_exit(void)
9392 {
9393  unregister_reboot_notifier(&ipr_notifier);
9394  pci_unregister_driver(&ipr_driver);
9395 }
9396 
9397 module_init(ipr_init);
9398 module_exit(ipr_exit);
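 /*
  * module_init()/module_exit() make ipr_init() and ipr_exit() the load and
  * unload entry points: built into the kernel, ipr_init() runs during boot;
  * built as a module, it runs at module load time and ipr_exit() at unload.
  */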