Linux Kernel  3.7.1
megaraid_mbox.c
1 /*
2  *
3  * Linux MegaRAID device driver
4  *
5  * Copyright (c) 2003-2004 LSI Logic Corporation.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * as published by the Free Software Foundation; either version
10  * 2 of the License, or (at your option) any later version.
11  *
12  * FILE : megaraid_mbox.c
13  * Version : v2.20.5.1 (Nov 16 2006)
14  *
15  * Authors:
16  * Atul Mukker <[email protected]>
17  * Sreenivas Bagalkote <[email protected]>
18  * Manoj Jose <[email protected]>
19  * Seokmann Ju
20  *
21  * List of supported controllers
22  *
23  * OEM Product Name VID DID SSVID SSID
24  * --- ------------ --- --- ---- ----
25  * Dell PERC3/QC 101E 1960 1028 0471
26  * Dell PERC3/DC 101E 1960 1028 0493
27  * Dell PERC3/SC 101E 1960 1028 0475
28  * Dell PERC3/Di 1028 1960 1028 0123
29  * Dell PERC4/SC 1000 1960 1028 0520
30  * Dell PERC4/DC 1000 1960 1028 0518
31  * Dell PERC4/QC 1000 0407 1028 0531
32  * Dell PERC4/Di 1028 000F 1028 014A
33  * Dell PERC 4e/Si 1028 0013 1028 016c
34  * Dell PERC 4e/Di 1028 0013 1028 016d
35  * Dell PERC 4e/Di 1028 0013 1028 016e
36  * Dell PERC 4e/Di 1028 0013 1028 016f
37  * Dell PERC 4e/Di 1028 0013 1028 0170
38  * Dell PERC 4e/DC 1000 0408 1028 0002
39  * Dell PERC 4e/SC 1000 0408 1028 0001
40  *
41  *
42  * LSI MegaRAID SCSI 320-0 1000 1960 1000 A520
43  * LSI MegaRAID SCSI 320-1 1000 1960 1000 0520
44  * LSI MegaRAID SCSI 320-2 1000 1960 1000 0518
45  * LSI MegaRAID SCSI 320-0X 1000 0407 1000 0530
46  * LSI MegaRAID SCSI 320-2X 1000 0407 1000 0532
47  * LSI MegaRAID SCSI 320-4X 1000 0407 1000 0531
48  * LSI MegaRAID SCSI 320-1E 1000 0408 1000 0001
49  * LSI MegaRAID SCSI 320-2E 1000 0408 1000 0002
50  * LSI MegaRAID SATA 150-4 1000 1960 1000 4523
51  * LSI MegaRAID SATA 150-6 1000 1960 1000 0523
52  * LSI MegaRAID SATA 300-4X 1000 0409 1000 3004
53  * LSI MegaRAID SATA 300-8X 1000 0409 1000 3008
54  *
55  * INTEL RAID Controller SRCU42X 1000 0407 8086 0532
56  * INTEL RAID Controller SRCS16 1000 1960 8086 0523
57  * INTEL RAID Controller SRCU42E 1000 0408 8086 0002
58  * INTEL RAID Controller SRCZCRX 1000 0407 8086 0530
59  * INTEL RAID Controller SRCS28X 1000 0409 8086 3008
60  * INTEL RAID Controller SROMBU42E 1000 0408 8086 3431
61  * INTEL RAID Controller SROMBU42E 1000 0408 8086 3499
62  * INTEL RAID Controller SRCU51L 1000 1960 8086 0520
63  *
64  * FSC MegaRAID PCI Express ROMB 1000 0408 1734 1065
65  *
66  * ACER MegaRAID ROMB-2E 1000 0408 1025 004D
67  *
68  * NEC MegaRAID PCI Express ROMB 1000 0408 1033 8287
69  *
70  * For history of changes, see Documentation/scsi/ChangeLog.megaraid
71  */
72 
73 #include <linux/slab.h>
74 #include <linux/module.h>
75 #include "megaraid_mbox.h"
76 
77 static int megaraid_init(void);
78 static void megaraid_exit(void);
79 
80 static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *);
81 static void megaraid_detach_one(struct pci_dev *);
82 static void megaraid_mbox_shutdown(struct pci_dev *);
83 
84 static int megaraid_io_attach(adapter_t *);
85 static void megaraid_io_detach(adapter_t *);
86 
87 static int megaraid_init_mbox(adapter_t *);
88 static void megaraid_fini_mbox(adapter_t *);
89 
90 static int megaraid_alloc_cmd_packets(adapter_t *);
91 static void megaraid_free_cmd_packets(adapter_t *);
92 
93 static int megaraid_mbox_setup_dma_pools(adapter_t *);
94 static void megaraid_mbox_teardown_dma_pools(adapter_t *);
95 
96 static int megaraid_sysfs_alloc_resources(adapter_t *);
97 static void megaraid_sysfs_free_resources(adapter_t *);
98 
99 static int megaraid_abort_handler(struct scsi_cmnd *);
100 static int megaraid_reset_handler(struct scsi_cmnd *);
101 
102 static int mbox_post_sync_cmd(adapter_t *, uint8_t []);
103 static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []);
104 static int megaraid_busywait_mbox(mraid_device_t *);
105 static int megaraid_mbox_product_info(adapter_t *);
106 static int megaraid_mbox_extended_cdb(adapter_t *);
107 static int megaraid_mbox_support_ha(adapter_t *, uint16_t *);
108 static int megaraid_mbox_support_random_del(adapter_t *);
109 static int megaraid_mbox_get_max_sg(adapter_t *);
110 static void megaraid_mbox_enum_raid_scsi(adapter_t *);
111 static void megaraid_mbox_flush_cache(adapter_t *);
112 static int megaraid_mbox_fire_sync_cmd(adapter_t *);
113 
114 static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
115 static void megaraid_mbox_setup_device_map(adapter_t *);
116 
117 static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
118 static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
119 static void megaraid_mbox_runpendq(adapter_t *, scb_t *);
120 static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
121  struct scsi_cmnd *);
122 static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *,
123  struct scsi_cmnd *);
124 
125 static irqreturn_t megaraid_isr(int, void *);
126 
127 static void megaraid_mbox_dpc(unsigned long);
128 
129 static ssize_t megaraid_sysfs_show_app_hndl(struct device *, struct device_attribute *attr, char *);
130 static ssize_t megaraid_sysfs_show_ldnum(struct device *, struct device_attribute *attr, char *);
131 
132 static int megaraid_cmm_register(adapter_t *);
133 static int megaraid_cmm_unregister(adapter_t *);
134 static int megaraid_mbox_mm_handler(unsigned long, uioc_t *, uint32_t);
135 static int megaraid_mbox_mm_command(adapter_t *, uioc_t *);
136 static void megaraid_mbox_mm_done(adapter_t *, scb_t *);
137 static int gather_hbainfo(adapter_t *, mraid_hba_info_t *);
138 static int wait_till_fw_empty(adapter_t *);
139 
140 
141 
143 MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver");
144 MODULE_LICENSE("GPL");
146 
147 /*
148  * ### module parameters for the driver ###
149  */
150 
151 /*
152  * Set to enable the driver to expose unconfigured disks to the kernel
153  */
154 static int megaraid_expose_unconf_disks = 0;
155 module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0);
156 MODULE_PARM_DESC(unconf_disks,
157  "Set to expose unconfigured disks to kernel (default=0)");
158 
159 /*
160  * driver wait time if the adapter's mailbox is busy
161  */
162 static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT;
163 module_param_named(busy_wait, max_mbox_busy_wait, int, 0);
164 MODULE_PARM_DESC(busy_wait,
165  "Max wait for mailbox in microseconds if busy (default=10)");
166 
167 /*
168  * number of sectors per IO command
169  */
170 static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS;
171 module_param_named(max_sectors, megaraid_max_sectors, int, 0);
172 MODULE_PARM_DESC(max_sectors,
173  "Maximum number of sectors per IO command (default=128)");
174 
175 /*
176  * number of commands per logical unit
177  */
178 static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN;
179 module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0);
180 MODULE_PARM_DESC(cmd_per_lun,
181  "Maximum number of commands per logical unit (default=64)");
182 
183 
184 /*
185  * Fast driver load option, skip scanning for physical devices during load.
186  * This would result in non-disk devices being skipped during driver load
187  * time. They can be added later, though, using /proc/scsi/scsi
188  */
189 static unsigned int megaraid_fast_load = 0;
190 module_param_named(fast_load, megaraid_fast_load, int, 0);
191 MODULE_PARM_DESC(fast_load,
192  "Faster loading of the driver, skips physical devices! (default=0)");
193 
194 
195 /*
196  * mraid_debug level - threshold for amount of information to be displayed by
197  * the driver. This level can be changed through module parameters, ioctl or
198  * sysfs/proc interface. By default, print the announcement messages only.
199  */
200 int mraid_debug_level = CL_ANN;
201 module_param_named(debug_level, mraid_debug_level, int, 0);
202 MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
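/*
 * Usage sketch (illustrative values, not from the original source): the
 * parameters above are ordinary module_param_named() parameters, so they
 * can be supplied at load time, e.g.
 *
 *     modprobe megaraid_mbox unconf_disks=1 fast_load=1 debug_level=1
 *
 * With a permission argument of 0 they are not exported under
 * /sys/module/megaraid_mbox/parameters/, so they cannot be changed from
 * sysfs after the module is loaded.
 */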
203 
204 /*
205  * ### global data ###
206  */
207 static uint8_t megaraid_mbox_version[8] =
208  { 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
209 
210 
211 /*
212  * PCI table for all supported controllers.
213  */
214 static struct pci_device_id pci_id_table_g[] = {
215  {
220  },
221  {
226  },
227  {
232  },
233  {
236  PCI_ANY_ID,
237  PCI_ANY_ID,
238  },
239  {
244  },
245  {
250  },
251  {
256  },
257  {
262  },
263  {
268  },
269  {
274  },
275  {
278  PCI_ANY_ID,
279  PCI_ANY_ID,
280  },
281  {
284  PCI_ANY_ID,
285  PCI_ANY_ID,
286  },
287  {
290  PCI_ANY_ID,
291  PCI_ANY_ID,
292  },
293  {
296  PCI_ANY_ID,
297  PCI_ANY_ID,
298  },
299  {0} /* Terminating entry */
300 };
301 MODULE_DEVICE_TABLE(pci, pci_id_table_g);
302 
303 
304 static struct pci_driver megaraid_pci_driver = {
305  .name = "megaraid",
306  .id_table = pci_id_table_g,
307  .probe = megaraid_probe_one,
308  .remove = __devexit_p(megaraid_detach_one),
309  .shutdown = megaraid_mbox_shutdown,
310 };
311 
312 
313 
314 // definitions for the device attributes for exporting logical drive number
315 // for a scsi address (Host, Channel, Id, Lun)
316 
317 DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
318  NULL);
319 
320 // Host template initializer for megaraid mbox sysfs device attributes
321 static struct device_attribute *megaraid_shost_attrs[] = {
322  &dev_attr_megaraid_mbox_app_hndl,
323  NULL,
324 };
325 
326 
327 DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
328 
329 // Host template initializer for megaraid mbox sysfs device attributes
330 static struct device_attribute *megaraid_sdev_attrs[] = {
331  &dev_attr_megaraid_mbox_ld,
332  NULL,
333 };
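/*
 * Sysfs sketch (indicative paths; exact locations depend on the kernel
 * version): once the host template below is registered, the shost
 * attribute typically appears as
 *
 *     /sys/class/scsi_host/host<N>/megaraid_mbox_app_hndl
 *
 * and the per-device attribute as
 *
 *     /sys/class/scsi_device/<H:C:T:L>/device/megaraid_mbox_ld
 */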
334 
344 static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth,
345  int reason)
346 {
347  if (reason != SCSI_QDEPTH_DEFAULT)
348  return -EOPNOTSUPP;
349 
350  if (qdepth > MBOX_MAX_SCSI_CMDS)
351  qdepth = MBOX_MAX_SCSI_CMDS;
352  scsi_adjust_queue_depth(sdev, 0, qdepth);
353  return sdev->queue_depth;
354 }
355 
356 /*
357  * Scsi host template for megaraid unified driver
358  */
359 static struct scsi_host_template megaraid_template_g = {
360  .module = THIS_MODULE,
361  .name = "LSI Logic MegaRAID driver",
362  .proc_name = "megaraid",
363  .queuecommand = megaraid_queue_command,
364  .eh_abort_handler = megaraid_abort_handler,
365  .eh_device_reset_handler = megaraid_reset_handler,
366  .eh_bus_reset_handler = megaraid_reset_handler,
367  .eh_host_reset_handler = megaraid_reset_handler,
368  .change_queue_depth = megaraid_change_queue_depth,
369  .use_clustering = ENABLE_CLUSTERING,
370  .sdev_attrs = megaraid_sdev_attrs,
371  .shost_attrs = megaraid_shost_attrs,
372 };
373 
374 
381 static int __init
382 megaraid_init(void)
383 {
384  int rval;
385 
386  // Announce the driver version
387  con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION,
388  MEGARAID_EXT_VERSION));
389 
390  // check validity of module parameters
391  if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) {
392 
393  con_log(CL_ANN, (KERN_WARNING
394  "megaraid mailbox: max commands per lun reset to %d\n",
395  MBOX_MAX_SCSI_CMDS));
396 
397  megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS;
398  }
399 
400 
401  // register as a PCI hot-plug driver module
402  rval = pci_register_driver(&megaraid_pci_driver);
403  if (rval < 0) {
404  con_log(CL_ANN, (KERN_WARNING
405  "megaraid: could not register hotplug support.\n"));
406  }
407 
408  return rval;
409 }
410 
411 
417 static void __exit
418 megaraid_exit(void)
419 {
420  con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));
421 
422  // unregister as PCI hotplug driver
423  pci_unregister_driver(&megaraid_pci_driver);
424 
425  return;
426 }
427 
428 
437 static int __devinit
438 megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
439 {
440  adapter_t *adapter;
441 
442 
443  // detected a new controller
444  con_log(CL_ANN, (KERN_INFO
445  "megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
446  pdev->vendor, pdev->device, pdev->subsystem_vendor,
447  pdev->subsystem_device));
448 
449  con_log(CL_ANN, ("bus %d:slot %d:func %d\n", pdev->bus->number,
450  PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)));
451 
452  if (pci_enable_device(pdev)) {
453  con_log(CL_ANN, (KERN_WARNING
454  "megaraid: pci_enable_device failed\n"));
455 
456  return -ENODEV;
457  }
458 
459  // Enable bus-mastering on this controller
460  pci_set_master(pdev);
461 
462  // Allocate the per driver initialization structure
463  adapter = kzalloc(sizeof(adapter_t), GFP_KERNEL);
464 
465  if (adapter == NULL) {
466  con_log(CL_ANN, (KERN_WARNING
467  "megaraid: out of memory, %s %d.\n", __func__, __LINE__));
468 
469  goto out_probe_one;
470  }
471 
472 
473  // set up PCI related soft state and other pre-known parameters
474  adapter->unique_id = pdev->bus->number << 8 | pdev->devfn;
475  adapter->irq = pdev->irq;
476  adapter->pdev = pdev;
477 
478  atomic_set(&adapter->being_detached, 0);
479 
480  // Setup the default DMA mask. This would be changed later on
481  // depending on hardware capabilities
482  if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
483 
484  con_log(CL_ANN, (KERN_WARNING
485  "megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
486 
487  goto out_free_adapter;
488  }
489 
490 
491  // Initialize the synchronization lock for kernel and LLD
492  spin_lock_init(&adapter->lock);
493 
494  // Initialize the command queues: the list of free SCBs and the list
495  // of pending SCBs.
496  INIT_LIST_HEAD(&adapter->kscb_pool);
497  spin_lock_init(SCSI_FREE_LIST_LOCK(adapter));
498 
499  INIT_LIST_HEAD(&adapter->pend_list);
500  spin_lock_init(PENDING_LIST_LOCK(adapter));
501 
502  INIT_LIST_HEAD(&adapter->completed_list);
503  spin_lock_init(COMPLETED_LIST_LOCK(adapter));
504 
505 
506  // Start the mailbox based controller
507  if (megaraid_init_mbox(adapter) != 0) {
508  con_log(CL_ANN, (KERN_WARNING
509  "megaraid: mailbox adapter did not initialize\n"));
510 
511  goto out_free_adapter;
512  }
513 
514  // Register with LSI Common Management Module
515  if (megaraid_cmm_register(adapter) != 0) {
516 
517  con_log(CL_ANN, (KERN_WARNING
518  "megaraid: could not register with management module\n"));
519 
520  goto out_fini_mbox;
521  }
522 
523  // setup adapter handle in PCI soft state
524  pci_set_drvdata(pdev, adapter);
525 
526  // attach with scsi mid-layer
527  if (megaraid_io_attach(adapter) != 0) {
528 
529  con_log(CL_ANN, (KERN_WARNING "megaraid: io attach failed\n"));
530 
531  goto out_cmm_unreg;
532  }
533 
534  return 0;
535 
536 out_cmm_unreg:
537  pci_set_drvdata(pdev, NULL);
538  megaraid_cmm_unregister(adapter);
539 out_fini_mbox:
540  megaraid_fini_mbox(adapter);
541 out_free_adapter:
542  kfree(adapter);
543 out_probe_one:
544  pci_disable_device(pdev);
545 
546  return -ENODEV;
547 }
548 
549 
560 static void
561 megaraid_detach_one(struct pci_dev *pdev)
562 {
563  adapter_t *adapter;
564  struct Scsi_Host *host;
565 
566 
567  // Start a rollback on this adapter
568  adapter = pci_get_drvdata(pdev);
569 
570  if (!adapter) {
571  con_log(CL_ANN, (KERN_CRIT
572  "megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
573  pdev->vendor, pdev->device, pdev->subsystem_vendor,
574  pdev->subsystem_device));
575 
576  return;
577  }
578  else {
579  con_log(CL_ANN, (KERN_NOTICE
580  "megaraid: detaching device %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
581  pdev->vendor, pdev->device, pdev->subsystem_vendor,
582  pdev->subsystem_device));
583  }
584 
585 
586  host = adapter->host;
587 
588  // do not allow any more requests from the management module for this
589  // adapter.
590  // FIXME: How do we account for the request which might still be
591  // pending with us?
592  atomic_set(&adapter->being_detached, 1);
593 
594  // detach from the IO sub-system
595  megaraid_io_detach(adapter);
596 
597  // reset the device state in the PCI structure. We check this
598  // condition when we enter here. If the device state is NULL,
599  // that would mean the device has already been removed
600  pci_set_drvdata(pdev, NULL);
601 
602  // Unregister from common management module
603  //
604  // FIXME: this must return success or failure for conditions if there
605  // is a command pending with LLD or not.
606  megaraid_cmm_unregister(adapter);
607 
608  // finalize the mailbox based controller and release all resources
609  megaraid_fini_mbox(adapter);
610 
611  kfree(adapter);
612 
613  scsi_host_put(host);
614 
615  pci_disable_device(pdev);
616 
617  return;
618 }
619 
620 
627 static void
628 megaraid_mbox_shutdown(struct pci_dev *pdev)
629 {
630  adapter_t *adapter = pci_get_drvdata(pdev);
631  static int counter;
632 
633  if (!adapter) {
634  con_log(CL_ANN, (KERN_WARNING
635  "megaraid: null device in shutdown\n"));
636  return;
637  }
638 
639  // flush caches now
640  con_log(CL_ANN, (KERN_INFO "megaraid: flushing adapter %d...",
641  counter++));
642 
643  megaraid_mbox_flush_cache(adapter);
644 
645  con_log(CL_ANN, ("done\n"));
646 }
647 
648 
655 static int
656 megaraid_io_attach(adapter_t *adapter)
657 {
658  struct Scsi_Host *host;
659 
660  // Initialize SCSI Host structure
661  host = scsi_host_alloc(&megaraid_template_g, 8);
662  if (!host) {
663  con_log(CL_ANN, (KERN_WARNING
664  "megaraid mbox: scsi_register failed\n"));
665 
666  return -1;
667  }
668 
669  SCSIHOST2ADAP(host) = (caddr_t)adapter;
670  adapter->host = host;
671 
672  host->irq = adapter->irq;
673  host->unique_id = adapter->unique_id;
674  host->can_queue = adapter->max_cmds;
675  host->this_id = adapter->init_id;
676  host->sg_tablesize = adapter->sglen;
677  host->max_sectors = adapter->max_sectors;
678  host->cmd_per_lun = adapter->cmd_per_lun;
679  host->max_channel = adapter->max_channel;
680  host->max_id = adapter->max_target;
681  host->max_lun = adapter->max_lun;
682 
683 
684  // notify mid-layer about the new controller
685  if (scsi_add_host(host, &adapter->pdev->dev)) {
686 
687  con_log(CL_ANN, (KERN_WARNING
688  "megaraid mbox: scsi_add_host failed\n"));
689 
690  scsi_host_put(host);
691 
692  return -1;
693  }
694 
695  scsi_scan_host(host);
696 
697  return 0;
698 }
699 
700 
707 static void
708 megaraid_io_detach(adapter_t *adapter)
709 {
710  struct Scsi_Host *host;
711 
712  con_log(CL_DLEVEL1, (KERN_INFO "megaraid: io detach\n"));
713 
714  host = adapter->host;
715 
716  scsi_remove_host(host);
717 
718  return;
719 }
720 
721 
722 /*
723  * START: Mailbox Low Level Driver
724  *
725  * This section is specific to the single mailbox based controllers
726  */
727 
738 static int __devinit
739 megaraid_init_mbox(adapter_t *adapter)
740 {
741  struct pci_dev *pdev;
742  mraid_device_t *raid_dev;
743  int i;
744  uint32_t magic64;
745 
746 
747  adapter->ito = MBOX_TIMEOUT;
748  pdev = adapter->pdev;
749 
750  /*
751  * Allocate and initialize the init data structure for mailbox
752  * controllers
753  */
754  raid_dev = kzalloc(sizeof(mraid_device_t), GFP_KERNEL);
755  if (raid_dev == NULL) return -1;
756 
757 
758  /*
759  * Attach the adapter soft state to raid device soft state
760  */
761  adapter->raid_device = (caddr_t)raid_dev;
762  raid_dev->fast_load = megaraid_fast_load;
763 
764 
765  // our baseport
766  raid_dev->baseport = pci_resource_start(pdev, 0);
767 
768  if (pci_request_regions(pdev, "MegaRAID: LSI Logic Corporation") != 0) {
769 
770  con_log(CL_ANN, (KERN_NOTICE
771  "megaraid: mem region busy\n"));
772 
773  goto out_free_raid_dev;
774  }
775 
776  raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128);
777 
778  if (!raid_dev->baseaddr) {
779 
780  con_log(CL_ANN, (KERN_WARNING
781  "megaraid: could not map hba memory\n"));
782 
783  goto out_release_regions;
784  }
785 
786  /* initialize the mutual exclusion lock for the mailbox */
787  spin_lock_init(&raid_dev->mailbox_lock);
788 
789  /* allocate memory required for commands */
790  if (megaraid_alloc_cmd_packets(adapter) != 0)
791  goto out_iounmap;
792 
793  /*
794  * Issue SYNC cmd to flush the pending cmds in the adapter
795  * and initialize its internal state
796  */
797 
798  if (megaraid_mbox_fire_sync_cmd(adapter))
799  con_log(CL_ANN, ("megaraid: sync cmd failed\n"));
800 
801  /*
802  * Setup the rest of the soft state using the library of
803  * FW routines
804  */
805 
806  /* request IRQ and register the interrupt service routine */
807  if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid",
808  adapter)) {
809 
810  con_log(CL_ANN, (KERN_WARNING
811  "megaraid: Couldn't register IRQ %d!\n", adapter->irq));
812  goto out_alloc_cmds;
813 
814  }
815 
816  // Product info
817  if (megaraid_mbox_product_info(adapter) != 0)
818  goto out_free_irq;
819 
820  // Do we support extended CDBs
821  adapter->max_cdb_sz = 10;
822  if (megaraid_mbox_extended_cdb(adapter) == 0) {
823  adapter->max_cdb_sz = 16;
824  }
825 
826  /*
827  * Do we support a cluster environment? If so, what is the initiator
828  * id?
829  * NOTE: In a non-cluster aware firmware environment, the LLD should
830  * return 7 as initiator id.
831  */
832  adapter->ha = 0;
833  adapter->init_id = -1;
834  if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) {
835  adapter->ha = 1;
836  }
837 
838  /*
839  * Prepare the device ids array to have the mapping between the kernel
840  * device address and megaraid device address.
841  * We export the physical devices on their actual addresses. The
842  * logical drives are exported on a virtual SCSI channel
843  */
844  megaraid_mbox_setup_device_map(adapter);
845 
846  // If the firmware supports random deletion, update the device id map
847  if (megaraid_mbox_support_random_del(adapter)) {
848 
849  // Change the logical drive numbers in the device_ids array. One
850  // slot in device_ids is reserved for the target id; that is why
851  // "<=" is used below
852  for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) {
853  adapter->device_ids[adapter->max_channel][i] += 0x80;
854  }
855  adapter->device_ids[adapter->max_channel][adapter->init_id] =
856  0xFF;
857 
858  raid_dev->random_del_supported = 1;
859  }
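/*
 * Mapping sketch: with random deletion supported, logical drive n on the
 * virtual channel is reached through device id 0x80 + n (e.g. LD 0 ->
 * 0x80, LD 2 -> 0x82), while the slot that matches the initiator id is
 * set to 0xFF so it is never exported as a drive.
 */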
860 
861  /*
862  * find out the maximum number of scatter-gather elements supported by
863  * this firmware
864  */
865  adapter->sglen = megaraid_mbox_get_max_sg(adapter);
866 
867  // enumerate RAID and SCSI channels so that all devices on SCSI
868  // channels can later be exported, including disk devices
869  megaraid_mbox_enum_raid_scsi(adapter);
870 
871  /*
872  * Other parameters required by upper layer
873  *
874  * maximum number of sectors per IO command
875  */
876  adapter->max_sectors = megaraid_max_sectors;
877 
878  /*
879  * number of queued commands per LUN.
880  */
881  adapter->cmd_per_lun = megaraid_cmd_per_lun;
882 
883  /*
884  * Allocate resources required to issue FW calls, when sysfs is
885  * accessed
886  */
887  if (megaraid_sysfs_alloc_resources(adapter) != 0)
888  goto out_free_irq;
889 
890  // Set the DMA mask to 64-bit. All supported controllers are capable of
891  // DMA in this range
892  pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);
893 
894  if (((magic64 == HBA_SIGNATURE_64_BIT) &&
895  ((adapter->pdev->subsystem_device !=
896  PCI_SUBSYS_ID_MEGARAID_SATA_150_6) ||
897  (adapter->pdev->subsystem_device !=
898  PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
899  (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
900  adapter->pdev->device == PCI_DEVICE_ID_VERDE) ||
901  (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
902  adapter->pdev->device == PCI_DEVICE_ID_DOBSON) ||
903  (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
904  adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) ||
905  (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
906  adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
907  (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
908  adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
909  if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
910  con_log(CL_ANN, (KERN_WARNING
911  "megaraid: DMA mask for 64-bit failed\n"));
912 
913  if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
914  con_log(CL_ANN, (KERN_WARNING
915  "megaraid: 32-bit DMA mask failed\n"));
916  goto out_free_sysfs_res;
917  }
918  }
919  }
920 
921  // setup tasklet for DPC
922  tasklet_init(&adapter->dpc_h, megaraid_mbox_dpc,
923  (unsigned long)adapter);
924 
925  con_log(CL_DLEVEL1, (KERN_INFO
926  "megaraid mbox hba successfully initialized\n"));
927 
928  return 0;
929 
930 out_free_sysfs_res:
931  megaraid_sysfs_free_resources(adapter);
932 out_free_irq:
933  free_irq(adapter->irq, adapter);
934 out_alloc_cmds:
935  megaraid_free_cmd_packets(adapter);
936 out_iounmap:
937  iounmap(raid_dev->baseaddr);
938 out_release_regions:
939  pci_release_regions(pdev);
940 out_free_raid_dev:
941  kfree(raid_dev);
942 
943  return -1;
944 }
945 
946 
951 static void
952 megaraid_fini_mbox(adapter_t *adapter)
953 {
954  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
955 
956  // flush all caches
957  megaraid_mbox_flush_cache(adapter);
958 
959  tasklet_kill(&adapter->dpc_h);
960 
961  megaraid_sysfs_free_resources(adapter);
962 
963  megaraid_free_cmd_packets(adapter);
964 
965  free_irq(adapter->irq, adapter);
966 
967  iounmap(raid_dev->baseaddr);
968 
969  pci_release_regions(adapter->pdev);
970 
971  kfree(raid_dev);
972 
973  return;
974 }
975 
976 
986 static int
987 megaraid_alloc_cmd_packets(adapter_t *adapter)
988 {
989  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
990  struct pci_dev *pdev;
991  unsigned long align;
992  scb_t *scb;
993  mbox_ccb_t *ccb;
994  struct mraid_pci_blk *epthru_pci_blk;
995  struct mraid_pci_blk *sg_pci_blk;
996  struct mraid_pci_blk *mbox_pci_blk;
997  int i;
998 
999  pdev = adapter->pdev;
1000 
1001  /*
1002  * Setup the mailbox
1003  * Allocate the common 16-byte aligned memory for the handshake
1004  * mailbox.
1005  */
1006  raid_dev->una_mbox64 = pci_alloc_consistent(adapter->pdev,
1007  sizeof(mbox64_t), &raid_dev->una_mbox64_dma);
1008 
1009  if (!raid_dev->una_mbox64) {
1010  con_log(CL_ANN, (KERN_WARNING
1011  "megaraid: out of memory, %s %d\n", __func__,
1012  __LINE__));
1013  return -1;
1014  }
1015  memset(raid_dev->una_mbox64, 0, sizeof(mbox64_t));
1016 
1017  /*
1018  * Align the mailbox at 16-byte boundary
1019  */
1020  raid_dev->mbox = &raid_dev->una_mbox64->mbox32;
1021 
1022  raid_dev->mbox = (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) &
1023  (~0UL ^ 0xFUL));
1024 
1025  raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8);
1026 
1027  align = ((void *)raid_dev->mbox -
1028  ((void *)&raid_dev->una_mbox64->mbox32));
1029 
1030  raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 +
1031  align;
1032 
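/*
 * Worked example (hypothetical addresses): if una_mbox64 sits at virtual
 * address 0x1004, then &una_mbox64->mbox32 = 0x100c (the arithmetic above
 * assumes mbox32 starts 8 bytes into mbox64_t). Rounding up to a 16-byte
 * boundary gives mbox = 0x1010, so align = 4 and mbox64 = 0x1008, and
 * mbox_dma = una_mbox64_dma + 8 + 4 points at the same 16-byte-aligned
 * offset within the DMA buffer.
 */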
1033  // Allocate memory for commands issued internally
1034  adapter->ibuf = pci_alloc_consistent(pdev, MBOX_IBUF_SIZE,
1035  &adapter->ibuf_dma_h);
1036  if (!adapter->ibuf) {
1037 
1038  con_log(CL_ANN, (KERN_WARNING
1039  "megaraid: out of memory, %s %d\n", __func__,
1040  __LINE__));
1041 
1042  goto out_free_common_mbox;
1043  }
1044  memset(adapter->ibuf, 0, MBOX_IBUF_SIZE);
1045 
1046  // Allocate memory for our SCSI Command Blocks and their associated
1047  // memory
1048 
1049  /*
1050  * Allocate memory for the base list of scb. Later allocate memory for
1051  * CCBs and embedded components of each CCB and point the pointers in
1052  * scb to the allocated components
1053  * NOTE: The code to allocate SCB will be duplicated in all the LLD
1054  * since the calling routine does not yet know the number of available
1055  * commands.
1056  */
1057  adapter->kscb_list = kcalloc(MBOX_MAX_SCSI_CMDS, sizeof(scb_t), GFP_KERNEL);
1058 
1059  if (adapter->kscb_list == NULL) {
1060  con_log(CL_ANN, (KERN_WARNING
1061  "megaraid: out of memory, %s %d\n", __func__,
1062  __LINE__));
1063  goto out_free_ibuf;
1064  }
1065 
1066  // memory allocation for our command packets
1067  if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
1068  con_log(CL_ANN, (KERN_WARNING
1069  "megaraid: out of memory, %s %d\n", __func__,
1070  __LINE__));
1071  goto out_free_scb_list;
1072  }
1073 
1074  // Adjust the scb pointers and link in the free pool
1075  epthru_pci_blk = raid_dev->epthru_pool;
1076  sg_pci_blk = raid_dev->sg_pool;
1077  mbox_pci_blk = raid_dev->mbox_pool;
1078 
1079  for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1080  scb = adapter->kscb_list + i;
1081  ccb = raid_dev->ccb_list + i;
1082 
1083  ccb->mbox = (mbox_t *)(mbox_pci_blk[i].vaddr + 16);
1084  ccb->raw_mbox = (uint8_t *)ccb->mbox;
1085  ccb->mbox64 = (mbox64_t *)(mbox_pci_blk[i].vaddr + 8);
1086  ccb->mbox_dma_h = (unsigned long)mbox_pci_blk[i].dma_addr + 16;
1087 
1088  // make sure the mailbox is aligned properly
1089  if (ccb->mbox_dma_h & 0x0F) {
1090  con_log(CL_ANN, (KERN_CRIT
1091  "megaraid mbox: not aligned on 16-bytes\n"));
1092 
1093  goto out_teardown_dma_pools;
1094  }
1095 
1096  ccb->epthru = (mraid_epassthru_t *)
1097  epthru_pci_blk[i].vaddr;
1098  ccb->epthru_dma_h = epthru_pci_blk[i].dma_addr;
1099  ccb->pthru = (mraid_passthru_t *)ccb->epthru;
1100  ccb->pthru_dma_h = ccb->epthru_dma_h;
1101 
1102 
1103  ccb->sgl64 = (mbox_sgl64 *)sg_pci_blk[i].vaddr;
1104  ccb->sgl_dma_h = sg_pci_blk[i].dma_addr;
1105  ccb->sgl32 = (mbox_sgl32 *)ccb->sgl64;
1106 
1107  scb->ccb = (caddr_t)ccb;
1108  scb->gp = 0;
1109 
1110  scb->sno = i; // command index
1111 
1112  scb->scp = NULL;
1113  scb->state = SCB_FREE;
1114  scb->dma_direction = PCI_DMA_NONE;
1115  scb->dma_type = MRAID_DMA_NONE;
1116  scb->dev_channel = -1;
1117  scb->dev_target = -1;
1118 
1119  // put scb in the free pool
1120  list_add_tail(&scb->list, &adapter->kscb_pool);
1121  }
1122 
1123  return 0;
1124 
1125 out_teardown_dma_pools:
1126  megaraid_mbox_teardown_dma_pools(adapter);
1127 out_free_scb_list:
1128  kfree(adapter->kscb_list);
1129 out_free_ibuf:
1130  pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
1131  adapter->ibuf_dma_h);
1132 out_free_common_mbox:
1133  pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
1134  (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
1135 
1136  return -1;
1137 }
1138 
1139 
1146 static void
1147 megaraid_free_cmd_packets(adapter_t *adapter)
1148 {
1149  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1150 
1151  megaraid_mbox_teardown_dma_pools(adapter);
1152 
1153  kfree(adapter->kscb_list);
1154 
1155  pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
1156  (void *)adapter->ibuf, adapter->ibuf_dma_h);
1157 
1158  pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
1159  (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
1160  return;
1161 }
1162 
1163 
1171 static int
1172 megaraid_mbox_setup_dma_pools(adapter_t *adapter)
1173 {
1174  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1175  struct mraid_pci_blk *epthru_pci_blk;
1176  struct mraid_pci_blk *sg_pci_blk;
1177  struct mraid_pci_blk *mbox_pci_blk;
1178  int i;
1179 
1180 
1181 
1182  // Allocate memory for 16-bytes aligned mailboxes
1183  raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
1184  adapter->pdev,
1185  sizeof(mbox64_t) + 16,
1186  16, 0);
1187 
1188  if (raid_dev->mbox_pool_handle == NULL) {
1189  goto fail_setup_dma_pool;
1190  }
1191 
1192  mbox_pci_blk = raid_dev->mbox_pool;
1193  for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1194  mbox_pci_blk[i].vaddr = pci_pool_alloc(
1195  raid_dev->mbox_pool_handle,
1196  GFP_KERNEL,
1197  &mbox_pci_blk[i].dma_addr);
1198  if (!mbox_pci_blk[i].vaddr) {
1199  goto fail_setup_dma_pool;
1200  }
1201  }
1202 
1203  /*
1204  * Allocate memory for each embedded passthru structure pointer.
1205  * Request a 128-byte aligned structure for each passthru command structure.
1206  * Since passthru and extended passthru commands are mutually
1207  * exclusive, they share a common memory pool. Passthru structures
1208  * piggyback on the memory allocated for extended passthru, since
1209  * passthru is the smaller of the two.
1210  */
1211  raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
1212  adapter->pdev, sizeof(mraid_epassthru_t), 128, 0);
1213 
1214  if (raid_dev->epthru_pool_handle == NULL) {
1215  goto fail_setup_dma_pool;
1216  }
1217 
1218  epthru_pci_blk = raid_dev->epthru_pool;
1219  for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1220  epthru_pci_blk[i].vaddr = pci_pool_alloc(
1221  raid_dev->epthru_pool_handle,
1222  GFP_KERNEL,
1223  &epthru_pci_blk[i].dma_addr);
1224  if (!epthru_pci_blk[i].vaddr) {
1225  goto fail_setup_dma_pool;
1226  }
1227  }
1228 
1229 
1230  // Allocate memory for each scatter-gather list. Request for 512 bytes
1231  // alignment for each sg list
1232  raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
1233  adapter->pdev,
1234  sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
1235  512, 0);
1236 
1237  if (raid_dev->sg_pool_handle == NULL) {
1238  goto fail_setup_dma_pool;
1239  }
1240 
1241  sg_pci_blk = raid_dev->sg_pool;
1242  for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1243  sg_pci_blk[i].vaddr = pci_pool_alloc(
1244  raid_dev->sg_pool_handle,
1245  GFP_KERNEL,
1246  &sg_pci_blk[i].dma_addr);
1247  if (!sg_pci_blk[i].vaddr) {
1248  goto fail_setup_dma_pool;
1249  }
1250  }
1251 
1252  return 0;
1253 
1254 fail_setup_dma_pool:
1255  megaraid_mbox_teardown_dma_pools(adapter);
1256  return -1;
1257 }
1258 
1259 
1267 static void
1268 megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
1269 {
1270  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1271  struct mraid_pci_blk *epthru_pci_blk;
1272  struct mraid_pci_blk *sg_pci_blk;
1273  struct mraid_pci_blk *mbox_pci_blk;
1274  int i;
1275 
1276 
1277  sg_pci_blk = raid_dev->sg_pool;
1278  for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
1279  pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
1280  sg_pci_blk[i].dma_addr);
1281  }
1282  if (raid_dev->sg_pool_handle)
1283  pci_pool_destroy(raid_dev->sg_pool_handle);
1284 
1285 
1286  epthru_pci_blk = raid_dev->epthru_pool;
1287  for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
1288  pci_pool_free(raid_dev->epthru_pool_handle,
1289  epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
1290  }
1291  if (raid_dev->epthru_pool_handle)
1292  pci_pool_destroy(raid_dev->epthru_pool_handle);
1293 
1294 
1295  mbox_pci_blk = raid_dev->mbox_pool;
1296  for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
1297  pci_pool_free(raid_dev->mbox_pool_handle,
1298  mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
1299  }
1300  if (raid_dev->mbox_pool_handle)
1301  pci_pool_destroy(raid_dev->mbox_pool_handle);
1302 
1303  return;
1304 }
1305 
1306 
1315 static scb_t *
1316 megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
1317 {
1318  struct list_head *head = &adapter->kscb_pool;
1319  scb_t *scb = NULL;
1320  unsigned long flags;
1321 
1322  // detach scb from free pool
1323  spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
1324 
1325  if (list_empty(head)) {
1326  spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1327  return NULL;
1328  }
1329 
1330  scb = list_entry(head->next, scb_t, list);
1331  list_del_init(&scb->list);
1332 
1333  spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1334 
1335  scb->state = SCB_ACTIVE;
1336  scb->scp = scp;
1337  scb->dma_type = MRAID_DMA_NONE;
1338 
1339  return scb;
1340 }
1341 
1342 
1353 static inline void
1354 megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
1355 {
1356  unsigned long flags;
1357 
1358  // put scb in the free pool
1359  scb->state = SCB_FREE;
1360  scb->scp = NULL;
1361  spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
1362 
1363  list_add(&scb->list, &adapter->kscb_pool);
1364 
1365  spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1366 
1367  return;
1368 }
1369 
1370 
1378 static int
1379 megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
1380 {
1381  struct scatterlist *sgl;
1382  mbox_ccb_t *ccb;
1383  struct scsi_cmnd *scp;
1384  int sgcnt;
1385  int i;
1386 
1387 
1388  scp = scb->scp;
1389  ccb = (mbox_ccb_t *)scb->ccb;
1390 
1391  sgcnt = scsi_dma_map(scp);
1392  BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen);
1393 
1394  // no mapping required if no data to be transferred
1395  if (!sgcnt)
1396  return 0;
1397 
1398  scb->dma_type = MRAID_DMA_WSG;
1399 
1400  scsi_for_each_sg(scp, sgl, sgcnt, i) {
1401  ccb->sgl64[i].address = sg_dma_address(sgl);
1402  ccb->sgl64[i].length = sg_dma_len(sgl);
1403  }
1404 
1405  // Return count of SG nodes
1406  return sgcnt;
1407 }
1408 
1409 
1417 static int
1418 mbox_post_cmd(adapter_t *adapter, scb_t *scb)
1419 {
1420  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1421  mbox64_t *mbox64;
1422  mbox_t *mbox;
1423  mbox_ccb_t *ccb;
1424  unsigned long flags;
1425  unsigned int i = 0;
1426 
1427 
1428  ccb = (mbox_ccb_t *)scb->ccb;
1429  mbox = raid_dev->mbox;
1430  mbox64 = raid_dev->mbox64;
1431 
1432  /*
1433  * Check for busy mailbox. If it is, return failure - the caller
1434  * should retry later.
1435  */
1436  spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
1437 
1438  if (unlikely(mbox->busy)) {
1439  do {
1440  udelay(1);
1441  i++;
1442  rmb();
1443  } while(mbox->busy && (i < max_mbox_busy_wait));
1444 
1445  if (mbox->busy) {
1446 
1447  spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
1448 
1449  return -1;
1450  }
1451  }
1452 
1453 
1454  // Copy this command's mailbox data into "adapter's" mailbox
1455  memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22);
1456  mbox->cmdid = scb->sno;
1457 
1458  adapter->outstanding_cmds++;
1459 
1460  if (scb->dma_direction == PCI_DMA_TODEVICE)
1461  pci_dma_sync_sg_for_device(adapter->pdev,
1462  scsi_sglist(scb->scp),
1463  scsi_sg_count(scb->scp),
1464  PCI_DMA_TODEVICE);
1465 
1466  mbox->busy = 1; // Set busy
1467  mbox->poll = 0;
1468  mbox->ack = 0;
1469  wmb();
1470 
1471  WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
1472 
1473  spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
1474 
1475  return 0;
1476 }
1477 
1478 
1486 static int
1487 megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
1488 {
1489  adapter_t *adapter;
1490  scb_t *scb;
1491  int if_busy;
1492 
1493  adapter = SCP2ADAPTER(scp);
1494  scp->scsi_done = done;
1495  scp->result = 0;
1496 
1497  /*
1498  * Allocate and build a SCB request
1499  * if_busy flag will be set if megaraid_mbox_build_cmd() command could
1500  * not allocate scb. We will return non-zero status in that case.
1501  * NOTE: scb can be NULL even though certain commands completed
1502  * successfully, e.g., MODE_SENSE and TEST_UNIT_READY; in that case
1503  * we do the callback right away and return 0.
1504  */
1505  if_busy = 0;
1506  scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy);
1507  if (!scb) { // command already completed
1508  done(scp);
1509  return 0;
1510  }
1511 
1512  megaraid_mbox_runpendq(adapter, scb);
1513  return if_busy;
1514 }
1515 
1516 static DEF_SCSI_QCMD(megaraid_queue_command)
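/*
 * Note: DEF_SCSI_QCMD() (from <scsi/scsi_host.h>) generates the locked
 * wrapper megaraid_queue_command() referenced by the host template; it
 * takes the Scsi_Host lock with interrupts disabled and then calls
 * megaraid_queue_command_lck() above.
 */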
1517 
1518 
1529 static scb_t *
1530 megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
1531 {
1532  mraid_device_t *rdev = ADAP2RAIDDEV(adapter);
1533  int channel;
1534  int target;
1535  int islogical;
1536  mbox_ccb_t *ccb;
1537  mraid_passthru_t *pthru;
1538  mbox64_t *mbox64;
1539  mbox_t *mbox;
1540  scb_t *scb;
1541  char skip[] = "skipping";
1542  char scan[] = "scanning";
1543  char *ss;
1544 
1545 
1546  /*
1547  * Get the appropriate device map for the device this command is
1548  * intended for
1549  */
1550  MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical);
1551 
1552  /*
1553  * Logical drive commands
1554  */
1555  if (islogical) {
1556  switch (scp->cmnd[0]) {
1557  case TEST_UNIT_READY:
1558  /*
1559  * Do we support clustering and is the support enabled?
1560  * If not, return success always
1561  */
1562  if (!adapter->ha) {
1563  scp->result = (DID_OK << 16);
1564  return NULL;
1565  }
1566 
1567  if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1568  scp->result = (DID_ERROR << 16);
1569  *busy = 1;
1570  return NULL;
1571  }
1572 
1573  scb->dma_direction = scp->sc_data_direction;
1574  scb->dev_channel = 0xFF;
1575  scb->dev_target = target;
1576  ccb = (mbox_ccb_t *)scb->ccb;
1577 
1578  /*
1579  * The command id will be provided by the command
1580  * issuance routine
1581  */
1582  ccb->raw_mbox[0] = CLUSTER_CMD;
1583  ccb->raw_mbox[2] = RESERVATION_STATUS;
1584  ccb->raw_mbox[3] = target;
1585 
1586  return scb;
1587 
1588  case MODE_SENSE:
1589  {
1590  struct scatterlist *sgl;
1591  caddr_t vaddr;
1592 
1593  sgl = scsi_sglist(scp);
1594  if (sg_page(sgl)) {
1595  vaddr = (caddr_t) sg_virt(&sgl[0]);
1596 
1597  memset(vaddr, 0, scp->cmnd[4]);
1598  }
1599  else {
1600  con_log(CL_ANN, (KERN_WARNING
1601  "megaraid mailbox: invalid sg:%d\n",
1602  __LINE__));
1603  }
1604  }
1605  scp->result = (DID_OK << 16);
1606  return NULL;
1607 
1608  case INQUIRY:
1609  /*
1610  * Display the channel scan for logical drives
1611  * Do not display scan for a channel if already done.
1612  */
1613  if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
1614 
1615  con_log(CL_ANN, (KERN_INFO
1616  "scsi[%d]: scanning scsi channel %d",
1617  adapter->host->host_no,
1618  SCP2CHANNEL(scp)));
1619 
1620  con_log(CL_ANN, (
1621  " [virtual] for logical drives\n"));
1622 
1623  rdev->last_disp |= (1L << SCP2CHANNEL(scp));
1624  }
1625 
1626  if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
1627  scp->sense_buffer[0] = 0x70;
1628  scp->sense_buffer[2] = ILLEGAL_REQUEST;
1629  scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
1630  scp->result = CHECK_CONDITION << 1;
1631  return NULL;
1632  }
1633 
1634  /* Fall through */
1635 
1636  case READ_CAPACITY:
1637  /*
1638  * Do not allow LUN > 0 for logical drives and
1639  * requests for more than 40 logical drives
1640  */
1641  if (SCP2LUN(scp)) {
1642  scp->result = (DID_BAD_TARGET << 16);
1643  return NULL;
1644  }
1645  if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) {
1646  scp->result = (DID_BAD_TARGET << 16);
1647  return NULL;
1648  }
1649 
1650 
1651  /* Allocate a SCB and initialize passthru */
1652  if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1653  scp->result = (DID_ERROR << 16);
1654  *busy = 1;
1655  return NULL;
1656  }
1657 
1658  ccb = (mbox_ccb_t *)scb->ccb;
1659  scb->dev_channel = 0xFF;
1660  scb->dev_target = target;
1661  pthru = ccb->pthru;
1662  mbox = ccb->mbox;
1663  mbox64 = ccb->mbox64;
1664 
1665  pthru->timeout = 0;
1666  pthru->ars = 1;
1667  pthru->reqsenselen = 14;
1668  pthru->islogical = 1;
1669  pthru->logdrv = target;
1670  pthru->cdblen = scp->cmd_len;
1671  memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1672 
1673  mbox->cmd = MBOXCMD_PASSTHRU64;
1674  scb->dma_direction = scp->sc_data_direction;
1675 
1676  pthru->dataxferlen = scsi_bufflen(scp);
1677  pthru->dataxferaddr = ccb->sgl_dma_h;
1678  pthru->numsge = megaraid_mbox_mksgl(adapter,
1679  scb);
1680 
1681  mbox->xferaddr = 0xFFFFFFFF;
1682  mbox64->xferaddr_lo = (uint32_t )ccb->pthru_dma_h;
1683  mbox64->xferaddr_hi = 0;
1684 
1685  return scb;
1686 
1687  case READ_6:
1688  case WRITE_6:
1689  case READ_10:
1690  case WRITE_10:
1691  case READ_12:
1692  case WRITE_12:
1693 
1694  /*
1695  * Allocate a SCB and initialize mailbox
1696  */
1697  if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1698  scp->result = (DID_ERROR << 16);
1699  *busy = 1;
1700  return NULL;
1701  }
1702  ccb = (mbox_ccb_t *)scb->ccb;
1703  scb->dev_channel = 0xFF;
1704  scb->dev_target = target;
1705  mbox = ccb->mbox;
1706  mbox64 = ccb->mbox64;
1707  mbox->logdrv = target;
1708 
1709  /*
1710  * A little HACK: 2nd bit is zero for all scsi read
1711  * commands and is set for all scsi write commands
1712  */
1713  mbox->cmd = (scp->cmnd[0] & 0x02) ? MBOXCMD_LWRITE64:
1714  MBOXCMD_LREAD64 ;
1715 
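 /*
 * Opcode sketch: READ_6/WRITE_6 are 0x08/0x0A, READ_10/WRITE_10 are
 * 0x28/0x2A and READ_12/WRITE_12 are 0xA8/0xAA, so bit 0x02 of the
 * opcode is set exactly for the write variants handled here; that is
 * the "2nd bit" the hack above relies on.
 */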
1716  /*
1717  * 6-byte READ(0x08) or WRITE(0x0A) cdb
1718  */
1719  if (scp->cmd_len == 6) {
1720  mbox->numsectors = (uint32_t)scp->cmnd[4];
1721  mbox->lba =
1722  ((uint32_t)scp->cmnd[1] << 16) |
1723  ((uint32_t)scp->cmnd[2] << 8) |
1724  (uint32_t)scp->cmnd[3];
1725 
1726  mbox->lba &= 0x1FFFFF;
1727  }
1728 
1729  /*
1730  * 10-byte READ(0x28) or WRITE(0x2A) cdb
1731  */
1732  else if (scp->cmd_len == 10) {
1733  mbox->numsectors =
1734  (uint32_t)scp->cmnd[8] |
1735  ((uint32_t)scp->cmnd[7] << 8);
1736  mbox->lba =
1737  ((uint32_t)scp->cmnd[2] << 24) |
1738  ((uint32_t)scp->cmnd[3] << 16) |
1739  ((uint32_t)scp->cmnd[4] << 8) |
1740  (uint32_t)scp->cmnd[5];
1741  }
1742 
1743  /*
1744  * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1745  */
1746  else if (scp->cmd_len == 12) {
1747  mbox->lba =
1748  ((uint32_t)scp->cmnd[2] << 24) |
1749  ((uint32_t)scp->cmnd[3] << 16) |
1750  ((uint32_t)scp->cmnd[4] << 8) |
1751  (uint32_t)scp->cmnd[5];
1752 
1753  mbox->numsectors =
1754  ((uint32_t)scp->cmnd[6] << 24) |
1755  ((uint32_t)scp->cmnd[7] << 16) |
1756  ((uint32_t)scp->cmnd[8] << 8) |
1757  (uint32_t)scp->cmnd[9];
1758  }
1759  else {
1760  con_log(CL_ANN, (KERN_WARNING
1761  "megaraid: unsupported CDB length\n"));
1762 
1763  megaraid_dealloc_scb(adapter, scb);
1764 
1765  scp->result = (DID_ERROR << 16);
1766  return NULL;
1767  }
1768 
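 /*
 * Decode sketch (hypothetical CDB): for a 6-byte READ with cmnd[1..4] =
 * 0x01 0x23 0x45 0x08, the code above yields lba = 0x012345 (masked to
 * 21 bits) and numsectors = 8. The 10- and 12-byte forms take the 32-bit
 * big-endian lba from cmnd[2..5] and the sector count from cmnd[7..8] or
 * cmnd[6..9] respectively.
 */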
1769  scb->dma_direction = scp->sc_data_direction;
1770 
1771  // Calculate Scatter-Gather info
1772  mbox64->xferaddr_lo = (uint32_t )ccb->sgl_dma_h;
1773  mbox->numsge = megaraid_mbox_mksgl(adapter,
1774  scb);
1775  mbox->xferaddr = 0xFFFFFFFF;
1776  mbox64->xferaddr_hi = 0;
1777 
1778  return scb;
1779 
1780  case RESERVE:
1781  case RELEASE:
1782  /*
1783  * Do we support clustering and is the support enabled
1784  */
1785  if (!adapter->ha) {
1786  scp->result = (DID_BAD_TARGET << 16);
1787  return NULL;
1788  }
1789 
1790  /*
1791  * Allocate a SCB and initialize mailbox
1792  */
1793  if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1794  scp->result = (DID_ERROR << 16);
1795  *busy = 1;
1796  return NULL;
1797  }
1798 
1799  ccb = (mbox_ccb_t *)scb->ccb;
1800  scb->dev_channel = 0xFF;
1801  scb->dev_target = target;
1802  ccb->raw_mbox[0] = CLUSTER_CMD;
1803  ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ?
1804  RESERVE_UNIT : RELEASE_UNIT;
1805 
1806  ccb->raw_mbox[3] = target;
1807  scb->dma_direction = scp->sc_data_direction;
1808 
1809  return scb;
1810 
1811  default:
1812  scp->result = (DID_BAD_TARGET << 16);
1813  return NULL;
1814  }
1815  }
1816  else { // Passthru device commands
1817 
1818  // Do not allow access to target id > 15 or LUN > 7
1819  if (target > 15 || SCP2LUN(scp) > 7) {
1820  scp->result = (DID_BAD_TARGET << 16);
1821  return NULL;
1822  }
1823 
1824  // if fast load option was set and scan for last device is
1825  // over, reset the fast_load flag so that during a possible
1826  // next scan, devices can be made available
1827  if (rdev->fast_load && (target == 15) &&
1828  (SCP2CHANNEL(scp) == adapter->max_channel -1)) {
1829 
1831  "megaraid[%d]: physical device scan re-enabled\n",
1832  adapter->host->host_no));
1833  rdev->fast_load = 0;
1834  }
1835 
1836  /*
1837  * Display the channel scan for physical devices
1838  */
1839  if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
1840 
1841  ss = rdev->fast_load ? skip : scan;
1842 
1843  con_log(CL_ANN, (KERN_INFO
1844  "scsi[%d]: %s scsi channel %d [Phy %d]",
1845  adapter->host->host_no, ss, SCP2CHANNEL(scp),
1846  channel));
1847 
1848  con_log(CL_ANN, (
1849  " for non-raid devices\n"));
1850 
1851  rdev->last_disp |= (1L << SCP2CHANNEL(scp));
1852  }
1853 
1854  // disable channel sweep if fast load option given
1855  if (rdev->fast_load) {
1856  scp->result = (DID_BAD_TARGET << 16);
1857  return NULL;
1858  }
1859 
1860  // Allocate a SCB and initialize passthru
1861  if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1862  scp->result = (DID_ERROR << 16);
1863  *busy = 1;
1864  return NULL;
1865  }
1866 
1867  ccb = (mbox_ccb_t *)scb->ccb;
1868  scb->dev_channel = channel;
1869  scb->dev_target = target;
1870  scb->dma_direction = scp->sc_data_direction;
1871  mbox = ccb->mbox;
1872  mbox64 = ccb->mbox64;
1873 
1874  // Does this firmware support extended CDBs
1875  if (adapter->max_cdb_sz == 16) {
1876  mbox->cmd = MBOXCMD_EXTPTHRU;
1877 
1878  megaraid_mbox_prepare_epthru(adapter, scb, scp);
1879 
1880  mbox64->xferaddr_lo = (uint32_t)ccb->epthru_dma_h;
1881  mbox64->xferaddr_hi = 0;
1882  mbox->xferaddr = 0xFFFFFFFF;
1883  }
1884  else {
1885  mbox->cmd = MBOXCMD_PASSTHRU64;
1886 
1887  megaraid_mbox_prepare_pthru(adapter, scb, scp);
1888 
1889  mbox64->xferaddr_lo = (uint32_t)ccb->pthru_dma_h;
1890  mbox64->xferaddr_hi = 0;
1891  mbox->xferaddr = 0xFFFFFFFF;
1892  }
1893  return scb;
1894  }
1895 
1896  // NOT REACHED
1897 }
1898 
1899 
1913 static void
1914 megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
1915 {
1916  scb_t *scb;
1917  unsigned long flags;
1918 
1919  spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
1920 
1921  if (scb_q) {
1922  scb_q->state = SCB_PENDQ;
1923  list_add_tail(&scb_q->list, &adapter->pend_list);
1924  }
1925 
1926  // if the adapter is not in quiescent mode, post the commands to FW
1927  if (adapter->quiescent) {
1928  spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
1929  return;
1930  }
1931 
1932  while (!list_empty(&adapter->pend_list)) {
1933 
1935 
1936  scb = list_entry(adapter->pend_list.next, scb_t, list);
1937 
1938  // remove the scb from the pending list and try to
1939  // issue. If we are unable to issue it, put back in
1940  // the pending list and return
1941 
1942  list_del_init(&scb->list);
1943 
1944  spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
1945 
1946  // if mailbox was busy, return SCB back to pending
1947  // list. Make sure to add at the head, since that's
1948  // where it would have been removed from
1949 
1950  scb->state = SCB_ISSUED;
1951 
1952  if (mbox_post_cmd(adapter, scb) != 0) {
1953 
1954  spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
1955 
1956  scb->state = SCB_PENDQ;
1957 
1958  list_add(&scb->list, &adapter->pend_list);
1959 
1960  spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
1961  flags);
1962 
1963  return;
1964  }
1965 
1966  spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
1967  }
1968 
1969  spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
1970 
1971 
1972  return;
1973 }
1974 
1975 
1984 static void
1985 megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
1986  struct scsi_cmnd *scp)
1987 {
1988  mbox_ccb_t *ccb;
1989  mraid_passthru_t *pthru;
1990  uint8_t channel;
1991  uint8_t target;
1992 
1993  ccb = (mbox_ccb_t *)scb->ccb;
1994  pthru = ccb->pthru;
1995  channel = scb->dev_channel;
1996  target = scb->dev_target;
1997 
1998  // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
1999  pthru->timeout = 4;
2000  pthru->ars = 1;
2001  pthru->islogical = 0;
2002  pthru->channel = 0;
2003  pthru->target = (channel << 4) | target;
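 /*
 * Packing sketch: the upper nibble carries the channel and the lower
 * nibble the target, e.g. channel 1, target 5 -> pthru->target = 0x15.
 */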
2004  pthru->logdrv = SCP2LUN(scp);
2005  pthru->reqsenselen = 14;
2006  pthru->cdblen = scp->cmd_len;
2007 
2008  memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
2009 
2010  if (scsi_bufflen(scp)) {
2011  pthru->dataxferlen = scsi_bufflen(scp);
2012  pthru->dataxferaddr = ccb->sgl_dma_h;
2013  pthru->numsge = megaraid_mbox_mksgl(adapter, scb);
2014  }
2015  else {
2016  pthru->dataxferaddr = 0;
2017  pthru->dataxferlen = 0;
2018  pthru->numsge = 0;
2019  }
2020  return;
2021 }
2022 
2023 
2033 static void
2034 megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
2035  struct scsi_cmnd *scp)
2036 {
2037  mbox_ccb_t *ccb;
2038  mraid_epassthru_t *epthru;
2039  uint8_t channel;
2040  uint8_t target;
2041 
2042  ccb = (mbox_ccb_t *)scb->ccb;
2043  epthru = ccb->epthru;
2044  channel = scb->dev_channel;
2045  target = scb->dev_target;
2046 
2047  // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
2048  epthru->timeout = 4;
2049  epthru->ars = 1;
2050  epthru->islogical = 0;
2051  epthru->channel = 0;
2052  epthru->target = (channel << 4) | target;
2053  epthru->logdrv = SCP2LUN(scp);
2054  epthru->reqsenselen = 14;
2055  epthru->cdblen = scp->cmd_len;
2056 
2057  memcpy(epthru->cdb, scp->cmnd, scp->cmd_len);
2058 
2059  if (scsi_bufflen(scp)) {
2060  epthru->dataxferlen = scsi_bufflen(scp);
2061  epthru->dataxferaddr = ccb->sgl_dma_h;
2062  epthru->numsge = megaraid_mbox_mksgl(adapter, scb);
2063  }
2064  else {
2065  epthru->dataxferaddr = 0;
2066  epthru->dataxferlen = 0;
2067  epthru->numsge = 0;
2068  }
2069  return;
2070 }
2071 
2072 
2082 static int
2083 megaraid_ack_sequence(adapter_t *adapter)
2084 {
2085  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
2086  mbox_t *mbox;
2087  scb_t *scb;
2088  uint8_t nstatus;
2089  uint8_t completed[MBOX_MAX_FIRMWARE_STATUS];
2090  struct list_head clist;
2091  int handled;
2092  uint32_t dword;
2093  unsigned long flags;
2094  int i, j;
2095 
2096 
2097  mbox = raid_dev->mbox;
2098 
2099  // move the SCBs from the firmware completed array to our local list
2100  INIT_LIST_HEAD(&clist);
2101 
2102  // loop till F/W has more commands for us to complete
2103  handled = 0;
2104  spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
2105  do {
2106  /*
2107  * Check if a valid interrupt is pending. If found, force the
2108  * interrupt line low.
2109  */
2110  dword = RDOUTDOOR(raid_dev);
2111  if (dword != 0x10001234) break;
2112 
2113  handled = 1;
2114 
2115  WROUTDOOR(raid_dev, 0x10001234);
2116 
2117  nstatus = 0;
2118  // wait for valid numstatus to post
2119  for (i = 0; i < 0xFFFFF; i++) {
2120  if (mbox->numstatus != 0xFF) {
2121  nstatus = mbox->numstatus;
2122  break;
2123  }
2124  rmb();
2125  }
2126  mbox->numstatus = 0xFF;
2127 
2128  adapter->outstanding_cmds -= nstatus;
2129 
2130  for (i = 0; i < nstatus; i++) {
2131 
2132  // wait for valid command index to post
2133  for (j = 0; j < 0xFFFFF; j++) {
2134  if (mbox->completed[i] != 0xFF) break;
2135  rmb();
2136  }
2137  completed[i] = mbox->completed[i];
2138  mbox->completed[i] = 0xFF;
2139 
2140  if (completed[i] == 0xFF) {
2141  con_log(CL_ANN, (KERN_CRIT
2142  "megaraid: command posting timed out\n"));
2143 
2144  BUG();
2145  continue;
2146  }
2147 
2148  // Get SCB associated with this command id
2149  if (completed[i] >= MBOX_MAX_SCSI_CMDS) {
2150  // a cmm command
2151  scb = adapter->uscb_list + (completed[i] -
2152  MBOX_MAX_SCSI_CMDS);
2153  }
2154  else {
2155  // an os command
2156  scb = adapter->kscb_list + completed[i];
2157  }
2158 
2159  scb->status = mbox->status;
2160  list_add_tail(&scb->list, &clist);
2161  }
2162 
2163  // Acknowledge interrupt
2164  WRINDOOR(raid_dev, 0x02);
2165 
2166  } while(1);
2167 
2168  spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
2169 
2170 
2171  // put the completed commands in the completed list. DPC would
2172  // complete these commands later
2173  spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2174 
2175  list_splice(&clist, &adapter->completed_list);
2176 
2177  spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2178 
2179 
2180  // schedule the DPC if there is some work for it
2181  if (handled)
2182  tasklet_schedule(&adapter->dpc_h);
2183 
2184  return handled;
2185 }
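/*
 * Handshake summary (descriptive of the code above): a pending interrupt
 * is signalled by the outdoor register reading 0x10001234; the same value
 * is written back to clear it, mbox->numstatus completed command indices
 * are pulled out of mbox->completed[], both are reset to 0xFF for the
 * next round, and 0x02 is written to the indoor register to acknowledge
 * the firmware.
 */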
2186 
2187 
2195 static irqreturn_t
2196 megaraid_isr(int irq, void *devp)
2197 {
2198  adapter_t *adapter = devp;
2199  int handled;
2200 
2201  handled = megaraid_ack_sequence(adapter);
2202 
2203  /* Loop through any pending requests */
2204  if (!adapter->quiescent) {
2205  megaraid_mbox_runpendq(adapter, NULL);
2206  }
2207 
2208  return IRQ_RETVAL(handled);
2209 }
2210 
2211 
2219 static void
2220 megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
2221 {
2222  mbox_ccb_t *ccb;
2223 
2224  ccb = (mbox_ccb_t *)scb->ccb;
2225 
2226  if (scb->dma_direction == PCI_DMA_FROMDEVICE)
2227  pci_dma_sync_sg_for_cpu(adapter->pdev,
2228  scsi_sglist(scb->scp),
2229  scsi_sg_count(scb->scp),
2230  PCI_DMA_FROMDEVICE);
2231 
2232  scsi_dma_unmap(scb->scp);
2233  return;
2234 }
2235 
2236 
2245 static void
2246 megaraid_mbox_dpc(unsigned long devp)
2247 {
2248  adapter_t *adapter = (adapter_t *)devp;
2249  mraid_device_t *raid_dev;
2250  struct list_head clist;
2251  struct scatterlist *sgl;
2252  scb_t *scb;
2253  scb_t *tmp;
2254  struct scsi_cmnd *scp;
2255  mraid_passthru_t *pthru;
2256  mraid_epassthru_t *epthru;
2257  mbox_ccb_t *ccb;
2258  int islogical;
2259  int pdev_index;
2260  int pdev_state;
2261  mbox_t *mbox;
2262  unsigned long flags;
2263  uint8_t c;
2264  int status;
2265  uioc_t *kioc;
2266 
2267 
2268  if (!adapter) return;
2269 
2270  raid_dev = ADAP2RAIDDEV(adapter);
2271 
2272  // move the SCBs from the completed list to our local list
2273  INIT_LIST_HEAD(&clist);
2274 
2275  spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2276 
2277  list_splice_init(&adapter->completed_list, &clist);
2278 
2279  spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2280 
2281 
2282  list_for_each_entry_safe(scb, tmp, &clist, list) {
2283 
2284  status = scb->status;
2285  scp = scb->scp;
2286  ccb = (mbox_ccb_t *)scb->ccb;
2287  pthru = ccb->pthru;
2288  epthru = ccb->epthru;
2289  mbox = ccb->mbox;
2290 
2291  // Make sure f/w has completed a valid command
2292  if (scb->state != SCB_ISSUED) {
2293  con_log(CL_ANN, (KERN_CRIT
2294  "megaraid critical err: invalid command %d:%d:%p\n",
2295  scb->sno, scb->state, scp));
2296  BUG();
2297  continue; // Must never happen!
2298  }
2299 
2300  // check for the management command and complete it right away
2301  if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
2302  scb->state = SCB_FREE;
2303  scb->status = status;
2304 
2305  // remove from local clist
2306  list_del_init(&scb->list);
2307 
2308  kioc = (uioc_t *)scb->gp;
2309  kioc->status = 0;
2310 
2311  megaraid_mbox_mm_done(adapter, scb);
2312 
2313  continue;
2314  }
2315 
2316  // Was an abort issued for this command earlier
2317  if (scb->state & SCB_ABORT) {
2318  con_log(CL_ANN, (KERN_NOTICE
2319  "megaraid: aborted cmd [%x] completed\n",
2320  scb->sno));
2321  }
2322 
2323  /*
2324  * If the inquiry came from a disk drive which is not part of
2325  * any RAID array, expose it to the kernel. For this to be
2326  * enabled, the user must set the "megaraid_expose_unconf_disks"
2327  * flag to 1 by specifying it in the module parameter list.
2328  * This would enable data migration off drives from other
2329  * configurations.
2330  */
2331  islogical = MRAID_IS_LOGICAL(adapter, scp);
2332  if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
2333  && IS_RAID_CH(raid_dev, scb->dev_channel)) {
2334 
2335  sgl = scsi_sglist(scp);
2336  if (sg_page(sgl)) {
2337  c = *(unsigned char *) sg_virt(&sgl[0]);
2338  } else {
2339  con_log(CL_ANN, (KERN_WARNING
2340  "megaraid mailbox: invalid sg:%d\n",
2341  __LINE__));
2342  c = 0;
2343  }
2344 
2345  if ((c & 0x1F ) == TYPE_DISK) {
2346  pdev_index = (scb->dev_channel * 16) +
2347  scb->dev_target;
2348  pdev_state =
2349  raid_dev->pdrv_state[pdev_index] & 0x0F;
2350 
2351  if (pdev_state == PDRV_ONLINE ||
2352  pdev_state == PDRV_FAILED ||
2353  pdev_state == PDRV_RBLD ||
2354  pdev_state == PDRV_HOTSPARE ||
2355  megaraid_expose_unconf_disks == 0) {
2356 
2357  status = 0xF0;
2358  }
2359  }
2360  }
2361 
2362  // Convert MegaRAID status to Linux error code
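// Summary of the mapping implemented by the switch below:
//   0x00 -> DID_OK
//   0x02 -> CHECK_CONDITION, with sense data copied from the (e)passthru
//           request-sense area, or a synthesized ABORTED_COMMAND sense
//           for non-passthru commands
//   0x08 -> DID_BUS_BUSY
//   other (including the 0xF0 set above to hide physical disks that must
//          not be exposed) -> DID_BAD_TARGET, except that a failed
//          TEST_UNIT_READY or a failed RESERVE/RELEASE is reported as a
//          reservation conflict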
2363  switch (status) {
2364 
2365  case 0x00:
2366 
2367  scp->result = (DID_OK << 16);
2368  break;
2369 
2370  case 0x02:
2371 
2372  /* set sense_buffer and result fields */
2373  if (mbox->cmd == MBOXCMD_PASSTHRU ||
2374  mbox->cmd == MBOXCMD_PASSTHRU64) {
2375 
2376  memcpy(scp->sense_buffer, pthru->reqsensearea,
2377  14);
2378 
2379  scp->result = DRIVER_SENSE << 24 |
2380  DID_OK << 16 | CHECK_CONDITION << 1;
2381  }
2382  else {
2383  if (mbox->cmd == MBOXCMD_EXTPTHRU) {
2384 
2385  memcpy(scp->sense_buffer,
2386  epthru->reqsensearea, 14);
2387 
2388  scp->result = DRIVER_SENSE << 24 |
2389  DID_OK << 16 |
2390  CHECK_CONDITION << 1;
2391  } else {
2392  scp->sense_buffer[0] = 0x70;
2393  scp->sense_buffer[2] = ABORTED_COMMAND;
2394  scp->result = CHECK_CONDITION << 1;
2395  }
2396  }
2397  break;
2398 
2399  case 0x08:
2400 
2401  scp->result = DID_BUS_BUSY << 16 | status;
2402  break;
2403 
2404  default:
2405 
2406  /*
2407  * If TEST_UNIT_READY fails, we know RESERVATION_STATUS
2408  * failed
2409  */
2410  if (scp->cmnd[0] == TEST_UNIT_READY) {
2411  scp->result = DID_ERROR << 16 |
2412  RESERVATION_CONFLICT << 1;
2413  }
2414  else
2415  /*
2416  * Error code returned is 1 if Reserve or Release
2417  * failed or the input parameter is invalid
2418  */
2419  if (status == 1 && (scp->cmnd[0] == RESERVE ||
2420  scp->cmnd[0] == RELEASE)) {
2421 
2422  scp->result = DID_ERROR << 16 |
2423  RESERVATION_CONFLICT << 1;
2424  }
2425  else {
2426  scp->result = DID_BAD_TARGET << 16 | status;
2427  }
2428  }
2429 
2430  // print a debug message for all failed commands
2431  if (status) {
2432  megaraid_mbox_display_scb(adapter, scb);
2433  }
2434 
2435  // Free our internal resources and call the mid-layer callback
2436  // routine
2437  megaraid_mbox_sync_scb(adapter, scb);
2438 
2439  // remove from local clist
2440  list_del_init(&scb->list);
2441 
2442  // put back in free list
2443  megaraid_dealloc_scb(adapter, scb);
2444 
2445  // send the scsi packet back to kernel
2446  scp->scsi_done(scp);
2447  }
2448 
2449  return;
2450 }
2451 
2452 
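/*
 * megaraid_abort_handler - SCSI mid-layer abort handler.
 * Looks for the command in three places: the completed list (it has
 * already finished, so it is completed with DID_ABORT and SUCCESS is
 * returned), the pending list (it was never issued, same treatment), and
 * finally the array of kernel SCBs. A command that is owned by the
 * firmware cannot be aborted, so FAILED is returned and error recovery
 * escalates to a host reset.
 */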
2460 static int
2461 megaraid_abort_handler(struct scsi_cmnd *scp)
2462 {
2463  adapter_t *adapter;
2464  mraid_device_t *raid_dev;
2465  scb_t *scb;
2466  scb_t *tmp;
2467  int found;
2468  unsigned long flags;
2469  int i;
2470 
2471 
2472  adapter = SCP2ADAPTER(scp);
2473  raid_dev = ADAP2RAIDDEV(adapter);
2474 
2475  con_log(CL_ANN, (KERN_WARNING
2476  "megaraid: aborting cmd=%x <c=%d t=%d l=%d>\n",
2477  scp->cmnd[0], SCP2CHANNEL(scp),
2478  SCP2TARGET(scp), SCP2LUN(scp)));
2479 
2480  // If FW has stopped responding, simply return failure
2481  if (raid_dev->hw_error) {
2482  con_log(CL_ANN, (KERN_NOTICE
2483  "megaraid: hw error, not aborting\n"));
2484  return FAILED;
2485  }
2486 
2487  // There might be a race here, where the command was completed by the
2488  // firmware and is now on the completed list. Before we could
2489  // complete the command to the kernel in the DPC, the abort came in.
2490  // Find out if this is the case to avoid the race.
2491  scb = NULL;
2492  spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2493  list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) {
2494 
2495  if (scb->scp == scp) { // Found command
2496 
2497  list_del_init(&scb->list); // from completed list
2498 
2499  con_log(CL_ANN, (KERN_WARNING
2500  "megaraid: %d[%d:%d], abort from completed list\n",
2501  scb->sno, scb->dev_channel, scb->dev_target));
2502 
2503  scp->result = (DID_ABORT << 16);
2504  scp->scsi_done(scp);
2505 
2506  megaraid_dealloc_scb(adapter, scb);
2507 
2508  spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter),
2509  flags);
2510 
2511  return SUCCESS;
2512  }
2513  }
2514  spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2515 
2516 
2517  // Find out if this command is still on the pending list. If it is and
2518  // was never issued, abort and return success. If the command is owned
2519  // by the firmware, we must wait for the FW to complete it.
2520  spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2521  list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
2522 
2523  if (scb->scp == scp) { // Found command
2524 
2525  list_del_init(&scb->list); // from pending list
2526 
2527  ASSERT(!(scb->state & SCB_ISSUED));
2528 
2529  con_log(CL_ANN, (KERN_WARNING
2530  "megaraid abort: [%d:%d], driver owner\n",
2531  scb->dev_channel, scb->dev_target));
2532 
2533  scp->result = (DID_ABORT << 16);
2534  scp->scsi_done(scp);
2535 
2536  megaraid_dealloc_scb(adapter, scb);
2537 
2538  spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
2539  flags);
2540 
2541  return SUCCESS;
2542  }
2543  }
2544  spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2545 
2546 
2547  // Check whether we even own this command; if we don't, it must be
2548  // owned by the firmware. The only way to locate a FW-owned scb is to
2549  // traverse the array of all SCBs, since the driver does not
2550  // maintain these SCBs on any list.
2551  found = 0;
2552  spin_lock_irq(&adapter->lock);
2553  for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
2554  scb = adapter->kscb_list + i;
2555 
2556  if (scb->scp == scp) {
2557 
2558  found = 1;
2559 
2560  if (!(scb->state & SCB_ISSUED)) {
2561  con_log(CL_ANN, (KERN_WARNING
2562  "megaraid abort: %d[%d:%d], invalid state\n",
2563  scb->sno, scb->dev_channel, scb->dev_target));
2564  BUG();
2565  }
2566  else {
2567  con_log(CL_ANN, (KERN_WARNING
2568  "megaraid abort: %d[%d:%d], fw owner\n",
2569  scb->sno, scb->dev_channel, scb->dev_target));
2570  }
2571  }
2572  }
2573  spin_unlock_irq(&adapter->lock);
2574 
2575  if (!found) {
2576  con_log(CL_ANN, (KERN_WARNING "megaraid abort: do not own\n"));
2577 
2578  // FIXME: Should there be a callback for this command?
2579  return SUCCESS;
2580  }
2581 
2582  // We cannot actually abort a command owned by firmware, return
2583  // failure and wait for reset. In host reset handler, we will find out
2584  // if the HBA is still live
2585  return FAILED;
2586 }
2587 
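/*
 * megaraid_reset_handler - SCSI mid-layer host reset handler.
 * Flushes the driver-owned pending list (management packets are failed
 * with -EFAULT, regular I/O is completed with DID_RESET), then waits up
 * to MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT seconds for the commands owned
 * by the firmware to drain, acknowledging completions in the meantime.
 * If commands are still outstanding after that window, the controller is
 * marked as a hardware error; otherwise SCSI reservations are cleared on
 * cluster-capable (adapter->ha) controllers.
 */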
2598 static int
2599 megaraid_reset_handler(struct scsi_cmnd *scp)
2600 {
2601  adapter_t *adapter;
2602  scb_t *scb;
2603  scb_t *tmp;
2604  mraid_device_t *raid_dev;
2605  unsigned long flags;
2606  uint8_t raw_mbox[sizeof(mbox_t)];
2607  int rval;
2608  int recovery_window;
2609  int recovering;
2610  int i;
2611  uioc_t *kioc;
2612 
2613  adapter = SCP2ADAPTER(scp);
2614  raid_dev = ADAP2RAIDDEV(adapter);
2615 
2616  // return failure if adapter is not responding
2617  if (raid_dev->hw_error) {
2618  con_log(CL_ANN, (KERN_NOTICE
2619  "megaraid: hw error, cannot reset\n"));
2620  return FAILED;
2621  }
2622 
2623 
2624  // Under exceptional conditions, FW can take up to 3 minutes to
2625  // complete command processing. Wait an additional 2 minutes for the
2626  // pending commands counter to go down to 0. If it doesn't, the
2627  // controller will be marked offline.
2628  // Also, reset all the commands currently owned by the driver
2629  spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2630  list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
2631  list_del_init(&scb->list); // from pending list
2632 
2633  if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
2634  con_log(CL_ANN, (KERN_WARNING
2635  "megaraid: IOCTL packet with %d[%d:%d] being reset\n",
2636  scb->sno, scb->dev_channel, scb->dev_target));
2637 
2638  scb->status = -1;
2639 
2640  kioc = (uioc_t *)scb->gp;
2641  kioc->status = -EFAULT;
2642 
2643  megaraid_mbox_mm_done(adapter, scb);
2644  } else {
2645  if (scb->scp == scp) { // Found command
2646  con_log(CL_ANN, (KERN_WARNING
2647  "megaraid: %d[%d:%d], reset from pending list\n",
2648  scb->sno, scb->dev_channel, scb->dev_target));
2649  } else {
2650  con_log(CL_ANN, (KERN_WARNING
2651  "megaraid: IO packet with %d[%d:%d] being reset\n",
2652  scb->sno, scb->dev_channel, scb->dev_target));
2653  }
2654 
2655  scb->scp->result = (DID_RESET << 16);
2656  scb->scp->scsi_done(scb->scp);
2657 
2658  megaraid_dealloc_scb(adapter, scb);
2659  }
2660  }
2661  spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2662 
2663  if (adapter->outstanding_cmds) {
2664  con_log(CL_ANN, (KERN_NOTICE
2665  "megaraid: %d outstanding commands. Max wait %d sec\n",
2666  adapter->outstanding_cmds,
2667  (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT)));
2668  }
2669 
2670  recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
2671 
2672  recovering = adapter->outstanding_cmds;
2673 
2674  for (i = 0; i < recovery_window; i++) {
2675 
2676  megaraid_ack_sequence(adapter);
2677 
2678  // print a message once every 5 seconds only
2679  if (!(i % 5)) {
2680  con_log(CL_ANN, (
2681  "megaraid mbox: Wait for %d commands to complete:%d\n",
2682  adapter->outstanding_cmds,
2683  (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i));
2684  }
2685 
2686  // bail out early once all outstanding commands have completed
2687  if (adapter->outstanding_cmds == 0) {
2688  break;
2689  }
2690 
2691  msleep(1000);
2692  }
2693 
2694  spin_lock(&adapter->lock);
2695 
2696  // If still outstanding commands, bail out
2697  if (adapter->outstanding_cmds) {
2698  con_log(CL_ANN, (KERN_WARNING
2699  "megaraid mbox: critical hardware error!\n"));
2700 
2701  raid_dev->hw_error = 1;
2702 
2703  rval = FAILED;
2704  goto out;
2705  }
2706  else {
2707  con_log(CL_ANN, (KERN_NOTICE
2708  "megaraid mbox: reset sequence completed successfully\n"));
2709  }
2710 
2711 
2712  // If the controller supports clustering, reset reservations
2713  if (!adapter->ha) {
2714  rval = SUCCESS;
2715  goto out;
2716  }
2717 
2718  // clear reservations if any
2719  raw_mbox[0] = CLUSTER_CMD;
2720  raw_mbox[2] = RESET_RESERVATIONS;
2721 
2722  rval = SUCCESS;
2723  if (mbox_post_sync_cmd_fast(adapter, raw_mbox) == 0) {
2724  con_log(CL_ANN,
2725  (KERN_INFO "megaraid: reservation reset\n"));
2726  }
2727  else {
2728  rval = FAILED;
2729  con_log(CL_ANN, (KERN_WARNING
2730  "megaraid: reservation reset failed\n"));
2731  }
2732 
2733  out:
2734  spin_unlock(&adapter->lock);
2735  return rval;
2736 }
2737 
2738 /*
2739  * START: internal commands library
2740  *
2741  * This section of the driver has the common routine used by the driver and
2742  * also has all the FW routines
2743  */
2744 
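/*
 * mbox_post_sync_cmd - issue a mailbox command and poll for completion.
 * The handshake with the firmware is: wait for the mailbox to become
 * free, copy the 16-byte command in, mark it busy and ring the doorbell
 * with bit 0 set; poll numstatus for up to a second (and much longer
 * while the firmware is still booting), wait for the poll byte to read
 * 0x77, then ring the doorbell with bit 1 set and wait for that bit to
 * clear as the acknowledgement. The firmware status is returned and the
 * status fields are re-poisoned with 0xFF for the next command.
 */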
2753 static int
2754 mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
2755 {
2756  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
2757  mbox64_t *mbox64;
2758  mbox_t *mbox;
2759  uint8_t status;
2760  int i;
2761 
2762 
2763  mbox64 = raid_dev->mbox64;
2764  mbox = raid_dev->mbox;
2765 
2766  /*
2767  * Wait until mailbox is free
2768  */
2769  if (megaraid_busywait_mbox(raid_dev) != 0)
2770  goto blocked_mailbox;
2771 
2772  /*
2773  * Copy mailbox data into host structure
2774  */
2775  memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
2776  mbox->cmdid = 0xFE;
2777  mbox->busy = 1;
2778  mbox->poll = 0;
2779  mbox->ack = 0;
2780  mbox->numstatus = 0xFF;
2781  mbox->status = 0xFF;
2782 
2783  wmb();
2784  WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
2785 
2786  // wait a maximum of 1 second for the status to post. If the status is
2787  // not available within 1 second, assume the FW is initializing and
2788  // wait for an extended amount of time
2789  if (mbox->numstatus == 0xFF) { // status not yet available
2790  udelay(25);
2791 
2792  for (i = 0; mbox->numstatus == 0xFF && i < 1000; i++) {
2793  rmb();
2794  msleep(1);
2795  }
2796 
2797 
2798  if (i == 1000) {
2799  con_log(CL_ANN, (KERN_NOTICE
2800  "megaraid mailbox: wait for FW to boot "));
2801 
2802  for (i = 0; (mbox->numstatus == 0xFF) &&
2803  (i < MBOX_RESET_WAIT); i++) {
2804  rmb();
2805  con_log(CL_ANN, ("\b\b\b\b\b[%03d]",
2806  MBOX_RESET_WAIT - i));
2807  msleep(1000);
2808  }
2809 
2810  if (i == MBOX_RESET_WAIT) {
2811 
2812  con_log(CL_ANN, (
2813  "\nmegaraid mailbox: status not available\n"));
2814 
2815  return -1;
2816  }
2817  con_log(CL_ANN, ("\b\b\b\b\b[ok] \n"));
2818  }
2819  }
2820 
2821  // wait a maximum of 1 second for the poll semaphore
2822  if (mbox->poll != 0x77) {
2823  udelay(25);
2824 
2825  for (i = 0; (mbox->poll != 0x77) && (i < 1000); i++) {
2826  rmb();
2827  msleep(1);
2828  }
2829 
2830  if (i == 1000) {
2831  con_log(CL_ANN, (KERN_WARNING
2832  "megaraid mailbox: could not get poll semaphore\n"));
2833  return -1;
2834  }
2835  }
2836 
2837  WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
2838  wmb();
2839 
2840  // wait a maximum of 1 second for the acknowledgement
2841  if (RDINDOOR(raid_dev) & 0x2) {
2842  udelay(25);
2843 
2844  for (i = 0; (RDINDOOR(raid_dev) & 0x2) && (i < 1000); i++) {
2845  rmb();
2846  msleep(1);
2847  }
2848 
2849  if (i == 1000) {
2850  con_log(CL_ANN, (KERN_WARNING
2851  "megaraid mailbox: could not acknowledge\n"));
2852  return -1;
2853  }
2854  }
2855  mbox->poll = 0;
2856  mbox->ack = 0x77;
2857 
2858  status = mbox->status;
2859 
2860  // invalidate the completed command id array. After command
2861  // completion, the firmware will write back the valid id.
2862  mbox->numstatus = 0xFF;
2863  mbox->status = 0xFF;
2864  for (i = 0; i < MBOX_MAX_FIRMWARE_STATUS; i++) {
2865  mbox->completed[i] = 0xFF;
2866  }
2867 
2868  return status;
2869 
2870 blocked_mailbox:
2871 
2872  con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n") );
2873  return -1;
2874 }
2875 
2876 
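/*
 * mbox_post_sync_cmd_fast - issue a mailbox command without sleeping.
 * Unlike mbox_post_sync_cmd(), this variant returns immediately if the
 * mailbox is busy and busy-waits for the status, so it can be called
 * with the adapter lock held, as the reset handler does when it clears
 * cluster reservations.
 */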
2886 static int
2887 mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
2888 {
2889  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
2890  mbox_t *mbox;
2891  long i;
2892 
2893 
2894  mbox = raid_dev->mbox;
2895 
2896  // return immediately if the mailbox is busy
2897  if (mbox->busy) return -1;
2898 
2899  // Copy mailbox data into host structure
2900  memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 14);
2901  mbox->cmdid = 0xFE;
2902  mbox->busy = 1;
2903  mbox->poll = 0;
2904  mbox->ack = 0;
2905  mbox->numstatus = 0xFF;
2906  mbox->status = 0xFF;
2907 
2908  wmb();
2909  WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
2910 
2911  for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
2912  if (mbox->numstatus != 0xFF) break;
2913  rmb();
2914  udelay(MBOX_SYNC_DELAY_200);
2915  }
2916 
2917  if (i == MBOX_SYNC_WAIT_CNT) {
2918  // We may need to re-calibrate the counter
2919  con_log(CL_ANN, (KERN_CRIT
2920  "megaraid: fast sync command timed out\n"));
2921  }
2922 
2923  WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
2924  wmb();
2925 
2926  return mbox->status;
2927 }
2928 
2929 
2937 static int
2938 megaraid_busywait_mbox(mraid_device_t *raid_dev)
2939 {
2940  mbox_t *mbox = raid_dev->mbox;
2941  int i = 0;
2942 
2943  if (mbox->busy) {
2944  udelay(25);
2945  for (i = 0; mbox->busy && i < 1000; i++)
2946  msleep(1);
2947  }
2948 
2949  if (i < 1000) return 0;
2950  else return -1;
2951 }
2952 
2953 
2961 static int
2962 megaraid_mbox_product_info(adapter_t *adapter)
2963 {
2964  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
2965  mbox_t *mbox;
2966  uint8_t raw_mbox[sizeof(mbox_t)];
2967  mraid_pinfo_t *pinfo;
2968  dma_addr_t pinfo_dma_h;
2969  mraid_inquiry3_t *mraid_inq3;
2970  int i;
2971 
2972 
2973  memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
2974  mbox = (mbox_t *)raw_mbox;
2975 
2976  /*
2977  * Issue an ENQUIRY3 command to find out certain adapter parameters,
2978  * e.g., max channels, max commands etc.
2979  */
2980  pinfo = pci_alloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
2981  &pinfo_dma_h);
2982 
2983  if (pinfo == NULL) {
2984  con_log(CL_ANN, (KERN_WARNING
2985  "megaraid: out of memory, %s %d\n", __func__,
2986  __LINE__));
2987 
2988  return -1;
2989  }
2990  memset(pinfo, 0, sizeof(mraid_pinfo_t));
2991 
2992  mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
2993  memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
2994 
2995  raw_mbox[0] = FC_NEW_CONFIG;
2996  raw_mbox[2] = NC_SUBOP_ENQUIRY3;
2997  raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;
2998 
2999  // Issue the command
3000  if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3001 
3002  con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
3003 
3004  pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
3005  pinfo, pinfo_dma_h);
3006 
3007  return -1;
3008  }
3009 
3010  /*
3011  * Collect information about the state of each physical drive
3012  * attached to the controller. We will expose all the disks
3013  * which are not part of any RAID set.
3014  */
3015  mraid_inq3 = (mraid_inquiry3_t *)adapter->ibuf;
3016  for (i = 0; i < MBOX_MAX_PHYSICAL_DRIVES; i++) {
3017  raid_dev->pdrv_state[i] = mraid_inq3->pdrv_state[i];
3018  }
3019 
3020  /*
3021  * Get product info for information like number of channels,
3022  * maximum commands supported.
3023  */
3024  memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3025  mbox->xferaddr = (uint32_t)pinfo_dma_h;
3026 
3027  raw_mbox[0] = FC_NEW_CONFIG;
3028  raw_mbox[2] = NC_SUBOP_PRODUCT_INFO;
3029 
3030  if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3031 
3032  con_log(CL_ANN, (KERN_WARNING
3033  "megaraid: product info failed\n"));
3034 
3035  pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
3036  pinfo, pinfo_dma_h);
3037 
3038  return -1;
3039  }
3040 
3041  /*
3042  * Setup some parameters for host, as required by our caller
3043  */
3044  adapter->max_channel = pinfo->nchannels;
3045 
3046  /*
3047  * We will export all the logical drives on a single channel.
3048  * Add 1 since inquiries do not come for the initiator ID.
3049  */
3050  adapter->max_target = MAX_LOGICAL_DRIVES_40LD + 1;
3051  adapter->max_lun = 8; // up to 8 LUNs for non-disk devices
3052 
3053  /*
3054  * These are the maximum outstanding commands for the scsi-layer
3055  */
3056  adapter->max_cmds = MBOX_MAX_SCSI_CMDS;
3057 
3058  memset(adapter->fw_version, 0, VERSION_SIZE);
3059  memset(adapter->bios_version, 0, VERSION_SIZE);
3060 
3061  memcpy(adapter->fw_version, pinfo->fw_version, 4);
3062  adapter->fw_version[4] = 0;
3063 
3064  memcpy(adapter->bios_version, pinfo->bios_version, 4);
3065  adapter->bios_version[4] = 0;
3066 
3067  con_log(CL_DLEVEL1, (KERN_INFO
3068  "megaraid: fw version:[%s] bios version:[%s]\n",
3069  adapter->fw_version, adapter->bios_version));
3070 
3071  pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
3072  pinfo_dma_h);
3073 
3074  return 0;
3075 }
3076 
3077 
3078 
3086 static int
3087 megaraid_mbox_extended_cdb(adapter_t *adapter)
3088 {
3089  mbox_t *mbox;
3090  uint8_t raw_mbox[sizeof(mbox_t)];
3091  int rval;
3092 
3093  mbox = (mbox_t *)raw_mbox;
3094 
3095  memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3096  mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3097 
3098  memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3099 
3100  raw_mbox[0] = MAIN_MISC_OPCODE;
3101  raw_mbox[2] = SUPPORT_EXT_CDB;
3102 
3103  /*
3104  * Issue the command
3105  */
3106  rval = 0;
3107  if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3108  rval = -1;
3109  }
3110 
3111  return rval;
3112 }
3113 
3114 
3122 static int
3123 megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
3124 {
3125  mbox_t *mbox;
3126  uint8_t raw_mbox[sizeof(mbox_t)];
3127  int rval;
3128 
3129 
3130  mbox = (mbox_t *)raw_mbox;
3131 
3132  memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3133 
3134  mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3135 
3136  memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3137 
3138  raw_mbox[0] = GET_TARGET_ID;
3139 
3140  // Issue the command
3141  *init_id = 7;
3142  rval = -1;
3143  if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3144 
3145  *init_id = *(uint8_t *)adapter->ibuf;
3146 
3147  con_log(CL_ANN, (KERN_INFO
3148  "megaraid: cluster firmware, initiator ID: %d\n",
3149  *init_id));
3150 
3151  rval = 0;
3152  }
3153 
3154  return rval;
3155 }
3156 
3157 
3165 static int
3166 megaraid_mbox_support_random_del(adapter_t *adapter)
3167 {
3168  mbox_t *mbox;
3169  uint8_t raw_mbox[sizeof(mbox_t)];
3170  int rval;
3171 
3172  /*
3173  * Newer firmware on the Dell CERC expects a different
3174  * random deletion handling, so disable it.
3175  */
3176  if (adapter->pdev->vendor == PCI_VENDOR_ID_AMI &&
3177  adapter->pdev->device == PCI_DEVICE_ID_AMI_MEGARAID3 &&
3178  adapter->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
3179  adapter->pdev->subsystem_device == PCI_SUBSYS_ID_CERC_ATA100_4CH &&
3180  (adapter->fw_version[0] > '6' ||
3181  (adapter->fw_version[0] == '6' &&
3182  adapter->fw_version[2] > '6') ||
3183  (adapter->fw_version[0] == '6'
3184  && adapter->fw_version[2] == '6'
3185  && adapter->fw_version[3] > '1'))) {
3186  con_log(CL_DLEVEL1, ("megaraid: disable random deletion\n"));
3187  return 0;
3188  }
3189 
3190  mbox = (mbox_t *)raw_mbox;
3191 
3192  memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3193 
3194  raw_mbox[0] = FC_DEL_LOGDRV;
3195  raw_mbox[2] = OP_SUP_DEL_LOGDRV;
3196 
3197  // Issue the command
3198  rval = 0;
3199  if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3200 
3201  con_log(CL_DLEVEL1, ("megaraid: supports random deletion\n"));
3202 
3203  rval = 1;
3204  }
3205 
3206  return rval;
3207 }
3208 
3209 
3217 static int
3218 megaraid_mbox_get_max_sg(adapter_t *adapter)
3219 {
3220  mbox_t *mbox;
3221  uint8_t raw_mbox[sizeof(mbox_t)];
3222  int nsg;
3223 
3224 
3225  mbox = (mbox_t *)raw_mbox;
3226 
3227  memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3228 
3229  mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3230 
3231  memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3232 
3233  raw_mbox[0] = MAIN_MISC_OPCODE;
3234  raw_mbox[2] = GET_MAX_SG_SUPPORT;
3235 
3236  // Issue the command
3237  if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3238  nsg = *(uint8_t *)adapter->ibuf;
3239  }
3240  else {
3241  nsg = MBOX_DEFAULT_SG_SIZE;
3242  }
3243 
3244  if (nsg > MBOX_MAX_SG_SIZE) nsg = MBOX_MAX_SG_SIZE;
3245 
3246  return nsg;
3247 }
3248 
3249 
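/*
 * megaraid_mbox_enum_raid_scsi - find out which channels are RAID and
 * which are plain SCSI. The firmware returns a per-channel bitmap in
 * channel_class (a set bit marks the corresponding channel as a RAID
 * channel, hence the all-ones default); if the query fails, every
 * channel is treated as a RAID channel.
 */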
3257 static void
3258 megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
3259 {
3260  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3261  mbox_t *mbox;
3262  uint8_t raw_mbox[sizeof(mbox_t)];
3263 
3264 
3265  mbox = (mbox_t *)raw_mbox;
3266 
3267  memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3268 
3269  mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3270 
3271  memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3272 
3273  raw_mbox[0] = CHNL_CLASS;
3274  raw_mbox[2] = GET_CHNL_CLASS;
3275 
3276  // Issue the command. If the command fails, all channels are RAID
3277  // channels
3278  raid_dev->channel_class = 0xFF;
3279  if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3280  raid_dev->channel_class = *(uint8_t *)adapter->ibuf;
3281  }
3282 
3283  return;
3284 }
3285 
3286 
3293 static void
3294 megaraid_mbox_flush_cache(adapter_t *adapter)
3295 {
3296  mbox_t *mbox;
3297  uint8_t raw_mbox[sizeof(mbox_t)];
3298 
3299 
3300  mbox = (mbox_t *)raw_mbox;
3301 
3302  memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3303 
3304  raw_mbox[0] = FLUSH_ADAPTER;
3305 
3306  if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3307  con_log(CL_ANN, ("megaraid: flush adapter failed\n"));
3308  }
3309 
3310  raw_mbox[0] = FLUSH_SYSTEM;
3311 
3312  if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3313  con_log(CL_ANN, ("megaraid: flush disks cache failed\n"));
3314  }
3315 
3316  return;
3317 }
3318 
3319 
3326 static int
3327 megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
3328 {
3329  mbox_t *mbox;
3330  uint8_t raw_mbox[sizeof(mbox_t)];
3331  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3332  mbox64_t *mbox64;
3333  int status = 0;
3334  int i;
3335  uint32_t dword;
3336 
3337  mbox = (mbox_t *)raw_mbox;
3338 
3339  memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3340 
3341  raw_mbox[0] = 0xFF;
3342 
3343  mbox64 = raid_dev->mbox64;
3344  mbox = raid_dev->mbox;
3345 
3346  /* Wait until mailbox is free */
3347  if (megaraid_busywait_mbox(raid_dev) != 0) {
3348  status = 1;
3349  goto blocked_mailbox;
3350  }
3351 
3352  /* Copy mailbox data into host structure */
3353  memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
3354  mbox->cmdid = 0xFE;
3355  mbox->busy = 1;
3356  mbox->poll = 0;
3357  mbox->ack = 0;
3358  mbox->numstatus = 0;
3359  mbox->status = 0;
3360 
3361  wmb();
3362  WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
3363 
3364  /* Wait for maximum 1 min for status to post.
3365  * If the Firmware SUPPORTS the ABOVE COMMAND,
3366  * mbox->cmd will be set to 0
3367  * else
3368  * the firmware will reject the command with
3369  * mbox->numstatus set to 1
3370  */
3371 
3372  i = 0;
3373  status = 0;
3374  while (!mbox->numstatus && mbox->cmd == 0xFF) {
3375  rmb();
3376  msleep(1);
3377  i++;
3378  if (i > 1000 * 60) {
3379  status = 1;
3380  break;
3381  }
3382  }
3383  if (mbox->numstatus == 1)
3384  status = 1; /*cmd not supported*/
3385 
3386  /* Check for interrupt line */
3387  dword = RDOUTDOOR(raid_dev);
3388  WROUTDOOR(raid_dev, dword);
3389  WRINDOOR(raid_dev,2);
3390 
3391  return status;
3392 
3393 blocked_mailbox:
3394  con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n"));
3395  return status;
3396 }
3397 
3407 static void
3408 megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
3409 {
3410  mbox_ccb_t *ccb;
3411  struct scsi_cmnd *scp;
3412  mbox_t *mbox;
3413  int level;
3414  int i;
3415 
3416 
3417  ccb = (mbox_ccb_t *)scb->ccb;
3418  scp = scb->scp;
3419  mbox = ccb->mbox;
3420 
3421  level = CL_DLEVEL3;
3422 
3423  con_log(level, (KERN_NOTICE
3424  "megaraid mailbox: status:%#x cmd:%#x id:%#x ", scb->status,
3425  mbox->cmd, scb->sno));
3426 
3427  con_log(level, ("sec:%#x lba:%#x addr:%#x ld:%d sg:%d\n",
3428  mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv,
3429  mbox->numsge));
3430 
3431  if (!scp) return;
3432 
3433  con_log(level, (KERN_NOTICE "scsi cmnd: "));
3434 
3435  for (i = 0; i < scp->cmd_len; i++) {
3436  con_log(level, ("%#2.02x ", scp->cmnd[i]));
3437  }
3438 
3439  con_log(level, ("\n"));
3440 
3441  return;
3442 }
3443 
3444 
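/*
 * megaraid_mbox_setup_device_map - build the SCSI to device-id table.
 * On the virtual channel (index max_channel), target t maps to logical
 * drive t when t is below the initiator id, to t - 1 when it is above,
 * and the initiator's own target maps to 0xFF (no drive). On the
 * physical channels, the id encodes the channel in the high byte and the
 * target in the low byte. For example, with init_id == 7, targets 0 to 6
 * map to logical drives 0 to 6, target 7 maps to 0xFF and target 8 maps
 * to logical drive 7; physical channel 1, target 3 becomes 0x0103.
 */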
3454 static void
3455 megaraid_mbox_setup_device_map(adapter_t *adapter)
3456 {
3457  uint8_t c;
3458  uint8_t t;
3459 
3460  /*
3461  * First fill the values on the logical drive channel
3462  */
3463  for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
3464  adapter->device_ids[adapter->max_channel][t] =
3465  (t < adapter->init_id) ? t : t - 1;
3466 
3467  adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF;
3468 
3469  /*
3470  * Fill in the values for the physical device channels
3471  */
3472  for (c = 0; c < adapter->max_channel; c++)
3473  for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
3474  adapter->device_ids[c][t] = (c << 8) | t;
3475 }
3476 
3477 
3478 /*
3479  * END: internal commands library
3480  */
3481 
3482 /*
3483  * START: Interface for the common management module
3484  *
3485  * This is the module, which interfaces with the common management module to
3486  * provide support for ioctl and sysfs
3487  */
3488 
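/*
 * megaraid_cmm_register - register this adapter with the common
 * management module. Allocates MBOX_MAX_USER_CMDS user SCBs, links each
 * one to its pre-allocated user CCB and 64-bit mailbox, parks them on
 * the uscb_pool free list, and then calls mraid_mm_register_adp() with
 * this driver's ioctl entry point (megaraid_mbox_mm_handler).
 */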
3497 static int
3498 megaraid_cmm_register(adapter_t *adapter)
3499 {
3500  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3501  mraid_mmadp_t adp;
3502  scb_t *scb;
3503  mbox_ccb_t *ccb;
3504  int rval;
3505  int i;
3506 
3507  // Allocate memory for the base list of scb for management module.
3508  adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
3509 
3510  if (adapter->uscb_list == NULL) {
3511  con_log(CL_ANN, (KERN_WARNING
3512  "megaraid: out of memory, %s %d\n", __func__,
3513  __LINE__));
3514  return -1;
3515  }
3516 
3517 
3518  // Initialize the synchronization parameters for resources for
3519  // commands for management module
3520  INIT_LIST_HEAD(&adapter->uscb_pool);
3521 
3522  spin_lock_init(USER_FREE_LIST_LOCK(adapter));
3523 
3524 
3525 
3526  // Link all the packets. Note: for the CCBs of commands coming from
3527  // the common management module, the mailbox physical addresses are
3528  // already set up by it. We just need a placeholder for them in our
3529  // local command control blocks.
3530  for (i = 0; i < MBOX_MAX_USER_CMDS; i++) {
3531 
3532  scb = adapter->uscb_list + i;
3533  ccb = raid_dev->uccb_list + i;
3534 
3535  scb->ccb = (caddr_t)ccb;
3536  ccb->mbox64 = raid_dev->umbox64 + i;
3537  ccb->mbox = &ccb->mbox64->mbox32;
3538  ccb->raw_mbox = (uint8_t *)ccb->mbox;
3539 
3540  scb->gp = 0;
3541 
3542  // COMMAND ID 0 - (MBOX_MAX_SCSI_CMDS-1) ARE RESERVED FOR
3543  // COMMANDS COMING FROM IO SUBSYSTEM (MID-LAYER)
3544  scb->sno = i + MBOX_MAX_SCSI_CMDS;
3545 
3546  scb->scp = NULL;
3547  scb->state = SCB_FREE;
3548  scb->dma_direction = PCI_DMA_NONE;
3549  scb->dma_type = MRAID_DMA_NONE;
3550  scb->dev_channel = -1;
3551  scb->dev_target = -1;
3552 
3553  // put scb in the free pool
3554  list_add_tail(&scb->list, &adapter->uscb_pool);
3555  }
3556 
3557  adp.unique_id = adapter->unique_id;
3558  adp.drvr_type = DRVRTYPE_MBOX;
3559  adp.drvr_data = (unsigned long)adapter;
3560  adp.pdev = adapter->pdev;
3561  adp.issue_uioc = megaraid_mbox_mm_handler;
3562  adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
3563  adp.max_kioc = MBOX_MAX_USER_CMDS;
3564 
3565  if ((rval = mraid_mm_register_adp(&adp)) != 0) {
3566 
3567  con_log(CL_ANN, (KERN_WARNING
3568  "megaraid mbox: did not register with CMM\n"));
3569 
3570  kfree(adapter->uscb_list);
3571  }
3572 
3573  return rval;
3574 }
3575 
3576 
3585 static int
3586 megaraid_cmm_unregister(adapter_t *adapter)
3587 {
3588  kfree(adapter->uscb_list);
3589  mraid_mm_unregister_adp(adapter->unique_id);
3590  return 0;
3591 }
3592 
3593 
3604 static int
3605 megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action)
3606 {
3607  adapter_t *adapter;
3608 
3609  if (action != IOCTL_ISSUE) {
3610  con_log(CL_ANN, (KERN_WARNING
3611  "megaraid: unsupported management action:%#2x\n",
3612  action));
3613  return (-ENOTSUPP);
3614  }
3615 
3616  adapter = (adapter_t *)drvr_data;
3617 
3618  // make sure this adapter is not being detached right now.
3619  if (atomic_read(&adapter->being_detached)) {
3620  con_log(CL_ANN, (KERN_WARNING
3621  "megaraid: reject management request, detaching\n"));
3622  return (-ENODEV);
3623  }
3624 
3625  switch (kioc->opcode) {
3626 
3627  case GET_ADAP_INFO:
3628 
3629  kioc->status = gather_hbainfo(adapter, (mraid_hba_info_t *)
3630  (unsigned long)kioc->buf_vaddr);
3631 
3632  kioc->done(kioc);
3633 
3634  return kioc->status;
3635 
3636  case MBOX_CMD:
3637 
3638  return megaraid_mbox_mm_command(adapter, kioc);
3639 
3640  default:
3641  kioc->status = (-EINVAL);
3642  kioc->done(kioc);
3643  return (-EINVAL);
3644  }
3645 
3646  return 0; // not reached
3647 }
3648 
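/*
 * megaraid_mbox_mm_command - issue a mailbox command on behalf of the
 * common management module. A user SCB is taken from uscb_pool and the
 * caller's 64-bit mailbox is copied into it. A logical drive delete
 * (FC_DEL_LOGDRV/OP_DEL_LOGDRV) is only posted once the firmware has
 * drained (wait_till_fw_empty()); everything else goes through the
 * normal pending queue via megaraid_mbox_runpendq().
 */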
3656 static int
3657 megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
3658 {
3659  struct list_head *head = &adapter->uscb_pool;
3660  mbox64_t *mbox64;
3661  uint8_t *raw_mbox;
3662  scb_t *scb;
3663  mbox_ccb_t *ccb;
3664  unsigned long flags;
3665 
3666  // detach one scb from free pool
3667  spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
3668 
3669  if (list_empty(head)) { // should never happen because of CMM
3670 
3671  con_log(CL_ANN, (KERN_WARNING
3672  "megaraid mbox: bug in cmm handler, lost resources\n"));
3673 
3674  spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3675 
3676  return (-EINVAL);
3677  }
3678 
3679  scb = list_entry(head->next, scb_t, list);
3680  list_del_init(&scb->list);
3681 
3682  spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3683 
3684  scb->state = SCB_ACTIVE;
3685  scb->dma_type = MRAID_DMA_NONE;
3686  scb->dma_direction = PCI_DMA_NONE;
3687 
3688  ccb = (mbox_ccb_t *)scb->ccb;
3689  mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
3690  raw_mbox = (uint8_t *)&mbox64->mbox32;
3691 
3692  memcpy(ccb->mbox64, mbox64, sizeof(mbox64_t));
3693 
3694  scb->gp = (unsigned long)kioc;
3695 
3696  /*
3697  * If it is a logdrv random delete operation, we have to wait till
3698  * there are no outstanding cmds at the fw and then issue it directly
3699  */
3700  if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
3701 
3702  if (wait_till_fw_empty(adapter)) {
3703  con_log(CL_ANN, (KERN_NOTICE
3704  "megaraid mbox: LD delete, timed out\n"));
3705 
3706  kioc->status = -ETIME;
3707 
3708  scb->status = -1;
3709 
3710  megaraid_mbox_mm_done(adapter, scb);
3711 
3712  return (-ETIME);
3713  }
3714 
3715  INIT_LIST_HEAD(&scb->list);
3716 
3717  scb->state = SCB_ISSUED;
3718  if (mbox_post_cmd(adapter, scb) != 0) {
3719 
3720  con_log(CL_ANN, (KERN_NOTICE
3721  "megaraid mbox: LD delete, mailbox busy\n"));
3722 
3723  kioc->status = -EBUSY;
3724 
3725  scb->status = -1;
3726 
3727  megaraid_mbox_mm_done(adapter, scb);
3728 
3729  return (-EBUSY);
3730  }
3731 
3732  return 0;
3733  }
3734 
3735  // put the command on the pending list and execute
3736  megaraid_mbox_runpendq(adapter, scb);
3737 
3738  return 0;
3739 }
3740 
3741 
3742 static int
3743 wait_till_fw_empty(adapter_t *adapter)
3744 {
3745  unsigned long flags = 0;
3746  int i;
3747 
3748 
3749  /*
3750  * Set the quiescent flag to stop issuing cmds to FW.
3751  */
3752  spin_lock_irqsave(&adapter->lock, flags);
3753  adapter->quiescent++;
3754  spin_unlock_irqrestore(&adapter->lock, flags);
3755 
3756  /*
3757  * Wait till there are no more cmds outstanding at FW. Try for at most
3758  * 60 seconds
3759  */
3760  for (i = 0; i < 60 && adapter->outstanding_cmds; i++) {
3761  con_log(CL_DLEVEL1, (KERN_INFO
3762  "megaraid: FW has %d pending commands\n",
3763  adapter->outstanding_cmds));
3764 
3765  msleep(1000);
3766  }
3767 
3768  return adapter->outstanding_cmds;
3769 }
3770 
3771 
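/*
 * megaraid_mbox_mm_done - completion path for management commands.
 * Copies the firmware status back into the caller's mailbox, returns the
 * SCB to uscb_pool and, when the command was a logical drive delete,
 * drops the quiescent count taken in wait_till_fw_empty() and restarts
 * the pending queue before signalling the caller through kioc->done().
 */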
3780 static void
3781 megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
3782 {
3783  uioc_t *kioc;
3784  mbox64_t *mbox64;
3785  uint8_t *raw_mbox;
3786  unsigned long flags;
3787 
3788  kioc = (uioc_t *)scb->gp;
3789  mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
3790  mbox64->mbox32.status = scb->status;
3791  raw_mbox = (uint8_t *)&mbox64->mbox32;
3792 
3793 
3794  // put scb in the free pool
3795  scb->state = SCB_FREE;
3796  scb->scp = NULL;
3797 
3798  spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
3799 
3800  list_add(&scb->list, &adapter->uscb_pool);
3801 
3802  spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3803 
3804  // after a delete logical drive operation, lift the quiescent state
3805  // and resume issuing commands to the controller
3806  if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
3807 
3808  adapter->quiescent--;
3809 
3810  megaraid_mbox_runpendq(adapter, NULL);
3811  }
3812 
3813  kioc->done(kioc);
3814 
3815  return;
3816 }
3817 
3818 
3824 static int
3825 gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
3826 {
3827  uint8_t dmajor;
3828 
3829  dmajor = megaraid_mbox_version[0];
3830 
3831  hinfo->pci_vendor_id = adapter->pdev->vendor;
3832  hinfo->pci_device_id = adapter->pdev->device;
3833  hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor;
3834  hinfo->subsys_device_id = adapter->pdev->subsystem_device;
3835 
3836  hinfo->pci_bus = adapter->pdev->bus->number;
3837  hinfo->pci_dev_fn = adapter->pdev->devfn;
3838  hinfo->pci_slot = PCI_SLOT(adapter->pdev->devfn);
3839  hinfo->irq = adapter->host->irq;
3840  hinfo->baseport = ADAP2RAIDDEV(adapter)->baseport;
3841 
3842  hinfo->unique_id = (hinfo->pci_bus << 8) | adapter->pdev->devfn;
3843  hinfo->host_no = adapter->host->host_no;
3844 
3845  return 0;
3846 }
3847 
3848 /*
3849  * END: Interface for the common management module
3850  */
3851 
3852 
3853 
3866 static int
3867 megaraid_sysfs_alloc_resources(adapter_t *adapter)
3868 {
3869  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3870  int rval = 0;
3871 
3872  raid_dev->sysfs_uioc = kmalloc(sizeof(uioc_t), GFP_KERNEL);
3873 
3874  raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
3875 
3876  raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
3877  PAGE_SIZE, &raid_dev->sysfs_buffer_dma);
3878 
3879  if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
3880  !raid_dev->sysfs_buffer) {
3881 
3882  con_log(CL_ANN, (KERN_WARNING
3883  "megaraid: out of memory, %s %d\n", __func__,
3884  __LINE__));
3885 
3886  rval = -ENOMEM;
3887 
3888  megaraid_sysfs_free_resources(adapter);
3889  }
3890 
3891  mutex_init(&raid_dev->sysfs_mtx);
3892 
3893  init_waitqueue_head(&raid_dev->sysfs_wait_q);
3894 
3895  return rval;
3896 }
3897 
3898 
3905 static void
3906 megaraid_sysfs_free_resources(adapter_t *adapter)
3907 {
3908  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3909 
3910  kfree(raid_dev->sysfs_uioc);
3911  kfree(raid_dev->sysfs_mbox64);
3912 
3913  if (raid_dev->sysfs_buffer) {
3914  pci_free_consistent(adapter->pdev, PAGE_SIZE,
3915  raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
3916  }
3917 }
3918 
3919 
3926 static void
3927 megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
3928 {
3929  adapter_t *adapter = (adapter_t *)uioc->buf_vaddr;
3930  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3931 
3932  uioc->status = 0;
3933 
3934  wake_up(&raid_dev->sysfs_wait_q);
3935 }
3936 
3937 
3946 static void
3947 megaraid_sysfs_get_ldmap_timeout(unsigned long data)
3948 {
3949  uioc_t *uioc = (uioc_t *)data;
3950  adapter_t *adapter = (adapter_t *)uioc->buf_vaddr;
3951  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3952 
3953  uioc->status = -ETIME;
3954 
3955  wake_up(&raid_dev->sysfs_wait_q);
3956 }
3957 
3958 
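/*
 * megaraid_sysfs_get_ldmap - fetch the current logical drive id map.
 * Serialized by sysfs_mtx, it builds an FC_DEL_LOGDRV/OP_GET_LDID_MAP
 * mailbox, issues it through megaraid_mbox_mm_command() with a 60 second
 * timer as a safety net against a dead controller, and on success copies
 * the result into raid_dev->curr_ldmap for megaraid_sysfs_show_ldnum().
 */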
3976 static int
3977 megaraid_sysfs_get_ldmap(adapter_t *adapter)
3978 {
3979  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3980  uioc_t *uioc;
3981  mbox64_t *mbox64;
3982  mbox_t *mbox;
3983  char *raw_mbox;
3984  struct timer_list sysfs_timer;
3985  struct timer_list *timerp;
3986  caddr_t ldmap;
3987  int rval = 0;
3988 
3989  /*
3990  * Allow only one read at a time to go through the sysfs attributes
3991  */
3992  mutex_lock(&raid_dev->sysfs_mtx);
3993 
3994  uioc = raid_dev->sysfs_uioc;
3995  mbox64 = raid_dev->sysfs_mbox64;
3996  ldmap = raid_dev->sysfs_buffer;
3997 
3998  memset(uioc, 0, sizeof(uioc_t));
3999  memset(mbox64, 0, sizeof(mbox64_t));
4000  memset(ldmap, 0, sizeof(raid_dev->curr_ldmap));
4001 
4002  mbox = &mbox64->mbox32;
4003  raw_mbox = (char *)mbox;
4004  uioc->cmdbuf = (uint64_t)(unsigned long)mbox64;
4005  uioc->buf_vaddr = (caddr_t)adapter;
4006  uioc->status = -ENODATA;
4007  uioc->done = megaraid_sysfs_get_ldmap_done;
4008 
4009  /*
4010  * Prepare the mailbox packet to get the current logical drive mapping
4011  * table
4012  */
4013  mbox->xferaddr = (uint32_t)raid_dev->sysfs_buffer_dma;
4014 
4015  raw_mbox[0] = FC_DEL_LOGDRV;
4016  raw_mbox[2] = OP_GET_LDID_MAP;
4017 
4018  /*
4019  * Setup a timer to recover from a non-responding controller
4020  */
4021  timerp = &sysfs_timer;
4022  init_timer(timerp);
4023 
4024  timerp->function = megaraid_sysfs_get_ldmap_timeout;
4025  timerp->data = (unsigned long)uioc;
4026  timerp->expires = jiffies + 60 * HZ;
4027 
4028  add_timer(timerp);
4029 
4030  /*
4031  * Send the command to the firmware
4032  */
4033  rval = megaraid_mbox_mm_command(adapter, uioc);
4034 
4035  if (rval == 0) { // command successfully issued
4036  wait_event(raid_dev->sysfs_wait_q, (uioc->status != -ENODATA));
4037 
4038  /*
4039  * Check if the command timed out
4040  */
4041  if (uioc->status == -ETIME) {
4042  con_log(CL_ANN, (KERN_NOTICE
4043  "megaraid: sysfs get ld map timed out\n"));
4044 
4045  rval = -ETIME;
4046  }
4047  else {
4048  rval = mbox->status;
4049  }
4050 
4051  if (rval == 0) {
4052  memcpy(raid_dev->curr_ldmap, ldmap,
4053  sizeof(raid_dev->curr_ldmap));
4054  }
4055  else {
4056  con_log(CL_ANN, (KERN_NOTICE
4057  "megaraid: get ld map failed with %x\n", rval));
4058  }
4059  }
4060  else {
4061  con_log(CL_ANN, (KERN_NOTICE
4062  "megaraid: could not issue ldmap command:%x\n", rval));
4063  }
4064 
4065 
4066  del_timer_sync(timerp);
4067 
4068  mutex_unlock(&raid_dev->sysfs_mtx);
4069 
4070  return rval;
4071 }
4072 
4073 
4083 static ssize_t
4084 megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
4085  char *buf)
4086 {
4087  struct Scsi_Host *shost = class_to_shost(dev);
4088  adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(shost);
4089  uint32_t app_hndl;
4090 
4091  app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id);
4092 
4093  return snprintf(buf, 8, "%u\n", app_hndl);
4094 }
4095 
4096 
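/*
 * megaraid_sysfs_show_ldnum - sysfs attribute for SCSI devices.
 * Emits "scsi_id logical_drv ldid_map app_hndl" for a logical drive on a
 * controller that supports random deletion; when the device is not such
 * a logical drive, or the map lookup fails, the defaults "-1 -1 -1 0"
 * are printed.
 */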
4112 static ssize_t
4113 megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf)
4114 {
4115  struct scsi_device *sdev = to_scsi_device(dev);
4116  adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host);
4117  mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
4118  int scsi_id = -1;
4119  int logical_drv = -1;
4120  int ldid_map = -1;
4121  uint32_t app_hndl = 0;
4122  int mapped_sdev_id;
4123  int rval;
4124  int i;
4125 
4126  if (raid_dev->random_del_supported &&
4127  MRAID_IS_LOGICAL_SDEV(adapter, sdev)) {
4128 
4129  rval = megaraid_sysfs_get_ldmap(adapter);
4130  if (rval == 0) {
4131 
4132  for (i = 0; i < MAX_LOGICAL_DRIVES_40LD; i++) {
4133 
4134  mapped_sdev_id = sdev->id;
4135 
4136  if (sdev->id > adapter->init_id) {
4137  mapped_sdev_id -= 1;
4138  }
4139 
4140  if (raid_dev->curr_ldmap[i] == mapped_sdev_id) {
4141 
4142  scsi_id = sdev->id;
4143 
4144  logical_drv = i;
4145 
4146  ldid_map = raid_dev->curr_ldmap[i];
4147 
4148  app_hndl = mraid_mm_adapter_app_handle(
4149  adapter->unique_id);
4150 
4151  break;
4152  }
4153  }
4154  }
4155  else {
4156  con_log(CL_ANN, (KERN_NOTICE
4157  "megaraid: sysfs get ld map failed: %x\n",
4158  rval));
4159  }
4160  }
4161 
4162  return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv,
4163  ldid_map, app_hndl);
4164 }
4165 
4166 
4167 /*
4168  * END: Mailbox Low Level Driver
4169  */
4170 module_init(megaraid_init);
4171 module_exit(megaraid_exit);
4172 
4173 /* vim: set ts=8 sw=8 tw=78 ai si: */