hptiop.c (Linux kernel 3.7.1, drivers/scsi/hptiop.c)

/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to [email protected]
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.6 (091225)";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
                                struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

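/*
 * A note on naming (added for orientation): the _itl and _mv suffixes used
 * throughout this file select between the two message-unit interfaces the
 * driver supports (the Intel IOP-based and Marvell-based board families,
 * respectively); hba->ops dispatches to one set or the other.
 *
 * Firmware readiness is detected by polling: the ITL interface hands back
 * any entry read from the inbound queue, while the MV interface answers a
 * NOP message.
 */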
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
        u32 req = 0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = readl(&hba->u.itl.iop->inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                msleep(1);
        }

        if (req != IOPMU_QUEUE_EMPTY) {
                writel(req, &hba->u.itl.iop->outbound_queue);
                readl(&hba->u.itl.iop->outbound_intstatus);
                return 0;
        }

        return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
        return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
        if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
                hptiop_host_request_callback_itl(hba,
                                tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
        else
                hptiop_iop_request_callback_itl(hba, tag);
}

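/*
 * Drain the ITL outbound queue.  Tags carrying IOPMU_QUEUE_ADDR_HOST_BIT
 * refer to host-memory requests; anything else is a request living in IOP
 * memory, where a synchronous request is completed by flagging its context
 * word instead of running the normal callback.
 */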
static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
        u32 req;

        while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
                        IOPMU_QUEUE_EMPTY) {

                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback_itl(hba, req);
                else {
                        struct hpt_iop_request_header __iomem *p;

                        p = (struct hpt_iop_request_header __iomem *)
                                ((char __iomem *)hba->u.itl.iop + req);

                        if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                if (readl(&p->context))
                                        hptiop_request_callback_itl(hba, req);
                                else
                                        writel(1, &p->context);
                        }
                        else
                                hptiop_request_callback_itl(hba, req);
                }
        }
}

static int iop_intr_itl(struct hptiop_hba *hba)
{
        struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
        void __iomem *plx = hba->u.itl.plx;
        u32 status;
        int ret = 0;

        if (plx && readl(plx + 0x11C5C) & 0xf)
                writel(1, plx + 0x11C60);

        status = readl(&iop->outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u32 msg = readl(&iop->outbound_msgaddr0);

                dprintk("received outbound msg %x\n", msg);
                writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
                hptiop_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_itl(hba);
                ret = 1;
        }

        return ret;
}

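/*
 * The MV interface posts 64-bit tags through head/tail ring buffers in BAR
 * memory rather than through a single queue register.
 */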
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
        u32 outbound_tail = readl(&mu->outbound_tail);
        u32 outbound_head = readl(&mu->outbound_head);

        if (outbound_tail != outbound_head) {
                u64 p;

                memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
                outbound_tail++;

                if (outbound_tail == MVIOP_QUEUE_LEN)
                        outbound_tail = 0;
                writel(outbound_tail, &mu->outbound_tail);
                return p;
        } else
                return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
        u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
        u32 head = inbound_head + 1;

        if (head == MVIOP_QUEUE_LEN)
                head = 0;

        memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
        writel(head, &hba->u.mv.mu->inbound_head);
        writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
                &hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
        u32 req_type = (tag >> 5) & 0x7;
        struct hpt_iop_request_scsi_command *req;

        dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

        BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

        switch (req_type) {
        case IOP_REQUEST_TYPE_GET_CONFIG:
        case IOP_REQUEST_TYPE_SET_CONFIG:
                hba->msg_done = 1;
                break;

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                req = hba->reqs[tag >> 8].req_virt;
                if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
                        req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

                hptiop_finish_scsi_req(hba, tag>>8, req);
                break;

        default:
                break;
        }
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
        u32 status;
        int ret = 0;

        status = readl(&hba->u.mv.regs->outbound_doorbell);
        writel(~status, &hba->u.mv.regs->outbound_doorbell);

        if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
                u32 msg;
                msg = readl(&hba->u.mv.mu->outbound_msg);
                dprintk("received outbound msg %x\n", msg);
                hptiop_message_callback(hba, msg);
                ret = 1;
        }

        if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
                u64 tag;

                while ((tag = mv_outbound_read(hba->u.mv.mu)))
                        hptiop_request_callback_mv(hba, tag);
                ret = 1;
        }

        return ret;
}

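/*
 * Synchronous requests: the caller polls, invoking the interrupt handler by
 * hand, until the IOP signals completion (a nonzero context word on ITL,
 * the msg_done flag on MV).
 */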
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
                                        void __iomem *_req, u32 millisec)
{
        struct hpt_iop_request_header __iomem *req = _req;
        u32 i;

        writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
        writel(0, &req->context);
        writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
                        &hba->u.itl.iop->inbound_queue);
        readl(&hba->u.itl.iop->outbound_intstatus);

        for (i = 0; i < millisec; i++) {
                iop_intr_itl(hba);
                if (readl(&req->context))
                        return 0;
                msleep(1);
        }

        return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
                                        u32 size_bits, u32 millisec)
{
        struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
        u32 i;

        hba->msg_done = 0;
        reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
        mv_inbound_write(hba->u.mv.internal_req_phy |
                        MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

        for (i = 0; i < millisec; i++) {
                iop_intr_mv(hba);
                if (hba->msg_done)
                        return 0;
                msleep(1);
        }
        return -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
        writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
        readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
        writel(msg, &hba->u.mv.mu->inbound_msg);
        writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
        readl(&hba->u.mv.regs->inbound_doorbell);
}

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
        u32 i;

        hba->msg_done = 0;
        hba->ops->post_msg(hba, msg);

        for (i = 0; i < millisec; i++) {
                spin_lock_irq(hba->host->host_lock);
                hba->ops->iop_intr(hba);
                spin_unlock_irq(hba->host->host_lock);
                if (hba->msg_done)
                        break;
                msleep(1);
        }

        return hba->msg_done ? 0 : -1;
}

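/*
 * Firmware configuration is exchanged through get_config/set_config
 * requests: the ITL variants build them in a slot borrowed from the IOP's
 * inbound queue, while the MV variants reuse the preallocated DMA-coherent
 * internal request buffer.
 */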
static int iop_get_config_itl(struct hptiop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        u32 req32;
        struct hpt_iop_request_get_config __iomem *req;

        req32 = readl(&hba->u.itl.iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        req = (struct hpt_iop_request_get_config __iomem *)
                        ((unsigned long)hba->u.itl.iop + req32);

        writel(0, &req->header.flags);
        writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
        writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);

        if (iop_send_sync_request_itl(hba, req, 20000)) {
                dprintk("Get config send cmd failed\n");
                return -1;
        }

        memcpy_fromio(config, req, sizeof(*config));
        writel(req32, &hba->u.itl.iop->outbound_queue);
        return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
        req->header.size =
                cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
        req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
        req->header.context_hi32 = 0;

        if (iop_send_sync_request_mv(hba, 0, 20000)) {
                dprintk("Get config send cmd failed\n");
                return -1;
        }

        memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
        return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u32 req32;
        struct hpt_iop_request_set_config __iomem *req;

        req32 = readl(&hba->u.itl.iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        req = (struct hpt_iop_request_set_config __iomem *)
                        ((unsigned long)hba->u.itl.iop + req32);

        memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
                (u8 *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        writel(0, &req->header.flags);
        writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
        writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);

        if (iop_send_sync_request_itl(hba, req, 20000)) {
                dprintk("Set config send cmd failed\n");
                return -1;
        }

        writel(req32, &hba->u.itl.iop->outbound_queue);
        return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

        memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
        req->header.size =
                cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
        req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
        req->header.context_hi32 = 0;

        if (iop_send_sync_request_mv(hba, 0, 20000)) {
                dprintk("Set config send cmd failed\n");
                return -1;
        }

        return 0;
}

static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
        writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
                &hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
        writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
                &hba->u.mv.regs->outbound_intmask);
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
        /* enable interrupts */
        hba->ops->enable_intr(hba);

        hba->initialized = 1;

        /* start background tasks */
        if (iop_send_sync_msg(hba,
                        IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
                printk(KERN_ERR "scsi%d: fail to start background task\n",
                        hba->host->host_no);
                return -1;
        }
        return 0;
}
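
/*
 * Map one PCI BAR.  On 0x4400-series boards the IOP sits behind a bridge
 * (hence the "plx" field), so BAR0 holds the bridge registers and BAR2 the
 * IOP itself; see hptiop_map_pci_bar_itl() below.
 */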
static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
        u32 mem_base_phy, length;
        void __iomem *mem_base_virt;

        struct pci_dev *pcidev = hba->pcidev;

        if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
                printk(KERN_ERR "scsi%d: pci resource invalid\n",
                                hba->host->host_no);
                return NULL;
        }

        mem_base_phy = pci_resource_start(pcidev, index);
        length = pci_resource_len(pcidev, index);
        mem_base_virt = ioremap(mem_base_phy, length);

        if (!mem_base_virt) {
                printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
                                hba->host->host_no);
                return NULL;
        }
        return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
        struct pci_dev *pcidev = hba->pcidev;
        hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
        if (hba->u.itl.iop == NULL)
                return -1;
        if ((pcidev->device & 0xff00) == 0x4400) {
                hba->u.itl.plx = hba->u.itl.iop;
                hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
                if (hba->u.itl.iop == NULL) {
                        iounmap(hba->u.itl.plx);
                        return -1;
                }
        }
        return 0;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
        if (hba->u.itl.plx)
                iounmap(hba->u.itl.plx);
        iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
        hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
        if (hba->u.mv.regs == NULL)
                return -1;

        hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
        if (hba->u.mv.mu == NULL) {
                iounmap(hba->u.mv.regs);
                return -1;
        }

        return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
        iounmap(hba->u.mv.regs);
        iounmap(hba->u.mv.mu);
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
        dprintk("iop message 0x%x\n", msg);

        if (msg == IOPMU_INBOUND_MSG0_NOP)
                hba->msg_done = 1;

        if (!hba->initialized)
                return;

        if (msg == IOPMU_INBOUND_MSG0_RESET) {
                atomic_set(&hba->resetting, 0);
                wake_up(&hba->reset_wq);
        }
        else if (msg <= IOPMU_INBOUND_MSG0_MAX)
                hba->msg_done = 1;
}

static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
        struct hptiop_request *ret;

        dprintk("get_req : req=%p\n", hba->req_list);

        ret = hba->req_list;
        if (ret)
                hba->req_list = ret->next;

        return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
        dprintk("free_req(%d, %p)\n", req->index, req);
        req->next = hba->req_list;
        hba->req_list = req;
}

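/*
 * Complete a SCSI command: unmap its scatter/gather list, translate the IOP
 * result code into a SCSI midlayer result (copying sense data out of the
 * request for check conditions), then return the slot to the free list.
 */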
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
                                struct hpt_iop_request_scsi_command *req)
{
        struct scsi_cmnd *scp;

        dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, req->header.type, req->header.result,
                        req->header.context, tag);

        BUG_ON(!req->header.result);
        BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

        scp = hba->reqs[tag].scp;

        if (HPT_SCP(scp)->mapped)
                scsi_dma_unmap(scp);

        switch (le32_to_cpu(req->header.result)) {
        case IOP_RESULT_SUCCESS:
                scsi_set_resid(scp,
                        scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
                scp->result = (DID_OK<<16);
                break;
        case IOP_RESULT_BAD_TARGET:
                scp->result = (DID_BAD_TARGET<<16);
                break;
        case IOP_RESULT_BUSY:
                scp->result = (DID_BUS_BUSY<<16);
                break;
        case IOP_RESULT_RESET:
                scp->result = (DID_RESET<<16);
                break;
        case IOP_RESULT_FAIL:
                scp->result = (DID_ERROR<<16);
                break;
        case IOP_RESULT_INVALID_REQUEST:
                scp->result = (DID_ABORT<<16);
                break;
        case IOP_RESULT_CHECK_CONDITION:
                scsi_set_resid(scp,
                        scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
                scp->result = SAM_STAT_CHECK_CONDITION;
                memcpy(scp->sense_buffer, &req->sg_list,
                                min_t(size_t, SCSI_SENSE_BUFFERSIZE,
                                        le32_to_cpu(req->dataxfer_length)));
                break;

        default:
                scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
                break;
        }

        dprintk("scsi_done(%p)\n", scp);
        scp->scsi_done(scp);
        free_req(hba, &hba->reqs[tag]);
}

static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
        struct hpt_iop_request_scsi_command *req;
        u32 tag;

        if (hba->iopintf_v2) {
                tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
                req = hba->reqs[tag].req_virt;
                if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
                        req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
        } else {
                tag = _tag;
                req = hba->reqs[tag].req_virt;
        }

        hptiop_finish_scsi_req(hba, tag, req);
}

void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
        struct hpt_iop_request_header __iomem *req;
        struct hpt_iop_request_ioctl_command __iomem *p;
        struct hpt_ioctl_k *arg;

        req = (struct hpt_iop_request_header __iomem *)
                        ((unsigned long)hba->u.itl.iop + tag);
        dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, readl(&req->type), readl(&req->result),
                        readl(&req->context), tag);

        BUG_ON(!readl(&req->result));
        BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

        p = (struct hpt_iop_request_ioctl_command __iomem *)req;
        arg = (struct hpt_ioctl_k *)(unsigned long)
                (readl(&req->context) |
                        ((u64)readl(&req->context_hi32)<<32));

        if (readl(&req->result) == IOP_RESULT_SUCCESS) {
                arg->result = HPT_IOCTL_RESULT_OK;

                if (arg->outbuf_size)
                        memcpy_fromio(arg->outbuf,
                                &p->buf[(readl(&p->inbuf_size) + 3)& ~3],
                                arg->outbuf_size);

                if (arg->bytes_returned)
                        *arg->bytes_returned = arg->outbuf_size;
        }
        else
                arg->result = HPT_IOCTL_RESULT_FAILED;

        arg->done(arg);
        writel(tag, &hba->u.itl.iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
        struct hptiop_hba *hba = dev_id;
        int handled;
        unsigned long flags;

        spin_lock_irqsave(hba->host->host_lock, flags);
        handled = hba->ops->iop_intr(hba);
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        return handled;
}

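/*
 * Translate the command's scatter/gather list into the hardware SG
 * descriptor format; the last descriptor carries the end-of-table (eot)
 * marker.
 */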
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct scatterlist *sg;
        int idx, nseg;

        nseg = scsi_dma_map(scp);
        BUG_ON(nseg < 0);
        if (!nseg)
                return 0;

        HPT_SCP(scp)->sgcnt = nseg;
        HPT_SCP(scp)->mapped = 1;

        BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

        scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
                psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
                psg[idx].size = cpu_to_le32(sg_dma_len(sg));
                psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
                        cpu_to_le32(1) : 0;
        }
        return HPT_SCP(scp)->sgcnt;
}

static void hptiop_post_req_itl(struct hptiop_hba *hba,
                                        struct hptiop_request *_req)
{
        struct hpt_iop_request_header *reqhdr = _req->req_virt;

        reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
                                        (u32)_req->index);
        reqhdr->context_hi32 = 0;

        if (hba->iopintf_v2) {
                u32 size, size_bits;

                size = le32_to_cpu(reqhdr->size);
                if (size < 256)
                        size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
                else if (size < 512)
                        size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
                else
                        size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
                                                IOPMU_QUEUE_ADDR_HOST_BIT;
                writel(_req->req_shifted_phy | size_bits,
                        &hba->u.itl.iop->inbound_queue);
        } else
                writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
                        &hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
                                        struct hptiop_request *_req)
{
        struct hpt_iop_request_header *reqhdr = _req->req_virt;
        u32 size, size_bit;

        reqhdr->context = cpu_to_le32(_req->index<<8 |
                                        IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
        reqhdr->context_hi32 = 0;
        size = le32_to_cpu(reqhdr->size);

        if (size <= 256)
                size_bit = 0;
        else if (size <= 256*2)
                size_bit = 1;
        else if (size <= 256*3)
                size_bit = 2;
        else
                size_bit = 3;

        mv_inbound_write((_req->req_shifted_phy << 5) |
                MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}

static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
                                void (*done)(struct scsi_cmnd *))
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct hpt_iop_request_scsi_command *req;
        int sg_count = 0;
        struct hptiop_request *_req;

        BUG_ON(!done);
        scp->scsi_done = done;

        _req = get_req(hba);
        if (_req == NULL) {
                dprintk("hptiop_queuecmd : no free req\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        _req->scp = scp;

        dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
                        "req_index=%d, req=%p\n",
                        scp,
                        host->host_no, scp->device->channel,
                        scp->device->id, scp->device->lun,
                        ((u32 *)scp->cmnd)[0],
                        ((u32 *)scp->cmnd)[1],
                        ((u32 *)scp->cmnd)[2],
                        _req->index, _req->req_virt);

        scp->result = 0;

        if (scp->device->channel || scp->device->lun ||
                        scp->device->id > hba->max_devices) {
                scp->result = DID_BAD_TARGET << 16;
                free_req(hba, _req);
                goto cmd_done;
        }

        req = _req->req_virt;

        /* build S/G table */
        sg_count = hptiop_buildsgl(scp, req->sg_list);
        if (!sg_count)
                HPT_SCP(scp)->mapped = 0;

        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
        req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
        req->channel = scp->device->channel;
        req->target = scp->device->id;
        req->lun = scp->device->lun;
        req->header.size = cpu_to_le32(
                                sizeof(struct hpt_iop_request_scsi_command)
                                 - sizeof(struct hpt_iopsg)
                                 + sg_count * sizeof(struct hpt_iopsg));

        memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
        hba->ops->post_req(hba, _req);
        return 0;

cmd_done:
        dprintk("scsi_done(scp=%p)\n", scp);
        scp->scsi_done(scp);
        return 0;
}

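/*
 * DEF_SCSI_QCMD generates hptiop_queuecommand() as a locked wrapper around
 * the _lck variant above.
 */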
static DEF_SCSI_QCMD(hptiop_queuecommand)

static const char *hptiop_info(struct Scsi_Host *host)
{
        return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
        if (atomic_xchg(&hba->resetting, 1) == 0) {
                atomic_inc(&hba->reset_count);
                hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
        }

        wait_event_timeout(hba->reset_wq,
                        atomic_read(&hba->resetting) == 0, 60 * HZ);

        if (atomic_read(&hba->resetting)) {
                /* IOP is in unknown state, abort reset */
                printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
                return -1;
        }

        if (iop_send_sync_msg(hba,
                IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
                dprintk("scsi%d: fail to start background task\n",
                                hba->host->host_no);
        }

        return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
                        scp->device->host->host_no, scp->device->channel,
                        scp->device->id, scp);

        return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
                                          int queue_depth, int reason)
{
        struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

        if (reason != SCSI_QDEPTH_DEFAULT)
                return -EOPNOTSUPP;

        if (queue_depth > hba->max_requests)
                queue_depth = hba->max_requests;
        scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
        return queue_depth;
}

static ssize_t hptiop_show_version(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *host = class_to_shost(dev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
                                hba->firmware_version >> 24,
                                (hba->firmware_version >> 16) & 0xff,
                                (hba->firmware_version >> 8) & 0xff,
                                hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
        .attr = {
                .name = "driver-version",
                .mode = S_IRUGO,
        },
        .show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
        .attr = {
                .name = "firmware-version",
                .mode = S_IRUGO,
        },
        .show = hptiop_show_fw_version,
};

static struct device_attribute *hptiop_attrs[] = {
        &hptiop_attr_version,
        &hptiop_attr_fw_version,
        NULL
};

static struct scsi_host_template driver_template = {
        .module                     = THIS_MODULE,
        .name                       = driver_name,
        .queuecommand               = hptiop_queuecommand,
        .eh_device_reset_handler    = hptiop_reset,
        .eh_bus_reset_handler       = hptiop_reset,
        .info                       = hptiop_info,
        .emulated                   = 0,
        .use_clustering             = ENABLE_CLUSTERING,
        .proc_name                  = driver_name,
        .shost_attrs                = hptiop_attrs,
        .this_id                    = -1,
        .change_queue_depth         = hptiop_adjust_disk_queue_depth,
};

static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
        hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
                        0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
        if (hba->u.mv.internal_req)
                return 0;
        else
                return -1;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
        if (hba->u.mv.internal_req) {
                dma_free_coherent(&hba->pcidev->dev, 0x800,
                        hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
                return 0;
        } else
                return -1;
}

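/*
 * Device bring-up: map the BARs, wait for firmware, negotiate the request
 * size via get_config/set_config, carve the DMA-coherent region into
 * 32-byte-aligned request slots, then register with the SCSI midlayer.
 */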
static int __devinit hptiop_probe(struct pci_dev *pcidev,
                                        const struct pci_device_id *id)
{
        struct Scsi_Host *host = NULL;
        struct hptiop_hba *hba;
        struct hptiop_adapter_ops *iop_ops;
        struct hpt_iop_request_get_config iop_config;
        struct hpt_iop_request_set_config set_config;
        dma_addr_t start_phy;
        void *start_virt;
        u32 offset, i, req_size;

        dprintk("hptiop_probe(%p)\n", pcidev);

        if (pci_enable_device(pcidev)) {
                printk(KERN_ERR "hptiop: fail to enable pci device\n");
                return -ENODEV;
        }

        printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
                pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
                pcidev->irq);

        pci_set_master(pcidev);

        /* Enable 64bit DMA if possible */
        iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
        if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(iop_ops->hw_dma_bit_mask))) {
                if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
                        printk(KERN_ERR "hptiop: fail to set dma_mask\n");
                        goto disable_pci_device;
                }
        }

        if (pci_request_regions(pcidev, driver_name)) {
                printk(KERN_ERR "hptiop: pci_request_regions failed\n");
                goto disable_pci_device;
        }

        host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
        if (!host) {
                printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
                goto free_pci_regions;
        }

        hba = (struct hptiop_hba *)host->hostdata;

        hba->ops = iop_ops;
        hba->pcidev = pcidev;
        hba->host = host;
        hba->initialized = 0;
        hba->iopintf_v2 = 0;

        atomic_set(&hba->resetting, 0);
        atomic_set(&hba->reset_count, 0);

        init_waitqueue_head(&hba->reset_wq);
        init_waitqueue_head(&hba->ioctl_wq);

        host->max_lun = 1;
        host->max_channel = 0;
        host->io_port = 0;
        host->n_io_port = 0;
        host->irq = pcidev->irq;

        if (hba->ops->map_pci_bar(hba))
                goto free_scsi_host;

        if (hba->ops->iop_wait_ready(hba, 20000)) {
                printk(KERN_ERR "scsi%d: firmware not ready\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        if (hba->ops->internal_memalloc) {
                if (hba->ops->internal_memalloc(hba)) {
                        printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
                                hba->host->host_no);
                        goto unmap_pci_bar;
                }
        }

        if (hba->ops->get_config(hba, &iop_config)) {
                printk(KERN_ERR "scsi%d: get config failed\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
                                HPTIOP_MAX_REQUESTS);
        hba->max_devices = le32_to_cpu(iop_config.max_devices);
        hba->max_request_size = le32_to_cpu(iop_config.request_size);
        hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
        hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
        hba->interface_version = le32_to_cpu(iop_config.interface_version);
        hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

        if (hba->firmware_version > 0x01020000 ||
                        hba->interface_version > 0x01020000)
                hba->iopintf_v2 = 1;

        host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
        host->max_id = le32_to_cpu(iop_config.max_devices);
        host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
        host->can_queue = le32_to_cpu(iop_config.max_requests);
        host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
        host->max_cmd_len = 16;

        req_size = sizeof(struct hpt_iop_request_scsi_command)
                + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
        if ((req_size & 0x1f) != 0)
                req_size = (req_size + 0x1f) & ~0x1f;

        memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
        set_config.iop_id = cpu_to_le32(host->host_no);
        set_config.vbus_id = cpu_to_le16(host->host_no);
        set_config.max_host_request_size = cpu_to_le16(req_size);

        if (hba->ops->set_config(hba, &set_config)) {
                printk(KERN_ERR "scsi%d: set config failed\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        pci_set_drvdata(pcidev, host);

        if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
                                        driver_name, hba)) {
                printk(KERN_ERR "scsi%d: request irq %d failed\n",
                                        hba->host->host_no, pcidev->irq);
                goto unmap_pci_bar;
        }

        /* Allocate request mem */

        dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

        hba->req_size = req_size;
        start_virt = dma_alloc_coherent(&pcidev->dev,
                                hba->req_size*hba->max_requests + 0x20,
                                &start_phy, GFP_KERNEL);

        if (!start_virt) {
                printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
                                        hba->host->host_no);
                goto free_request_irq;
        }

        hba->dma_coherent = start_virt;
        hba->dma_coherent_handle = start_phy;

        if ((start_phy & 0x1f) != 0) {
                offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
                start_phy += offset;
                start_virt += offset;
        }

        hba->req_list = start_virt;
        for (i = 0; i < hba->max_requests; i++) {
                hba->reqs[i].next = NULL;
                hba->reqs[i].req_virt = start_virt;
                hba->reqs[i].req_shifted_phy = start_phy >> 5;
                hba->reqs[i].index = i;
                free_req(hba, &hba->reqs[i]);
                start_virt = (char *)start_virt + hba->req_size;
                start_phy = start_phy + hba->req_size;
        }

        /* Enable Interrupt and start background task */
        if (hptiop_initialize_iop(hba))
                goto free_request_mem;

        if (scsi_add_host(host, &pcidev->dev)) {
                printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
                                        hba->host->host_no);
                goto free_request_mem;
        }

        scsi_scan_host(host);

        dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
        return 0;

free_request_mem:
        dma_free_coherent(&hba->pcidev->dev,
                        hba->req_size * hba->max_requests + 0x20,
                        hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
        free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
        if (hba->ops->internal_memfree)
                hba->ops->internal_memfree(hba);

        hba->ops->unmap_pci_bar(hba);

free_scsi_host:
        scsi_host_put(host);

free_pci_regions:
        pci_release_regions(pcidev);

disable_pci_device:
        pci_disable_device(pcidev);

        dprintk("scsi%d: hptiop_probe fail\n", host ? host->host_no : 0);
        return -ENODEV;
}

static void hptiop_shutdown(struct pci_dev *pcidev)
{
        struct Scsi_Host *host = pci_get_drvdata(pcidev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        dprintk("hptiop_shutdown(%p)\n", hba);

        /* stop the iop */
        if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
                printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
                                        hba->host->host_no);

        /* disable all outbound interrupts */
        hba->ops->disable_intr(hba);
}

static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
        u32 int_mask;

        int_mask = readl(&hba->u.itl.iop->outbound_intmask);
        writel(int_mask |
                IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
                &hba->u.itl.iop->outbound_intmask);
        readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
        writel(0, &hba->u.mv.regs->outbound_intmask);
        readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_remove(struct pci_dev *pcidev)
{
        struct Scsi_Host *host = pci_get_drvdata(pcidev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

        scsi_remove_host(host);

        hptiop_shutdown(pcidev);

        free_irq(hba->pcidev->irq, hba);

        dma_free_coherent(&hba->pcidev->dev,
                        hba->req_size * hba->max_requests + 0x20,
                        hba->dma_coherent,
                        hba->dma_coherent_handle);

        if (hba->ops->internal_memfree)
                hba->ops->internal_memfree(hba);

        hba->ops->unmap_pci_bar(hba);

        pci_release_regions(hba->pcidev);
        pci_set_drvdata(hba->pcidev, NULL);
        pci_disable_device(hba->pcidev);

        scsi_host_put(host);
}

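/*
 * Per-interface operation tables; hptiop_id_table binds each supported PCI
 * ID to the matching table through its driver_data field.
 */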
static struct hptiop_adapter_ops hptiop_itl_ops = {
        .iop_wait_ready    = iop_wait_ready_itl,
        .internal_memalloc = NULL,
        .internal_memfree  = NULL,
        .map_pci_bar       = hptiop_map_pci_bar_itl,
        .unmap_pci_bar     = hptiop_unmap_pci_bar_itl,
        .enable_intr       = hptiop_enable_intr_itl,
        .disable_intr      = hptiop_disable_intr_itl,
        .get_config        = iop_get_config_itl,
        .set_config        = iop_set_config_itl,
        .iop_intr          = iop_intr_itl,
        .post_msg          = hptiop_post_msg_itl,
        .post_req          = hptiop_post_req_itl,
        .hw_dma_bit_mask   = 64,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
        .iop_wait_ready    = iop_wait_ready_mv,
        .internal_memalloc = hptiop_internal_memalloc_mv,
        .internal_memfree  = hptiop_internal_memfree_mv,
        .map_pci_bar       = hptiop_map_pci_bar_mv,
        .unmap_pci_bar     = hptiop_unmap_pci_bar_mv,
        .enable_intr       = hptiop_enable_intr_mv,
        .disable_intr      = hptiop_disable_intr_mv,
        .get_config        = iop_get_config_mv,
        .set_config        = iop_set_config_mv,
        .iop_intr          = iop_intr_mv,
        .post_msg          = hptiop_post_msg_mv,
        .post_req          = hptiop_post_req_mv,
        .hw_dma_bit_mask   = 33,
};

static struct pci_device_id hptiop_id_table[] = {
        { PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
        { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
        { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
        {},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
        .name       = driver_name,
        .id_table   = hptiop_id_table,
        .probe      = hptiop_probe,
        .remove     = hptiop_remove,
        .shutdown   = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
        printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
        return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
        pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");