Linux Kernel 3.7.1
mbcs.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2005 Silicon Graphics, Inc.  All rights reserved.
 */

/*
 * MOATB Core Services driver.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/sn/addrs.h>
#include <asm/sn/intr.h>
#include <asm/sn/tiocx.h>
#include "mbcs.h"

#define MBCS_DEBUG 0
#if MBCS_DEBUG
#define DBG(fmt...)    printk(KERN_ALERT fmt)
#else
#define DBG(fmt...)
#endif
static DEFINE_MUTEX(mbcs_mutex);
static int mbcs_major;

static LIST_HEAD(soft_list);

/*
 * file operations
 */
static const struct file_operations mbcs_ops = {
        .open = mbcs_open,
        .llseek = mbcs_sram_llseek,
        .read = mbcs_sram_read,
        .write = mbcs_sram_write,
        .mmap = mbcs_gscr_mmap,
};

struct mbcs_callback_arg {
        int minor;
        struct cx_dev *cx_dev;
};

static inline void mbcs_getdma_init(struct getdma *gdma)
{
        memset(gdma, 0, sizeof(struct getdma));
        gdma->DoneIntEnable = 1;
}

static inline void mbcs_putdma_init(struct putdma *pdma)
{
        memset(pdma, 0, sizeof(struct putdma));
        pdma->DoneIntEnable = 1;
}

static inline void mbcs_algo_init(struct algoblock *algo_soft)
{
        memset(algo_soft, 0, sizeof(struct algoblock));
}

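/*
 * mbcs_getdma_set/mbcs_putdma_set - program the read ("get") and write
 * ("put") DMA engines: host system address, local SRAM address and bank
 * select, transfer length in packets, and the AMO/interrupt destinations
 * used to signal completion.
 */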
static inline void mbcs_getdma_set(void *mmr,
                       uint64_t hostAddr,
                       uint64_t localAddr,
                       uint64_t localRamSel,
                       uint64_t numPkts,
                       uint64_t amoEnable,
                       uint64_t intrEnable,
                       uint64_t peerIO,
                       uint64_t amoHostDest,
                       uint64_t amoModType, uint64_t intrHostDest,
                       uint64_t intrVector)
{
        union dma_control rdma_control;
        union dma_amo_dest amo_dest;
        union intr_dest intr_dest;
        union dma_localaddr local_addr;
        union dma_hostaddr host_addr;

        rdma_control.dma_control_reg = 0;
        amo_dest.dma_amo_dest_reg = 0;
        intr_dest.intr_dest_reg = 0;
        local_addr.dma_localaddr_reg = 0;
        host_addr.dma_hostaddr_reg = 0;

        host_addr.dma_sys_addr = hostAddr;
        MBCS_MMR_SET(mmr, MBCS_RD_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);

        local_addr.dma_ram_addr = localAddr;
        local_addr.dma_ram_sel = localRamSel;
        MBCS_MMR_SET(mmr, MBCS_RD_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);

        rdma_control.dma_op_length = numPkts;
        rdma_control.done_amo_en = amoEnable;
        rdma_control.done_int_en = intrEnable;
        rdma_control.pio_mem_n = peerIO;
        MBCS_MMR_SET(mmr, MBCS_RD_DMA_CTRL, rdma_control.dma_control_reg);

        amo_dest.dma_amo_sys_addr = amoHostDest;
        amo_dest.dma_amo_mod_type = amoModType;
        MBCS_MMR_SET(mmr, MBCS_RD_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);

        intr_dest.address = intrHostDest;
        intr_dest.int_vector = intrVector;
        MBCS_MMR_SET(mmr, MBCS_RD_DMA_INT_DEST, intr_dest.intr_dest_reg);

}

static inline void mbcs_putdma_set(void *mmr,
                       uint64_t hostAddr,
                       uint64_t localAddr,
                       uint64_t localRamSel,
                       uint64_t numPkts,
                       uint64_t amoEnable,
                       uint64_t intrEnable,
                       uint64_t peerIO,
                       uint64_t amoHostDest,
                       uint64_t amoModType,
                       uint64_t intrHostDest, uint64_t intrVector)
{
        union dma_control wdma_control;
        union dma_amo_dest amo_dest;
        union intr_dest intr_dest;
        union dma_localaddr local_addr;
        union dma_hostaddr host_addr;

        wdma_control.dma_control_reg = 0;
        amo_dest.dma_amo_dest_reg = 0;
        intr_dest.intr_dest_reg = 0;
        local_addr.dma_localaddr_reg = 0;
        host_addr.dma_hostaddr_reg = 0;

        host_addr.dma_sys_addr = hostAddr;
        MBCS_MMR_SET(mmr, MBCS_WR_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);

        local_addr.dma_ram_addr = localAddr;
        local_addr.dma_ram_sel = localRamSel;
        MBCS_MMR_SET(mmr, MBCS_WR_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);

        wdma_control.dma_op_length = numPkts;
        wdma_control.done_amo_en = amoEnable;
        wdma_control.done_int_en = intrEnable;
        wdma_control.pio_mem_n = peerIO;
        MBCS_MMR_SET(mmr, MBCS_WR_DMA_CTRL, wdma_control.dma_control_reg);

        amo_dest.dma_amo_sys_addr = amoHostDest;
        amo_dest.dma_amo_mod_type = amoModType;
        MBCS_MMR_SET(mmr, MBCS_WR_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);

        intr_dest.address = intrHostDest;
        intr_dest.int_vector = intrVector;
        MBCS_MMR_SET(mmr, MBCS_WR_DMA_INT_DEST, intr_dest.intr_dest_reg);

}

static inline void mbcs_algo_set(void *mmr,
                     uint64_t amoHostDest,
                     uint64_t amoModType,
                     uint64_t intrHostDest,
                     uint64_t intrVector, uint64_t algoStepCount)
{
        union dma_amo_dest amo_dest;
        union intr_dest intr_dest;
        union algo_step step;

        step.algo_step_reg = 0;
        intr_dest.intr_dest_reg = 0;
        amo_dest.dma_amo_dest_reg = 0;

        amo_dest.dma_amo_sys_addr = amoHostDest;
        amo_dest.dma_amo_mod_type = amoModType;
        MBCS_MMR_SET(mmr, MBCS_ALG_AMO_DEST, amo_dest.dma_amo_dest_reg);

        intr_dest.address = intrHostDest;
        intr_dest.int_vector = intrVector;
        MBCS_MMR_SET(mmr, MBCS_ALG_INT_DEST, intr_dest.intr_dest_reg);

        step.alg_step_cnt = algoStepCount;
        MBCS_MMR_SET(mmr, MBCS_ALG_STEP, step.algo_step_reg);
}

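/*
 * The *_start helpers convert the byte count recorded in the soft state
 * into cacheline-sized packets, program the corresponding engine, and then
 * set the "go" bit in the CM control register.
 */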
static inline int mbcs_getdma_start(struct mbcs_soft *soft)
{
        void *mmr_base;
        struct getdma *gdma;
        uint64_t numPkts;
        union cm_control cm_control;

        mmr_base = soft->mmr_base;
        gdma = &soft->getdma;

        /* check that host address got setup */
        if (!gdma->hostAddr)
                return -1;

        numPkts =
            (gdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;

        /* program engine */
        mbcs_getdma_set(mmr_base, tiocx_dma_addr(gdma->hostAddr),
                        gdma->localAddr,
                        (gdma->localAddr < MB2) ? 0 :
                        (gdma->localAddr < MB4) ? 1 :
                        (gdma->localAddr < MB6) ? 2 : 3,
                        numPkts,
                        gdma->DoneAmoEnable,
                        gdma->DoneIntEnable,
                        gdma->peerIO,
                        gdma->amoHostDest,
                        gdma->amoModType,
                        gdma->intrHostDest, gdma->intrVector);

        /* start engine */
        cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
        cm_control.rd_dma_go = 1;
        MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

        return 0;

}

static inline int mbcs_putdma_start(struct mbcs_soft *soft)
{
        void *mmr_base;
        struct putdma *pdma;
        uint64_t numPkts;
        union cm_control cm_control;

        mmr_base = soft->mmr_base;
        pdma = &soft->putdma;

        /* check that host address got setup */
        if (!pdma->hostAddr)
                return -1;

        numPkts =
            (pdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;

        /* program engine */
        mbcs_putdma_set(mmr_base, tiocx_dma_addr(pdma->hostAddr),
                        pdma->localAddr,
                        (pdma->localAddr < MB2) ? 0 :
                        (pdma->localAddr < MB4) ? 1 :
                        (pdma->localAddr < MB6) ? 2 : 3,
                        numPkts,
                        pdma->DoneAmoEnable,
                        pdma->DoneIntEnable,
                        pdma->peerIO,
                        pdma->amoHostDest,
                        pdma->amoModType,
                        pdma->intrHostDest, pdma->intrVector);

        /* start engine */
        cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
        cm_control.wr_dma_go = 1;
        MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

        return 0;

}

static inline int mbcs_algo_start(struct mbcs_soft *soft)
{
        struct algoblock *algo_soft = &soft->algo;
        void *mmr_base = soft->mmr_base;
        union cm_control cm_control;

        if (mutex_lock_interruptible(&soft->algolock))
                return -ERESTARTSYS;

        atomic_set(&soft->algo_done, 0);

        mbcs_algo_set(mmr_base,
                      algo_soft->amoHostDest,
                      algo_soft->amoModType,
                      algo_soft->intrHostDest,
                      algo_soft->intrVector, algo_soft->algoStepCount);

        /* start algorithm */
        cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
        cm_control.alg_done_int_en = 1;
        cm_control.alg_go = 1;
        MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

        mutex_unlock(&soft->algolock);

        return 0;
}

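/*
 * The do_mbcs_sram_dma{write,read} helpers serialize access with a mutex,
 * kick off the appropriate DMA engine against a host bounce buffer, and
 * sleep until the completion interrupt handler signals done.
 */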
static inline ssize_t
do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
                      size_t len, loff_t * off)
{
        int rv = 0;

        if (mutex_lock_interruptible(&soft->dmawritelock))
                return -ERESTARTSYS;

        atomic_set(&soft->dmawrite_done, 0);

        soft->putdma.hostAddr = hostAddr;
        soft->putdma.localAddr = *off;
        soft->putdma.bytes = len;

        if (mbcs_putdma_start(soft) < 0) {
                DBG(KERN_ALERT "do_mbcs_sram_dmawrite: "
                                        "mbcs_putdma_start failed\n");
                rv = -EAGAIN;
                goto dmawrite_exit;
        }

        if (wait_event_interruptible(soft->dmawrite_queue,
                                atomic_read(&soft->dmawrite_done))) {
                rv = -ERESTARTSYS;
                goto dmawrite_exit;
        }

        rv = len;
        *off += len;

dmawrite_exit:
        mutex_unlock(&soft->dmawritelock);

        return rv;
}

static inline ssize_t
do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
                     size_t len, loff_t * off)
{
        int rv = 0;

        if (mutex_lock_interruptible(&soft->dmareadlock))
                return -ERESTARTSYS;

        /* reset the read-completion flag before starting the transfer */
        atomic_set(&soft->dmaread_done, 0);

        soft->getdma.hostAddr = hostAddr;
        soft->getdma.localAddr = *off;
        soft->getdma.bytes = len;

        if (mbcs_getdma_start(soft) < 0) {
                DBG(KERN_ALERT "mbcs_strategy: mbcs_getdma_start failed\n");
                rv = -EAGAIN;
                goto dmaread_exit;
        }

        if (wait_event_interruptible(soft->dmaread_queue,
                                atomic_read(&soft->dmaread_done))) {
                rv = -ERESTARTSYS;
                goto dmaread_exit;
        }

        rv = len;
        *off += len;

dmaread_exit:
        mutex_unlock(&soft->dmareadlock);

        return rv;
}

static int mbcs_open(struct inode *ip, struct file *fp)
{
        struct mbcs_soft *soft;
        int minor;

        mutex_lock(&mbcs_mutex);
        minor = iminor(ip);

        /* Nothing protects access to this list... */
        list_for_each_entry(soft, &soft_list, list) {
                if (soft->nasid == minor) {
                        fp->private_data = soft->cxdev;
                        mutex_unlock(&mbcs_mutex);
                        return 0;
                }
        }

        mutex_unlock(&mbcs_mutex);
        return -ENODEV;
}

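/*
 * A read() of the device is a device-to-host transfer: SRAM contents are
 * DMAed into a kernel bounce buffer by the write-DMA ("put") engine and
 * then copied out to user space.
 */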
static ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
{
        struct cx_dev *cx_dev = fp->private_data;
        struct mbcs_soft *soft = cx_dev->soft;
        uint64_t hostAddr;
        int rv = 0;

        hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
        if (hostAddr == 0)
                return -ENOMEM;

        rv = do_mbcs_sram_dmawrite(soft, hostAddr, len, off);
        if (rv < 0)
                goto exit;

        if (copy_to_user(buf, (void *)hostAddr, len))
                rv = -EFAULT;

exit:
        free_pages(hostAddr, get_order(len));

        return rv;
}

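/*
 * A write() of the device is the mirror image: user data is copied into a
 * bounce buffer and the read-DMA ("get") engine pulls it into SRAM.
 */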
static ssize_t
mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off)
{
        struct cx_dev *cx_dev = fp->private_data;
        struct mbcs_soft *soft = cx_dev->soft;
        uint64_t hostAddr;
        int rv = 0;

        hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
        if (hostAddr == 0)
                return -ENOMEM;

        if (copy_from_user((void *)hostAddr, buf, len)) {
                rv = -EFAULT;
                goto exit;
        }

        rv = do_mbcs_sram_dmaread(soft, hostAddr, len, off);

exit:
        free_pages(hostAddr, get_order(len));

        return rv;
}

static loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
{
        loff_t newpos;

        switch (whence) {
        case SEEK_SET:
                newpos = off;
                break;

        case SEEK_CUR:
                newpos = filp->f_pos + off;
                break;

        case SEEK_END:
                newpos = MBCS_SRAM_SIZE + off;
                break;

        default:                /* can't happen */
                return -EINVAL;
        }

        if (newpos < 0)
                return -EINVAL;

        filp->f_pos = newpos;

        return newpos;
}

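/*
 * mbcs_pioaddr - convert an offset within the corelet's MMR space into a
 * CPU-addressable PIO address.
 */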
static uint64_t mbcs_pioaddr(struct mbcs_soft *soft, uint64_t offset)
{
        uint64_t mmr_base;

        mmr_base = (uint64_t) (soft->mmr_base + offset);

        return mmr_base;
}

static void mbcs_debug_pioaddr_set(struct mbcs_soft *soft)
{
        soft->debug_addr = mbcs_pioaddr(soft, MBCS_DEBUG_START);
}

static void mbcs_gscr_pioaddr_set(struct mbcs_soft *soft)
{
        soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START);
}

static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
{
        struct cx_dev *cx_dev = fp->private_data;
        struct mbcs_soft *soft = cx_dev->soft;

        if (vma->vm_pgoff != 0)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            __pa(soft->gscr_addr) >> PAGE_SHIFT,
                            PAGE_SIZE,
                            vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

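/**
 * mbcs_completion_intr_handler - Primary completion handler.
 * @irq: irq
 * @arg: soft struct for device
 *
 */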
static irqreturn_t
mbcs_completion_intr_handler(int irq, void *arg)
{
        struct mbcs_soft *soft = (struct mbcs_soft *)arg;
        void *mmr_base;
        union cm_status cm_status;
        union cm_control cm_control;

        mmr_base = soft->mmr_base;
        cm_status.cm_status_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_STATUS);

        if (cm_status.rd_dma_done) {
                /* stop dma-read engine, clear status */
                cm_control.cm_control_reg =
                    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
                cm_control.rd_dma_clr = 1;
                MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
                             cm_control.cm_control_reg);
                atomic_set(&soft->dmaread_done, 1);
                wake_up(&soft->dmaread_queue);
        }
        if (cm_status.wr_dma_done) {
                /* stop dma-write engine, clear status */
                cm_control.cm_control_reg =
                    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
                cm_control.wr_dma_clr = 1;
                MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
                             cm_control.cm_control_reg);
                atomic_set(&soft->dmawrite_done, 1);
                wake_up(&soft->dmawrite_queue);
        }
        if (cm_status.alg_done) {
                /* clear status */
                cm_control.cm_control_reg =
                    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
                cm_control.alg_done_clr = 1;
                MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
                             cm_control.cm_control_reg);
                atomic_set(&soft->algo_done, 1);
                wake_up(&soft->algo_queue);
        }

        return IRQ_HANDLED;
}

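/**
 * mbcs_intr_alloc - Allocate interrupts.
 * @dev: device pointer
 *
 */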
static int mbcs_intr_alloc(struct cx_dev *dev)
{
        struct sn_irq_info *sn_irq;
        struct mbcs_soft *soft;
        struct getdma *getdma;
        struct putdma *putdma;
        struct algoblock *algo;

        soft = dev->soft;
        getdma = &soft->getdma;
        putdma = &soft->putdma;
        algo = &soft->algo;

        soft->get_sn_irq = NULL;
        soft->put_sn_irq = NULL;
        soft->algo_sn_irq = NULL;

        sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
        if (sn_irq == NULL)
                return -EAGAIN;
        soft->get_sn_irq = sn_irq;
        getdma->intrHostDest = sn_irq->irq_xtalkaddr;
        getdma->intrVector = sn_irq->irq_irq;
        if (request_irq(sn_irq->irq_irq,
                        (void *)mbcs_completion_intr_handler, IRQF_SHARED,
                        "MBCS get intr", (void *)soft)) {
                tiocx_irq_free(soft->get_sn_irq);
                return -EAGAIN;
        }

        sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
        if (sn_irq == NULL) {
                free_irq(soft->get_sn_irq->irq_irq, soft);
                tiocx_irq_free(soft->get_sn_irq);
                return -EAGAIN;
        }
        soft->put_sn_irq = sn_irq;
        putdma->intrHostDest = sn_irq->irq_xtalkaddr;
        putdma->intrVector = sn_irq->irq_irq;
        if (request_irq(sn_irq->irq_irq,
                        (void *)mbcs_completion_intr_handler, IRQF_SHARED,
                        "MBCS put intr", (void *)soft)) {
                tiocx_irq_free(soft->put_sn_irq);
                free_irq(soft->get_sn_irq->irq_irq, soft);
                tiocx_irq_free(soft->get_sn_irq);
                return -EAGAIN;
        }

        sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
        if (sn_irq == NULL) {
                free_irq(soft->put_sn_irq->irq_irq, soft);
                tiocx_irq_free(soft->put_sn_irq);
                free_irq(soft->get_sn_irq->irq_irq, soft);
                tiocx_irq_free(soft->get_sn_irq);
                return -EAGAIN;
        }
        soft->algo_sn_irq = sn_irq;
        algo->intrHostDest = sn_irq->irq_xtalkaddr;
        algo->intrVector = sn_irq->irq_irq;
        if (request_irq(sn_irq->irq_irq,
                        (void *)mbcs_completion_intr_handler, IRQF_SHARED,
                        "MBCS algo intr", (void *)soft)) {
                tiocx_irq_free(soft->algo_sn_irq);
                free_irq(soft->put_sn_irq->irq_irq, soft);
                tiocx_irq_free(soft->put_sn_irq);
                free_irq(soft->get_sn_irq->irq_irq, soft);
                tiocx_irq_free(soft->get_sn_irq);
                return -EAGAIN;
        }

        return 0;
}

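/**
 * mbcs_intr_dealloc - Remove interrupts.
 * @dev: device pointer
 *
 */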
static void mbcs_intr_dealloc(struct cx_dev *dev)
{
        struct mbcs_soft *soft;

        soft = dev->soft;

        free_irq(soft->get_sn_irq->irq_irq, soft);
        tiocx_irq_free(soft->get_sn_irq);
        free_irq(soft->put_sn_irq->irq_irq, soft);
        tiocx_irq_free(soft->put_sn_irq);
        free_irq(soft->algo_sn_irq->irq_irq, soft);
        tiocx_irq_free(soft->algo_sn_irq);
}

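/*
 * mbcs_hw_init - set the request timeout, record PIO addresses, clear any
 * latched errors, enable error interrupts, and re-arm the status registers
 * and DMA/algorithm engines.
 */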
static inline int mbcs_hw_init(struct mbcs_soft *soft)
{
        void *mmr_base = soft->mmr_base;
        union cm_control cm_control;
        union cm_req_timeout cm_req_timeout;
        uint64_t err_stat;

        cm_req_timeout.cm_req_timeout_reg =
            MBCS_MMR_GET(mmr_base, MBCS_CM_REQ_TOUT);

        cm_req_timeout.time_out = MBCS_CM_CONTROL_REQ_TOUT_MASK;
        MBCS_MMR_SET(mmr_base, MBCS_CM_REQ_TOUT,
                     cm_req_timeout.cm_req_timeout_reg);

        mbcs_gscr_pioaddr_set(soft);
        mbcs_debug_pioaddr_set(soft);

        /* clear errors */
        err_stat = MBCS_MMR_GET(mmr_base, MBCS_CM_ERR_STAT);
        MBCS_MMR_SET(mmr_base, MBCS_CM_CLR_ERR_STAT, err_stat);
        MBCS_MMR_SET(mmr_base, MBCS_CM_ERROR_DETAIL1, 0x0);

        /* enable interrupts */
        /* turn off 2^23 (INT_EN_PIO_REQ_ADDR_INV) */
        MBCS_MMR_SET(mmr_base, MBCS_CM_ERR_INT_EN, 0x3ffffff7e00ffUL);

        /* arm status regs and clear engines */
        cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
        cm_control.rearm_stat_regs = 1;
        cm_control.alg_clr = 1;
        cm_control.wr_dma_clr = 1;
        cm_control.rd_dma_clr = 1;

        MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

        return 0;
}

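/*
 * The "algo" sysfs attribute reports the algorithm number/revision from the
 * first debug register and, when written with "1", runs the algorithm block
 * and waits for its completion interrupt.
 */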
static ssize_t show_algo(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct cx_dev *cx_dev = to_cx_dev(dev);
        struct mbcs_soft *soft = cx_dev->soft;
        uint64_t debug0;

        /*
         * By convention, the first debug register contains the
         * algorithm number and revision.
         */
        debug0 = *(uint64_t *) soft->debug_addr;

        return sprintf(buf, "0x%x 0x%x\n",
                       upper_32_bits(debug0), lower_32_bits(debug0));
}

static ssize_t store_algo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        int n;
        struct cx_dev *cx_dev = to_cx_dev(dev);
        struct mbcs_soft *soft = cx_dev->soft;

        if (count <= 0)
                return 0;

        n = simple_strtoul(buf, NULL, 0);

        if (n == 1) {
                mbcs_algo_start(soft);
                if (wait_event_interruptible(soft->algo_queue,
                                        atomic_read(&soft->algo_done)))
                        return -ERESTARTSYS;
        }

        return count;
}

DEVICE_ATTR(algo, 0644, show_algo, store_algo);

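/**
 * mbcs_probe - Initialize for device
 * @dev: device pointer
 * @id: id table pointer
 *
 */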
static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
{
        struct mbcs_soft *soft;

        dev->soft = NULL;

        soft = kzalloc(sizeof(struct mbcs_soft), GFP_KERNEL);
        if (soft == NULL)
                return -ENOMEM;

        soft->nasid = dev->cx_id.nasid;
        list_add(&soft->list, &soft_list);
        soft->mmr_base = (void *)tiocx_swin_base(dev->cx_id.nasid);
        dev->soft = soft;
        soft->cxdev = dev;

        init_waitqueue_head(&soft->dmawrite_queue);
        init_waitqueue_head(&soft->dmaread_queue);
        init_waitqueue_head(&soft->algo_queue);

        mutex_init(&soft->dmawritelock);
        mutex_init(&soft->dmareadlock);
        mutex_init(&soft->algolock);

        mbcs_getdma_init(&soft->getdma);
        mbcs_putdma_init(&soft->putdma);
        mbcs_algo_init(&soft->algo);

        mbcs_hw_init(soft);

        /* Allocate interrupts */
        mbcs_intr_alloc(dev);

        device_create_file(&dev->dev, &dev_attr_algo);

        return 0;
}

static int mbcs_remove(struct cx_dev *dev)
{
        if (dev->soft) {
                mbcs_intr_dealloc(dev);
                kfree(dev->soft);
        }

        device_remove_file(&dev->dev, &dev_attr_algo);

        return 0;
}

static const struct cx_device_id __devinitconst mbcs_id_table[] = {
        {
         .part_num = MBCS_PART_NUM,
         .mfg_num = MBCS_MFG_NUM,
         },
        {
         .part_num = MBCS_PART_NUM_ALG0,
         .mfg_num = MBCS_MFG_NUM,
         },
        {0, 0}
};

MODULE_DEVICE_TABLE(cx, mbcs_id_table);

815 
816 static struct cx_drv mbcs_driver = {
817  .name = DEVICE_NAME,
818  .id_table = mbcs_id_table,
819  .probe = mbcs_probe,
820  .remove = mbcs_remove,
821 };
822 
823 static void __exit mbcs_exit(void)
824 {
825  unregister_chrdev(mbcs_major, DEVICE_NAME);
826  cx_driver_unregister(&mbcs_driver);
827 }
828 
static int __init mbcs_init(void)
{
        int rv;

        if (!ia64_platform_is("sn2"))
                return -ENODEV;

        // Put driver into chrdevs[].  Get major number.
        rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops);
        if (rv < 0) {
                DBG(KERN_ALERT "mbcs_init: can't get major number. %d\n", rv);
                return rv;
        }
        mbcs_major = rv;

        return cx_driver_register(&mbcs_driver);
}

module_init(mbcs_init);
module_exit(mbcs_exit);

MODULE_AUTHOR("Bruce Losure <[email protected]>");
MODULE_DESCRIPTION("Driver for MOATB Core Services");
MODULE_LICENSE("GPL");
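
Taken together, the driver exposes corelet SRAM as a seekable character device and the algorithm block through the "algo" sysfs attribute. A minimal userspace sketch of the SRAM read path follows; the /dev/mbcs0 node name (minor number equal to the corelet nasid, per mbcs_open) is an assumption, since the device node itself is not created in this file.

/* Hypothetical userspace example; /dev/mbcs0 is an assumed node name. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[1024];
        int fd = open("/dev/mbcs0", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Position within corelet SRAM (handled by mbcs_sram_llseek),
         * then DMA the data into buf via mbcs_sram_read. */
        if (lseek(fd, 0, SEEK_SET) < 0 || read(fd, buf, sizeof(buf)) < 0)
                perror("sram read");

        close(fd);
        return 0;
}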