Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
vmu-flash.c
Go to the documentation of this file.
1 /* vmu-flash.c
2  * Driver for SEGA Dreamcast Visual Memory Unit
3  *
4  * Copyright (c) Adrian McMenamin 2002 - 2009
5  * Copyright (c) Paul Mundt 2001
6  *
7  * Licensed under version 2 of the
8  * GNU General Public Licence
9  */
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/sched.h>
13 #include <linux/delay.h>
14 #include <linux/maple.h>
15 #include <linux/mtd/mtd.h>
16 #include <linux/mtd/map.h>
17 
/* One-block read cache, kept per partition. */
struct vmu_cache {
	unsigned char *buffer;		/* Cached block contents */
	unsigned int block;		/* Which block was cached */
	unsigned long jiffies_atc;	/* When was it cached? */
	int valid;			/* Non-zero while the cache holds live data */
};
24 
/* Stored in mtd_info->priv: ties an mtd partition back to its maple device. */
struct mdev_part {
	struct maple_device *mdev;	/* Owning maple bus device */
	int partition;			/* Partition index on that device */
};
29 
30 struct vmupart {
34  char *name;
35  struct vmu_cache *pcache;
36 };
37 
38 struct memcard {
46  int partition;
47  int read;
48  unsigned char *blockread;
49  struct vmupart *parts;
50  struct mtd_info *mtd;
51 };
52 
/* A byte offset on the card, resolved into (block number, offset in block). */
struct vmu_block {
	unsigned int num;	/* block number */
	unsigned int ofs;	/* block offset */
};
57 
58 static struct vmu_block *ofs_to_block(unsigned long src_ofs,
59  struct mtd_info *mtd, int partition)
60 {
61  struct vmu_block *vblock;
62  struct maple_device *mdev;
63  struct memcard *card;
64  struct mdev_part *mpart;
65  int num;
66 
67  mpart = mtd->priv;
68  mdev = mpart->mdev;
69  card = maple_get_drvdata(mdev);
70 
71  if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
72  goto failed;
73 
74  num = src_ofs / card->blocklen;
75  if (num > card->parts[partition].numblocks)
76  goto failed;
77 
78  vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
79  if (!vblock)
80  goto failed;
81 
82  vblock->num = num;
83  vblock->ofs = src_ofs % card->blocklen;
84  return vblock;
85 
86 failed:
87  return NULL;
88 }
89 
90 /* Maple bus callback function for reads */
91 static void vmu_blockread(struct mapleq *mq)
92 {
93  struct maple_device *mdev;
94  struct memcard *card;
95 
96  mdev = mq->dev;
97  card = maple_get_drvdata(mdev);
98  /* copy the read in data */
99 
100  if (unlikely(!card->blockread))
101  return;
102 
103  memcpy(card->blockread, mq->recvbuf->buf + 12,
104  card->blocklen/card->readcnt);
105 
106 }
107 
108 /* Interface with maple bus to read blocks
109  * caching the results so that other parts
110  * of the driver can access block reads */
111 static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
112  struct mtd_info *mtd)
113 {
114  struct memcard *card;
115  struct mdev_part *mpart;
116  struct maple_device *mdev;
117  int partition, error = 0, x, wait;
118  unsigned char *blockread = NULL;
119  struct vmu_cache *pcache;
120  __be32 sendbuf;
121 
122  mpart = mtd->priv;
123  mdev = mpart->mdev;
124  partition = mpart->partition;
125  card = maple_get_drvdata(mdev);
126  pcache = card->parts[partition].pcache;
127  pcache->valid = 0;
128 
129  /* prepare the cache for this block */
130  if (!pcache->buffer) {
131  pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
132  if (!pcache->buffer) {
133  dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
134  " to lack of memory\n", mdev->port,
135  mdev->unit);
136  error = -ENOMEM;
137  goto outB;
138  }
139  }
140 
141  /*
142  * Reads may be phased - again the hardware spec
143  * supports this - though may not be any devices in
144  * the wild that implement it, but we will here
145  */
146  for (x = 0; x < card->readcnt; x++) {
147  sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);
148 
149  if (atomic_read(&mdev->busy) == 1) {
151  atomic_read(&mdev->busy) == 0, HZ);
152  if (atomic_read(&mdev->busy) == 1) {
153  dev_notice(&mdev->dev, "VMU at (%d, %d)"
154  " is busy\n", mdev->port, mdev->unit);
155  error = -EAGAIN;
156  goto outB;
157  }
158  }
159 
160  atomic_set(&mdev->busy, 1);
161  blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
162  if (!blockread) {
163  error = -ENOMEM;
164  atomic_set(&mdev->busy, 0);
165  goto outB;
166  }
167  card->blockread = blockread;
168 
169  maple_getcond_callback(mdev, vmu_blockread, 0,
171  error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
172  MAPLE_COMMAND_BREAD, 2, &sendbuf);
173  /* Very long timeouts seem to be needed when box is stressed */
175  (atomic_read(&mdev->busy) == 0 ||
176  atomic_read(&mdev->busy) == 2), HZ * 3);
177  /*
178  * MTD layer does not handle hotplugging well
179  * so have to return errors when VMU is unplugged
180  * in the middle of a read (busy == 2)
181  */
182  if (error || atomic_read(&mdev->busy) == 2) {
183  if (atomic_read(&mdev->busy) == 2)
184  error = -ENXIO;
185  atomic_set(&mdev->busy, 0);
186  card->blockread = NULL;
187  goto outA;
188  }
189  if (wait == 0 || wait == -ERESTARTSYS) {
190  card->blockread = NULL;
191  atomic_set(&mdev->busy, 0);
192  error = -EIO;
193  list_del_init(&(mdev->mq->list));
194  kfree(mdev->mq->sendbuf);
195  mdev->mq->sendbuf = NULL;
196  if (wait == -ERESTARTSYS) {
197  dev_warn(&mdev->dev, "VMU read on (%d, %d)"
198  " interrupted on block 0x%X\n",
199  mdev->port, mdev->unit, num);
200  } else
201  dev_notice(&mdev->dev, "VMU read on (%d, %d)"
202  " timed out on block 0x%X\n",
203  mdev->port, mdev->unit, num);
204  goto outA;
205  }
206 
207  memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
208  card->blocklen/card->readcnt);
209 
210  memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
211  card->blockread, card->blocklen/card->readcnt);
212  card->blockread = NULL;
213  pcache->block = num;
214  pcache->jiffies_atc = jiffies;
215  pcache->valid = 1;
216  kfree(blockread);
217  }
218 
219  return error;
220 
221 outA:
222  kfree(blockread);
223 outB:
224  return error;
225 }
226 
227 /* communicate with maple bus for phased writing */
228 static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
229  struct mtd_info *mtd)
230 {
231  struct memcard *card;
232  struct mdev_part *mpart;
233  struct maple_device *mdev;
234  int partition, error, locking, x, phaselen, wait;
235  __be32 *sendbuf;
236 
237  mpart = mtd->priv;
238  mdev = mpart->mdev;
239  partition = mpart->partition;
240  card = maple_get_drvdata(mdev);
241 
242  phaselen = card->blocklen/card->writecnt;
243 
244  sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
245  if (!sendbuf) {
246  error = -ENOMEM;
247  goto fail_nosendbuf;
248  }
249  for (x = 0; x < card->writecnt; x++) {
250  sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
251  memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
252  /* wait until the device is not busy doing something else
253  * or 1 second - which ever is longer */
254  if (atomic_read(&mdev->busy) == 1) {
256  atomic_read(&mdev->busy) == 0, HZ);
257  if (atomic_read(&mdev->busy) == 1) {
258  error = -EBUSY;
259  dev_notice(&mdev->dev, "VMU write at (%d, %d)"
260  "failed - device is busy\n",
261  mdev->port, mdev->unit);
262  goto fail_nolock;
263  }
264  }
265  atomic_set(&mdev->busy, 1);
266 
267  locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
268  MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
270  atomic_read(&mdev->busy) == 0, HZ/10);
271  if (locking) {
272  error = -EIO;
273  atomic_set(&mdev->busy, 0);
274  goto fail_nolock;
275  }
276  if (atomic_read(&mdev->busy) == 2) {
277  atomic_set(&mdev->busy, 0);
278  } else if (wait == 0 || wait == -ERESTARTSYS) {
279  error = -EIO;
280  dev_warn(&mdev->dev, "Write at (%d, %d) of block"
281  " 0x%X at phase %d failed: could not"
282  " communicate with VMU", mdev->port,
283  mdev->unit, num, x);
284  atomic_set(&mdev->busy, 0);
285  kfree(mdev->mq->sendbuf);
286  mdev->mq->sendbuf = NULL;
287  list_del_init(&(mdev->mq->list));
288  goto fail_nolock;
289  }
290  }
291  kfree(sendbuf);
292 
293  return card->blocklen;
294 
295 fail_nolock:
296  kfree(sendbuf);
297 fail_nosendbuf:
298  dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
299  mdev->unit);
300  return error;
301 }
302 
303 /* mtd function to simulate reading byte by byte */
304 static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
305  struct mtd_info *mtd)
306 {
307  struct vmu_block *vblock;
308  struct memcard *card;
309  struct mdev_part *mpart;
310  struct maple_device *mdev;
311  unsigned char *buf, ret;
312  int partition, error;
313 
314  mpart = mtd->priv;
315  mdev = mpart->mdev;
316  partition = mpart->partition;
317  card = maple_get_drvdata(mdev);
318  *retval = 0;
319 
320  buf = kmalloc(card->blocklen, GFP_KERNEL);
321  if (!buf) {
322  *retval = 1;
323  ret = -ENOMEM;
324  goto finish;
325  }
326 
327  vblock = ofs_to_block(ofs, mtd, partition);
328  if (!vblock) {
329  *retval = 3;
330  ret = -ENOMEM;
331  goto out_buf;
332  }
333 
334  error = maple_vmu_read_block(vblock->num, buf, mtd);
335  if (error) {
336  ret = error;
337  *retval = 2;
338  goto out_vblock;
339  }
340 
341  ret = buf[vblock->ofs];
342 
343 out_vblock:
344  kfree(vblock);
345 out_buf:
346  kfree(buf);
347 finish:
348  return ret;
349 }
350 
351 /* mtd higher order function to read flash */
352 static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
353  size_t *retlen, u_char *buf)
354 {
355  struct maple_device *mdev;
356  struct memcard *card;
357  struct mdev_part *mpart;
358  struct vmu_cache *pcache;
359  struct vmu_block *vblock;
360  int index = 0, retval, partition, leftover, numblocks;
361  unsigned char cx;
362 
363  mpart = mtd->priv;
364  mdev = mpart->mdev;
365  partition = mpart->partition;
366  card = maple_get_drvdata(mdev);
367 
368  numblocks = card->parts[partition].numblocks;
369  if (from + len > numblocks * card->blocklen)
370  len = numblocks * card->blocklen - from;
371  if (len == 0)
372  return -EIO;
373  /* Have we cached this bit already? */
374  pcache = card->parts[partition].pcache;
375  do {
376  vblock = ofs_to_block(from + index, mtd, partition);
377  if (!vblock)
378  return -ENOMEM;
379  /* Have we cached this and is the cache valid and timely? */
380  if (pcache->valid &&
381  time_before(jiffies, pcache->jiffies_atc + HZ) &&
382  (pcache->block == vblock->num)) {
383  /* we have cached it, so do necessary copying */
384  leftover = card->blocklen - vblock->ofs;
385  if (vblock->ofs + len - index < card->blocklen) {
386  /* only a bit of this block to copy */
387  memcpy(buf + index,
388  pcache->buffer + vblock->ofs,
389  len - index);
390  index = len;
391  } else {
392  /* otherwise copy remainder of whole block */
393  memcpy(buf + index, pcache->buffer +
394  vblock->ofs, leftover);
395  index += leftover;
396  }
397  } else {
398  /*
399  * Not cached so read one byte -
400  * but cache the rest of the block
401  */
402  cx = vmu_flash_read_char(from + index, &retval, mtd);
403  if (retval) {
404  *retlen = index;
405  kfree(vblock);
406  return cx;
407  }
408  memset(buf + index, cx, 1);
409  index++;
410  }
411  kfree(vblock);
412  } while (len > index);
413  *retlen = index;
414 
415  return 0;
416 }
417 
418 static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
419  size_t *retlen, const u_char *buf)
420 {
421  struct maple_device *mdev;
422  struct memcard *card;
423  struct mdev_part *mpart;
424  int index = 0, partition, error = 0, numblocks;
425  struct vmu_cache *pcache;
426  struct vmu_block *vblock;
427  unsigned char *buffer;
428 
429  mpart = mtd->priv;
430  mdev = mpart->mdev;
431  partition = mpart->partition;
432  card = maple_get_drvdata(mdev);
433 
434  numblocks = card->parts[partition].numblocks;
435  if (to + len > numblocks * card->blocklen)
436  len = numblocks * card->blocklen - to;
437  if (len == 0) {
438  error = -EIO;
439  goto failed;
440  }
441 
442  vblock = ofs_to_block(to, mtd, partition);
443  if (!vblock) {
444  error = -ENOMEM;
445  goto failed;
446  }
447 
448  buffer = kmalloc(card->blocklen, GFP_KERNEL);
449  if (!buffer) {
450  error = -ENOMEM;
451  goto fail_buffer;
452  }
453 
454  do {
455  /* Read in the block we are to write to */
456  error = maple_vmu_read_block(vblock->num, buffer, mtd);
457  if (error)
458  goto fail_io;
459 
460  do {
461  buffer[vblock->ofs] = buf[index];
462  vblock->ofs++;
463  index++;
464  if (index >= len)
465  break;
466  } while (vblock->ofs < card->blocklen);
467 
468  /* write out new buffer */
469  error = maple_vmu_write_block(vblock->num, buffer, mtd);
470  /* invalidate the cache */
471  pcache = card->parts[partition].pcache;
472  pcache->valid = 0;
473 
474  if (error != card->blocklen)
475  goto fail_io;
476 
477  vblock->num++;
478  vblock->ofs = 0;
479  } while (len > index);
480 
481  kfree(buffer);
482  *retlen = index;
483  kfree(vblock);
484  return 0;
485 
486 fail_io:
487  kfree(buffer);
488 fail_buffer:
489  kfree(vblock);
490 failed:
491  dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
492  return error;
493 }
494 
/* mtd sync hook: the VMU has no write-behind buffering, so nothing to do */
static void vmu_flash_sync(struct mtd_info *mtd)
{
}
499 
500 /* Maple bus callback function to recursively query hardware details */
501 static void vmu_queryblocks(struct mapleq *mq)
502 {
503  struct maple_device *mdev;
504  unsigned short *res;
505  struct memcard *card;
506  __be32 partnum;
507  struct vmu_cache *pcache;
508  struct mdev_part *mpart;
509  struct mtd_info *mtd_cur;
510  struct vmupart *part_cur;
511  int error;
512 
513  mdev = mq->dev;
514  card = maple_get_drvdata(mdev);
515  res = (unsigned short *) (mq->recvbuf->buf);
516  card->tempA = res[12];
517  card->tempB = res[6];
518 
519  dev_info(&mdev->dev, "VMU device at partition %d has %d user "
520  "blocks with a root block at %d\n", card->partition,
521  card->tempA, card->tempB);
522 
523  part_cur = &card->parts[card->partition];
524  part_cur->user_blocks = card->tempA;
525  part_cur->root_block = card->tempB;
526  part_cur->numblocks = card->tempB + 1;
527  part_cur->name = kmalloc(12, GFP_KERNEL);
528  if (!part_cur->name)
529  goto fail_name;
530 
531  sprintf(part_cur->name, "vmu%d.%d.%d",
532  mdev->port, mdev->unit, card->partition);
533  mtd_cur = &card->mtd[card->partition];
534  mtd_cur->name = part_cur->name;
535  mtd_cur->type = 8;
536  mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
537  mtd_cur->size = part_cur->numblocks * card->blocklen;
538  mtd_cur->erasesize = card->blocklen;
539  mtd_cur->_write = vmu_flash_write;
540  mtd_cur->_read = vmu_flash_read;
541  mtd_cur->_sync = vmu_flash_sync;
542  mtd_cur->writesize = card->blocklen;
543 
544  mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
545  if (!mpart)
546  goto fail_mpart;
547 
548  mpart->mdev = mdev;
549  mpart->partition = card->partition;
550  mtd_cur->priv = mpart;
551  mtd_cur->owner = THIS_MODULE;
552 
553  pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
554  if (!pcache)
555  goto fail_cache_create;
556  part_cur->pcache = pcache;
557 
558  error = mtd_device_register(mtd_cur, NULL, 0);
559  if (error)
560  goto fail_mtd_register;
561 
562  maple_getcond_callback(mdev, NULL, 0,
564 
565  /*
566  * Set up a recursive call to the (probably theoretical)
567  * second or more partition
568  */
569  if (++card->partition < card->partitions) {
570  partnum = cpu_to_be32(card->partition << 24);
571  maple_getcond_callback(mdev, vmu_queryblocks, 0,
574  MAPLE_COMMAND_GETMINFO, 2, &partnum);
575  }
576  return;
577 
578 fail_mtd_register:
579  dev_err(&mdev->dev, "Could not register maple device at (%d, %d)"
580  "error is 0x%X\n", mdev->port, mdev->unit, error);
581  for (error = 0; error <= card->partition; error++) {
582  kfree(((card->parts)[error]).pcache);
583  ((card->parts)[error]).pcache = NULL;
584  }
585 fail_cache_create:
586 fail_mpart:
587  for (error = 0; error <= card->partition; error++) {
588  kfree(((card->mtd)[error]).priv);
589  ((card->mtd)[error]).priv = NULL;
590  }
591  maple_getcond_callback(mdev, NULL, 0,
593  kfree(part_cur->name);
594 fail_name:
595  return;
596 }
597 
598 /* Handles very basic info about the flash, queries for details */
599 static int __devinit vmu_connect(struct maple_device *mdev)
600 {
601  unsigned long test_flash_data, basic_flash_data;
602  int c, error;
603  struct memcard *card;
604  u32 partnum = 0;
605 
606  test_flash_data = be32_to_cpu(mdev->devinfo.function);
607  /* Need to count how many bits are set - to find out which
608  * function_data element has details of the memory card
609  */
610  c = hweight_long(test_flash_data);
611 
612  basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
613 
614  card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
615  if (!card) {
616  error = -ENOMEM;
617  goto fail_nomem;
618  }
619 
620  card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
621  card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
622  card->writecnt = basic_flash_data >> 12 & 0xF;
623  card->readcnt = basic_flash_data >> 8 & 0xF;
624  card->removeable = basic_flash_data >> 7 & 1;
625 
626  card->partition = 0;
627 
628  /*
629  * Not sure there are actually any multi-partition devices in the
630  * real world, but the hardware supports them, so, so will we
631  */
632  card->parts = kmalloc(sizeof(struct vmupart) * card->partitions,
633  GFP_KERNEL);
634  if (!card->parts) {
635  error = -ENOMEM;
636  goto fail_partitions;
637  }
638 
639  card->mtd = kmalloc(sizeof(struct mtd_info) * card->partitions,
640  GFP_KERNEL);
641  if (!card->mtd) {
642  error = -ENOMEM;
643  goto fail_mtd_info;
644  }
645 
646  maple_set_drvdata(mdev, card);
647 
648  /*
649  * We want to trap meminfo not get cond
650  * so set interval to zero, but rely on maple bus
651  * driver to pass back the results of the meminfo
652  */
653  maple_getcond_callback(mdev, vmu_queryblocks, 0,
655 
656  /* Make sure we are clear to go */
657  if (atomic_read(&mdev->busy) == 1) {
659  atomic_read(&mdev->busy) == 0, HZ);
660  if (atomic_read(&mdev->busy) == 1) {
661  dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
662  mdev->port, mdev->unit);
663  error = -EAGAIN;
664  goto fail_device_busy;
665  }
666  }
667 
668  atomic_set(&mdev->busy, 1);
669 
670  /*
671  * Set up the minfo call: vmu_queryblocks will handle
672  * the information passed back
673  */
674  error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
675  MAPLE_COMMAND_GETMINFO, 2, &partnum);
676  if (error) {
677  dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
678  " error is 0x%X\n", mdev->port, mdev->unit, error);
679  goto fail_mtd_info;
680  }
681  return 0;
682 
683 fail_device_busy:
684  kfree(card->mtd);
685 fail_mtd_info:
686  kfree(card->parts);
687 fail_partitions:
688  kfree(card);
689 fail_nomem:
690  return error;
691 }
692 
693 static void __devexit vmu_disconnect(struct maple_device *mdev)
694 {
695  struct memcard *card;
696  struct mdev_part *mpart;
697  int x;
698 
699  mdev->callback = NULL;
700  card = maple_get_drvdata(mdev);
701  for (x = 0; x < card->partitions; x++) {
702  mpart = ((card->mtd)[x]).priv;
703  mpart->mdev = NULL;
704  mtd_device_unregister(&((card->mtd)[x]));
705  kfree(((card->parts)[x]).name);
706  }
707  kfree(card->parts);
708  kfree(card->mtd);
709  kfree(card);
710 }
711 
712 /* Callback to handle eccentricities of both mtd subsystem
713  * and general flakyness of Dreamcast VMUs
714  */
715 static int vmu_can_unload(struct maple_device *mdev)
716 {
717  struct memcard *card;
718  int x;
719  struct mtd_info *mtd;
720 
721  card = maple_get_drvdata(mdev);
722  for (x = 0; x < card->partitions; x++) {
723  mtd = &((card->mtd)[x]);
724  if (mtd->usecount > 0)
725  return 0;
726  }
727  return 1;
728 }
729 
730 #define ERRSTR "VMU at (%d, %d) file error -"
731 
732 static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
733 {
734  enum maple_file_errors error = ((int *)recvbuf)[1];
735 
736  switch (error) {
737 
739  dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
740  mdev->port, mdev->unit);
741  break;
742 
744  dev_notice(&mdev->dev, ERRSTR " phase error\n",
745  mdev->port, mdev->unit);
746  break;
747 
749  dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
750  mdev->port, mdev->unit);
751  break;
752 
754  dev_notice(&mdev->dev, ERRSTR " write error\n",
755  mdev->port, mdev->unit);
756  break;
757 
759  dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
760  mdev->port, mdev->unit);
761  break;
762 
764  dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
765  mdev->port, mdev->unit);
766  break;
767 
768  default:
769  dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
770  mdev->port, mdev->unit, error);
771  }
772 }
773 
774 
775 static int __devinit probe_maple_vmu(struct device *dev)
776 {
777  int error;
778  struct maple_device *mdev = to_maple_dev(dev);
779  struct maple_driver *mdrv = to_maple_driver(dev->driver);
780 
781  mdev->can_unload = vmu_can_unload;
782  mdev->fileerr_handler = vmu_file_error;
783  mdev->driver = mdrv;
784 
785  error = vmu_connect(mdev);
786  if (error)
787  return error;
788 
789  return 0;
790 }
791 
792 static int __devexit remove_maple_vmu(struct device *dev)
793 {
794  struct maple_device *mdev = to_maple_dev(dev);
795 
796  vmu_disconnect(mdev);
797  return 0;
798 }
799 
800 static struct maple_driver vmu_flash_driver = {
801  .function = MAPLE_FUNC_MEMCARD,
802  .drv = {
803  .name = "Dreamcast_visual_memory",
804  .probe = probe_maple_vmu,
805  .remove = __devexit_p(remove_maple_vmu),
806  },
807 };
808 
809 static int __init vmu_flash_map_init(void)
810 {
811  return maple_driver_register(&vmu_flash_driver);
812 }
813 
814 static void __exit vmu_flash_map_exit(void)
815 {
816  maple_driver_unregister(&vmu_flash_driver);
817 }
818 
819 module_init(vmu_flash_map_init);
820 module_exit(vmu_flash_map_exit);
821 
822 MODULE_LICENSE("GPL");
823 MODULE_AUTHOR("Adrian McMenamin");
824 MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");