mm.c - Linux Kernel 3.7.1
/*
 * PS3 address space management.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/cell-regs.h>
#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/setup.h>

#include "platform.h"

#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_devel
#endif

enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1,
#else
	USE_DYNAMIC_DMA = 0,
#endif
};

enum {
	PAGE_SHIFT_4K = 12U,
	PAGE_SHIFT_64K = 16U,
	PAGE_SHIFT_16M = 24U,
};
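
/*
 * make_page_sizes - pack two page-size exponents into the 64-bit value
 * passed to lv1_construct_virtual_address_space(): the first exponent
 * goes in bits 63..56, the second in bits 55..48.  For example,
 * make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K) yields
 * 0x1810000000000000, requesting 16M and 64K pages.
 */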
static unsigned long make_page_sizes(unsigned long a, unsigned long b)
{
	return (a << 56) | (b << 48);
}

enum {
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0x08,
};

/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

enum {
	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};

/*============================================================================*/
/* virtual address space routines */
/*============================================================================*/
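
/*
 * struct mem_region - lpar memory region
 * @base: base lpar address
 * @size: size in bytes
 * @offset: difference between @base and map.rm.size
 * @destroy: set when the region should be released at shutdown
 */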
struct mem_region {
	u64 base;
	u64 size;
	unsigned long offset;
	int destroy;
};
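
/*
 * struct map - address space state variables
 * @total: total lpar memory available to the partition
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 * @rm: the 'real mode' (boot) region, mapped 1:1 from physical addresses
 * @r1: the extended (highmem) region allocated from the HV
 */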
struct map {
	u64 total;
	u64 vas_id;
	u64 htab_size;
	struct mem_region rm;
	struct mem_region r1;
};

#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
	const char *func, int line)
{
	DBG("%s:%d: map.total     = %llxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size   = %llxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id    = %llu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base   = %llxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size   = %llxh\n", func, line, m->r1.size);
}

static struct map map;
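
/*
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address
 *
 * Addresses below map.rm.size lie in the boot region and map 1:1; higher
 * addresses belong to the high region and are shifted up by map.r1.offset.
 */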
unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
	BUG_ON(is_kernel_addr(phys_addr));
	return (phys_addr < map.rm.size)
		? phys_addr : phys_addr + map.r1.offset;
}

EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
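
/*
 * ps3_mm_vas_create - create the virtual address space
 * @htab_size: returns the constructed htab size in bytes
 */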
void __init ps3_mm_vas_create(unsigned long* htab_size)
{
	int result;
	u64 start_address;
	u64 size;
	u64 access_right;
	u64 max_page_size;
	u64 flags;

	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,
		&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,
			ps3_result(result));
		goto fail;
	}

	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
			max_page_size);
		goto fail;
	}

	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
		2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
		&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	return;

fail:
	panic("ps3_mm_vas_create failed");
}
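
/*
 * ps3_mm_vas_destroy - destroy the virtual address space
 *
 * Selects address space 0 before destructing the one held in map.vas_id.
 */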
void ps3_mm_vas_destroy(void)
{
	int result;

	DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id);

	if (map.vas_id) {
		result = lv1_select_virtual_address_space(0);
		BUG_ON(result);
		result = lv1_destruct_virtual_address_space(map.vas_id);
		BUG_ON(result);
		map.vas_id = 0;
	}
}
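
/*
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * The region is created with the vas large (16M) page size; @size is
 * rounded down to a multiple of 16M.
 */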
static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
	int result;
	u64 muid;

	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

	DBG("%s:%d requested  %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d actual     %llxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
		size - r->size, (size - r->size) / 1024 / 1024);

	if (r->size == 0) {
		DBG("%s:%d: size == 0\n", __func__, __LINE__);
		result = -1;
		goto zero_region;
	}

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto zero_region;
	}

	r->destroy = 1;
	r->offset = r->base - map.rm.size;
	return result;

zero_region:
	r->size = r->base = r->offset = 0;
	return result;
}
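
/*
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 *
 * The region is released back to the HV only if it was allocated by
 * ps3_mm_region_create() (r->destroy set).
 */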
static void ps3_mm_region_destroy(struct mem_region *r)
{
	int result;

	if (!r->destroy) {
		pr_info("%s:%d: Not destroying high region: %llxh %llxh\n",
			__func__, __LINE__, r->base, r->size);
		return;
	}

	DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);

	if (r->base) {
		result = lv1_release_memory(r->base);
		BUG_ON(result);
		r->size = r->base = r->offset = 0;
		map.total = map.rm.size;
	}
}

static int ps3_mm_get_repository_highmem(struct mem_region *r)
{
	int result;

	/* Assume a single highmem region. */

	result = ps3_repository_read_highmem_info(0, &r->base, &r->size);

	if (result)
		goto zero_region;

	if (!r->base || !r->size) {
		result = -1;
		goto zero_region;
	}

	r->offset = r->base - map.rm.size;

	DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
		__func__, __LINE__, r->base, r->size);

	return 0;

zero_region:
	DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);

	r->size = r->base = r->offset = 0;
	return result;
}

/*============================================================================*/
/* dma routines */
/*============================================================================*/
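
/*
 * dma_sb_lpar_to_bus - translate an lpar address to an ioc bus address
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 */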
static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
{
	if (lpar_addr >= map.rm.size)
		lpar_addr -= map.r1.offset;
	BUG_ON(lpar_addr < r->offset);
	BUG_ON(lpar_addr >= r->offset + r->len);
	return r->bus_addr + lpar_addr - r->offset;
}

#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
	const char *func, int line)
{
	DBG("%s:%d: dev       %llu:%llu\n", func, line, r->dev->bus_id,
		r->dev->dev_id);
	DBG("%s:%d: page_size %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr  %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len       %lxh\n", func, line, r->len);
	DBG("%s:%d: offset    %lxh\n", func, line, r->offset);
}
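
/*
 * struct dma_chunk - a chunk of dma pages mapped by the io controller
 * @region: the dma region that owns this chunk
 * @lpar_addr: starting lpar address of the area to map
 * @bus_addr: starting ioc bus address of the area to map
 * @len: length in bytes of the area to map
 * @link: list_head for struct ps3_dma_region.chunk_list
 * @usage_count: number of active mappings of this chunk
 */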
struct dma_chunk {
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;
	unsigned long len;
	struct list_head link;
	unsigned int usage_count;
};

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk (const struct dma_chunk* c, const char* func,
	int line)
{
	DBG("%s:%d: r.dev       %llu:%llu\n", func, line,
		c->region->dev->bus_id, c->region->dev->dev_id);
	DBG("%s:%d: r.bus_addr  %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len       %lxh\n", func, line, c->region->len);
	DBG("%s:%d: r.offset    %lxh\n", func, line, c->region->offset);
	DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr  %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len       %lxh\n", func, line, c->len);
}

static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus,
		1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (aligned_bus >= c->bus_addr &&
		    aligned_bus + aligned_len <= c->bus_addr + c->len)
			return c;

		/* below */
		if (aligned_bus + aligned_len <= c->bus_addr)
			continue;

		/* above */
		if (aligned_bus >= c->bus_addr + c->len)
			continue;

		/* we don't handle the multi-chunk case for now */
		dma_dump_chunk(c);
		BUG();
	}
	return NULL;
}

static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
	unsigned long lpar_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
		1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (c->lpar_addr <= aligned_lpar &&
		    aligned_lpar < c->lpar_addr + c->len) {
			if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
				return c;
			else {
				dma_dump_chunk(c);
				BUG();
			}
		}
		/* below */
		if (aligned_lpar + aligned_len <= c->lpar_addr) {
			continue;
		}
		/* above */
		if (c->lpar_addr + c->len <= aligned_lpar) {
			continue;
		}
	}
	return NULL;
}

static int dma_sb_free_chunk(struct dma_chunk *c)
{
	int result = 0;

	if (c->bus_addr) {
		result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
			c->region->dev->dev_id, c->bus_addr, c->len);
		BUG_ON(result);
	}

	kfree(c);
	return result;
}

static int dma_ioc0_free_chunk(struct dma_chunk *c)
{
	int result = 0;
	int iopage;
	unsigned long offset;
	struct ps3_dma_region *r = c->region;

	DBG("%s:start\n", __func__);
	for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
		offset = (1 << r->page_size) * iopage;
		/* put INVALID entry */
		result = lv1_put_iopte(0,
			c->bus_addr + offset,
			c->lpar_addr + offset,
			r->ioid,
			0);
		DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
			c->bus_addr + offset,
			c->lpar_addr + offset,
			r->ioid);

		if (result) {
			DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
				__LINE__, ps3_result(result));
		}
	}
	kfree(c);
	DBG("%s:end\n", __func__);
	return result;
}

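
/*
 * dma_sb_map_pages - map pages into the ioc bus address space
 * @r: pointer to a struct ps3_dma_region
 * @phys_addr: starting physical address of the area to map
 * @len: length in bytes of the area to map
 * @c_out: receives the allocated struct dma_chunk for this area
 *
 * This is the lowest level dma mapping routine; it makes the HV call to
 * add the pages into the io controller address space.
 */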
static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
	int result;
	struct dma_chunk *c;

	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
	c->len = len;

	BUG_ON(iopte_flag != 0xf800000000000000UL);
	result = lv1_map_device_dma_region(c->region->dev->bus_id,
		c->region->dev->dev_id, c->lpar_addr,
		c->bus_addr, c->len, iopte_flag);
	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_map;
	}

	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	return 0;

fail_map:
	kfree(c);
fail_alloc:
	*c_out = NULL;
	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}

static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out,
	u64 iopte_flag)
{
	int result;
	struct dma_chunk *c, *last;
	int iopage, pages;
	unsigned long offset;

	DBG(KERN_ERR "%s: phy=%#lx, lpar%#lx, len=%#lx\n", __func__,
		phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->len = len;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	/* allocate IO address */
	if (list_empty(&r->chunk_list.head)) {
		/* first one */
		c->bus_addr = r->bus_addr;
	} else {
		/* derive from last bus addr*/
		last = list_entry(r->chunk_list.head.next,
			struct dma_chunk, link);
		c->bus_addr = last->bus_addr + last->len;
		DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
			last->bus_addr, last->len);
	}

	/* FIXME: check whether length exceeds region size */

	/* build ioptes for the area */
	pages = len >> r->page_size;
	DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
		r->page_size, r->len, pages, iopte_flag);
	for (iopage = 0; iopage < pages; iopage++) {
		offset = (1 << r->page_size) * iopage;
		result = lv1_put_iopte(0,
			c->bus_addr + offset,
			c->lpar_addr + offset,
			r->ioid,
			iopte_flag);
		if (result) {
			pr_warning("%s:%d: lv1_put_iopte failed: %s\n",
				__func__, __LINE__, ps3_result(result));
			goto fail_map;
		}
		DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
			iopage, c->bus_addr + offset, c->lpar_addr + offset,
			r->ioid);
	}

	/* be sure that last allocated one is inserted at head */
	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	DBG("%s: end\n", __func__);
	return 0;

fail_map:
	/* invalidate any ioptes already installed, recomputing each
	 * page's offset */
	for (iopage--; 0 <= iopage; iopage--) {
		offset = (1 << r->page_size) * iopage;
		lv1_put_iopte(0,
			c->bus_addr + offset,
			c->lpar_addr + offset,
			r->ioid,
			0);
	}
	kfree(c);
fail_alloc:
	*c_out = NULL;
	return result;
}

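
/*
 * dma_sb_region_create - create a device dma region
 * @r: pointer to a struct ps3_dma_region
 *
 * This is the lowest level dma region create operation; it only allocates
 * the HV dma region, no pages are mapped.
 */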
static int dma_sb_region_create(struct ps3_dma_region *r)
{
	int result;
	u64 bus_addr;

	DBG(" -> %s:%d:\n", __func__, __LINE__);

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
		__LINE__, r->len, r->page_size, r->offset);

	BUG_ON(!r->len);
	BUG_ON(!r->page_size);
	BUG_ON(!r->region_ops);

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		roundup_pow_of_two(r->len), r->page_size, r->region_type,
		&bus_addr);
	r->bus_addr = bus_addr;

	if (result) {
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}

	return result;
}

static int dma_ioc0_region_create(struct ps3_dma_region *r)
{
	int result;
	u64 bus_addr;

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_io_segment(0,
		r->len,
		r->page_size,
		&bus_addr);
	r->bus_addr = bus_addr;
	if (result) {
		DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}
	DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
		r->len, r->page_size, r->bus_addr);
	return result;
}

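
/*
 * dma_sb_region_free - free a device dma region
 * @r: pointer to a struct ps3_dma_region
 *
 * Frees all mapped chunks, then releases the HV dma region.
 */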
static int dma_sb_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c;
	struct dma_chunk *tmp;

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;

	return result;
}

static int dma_ioc0_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c, *n;

	DBG("%s: start\n", __func__);
	list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	result = lv1_release_io_segment(0, r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_release_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;
	DBG("%s: end\n", __func__);

	return result;
}

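
/*
 * dma_sb_map_area - map an area of memory into a device dma region
 * @r: pointer to a struct ps3_dma_region
 * @virt_addr: starting virtual address of the area to map (treated as a
 *	physical address when it is not a kernel address)
 * @len: length in bytes of the area to map
 * @bus_addr: returns the starting ioc bus address of the mapped area
 *
 * If an existing chunk already covers the area its usage count is
 * incremented; otherwise new pages are mapped with dma_sb_map_pages().
 */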
static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
		1 << r->page_size);
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
			virt_addr);
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
			phys_addr);
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
			lpar_addr);
		DBG("%s:%d len       %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr  %llxh (%lxh)\n", __func__, __LINE__,
			*bus_addr, len);
	}

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		DBG("%s:%d: reusing mapped chunk\n", __func__, __LINE__);
		dma_dump_chunk(c);
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}

static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
		1 << r->page_size);

	DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
		virt_addr, len);
	DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
		phys_addr, aligned_phys, aligned_len);

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);

	if (c) {
		/* FIXME */
		BUG();
		*bus_addr = c->bus_addr + phys_addr - aligned_phys;
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
		iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}
	*bus_addr = c->bus_addr + phys_addr - aligned_phys;
	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
		virt_addr, phys_addr, aligned_phys, *bus_addr);
	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}

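
/*
 * dma_sb_unmap_area - unmap an area of memory from a device dma region
 * @r: pointer to a struct ps3_dma_region
 * @bus_addr: starting ioc bus address of the area to unmap
 * @len: length in bytes of the area to unmap
 *
 * The covering chunk's usage count is decremented; the chunk is freed
 * when it reaches zero.
 */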
static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}

static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
	dma_addr_t bus_addr, unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus,
			1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	DBG("%s: end\n", __func__);
	return 0;
}

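
/*
 * dma_sb_region_create_linear - set up a linear dma mapping for a device
 * @r: pointer to a struct ps3_dma_region
 *
 * Creates the HV dma region and maps all of lpar memory (both ram
 * chunks) into the io controller bus address space up front.
 */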
static int dma_sb_region_create_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long virt_addr, len;
	dma_addr_t tmp;

	if (r->len > 16*1024*1024) {	/* FIXME: need proper fix */
		/* force 16M dma pages for linear mapping */
		if (r->page_size != PS3_DMA_16M) {
			pr_info("%s:%d: forcing 16M pages for linear map\n",
				__func__, __LINE__);
			r->page_size = PS3_DMA_16M;
			r->len = _ALIGN_UP(r->len, 1 << r->page_size);
		}
	}

	result = dma_sb_region_create(r);
	BUG_ON(result);

	if (r->offset < map.rm.size) {
		/* Map (part of) 1st RAM chunk */
		virt_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Map (part of) 2nd RAM chunk */
		virt_addr = map.rm.size;
		len = r->len;
		if (r->offset >= map.rm.size)
			virt_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		BUG_ON(result);
	}

	return result;
}

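
/*
 * dma_sb_region_free_linear - free a linearly mapped dma region
 * @r: pointer to a struct ps3_dma_region
 *
 * Unmaps both ram chunks, then frees the HV dma region.
 */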
static int dma_sb_region_free_linear(struct ps3_dma_region *r)
{
	int result;
	dma_addr_t bus_addr;
	unsigned long len, lpar_addr;

	if (r->offset < map.rm.size) {
		/* Unmap (part of) 1st RAM chunk */
		lpar_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Unmap (part of) 2nd RAM chunk */
		lpar_addr = map.r1.base;
		len = r->len;
		if (r->offset >= map.rm.size)
			lpar_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	result = dma_sb_region_free(r);
	BUG_ON(result);

	return result;
}

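
/*
 * dma_sb_map_area_linear - map an area within a linearly mapped region
 * @r: pointer to a struct ps3_dma_region
 * @virt_addr: starting virtual address of the area to map
 * @len: length in bytes of the area to map
 * @bus_addr: returns the starting ioc bus address of the area
 *
 * The dma mappings were already set up at region create time, so this
 * just computes the corresponding bus address.
 */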
static int dma_sb_map_area_linear(struct ps3_dma_region *r,
	unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
	return 0;
}

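
/*
 * dma_sb_unmap_area_linear - unmap an area within a linearly mapped region
 *
 * A no-op: the linear mapping persists for the lifetime of the region.
 */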
static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
	dma_addr_t bus_addr, unsigned long len)
{
	return 0;
}

static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
	.create = dma_sb_region_create,
	.free = dma_sb_region_free,
	.map = dma_sb_map_area,
	.unmap = dma_sb_unmap_area
};

static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
	.create = dma_sb_region_create_linear,
	.free = dma_sb_region_free_linear,
	.map = dma_sb_map_area_linear,
	.unmap = dma_sb_unmap_area_linear
};

static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
	.create = dma_ioc0_region_create,
	.free = dma_ioc0_region_free,
	.map = dma_ioc0_map_area,
	.unmap = dma_ioc0_unmap_area
};

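
/*
 * ps3_dma_region_init - initialize a dma region structure
 * @dev: system bus device that owns the region
 * @r: pointer to a struct ps3_dma_region
 * @page_size: ioc page size
 * @region_type: HV region type
 * @addr: base address of a locally allocated buffer, or NULL to cover
 *	all of memory
 * @len: length of @addr in bytes, or 0 to cover all of memory
 */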
int ps3_dma_region_init(struct ps3_system_bus_device *dev,
	struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
	enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
	unsigned long lpar_addr;

	lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;

	r->dev = dev;
	r->page_size = page_size;
	r->region_type = region_type;
	r->offset = lpar_addr;
	if (r->offset >= map.rm.size)
		r->offset -= map.r1.offset;
	r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);

	switch (dev->dev_type) {
	case PS3_DEVICE_TYPE_SB:
		r->region_ops = USE_DYNAMIC_DMA
			? &ps3_dma_sb_region_ops
			: &ps3_dma_sb_region_linear_ops;
		break;
	case PS3_DEVICE_TYPE_IOC0:
		r->region_ops = &ps3_dma_ioc0_region_ops;
		break;
	default:
		BUG();
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ps3_dma_region_init);

int ps3_dma_region_create(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->create);
	return r->region_ops->create(r);
}
EXPORT_SYMBOL(ps3_dma_region_create);

int ps3_dma_region_free(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->free);
	return r->region_ops->free(r);
}
EXPORT_SYMBOL(ps3_dma_region_free);

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}

int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	return r->region_ops->unmap(r, bus_addr, len);
}

/*============================================================================*/
/* system startup routines */
/*============================================================================*/
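
/*
 * ps3_mm_init - initialize the address space state variables
 */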
void __init ps3_mm_init(void)
{
	int result;

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
		&map.total);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	/* Check if we got the highmem region from an earlier boot step */

	if (ps3_mm_get_repository_highmem(&map.r1))
		ps3_mm_region_create(&map.r1, map.total - map.rm.size);

	/* correct map.total for the real total amount of memory we use */
	map.total = map.rm.size + map.r1.size;

	if (!map.r1.size) {
		DBG("%s:%d: No highmem region found\n", __func__, __LINE__);
	} else {
		DBG("%s:%d: Adding highmem region: %llxh %llxh\n",
			__func__, __LINE__, map.rm.size,
			map.total - map.rm.size);
		memblock_add(map.rm.size, map.total - map.rm.size);
	}

	DBG(" <- %s:%d\n", __func__, __LINE__);
}
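
/*
 * ps3_mm_shutdown - final cleanup of the address space
 *
 * Releases the high region back to the HV.
 */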
void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
}