nouveau_bo.c
/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <core/engine.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		if (fence) {
			/* Mark it as pending. */
			tile->fence = fence;
			nouveau_fence_ref(fence);
		}

		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}
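
/*
 * Usage sketch (illustrative, not part of this file; mirrors what
 * nouveau_bo_vm_bind()/nouveau_bo_vm_cleanup() do further down): claim a
 * tile region for a VRAM allocation and release it once the GPU is done,
 * passing the fence that covers the last access.
 *
 *	struct nouveau_drm_tile *tile;
 *
 *	tile = nv10_bo_set_tiling(dev, addr, size, pitch, flags);
 *	... use the buffer ...
 *	nv10_bo_put_tile_region(dev, tile, fence);
 */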

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_device *device = nv_device(drm->device);

	if (device->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (device->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}
187 
188 int
189 nouveau_bo_new(struct drm_device *dev, int size, int align,
190  uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
191  struct sg_table *sg,
192  struct nouveau_bo **pnvbo)
193 {
194  struct nouveau_drm *drm = nouveau_drm(dev);
195  struct nouveau_bo *nvbo;
196  size_t acc_size;
197  int ret;
198  int type = ttm_bo_type_device;
199 
200  if (sg)
201  type = ttm_bo_type_sg;
202 
203  nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
204  if (!nvbo)
205  return -ENOMEM;
206  INIT_LIST_HEAD(&nvbo->head);
207  INIT_LIST_HEAD(&nvbo->entry);
208  INIT_LIST_HEAD(&nvbo->vma_list);
209  nvbo->tile_mode = tile_mode;
210  nvbo->tile_flags = tile_flags;
211  nvbo->bo.bdev = &drm->ttm.bdev;
212 
213  nvbo->page_shift = 12;
214  if (drm->client.base.vm) {
215  if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
216  nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
217  }
218 
219  nouveau_bo_fixup_align(nvbo, flags, &align, &size);
220  nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
221  nouveau_bo_placement_set(nvbo, flags, 0);
222 
223  acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
224  sizeof(struct nouveau_bo));
225 
226  ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
227  type, &nvbo->placement,
228  align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
229  nouveau_bo_del_ttm);
230  if (ret) {
231  /* ttm will call nouveau_bo_del_ttm if it fails.. */
232  return ret;
233  }
234 
235  *pnvbo = nvbo;
236  return 0;
237 }
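
/*
 * Usage sketch (illustrative; the local names are assumptions): allocate a
 * 64 KiB untiled buffer in VRAM, then drop the reference when done.
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0, NULL, &nvbo);
 *	if (ret)
 *		return ret;
 *	... use nvbo ...
 *	nouveau_bo_ref(NULL, &nvbo);
 */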

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;

	if (nv_device(drm->device)->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}
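
/*
 * Worked example: with 256 MiB of VRAM (vram_pages = 65536 4 KiB pages),
 * a tiled depth (ZETA) buffer smaller than 64 MiB (vram_pages / 4) is
 * restricted to fpfn = 32768, lpfn = ~0 (the upper half), while a tiled
 * colour buffer gets fpfn = 0, lpfn = 32768 (the lower half), so the two
 * land on different memory controller units.
 */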

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}
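
/*
 * Usage sketch (illustrative): pinning is reference counted, so every
 * successful nouveau_bo_pin() must be paired with one nouveau_bo_unpin().
 *
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret)
 *		return ret;
 *	... the buffer cannot be evicted from VRAM here ...
 *	nouveau_bo_unpin(nvbo);
 */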

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
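
/*
 * Usage sketch (illustrative): the index passed to the accessors above is
 * in units of the access size, so index 1 of nouveau_bo_wr32() touches
 * byte offset 4. The buffer must be kmapped first.
 *
 *	ret = nouveau_bo_map(nvbo);
 *	if (ret)
 *		return ret;
 *	nouveau_bo_wr32(nvbo, 0, 0x00000000);
 *	val = nouveau_bo_rd32(nvbo, 0);
 *	nouveau_bo_unmap(nvbo);
 */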

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (nv_device(drm->device)->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (nv_device(drm->device)->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */
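
/*
 * All of the movers below follow the same pushbuf pattern: reserve space
 * on the channel's ring, emit a method header plus its data words, and
 * optionally kick the ring. A minimal sketch of that pattern:
 *
 *	ret = RING_SPACE(chan, 2);		 reserve two words
 *	if (ret == 0) {
 *		BEGIN_NV04(chan, NvSubCopy, mthd, 1);  header, 1 data word
 *		OUT_RING (chan, data);
 *		FIRE_RING (chan);		 submit to the GPU
 *	}
 */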

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* COPY */);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING (chan, NvNotify0);
		OUT_RING (chan, NvDmaFB);
		OUT_RING (chan, NvDmaFB);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING (chan, NvNotify0);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return NvDmaFB;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
			     PAGE_SHIFT, node->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->channel;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	mutex_lock(&chan->cli->mutex);

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (nv_device(drm->device)->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	mutex_unlock(&chan->cli->mutex);
	return ret;
}

void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_object *object;
		struct nouveau_channel *chan;
		u32 handle = (mthd->engine << 16) | mthd->oclass;

		if (mthd->init == nve0_bo_move_init)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
					 mthd->oclass, NULL, 0, &object);
		if (ret == 0) {
			ret = mthd->init(chan, handle);
			if (ret) {
				nouveau_object_del(nv_object(drm),
						   chan->handle, handle);
				continue;
			}

			drm->ttm.move = mthd->exec;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
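
/*
 * Note that _methods[] is probed in order and the do/while above stops at
 * the empty terminator (its ->exec is NULL), so the 0x88b4 CRYPT entry
 * placed after the {} is never reached; it would have to be moved above
 * the terminator to take effect.
 */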

static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr,
			       no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve,
				   no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr,
			       no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve,
				   no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
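
/*
 * The two helpers above bounce copies through a GART placement because the
 * copy engines can only reach TTM_PL_{VRAM,TT} directly: _flipd moves
 * VRAM -> TT with the hardware, then TT -> SYSTEM via ttm_bo_move_ttm(),
 * while _flips does the reverse for SYSTEM -> VRAM.
 */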

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
				nouveau_vm_map_sg_table(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
			else
				nouveau_vm_map_sg(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (nv_device(drm->device)->card_type >= NV_10) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nv_device(drm->device)->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* CPU copy if we have no accelerated method available */
	if (!drm->ttm.move) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
					 no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve,
					    no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve,
					    no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve,
					   no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu,
				 new_mem);

out:
	if (nv_device(drm->device)->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		if (nv_device(drm->device)->card_type >= NV_50) {
			struct nouveau_bar *bar = nouveau_bar(drm->device);
			struct nouveau_mem *node = mem->mm_node;

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nouveau_bar(drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_device *device = nv_device(drm->device);
	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (nv_device(drm->device)->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			/* unwind every page we managed to map */
			while (i--) {
				pci_unmap_page(dev->pdev,
					       ttm_dma->dma_address[i],
					       PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence = NULL;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}
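
/*
 * Usage sketch (illustrative): after emitting GPU work that touches a
 * buffer, attach the fence for that work so TTM knows when the buffer is
 * idle again; the bo holds its own reference.
 *
 *	ret = nouveau_fence_new(chan, &fence);
 *	if (ret == 0) {
 *		nouveau_bo_fence(nvbo, fence);
 *		nouveau_fence_unref(&fence);
 *	}
 */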

static void
nouveau_bo_fence_unref(void **sync_obj)
{
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
{
	return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
	return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
		if (node->sg)
			nouveau_vm_map_sg_table(vma, 0, size, node);
		else
			nouveau_vm_map_sg(vma, 0, size, node);
	}

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}
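
/*
 * Usage sketch (illustrative, loosely following how nouveau_gem.c drives
 * these helpers): map a buffer into a client's address space, reusing an
 * existing mapping where one exists, and tear it down again later.
 *
 *	struct nouveau_vma *vma = nouveau_bo_vma_find(nvbo, vm);
 *	if (!vma) {
 *		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *		if (!vma)
 *			return -ENOMEM;
 *		ret = nouveau_bo_vma_add(nvbo, vm, vma);
 *		if (ret) {
 *			kfree(vma);
 *			return ret;
 *		}
 *	}
 *	... use the mapping ...
 *	nouveau_bo_vma_del(nvbo, vma);
 *	kfree(vma);
 */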