static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
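/*
 * Manager init: record the placement domain and initialize the ordered
 * allocation list (olist) and the per-ring free lists (flist[]); failure
 * to create the backing buffer object is reported via dev_err().
 */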
	sa_manager->domain = domain;
	INIT_LIST_HEAD(&sa_manager->olist);
	INIT_LIST_HEAD(&sa_manager->flist[i]);
	dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
/*
 * Manager teardown: reclaim what can be reclaimed, warn if allocations
 * are still outstanding, then remove whatever is left.
 */
	if (!list_empty(&sa_manager->olist)) {
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");

	radeon_sa_bo_remove_locked(sa_bo);
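/*
 * Manager start: the backing BO must already exist.  It is reserved and
 * pinned, failures are reported (the pin error path also drops the
 * reservation), and the reservation is released again once setup is done.
 */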
	if (sa_manager->bo == NULL) {

	dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);

	radeon_bo_unreserve(sa_manager->bo);
	dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);

	radeon_bo_unreserve(sa_manager->bo);
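/*
 * Manager suspend: bail out early if there is no backing BO; otherwise
 * the reservation is dropped again once the BO has been quiesced.
 */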
	if (sa_manager->bo == NULL) {

	radeon_bo_unreserve(sa_manager->bo);
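/*
 * Removing an entry with the manager lock held: unlink it from the
 * ordered allocation list and from its ring's free list.
 */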
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
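/*
 * Opportunistic cleanup: nothing to do when the hole already sits at the
 * end of the ordered list; otherwise allocations whose fence has signaled
 * are removed so the hole can grow.
 */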
	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	radeon_sa_bo_remove_locked(sa_bo);
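/*
 * Hole bounds: the start offset is taken from the allocation the hole
 * sits behind (0 when it is at the head of the ordered list); the end
 * offset comes from the following allocation, falling back to the
 * manager size when the hole is last.
 */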
static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	if (hole != &sa_manager->olist) {

static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	return sa_manager->size;
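/*
 * First-fit check: the current hole is used when it can hold the request
 * plus the padding needed for alignment; the new entry is then linked in
 * right after the hole and starts with an empty free-list node.
 */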
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);

	if ((eoffset - soffset) >= (size + wasted)) {
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
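/*
 * Wake-up condition (radeon_sa_event): a waiter can proceed once any ring
 * has entries pending on its free list, or the current hole is already
 * large enough for the request including its alignment padding.
 */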
			    unsigned size, unsigned align)

	unsigned soffset, eoffset, wasted;

	if (!list_empty(&sa_manager->flist[i])) {

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);

	if ((eoffset - soffset) >= (size + wasted)) {
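/*
 * Advancing the hole: when it already points at the end of the ordered
 * list there is nothing to skip; otherwise the per-ring free lists are
 * scanned for the signaled allocation closest after the hole (offsets
 * before the hole wrap around, hence the + size), that ring's retry
 * counter is bumped and the winner is removed to open up its range.
 */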
	if (sa_manager->hole->next == &sa_manager->olist) {

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	best = sa_manager->size * 2;

	if (list_empty(&sa_manager->flist[i])) {

	tmp += sa_manager->size;

	++tries[best_bo->fence->ring];
	radeon_sa_bo_remove_locked(best_bo);
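/*
 * Allocation slow path: the new radeon_sa_bo is set up, then, under the
 * manager's wait-queue lock, the code loops: reclaim signaled entries,
 * try the current hole, and advance the hole with radeon_sa_bo_next_hole().
 * When nothing more can be reclaimed the caller either sleeps until
 * radeon_sa_event() becomes true (block == true) or gives up.
 */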
		     unsigned size, unsigned align, bool block)

	if ((*sa_bo) == NULL) {

	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);

	radeon_sa_bo_try_free(sa_manager);

	if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
		spin_unlock(&sa_manager->wq.lock);

	} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

	spin_unlock(&sa_manager->wq.lock);

	spin_lock(&sa_manager->wq.lock);

	if (r == -ENOENT && block) {
		radeon_sa_event(sa_manager, size, align)

	} else if (r == -ENOENT) {

	spin_unlock(&sa_manager->wq.lock);
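/*
 * Freeing an allocation: a NULL handle is a no-op.  With the manager lock
 * held, the entry is only removed immediately when it is no longer
 * protected by an unsignaled fence; otherwise it is parked on its ring's
 * free list for later reclaim.
 */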
	if (sa_bo == NULL || *sa_bo == NULL) {

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);

	radeon_sa_bo_remove_locked(*sa_bo);

	spin_unlock(&sa_manager->wq.lock);
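/*
 * debugfs dump: walk the ordered list under the manager lock and print
 * each allocation, noting the fence and ring that still protect it.
 */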
#if defined(CONFIG_DEBUG_FS)

	spin_lock(&sa_manager->wq.lock);

	seq_printf(m, " protected by 0x%016llx on ring %d",

	spin_unlock(&sa_manager->wq.lock);