
Commit 2889caa

drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the execobjects. Creating an auxiliary list is inefficient when compared to using the execobject array we already have allocated.

Reservation is then split into phases. As we look up the VMA, we try to bind it back into its active location. Only if that fails do we add it to the unbound list for phase 2. In phase 2, we try to fit all those objects that could not keep their previous location, with a fallback to retrying all objects and evicting the VM in case of severe fragmentation. (This is the same as before, except that phase 1 is now done inline with looking up the VMA to avoid an iteration over the execobject array; in the ideal case, we eliminate the separate reservation phase.) During the reservation phase, we only evict from the VM between passes (rather than on every attempt to fit a new VMA, as currently). In testing with Unreal Engine's Atlantis demo, which stresses the eviction logic on gen7 class hardware, this speeds up the framerate by a factor of 2.

The second loop amalgamation is between move_to_gpu and move_to_active. As we always submit the request, even if incomplete, we can use the current request to track active VMA as we perform the flushes and synchronisation required.

The next big advancement is to avoid copying back to the user any execobjects and relocations that are not changed.

v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few more comments to explain some magic and hide other magic behind macros.

Signed-off-by: Chris Wilson <[email protected]>
Reviewed-by: Joonas Lahtinen <[email protected]>
1 parent 071750e commit 2889caa
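To make the two-phase reservation described in the commit message concrete, below is a minimal, self-contained C sketch of the control flow only: phase 1 tries to keep each object at its previous offset as it is looked up, and only the misses are collected for phase 2. The types and helpers here (toy_obj, try_reuse_offset(), place_anywhere()) are hypothetical stand-ins for illustration, not i915 driver code.

/* Toy model of the two-phase placement; not the kernel implementation. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_obj {
	unsigned long old_offset;	/* where the "VMA" was bound last time */
	unsigned long offset;		/* placement chosen for this execbuf */
	bool bound;
};

/* Phase 1: as each object is looked up, try to reuse its previous offset. */
static bool try_reuse_offset(struct toy_obj *obj)
{
	if (obj->old_offset % 2 == 0) {	/* pretend even offsets are still free */
		obj->offset = obj->old_offset;
		obj->bound = true;
	}
	return obj->bound;
}

/* Phase 2: place an object that missed; a real driver would evict between
 * whole passes here if the address space is too fragmented.
 */
static void place_anywhere(struct toy_obj *obj, unsigned long *next_free)
{
	obj->offset = (*next_free)++;
	obj->bound = true;
}

int main(void)
{
	struct toy_obj objs[] = { { .old_offset = 2 }, { .old_offset = 5 },
				  { .old_offset = 8 }, { .old_offset = 11 } };
	struct toy_obj *unbound[4];
	size_t nr_unbound = 0, i;
	unsigned long next_free = 100;

	/* Phase 1 happens inline with the lookup: no extra pass over the array. */
	for (i = 0; i < 4; i++)
		if (!try_reuse_offset(&objs[i]))
			unbound[nr_unbound++] = &objs[i];

	/* Phase 2 touches only the objects that could not keep their place. */
	for (i = 0; i < nr_unbound; i++)
		place_anywhere(unbound[i], &next_free);

	for (i = 0; i < 4; i++)
		printf("obj %zu -> offset %lu\n", i, objs[i].offset);
	return 0;
}

The point of the split mirrored here is that the common case, where every object still fits where it was, never pays for a second pass, and eviction is deferred until an entire pass has failed.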

File tree

7 files changed: +1241 -918 lines changed


drivers/gpu/drm/i915/i915_drv.h

Lines changed: 1 addition & 1 deletion
@@ -3581,7 +3581,7 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
 					 struct drm_mm_node *node,
 					 unsigned int flags);
-int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
+int i915_gem_evict_vm(struct i915_address_space *vm);
 
 /* belongs in i915_gem_gtt.h */
 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)

drivers/gpu/drm/i915/i915_gem_evict.c

Lines changed: 57 additions & 35 deletions
@@ -50,6 +50,29 @@ static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
 	return true;
 }
 
+static int ggtt_flush(struct drm_i915_private *i915)
+{
+	int err;
+
+	/* Not everything in the GGTT is tracked via vma (otherwise we
+	 * could evict as required with minimal stalling) so we are forced
+	 * to idle the GPU and explicitly retire outstanding requests in
+	 * the hopes that we can then remove contexts and the like only
+	 * bound by their active reference.
+	 */
+	err = i915_gem_switch_to_kernel_context(i915);
+	if (err)
+		return err;
+
+	err = i915_gem_wait_for_idle(i915,
+				     I915_WAIT_INTERRUPTIBLE |
+				     I915_WAIT_LOCKED);
+	if (err)
+		return err;
+
+	return 0;
+}
+
 static bool
 mark_free(struct drm_mm_scan *scan,
 	  struct i915_vma *vma,
@@ -175,19 +198,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
 	}
 
-	/* Not everything in the GGTT is tracked via vma (otherwise we
-	 * could evict as required with minimal stalling) so we are forced
-	 * to idle the GPU and explicitly retire outstanding requests in
-	 * the hopes that we can then remove contexts and the like only
-	 * bound by their active reference.
-	 */
-	ret = i915_gem_switch_to_kernel_context(dev_priv);
-	if (ret)
-		return ret;
-
-	ret = i915_gem_wait_for_idle(dev_priv,
-				     I915_WAIT_INTERRUPTIBLE |
-				     I915_WAIT_LOCKED);
+	ret = ggtt_flush(dev_priv);
 	if (ret)
 		return ret;
 
@@ -337,48 +348,59 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 /**
  * i915_gem_evict_vm - Evict all idle vmas from a vm
  * @vm: Address space to cleanse
- * @do_idle: Boolean directing whether to idle first.
  *
- * This function evicts all idles vmas from a vm. If all unpinned vmas should be
- * evicted the @do_idle needs to be set to true.
+ * This function evicts all vmas from a vm.
  *
  * This is used by the execbuf code as a last-ditch effort to defragment the
  * address space.
  *
  * To clarify: This is for freeing up virtual address space, not for freeing
  * memory in e.g. the shrinker.
  */
-int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+int i915_gem_evict_vm(struct i915_address_space *vm)
 {
+	struct list_head *phases[] = {
+		&vm->inactive_list,
+		&vm->active_list,
+		NULL
+	}, **phase;
 	struct list_head eviction_list;
 	struct i915_vma *vma, *next;
 	int ret;
 
 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
 	trace_i915_gem_evict_vm(vm);
 
-	if (do_idle) {
-		struct drm_i915_private *dev_priv = vm->i915;
-
-		if (i915_is_ggtt(vm)) {
-			ret = i915_gem_switch_to_kernel_context(dev_priv);
-			if (ret)
-				return ret;
-		}
-
-		ret = i915_gem_wait_for_idle(dev_priv,
-					     I915_WAIT_INTERRUPTIBLE |
-					     I915_WAIT_LOCKED);
+	/* Switch back to the default context in order to unpin
+	 * the existing context objects. However, such objects only
+	 * pin themselves inside the global GTT and performing the
+	 * switch otherwise is ineffective.
+	 */
+	if (i915_is_ggtt(vm)) {
+		ret = ggtt_flush(vm->i915);
 		if (ret)
 			return ret;
-
-		WARN_ON(!list_empty(&vm->active_list));
 	}
 
-	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
-		if (!i915_vma_is_pinned(vma))
-			WARN_ON(i915_vma_unbind(vma));
+	INIT_LIST_HEAD(&eviction_list);
+	phase = phases;
+	do {
+		list_for_each_entry(vma, *phase, vm_link) {
+			if (i915_vma_is_pinned(vma))
+				continue;
 
-	return 0;
+			__i915_vma_pin(vma);
+			list_add(&vma->evict_link, &eviction_list);
+		}
+	} while (*++phase);
+
+	ret = 0;
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
+		__i915_vma_unpin(vma);
+		if (ret == 0)
+			ret = i915_vma_unbind(vma);
+	}
+	return ret;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
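The rewritten i915_gem_evict_vm() above walks the inactive and active lists, pins every evictable vma onto a private eviction_list, and only then unbinds, so the vm lists are never modified while they are being iterated. The following self-contained sketch models that two-step "collect, then unbind" pattern with toy types; names such as toy_vma and toy_evict_vm() are illustrative stand-ins, not driver code, and it omits that the real code keeps each vma pinned while it sits on the eviction list.

/* Toy model of "collect under stable lists, then unbind"; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_vma {
	int id;
	bool pinned;			/* externally pinned: must not be evicted */
	struct toy_vma *vm_next;	/* link on the vm's inactive/active list */
	struct toy_vma *evict_next;	/* link on the local eviction list */
};

static int toy_unbind(struct toy_vma *vma)
{
	printf("unbinding vma %d\n", vma->id);
	return 0;
}

static int toy_evict_vm(struct toy_vma *inactive, struct toy_vma *active)
{
	struct toy_vma *phases[2] = { inactive, active };
	struct toy_vma *eviction_list = NULL;
	struct toy_vma *vma;
	int pass, ret = 0;

	/* Step 1: while the vm lists are stable, collect every unpinned vma
	 * onto a private eviction list; nothing has been unbound yet.
	 */
	for (pass = 0; pass < 2; pass++) {	/* inactive first, then active */
		for (vma = phases[pass]; vma; vma = vma->vm_next) {
			if (vma->pinned)
				continue;
			vma->evict_next = eviction_list;
			eviction_list = vma;
		}
	}

	/* Step 2: unbind everything collected, remembering the first error
	 * but still walking the whole list.
	 */
	for (vma = eviction_list; vma; vma = vma->evict_next) {
		int err = toy_unbind(vma);

		if (ret == 0)
			ret = err;
	}
	return ret;
}

int main(void)
{
	struct toy_vma a = { .id = 1 };
	struct toy_vma b = { .id = 2, .pinned = true };
	struct toy_vma c = { .id = 3 };

	a.vm_next = &b;			/* inactive list: a -> b */
	return toy_evict_vm(&a, &c);	/* active list: just c */
}

Splitting the walk from the unbind is what lets the new code drop the old inactive-only, unbind-as-you-go loop while still reporting the first unbind error.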
