@@ -311,20 +311,33 @@ static void spin_unlock_wait(spinlock_t *lock)
 	spin_unlock_irq(lock);
 }
 
+static void active_flush(struct i915_active *ref,
+			 struct i915_active_fence *active)
+{
+	struct dma_fence *fence;
+
+	fence = xchg(__active_fence_slot(active), NULL);
+	if (!fence)
+		return;
+
+	spin_lock_irq(fence->lock);
+	__list_del_entry(&active->cb.node);
+	spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
+	atomic_dec(&ref->count);
+
+	GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+}
+
 void i915_active_unlock_wait(struct i915_active *ref)
 {
 	if (i915_active_acquire_if_busy(ref)) {
 		struct active_node *it, *n;
 
+		/* Wait for all active callbacks */
 		rcu_read_lock();
-		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-			struct dma_fence *f;
-
-			/* Wait for all active callbacks */
-			f = rcu_dereference(it->base.fence);
-			if (f)
-				spin_unlock_wait(f->lock);
-		}
+		active_flush(ref, &ref->excl);
+		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
+			active_flush(ref, &it->base);
 		rcu_read_unlock();
 
 		i915_active_release(ref);
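For context, the new active_flush() helper follows a claim-then-detach pattern: an xchg() atomically steals the published fence pointer so that exactly one party (the waiter or the signaler) detaches the callback, and the list unlink is done under fence->lock, the same lock the signaler holds while walking fence->cb_list. Below is a minimal userspace C11 sketch of that pattern, using pthreads and stdatomic in place of the kernel primitives; all names here (struct waiter, waiter_flush, cb_lock, etc.) are hypothetical illustrations, not the i915 or dma_fence API.

#include <stdatomic.h>
#include <stddef.h>
#include <pthread.h>

struct cb_node { struct cb_node *prev, *next; };

struct waiter {
	_Atomic(struct cb_node *) slot;	/* published callback, or NULL */
};

/* Stand-in for fence->lock: held by the signaler while it walks the list. */
static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlink @node from its list; caller must hold cb_lock. */
static void cb_del(struct cb_node *node)
{
	node->prev->next = node->next;
	node->next->prev = node->prev;
}

/*
 * Claim-then-detach: the atomic exchange guarantees exactly one
 * thread takes ownership of the published pointer; the unlink is
 * then serialised against the signaler by taking the same lock it
 * holds while walking the callback list.
 */
static void waiter_flush(struct waiter *w)
{
	struct cb_node *node = atomic_exchange(&w->slot, NULL);

	if (!node)
		return;	/* already flushed, or never armed */

	pthread_mutex_lock(&cb_lock);
	cb_del(node);
	pthread_mutex_unlock(&cb_lock);
}

Compared with the removed spin_unlock_wait(f->lock), which only waited for any in-flight callback to drop the lock, this detaches the callback outright, so nothing can fire after the flush returns.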