@@ -1339,68 +1339,30 @@ void i915_request_add(struct i915_request *rq)
 {
 	struct intel_timeline * const tl = i915_request_timeline(rq);
 	struct i915_sched_attr attr = {};
-	struct i915_request *prev;
+	struct i915_gem_context *ctx;
 
 	lockdep_assert_held(&tl->mutex);
 	lockdep_unpin_lock(&tl->mutex, rq->cookie);
 
 	trace_i915_request_add(rq);
+	__i915_request_commit(rq);
 
-	prev = __i915_request_commit(rq);
-
-	if (rcu_access_pointer(rq->context->gem_context))
-		attr = i915_request_gem_context(rq)->sched;
+	/* XXX placeholder for selftests */
+	rcu_read_lock();
+	ctx = rcu_dereference(rq->context->gem_context);
+	if (ctx)
+		attr = ctx->sched;
+	rcu_read_unlock();
 
-	/*
-	 * Boost actual workloads past semaphores!
-	 *
-	 * With semaphores we spin on one engine waiting for another,
-	 * simply to reduce the latency of starting our work when
-	 * the signaler completes. However, if there is any other
-	 * work that we could be doing on this engine instead, that
-	 * is better utilisation and will reduce the overall duration
-	 * of the current work. To avoid PI boosting a semaphore
-	 * far in the distance past over useful work, we keep a history
-	 * of any semaphore use along our dependency chain.
-	 */
 	if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
 		attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
-	/*
-	 * Boost priorities to new clients (new request flows).
-	 *
-	 * Allow interactive/synchronous clients to jump ahead of
-	 * the bulk clients. (FQ_CODEL)
-	 */
 	if (list_empty(&rq->sched.signalers_list))
 		attr.priority |= I915_PRIORITY_WAIT;
 
 	local_bh_disable();
 	__i915_request_queue(rq, &attr);
 	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
 
-	/*
-	 * In typical scenarios, we do not expect the previous request on
-	 * the timeline to be still tracked by timeline->last_request if it
-	 * has been completed. If the completed request is still here, that
-	 * implies that request retirement is a long way behind submission,
-	 * suggesting that we haven't been retiring frequently enough from
-	 * the combination of retire-before-alloc, waiters and the background
-	 * retirement worker. So if the last request on this timeline was
-	 * already completed, do a catch up pass, flushing the retirement queue
-	 * up to this client. Since we have now moved the heaviest operations
-	 * during retirement onto secondary workers, such as freeing objects
-	 * or contexts, retiring a bunch of requests is mostly list management
-	 * (and cache misses), and so we should not be overly penalizing this
-	 * client by performing excess work, though we may still performing
-	 * work on behalf of others -- but instead we should benefit from
-	 * improved resource management. (Well, that's the theory at least.)
-	 */
-	if (prev &&
-	    i915_request_completed(prev) &&
-	    rcu_access_pointer(prev->timeline) == tl)
-		i915_request_retire_upto(prev);
-
 	mutex_unlock(&tl->mutex);
 }
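A note on the RCU change above: the old code only tested rq->context->gem_context with rcu_access_pointer(), which may inspect the pointer value (e.g. for a NULL check) but must never be dereferenced, and then fetched the context's sched attributes separately. The replacement holds a read-side critical section across the whole access, so the gem_context cannot be freed while attr is being copied out. A minimal kernel-style sketch of that pattern follows; struct sched_hint and read_hint_priority() are hypothetical stand-ins for the i915 types, not driver code:

#include <linux/rcupdate.h>

/* Hypothetical RCU-protected object, standing in for i915_gem_context. */
struct sched_hint {
	int priority;
};

static struct sched_hint __rcu *current_hint;

static int read_hint_priority(void)
{
	struct sched_hint *hint;
	int prio = 0;

	rcu_read_lock();			/* open the read-side critical section */
	hint = rcu_dereference(current_hint);	/* dereference only under the lock */
	if (hint)
		prio = hint->priority;		/* copy out while still protected */
	rcu_read_unlock();			/* object may be freed after this */

	return prio;
}

An updater would publish a new object with rcu_assign_pointer() and free the old one only after a grace period (e.g. via kfree_rcu()), which is what makes the copy-under-lock above safe.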
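Separately, the two surviving attr.priority |= lines fold scheduling hints into spare low bits of the priority value: a request with no semaphore chain is boosted past semaphore spinners, and a request with no signalers (a new flow) gets an FQ_CODEL-style head start, as the deleted comments explained. The plain-C sketch below shows only that bit-packing pattern; the shift, flag values, and helper name are invented for illustration and are not the driver's definitions:

#include <stdio.h>

/* Invented layout: user priority sits above two low-order boost bits. */
#define PRIO_SHIFT       2
#define PRIO_WAIT        (1 << 0)	/* new flow: no signalers yet */
#define PRIO_NOSEMAPHORE (1 << 1)	/* no semaphore waits in the chain */

static int effective_priority(int user_prio, int has_semaphore_chain,
			      int has_signalers)
{
	int prio = user_prio << PRIO_SHIFT;	/* base priority from the context */

	if (!has_semaphore_chain)
		prio |= PRIO_NOSEMAPHORE;	/* boost real work past spinners */
	if (!has_signalers)
		prio |= PRIO_WAIT;		/* let new clients jump the bulk queue */

	return prio;
}

int main(void)
{
	/* 3 << 2 = 12, plus the NOSEMAPHORE bit = 14 */
	printf("%d\n", effective_priority(3, /*semaphore chain*/ 0, /*signalers*/ 1));
	return 0;
}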