
Commit 7e80576

drm/i915: Drop struct_mutex from around i915_retire_requests()
We don't need to hold struct_mutex now for retiring requests, so drop it
from i915_retire_requests() and i915_gem_wait_for_idle(), finally removing
I915_WAIT_LOCKED for good.

Signed-off-by: Chris Wilson <[email protected]>
Reviewed-by: Tvrtko Ursulin <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
Parent: b723484

26 files changed, 213 insertions(+), 460 deletions(-)
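To summarize the caller-side effect, here is a condensed sketch distilled from the i915_gem_pm.c and intel_gt_pm.c hunks below; it is illustrative only and not itself a hunk of the patch:

	/*
	 * Before this commit, retiring and idling had to be wrapped in the
	 * global struct_mutex and the wait had to pass I915_WAIT_LOCKED:
	 */
	mutex_lock(&i915->drm.struct_mutex);
	i915_retire_requests(i915);
	i915_gem_wait_for_idle(i915,
			       I915_WAIT_LOCKED | I915_WAIT_FOR_IDLE_BOOST,
			       I915_GEM_IDLE_TIMEOUT);
	mutex_unlock(&i915->drm.struct_mutex);

	/* After it, neither call needs struct_mutex and I915_WAIT_LOCKED is gone: */
	i915_retire_requests(i915);
	i915_gem_wait_for_idle(i915, I915_WAIT_FOR_IDLE_BOOST, I915_GEM_IDLE_TIMEOUT);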

drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
Lines changed: 1 addition & 6 deletions

@@ -155,7 +155,6 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence,
 static void clear_pages_worker(struct work_struct *work)
 {
 	struct clear_pages_work *w = container_of(work, typeof(*w), work);
-	struct drm_i915_private *i915 = w->ce->engine->i915;
 	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
 	struct i915_vma *vma = w->sleeve->vma;
 	struct i915_request *rq;
@@ -173,11 +172,9 @@ static void clear_pages_worker(struct work_struct *work)
 	obj->read_domains = I915_GEM_GPU_DOMAINS;
 	obj->write_domain = 0;
 
-	/* XXX: we need to kill this */
-	mutex_lock(&i915->drm.struct_mutex);
 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 	if (unlikely(err))
-		goto out_unlock;
+		goto out_signal;
 
 	batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
 	if (IS_ERR(batch)) {
@@ -229,8 +226,6 @@ static void clear_pages_worker(struct work_struct *work)
 	intel_emit_vma_release(w->ce, batch);
 out_unpin:
 	i915_vma_unpin(vma);
-out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
 out_signal:
 	if (unlikely(err)) {
 		dma_fence_set_error(&w->dma, err);

drivers/gpu/drm/i915/gem/i915_gem_context.c
Lines changed: 1 addition & 19 deletions

@@ -1159,8 +1159,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 }
 
 static int
-__intel_context_reconfigure_sseu(struct intel_context *ce,
-				 struct intel_sseu sseu)
+intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
 {
 	int ret;
 
@@ -1183,23 +1182,6 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
 	return ret;
 }
 
-static int
-intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
-{
-	struct drm_i915_private *i915 = ce->engine->i915;
-	int ret;
-
-	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
-	if (ret)
-		return ret;
-
-	ret = __intel_context_reconfigure_sseu(ce, sseu);
-
-	mutex_unlock(&i915->drm.struct_mutex);
-
-	return ret;
-}
-
 static int
 user_to_context_sseu(struct drm_i915_private *i915,
		     const struct drm_i915_gem_context_param_sseu *user,

drivers/gpu/drm/i915/gem/i915_gem_pm.c
Lines changed: 17 additions & 28 deletions

@@ -48,11 +48,7 @@ static void retire_work_handler(struct work_struct *work)
 	struct drm_i915_private *i915 =
 		container_of(work, typeof(*i915), gem.retire_work.work);
 
-	/* Come back later if the device is busy... */
-	if (mutex_trylock(&i915->drm.struct_mutex)) {
-		i915_retire_requests(i915);
-		mutex_unlock(&i915->drm.struct_mutex);
-	}
+	i915_retire_requests(i915);
 
 	queue_delayed_work(i915->wq,
 			   &i915->gem.retire_work,
@@ -86,26 +82,23 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
 {
 	bool result = !intel_gt_is_wedged(gt);
 
-	do {
-		if (i915_gem_wait_for_idle(gt->i915,
-					   I915_WAIT_LOCKED |
-					   I915_WAIT_FOR_IDLE_BOOST,
-					   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
-			/* XXX hide warning from gem_eio */
-			if (i915_modparams.reset) {
-				dev_err(gt->i915->drm.dev,
-					"Failed to idle engines, declaring wedged!\n");
-				GEM_TRACE_DUMP();
-			}
-
-			/*
-			 * Forcibly cancel outstanding work and leave
-			 * the gpu quiet.
-			 */
-			intel_gt_set_wedged(gt);
-			result = false;
+	if (i915_gem_wait_for_idle(gt->i915,
+				   I915_WAIT_FOR_IDLE_BOOST,
+				   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+		/* XXX hide warning from gem_eio */
+		if (i915_modparams.reset) {
+			dev_err(gt->i915->drm.dev,
+				"Failed to idle engines, declaring wedged!\n");
+			GEM_TRACE_DUMP();
 		}
-	} while (i915_retire_requests(gt->i915) && result);
+
+		/*
+		 * Forcibly cancel outstanding work and leave
+		 * the gpu quiet.
+		 */
+		intel_gt_set_wedged(gt);
+		result = false;
+	}
 
 	if (intel_gt_pm_wait_for_idle(gt))
 		result = false;
@@ -145,8 +138,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 
 	user_forcewake(&i915->gt, true);
 
-	mutex_lock(&i915->drm.struct_mutex);
-
 	/*
 	 * We have to flush all the executing contexts to main memory so
	 * that they can saved in the hibernation image. To ensure the last
@@ -158,8 +149,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
	 */
 	switch_to_kernel_context_sync(&i915->gt);
 
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	cancel_delayed_work_sync(&i915->gt.hangcheck.work);
 
 	i915_gem_drain_freed_objects(i915);

drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
Lines changed: 24 additions & 16 deletions

@@ -7,6 +7,7 @@
 #include <linux/prime_numbers.h>
 
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
 
 #include "i915_selftest.h"
 #include "selftests/i915_random.h"
@@ -78,7 +79,7 @@ static int gtt_set(struct drm_i915_gem_object *obj,
 {
 	struct i915_vma *vma;
 	u32 __iomem *map;
-	int err;
+	int err = 0;
 
 	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_gtt_domain(obj, true);
@@ -90,15 +91,21 @@ static int gtt_set(struct drm_i915_gem_object *obj,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
+	intel_gt_pm_get(vma->vm->gt);
+
 	map = i915_vma_pin_iomap(vma);
 	i915_vma_unpin(vma);
-	if (IS_ERR(map))
-		return PTR_ERR(map);
+	if (IS_ERR(map)) {
+		err = PTR_ERR(map);
+		goto out_rpm;
+	}
 
 	iowrite32(v, &map[offset / sizeof(*map)]);
 	i915_vma_unpin_iomap(vma);
 
-	return 0;
+out_rpm:
+	intel_gt_pm_put(vma->vm->gt);
+	return err;
 }
 
 static int gtt_get(struct drm_i915_gem_object *obj,
@@ -107,7 +114,7 @@ static int gtt_get(struct drm_i915_gem_object *obj,
 {
 	struct i915_vma *vma;
 	u32 __iomem *map;
-	int err;
+	int err = 0;
 
 	i915_gem_object_lock(obj);
 	err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -119,15 +126,21 @@ static int gtt_get(struct drm_i915_gem_object *obj,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
+	intel_gt_pm_get(vma->vm->gt);
+
 	map = i915_vma_pin_iomap(vma);
 	i915_vma_unpin(vma);
-	if (IS_ERR(map))
-		return PTR_ERR(map);
+	if (IS_ERR(map)) {
+		err = PTR_ERR(map);
+		goto out_rpm;
+	}
 
 	*v = ioread32(&map[offset / sizeof(*map)]);
 	i915_vma_unpin_iomap(vma);
 
-	return 0;
+out_rpm:
+	intel_gt_pm_put(vma->vm->gt);
+	return err;
 }
 
 static int wc_set(struct drm_i915_gem_object *obj,
@@ -280,7 +293,6 @@ static int igt_gem_coherency(void *arg)
 	struct drm_i915_private *i915 = arg;
 	const struct igt_coherency_mode *read, *write, *over;
 	struct drm_i915_gem_object *obj;
-	intel_wakeref_t wakeref;
 	unsigned long count, n;
 	u32 *offsets, *values;
 	int err = 0;
@@ -299,8 +311,6 @@ static int igt_gem_coherency(void *arg)
 
 	values = offsets + ncachelines;
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	for (over = igt_coherency_mode; over->name; over++) {
 		if (!over->set)
 			continue;
@@ -326,7 +336,7 @@ static int igt_gem_coherency(void *arg)
				obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
-					goto unlock;
+					goto free;
				}
 
				i915_random_reorder(offsets, ncachelines, &prng);
@@ -377,15 +387,13 @@ static int igt_gem_coherency(void *arg)
			}
		}
	}
-unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
+free:
 	kfree(offsets);
 	return err;
 
 put_object:
 	i915_gem_object_put(obj);
-	goto unlock;
+	goto free;
 }
 
 int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)

drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
Lines changed: 4 additions & 14 deletions

@@ -164,7 +164,6 @@ struct parallel_switch {
 static int __live_parallel_switch1(void *data)
 {
 	struct parallel_switch *arg = data;
-	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
 	IGT_TIMEOUT(end_time);
 	unsigned long count;
 
@@ -176,16 +175,12 @@ static int __live_parallel_switch1(void *data)
		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
			i915_request_put(rq);
 
-			mutex_lock(&i915->drm.struct_mutex);
			rq = i915_request_create(arg->ce[n]);
-			if (IS_ERR(rq)) {
-				mutex_unlock(&i915->drm.struct_mutex);
+			if (IS_ERR(rq))
				return PTR_ERR(rq);
-			}
 
			i915_request_get(rq);
			i915_request_add(rq);
-			mutex_unlock(&i915->drm.struct_mutex);
		}
 
		err = 0;
@@ -205,7 +200,6 @@ static int __live_parallel_switch1(void *data)
 static int __live_parallel_switchN(void *data)
 {
 	struct parallel_switch *arg = data;
-	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
 	IGT_TIMEOUT(end_time);
 	unsigned long count;
 	int n;
@@ -215,15 +209,11 @@ static int __live_parallel_switchN(void *data)
		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
			struct i915_request *rq;
 
-			mutex_lock(&i915->drm.struct_mutex);
			rq = i915_request_create(arg->ce[n]);
-			if (IS_ERR(rq)) {
-				mutex_unlock(&i915->drm.struct_mutex);
+			if (IS_ERR(rq))
				return PTR_ERR(rq);
-			}
 
			i915_request_add(rq);
-			mutex_unlock(&i915->drm.struct_mutex);
		}
 
		count++;
@@ -1173,7 +1163,7 @@ __sseu_test(const char *name,
	if (ret)
		return ret;
 
-	ret = __intel_context_reconfigure_sseu(ce, sseu);
+	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_spin;
 
@@ -1277,7 +1267,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
		goto out_fail;
 
 out_fail:
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+	if (igt_flush_test(i915))
		ret = -EIO;
 
	intel_context_unpin(ce);

drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
Lines changed: 1 addition & 5 deletions

@@ -581,12 +581,8 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 
 static void restore_retire_worker(struct drm_i915_private *i915)
 {
+	igt_flush_test(i915);
 	intel_gt_pm_put(&i915->gt);
-
-	mutex_lock(&i915->drm.struct_mutex);
-	igt_flush_test(i915, I915_WAIT_LOCKED);
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	i915_gem_driver_register__shrinker(i915);
 }

drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
Lines changed: 0 additions & 4 deletions

@@ -65,9 +65,7 @@ static int igt_fill_blt(void *arg)
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		obj->cache_dirty = true;
 
-	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_fill_blt(obj, ce, val);
-	mutex_unlock(&i915->drm.struct_mutex);
	if (err)
		goto err_unpin;
 
@@ -166,9 +164,7 @@ static int igt_copy_blt(void *arg)
	if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		dst->cache_dirty = true;
 
-	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_copy_blt(src, dst, ce);
-	mutex_unlock(&i915->drm.struct_mutex);
	if (err)
		goto err_unpin;
 
drivers/gpu/drm/i915/gt/intel_gt_pm.c
Lines changed: 8 additions & 20 deletions

@@ -196,26 +196,14 @@ int intel_gt_resume(struct intel_gt *gt)
 
 static void wait_for_idle(struct intel_gt *gt)
 {
-	mutex_lock(&gt->i915->drm.struct_mutex); /* XXX */
-	do {
-		if (i915_gem_wait_for_idle(gt->i915,
-					   I915_WAIT_LOCKED,
-					   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
-			/* XXX hide warning from gem_eio */
-			if (i915_modparams.reset) {
-				dev_err(gt->i915->drm.dev,
-					"Failed to idle engines, declaring wedged!\n");
-				GEM_TRACE_DUMP();
-			}
-
-			/*
-			 * Forcibly cancel outstanding work and leave
-			 * the gpu quiet.
-			 */
-			intel_gt_set_wedged(gt);
-		}
-	} while (i915_retire_requests(gt->i915));
-	mutex_unlock(&gt->i915->drm.struct_mutex);
+	if (i915_gem_wait_for_idle(gt->i915, 0,
+				   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+		/*
+		 * Forcibly cancel outstanding work and leave
+		 * the gpu quiet.
+		 */
+		intel_gt_set_wedged(gt);
+	}
 
 	intel_gt_pm_wait_for_idle(gt);
 }
