26 | 26 | #include <asm/mmu_context.h> |
27 | 27 | #include <asm/pgalloc.h> |
28 | 28 |
29 | | -static DEFINE_SPINLOCK(mmu_context_lock); |
30 | 29 | static DEFINE_IDA(mmu_context_ida); |
31 | 30 |
32 | 31 | static int alloc_context_id(int min_id, int max_id) |
33 | 32 | { |
34 | | - int index, err; |
35 | | - |
36 | | -again: |
37 | | - if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL)) |
38 | | - return -ENOMEM; |
39 | | - |
40 | | - spin_lock(&mmu_context_lock); |
41 | | - err = ida_get_new_above(&mmu_context_ida, min_id, &index); |
42 | | - spin_unlock(&mmu_context_lock); |
43 | | - |
44 | | - if (err == -EAGAIN) |
45 | | - goto again; |
46 | | - else if (err) |
47 | | - return err; |
48 | | - |
49 | | - if (index > max_id) { |
50 | | - spin_lock(&mmu_context_lock); |
51 | | - ida_remove(&mmu_context_ida, index); |
52 | | - spin_unlock(&mmu_context_lock); |
53 | | - return -ENOMEM; |
54 | | - } |
55 | | - |
56 | | - return index; |
| 33 | + return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL); |
57 | 34 | } |
58 | 35 |
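The open-coded retry loop above (preallocate with ida_pre_get(), attempt ida_get_new_above(), start over on -EAGAIN, then manually police the upper bound) collapses into a single ida_alloc_range() call, which takes the range and the gfp flags directly and retries internally. A minimal sketch of the new API, using hypothetical names (example_ida, example_get_id, example_put_id) that are not part of this patch:

```c
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* Allocate the lowest free ID in [min_id, max_id], or a negative errno. */
static int example_get_id(int min_id, int max_id)
{
	/*
	 * ida_alloc_range() may sleep with GFP_KERNEL and does its own
	 * locking, so no external spinlock or -EAGAIN loop is needed.
	 */
	return ida_alloc_range(&example_ida, min_id, max_id, GFP_KERNEL);
}

/* Return a previously allocated ID to the pool. */
static void example_put_id(int id)
{
	ida_free(&example_ida, id);
}
```

One behavioural nuance worth noting: the old code returned -ENOMEM after discovering it had allocated an ID above max_id, whereas ida_alloc_range() never allocates outside the requested range and returns -ENOSPC when the range is exhausted (-ENOMEM only on allocation failure).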
59 | 36 | void hash__reserve_context_id(int id) |
60 | 37 | { |
61 | | - int rc, result = 0; |
62 | | - |
63 | | - do { |
64 | | - if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL)) |
65 | | - break; |
66 | | - |
67 | | - spin_lock(&mmu_context_lock); |
68 | | - rc = ida_get_new_above(&mmu_context_ida, id, &result); |
69 | | - spin_unlock(&mmu_context_lock); |
70 | | - } while (rc == -EAGAIN); |
| 38 | + int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL); |
71 | 39 |
72 | 40 | WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result); |
73 | 41 | } |
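hash__reserve_context_id() becomes a degenerate case of the same call: passing id as both the minimum and the maximum either returns exactly that ID or a negative errno, which is what the unchanged WARN() compares against. A hedged sketch of that pattern, with a made-up helper name (example_reserve_id) rather than anything from the patch:

```c
#include <linux/idr.h>

/* Sketch: reserve one specific ID from an IDA; 0 on success or a negative errno. */
static int example_reserve_id(struct ida *ida, int id)
{
	int got = ida_alloc_range(ida, id, id, GFP_KERNEL);

	if (got < 0)
		return got;	/* -ENOSPC if the ID is already taken, -ENOMEM on failure */

	return 0;		/* otherwise got == id by construction */
}
```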
@@ -172,23 +140,19 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) |
172 | 140 |
173 | 141 | void __destroy_context(int context_id) |
174 | 142 | { |
175 | | - spin_lock(&mmu_context_lock); |
176 | | - ida_remove(&mmu_context_ida, context_id); |
177 | | - spin_unlock(&mmu_context_lock); |
| 143 | + ida_free(&mmu_context_ida, context_id); |
178 | 144 | } |
179 | 145 | EXPORT_SYMBOL_GPL(__destroy_context); |
180 | 146 |
181 | 147 | static void destroy_contexts(mm_context_t *ctx) |
182 | 148 | { |
183 | 149 | int index, context_id; |
184 | 150 |
|
185 | | - spin_lock(&mmu_context_lock); |
186 | 151 | for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) { |
187 | 152 | context_id = ctx->extended_id[index]; |
188 | 153 | if (context_id) |
189 | | - ida_remove(&mmu_context_ida, context_id); |
| 154 | + ida_free(&mmu_context_ida, context_id); |
190 | 155 | } |
191 | | - spin_unlock(&mmu_context_lock); |
192 | 156 | } |
193 | 157 |
194 | 158 | static void pte_frag_destroy(void *pte_frag) |
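Across all of these paths the external lock can go away because ida_alloc_range() and ida_free() serialize on the IDA's own internal lock; with no users left, the DEFINE_SPINLOCK(mmu_context_lock) at the top of the file is removed as well. In destroy_contexts() the loop itself is unchanged: each nonzero entry in ctx->extended_id is an extended context ID that was actually handed out and is returned to mmu_context_ida, while a zero entry marks a slot that was never allocated.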