@@ -39,7 +39,7 @@ static void iopf_put_dev_fault_param(struct iommu_fault_param *fault_param)
 	kfree_rcu(fault_param, rcu);
 }
 
-void iopf_free_group(struct iopf_group *group)
+static void __iopf_free_group(struct iopf_group *group)
 {
 	struct iopf_fault *iopf, *next;
 
@@ -50,6 +50,11 @@ void iopf_free_group(struct iopf_group *group)
 
 	/* Pair with iommu_report_device_fault(). */
 	iopf_put_dev_fault_param(group->fault_param);
+}
+
+void iopf_free_group(struct iopf_group *group)
+{
+	__iopf_free_group(group);
 	kfree(group);
 }
 EXPORT_SYMBOL_GPL(iopf_free_group);
@@ -97,14 +102,49 @@ static int report_partial_fault(struct iommu_fault_param *fault_param,
 	return 0;
 }
 
+static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
+					   struct iopf_fault *evt,
+					   struct iopf_group *abort_group)
+{
+	struct iopf_fault *iopf, *next;
+	struct iopf_group *group;
+
+	group = kzalloc(sizeof(*group), GFP_KERNEL);
+	if (!group) {
+		/*
+		 * We always need to construct the group as we need it to abort
+		 * the request at the driver if it can't be handled.
+		 */
+		group = abort_group;
+	}
+
+	group->fault_param = iopf_param;
+	group->last_fault.fault = evt->fault;
+	INIT_LIST_HEAD(&group->faults);
+	INIT_LIST_HEAD(&group->pending_node);
+	list_add(&group->last_fault.list, &group->faults);
+
+	/* See if we have partial faults for this group */
+	mutex_lock(&iopf_param->lock);
+	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
+		if (iopf->fault.prm.grpid == evt->fault.prm.grpid)
+			/* Insert *before* the last fault */
+			list_move(&iopf->list, &group->faults);
+	}
+	list_add(&group->pending_node, &iopf_param->faults);
+	mutex_unlock(&iopf_param->lock);
+
+	return group;
+}
+
 /**
  * iommu_report_device_fault() - Report fault event to device driver
  * @dev: the device
  * @evt: fault event data
  *
  * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
- * handler. When this function fails and the fault is recoverable, it is the
- * caller's responsibility to complete the fault.
+ * handler. If this function fails then ops->page_response() was called to
+ * complete evt if required.
  *
  * This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard
  * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
@@ -143,22 +183,18 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 {
 	struct iommu_fault *fault = &evt->fault;
 	struct iommu_fault_param *iopf_param;
-	struct iopf_fault *iopf, *next;
-	struct iommu_domain *domain;
+	struct iopf_group abort_group = {};
 	struct iopf_group *group;
 	int ret;
 
-	if (fault->type != IOMMU_FAULT_PAGE_REQ)
-		return -EOPNOTSUPP;
-
 	iopf_param = iopf_get_dev_fault_param(dev);
-	if (!iopf_param)
+	if (WARN_ON(!iopf_param))
 		return -ENODEV;
 
 	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
 		ret = report_partial_fault(iopf_param, fault);
 		iopf_put_dev_fault_param(iopf_param);
-
+		/* A request that is not the last does not need to be ack'd */
 		return ret;
 	}
 
@@ -170,56 +206,33 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 	 * will send a response to the hardware. We need to clean up before
 	 * leaving, otherwise partial faults will be stuck.
 	 */
-	domain = get_domain_for_iopf(dev, fault);
-	if (!domain) {
-		ret = -EINVAL;
-		goto cleanup_partial;
-	}
-
-	group = kzalloc(sizeof(*group), GFP_KERNEL);
-	if (!group) {
+	group = iopf_group_alloc(iopf_param, evt, &abort_group);
+	if (group == &abort_group) {
 		ret = -ENOMEM;
-		goto cleanup_partial;
+		goto err_abort;
 	}
 
-	group->fault_param = iopf_param;
-	group->last_fault.fault = *fault;
-	INIT_LIST_HEAD(&group->faults);
-	INIT_LIST_HEAD(&group->pending_node);
-	group->domain = domain;
-	list_add(&group->last_fault.list, &group->faults);
-
-	/* See if we have partial faults for this group */
-	mutex_lock(&iopf_param->lock);
-	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
-		if (iopf->fault.prm.grpid == fault->prm.grpid)
-			/* Insert *before* the last fault */
-			list_move(&iopf->list, &group->faults);
-	}
-	list_add(&group->pending_node, &iopf_param->faults);
-	mutex_unlock(&iopf_param->lock);
-
-	ret = domain->iopf_handler(group);
-	if (ret) {
-		mutex_lock(&iopf_param->lock);
-		list_del_init(&group->pending_node);
-		mutex_unlock(&iopf_param->lock);
-		iopf_free_group(group);
+	group->domain = get_domain_for_iopf(dev, fault);
+	if (!group->domain) {
+		ret = -EINVAL;
+		goto err_abort;
 	}
 
-	return ret;
-
-cleanup_partial:
-	mutex_lock(&iopf_param->lock);
-	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
-		if (iopf->fault.prm.grpid == fault->prm.grpid) {
-			list_del(&iopf->list);
-			kfree(iopf);
-		}
-	}
-	mutex_unlock(&iopf_param->lock);
-	iopf_put_dev_fault_param(iopf_param);
+	/*
+	 * On success iopf_handler must call iopf_group_response() and
+	 * iopf_free_group()
+	 */
+	ret = group->domain->iopf_handler(group);
+	if (ret)
+		goto err_abort;
+	return 0;
 
+err_abort:
+	iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE);
+	if (group == &abort_group)
+		__iopf_free_group(group);
+	else
+		iopf_free_group(group);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
@@ -259,11 +272,9 @@ EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
  * iopf_group_response - Respond a group of page faults
  * @group: the group of faults with the same group id
  * @status: the response code
- *
- * Return 0 on success and <0 on error.
  */
-int iopf_group_response(struct iopf_group *group,
-			enum iommu_page_response_code status)
+void iopf_group_response(struct iopf_group *group,
+			 enum iommu_page_response_code status)
 {
 	struct iommu_fault_param *fault_param = group->fault_param;
 	struct iopf_fault *iopf = &group->last_fault;
@@ -274,17 +285,14 @@ int iopf_group_response(struct iopf_group *group,
 		.grpid = iopf->fault.prm.grpid,
 		.code = status,
 	};
-	int ret = -EINVAL;
 
 	/* Only send response if there is a fault report pending */
 	mutex_lock(&fault_param->lock);
 	if (!list_empty(&group->pending_node)) {
-		ret = ops->page_response(dev, &group->last_fault, &resp);
+		ops->page_response(dev, &group->last_fault, &resp);
 		list_del_init(&group->pending_node);
 	}
 	mutex_unlock(&fault_param->lock);
-
-	return ret;
 }
 EXPORT_SYMBOL_GPL(iopf_group_response);
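
For context, not part of the patch: under the new contract, a domain iopf_handler that returns 0 takes ownership of the group and must eventually complete it with iopf_group_response() followed by iopf_free_group(); if it returns an error, iommu_report_device_fault() now aborts the group itself with IOMMU_PAGE_RESP_FAILURE. A minimal sketch of a conforming handler follows; the handler name and the my_resolve_fault() helper are hypothetical.

/* Sketch only: my_iopf_handler() and my_resolve_fault() are made-up names. */
static int my_iopf_handler(struct iopf_group *group)
{
	struct iopf_fault *iopf;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	/* Walk every fault in the group, including group->last_fault. */
	list_for_each_entry(iopf, &group->faults, list) {
		if (my_resolve_fault(group->domain, &iopf->fault)) {
			status = IOMMU_PAGE_RESP_INVALID;
			break;
		}
	}

	/*
	 * Returning 0 means this handler owns the group: ack the hardware and
	 * free the group here rather than in iommu_report_device_fault().
	 */
	iopf_group_response(group, status);
	iopf_free_group(group);
	return 0;
}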