Skip to content

Commit b554e39

Browse files
Lu Baolu authored and Joerg Roedel committed
iommu: Make iopf_group_response() return void
The iopf_group_response() should return void, as nothing can do anything with the failure. This implies that ops->page_response() must also return void; this is consistent with what the drivers do. The failure paths, which are all integrity validations of the fault, should be WARN_ON'd, not return codes.

If the iommu core fails to enqueue the fault, it should respond the fault directly by calling ops->page_response() instead of returning an error number and relying on the iommu drivers to do so. Consolidate the error fault handling code in the core.

Co-developed-by: Jason Gunthorpe <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
Signed-off-by: Lu Baolu <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Reviewed-by: Kevin Tian <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Joerg Roedel <[email protected]>
1 parent 1991123 commit b554e39

File tree

5 files changed

+98
-120
lines changed

5 files changed

+98
-120
lines changed

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

Lines changed: 18 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -920,31 +920,29 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
920920
return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
921921
}
922922

923-
static int arm_smmu_page_response(struct device *dev,
924-
struct iopf_fault *unused,
925-
struct iommu_page_response *resp)
923+
static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused,
924+
struct iommu_page_response *resp)
926925
{
927926
struct arm_smmu_cmdq_ent cmd = {0};
928927
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
929928
int sid = master->streams[0].id;
930929

931-
if (master->stall_enabled) {
932-
cmd.opcode = CMDQ_OP_RESUME;
933-
cmd.resume.sid = sid;
934-
cmd.resume.stag = resp->grpid;
935-
switch (resp->code) {
936-
case IOMMU_PAGE_RESP_INVALID:
937-
case IOMMU_PAGE_RESP_FAILURE:
938-
cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
939-
break;
940-
case IOMMU_PAGE_RESP_SUCCESS:
941-
cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
942-
break;
943-
default:
944-
return -EINVAL;
945-
}
946-
} else {
947-
return -ENODEV;
930+
if (WARN_ON(!master->stall_enabled))
931+
return;
932+
933+
cmd.opcode = CMDQ_OP_RESUME;
934+
cmd.resume.sid = sid;
935+
cmd.resume.stag = resp->grpid;
936+
switch (resp->code) {
937+
case IOMMU_PAGE_RESP_INVALID:
938+
case IOMMU_PAGE_RESP_FAILURE:
939+
cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
940+
break;
941+
case IOMMU_PAGE_RESP_SUCCESS:
942+
cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
943+
break;
944+
default:
945+
break;
948946
}
949947

950948
arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
@@ -954,8 +952,6 @@ static int arm_smmu_page_response(struct device *dev,
954952
* terminated... at some point in the future. PRI_RESP is fire and
955953
* forget.
956954
*/
957-
958-
return 0;
959955
}
960956

961957
/* Context descriptor manipulation functions */
@@ -1516,16 +1512,6 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
15161512
}
15171513

15181514
ret = iommu_report_device_fault(master->dev, &fault_evt);
1519-
if (ret && flt->type == IOMMU_FAULT_PAGE_REQ) {
1520-
/* Nobody cared, abort the access */
1521-
struct iommu_page_response resp = {
1522-
.pasid = flt->prm.pasid,
1523-
.grpid = flt->prm.grpid,
1524-
.code = IOMMU_PAGE_RESP_FAILURE,
1525-
};
1526-
arm_smmu_page_response(master->dev, &fault_evt, &resp);
1527-
}
1528-
15291515
out_unlock:
15301516
mutex_unlock(&smmu->streams_mutex);
15311517
return ret;

drivers/iommu/intel/iommu.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1079,8 +1079,8 @@ struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
10791079
void intel_svm_check(struct intel_iommu *iommu);
10801080
int intel_svm_enable_prq(struct intel_iommu *iommu);
10811081
int intel_svm_finish_prq(struct intel_iommu *iommu);
1082-
int intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
1083-
struct iommu_page_response *msg);
1082+
void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
1083+
struct iommu_page_response *msg);
10841084
struct iommu_domain *intel_svm_domain_alloc(void);
10851085
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
10861086
void intel_drain_pasid_prq(struct device *dev, u32 pasid);

drivers/iommu/intel/svm.c

Lines changed: 2 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -740,9 +740,8 @@ static irqreturn_t prq_event_thread(int irq, void *d)
740740
return IRQ_RETVAL(handled);
741741
}
742742

743-
int intel_svm_page_response(struct device *dev,
744-
struct iopf_fault *evt,
745-
struct iommu_page_response *msg)
743+
void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
744+
struct iommu_page_response *msg)
746745
{
747746
struct device_domain_info *info = dev_iommu_priv_get(dev);
748747
struct intel_iommu *iommu = info->iommu;
@@ -751,7 +750,6 @@ int intel_svm_page_response(struct device *dev,
751750
bool private_present;
752751
bool pasid_present;
753752
bool last_page;
754-
int ret = 0;
755753
u16 sid;
756754

757755
prm = &evt->fault.prm;
@@ -760,16 +758,6 @@ int intel_svm_page_response(struct device *dev,
760758
private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
761759
last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
762760

763-
if (!pasid_present) {
764-
ret = -EINVAL;
765-
goto out;
766-
}
767-
768-
if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
769-
ret = -EINVAL;
770-
goto out;
771-
}
772-
773761
/*
774762
* Per VT-d spec. v3.0 ch7.7, system software must respond
775763
* with page group response if private data is present (PDP)
@@ -798,8 +786,6 @@ int intel_svm_page_response(struct device *dev,
798786

799787
qi_submit_sync(iommu, &desc, 1, 0);
800788
}
801-
out:
802-
return ret;
803789
}
804790

805791
static int intel_svm_set_dev_pasid(struct iommu_domain *domain,

drivers/iommu/io-pgfault.c

Lines changed: 70 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ static void iopf_put_dev_fault_param(struct iommu_fault_param *fault_param)
3939
kfree_rcu(fault_param, rcu);
4040
}
4141

42-
void iopf_free_group(struct iopf_group *group)
42+
static void __iopf_free_group(struct iopf_group *group)
4343
{
4444
struct iopf_fault *iopf, *next;
4545

@@ -50,6 +50,11 @@ void iopf_free_group(struct iopf_group *group)
5050

5151
/* Pair with iommu_report_device_fault(). */
5252
iopf_put_dev_fault_param(group->fault_param);
53+
}
54+
55+
void iopf_free_group(struct iopf_group *group)
56+
{
57+
__iopf_free_group(group);
5358
kfree(group);
5459
}
5560
EXPORT_SYMBOL_GPL(iopf_free_group);
@@ -97,14 +102,49 @@ static int report_partial_fault(struct iommu_fault_param *fault_param,
97102
return 0;
98103
}
99104

105+
static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
106+
struct iopf_fault *evt,
107+
struct iopf_group *abort_group)
108+
{
109+
struct iopf_fault *iopf, *next;
110+
struct iopf_group *group;
111+
112+
group = kzalloc(sizeof(*group), GFP_KERNEL);
113+
if (!group) {
114+
/*
115+
* We always need to construct the group as we need it to abort
116+
* the request at the driver if it can't be handled.
117+
*/
118+
group = abort_group;
119+
}
120+
121+
group->fault_param = iopf_param;
122+
group->last_fault.fault = evt->fault;
123+
INIT_LIST_HEAD(&group->faults);
124+
INIT_LIST_HEAD(&group->pending_node);
125+
list_add(&group->last_fault.list, &group->faults);
126+
127+
/* See if we have partial faults for this group */
128+
mutex_lock(&iopf_param->lock);
129+
list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
130+
if (iopf->fault.prm.grpid == evt->fault.prm.grpid)
131+
/* Insert *before* the last fault */
132+
list_move(&iopf->list, &group->faults);
133+
}
134+
list_add(&group->pending_node, &iopf_param->faults);
135+
mutex_unlock(&iopf_param->lock);
136+
137+
return group;
138+
}
139+
100140
/**
101141
* iommu_report_device_fault() - Report fault event to device driver
102142
* @dev: the device
103143
* @evt: fault event data
104144
*
105145
* Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
106-
* handler. When this function fails and the fault is recoverable, it is the
107-
* caller's responsibility to complete the fault.
146+
* handler. If this function fails then ops->page_response() was called to
147+
* complete evt if required.
108148
*
109149
* This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard
110150
* them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
@@ -143,22 +183,18 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
143183
{
144184
struct iommu_fault *fault = &evt->fault;
145185
struct iommu_fault_param *iopf_param;
146-
struct iopf_fault *iopf, *next;
147-
struct iommu_domain *domain;
186+
struct iopf_group abort_group = {};
148187
struct iopf_group *group;
149188
int ret;
150189

151-
if (fault->type != IOMMU_FAULT_PAGE_REQ)
152-
return -EOPNOTSUPP;
153-
154190
iopf_param = iopf_get_dev_fault_param(dev);
155-
if (!iopf_param)
191+
if (WARN_ON(!iopf_param))
156192
return -ENODEV;
157193

158194
if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
159195
ret = report_partial_fault(iopf_param, fault);
160196
iopf_put_dev_fault_param(iopf_param);
161-
197+
/* A request that is not the last does not need to be ack'd */
162198
return ret;
163199
}
164200

@@ -170,56 +206,33 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
170206
* will send a response to the hardware. We need to clean up before
171207
* leaving, otherwise partial faults will be stuck.
172208
*/
173-
domain = get_domain_for_iopf(dev, fault);
174-
if (!domain) {
175-
ret = -EINVAL;
176-
goto cleanup_partial;
177-
}
178-
179-
group = kzalloc(sizeof(*group), GFP_KERNEL);
180-
if (!group) {
209+
group = iopf_group_alloc(iopf_param, evt, &abort_group);
210+
if (group == &abort_group) {
181211
ret = -ENOMEM;
182-
goto cleanup_partial;
212+
goto err_abort;
183213
}
184214

185-
group->fault_param = iopf_param;
186-
group->last_fault.fault = *fault;
187-
INIT_LIST_HEAD(&group->faults);
188-
INIT_LIST_HEAD(&group->pending_node);
189-
group->domain = domain;
190-
list_add(&group->last_fault.list, &group->faults);
191-
192-
/* See if we have partial faults for this group */
193-
mutex_lock(&iopf_param->lock);
194-
list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
195-
if (iopf->fault.prm.grpid == fault->prm.grpid)
196-
/* Insert *before* the last fault */
197-
list_move(&iopf->list, &group->faults);
198-
}
199-
list_add(&group->pending_node, &iopf_param->faults);
200-
mutex_unlock(&iopf_param->lock);
201-
202-
ret = domain->iopf_handler(group);
203-
if (ret) {
204-
mutex_lock(&iopf_param->lock);
205-
list_del_init(&group->pending_node);
206-
mutex_unlock(&iopf_param->lock);
207-
iopf_free_group(group);
215+
group->domain = get_domain_for_iopf(dev, fault);
216+
if (!group->domain) {
217+
ret = -EINVAL;
218+
goto err_abort;
208219
}
209220

210-
return ret;
211-
212-
cleanup_partial:
213-
mutex_lock(&iopf_param->lock);
214-
list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
215-
if (iopf->fault.prm.grpid == fault->prm.grpid) {
216-
list_del(&iopf->list);
217-
kfree(iopf);
218-
}
219-
}
220-
mutex_unlock(&iopf_param->lock);
221-
iopf_put_dev_fault_param(iopf_param);
221+
/*
222+
* On success iopf_handler must call iopf_group_response() and
223+
* iopf_free_group()
224+
*/
225+
ret = group->domain->iopf_handler(group);
226+
if (ret)
227+
goto err_abort;
228+
return 0;
222229

230+
err_abort:
231+
iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE);
232+
if (group == &abort_group)
233+
__iopf_free_group(group);
234+
else
235+
iopf_free_group(group);
223236
return ret;
224237
}
225238
EXPORT_SYMBOL_GPL(iommu_report_device_fault);
@@ -259,11 +272,9 @@ EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
259272
* iopf_group_response - Respond a group of page faults
260273
* @group: the group of faults with the same group id
261274
* @status: the response code
262-
*
263-
* Return 0 on success and <0 on error.
264275
*/
265-
int iopf_group_response(struct iopf_group *group,
266-
enum iommu_page_response_code status)
276+
void iopf_group_response(struct iopf_group *group,
277+
enum iommu_page_response_code status)
267278
{
268279
struct iommu_fault_param *fault_param = group->fault_param;
269280
struct iopf_fault *iopf = &group->last_fault;
@@ -274,17 +285,14 @@ int iopf_group_response(struct iopf_group *group,
274285
.grpid = iopf->fault.prm.grpid,
275286
.code = status,
276287
};
277-
int ret = -EINVAL;
278288

279289
/* Only send response if there is a fault report pending */
280290
mutex_lock(&fault_param->lock);
281291
if (!list_empty(&group->pending_node)) {
282-
ret = ops->page_response(dev, &group->last_fault, &resp);
292+
ops->page_response(dev, &group->last_fault, &resp);
283293
list_del_init(&group->pending_node);
284294
}
285295
mutex_unlock(&fault_param->lock);
286-
287-
return ret;
288296
}
289297
EXPORT_SYMBOL_GPL(iopf_group_response);
290298

include/linux/iommu.h

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -574,9 +574,8 @@ struct iommu_ops {
574574
int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
575575
int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
576576

577-
int (*page_response)(struct device *dev,
578-
struct iopf_fault *evt,
579-
struct iommu_page_response *msg);
577+
void (*page_response)(struct device *dev, struct iopf_fault *evt,
578+
struct iommu_page_response *msg);
580579

581580
int (*def_domain_type)(struct device *dev);
582581
void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);
@@ -1547,8 +1546,8 @@ void iopf_queue_free(struct iopf_queue *queue);
15471546
int iopf_queue_discard_partial(struct iopf_queue *queue);
15481547
void iopf_free_group(struct iopf_group *group);
15491548
int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
1550-
int iopf_group_response(struct iopf_group *group,
1551-
enum iommu_page_response_code status);
1549+
void iopf_group_response(struct iopf_group *group,
1550+
enum iommu_page_response_code status);
15521551
#else
15531552
static inline int
15541553
iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
@@ -1590,10 +1589,9 @@ iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
15901589
return -ENODEV;
15911590
}
15921591

1593-
static inline int iopf_group_response(struct iopf_group *group,
1594-
enum iommu_page_response_code status)
1592+
static inline void iopf_group_response(struct iopf_group *group,
1593+
enum iommu_page_response_code status)
15951594
{
1596-
return -ENODEV;
15971595
}
15981596
#endif /* CONFIG_IOMMU_IOPF */
15991597
#endif /* __LINUX_IOMMU_H */

0 commit comments

Comments (0)