@@ -124,6 +124,7 @@ struct rk_iommudata {
124
124
125
125
static struct device * dma_dev ;
126
126
static const struct rk_iommu_ops * rk_ops ;
127
+ static struct iommu_domain rk_identity_domain ;
127
128
128
129
static inline void rk_table_flush (struct rk_iommu_domain * dom , dma_addr_t dma ,
129
130
unsigned int count )
@@ -646,7 +647,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
646
647
* Ignore the return code, though, since we always zap cache
647
648
* and clear the page fault anyway.
648
649
*/
649
- if (iommu -> domain )
650
+ if (iommu -> domain != & rk_identity_domain )
650
651
report_iommu_fault (iommu -> domain , iommu -> dev , iova ,
651
652
flags );
652
653
else
@@ -980,26 +981,27 @@ static int rk_iommu_enable(struct rk_iommu *iommu)
980
981
return ret ;
981
982
}
982
983
983
- static void rk_iommu_detach_device (struct iommu_domain * domain ,
984
- struct device * dev )
984
+ static int rk_iommu_identity_attach (struct iommu_domain * identity_domain ,
985
+ struct device * dev )
985
986
{
986
987
struct rk_iommu * iommu ;
987
- struct rk_iommu_domain * rk_domain = to_rk_domain ( domain ) ;
988
+ struct rk_iommu_domain * rk_domain ;
988
989
unsigned long flags ;
989
990
int ret ;
990
991
991
992
/* Allow 'virtual devices' (eg drm) to detach from domain */
992
993
iommu = rk_iommu_from_dev (dev );
993
994
if (!iommu )
994
- return ;
995
+ return - ENODEV ;
996
+
997
+ rk_domain = to_rk_domain (iommu -> domain );
995
998
996
999
dev_dbg (dev , "Detaching from iommu domain\n" );
997
1000
998
- /* iommu already detached */
999
- if (iommu -> domain != domain )
1000
- return ;
1001
+ if (iommu -> domain == identity_domain )
1002
+ return 0 ;
1001
1003
1002
- iommu -> domain = NULL ;
1004
+ iommu -> domain = identity_domain ;
1003
1005
1004
1006
spin_lock_irqsave (& rk_domain -> iommus_lock , flags );
1005
1007
list_del_init (& iommu -> node );
@@ -1011,8 +1013,31 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
1011
1013
rk_iommu_disable (iommu );
1012
1014
pm_runtime_put (iommu -> dev );
1013
1015
}
1016
+
1017
+ return 0 ;
1014
1018
}
1015
1019
1020
/*
 * .free callback for the identity domain.  The identity domain is a
 * statically allocated singleton (rk_identity_domain), never kmalloc'd,
 * so there is deliberately nothing to release here.
 */
static void rk_iommu_identity_free(struct iommu_domain *domain)
{
}
1023
+
1024
+ static struct iommu_domain_ops rk_identity_ops = {
1025
+ .attach_dev = rk_iommu_identity_attach ,
1026
+ .free = rk_iommu_identity_free ,
1027
+ };
1028
+
1029
+ static struct iommu_domain rk_identity_domain = {
1030
+ .type = IOMMU_DOMAIN_IDENTITY ,
1031
+ .ops = & rk_identity_ops ,
1032
+ };
1033
+
1034
#ifdef CONFIG_ARM
/*
 * set_platform_dma_ops hook (32-bit ARM only, where the IOMMU core does
 * not manage a default domain): when the core wants the device returned
 * to platform DMA, put it back in bypass by attaching it to the static
 * identity domain.  Failure is not expected, hence the WARN_ON.
 */
static void rk_iommu_set_platform_dma(struct device *dev)
{
	WARN_ON(rk_iommu_identity_attach(&rk_identity_domain, dev));
}
#endif
1040
+
1016
1041
static int rk_iommu_attach_device (struct iommu_domain * domain ,
1017
1042
struct device * dev )
1018
1043
{
@@ -1035,8 +1060,9 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
1035
1060
if (iommu -> domain == domain )
1036
1061
return 0 ;
1037
1062
1038
- if (iommu -> domain )
1039
- rk_iommu_detach_device (iommu -> domain , dev );
1063
+ ret = rk_iommu_identity_attach (& rk_identity_domain , dev );
1064
+ if (ret )
1065
+ return ret ;
1040
1066
1041
1067
iommu -> domain = domain ;
1042
1068
@@ -1050,7 +1076,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
1050
1076
1051
1077
ret = rk_iommu_enable (iommu );
1052
1078
if (ret )
1053
- rk_iommu_detach_device ( iommu -> domain , dev );
1079
+ WARN_ON ( rk_iommu_identity_attach ( & rk_identity_domain , dev ) );
1054
1080
1055
1081
pm_runtime_put (iommu -> dev );
1056
1082
@@ -1061,6 +1087,9 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
1061
1087
{
1062
1088
struct rk_iommu_domain * rk_domain ;
1063
1089
1090
+ if (type == IOMMU_DOMAIN_IDENTITY )
1091
+ return & rk_identity_domain ;
1092
+
1064
1093
if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA )
1065
1094
return NULL ;
1066
1095
@@ -1176,6 +1205,7 @@ static int rk_iommu_of_xlate(struct device *dev,
1176
1205
iommu_dev = of_find_device_by_node (args -> np );
1177
1206
1178
1207
data -> iommu = platform_get_drvdata (iommu_dev );
1208
+ data -> iommu -> domain = & rk_identity_domain ;
1179
1209
dev_iommu_priv_set (dev , data );
1180
1210
1181
1211
platform_device_put (iommu_dev );
@@ -1188,6 +1218,9 @@ static const struct iommu_ops rk_iommu_ops = {
1188
1218
.probe_device = rk_iommu_probe_device ,
1189
1219
.release_device = rk_iommu_release_device ,
1190
1220
.device_group = rk_iommu_device_group ,
1221
+ #ifdef CONFIG_ARM
1222
+ .set_platform_dma_ops = rk_iommu_set_platform_dma ,
1223
+ #endif
1191
1224
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP ,
1192
1225
.of_xlate = rk_iommu_of_xlate ,
1193
1226
.default_domain_ops = & (const struct iommu_domain_ops ) {
@@ -1343,7 +1376,7 @@ static int __maybe_unused rk_iommu_suspend(struct device *dev)
1343
1376
{
1344
1377
struct rk_iommu * iommu = dev_get_drvdata (dev );
1345
1378
1346
- if (! iommu -> domain )
1379
+ if (iommu -> domain == & rk_identity_domain )
1347
1380
return 0 ;
1348
1381
1349
1382
rk_iommu_disable (iommu );
@@ -1354,7 +1387,7 @@ static int __maybe_unused rk_iommu_resume(struct device *dev)
1354
1387
{
1355
1388
struct rk_iommu * iommu = dev_get_drvdata (dev );
1356
1389
1357
- if (! iommu -> domain )
1390
+ if (iommu -> domain == & rk_identity_domain )
1358
1391
return 0 ;
1359
1392
1360
1393
return rk_iommu_enable (iommu );
0 commit comments