@@ -1531,37 +1531,6 @@ static void flush_async_ops(struct xe_vm *vm)
 	flush_work(&vm->async_ops.work);
 }
 
-static void vm_error_capture(struct xe_vm *vm, int err,
-			     u32 op, u64 addr, u64 size)
-{
-	struct drm_xe_vm_bind_op_error_capture capture;
-	u64 __user *address =
-		u64_to_user_ptr(vm->async_ops.error_capture.addr);
-	bool in_kthread = !current->mm;
-
-	capture.error = err;
-	capture.op = op;
-	capture.addr = addr;
-	capture.size = size;
-
-	if (in_kthread) {
-		if (!mmget_not_zero(vm->async_ops.error_capture.mm))
-			goto mm_closed;
-		kthread_use_mm(vm->async_ops.error_capture.mm);
-	}
-
-	if (copy_to_user(address, &capture, sizeof(capture)))
-		drm_warn(&vm->xe->drm, "Copy to user failed");
-
-	if (in_kthread) {
-		kthread_unuse_mm(vm->async_ops.error_capture.mm);
-		mmput(vm->async_ops.error_capture.mm);
-	}
-
-mm_closed:
-	wake_up_all(&vm->async_ops.error_capture.wq);
-}
-
 static void xe_vm_close(struct xe_vm *vm)
 {
 	down_write(&vm->lock);
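
For context on the first deleted block: vm_error_capture() ran from the VM's async-ops worker, where current->mm is NULL, so before it could copy_to_user() it had to take a reference on the mm recorded at setup time and temporarily adopt it. A minimal standalone sketch of that pattern, using only kernel APIs the deleted code itself calls; the names report_to_user() and struct user_report are hypothetical:

#include <linux/kernel.h>	/* u64_to_user_ptr() */
#include <linux/kthread.h>	/* kthread_use_mm(), kthread_unuse_mm() */
#include <linux/sched/mm.h>	/* mmget_not_zero(), mmput() */
#include <linux/uaccess.h>	/* copy_to_user() */

struct user_report {		/* hypothetical payload */
	int err;
};

/* Write @report to user address @uaddr on behalf of the process owning @mm. */
static int report_to_user(struct mm_struct *mm, u64 uaddr,
			  const struct user_report *report)
{
	void __user *address = u64_to_user_ptr(uaddr);
	bool in_kthread = !current->mm;	/* running from a kthread/worker? */
	int ret = 0;

	if (in_kthread) {
		/* Fails if the owning process has already exited. */
		if (!mmget_not_zero(mm))
			return -ESRCH;
		kthread_use_mm(mm);	/* temporarily adopt the user mm */
	}

	if (copy_to_user(address, report, sizeof(*report)))
		ret = -EFAULT;

	if (in_kthread) {
		kthread_unuse_mm(mm);
		mmput(mm);
	}

	return ret;
}
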
@@ -2036,91 +2005,6 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
 	return 0;
 }
 
-static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
-					u64 value)
-{
-	if (XE_IOCTL_DBG(xe, !value))
-		return -EINVAL;
-
-	if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
-		return -EOPNOTSUPP;
-
-	if (XE_IOCTL_DBG(xe, vm->async_ops.error_capture.addr))
-		return -EOPNOTSUPP;
-
-	vm->async_ops.error_capture.mm = current->mm;
-	vm->async_ops.error_capture.addr = value;
-	init_waitqueue_head(&vm->async_ops.error_capture.wq);
-
-	return 0;
-}
-
-typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
-				     u64 value);
-
-static const xe_vm_set_property_fn vm_set_property_funcs[] = {
-	[XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
-		vm_set_error_capture_address,
-};
-
-static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
-				    u64 extension)
-{
-	u64 __user *address = u64_to_user_ptr(extension);
-	struct drm_xe_ext_set_property ext;
-	int err;
-
-	err = __copy_from_user(&ext, address, sizeof(ext));
-	if (XE_IOCTL_DBG(xe, err))
-		return -EFAULT;
-
-	if (XE_IOCTL_DBG(xe, ext.property >=
-			 ARRAY_SIZE(vm_set_property_funcs)) ||
-	    XE_IOCTL_DBG(xe, ext.pad) ||
-	    XE_IOCTL_DBG(xe, ext.reserved[0] || ext.reserved[1]))
-		return -EINVAL;
-
-	return vm_set_property_funcs[ext.property](xe, vm, ext.value);
-}
-
-typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
-				       u64 extension);
-
-static const xe_vm_set_property_fn vm_user_extension_funcs[] = {
-	[XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
-};
-
-#define MAX_USER_EXTENSIONS	16
-static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
-			      u64 extensions, int ext_number)
-{
-	u64 __user *address = u64_to_user_ptr(extensions);
-	struct xe_user_extension ext;
-	int err;
-
-	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
-		return -E2BIG;
-
-	err = __copy_from_user(&ext, address, sizeof(ext));
-	if (XE_IOCTL_DBG(xe, err))
-		return -EFAULT;
-
-	if (XE_IOCTL_DBG(xe, ext.pad) ||
-	    XE_IOCTL_DBG(xe, ext.name >=
-			 ARRAY_SIZE(vm_user_extension_funcs)))
-		return -EINVAL;
-
-	err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
-	if (XE_IOCTL_DBG(xe, err))
-		return err;
-
-	if (ext.next_extension)
-		return vm_user_extensions(xe, vm, ext.next_extension,
-					  ++ext_number);
-
-	return 0;
-}
-
 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
 				    DRM_XE_VM_CREATE_COMPUTE_MODE | \
 				    DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
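
The second deleted block is the generic extension walker: vm_user_extensions() followed a userspace-built chain of struct xe_user_extension headers (recursion capped at MAX_USER_EXTENSIONS, i.e. 16), dispatching each node by ext.name, and XE_VM_EXTENSION_SET_PROPERTY in turn dispatched on ext.property. Before this removal, userspace would have registered the capture address roughly as below; a hypothetical sketch, with the uapi struct layouts assumed from the fields the deleted kernel code reads rather than quoted from the header:

#include <stdint.h>

/* Assumed layouts: drm_xe_ext_set_property is taken to embed an
 * xe_user_extension header plus the property/value fields checked
 * by vm_user_ext_set_property() above. */
struct drm_xe_vm_bind_op_error_capture capture = { 0 };

struct drm_xe_ext_set_property ext = {
	.base.name = XE_VM_EXTENSION_SET_PROPERTY,
	.base.next_extension = 0,	/* last node in the chain */
	.property = XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS,
	.value = (uint64_t)(uintptr_t)&capture,	/* where errors get written */
};

struct drm_xe_vm_create create = {
	.flags = DRM_XE_VM_CREATE_ASYNC_BIND_OPS,	/* required by the setter */
	.extensions = (uint64_t)(uintptr_t)&ext,
};

/* err = ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create); */

After this change the next hunk makes xe_vm_create_ioctl() reject any non-zero args->extensions with -EINVAL, so the whole chain-walking path becomes unreachable from userspace.
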
@@ -2138,6 +2022,9 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 	int err;
 	u32 flags = 0;
 
+	if (XE_IOCTL_DBG(xe, args->extensions))
+		return -EINVAL;
+
 	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
 		args->flags |= DRM_XE_VM_CREATE_SCRATCH_PAGE;
 
@@ -2180,14 +2067,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 	if (IS_ERR(vm))
 		return PTR_ERR(vm);
 
-	if (args->extensions) {
-		err = vm_user_extensions(xe, vm, args->extensions, 0);
-		if (XE_IOCTL_DBG(xe, err)) {
-			xe_vm_close_and_put(vm);
-			return err;
-		}
-	}
-
 	mutex_lock(&xef->vm.lock);
 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
 	mutex_unlock(&xef->vm.lock);
@@ -3087,8 +2966,6 @@ static void xe_vma_op_work_func(struct work_struct *w)
 			vm_set_async_error(vm, err);
 			up_write(&vm->lock);
 
-			if (vm->async_ops.error_capture.addr)
-				vm_error_capture(vm, err, 0, 0, 0);
 			break;
 		}
 		up_write(&vm->lock);