@@ -62,8 +62,6 @@ class __copyAcc2Acc;
 namespace cl {
 namespace sycl {
 
-namespace csd = cl::sycl::detail;
-
 // Forward declaration
 
 template <typename T, int Dimensions, typename AllocatorT> class buffer;
@@ -106,7 +104,7 @@ template <typename Name, typename Type> struct get_kernel_name_t {
 };
 
 /// Specialization for the case when \c Name is undefined.
-template <typename Type> struct get_kernel_name_t<csd::auto_name, Type> {
+template <typename Type> struct get_kernel_name_t<detail::auto_name, Type> {
   using name = Type;
 };
 
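The specialization above is what allows kernel invocations to omit an explicit name: when KernelName is left at the detail::auto_name default, the functor type itself becomes the kernel name. Below is a minimal standalone sketch of that dispatch; the names are illustrative only, not the header's own declarations.

#include <type_traits>

namespace sketch {
struct auto_name; // tag meaning "no kernel name was supplied"

// Primary template: an explicitly supplied name wins.
template <typename Name, typename Type> struct get_kernel_name_t {
  using name = Name;
};

// Fallback: with auto_name, the functor/lambda type names the kernel.
template <typename Type> struct get_kernel_name_t<auto_name, Type> {
  using name = Type;
};
} // namespace sketch

class explicit_name;
struct Functor {};
static_assert(std::is_same<sketch::get_kernel_name_t<explicit_name, Functor>::name,
                           explicit_name>::value,
              "an explicit name is kept");
static_assert(std::is_same<sketch::get_kernel_name_t<sketch::auto_name, Functor>::name,
                           Functor>::value,
              "auto_name falls back to the functor type");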
@@ -409,9 +407,9 @@ class handler {
       break;
     case detail::CG::PREFETCH_USM:
       CommandGroup.reset(new detail::CGPrefetchUSM(
-          MDstPtr, MLength, std::move(MArgsStorage),
-          std::move(MAccStorage), std::move(MSharedPtrStorage),
-          std::move(MRequirements), std::move(MEvents)));
+          MDstPtr, MLength, std::move(MArgsStorage), std::move(MAccStorage),
+          std::move(MSharedPtrStorage), std::move(MRequirements),
+          std::move(MEvents)));
       break;
     case detail::CG::NONE:
       throw runtime_error("Command group submitted without a kernel or a "
@@ -666,7 +664,7 @@ class handler {
       extractArgsAndReqsFromLambda(MHostKernel->getPtr(), KI::getNumParams(),
                                    &KI::getParamDesc(0));
       MKernelName = KI::getName();
-      MOSModuleHandle = csd::OSUtil::getOSModuleHandle(KI::getName());
+      MOSModuleHandle = detail::OSUtil::getOSModuleHandle(KI::getName());
     } else {
       // In case w/o the integration header it is necessary to process
       // accessors from the list(which are associated with this handler) as
@@ -676,9 +674,10 @@ class handler {
   }
 
   // single_task version with a kernel represented as a lambda.
-  template <typename KernelName = csd::auto_name, typename KernelType>
+  template <typename KernelName = detail::auto_name, typename KernelType>
   void single_task(KernelType KernelFunc) {
-    using NameT = typename csd::get_kernel_name_t<KernelName, KernelType>::name;
+    using NameT =
+        typename detail::get_kernel_name_t<KernelName, KernelType>::name;
 #ifdef __SYCL_DEVICE_ONLY__
     kernel_single_task<NameT>(KernelFunc);
 #else
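For context, this is how the lambda-only single_task overload is typically invoked. A minimal usage sketch, assuming a default-constructed queue; the explicit kernel name feeds the KernelName template parameter, while omitting it would fall back to detail::auto_name as shown above.

#include <CL/sycl.hpp>

int main() {
  cl::sycl::queue Q;
  Q.submit([&](cl::sycl::handler &CGH) {
    // Named form: "init_task" becomes the KernelName template argument.
    CGH.single_task<class init_task>([=]() { /* device work goes here */ });
  });
  Q.wait();
  return 0;
}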
@@ -691,9 +690,11 @@ class handler {
 
   // parallel_for version with a kernel represented as a lambda + range that
   // specifies global size only.
-  template <typename KernelName = csd::auto_name, typename KernelType, int Dims>
+  template <typename KernelName = detail::auto_name, typename KernelType,
+            int Dims>
   void parallel_for(range<Dims> NumWorkItems, KernelType KernelFunc) {
-    using NameT = typename csd::get_kernel_name_t<KernelName, KernelType>::name;
+    using NameT =
+        typename detail::get_kernel_name_t<KernelName, KernelType>::name;
 #ifdef __SYCL_DEVICE_ONLY__
     kernel_parallel_for<NameT, KernelType, Dims>(KernelFunc);
 #else
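A minimal usage sketch of this range-only overload, assuming a 1-D buffer; each invocation of the lambda receives its global id, and there is no offset or explicit work-group size.

#include <CL/sycl.hpp>

int main() {
  constexpr size_t N = 64;
  cl::sycl::queue Q;
  cl::sycl::buffer<int, 1> Buf{cl::sycl::range<1>{N}};
  Q.submit([&](cl::sycl::handler &CGH) {
    auto Acc = Buf.get_access<cl::sycl::access::mode::write>(CGH);
    // Global size only: N work-items, indexed by id<1>.
    CGH.parallel_for<class fill_iota>(
        cl::sycl::range<1>{N},
        [=](cl::sycl::id<1> I) { Acc[I] = static_cast<int>(I[0]); });
  });
  Q.wait();
  return 0;
}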
@@ -708,17 +709,18 @@ class handler {
     MNDRDesc.set(range<1>{1});
 
     MArgs = std::move(MAssociatedAccesors);
-    MHostKernel.reset(
-        new detail::HostKernel<FuncT, void, 1>(std::move(Func)));
+    MHostKernel.reset(new detail::HostKernel<FuncT, void, 1>(std::move(Func)));
     MCGType = detail::CG::RUN_ON_HOST_INTEL;
   }
 
   // parallel_for version with a kernel represented as a lambda + range and
   // offset that specify global size and global offset correspondingly.
-  template <typename KernelName = csd::auto_name, typename KernelType, int Dims>
+  template <typename KernelName = detail::auto_name, typename KernelType,
+            int Dims>
   void parallel_for(range<Dims> NumWorkItems, id<Dims> WorkItemOffset,
                     KernelType KernelFunc) {
-    using NameT = typename csd::get_kernel_name_t<KernelName, KernelType>::name;
+    using NameT =
+        typename detail::get_kernel_name_t<KernelName, KernelType>::name;
 #ifdef __SYCL_DEVICE_ONLY__
     kernel_parallel_for<NameT, KernelType, Dims>(KernelFunc);
 #else
@@ -730,9 +732,11 @@ class handler {
 
   // parallel_for version with a kernel represented as a lambda + nd_range that
   // specifies global, local sizes and offset.
-  template <typename KernelName = csd::auto_name, typename KernelType, int Dims>
+  template <typename KernelName = detail::auto_name, typename KernelType,
+            int Dims>
   void parallel_for(nd_range<Dims> ExecutionRange, KernelType KernelFunc) {
-    using NameT = typename csd::get_kernel_name_t<KernelName, KernelType>::name;
+    using NameT =
+        typename detail::get_kernel_name_t<KernelName, KernelType>::name;
 #ifdef __SYCL_DEVICE_ONLY__
     kernel_parallel_for<NameT, KernelType, Dims>(KernelFunc);
 #else
@@ -742,10 +746,12 @@ class handler {
 #endif
   }
 
-  template <typename KernelName = csd::auto_name, typename KernelType, int Dims>
+  template <typename KernelName = detail::auto_name, typename KernelType,
+            int Dims>
   void parallel_for_work_group(range<Dims> NumWorkGroups,
                                KernelType KernelFunc) {
-    using NameT = typename csd::get_kernel_name_t<KernelName, KernelType>::name;
+    using NameT =
+        typename detail::get_kernel_name_t<KernelName, KernelType>::name;
 #ifdef __SYCL_DEVICE_ONLY__
     kernel_parallel_for_work_group<NameT, KernelType, Dims>(KernelFunc);
 #else
@@ -755,11 +761,13 @@ class handler {
 #endif // __SYCL_DEVICE_ONLY__
   }
 
-  template <typename KernelName = csd::auto_name, typename KernelType, int Dims>
+  template <typename KernelName = detail::auto_name, typename KernelType,
+            int Dims>
   void parallel_for_work_group(range<Dims> NumWorkGroups,
                                range<Dims> WorkGroupSize,
                                KernelType KernelFunc) {
-    using NameT = typename csd::get_kernel_name_t<KernelName, KernelType>::name;
+    using NameT =
+        typename detail::get_kernel_name_t<KernelName, KernelType>::name;
 #ifdef __SYCL_DEVICE_ONLY__
     kernel_parallel_for_work_group<NameT, KernelType, Dims>(KernelFunc);
 #else
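For the two hierarchical-parallelism overloads above, a minimal usage sketch of the group-range plus work-group-size form; the outer lambda runs once per work-group and the nested parallel_for_work_item body runs once per work-item of that group.

#include <CL/sycl.hpp>

int main() {
  cl::sycl::queue Q;
  cl::sycl::buffer<int, 1> Buf{cl::sycl::range<1>{8 * 16}};
  Q.submit([&](cl::sycl::handler &CGH) {
    auto Acc = Buf.get_access<cl::sycl::access::mode::write>(CGH);
    CGH.parallel_for_work_group<class hier_fill>(
        cl::sycl::range<1>{8},  // number of work-groups
        cl::sycl::range<1>{16}, // work-items per group
        [=](cl::sycl::group<1> G) {
          G.parallel_for_work_item([&](cl::sycl::h_item<1> Item) {
            Acc[Item.get_global_id()] = static_cast<int>(G.get_id(0));
          });
        });
  });
  Q.wait();
  return 0;
}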
@@ -823,9 +831,10 @@ class handler {
   // single_task version which takes two "kernels". One is a lambda which is
   // used if device, queue is bound to, is host device. Second is a sycl::kernel
   // which is used otherwise.
-  template <typename KernelName = csd::auto_name, typename KernelType>
+  template <typename KernelName = detail::auto_name, typename KernelType>
   void single_task(kernel SyclKernel, KernelType KernelFunc) {
-    using NameT = typename csd::get_kernel_name_t<KernelName, KernelType>::name;
+    using NameT =
+        typename detail::get_kernel_name_t<KernelName, KernelType>::name;
 #ifdef __SYCL_DEVICE_ONLY__
     kernel_single_task<NameT>(KernelFunc);
 #else
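A minimal sketch of the two-argument single_task, assuming the program API available in this SYCL version to obtain the pre-built sycl::kernel; when the queue is bound to the host device the lambda body is executed instead of the kernel object.

#include <CL/sycl.hpp>

int main() {
  cl::sycl::queue Q;
  cl::sycl::program P{Q.get_context()};
  P.build_with_kernel_type<class mixed_task>();
  cl::sycl::kernel K = P.get_kernel<class mixed_task>();
  Q.submit([&](cl::sycl::handler &CGH) {
    // K runs on real devices; the lambda is the host-device fallback.
    CGH.single_task<class mixed_task>(K, [=]() { /* work goes here */ });
  });
  Q.wait();
  return 0;
}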
@@ -842,10 +851,12 @@ class handler {
   // parallel_for version which takes two "kernels". One is a lambda which is
   // used if device, queue is bound to, is host device. Second is a sycl::kernel
   // which is used otherwise. range argument specifies global size.
-  template <typename KernelName = csd::auto_name, typename KernelType, int Dims>
+  template <typename KernelName = detail::auto_name, typename KernelType,
+            int Dims>
   void parallel_for(kernel SyclKernel, range<Dims> NumWorkItems,
                     KernelType KernelFunc) {
-    using NameT = typename csd::get_kernel_name_t<KernelName, KernelType>::name;
+    using NameT =
+        typename detail::get_kernel_name_t<KernelName, KernelType>::name;
 #ifdef __SYCL_DEVICE_ONLY__
     kernel_parallel_for<NameT, KernelType, Dims>(KernelFunc);
 #else
@@ -862,10 +873,12 @@ class handler {
   // parallel_for version which takes two "kernels". One is a lambda which is
   // used if device, queue is bound to, is host device. Second is a sycl::kernel
   // which is used otherwise. range and id specify global size and offset.
-  template <typename KernelName = csd::auto_name, typename KernelType, int Dims>
+  template <typename KernelName = detail::auto_name, typename KernelType,
+            int Dims>
   void parallel_for(kernel SyclKernel, range<Dims> NumWorkItems,
                     id<Dims> WorkItemOffset, KernelType KernelFunc) {
-    using NameT = typename csd::get_kernel_name_t<KernelName, KernelType>::name;
+    using NameT =
+        typename detail::get_kernel_name_t<KernelName, KernelType>::name;
 #ifdef __SYCL_DEVICE_ONLY__
     kernel_parallel_for<NameT, KernelType, Dims>(KernelFunc);
 #else
@@ -882,10 +895,12 @@ class handler {
   // parallel_for version which takes two "kernels". One is a lambda which is
   // used if device, queue is bound to, is host device. Second is a sycl::kernel
   // which is used otherwise. nd_range specifies global, local size and offset.
-  template <typename KernelName = csd::auto_name, typename KernelType, int Dims>
+  template <typename KernelName = detail::auto_name, typename KernelType,
+            int Dims>
   void parallel_for(kernel SyclKernel, nd_range<Dims> NDRange,
                     KernelType KernelFunc) {
-    using NameT = typename csd::get_kernel_name_t<KernelName, KernelType>::name;
+    using NameT =
+        typename detail::get_kernel_name_t<KernelName, KernelType>::name;
 #ifdef __SYCL_DEVICE_ONLY__
     kernel_parallel_for<NameT, KernelType, Dims>(KernelFunc);
 #else
@@ -905,10 +920,12 @@ class handler {
   /// of the kernel. The same source kernel can be compiled multiple times
   /// yielding multiple kernel class objects accessible via the \c program class
   /// interface.
-  template <typename KernelName = csd::auto_name, typename KernelType, int Dims>
+  template <typename KernelName = detail::auto_name, typename KernelType,
+            int Dims>
   void parallel_for_work_group(kernel SyclKernel, range<Dims> NumWorkGroups,
                                KernelType KernelFunc) {
-    using NameT = typename csd::get_kernel_name_t<KernelName, KernelType>::name;
+    using NameT =
+        typename detail::get_kernel_name_t<KernelName, KernelType>::name;
 #ifdef __SYCL_DEVICE_ONLY__
     kernel_parallel_for_work_group<NameT, KernelType, Dims>(KernelFunc);
 #else
@@ -921,11 +938,13 @@ class handler {
 
   /// Two-kernel version of the \c parallel_for_work_group with group and local
   /// range.
-  template <typename KernelName = csd::auto_name, typename KernelType, int Dims>
+  template <typename KernelName = detail::auto_name, typename KernelType,
+            int Dims>
   void parallel_for_work_group(kernel SyclKernel, range<Dims> NumWorkGroups,
                                range<Dims> WorkGroupSize,
                                KernelType KernelFunc) {
-    using NameT = typename csd::get_kernel_name_t<KernelName, KernelType>::name;
+    using NameT =
+        typename detail::get_kernel_name_t<KernelName, KernelType>::name;
 #ifdef __SYCL_DEVICE_ONLY__
     kernel_parallel_for_work_group<NameT, KernelType, Dims>(KernelFunc);
 #else
@@ -1083,7 +1102,7 @@ class handler {
   // Shapes can be 1, 2 or 3 dimensional rectangles.
   template <int Dims_Src, int Dims_Dst>
   static bool IsCopyingRectRegionAvailable(const range<Dims_Src> Src,
-                                            const range<Dims_Dst> Dst) {
+                                           const range<Dims_Dst> Dst) {
     if (Dims_Src > Dims_Dst)
       return false;
     for (size_t I = 0; I < Dims_Src; ++I)
@@ -1092,7 +1111,7 @@ class handler {
         return false;
     return true;
   }
-// copy memory pointed by accessor to the memory pointed by another accessor
+  // copy memory pointed by accessor to the memory pointed by another accessor
   template <
       typename T_Src, int Dims_Src, access::mode AccessMode_Src,
       access::target AccessTarget_Src, typename T_Dst, int Dims_Dst,
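A minimal usage sketch of the accessor-to-accessor copy that this dimensionality check guards. The copy below is expected to pass the check because each source dimension (4) fits inside the matching destination dimension (8); whether differently shaped accessors are accepted here is an assumption based on the helper above, not a specification guarantee.

#include <CL/sycl.hpp>

int main() {
  cl::sycl::queue Q;
  cl::sycl::buffer<int, 2> Src{cl::sycl::range<2>{4, 4}};
  cl::sycl::buffer<int, 2> Dst{cl::sycl::range<2>{8, 8}};
  Q.submit([&](cl::sycl::handler &CGH) {
    auto SrcAcc = Src.get_access<cl::sycl::access::mode::read>(CGH);
    auto DstAcc = Dst.get_access<cl::sycl::access::mode::write>(CGH);
    // 4x4 source region copied into the 8x8 destination.
    CGH.copy(SrcAcc, DstAcc);
  });
  Q.wait();
  return 0;
}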
@@ -1209,7 +1228,7 @@ class handler {
   }
 
   // Copy memory from the source to the destination.
-  void memcpy(void* Dest, const void* Src, size_t Count) {
+  void memcpy(void *Dest, const void *Src, size_t Count) {
     MSrcPtr = const_cast<void *>(Src);
     MDstPtr = Dest;
     MLength = Count;
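A minimal usage sketch of handler::memcpy with USM pointers, assuming the USM extension entry points of this repository (malloc_host, malloc_device, free); Count is a byte count, not an element count.

#include <CL/sycl.hpp>

int main() {
  constexpr size_t N = 128;
  cl::sycl::queue Q;
  int *Host = cl::sycl::malloc_host<int>(N, Q.get_context());
  int *Dev = cl::sycl::malloc_device<int>(N, Q.get_device(), Q.get_context());
  for (size_t I = 0; I < N; ++I)
    Host[I] = static_cast<int>(I);
  Q.submit([&](cl::sycl::handler &CGH) {
    CGH.memcpy(Dev, Host, N * sizeof(int)); // byte count
  });
  Q.wait();
  cl::sycl::free(Dev, Q.get_context());
  cl::sycl::free(Host, Q.get_context());
  return 0;
}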