@@ -672,7 +672,7 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr
     size_t d_size;
     cl_mem d_X;
     if (src0->backend == GGML_BACKEND_CL) {
-        d_X = *(cl_mem*) src0->data;
+        d_X = (cl_mem) src0->data;
     } else {
         d_X = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * x_ne, &x_size, CL_MEM_READ_ONLY);
     }
@@ -748,7 +748,7 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr
     size_t d_size;
     cl_mem d_X;
     if (src0->backend == GGML_BACKEND_CL) {
-        d_X = *(cl_mem*) src0->data;
+        d_X = (cl_mem) src0->data;
     } else {
         d_X = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * x_ne, &x_size, CL_MEM_READ_ONLY);
     }
@@ -873,7 +873,7 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
             if (src0->backend == GGML_BACKEND_CPU) {
                 CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Q, 0, src0, i03, i02, NULL));
             } else if (src0->backend == GGML_BACKEND_CL) {
-                d_Q = *(cl_mem*) src0->data;
+                d_Q = (cl_mem) src0->data;
             } else {
                 GGML_ASSERT(false);
             }
@@ -1016,14 +1016,13 @@ void ggml_cl_transform_tensor(ggml_tensor * tensor) {
     const size_t q_sz = ggml_type_size(type) * ne0 * ne1 * ne2 * ne3 / ggml_blck_size(type);

     size_t q_size;
-    cl_mem* dst = (cl_mem*) malloc(sizeof(cl_mem));
-    *dst = ggml_cl_pool_malloc(q_sz, &q_size, CL_MEM_READ_ONLY);
+    cl_mem dst = ggml_cl_pool_malloc(q_sz, &q_size, CL_MEM_READ_ONLY);

     // copy tensor to device
     for (int64_t i3 = 0; i3 < ne3; i3++) {
         for (int64_t i2 = 0; i2 < ne2; i2++) {
             int i = i3*ne2 + i2;
-            CL_CHECK(ggml_cl_h2d_tensor_2d(queue, *dst, i*ne0*ne1, tensor, i3, i2, NULL));
+            CL_CHECK(ggml_cl_h2d_tensor_2d(queue, dst, i*ne0*ne1, tensor, i3, i2, NULL));
         }
     }
0 commit comments