@@ -6385,19 +6385,27 @@ inline void ggml_cuda_op_mul_mat_cublas(
         }
         const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddq_i : src1_as_f16;
 
-        const float alpha = 1.0f;
-        const float beta = 0.0f;
+        size_t dst_as = 0;
+        half * dst_f16 = (half *) ggml_cuda_pool_malloc(row_diff*src1_ncols * sizeof(half), &dst_as);
+
+        const half alpha = 1.0f;
+        const half beta = 0.0f;
 
         CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream));
         CUBLAS_CHECK(
             cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
                     row_diff, src1_ncols, ne10,
                     &alpha, src0_ptr, CUDA_R_16F, ne00,
                             src1_ptr, CUDA_R_16F, ne10,
-                    &beta, dst_dd_i, CUDA_R_32F, ldc,
-                    CUBLAS_COMPUTE_32F,
+                    &beta, dst_f16, CUDA_R_16F, ldc,
+                    CUBLAS_COMPUTE_16F,
                     CUBLAS_GEMM_DEFAULT_TENSOR_OP));
 
+        const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
+        to_fp32_cuda(dst_f16, dst_dd_i, row_diff*src1_ncols, stream);
+
+        ggml_cuda_pool_free(dst_f16, dst_as);
+
         if (src0_as != 0) {
             ggml_cuda_pool_free(src0_as_f16, src0_as);
         }
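For context, a minimal standalone sketch of the pattern this hunk adopts: run cublasGemmEx with FP16 inputs, an FP16 output buffer, and CUBLAS_COMPUTE_16F, then convert the half-precision result back to FP32, analogous to the `to_fp32_cuda` step above. The conversion kernel, buffer names, and matrix sizes below are illustrative assumptions, not the repo's own helpers.

```cpp
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

// Illustrative half -> float conversion kernel (stand-in for the repo's to_fp32_cuda path).
static __global__ void half_to_float(const half * x, float * y, int n) {
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n) {
        y[i] = __half2float(x[i]);
    }
}

int main() {
    const int m = 4, n = 4, k = 4;

    std::vector<half> a_h(m*k), b_h(k*n);
    for (int i = 0; i < m*k; ++i) a_h[i] = __float2half(1.0f);
    for (int i = 0; i < k*n; ++i) b_h[i] = __float2half(2.0f);

    half *a_d, *b_d, *c_f16_d;
    float *c_f32_d;
    cudaMalloc(&a_d,     m*k*sizeof(half));
    cudaMalloc(&b_d,     k*n*sizeof(half));
    cudaMalloc(&c_f16_d, m*n*sizeof(half));   // FP16 destination, like dst_f16 in the patch
    cudaMalloc(&c_f32_d, m*n*sizeof(float));
    cudaMemcpy(a_d, a_h.data(), m*k*sizeof(half), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h.data(), k*n*sizeof(half), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);

    // Scalars must be half when the compute type is CUBLAS_COMPUTE_16F.
    const half alpha = __float2half(1.0f);
    const half beta  = __float2half(0.0f);

    cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N,
            m, n, k,
            &alpha, a_d, CUDA_R_16F, m,
                    b_d, CUDA_R_16F, k,
            &beta,  c_f16_d, CUDA_R_16F, m,
            CUBLAS_COMPUTE_16F,
            CUBLAS_GEMM_DEFAULT_TENSOR_OP);

    // Convert the FP16 result back to FP32, mirroring the to_fp32_cuda step in the diff.
    half_to_float<<<(m*n + 255)/256, 256>>>(c_f16_d, c_f32_d, m*n);

    std::vector<float> c_h(m*n);
    cudaMemcpy(c_h.data(), c_f32_d, m*n*sizeof(float), cudaMemcpyDeviceToHost);
    printf("c[0] = %f (expected %f)\n", c_h[0], 2.0f*k);

    cudaFree(a_d); cudaFree(b_d); cudaFree(c_f16_d); cudaFree(c_f32_d);
    cublasDestroy(handle);
    return 0;
}
```

The trade-off shown here is the same one the patch makes: accumulating in FP16 is faster on tensor cores but less precise than CUBLAS_COMPUTE_32F, and the extra pool buffer plus conversion kernel is the cost of keeping the rest of the pipeline in FP32.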