@@ -27,14 +27,16 @@ float tensor_sum_elements(const ggml_tensor * tensor) {
2727 return sum;
2828}
2929
30- void tensor_dump (const ggml_tensor * tensor) {
31- printf (" %15s: type = %i (%5s) ne = %5d x %5d x %5d, nb = (%5li, %5li, %5li) - " , " tensor " ,
30+ void tensor_dump (const ggml_tensor * tensor, const char * name ) {
31+ printf (" %15s: type = %i (%5s) ne = %5d x %5d x %5d, nb = (%5li, %5li, %5li) - " , name ,
3232 tensor->type , ggml_type_name (tensor->type ),
3333 (int ) tensor->ne [0 ], (int ) tensor->ne [1 ], (int ) tensor->ne [2 ], tensor->nb [0 ], tensor->nb [1 ], tensor->nb [2 ]);
3434 float sum = tensor_sum_elements (tensor);
3535 printf (" Sum of tensor %s is %6.2f\n " ," tensor" , sum);
3636}
3737
// Dump a tensor labelled with its source-level name: the preprocessor `#`
// operator stringifies the argument, so TENSOR_DUMP(m11) prints "m11".
#define TENSOR_DUMP(t) tensor_dump(t, #t)
39+
3840struct benchmark_params_struct {
3941 int32_t n_threads = 1 ;
4042 int32_t n_iterations = 10 ;
@@ -155,12 +157,12 @@ int main(int argc, char ** argv) {
155157 gf.n_threads =benchmark_params.n_threads ;
156158 printf (" cgraph->n_threads=%i\n " ,gf.n_threads );
157159
158- tensor_dump (m11);
159- tensor_dump (m2);
160+ TENSOR_DUMP (m11);
161+ TENSOR_DUMP (m2);
160162
161163 ggml_graph_compute (ctx, &gf);
162164
163- tensor_dump (gf.nodes [0 ]);
165+ TENSOR_DUMP (gf.nodes [0 ]);
164166
165167 printf (" \n ------ Test 2 - Matrix Mult via Q4_0 code ------------------------------------------------------------------------------\n " );
166168
@@ -224,7 +226,7 @@ int main(int argc, char ** argv) {
224226 usec,gflops);
225227
226228#ifdef VERBOSE_DEBUGGING
227- tensor_dump (" res" ,gf31.nodes [0 ])
229+ TENSOR_DUMP (" res" ,gf31.nodes [0 ])
228230#endif
229231
230232 // Check that the matrix multiplication result is in the right ballpark
0 commit comments