We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent abeaad9 · commit cfbff89 — Copy full SHA for cfbff89
neural_compressor/torch/algorithms/fp8_quant/_core/vllm_functions.py
@@ -23,10 +23,10 @@
# Runtime-populated handles to vLLM's tensor-model-parallel collective
# communication functions. They stay None until the vllm import elsewhere
# in this module succeeds; the getter functions below assert on that.
tensor_model_parallel_all_gather = None
tensor_model_parallel_all_reduce = None
def get_vllm_column_parallel_collective_func():
    """Return the vLLM collective used for column-parallel layers (all-gather).

    Raises:
        AssertionError: if the vllm import did not populate
            ``tensor_model_parallel_all_gather`` (it is still None).
    """
    assert tensor_model_parallel_all_gather is not None, "Couldn't import vllm function tensor_model_parallel_all_gather"
    return tensor_model_parallel_all_gather
def get_vllm_row_parallel_collective_func():
    """Return the vLLM collective used for row-parallel layers (all-reduce).

    Raises:
        AssertionError: if the vllm import did not populate
            ``tensor_model_parallel_all_reduce`` (it is still None).
    """
    assert tensor_model_parallel_all_reduce is not None, "Couldn't import vllm function tensor_model_parallel_all_reduce"
    return tensor_model_parallel_all_reduce
0 commit comments