@@ -19,7 +19,7 @@ def make_model_and_data(model, *args, **kwargs):
1919 name = model ._name
2020
2121 # TODO: support multiple outputs
22- output = model (* args ) # eager mode
22+ outputs = model (* args ) # eager mode
2323
2424 # Save model
2525 model_proto = model .to_model_proto ()
@@ -34,23 +34,49 @@ def make_model_and_data(model, *args, **kwargs):
3434 model_proto_ = onnx .shape_inference .infer_shapes (model_proto )
3535 onnx .save (model_proto_ , save_path )
3636
37- # Save inputs and output
37+ # Save inputs
3838 inputs = args
3939 if "force_saving_input_as_dtype_float32" in kwargs and kwargs ["force_saving_input_as_dtype_float32" ]:
4040 inputs = []
4141 for input in args :
4242 inputs .append (input .astype (np .float32 ))
4343 if len (args ) == 1 :
4444 input_file = os .path .join ("data" , "input_" + name )
45- np .save (input_file , inputs [0 ])
45+ if "save_inputs_as_pb" in kwargs and kwargs ["save_inputs_as_pb" ]:
46+ input_tensor = onnx .numpy_helper .from_array (inputs [0 ])
47+ onnx .save_tensor (input_tensor , input_file + ".pb" )
48+ else :
49+ np .save (input_file , inputs [0 ])
4650 else :
4751 for idx , input in enumerate (inputs , start = 0 ):
48- input_files = os .path .join ("data" , "input_" + name + "_" + str (idx ))
49- np .save (input_files , input )
50- if "force_saving_output_as_dtype_float32" in kwargs and kwargs ["force_saving_output_as_dtype_float32" ]:
51- output = output .astype (np .float32 )
52- output_files = os .path .join ("data" , "output_" + name )
53- np .save (output_files , output )
52+ input_file = os .path .join ("data" , "input_{}_{}" .format (name , idx ))
53+ if "save_inputs_as_pb" in kwargs and kwargs ["save_inputs_as_pb" ]:
54+ input_tensor = onnx .numpy_helper .from_array (input )
55+ onnx .save_tensor (input_tensor , input_file + ".pb" )
56+ else :
57+ np .save (input_file , input )
58+
59+ # Save outputs
60+ if isinstance (outputs , tuple ):
61+ for idx , output in enumerate (outputs ):
62+ output_filepath = os .path .join ("data" , "output_{}_{}" .format (name , idx ))
63+ if "force_saving_output_as_dtype_float32" in kwargs and kwargs ["force_saving_output_as_dtype_float32" ]:
64+ output = output .astype (np .float32 )
65+ if "save_outputs_as_pb" in kwargs and kwargs ["save_outputs_as_pb" ]:
66+ input_tensor = onnx .numpy_helper .from_array (output )
67+ onnx .save_tensor (input_tensor , output_filepath + ".pb" )
68+ else :
69+ np .save (output_filepath , output )
70+ else :
71+ output = outputs
72+ if "force_saving_output_as_dtype_float32" in kwargs and kwargs ["force_saving_output_as_dtype_float32" ]:
73+ output = output .astype (np .float32 )
74+ output_filepath = os .path .join ("data" , "output_" + name )
75+ if "save_outputs_as_pb" in kwargs and kwargs ["save_outputs_as_pb" ]:
76+ output_tensor = onnx .numpy_helper .from_array (output )
77+ onnx .save_tensor (output_tensor , output_filepath + ".pb" )
78+ else :
79+ np .save (output_filepath , output )
5480
5581'''
5682 It builds a model with two Gather ops that share a single, common indices tensor:
@@ -390,3 +416,30 @@ def clip_div_shared_constant(x: ost.FLOAT[1, 8, 12, 10]) -> ost.FLOAT[1, 8, 12,
def matmul_bcast(x: ost.FLOAT[64, 1, 16]) -> ost.FLOAT[64, 1, 8]:
    """MatMul of the input against a constant weight, exercising broadcast.

    NOTE(review): `B` is defined earlier in the file, outside this view;
    presumably it has shape (16, 8) so the product is (64, 1, 8) — confirm.
    """
    weight = op.Constant(value=onnx.numpy_helper.from_array(B))
    return op.MatMul(x, weight)
make_model_and_data(matmul_bcast, np.random.randn(64, 1, 16).astype(np.float32))
419+
420+ ''' TopK conformance
421+ '''
422+
# K for all TopK conformance models: a 1-D int64 tensor selecting 3 elements.
top_k_K_arr = np.array([3], dtype=np.int64)

@ost.script()
def top_k(x: ost.FLOAT[3, 4]) -> (ost.FLOAT[3, 3], ost.INT64[3, 3]):
    """TopK along axis 1: the 3 largest values per row, plus their indices."""
    k_const = op.Constant(value=onnx.numpy_helper.from_array(top_k_K_arr))
    vals, idxs = op.TopK(x, k_const, axis=1)
    return vals, idxs
428+
@ost.script()
def top_k_negative_axis(x: ost.FLOAT[3, 4]) -> (ost.FLOAT[3, 3], ost.INT64[3, 3]):
    """Same selection as `top_k`, but the last axis is addressed as -1."""
    k_const = op.Constant(value=onnx.numpy_helper.from_array(top_k_K_arr))
    vals, idxs = op.TopK(x, k_const, axis=-1)
    return vals, idxs
433+
@ost.script()
def top_k_smallest(x: ost.FLOAT[3, 4]) -> (ost.FLOAT[3, 3], ost.INT64[3, 3]):
    """TopK with largest=0: the 3 smallest values per row, emitted sorted."""
    k_const = op.Constant(value=onnx.numpy_helper.from_array(top_k_K_arr))
    vals, idxs = op.TopK(x, k_const, axis=1, largest=0, sorted=1)
    return vals, idxs
438+
# Conformance inputs: 3x4 float32 matrices. input1 reverses the last row so
# that the "smallest" variant's output differs from plain ascending data.
top_k_input0 = list(range(12))
top_k_input1 = [0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8]
top_k_input0_arr = np.asarray(top_k_input0, dtype=np.float32).reshape(3, 4)
top_k_input1_arr = np.asarray(top_k_input1, dtype=np.float32).reshape(3, 4)

# Emit each model together with its input/output tensors as protobuf files.
make_model_and_data(top_k, top_k_input0_arr, save_inputs_as_pb=True, save_outputs_as_pb=True)
make_model_and_data(top_k_negative_axis, top_k_input0_arr, save_inputs_as_pb=True, save_outputs_as_pb=True)
make_model_and_data(top_k_smallest, top_k_input1_arr, save_inputs_as_pb=True, save_outputs_as_pb=True)
0 commit comments