diff --git a/testdata/dnn/onnx/data/input_PReLU_slope.npy b/testdata/dnn/onnx/data/input_PReLU_slope.npy
new file mode 100644
index 000000000..b850fc49b
Binary files /dev/null and b/testdata/dnn/onnx/data/input_PReLU_slope.npy differ
diff --git a/testdata/dnn/onnx/data/input_expand.npy b/testdata/dnn/onnx/data/input_expand.npy
new file mode 100644
index 000000000..5c2c1c121
Binary files /dev/null and b/testdata/dnn/onnx/data/input_expand.npy differ
diff --git a/testdata/dnn/onnx/data/input_expand_identity.npy b/testdata/dnn/onnx/data/input_expand_identity.npy
new file mode 100644
index 000000000..b2adf81de
Binary files /dev/null and b/testdata/dnn/onnx/data/input_expand_identity.npy differ
diff --git a/testdata/dnn/onnx/data/input_reduceL2_subgraph2_2.npy b/testdata/dnn/onnx/data/input_reduceL2_subgraph2_2.npy
new file mode 100644
index 000000000..a69a25a2a
Binary files /dev/null and b/testdata/dnn/onnx/data/input_reduceL2_subgraph2_2.npy differ
diff --git a/testdata/dnn/onnx/data/input_scale_broadcast_mid_0.npy b/testdata/dnn/onnx/data/input_scale_broadcast_mid_0.npy
new file mode 100644
index 000000000..59fe767af
Binary files /dev/null and b/testdata/dnn/onnx/data/input_scale_broadcast_mid_0.npy differ
diff --git a/testdata/dnn/onnx/data/input_scale_broadcast_mid_1.npy b/testdata/dnn/onnx/data/input_scale_broadcast_mid_1.npy
new file mode 100644
index 000000000..4175e2dc7
Binary files /dev/null and b/testdata/dnn/onnx/data/input_scale_broadcast_mid_1.npy differ
diff --git a/testdata/dnn/onnx/data/input_slice_neg_starts.npy b/testdata/dnn/onnx/data/input_slice_neg_starts.npy
new file mode 100644
index 000000000..796895eaf
Binary files /dev/null and b/testdata/dnn/onnx/data/input_slice_neg_starts.npy differ
diff --git a/testdata/dnn/onnx/data/input_split_neg_axis.npy b/testdata/dnn/onnx/data/input_split_neg_axis.npy
new file mode 100644
index 000000000..7aa3924e7
Binary files /dev/null and b/testdata/dnn/onnx/data/input_split_neg_axis.npy differ
diff --git a/testdata/dnn/onnx/data/output_PReLU_slope.npy b/testdata/dnn/onnx/data/output_PReLU_slope.npy
new file mode 100644
index 000000000..e8d0d5d20
Binary files /dev/null and b/testdata/dnn/onnx/data/output_PReLU_slope.npy differ
diff --git a/testdata/dnn/onnx/data/output_expand.npy b/testdata/dnn/onnx/data/output_expand.npy
new file mode 100644
index 000000000..dd95e515f
Binary files /dev/null and b/testdata/dnn/onnx/data/output_expand.npy differ
diff --git a/testdata/dnn/onnx/data/output_expand_identity.npy b/testdata/dnn/onnx/data/output_expand_identity.npy
new file mode 100644
index 000000000..b2adf81de
Binary files /dev/null and b/testdata/dnn/onnx/data/output_expand_identity.npy differ
diff --git a/testdata/dnn/onnx/data/output_reduceL2_subgraph2_2.npy b/testdata/dnn/onnx/data/output_reduceL2_subgraph2_2.npy
new file mode 100644
index 000000000..cd7a5ae6a
Binary files /dev/null and b/testdata/dnn/onnx/data/output_reduceL2_subgraph2_2.npy differ
diff --git a/testdata/dnn/onnx/data/output_scale_broadcast_mid.npy b/testdata/dnn/onnx/data/output_scale_broadcast_mid.npy
new file mode 100644
index 000000000..4175e2dc7
Binary files /dev/null and b/testdata/dnn/onnx/data/output_scale_broadcast_mid.npy differ
diff --git a/testdata/dnn/onnx/data/output_slice_neg_starts.npy b/testdata/dnn/onnx/data/output_slice_neg_starts.npy
new file mode 100644
index 000000000..796895eaf
Binary files /dev/null and b/testdata/dnn/onnx/data/output_slice_neg_starts.npy differ
diff --git a/testdata/dnn/onnx/data/output_split_neg_axis.npy b/testdata/dnn/onnx/data/output_split_neg_axis.npy
new file mode 100644
index 000000000..7aa3924e7
Binary files /dev/null and b/testdata/dnn/onnx/data/output_split_neg_axis.npy differ
diff --git a/testdata/dnn/onnx/generate_onnx_models.py b/testdata/dnn/onnx/generate_onnx_models.py
index 6a5515281..8b9d20d33 100644
--- a/testdata/dnn/onnx/generate_onnx_models.py
+++ b/testdata/dnn/onnx/generate_onnx_models.py
@@ -10,6 +10,7 @@
 import numpy as np
 import os.path
 import onnx
+import onnxsim
 import google.protobuf.text_format
 import io
 
@@ -74,6 +75,14 @@ def save_onnx_data_and_model(input, output, name, operation, *args, **kwargs):
     model = onnx.helper.make_model(graph, producer_name=name)
     onnx.save(model, models_files)
 
+def simplify(name, rename=False, **kwargs):
+    model, check = onnxsim.simplify(name, **kwargs)
+    assert check, "couldn't validate"
+    name = name[:-5]
+    if rename:
+        name += '_optimized'
+    onnx.save(model, name + '.onnx')
+
 torch.manual_seed(0)
 np.random.seed(0)
 
@@ -129,6 +138,18 @@ def save_onnx_data_and_model(input, output, name, operation, *args, **kwargs):
 relu = nn.ReLU(inplace=True)
 save_data_and_model("ReLU", input, relu)
 
+class PReLU_slope(nn.Module):
+    def __init__(self, *args, **kwargs):
+        super(PReLU_slope, self).__init__()
+
+    def forward(self, x):
+        return nn.PReLU()(x)
+
+model = PReLU_slope()
+input_ = Variable(torch.randn(1, 1, 5, 5, dtype=torch.float32))
+save_data_and_model("PReLU_slope", input_, model, export_params=True)
+simplify('models/PReLU_slope.onnx', False)
+
 
 input = Variable(torch.randn(2, 3))
 dropout = nn.Dropout()
@@ -414,6 +435,17 @@ def forward(self, x):
 save_data_and_model("slice", input, model)
 save_data_and_model("slice_opset_11", input, model, version=11)
 
+class SliceStarts(nn.Module):
+    def __init__(self, *args, **kwargs):
+        super(SliceStarts, self).__init__()
+
+    def forward(self, x):
+        return x[-1:]
+
+model = SliceStarts()
+input_ = Variable(torch.randn(1, 10, dtype=torch.float32))
+save_data_and_model("slice_neg_starts", input_, model)
+
 input_2 = Variable(torch.randn(6, 6))
 custom_slice_list = [
     slice(1, 3, 1),
@@ -575,6 +607,18 @@ def forward(self, x):
 input_ = Variable(torch.tensor(list(range(20)), dtype=torch.float32))
 save_data_and_model("split_sizes", input_, model)
 
+class SplitAxis(nn.Module):
+    def __init__(self, *args, **kwargs):
+        super(SplitAxis, self).__init__()
+
+    def forward(self, x):
+        tup = torch.split(x, 2, -1)
+        return torch.cat(tup, 1)
+
+model = SplitAxis()
+input_ = Variable(torch.randn(1, 10, dtype=torch.float32))
+save_data_and_model("split_neg_axis", input_, model)
+
 
 class SplitMax(nn.Module):
     def __init__(self):
@@ -865,6 +909,32 @@ def forward(self, x):
 output = np.mean(x, axis=2, keepdims=True)
 save_onnx_data_and_model(x, output, 'reduce_mean_axis2', 'ReduceMean', axes=(2), keepdims=True)
 
+class Expand(nn.Module):
+    def __init__(self):
+        super(Expand, self).__init__()
+
+    def forward(self, x):
+        return x.expand(1, 3, -1, -1, -1)
+
+input = Variable(torch.randn(1, 3, 2, 4))
+model = Expand()
+model.eval()
+save_data_and_model("expand", input, model, export_params=True, version=12)
+simplify('models/expand.onnx', False)
+
+class ExpandIdentity(nn.Module):
+    def __init__(self):
+        super(ExpandIdentity, self).__init__()
+
+    def forward(self, x):
+        return x.expand(1, 3, -1, -1)
+
+input = Variable(torch.randn(1, 3, 2, 4))
+model = ExpandIdentity()
+model.eval()
+save_data_and_model("expand_identity", input, model, export_params=True, version=12)
+simplify('models/expand_identity.onnx', False)
+
 class Expand(nn.Module):
     def __init__(self, shape):
         super(Expand, self).__init__()
@@ -933,6 +1003,23 @@ def forward(self, x):
 x = Variable(torch.randn(1, 2, 3, 4))
 save_data_and_model("reduceL2_subgraph_2", x, model)
 
+class reduceL2_subgraph2_2(nn.Module):
+    def __init__(self):
+        super(reduceL2_subgraph2_2, self).__init__()
+        self.size = torch.Size([1, 3, 2, 4])
+
+    def forward(self, x):
+        norm = torch.norm(x, p=2, dim=1, keepdim=True)
+        clip = torch.clamp(norm, min=0)
+        expand = clip.expand([1, 3, 2, 4])
+        return x / expand
+
+input = Variable(torch.randn(1, 3, 2, 4))
+model = reduceL2_subgraph2_2()
+model.eval()
+save_data_and_model("reduceL2_subgraph2_2", input, model, export_params=True, version=12)
+simplify('models/reduceL2_subgraph2_2.onnx', False)
+
 from torchvision.ops.misc import *
 n = 3
 model = FrozenBatchNorm2d(n)
@@ -1173,6 +1260,18 @@ def forward(self, x0, x1, x2):
 input_2 = Variable(torch.ones(2, 1, 4, 1, dtype=torch.float32))
 save_data_and_model_multy_inputs("scale_broadcast", model, input_0, input_1, input_2)
 
+class ScaleBroadcastMid(nn.Module):
+    def __init__(self, *args, **kwargs):
+        super(ScaleBroadcastMid, self).__init__()
+
+    def forward(self, x0, x1):
+        return torch.mul(x0, x1)
+
+model = ScaleBroadcastMid()
+input_0 = Variable(torch.ones(2, 1, 4, dtype=torch.float32))
+input_1 = Variable(torch.ones(2, 5, 4, dtype=torch.float32))
+save_data_and_model_multy_inputs("scale_broadcast_mid", model, input_0, input_1)
+
 x = Variable(torch.randn(1, 3, 25))
 conv1d = nn.Conv1d(3, 2, kernel_size=3, padding=2, stride=2, dilation=2, bias=False)
 save_data_and_model("conv1d", x, conv1d)
diff --git a/testdata/dnn/onnx/models/PReLU_slope.onnx b/testdata/dnn/onnx/models/PReLU_slope.onnx
new file mode 100644
index 000000000..b0d72218b
Binary files /dev/null and b/testdata/dnn/onnx/models/PReLU_slope.onnx differ
diff --git a/testdata/dnn/onnx/models/expand.onnx b/testdata/dnn/onnx/models/expand.onnx
new file mode 100644
index 000000000..50fabecae
Binary files /dev/null and b/testdata/dnn/onnx/models/expand.onnx differ
diff --git a/testdata/dnn/onnx/models/expand_identity.onnx b/testdata/dnn/onnx/models/expand_identity.onnx
new file mode 100644
index 000000000..ddec80015
Binary files /dev/null and b/testdata/dnn/onnx/models/expand_identity.onnx differ
diff --git a/testdata/dnn/onnx/models/reduceL2_subgraph2_2.onnx b/testdata/dnn/onnx/models/reduceL2_subgraph2_2.onnx
new file mode 100644
index 000000000..141301bb4
Binary files /dev/null and b/testdata/dnn/onnx/models/reduceL2_subgraph2_2.onnx differ
diff --git a/testdata/dnn/onnx/models/scale_broadcast_mid.onnx b/testdata/dnn/onnx/models/scale_broadcast_mid.onnx
new file mode 100644
index 000000000..766bdb73e
Binary files /dev/null and b/testdata/dnn/onnx/models/scale_broadcast_mid.onnx differ
diff --git a/testdata/dnn/onnx/models/slice_neg_starts.onnx b/testdata/dnn/onnx/models/slice_neg_starts.onnx
new file mode 100644
index 000000000..07c0c9966
Binary files /dev/null and b/testdata/dnn/onnx/models/slice_neg_starts.onnx differ
diff --git a/testdata/dnn/onnx/models/split_neg_axis.onnx b/testdata/dnn/onnx/models/split_neg_axis.onnx
new file mode 100644
index 000000000..b2b5fceb7
Binary files /dev/null and b/testdata/dnn/onnx/models/split_neg_axis.onnx differ
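
Note: the simplify() helper added to generate_onnx_models.py is a thin wrapper around onnx-simplifier (onnxsim). A minimal standalone sketch of the same call, with an illustrative model path, looks like this:

    # Run onnx-simplifier on an exported model and save the result over the original file.
    # The path is only an example; any model produced by save_data_and_model() would do.
    import onnx
    import onnxsim

    model, check = onnxsim.simplify('models/PReLU_slope.onnx')
    assert check, "onnx-simplifier could not validate the simplified model"
    onnx.save(model, 'models/PReLU_slope.onnx')
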
diff --git a/testdata/dnn/tensorflow/bias_add_1_in.npy b/testdata/dnn/tensorflow/bias_add_1_in.npy
new file mode 100644
index 000000000..2e749acdd
Binary files /dev/null and b/testdata/dnn/tensorflow/bias_add_1_in.npy differ
diff --git a/testdata/dnn/tensorflow/bias_add_1_net.pb b/testdata/dnn/tensorflow/bias_add_1_net.pb
new file mode 100644
index 000000000..517a5a559
Binary files /dev/null and b/testdata/dnn/tensorflow/bias_add_1_net.pb differ
diff --git a/testdata/dnn/tensorflow/bias_add_1_out.npy b/testdata/dnn/tensorflow/bias_add_1_out.npy
new file mode 100644
index 000000000..e0fcb7290
Binary files /dev/null and b/testdata/dnn/tensorflow/bias_add_1_out.npy differ
diff --git a/testdata/dnn/tensorflow/generate_tf_models.py b/testdata/dnn/tensorflow/generate_tf_models.py
index 06b88aaaa..2a08c38ed 100644
--- a/testdata/dnn/tensorflow/generate_tf_models.py
+++ b/testdata/dnn/tensorflow/generate_tf_models.py
@@ -1019,6 +1019,19 @@ def pad_depth(x, desired_channels):
 input_down = tf.image.resize(conv, size=[hi, wi], method=0, name='resize_down')
 save(inp, input_down, 'resize_bilinear_down')
 ################################################################################
+inp = tf.placeholder(tf.float32, [1, None, None, 3], 'input')
+biased = tf.nn.bias_add(inp, [1, 2, 3], data_format='NHWC')
+resized1 = tf.image.resize(biased, [5, 6])
+concat = tf.concat([resized1, biased], 3)
+# blob = np.random.standard_normal([1, 5, 6, 3]).astype(tf.float32.as_numpy_dtype())
+# writeBlob(blob, 'resize_concat_optimization_in')
+save(inp, concat, 'resize_concat_optimization', optimize=False, is_gen_data=False)
+################################################################################
+inp = tf.placeholder(tf.float32, [1, 2, 3, 4], 'input')
+sub = inp - 3.0
+sub = 4.0 + sub
+save(inp, sub, prefix + 'bias_add_1', optimize=False)
+################################################################################
 
 # Uncomment to print the final graph.
 # with tf.gfile.FastGFile('fused_batch_norm_net.pb', 'rb') as f:
diff --git a/testdata/dnn/tensorflow/resize_concat_optimization_in.npy b/testdata/dnn/tensorflow/resize_concat_optimization_in.npy
new file mode 100644
index 000000000..a56e39e32
Binary files /dev/null and b/testdata/dnn/tensorflow/resize_concat_optimization_in.npy differ
diff --git a/testdata/dnn/tensorflow/resize_concat_optimization_net.pb b/testdata/dnn/tensorflow/resize_concat_optimization_net.pb
new file mode 100644
index 000000000..27256c1e8
Binary files /dev/null and b/testdata/dnn/tensorflow/resize_concat_optimization_net.pb differ
diff --git a/testdata/dnn/tensorflow/resize_concat_optimization_out.npy b/testdata/dnn/tensorflow/resize_concat_optimization_out.npy
new file mode 100644
index 000000000..759289158
Binary files /dev/null and b/testdata/dnn/tensorflow/resize_concat_optimization_out.npy differ
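
Note: the new bias_add_1 graph computes 4.0 + (input - 3.0), which folds to input + 1.0, so the generated reference pair can be cross-checked with NumPy. A sketch, assuming the .npy files are read from the paths added by this change:

    import numpy as np

    inp = np.load('testdata/dnn/tensorflow/bias_add_1_in.npy')
    out = np.load('testdata/dnn/tensorflow/bias_add_1_out.npy')
    # the two constant bias terms should fold to a single +1.0 offset
    assert np.allclose(out, inp + 1.0)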