@@ -80,7 +80,7 @@ def trt_transposed_linear_converter(network, target, args, kwargs, name):
         trt.MatrixOperation.NONE,
     )
     set_layer_name(layer, target, f"{name}_mm")
-    return operator.add_binary_elementwise_layer(
+    return add_binary_elementwise_layer(
         network,
         layer.get_output(0),
         bias,
@@ -679,7 +679,7 @@ def acc_ops_batch_norm(

 @tensorrt_converter(acc_ops.layer_norm)
 def acc_ops_layer_norm(network, target, args, kwargs, name):
-    return operator.add_layer_norm(network, target, kwargs, name)
+    return add_layer_norm(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.softmax)
 def acc_ops_softmax(
@@ -730,7 +730,7 @@ def acc_ops_tile(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_tile(network, target, kwargs, name)
+    return add_tile(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.sign)
 def acc_ops_sign(
@@ -758,7 +758,7 @@ def acc_ops_relu(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return activation.add_relu(network, target, kwargs, name)
+    return add_relu(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.leaky_relu)
 def acc_ops_leaky_relu(
@@ -768,7 +768,7 @@ def acc_ops_leaky_relu(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return activation.add_leaky_relu(network, target, kwargs, name)
+    return add_leaky_relu(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.elu)
 def acc_ops_elu(
@@ -778,7 +778,7 @@ def acc_ops_elu(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return activation.add_elu(network, target, kwargs, name)
+    return add_elu(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.selu)
 def acc_ops_selu(
@@ -788,7 +788,7 @@ def acc_ops_selu(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return activation.add_selu(network, target, kwargs, name)
+    return add_selu(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.softsign)
 def acc_ops_softsign(
@@ -798,7 +798,7 @@ def acc_ops_softsign(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return activation.add_softsign(network, target, kwargs, name)
+    return add_softsign(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.sin)
 def acc_ops_sin(
@@ -873,7 +873,7 @@ def acc_ops_tanh(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return activation.add_tanh(network, target, kwargs, name)
+    return add_tanh(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.asin)
 def acc_ops_asin(
@@ -1190,7 +1190,7 @@ def acc_ops_maximum(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_maximum(network, target, kwargs, name)
+    return add_maximum(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.minimum)
 def acc_ops_minimum(
@@ -1200,7 +1200,7 @@ def acc_ops_minimum(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_minimum(network, target, kwargs, name)
+    return add_minimum(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.dtype)
 def acc_ops_dtype(
@@ -1269,7 +1269,7 @@ def acc_ops_logical_and(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_logical_and(network, target, kwargs, name)
+    return add_logical_and(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.ne, no_implicit_batch_dim=True)
 def acc_ops_ne(
@@ -1279,7 +1279,7 @@ def acc_ops_ne(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_ne(network, target, kwargs, name)
+    return add_ne(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.eq, no_implicit_batch_dim=True)
 def acc_ops_eq(
@@ -1289,7 +1289,7 @@ def acc_ops_eq(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_eq(network, target, kwargs, name)
+    return add_eq(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.gt, no_implicit_batch_dim=True)
 def acc_ops_gt(
@@ -1299,7 +1299,7 @@ def acc_ops_gt(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_gt(network, target, kwargs, name)
+    return add_gt(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.lt, no_implicit_batch_dim=True)
 def acc_ops_lt(
@@ -1309,7 +1309,7 @@ def acc_ops_lt(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_lt(network, target, kwargs, name)
+    return add_lt(network, target, kwargs, name)


 @tensorrt_converter(acc_ops.logical_or, no_implicit_batch_dim=True)
@@ -1320,7 +1320,7 @@ def acc_ops_logical_or(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_logical_or(network, target, kwargs, name)
+    return add_logical_or(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.logical_xor, no_implicit_batch_dim=True)
 def acc_ops_logical_xor(
@@ -1330,7 +1330,7 @@ def acc_ops_logical_xor(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_logical_xor(network, target, kwargs, name)
+    return add_logical_xor(network, target, kwargs, name)

 # T113156424 Have some accuracy problems in hf_T5.
 # [TRT] [W] Weights [name=isinf_1_inf_t]: Converted FP32 value in weights (either FP32 infinity or FP32 value outside FP16 range) to corresponding FP16 infinity. If this is not the desired behavior, please modify the weights or retrain with regularization to reduce the magnitude of the weights.
@@ -1423,7 +1423,7 @@ def acc_ops_fmod(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_fmod(network, target, kwargs, name)
+    return add_fmod(network, target, kwargs, name)

 # T113156424 embedding implemenatation is very limited and shows no usage in hf models due to the indices are int64.
 # if we cast to int32, it will create accuracy issues. We'd better leave it to future implementation.
@@ -1651,7 +1651,7 @@ def acc_ops_add(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_add(network, target, kwargs, name)
+    return add_add(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.sub)
 def acc_ops_sub(
@@ -1661,7 +1661,7 @@ def acc_ops_sub(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_sub(network, target, kwargs, name)
+    return add_sub(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.div)
 def acc_ops_div(
@@ -1671,7 +1671,7 @@ def acc_ops_div(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_div(network, target, kwargs, name)
+    return add_div(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.floor_div)
 def acc_ops_floor_div(
@@ -1681,7 +1681,7 @@ def acc_ops_floor_div(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_floor_div(network, target, kwargs, name)
+    return add_floor_div(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.trunc_div)
 def acc_ops_trunc_div(
@@ -1691,7 +1691,7 @@ def acc_ops_trunc_div(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_trunc_div(network, target, kwargs, name)
+    return add_trunc_div(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.mul)
 def acc_ops_mul(
@@ -1701,7 +1701,7 @@ def acc_ops_mul(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_mul(network, target, kwargs, name)
+    return add_mul(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.pow)
 def acc_ops_pow(
@@ -1711,7 +1711,7 @@ def acc_ops_pow(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_pow(network, target, kwargs, name)
+    return add_pow(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.unsqueeze)
 def acc_ops_unsqueeze(
@@ -1981,7 +1981,7 @@ def acc_ops_slice_tensor(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_slice(network, target, kwargs, name)
+    return add_slice(network, target, kwargs, name)


 @tensorrt_converter(acc_ops.expand)
@@ -1992,7 +1992,7 @@ def acc_ops_expand_tensor(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_expand(network, target, kwargs, name)
+    return add_expand(network, target, kwargs, name)


 @tensorrt_converter(acc_ops.where)
@@ -2214,7 +2214,7 @@ def acc_ops_linear(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_linear(network, target, kwargs, name)
+    return add_linear(network, target, kwargs, name)

 def add_clamp(network, input, val, op, name):
     if not len(input.shape):
@@ -2310,7 +2310,7 @@ def acc_ops_getitem(
     input_val = kwargs["input"]
     slices = kwargs["idx"]
     if not isinstance(input_val, TRTTensor):
-        return operator.getitem(input_val, slices)  # type: ignore[arg-type]
+        return getitem(input_val, slices)  # type: ignore[arg-type]

     if not isinstance(slices, tuple) and not isinstance(slices, list):
         slices = (slices,)
@@ -2467,7 +2467,7 @@ def acc_ops_matmul(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_matmul(network, target, kwargs, name)
+    return add_matmul(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.hardsigmoid)
 def acc_ops_hard_sigmoid(
@@ -2477,7 +2477,7 @@ def acc_ops_hard_sigmoid(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return activation.add_hard_sigmoid(network, target, kwargs, name)
+    return add_hard_sigmoid(network, target, kwargs, name)


 @tensorrt_converter(acc_ops.sigmoid)
@@ -2488,7 +2488,7 @@ def acc_ops_sigmoid(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return activation.add_sigmoid(network, target, kwargs, name)
+    return add_sigmoid(network, target, kwargs, name)


 @tensorrt_converter(acc_ops.permute)
@@ -2689,7 +2689,7 @@ def acc_ops_gelu(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return activation.add_gelu(network, target, kwargs, name)
+    return add_gelu(network, target, kwargs, name)


 @tensorrt_converter(acc_ops.chunk)
@@ -2766,7 +2766,7 @@ def acc_ops_cumsum(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return operator.add_cumsum(network, target, kwargs, name)
+    return add_cumsum(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.hardtanh)
 def acc_ops_hardtanh(
@@ -2776,7 +2776,7 @@ def acc_ops_hardtanh(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    return activation.add_hardtanh(network, target, kwargs, name)
+    return add_hardtanh(network, target, kwargs, name)

 @tensorrt_converter(acc_ops.interpolate)
 def acc_ops_interpolate(