
Commit 57d8e0c

replace allow_complex_guards_as_runtime_asserts with prefer_deferred_ru… (#3809)
1 parent f68a808 commit 57d8e0c

File tree

8 files changed (+13, -13 lines)

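The change is mechanical: every export call that previously passed allow_complex_guards_as_runtime_asserts=True now passes prefer_deferred_runtime_asserts_over_guards=True, presumably tracking the rename of that option in torch.export. Below is a minimal sketch of the call pattern the updated examples use; it assumes _export is torch.export._trace._export (the hunks in this commit do not show the import) and uses a toy model and dynamic dimension purely for illustration.

# Minimal sketch of the updated export pattern (assumptions noted above).
import torch
from torch.export import Dim
from torch.export._trace import _export  # assumed import path for the non-strict export entry point


class ToyModel(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.relu(x)


model = ToyModel().eval()
inputs = (torch.randn(2, 8),)
seq_len = Dim("seq_len", min=1, max=16)  # illustrative dynamic dimension

ep = _export(
    model,
    inputs,
    dynamic_shapes=({1: seq_len},),
    strict=False,
    prefer_deferred_runtime_asserts_over_guards=True,  # formerly allow_complex_guards_as_runtime_asserts
)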

examples/apps/flux_demo.py

Lines changed: 1 addition & 1 deletion
@@ -121,7 +121,7 @@ def forward_loop(mod):
 
     settings = {
         "strict": False,
-        "allow_complex_guards_as_runtime_asserts": True,
+        "prefer_deferred_runtime_asserts_over_guards": True,
         "enabled_precisions": enabled_precisions,
         "truncate_double": True,
         "min_block_size": 1,

examples/dynamo/torch_export_flux_dev.py

Lines changed: 1 addition & 1 deletion
@@ -92,7 +92,7 @@
     kwargs=dummy_inputs,
     dynamic_shapes=dynamic_shapes,
     strict=False,
-    allow_complex_guards_as_runtime_asserts=True,
+    prefer_deferred_runtime_asserts_over_guards=True,
 )
 
 # %%

examples/dynamo/weight_streaming_example.py

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ def export_llm(model, inputs, min_seq_len=1, max_seq_len=16):
         kwargs={"position_ids": position_ids},
         dynamic_shapes=({1: seq_len}, {1: seq_len}),
         strict=False,
-        allow_complex_guards_as_runtime_asserts=True,
+        prefer_deferred_runtime_asserts_over_guards=True,
     )
 
     return ep

py/torch_tensorrt/dynamo/runtime/_MutableTorchTensorRTModule.py

Lines changed: 5 additions & 5 deletions
@@ -68,7 +68,7 @@ def __init__(
         use_python_runtime: bool = _defaults.USE_PYTHON_RUNTIME,
         immutable_weights: bool = False,
         strict: bool = True,
-        allow_complex_guards_as_runtime_asserts: bool = False,
+        prefer_deferred_runtime_asserts_over_guards: bool = False,
         weight_streaming_budget: Optional[int] = None,
         enabled_precisions: Optional[Set[Union[torch.dtype, dtype]]] = None,
         **kwargs: Any,
@@ -134,8 +134,8 @@ def __init__(
         self.kwarg_inputs: dict[str, Any] = {}
         self.additional_settings = kwargs
         self.strict = strict
-        self.allow_complex_guards_as_runtime_asserts = (
-            allow_complex_guards_as_runtime_asserts
+        self.prefer_deferred_runtime_asserts_over_guards = (
+            prefer_deferred_runtime_asserts_over_guards
         )
         self.use_python_runtime = use_python_runtime
         self.trt_device = to_torch_tensorrt_device(device)
@@ -312,14 +312,14 @@ def refit_gm(self) -> None:
     def get_exported_program(self) -> torch.export.ExportedProgram:
 
         def export_fn() -> torch.export.ExportedProgram:
-            if self.allow_complex_guards_as_runtime_asserts:
+            if self.prefer_deferred_runtime_asserts_over_guards:
                 return _export(
                     self.original_model,
                     self.arg_inputs,
                     kwargs=self.kwarg_inputs,
                     dynamic_shapes=self._get_total_dynamic_shapes(),
                     strict=self.strict,
-                    allow_complex_guards_as_runtime_asserts=self.allow_complex_guards_as_runtime_asserts,
+                    prefer_deferred_runtime_asserts_over_guards=self.prefer_deferred_runtime_asserts_over_guards,
                 )
             else:
                 return torch.export.export(
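In the mutable module wrapper the renamed flag is simply threaded from the constructor into export_fn, selecting _export over torch.export.export when it is set. A hedged usage sketch follows; it assumes the class is exposed as torch_tensorrt.MutableTorchTensorRTModule, that the other constructor keywords keep their defaults, and that a CUDA device is available.

# Hedged usage sketch: only the renamed keyword is taken from this commit;
# the public entry point and the toy model are assumptions for illustration.
import torch
import torch_tensorrt


class ToyModel(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.relu(x)


mutable_module = torch_tensorrt.MutableTorchTensorRTModule(
    ToyModel().eval().cuda(),  # assumes a CUDA device is present
    strict=False,
    prefer_deferred_runtime_asserts_over_guards=True,  # routed into get_exported_program() above
)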

tests/py/dynamo/models/test_engine_cache.py

Lines changed: 1 addition & 1 deletion
@@ -856,7 +856,7 @@ def export_llm(model, inputs, min_seq_len=1, max_seq_len=16):
         (inputs,),
         dynamic_shapes=({1: seq_len},),
         strict=False,
-        allow_complex_guards_as_runtime_asserts=True,
+        prefer_deferred_runtime_asserts_over_guards=True,
     )
 
     return ep

tools/llm/test_llama_components.py

Lines changed: 2 additions & 2 deletions
@@ -79,7 +79,7 @@ def test_llama_attention(args):
         args=(hidden_states, position_embeddings, None),
         dynamic_shapes=dynamic_shapes,
         strict=False,
-        allow_complex_guards_as_runtime_asserts=True,
+        prefer_deferred_runtime_asserts_over_guards=True,
     )
 
     with torch_tensorrt.logging.debug() if args.debug else nullcontext():
@@ -463,7 +463,7 @@ def test_llama_model(args):
         kwargs=kwarg_inputs,
         dynamic_shapes=dynamic_shapes,
         strict=False,
-        allow_complex_guards_as_runtime_asserts=True,
+        prefer_deferred_runtime_asserts_over_guards=True,
     )
 
     with torch_tensorrt.logging.debug() if args.debug else nullcontext():

tools/llm/utils.py

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ def export_llm(model, inputs, min_seq_len=1, max_seq_len=16):
         kwargs={"position_ids": position_ids},
         dynamic_shapes=({1: seq_len}, {1: seq_len}),
         strict=False,
-        allow_complex_guards_as_runtime_asserts=True,
+        prefer_deferred_runtime_asserts_over_guards=True,
     )
 
     return ep

tools/perf/utils.py

Lines changed: 1 addition & 1 deletion
@@ -228,7 +228,7 @@ def export_llm(model, inputs, min_seq_len=1, max_seq_len=16):
         (inputs,),
         dynamic_shapes=({1: seq_len},),
         strict=False,
-        allow_complex_guards_as_runtime_asserts=True,
+        prefer_deferred_runtime_asserts_over_guards=True,
     )
 
     return ep

0 commit comments
