
Commit b2aabde

Update base for Update on "Enable aoti for preprocess + torch pin update"

Update torch nightly pin to 11/01 after: pytorch/pytorch#137063

Test Plan: With pytorch/pytorch#137063:

```
pytest -c /dev/null -v -n auto examples/models/llama3_2_vision/preprocess/
```

[ghstack-poisoned]

2 parents: 0faa31d + 09cf982

File tree

backends/apple/coreml/README.md
docs/source/llm/getting-started.md
shim/xplat/executorch/kernels/portable/op_registration_util.bzl

3 files changed: +8 -23 lines changed

backends/apple/coreml/README.md

Lines changed: 2 additions & 2 deletions
```diff
@@ -65,7 +65,7 @@ To quantize a Program in a Core ML favored way, the client may utilize **CoreMLQ
 import torch
 import executorch.exir
 
-from torch._export import capture_pre_autograd_graph
+from torch.export import export_for_training
 from torch.ao.quantization.quantize_pt2e import (
     convert_pt2e,
     prepare_pt2e,
@@ -93,7 +93,7 @@ class Model(torch.nn.Module):
 source_model = Model()
 example_inputs = (torch.randn((1, 3, 256, 256)), )
 
-pre_autograd_aten_dialect = capture_pre_autograd_graph(model, example_inputs)
+pre_autograd_aten_dialect = export_for_training(model, example_inputs).module()
 
 quantization_config = LinearQuantizerConfig.from_dict(
     {
```
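
For orientation, here is a minimal, self-contained sketch of the capture-and-quantize flow the updated README describes, using the new `export_for_training` entry point. The `CoreMLQuantizer` and `LinearQuantizerConfig` import paths and the contents of the config dictionary are assumptions based on the surrounding README context, not part of this diff.

```python
import torch
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from torch.export import export_for_training  # replaces torch._export.capture_pre_autograd_graph

# Assumed import paths -- they come from parts of the README not shown in this diff.
from coremltools.optimize.torch.quantization.quantization_config import (
    LinearQuantizerConfig,
    QuantizationScheme,
)
from executorch.backends.apple.coreml.quantizer import CoreMLQuantizer


class Model(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x)


source_model = Model()
example_inputs = (torch.randn((1, 3, 256, 256)),)

# New API: export_for_training(...).module() returns the pre-autograd module that
# capture_pre_autograd_graph(...) used to return directly.
pre_autograd_aten_dialect = export_for_training(source_model, example_inputs).module()

# Illustrative config only; the README builds it with LinearQuantizerConfig.from_dict(...).
quantization_config = LinearQuantizerConfig.from_dict(
    {
        "global_config": {
            "quantization_scheme": QuantizationScheme.symmetric,
            "milestones": [0, 0, 10, 10],
        }
    }
)
quantizer = CoreMLQuantizer(quantization_config)

prepared_model = prepare_pt2e(pre_autograd_aten_dialect, quantizer)
prepared_model(*example_inputs)  # calibration pass with representative inputs
quantized_model = convert_pt2e(prepared_model)
```

Apart from the new import and the trailing `.module()` call, the flow is unchanged from the previous `capture_pre_autograd_graph` version.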

docs/source/llm/getting-started.md

Lines changed: 5 additions & 6 deletions
```diff
@@ -144,8 +144,7 @@ import torch
 
 from executorch.exir import EdgeCompileConfig, to_edge
 from torch.nn.attention import sdpa_kernel, SDPBackend
-from torch._export import capture_pre_autograd_graph
-from torch.export import export
+from torch.export import export, export_for_training
 
 from model import GPT
 
@@ -170,7 +169,7 @@ dynamic_shape = (
 # Trace the model, converting it to a portable intermediate representation.
 # The torch.no_grad() call tells PyTorch to exclude training-specific logic.
 with torch.nn.attention.sdpa_kernel([SDPBackend.MATH]), torch.no_grad():
-    m = capture_pre_autograd_graph(model, example_inputs, dynamic_shapes=dynamic_shape)
+    m = export_for_training(model, example_inputs, dynamic_shapes=dynamic_shape).module()
     traced_model = export(m, example_inputs, dynamic_shapes=dynamic_shape)
 
 # Convert the model into a runnable ExecuTorch program.
@@ -462,7 +461,7 @@ from executorch.exir import EdgeCompileConfig, to_edge
 import torch
 from torch.export import export
 from torch.nn.attention import sdpa_kernel, SDPBackend
-from torch._export import capture_pre_autograd_graph
+from torch.export import export_for_training
 
 from model import GPT
 
@@ -489,7 +488,7 @@ dynamic_shape = (
 # Trace the model, converting it to a portable intermediate representation.
 # The torch.no_grad() call tells PyTorch to exclude training-specific logic.
 with torch.nn.attention.sdpa_kernel([SDPBackend.MATH]), torch.no_grad():
-    m = capture_pre_autograd_graph(model, example_inputs, dynamic_shapes=dynamic_shape)
+    m = export_for_training(model, example_inputs, dynamic_shapes=dynamic_shape).module()
     traced_model = export(m, example_inputs, dynamic_shapes=dynamic_shape)
 
 # Convert the model into a runnable ExecuTorch program.
@@ -635,7 +634,7 @@ xnnpack_quant_config = get_symmetric_quantization_config(
 xnnpack_quantizer = XNNPACKQuantizer()
 xnnpack_quantizer.set_global(xnnpack_quant_config)
 
-m = capture_pre_autograd_graph(model, example_inputs)
+m = export_for_training(model, example_inputs).module()
 
 # Annotate the model for quantization. This prepares the model for calibration.
 m = prepare_pt2e(m, xnnpack_quantizer)
```
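
To see the updated tutorial sequence end to end, here is a minimal sketch of the two-stage trace (training-IR capture, then `export`). The stand-in embedding module and the `token_dim` dynamic-shape spec are placeholders for the tutorial's `GPT` model and its `dynamic_shape` definition, which are outside this diff.

```python
import torch
from torch.export import Dim, export, export_for_training
from torch.nn.attention import SDPBackend, sdpa_kernel

# Stand-in for the tutorial's `from model import GPT`: any nn.Module that takes a
# (batch, seq_len) tensor of token ids is enough to show the call sequence.
model = torch.nn.Embedding(num_embeddings=128, embedding_dim=16)
example_inputs = (torch.randint(0, 128, (1, 8)),)

# Assumed dynamic-shape spec: let the sequence dimension vary, mirroring the
# tutorial's `dynamic_shape` definition.
dynamic_shape = ({1: Dim("token_dim", max=64)},)

# Trace the model, converting it to a portable intermediate representation.
# torch.no_grad() excludes training-specific logic; the MATH SDPA backend keeps
# attention decomposed into exportable ops for transformer models like GPT.
with sdpa_kernel([SDPBackend.MATH]), torch.no_grad():
    # Stage 1: export_for_training(...).module() replaces capture_pre_autograd_graph(...).
    m = export_for_training(model, example_inputs, dynamic_shapes=dynamic_shape).module()
    # Stage 2: unchanged -- the standard torch.export trace of the captured module.
    traced_model = export(m, example_inputs, dynamic_shapes=dynamic_shape)
```

The same `.module()` output is also what the XNNPACK quantization hunk above feeds into `prepare_pt2e`, so the one-line substitution applies uniformly across the tutorial.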

shim/xplat/executorch/kernels/portable/op_registration_util.bzl

Lines changed: 1 addition & 15 deletions
```diff
@@ -2,21 +2,7 @@ load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "is_xplat", "runti
 load("@fbsource//xplat/executorch/build:selects.bzl", "selects")
 
 def get_compiler_optimization_flags():
-    # various ovr_configs are not available in oss
-    if not runtime.is_oss:
-        compiler_flags = select({
-            "DEFAULT": [],
-            "ovr_config//os:android-arm64": [
-                "-O2",
-            ],
-            "ovr_config//os:iphoneos": [
-                "-O2",
-            ],
-            "ovr_config//os:macos-arm64": [
-                "-O2",
-            ],
-        })
-        return compiler_flags
+    # App size regressions require this to be backtracked until I have a better solution
     return []
 
 def op_target(name, deps = [], android_deps = [], _allow_third_party_deps = False, _aten_mode_deps = []):
```
