5 files changed: +8 −8 lines

.ci/docker/ci_commit_pins
@@ -1 +1 @@
-4b2970f7cd3cdd56883cacf116a8693862f89db5
+d1b87e26e5c4343f5b56bb1e6f89b479b389bfac
@@ -166,7 +166,7 @@ def get_model_config(args):
 
     # pre-autograd export. eventually this will become torch.export
     with torch.no_grad():
-        model = torch._export.capture_pre_autograd_graph(model, example_inputs)
+        model = torch.export.export_for_training(model, example_inputs).module()
     edge: EdgeProgramManager = export_to_edge(
         model,
         example_inputs,
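For reference, the replacement API can be exercised on its own. Below is a minimal sketch of the new capture step; the toy linear module and random inputs are stand-ins for the model and example_inputs built elsewhere in this script.

import torch

# Placeholder eager module and example inputs (not the ones from get_model_config).
model = torch.nn.Linear(8, 8).eval()
example_inputs = (torch.randn(1, 8),)

with torch.no_grad():
    # export_for_training returns an ExportedProgram; .module() yields the
    # GraphModule that the edge-lowering step consumes.
    captured_module = torch.export.export_for_training(model, example_inputs).module()

print(isinstance(captured_module, torch.fx.GraphModule))  # expected: True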
@@ -15,13 +15,13 @@
 from executorch.backends.xnnpack.partition.xnnpack_partitioner import XnnpackPartitioner
 from executorch.backends.xnnpack.utils.configs import get_xnnpack_edge_compile_config
 from executorch.exir import to_edge
-from torch._export import capture_pre_autograd_graph
 from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
 
 from torch.ao.quantization.quantizer.xnnpack_quantizer import (
     get_symmetric_quantization_config,
     XNNPACKQuantizer,
 )
+from torch.export import export_for_training
 
 from transformers import Phi3ForCausalLM
 
@@ -64,9 +64,9 @@ def export(args) -> None:
     xnnpack_quantizer = XNNPACKQuantizer()
     xnnpack_quantizer.set_global(xnnpack_quant_config)
 
-    model = capture_pre_autograd_graph(
+    model = export_for_training(
         model, example_inputs, dynamic_shapes=dynamic_shapes
-    )
+    ).module()
     model = prepare_pt2e(model, xnnpack_quantizer)  # pyre-fixme[6]
     model(*example_inputs)
     model = convert_pt2e(model)
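Both hunks in this file make the same substitution: drop the capture_pre_autograd_graph import, capture with export_for_training, and unwrap the resulting ExportedProgram with .module() before the PT2E quantization calls. A self-contained sketch of that flow follows; the toy model stands in for Phi3ForCausalLM, and the dynamic_shapes argument used in the real script is omitted.

import torch
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
    get_symmetric_quantization_config,
    XNNPACKQuantizer,
)
from torch.export import export_for_training

# Toy stand-ins for the Phi-3-mini model and its tokenized example inputs.
model = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU()).eval()
example_inputs = (torch.randn(1, 16),)

xnnpack_quantizer = XNNPACKQuantizer()
xnnpack_quantizer.set_global(get_symmetric_quantization_config())

# Capture, then unwrap the ExportedProgram into a GraphModule for PT2E.
model = export_for_training(model, example_inputs).module()
model = prepare_pt2e(model, xnnpack_quantizer)
model(*example_inputs)  # one calibration pass; real scripts feed representative data
model = convert_pt2e(model)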
@@ -1413,10 +1413,10 @@ def quantize_model(
     m_eager: torch.nn.Module, example_inputs: Tuple[torch.Tensor]
 ) -> Tuple[EdgeProgramManager, int, int]:
     # program capture
-    m = torch._export.capture_pre_autograd_graph(
+    m = torch.export.export_for_training(
         m_eager,
         example_inputs,
-    )
+    ).module()
 
     quantizer = XNNPACKQuantizer()
     quantization_config = get_symmetric_quantization_config()
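This hunk applies the same migration inside a quantize_model helper whose signature promises an EdgeProgramManager. As a rough, hedged sketch of the kind of downstream lowering such a helper feeds into (not taken from this diff): the converted module is re-exported with torch.export.export, lowered via to_edge, delegated to XNNPACK, and serialized. The quantized_module stand-in and the "model.pte" path are assumptions.

import torch
from executorch.backends.xnnpack.partition.xnnpack_partitioner import XnnpackPartitioner
from executorch.exir import to_edge

# Stand-in for the GraphModule produced by convert_pt2e inside quantize_model.
quantized_module = torch.nn.Linear(16, 16).eval()
example_inputs = (torch.randn(1, 16),)

# Re-export to an ExportedProgram, lower to an EdgeProgramManager, delegate to
# XNNPACK, and serialize the program; "model.pte" is an arbitrary output path.
exported = torch.export.export(quantized_module, example_inputs)
edge = to_edge(exported)
edge = edge.to_backend(XnnpackPartitioner())
executorch_program = edge.to_executorch()

with open("model.pte", "wb") as f:
    f.write(executorch_program.buffer)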
@@ -94,7 +94,7 @@ def python_is_compatible():
 # NOTE: If a newly-fetched version of the executorch repo changes the value of
 # NIGHTLY_VERSION, you should re-run this script to install the necessary
 # package versions.
-NIGHTLY_VERSION = "dev20241002"
+NIGHTLY_VERSION = "dev20241007"
 
 # The pip repository that hosts nightly torch packages.
 TORCH_NIGHTLY_URL = "https://download.pytorch.org/whl/nightly/cpu"
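The bumped NIGHTLY_VERSION pairs with TORCH_NIGHTLY_URL to select dev-dated nightly wheels. The sketch below only illustrates how such a pin is commonly consumed by pip; the torch base version (2.6.0) and the exact invocation are assumptions, not taken from install_requirements.py.

import subprocess
import sys

NIGHTLY_VERSION = "dev20241007"
TORCH_NIGHTLY_URL = "https://download.pytorch.org/whl/nightly/cpu"

# Assumed package spec: nightly wheels are versioned like <base>.dev<date>,
# e.g. torch==2.6.0.dev20241007; the base version here is a guess.
subprocess.check_call([
    sys.executable, "-m", "pip", "install", "--pre",
    f"torch==2.6.0.{NIGHTLY_VERSION}",
    "--extra-index-url", TORCH_NIGHTLY_URL,
])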