
Commit 1432243

Rename convert_pt2
Differential Revision: D73440519
Pull Request resolved: #10378
1 parent dd71ca8 commit 1432243

File tree

2 files changed: +6 -11 lines changed


backends/cadence/aot/compiler.py (+3 -8)
@@ -47,12 +47,7 @@
 from .utils import print_ops_info
 
 
-# Note: this is not meant as a primary API since it can create inconsistencies
-# if the quantizer here is different from the quantizer used to convert. It is
-# however useful for unit tests to separate the converted model from the fused
-# model, to be able to get reference numerics.
-# If this does not apply, please use quantize_and_fuse_pt2 instead.
-def convert_pt2(
+def prepare_and_convert_pt2(
     model: torch.nn.Module,
     inputs: tuple[object, ...],
     quantizer: CadenceQuantizer,
@@ -150,7 +145,7 @@ def quantize_pt2(
     dump_graphs: bool = False,
 ) -> torch.fx.GraphModule:
     """
-    Prepare, convert and fuse the model using the given quantizer.
+    Trace, prepare, convert and fuse the model using the given quantizer.
     If calibration data is provided, it will be used to calibrate the model. If
     not, the inputs will be used for calibration instead, which is useful for
     unit tests but should not be used for end-to-end use cases.
@@ -164,7 +159,7 @@ def quantize_pt2(
         quantizer = CadenceDefaultQuantizer()
 
     # Get converted graph module
-    converted_gm = convert_pt2(
+    converted_gm = prepare_and_convert_pt2(
         model, inputs, quantizer, calibration_data, dump_graphs=dump_graphs
     )
 

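For context, the sketch below shows the single-call path this file also provides: per the updated docstring, quantize_pt2 traces, prepares, converts and fuses the model with the given quantizer, delegating the first three steps to prepare_and_convert_pt2. The toy Linear module, the example inputs, and the assumption that quantize_pt2 accepts (model, inputs, quantizer) positionally, mirroring prepare_and_convert_pt2 above, are illustrative and not taken from this diff.

import torch

from executorch.backends.cadence.aot.compiler import quantize_pt2
from executorch.backends.cadence.aot.quantizer.quantizer import CadenceDefaultQuantizer

# Illustrative stand-ins; any eager module and matching example inputs would do.
model = torch.nn.Linear(16, 8).eval()
example_inputs = (torch.randn(1, 16),)

# Assumed (model, inputs, quantizer) ordering, mirroring prepare_and_convert_pt2.
quantized_gm = quantize_pt2(model, example_inputs, CadenceDefaultQuantizer())
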
backends/cadence/aot/export_example.py (+3 -3)
@@ -15,9 +15,9 @@
 from typing import Any, Tuple
 
 from executorch.backends.cadence.aot.compiler import (
-    convert_pt2,
     export_to_executorch_gen_etrecord,
     fuse_pt2,
+    prepare_and_convert_pt2,
 )
 
 from executorch.backends.cadence.aot.quantizer.quantizer import CadenceDefaultQuantizer
@@ -49,13 +49,13 @@ def export_model(
     quantizer = CadenceDefaultQuantizer()
 
     # Convert the model
-    converted_model = convert_pt2(model, example_inputs, quantizer)
+    converted_model = prepare_and_convert_pt2(model, example_inputs, quantizer)
 
     # Get reference outputs from converted model
     ref_outputs = converted_model(*example_inputs)
 
     # Quantize the model (note: quantizer needs to be the same as
-    # the one used in convert_pt2)
+    # the one used in prepare_and_convert_pt2)
     quantized_model = fuse_pt2(converted_model, quantizer)
 
     # Get edge program after Cadence specific passes

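Taken together, the updated example keeps the two-step flow under the new name: prepare/convert first to get reference numerics, then fuse with the same quantizer. The sketch below restates that flow as a self-contained script; only the imports and the prepare_and_convert_pt2 / fuse_pt2 calls come from this diff, while the toy module and inputs are illustrative stand-ins.

import torch

from executorch.backends.cadence.aot.compiler import fuse_pt2, prepare_and_convert_pt2
from executorch.backends.cadence.aot.quantizer.quantizer import CadenceDefaultQuantizer

# Illustrative stand-ins for the model and inputs used in export_example.py.
model = torch.nn.Linear(16, 8).eval()
example_inputs = (torch.randn(1, 16),)

quantizer = CadenceDefaultQuantizer()

# Prepare, calibrate on the example inputs, and convert.
converted_model = prepare_and_convert_pt2(model, example_inputs, quantizer)

# Reference outputs from the converted (not yet fused) model.
ref_outputs = converted_model(*example_inputs)

# Fuse with the same quantizer instance used for conversion.
quantized_model = fuse_pt2(converted_model, quantizer)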