Skip to content

Commit 5334d52

Browse files
Merge branch 'main' into dev1/danny/support_qnn_ir_backend
2 parents 8611a4b + 17b933f commit 5334d52

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

46 files changed

+553
-370
lines changed

.github/scripts/extract_benchmark_results.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -349,7 +349,10 @@ def transform(
349349
# Overwrite the device name here with the job name as it has more information about
350350
# the device, i.e. Samsung Galaxy S22 5G instead of just Samsung
351351
for r in benchmark_results:
352-
r["deviceInfo"]["device"] = job_name
352+
is_private_device = job_report.get("is_private_instance", False)
353+
r["deviceInfo"]["device"] = (
354+
f"{job_name} (private)" if is_private_device else job_name
355+
)
353356

354357
# From https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
355358
return [
@@ -363,6 +366,7 @@ def transform(
363366
"benchmark_config": json.dumps(benchmark_config),
364367
"job_conclusion": "SUCCESS",
365368
"job_arn": job_report.get("arn", ""),
369+
"instance_arn": job_report.get("instance_arn", ""),
366370
},
367371
},
368372
"model": {

.github/workflows/apple-perf-private-device-experiment.yml

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,16 @@
11
name: apple-perf (private devices)
22

33
on:
4-
# TODO (huydhn): Disable the schedule run until we land the change to add device pool and device name
5-
# to separate between public and private iOS devices
6-
# schedule:
7-
# - cron: 0 0,4,8,12,16,20 * * *
4+
schedule:
5+
- cron: 0 0,4,8,12,16,20 * * *
86
pull_request:
97
paths:
108
- .github/workflows/apple-perf-private-device-experiment.yml
11-
# push:
12-
# branches:
13-
# - main
14-
# paths:
15-
# - .github/workflows/apple-perf-private-device-experiment.yml
9+
push:
10+
branches:
11+
- main
12+
paths:
13+
- .github/workflows/apple-perf-private-device-experiment.yml
1614
# Note: GitHub has an upper limit of 10 inputs
1715
workflow_dispatch:
1816
inputs:

.lintrunner.toml

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ exclude_patterns = [
1010
'exir/serde/**',
1111
]
1212
command = [
13-
'python',
13+
'python3',
1414
'-m',
1515
'lintrunner_adapters',
1616
'run',
@@ -19,7 +19,7 @@ command = [
1919
'@{{PATHSFILE}}'
2020
]
2121
init_command = [
22-
'python',
22+
'python3',
2323
'-m',
2424
'lintrunner_adapters',
2525
'run',
@@ -41,7 +41,7 @@ exclude_patterns = [
4141
'exir/serde/**',
4242
]
4343
command = [
44-
'python',
44+
'python3',
4545
'-m',
4646
'lintrunner_adapters',
4747
'run',
@@ -50,7 +50,7 @@ command = [
5050
'@{{PATHSFILE}}'
5151
]
5252
init_command = [
53-
'python',
53+
'python3',
5454
'-m',
5555
'lintrunner_adapters',
5656
'run',
@@ -83,7 +83,7 @@ exclude_patterns = [
8383
'runtime/core/portable_type/c10/**',
8484
]
8585
command = [
86-
'python',
86+
'python3',
8787
'-m',
8888
'lintrunner_adapters',
8989
'run',
@@ -94,7 +94,7 @@ command = [
9494
'@{{PATHSFILE}}'
9595
]
9696
init_command = [
97-
'python',
97+
'python3',
9898
'-m',
9999
'lintrunner_adapters',
100100
'run',
@@ -116,7 +116,7 @@ exclude_patterns = [
116116
'**/third-party/**',
117117
]
118118
command = [
119-
'python',
119+
'python3',
120120
'-m',
121121
'lintrunner_adapters',
122122
'run',
@@ -126,7 +126,7 @@ command = [
126126
'@{{PATHSFILE}}',
127127
]
128128
init_command = [
129-
'python',
129+
'python3',
130130
'-m',
131131
'lintrunner_adapters',
132132
'run',
@@ -150,7 +150,7 @@ exclude_patterns = [
150150
'**/third-party/**',
151151
]
152152
command = [
153-
'python',
153+
'python3',
154154
'-m',
155155
'lintrunner_adapters',
156156
'run',
@@ -191,7 +191,7 @@ exclude_patterns = [
191191
'extension/llm/custom_ops/spinquant/test/fast_hadamard_transform_special_unstrided_cpu.h',
192192
]
193193
command = [
194-
'python',
194+
'python3',
195195
'-m',
196196
'lintrunner_adapters',
197197
'run',
@@ -226,7 +226,7 @@ exclude_patterns = [
226226
'util/**',
227227
]
228228
command = [
229-
'python',
229+
'python3',
230230
'-m',
231231
'lintrunner_adapters',
232232
'run',
@@ -275,7 +275,7 @@ exclude_patterns = [
275275
'util/**',
276276
]
277277
command = [
278-
'python',
278+
'python3',
279279
'-m',
280280
'lintrunner_adapters',
281281
'run',
@@ -325,7 +325,7 @@ exclude_patterns = [
325325
'backends/arm/test/**',
326326
]
327327
command = [
328-
'python',
328+
'python3',
329329
'-m',
330330
'lintrunner_adapters',
331331
'run',
@@ -337,7 +337,7 @@ command = [
337337
'@{{PATHSFILE}}'
338338
]
339339
init_command = [
340-
'python',
340+
'python3',
341341
'-m',
342342
'lintrunner_adapters',
343343
'run',
@@ -356,7 +356,7 @@ exclude_patterns = [
356356
'.lintrunner.toml',
357357
]
358358
command = [
359-
'python',
359+
'python3',
360360
'-m',
361361
'lintrunner_adapters',
362362
'run',

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -49,8 +49,8 @@ Key value propositions of ExecuTorch are:
4949
## Getting Started
5050
To get started you can:
5151

52-
- Visit the [Step by Step Tutorial](https://pytorch.org/executorch/main/index) to get things running locally and deploy a model to a device
53-
- Use this [Colab Notebook](https://pytorch.org/executorch/main/getting-started-setup#quick-setup-colab-jupyter-notebook-prototype) to start playing around right away
52+
- Visit the [Step by Step Tutorial](https://pytorch.org/executorch/stable/getting-started.html) to get things running locally and deploy a model to a device
53+
- Use this [Colab Notebook](https://colab.research.google.com/drive/1qpxrXC3YdJQzly3mRg-4ayYiOjC6rue3?usp=sharing) to start playing around right away
5454
- Jump straight into LLM use cases by following specific instructions for [Llama](examples/models/llama/README.md) and [Llava](examples/models/llava/README.md)
5555

5656
## Feedback and Engagement

backends/arm/operators/op_avg_pool2d.py

Lines changed: 24 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -85,8 +85,12 @@ def define_node(
8585
) -> None:
8686
import tosa_tools.v0_80.serializer.tosa_serializer as ts # type: ignore
8787

88-
input_tensor = inputs[0]
89-
assert input_tensor.dtype == ts.DType.INT8
88+
supported_dtypes = [ts.DType.INT8]
89+
if inputs[0].dtype not in supported_dtypes:
90+
raise TypeError(
91+
f"IO data type needs to be one of {supported_dtypes}, got "
92+
f'"{inputs[0].dtype}"'
93+
)
9094

9195
accumulator_type = ts.DType.INT32
9296

@@ -118,9 +122,12 @@ def define_node(
118122
) -> None:
119123
import tosa_tools.v0_80.serializer.tosa_serializer as ts # type: ignore
120124

121-
assert (
122-
inputs[0].dtype == ts.DType.INT8 or inputs[0].dtype == ts.DType.FP32
123-
), "Only FP32 and INT8 supported"
125+
supported_dtypes = [ts.DType.INT8, ts.DType.FP32]
126+
if inputs[0].dtype not in supported_dtypes:
127+
raise TypeError(
128+
f"IO data type needs to be one of {supported_dtypes}, got "
129+
f'"{inputs[0].dtype}"'
130+
)
124131

125132
if inputs[0].dtype == ts.DType.INT8:
126133
super().define_node(node, tosa_graph, inputs, output)
@@ -205,8 +212,12 @@ def define_node(
205212
) -> None:
206213
import serializer.tosa_serializer as ts # type: ignore
207214

208-
input_tensor = inputs[0]
209-
assert input_tensor.dtype == ts.DType.INT8
215+
supported_dtypes = [ts.DType.INT8]
216+
if inputs[0].dtype not in supported_dtypes:
217+
raise TypeError(
218+
f"IO data type needs to be one of {supported_dtypes}, got "
219+
f'"{inputs[0].dtype}"'
220+
)
210221

211222
accumulator_type = ts.DType.INT32
212223

@@ -241,9 +252,12 @@ def define_node(
241252
) -> None:
242253
import serializer.tosa_serializer as ts # type: ignore
243254

244-
assert (
245-
inputs[0].dtype == ts.DType.INT8 or inputs[0].dtype == ts.DType.FP32
246-
), "Only FP32 and INT8 supported"
255+
supported_dtypes = [ts.DType.INT8, ts.DType.FP32]
256+
if inputs[0].dtype not in supported_dtypes:
257+
raise TypeError(
258+
f"IO data type needs to be one of {supported_dtypes}, got "
259+
f'"{inputs[0].dtype}"'
260+
)
247261

248262
if inputs[0].dtype == ts.DType.INT8:
249263
super().define_node(node, tosa_graph, inputs, output)

backends/arm/quantizer/__init__.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,3 +13,16 @@
1313

1414
# Used in tests
1515
from .arm_quantizer_utils import is_annotated # noqa
16+
17+
# Load quantized ops library.
18+
try:
19+
import executorch.extension.pybindings.portable_lib
20+
import executorch.kernels.quantized # noqa
21+
except:
22+
import logging
23+
24+
logging.info(
25+
"Failed to load portable_lib and quantized_aot_lib. To run quantized kernels AOT, either build "
26+
"Executorch with pybindings, or load your own custom built op library using torch.ops.load_library."
27+
)
28+
del logging

backends/arm/scripts/build_quantized_ops_aot_lib.sh

Lines changed: 0 additions & 54 deletions
This file was deleted.

backends/arm/scripts/pre-push

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ VERBS="Add|Fix|Update|Refactor|Improve|Remove|Change|Implement|Create|Modify|"\
2727
"Handle|Ignore|Interpret|Instantiate|Invoke|Limit|Load|Modify|Permit|Print|"\
2828
"Profile|Recalculate|Reconstruct|Redefine|Redesign|Reevaluate|Relocate|Remap|"\
2929
"Render|Reposition|Request|Revert|Sanitize|Specify|Strengthen|Stub|Substitute|"\
30-
"Tag|Tweak|Unify|Unlock|Unset|Use|Validate|Verify"
30+
"Tag|Tweak|Unify|Unlock|Unset|Use|Validate|Verify|Rename"
3131

3232
# Remote branch
3333
REMOTE=$(git rev-parse --abbrev-ref --symbolic-full-name @{u} 2>/dev/null)

backends/arm/test/conftest.py

Lines changed: 1 addition & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,8 @@
55

66
import logging
77
import os
8-
import platform
98
import random
109
import shutil
11-
import subprocess
1210
import sys
1311
from typing import Any
1412

@@ -81,8 +79,7 @@ def try_addoption(*args, **kwargs):
8179

8280

8381
def pytest_sessionstart(session):
84-
if not session.config.option.collectonly:
85-
_load_libquantized_ops_aot_lib()
82+
pass
8683

8784

8885
def pytest_sessionfinish(session, exitstatus):
@@ -172,32 +169,3 @@ def get_option(option: str) -> Any | None:
172169
if option in pytest._test_options: # type: ignore[attr-defined]
173170
return pytest._test_options[option] # type: ignore[attr-defined]
174171
return None
175-
176-
177-
def _load_libquantized_ops_aot_lib():
178-
"""
179-
Find and load the libquantized_ops_aot_lib shared library.
180-
"""
181-
so_ext = {
182-
"Darwin": "dylib",
183-
"Linux": "so",
184-
"Windows": "dll",
185-
}.get(platform.system(), None)
186-
187-
find_lib_cmd = [
188-
"find",
189-
"cmake-out-aot-lib",
190-
"-name",
191-
f"libquantized_ops_aot_lib.{so_ext}",
192-
]
193-
194-
res = subprocess.run(find_lib_cmd, capture_output=True)
195-
if res.returncode == 0:
196-
library_path = res.stdout.decode().strip()
197-
import torch
198-
199-
torch.ops.load_library(library_path)
200-
else:
201-
raise RuntimeError(
202-
f"Did not find libquantized_ops_aot_lib.{so_ext} in cmake-out-aot-lib. Did you build it?"
203-
)

backends/arm/test/setup_testing.sh

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -52,8 +52,6 @@ function build_semihosting_executorch_runner() {
5252
find ${build_test_dir} -name "arm_executor_runner"
5353
}
5454

55-
cd $et_root_dir && backends/arm/scripts/build_quantized_ops_aot_lib.sh
56-
5755
# Use most optimal system_configs for testing
5856
build_semihosting_executorch_runner corstone-300 Ethos_U55_High_End_Embedded
5957

0 commit comments

Comments (0)