Commit 7dc5cee ("Update" [ghstack-poisoned])
Merge commit, 2 parents: 545777f + b2e23ae

File tree: 391 files changed (+12908, -134882 lines)


.ci/scripts/gather_test_models.py
Lines changed: 2 additions & 2 deletions

@@ -14,7 +14,7 @@
 from typing import Any

 from examples.models import MODEL_NAME_TO_MODEL
-from examples.xnnpack import MODEL_NAME_TO_OPTIONS
+from examples.xnnpack import MODEL_NAME_TO_OPTIONS, QuantType

 DEFAULT_RUNNERS = {
     "linux": "linux.2xlarge",
@@ -154,7 +154,7 @@ def export_models_for_ci() -> dict[str, dict]:
         if backend == "xnnpack":
             if name not in MODEL_NAME_TO_OPTIONS:
                 continue
-            if MODEL_NAME_TO_OPTIONS[name].quantization:
+            if MODEL_NAME_TO_OPTIONS[name].quantization != QuantType.NONE:
                 backend += "-quantization"

             if MODEL_NAME_TO_OPTIONS[name].delegation:

.ci/scripts/setup-openvino.sh
Lines changed: 28 additions & 0 deletions (new file)

@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+set -ex
+
+# shellcheck source=/dev/null
+source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
+
+git clone https://github.com/openvinotoolkit/openvino.git
+cd openvino && git checkout releases/2025/1
+git submodule update --init --recursive
+sudo ./install_build_dependencies.sh
+mkdir build && cd build
+cmake .. -DCMAKE_BUILD_TYPE=Release -DENABLE_PYTHON=ON
+make -j$(nproc)
+
+cd ..
+cmake --install build --prefix dist
+
+source dist/setupvars.sh
+cd ../backends/openvino
+pip install -r requirements.txt
+cd scripts
+./openvino_build.sh --enable_python
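
Together with .ci/scripts/test_openvino.sh below, this gives the OpenVINO backend a two-step build-then-test flow in CI. A minimal local sketch of the same sequence, mirroring the new test-openvino-linux job added to pull.yml in this commit (running from the repository root is an assumption):

  # Build OpenVINO from source and install the ExecuTorch OpenVINO backend,
  # then run the backend's op-level and model-level tests.
  PYTHON_EXECUTABLE=python bash .ci/scripts/setup-openvino.sh
  PYTHON_EXECUTABLE=python bash .ci/scripts/test_openvino.sh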

.ci/scripts/test_llama.sh
Lines changed: 1 addition & 1 deletion

@@ -269,7 +269,7 @@ $PYTHON_EXECUTABLE -m examples.models.llama.export_llama ${EXPORT_ARGS}

 # Create tokenizer.bin.
 echo "Creating tokenizer.bin"
-$PYTHON_EXECUTABLE -m extension.llm.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
+$PYTHON_EXECUTABLE -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin


 RUNTIME_ARGS="--model_path=${EXPORTED_MODEL_NAME} --tokenizer_path=tokenizer.bin --prompt=Once --temperature=0 --seq_len=10 --warmup=1"
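
This commit swaps the in-tree extension.llm.tokenizer.tokenizer module for the external pytorch_tokenizers package in every CI script that builds tokenizer.bin (see also test_llama_torchao_lowbit.sh, test_phi_3_mini.sh, and test_qnn_static_llama.sh below). A minimal sketch of the new conversion step, assuming the pytorch_tokenizers package is installed and a SentencePiece tokenizer.model sits in the working directory:

  # Convert a SentencePiece tokenizer.model into the llama2.c-style
  # binary format that the llama runner consumes.
  python -m pytorch_tokenizers.tools.llama2c.convert \
      -t tokenizer.model \
      -o tokenizer.bin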

.ci/scripts/test_llama_torchao_lowbit.sh
Lines changed: 1 addition & 1 deletion

@@ -55,7 +55,7 @@ cmake --build cmake-out/examples/models/llama -j16 --config Release
 download_stories_model_artifacts

 echo "Creating tokenizer.bin"
-$PYTHON_EXECUTABLE -m extension.llm.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
+$PYTHON_EXECUTABLE -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin

 # Export model
 LLAMA_CHECKPOINT=stories110M.pt

.ci/scripts/test_model.sh
Lines changed: 8 additions & 5 deletions

@@ -96,15 +96,15 @@ test_model() {
     bash examples/models/llama/install_requirements.sh
     # Test export_llama script: python3 -m examples.models.llama.export_llama.
     # Use Llama random checkpoint with Qwen 2.5 1.5b model configuration.
-    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -c examples/models/llama/params/demo_rand_params.pth -p examples/models/qwen2_5/1_5b_config.json
+    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -p examples/models/qwen2_5/1_5b_config.json
     rm "./${MODEL_NAME}.pte"
     return # Skip running with portable executor runner since portable doesn't support Qwen's biased linears.
   fi
   if [[ "${MODEL_NAME}" == "phi_4_mini" ]]; then
     # Install requirements for export_llama
     bash examples/models/llama/install_requirements.sh
     # Test export_llama script: python3 -m examples.models.llama.export_llama.
-    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -c examples/models/llama/params/demo_rand_params.pth -p examples/models/phi_4_mini/config.json
+    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -p examples/models/phi_4_mini/config.json
     run_portable_executor_runner
     rm "./${MODEL_NAME}.pte"
     return
@@ -224,19 +224,22 @@ test_model_with_coreml() {

   "${PYTHON_EXECUTABLE}" -m examples.apple.coreml.scripts.export --model_name="${MODEL_NAME}" --compute_precision "${DTYPE}"
   EXPORTED_MODEL=$(find "." -type f -name "${MODEL_NAME}*.pte" -print -quit)
-  # TODO:
+
   if [ -n "$EXPORTED_MODEL" ]; then
     EXPORTED_MODEL_WITH_DTYPE="${EXPORTED_MODEL%.pte}_${DTYPE}.pte"
     mv "$EXPORTED_MODEL" "$EXPORTED_MODEL_WITH_DTYPE"
     EXPORTED_MODEL="$EXPORTED_MODEL_WITH_DTYPE"
-    echo "Renamed file path: $EXPORTED_MODEL"
+    echo "OK exported model: $EXPORTED_MODEL"
   else
-    echo "No .pte file found"
+    echo "[error] failed to export model: no .pte file found"
     exit 1
   fi

   # Run the model
   if [ "${should_test}" = true ]; then
+    echo "Installing requirements needed to build coreml_executor_runner..."
+    backends/apple/coreml/scripts/install_requirements.sh
+
     echo "Testing exported model with coreml_executor_runner..."
     local out_dir=$(mktemp -d)
     COREML_EXECUTOR_RUNNER_OUT_DIR="${out_dir}" examples/apple/coreml/scripts/build_executor_runner.sh
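
The CoreML branch of this script now installs the backend's build requirements before compiling coreml_executor_runner, and its log messages are made easier to grep. A condensed sketch of the flow the function implements; mv3 and float16 stand in for ${MODEL_NAME} and ${DTYPE} and are illustrative assumptions, not values fixed by the diff:

  # Export the model through the CoreML backend, install the runner's build
  # requirements, then build the CoreML executor runner into a temp dir.
  python -m examples.apple.coreml.scripts.export --model_name=mv3 --compute_precision float16
  backends/apple/coreml/scripts/install_requirements.sh
  out_dir=$(mktemp -d)
  COREML_EXECUTOR_RUNNER_OUT_DIR="${out_dir}" examples/apple/coreml/scripts/build_executor_runner.sh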

.ci/scripts/test_openvino.sh
Lines changed: 16 additions & 0 deletions (new file)

@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+set -ex
+
+# shellcheck source=/dev/null
+source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
+
+source openvino/dist/setupvars.sh
+cd backends/openvino/tests
+python test_runner.py --test_type ops
+python test_runner.py --test_type models

.ci/scripts/test_phi_3_mini.sh
Lines changed: 1 addition & 1 deletion

@@ -56,7 +56,7 @@ cmake_build_phi_3_mini() {
 prepare_tokenizer() {
   echo "Downloading and converting tokenizer.model"
   wget -O tokenizer.model "https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/tokenizer.model?download=true"
-  $PYTHON_EXECUTABLE -m executorch.extension.llm.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
+  $PYTHON_EXECUTABLE -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin
 }

 # Export phi-3-mini model to pte

.ci/scripts/test_qnn_static_llama.sh
Lines changed: 2 additions & 3 deletions

@@ -5,7 +5,7 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.

-set -exu
+set -euxo pipefail

 source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"

@@ -30,7 +30,7 @@ pip install graphviz
 # Download stories llama110m artifacts
 download_stories_model_artifacts
 echo "Creating tokenizer.bin"
-$PYTHON_EXECUTABLE -m extension.llm.tokenizer.tokenizer -t tokenizer.model -o tokenizer.bin
+$PYTHON_EXECUTABLE -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin

 set +e
 # Compile only as weight sharing is not applicable on x86
@@ -56,4 +56,3 @@ if [ $exit_code1 -ne 0 ] || [ $exit_code2 -ne 0 ]; then
 else
   exit 0
 fi
-set -e
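
The switch from set -exu to set -euxo pipefail tightens error handling: without pipefail, a pipeline's exit status is that of its last stage only, so earlier failures are silently swallowed. A small illustrative sketch, not taken from the script itself:

  set -euxo pipefail
  # -e: exit on any failing command; -u: unset variables are errors;
  # -x: trace each command; -o pipefail: a pipeline fails if any stage fails.
  false | true   # aborts the script under pipefail; succeeds without it

The trailing set -e is dropped as well; since both visible branches of the preceding if/else exit the script, it was apparently never reached.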

.ci/scripts/utils.sh
Lines changed: 1 addition & 1 deletion

@@ -32,7 +32,7 @@ install_executorch() {
   which pip
   # Install executorch, this assumes that Executorch is checked out in the
   # current directory.
-  ./install_executorch.sh --pybind xnnpack "$@"
+  ./install_executorch.sh "$@"
   # Just print out the list of packages for debugging
   pip list
 }

.ci/scripts/wheel/test_macos.py
Lines changed: 4 additions & 6 deletions

@@ -15,11 +15,9 @@
             model=Model.Mv3,
             backend=Backend.XnnpackQuantizationDelegation,
         ),
-        # Enable this once CoreML is suppported out-of-the-box
-        # https://github.com/pytorch/executorch/issues/9019
-        # test_base.ModelTest(
-        #     model=Model.Mv3,
-        #     backend=Backend.CoreMlTest,
-        # )
+        test_base.ModelTest(
+            model=Model.Mv3,
+            backend=Backend.CoreMlTest,
+        ),
     ]
 )

.github/workflows/apple.yml
Lines changed: 1 addition & 1 deletion

@@ -37,7 +37,7 @@ jobs:
       id: set_version
       shell: bash
       run: |
-        VERSION="0.5.0.$(TZ='PST8PDT' date +%Y%m%d)"
+        VERSION="0.7.0.$(TZ='PST8PDT' date +%Y%m%d)"
        echo "version=$VERSION" >> "$GITHUB_OUTPUT"

  build-demo-ios:

.github/workflows/build-wheels-linux.yml
Lines changed: 3 additions & 0 deletions

@@ -6,6 +6,9 @@ on:
     paths:
       - .ci/**/*
       - .github/workflows/build-wheels-linux.yml
+      - examples/**/*
+      - pyproject.toml
+      - setup.py
   push:
     branches:
       - nightly

.github/workflows/build-wheels-macos.yml
Lines changed: 6 additions & 1 deletion

@@ -6,6 +6,9 @@ on:
     paths:
       - .ci/**/*
       - .github/workflows/build-wheels-macos.yml
+      - examples/**/*
+      - pyproject.toml
+      - setup.py
   push:
     branches:
       - nightly
@@ -57,6 +60,8 @@ jobs:
       pre-script: ${{ matrix.pre-script }}
       post-script: ${{ matrix.post-script }}
       package-name: ${{ matrix.package-name }}
-      runner-type: macos-m1-stable
+      # Meta's macOS runners do not have Xcode, so use GitHub's runners.
+      runner-type: macos-latest-xlarge
+      setup-miniconda: true
       smoke-test-script: ${{ matrix.smoke-test-script }}
       trigger-event: ${{ github.event_name }}

.github/workflows/pull.yml
Lines changed: 22 additions & 1 deletion

@@ -573,7 +573,6 @@ jobs:

         BUILD_TOOL="cmake"

-        ./install_requirements.sh --use-pt-pinned-commit
         PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
         PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh

@@ -736,3 +735,25 @@ jobs:
       conda activate "${CONDA_ENV}"

       # placeholder for mediatek to add more tests
+
+  test-openvino-linux:
+    name: test-openvino-linux
+    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
+    permissions:
+      id-token: write
+      contents: read
+    strategy:
+      fail-fast: false
+    with:
+      runner: linux.2xlarge
+      docker-image: executorch-ubuntu-22.04-gcc9
+      submodules: 'true'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 90
+      script: |
+        # The generic Linux job chooses to use base env, not the one setup by the image
+        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+        conda activate "${CONDA_ENV}"
+
+        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-openvino.sh
+        PYTHON_EXECUTABLE=python bash .ci/scripts/test_openvino.sh

.github/workflows/trunk.yml
Lines changed: 10 additions & 4 deletions

@@ -65,22 +65,29 @@ jobs:
       matrix:
         model: [linear, add, add_mul, ic3, ic4, mv2, mv3, resnet18, resnet50, vit, w2l, mobilebert, emformer_join, emformer_transcribe]
         backend: [portable, xnnpack-quantization-delegation]
+        runner: [linux.arm64.2xlarge]
         include:
           - model: lstm
             backend: portable
+            runner: linux.arm64.2xlarge
           - model: mul
             backend: portable
+            runner: linux.arm64.2xlarge
           - model: softmax
             backend: portable
+            runner: linux.arm64.2xlarge
           - model: phi_4_mini
             backend: portable
+            runner: linux.arm64.m7g.4xlarge
           - model: qwen2_5
             backend: portable
+            runner: linux.arm64.2xlarge
           - model: llama3_2_vision_encoder
             backend: portable
+            runner: linux.arm64.2xlarge
       fail-fast: false
     with:
-      runner: linux.arm64.2xlarge
+      runner: ${{ matrix.runner }}
       docker-image: executorch-ubuntu-22.04-gcc11-aarch64
       submodules: 'true'
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -536,9 +543,8 @@ jobs:
       git clone https://github.com/huggingface/optimum-executorch
       cd optimum-executorch
       # There is no release yet, for CI stability, always test from the same commit on main
-      git checkout 6a7e83f3eee2976fa809335bfb78a45b1ea1cb25
-      pip install .
-      pip install accelerate sentencepiece
+      git checkout 577a2b19670e4c643a5c6ecb09bf47b9a699e7c6
+      pip install .[tests]
       pip list
       echo "::endgroup::"

.gitmodules
Lines changed: 2 additions & 2 deletions

@@ -1,9 +1,9 @@
 [submodule "backends/arm/third-party/ethos-u-core-driver"]
     path = backends/arm/third-party/ethos-u-core-driver
-    url = https://github.com/pytorch-labs/ethos-u-core-driver-mirror
+    url = https://git.gitlab.arm.com/artificial-intelligence/ethos-u/ethos-u-core-driver.git
 [submodule "backends/arm/third-party/serialization_lib"]
     path = backends/arm/third-party/serialization_lib
-    url = https://github.com/pytorch-labs/tosa_serialization_lib-mirror
+    url = https://git.gitlab.arm.com/tosa/tosa-serialization.git
 [submodule "backends/vulkan/third-party/Vulkan-Headers"]
     path = backends/vulkan/third-party/Vulkan-Headers
     url = https://github.com/KhronosGroup/Vulkan-Headers
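
The two Arm submodules move from the pytorch-labs GitHub mirrors to their upstream Arm GitLab repositories. Existing checkouts keep the old mirror URLs until they are resynced; a sketch of the usual refresh after a .gitmodules URL change:

  # Re-point existing submodule remotes at the new upstream URLs,
  # then fetch and check out the pinned commits.
  git submodule sync --recursive
  git submodule update --init --recursive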

.lintrunner.toml
Lines changed: 4 additions & 0 deletions

@@ -266,6 +266,8 @@ exclude_patterns = [
     'extension/**',
     # Uses properly-gated (ET_USE_PYTORCH_HEADERS) ATen include.
     'kernels/portable/cpu/util/elementwise_util.h',
+    'kernels/portable/cpu/util/math_util.h',
+    'kernels/portable/cpu/util/vectorized_math.h',
     'kernels/optimized/**',
     'runtime/core/exec_aten/**',
     # Want to be able to keep c10 in sync with PyTorch core.
@@ -301,12 +303,14 @@ include_patterns = [
     # TODO(https://github.com/pytorch/executorch/issues/7441): Gradually start enabling all folders.
     # 'backends/**/*.py',
     'backends/arm/**/*.py',
+    'backends/openvino/**/*.py',
     'build/**/*.py',
     'codegen/**/*.py',
     # 'devtools/**/*.py',
     'devtools/visualization/**/*.py',
     'docs/**/*.py',
     # 'examples/**/*.py',
+    'examples/openvino/**/*.py',
     # 'exir/**/*.py',
     # 'extension/**/*.py',
     'kernels/**/*.py',
