Skip to content

Commit 22d6aeb

Browse files
committed
Rename CMake BUILD_KERNELS_CUSTOM to BUILD_EXTENSION_LLM
1 parent 905b88c commit 22d6aeb

File tree

22 files changed

+51
-52
lines changed

22 files changed

+51
-52
lines changed

.ci/scripts/build_llama_android.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ install_executorch_and_backend_lib() {
2828
-DEXECUTORCH_BUILD_XNNPACK=ON \
2929
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
3030
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
31-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
31+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
3232
-DXNNPACK_ENABLE_ARM_BF16=OFF \
3333
-Bcmake-android-out .
3434

@@ -47,7 +47,7 @@ build_llama_runner() {
4747
-DEXECUTORCH_BUILD_XNNPACK=ON \
4848
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
4949
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
50-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
50+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
5151
-Bcmake-android-out/examples/models/llama2 examples/models/llama2
5252

5353
cmake --build cmake-android-out/examples/models/llama2 -j4 --config Release

.ci/scripts/test_llama.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ cmake_install_executorch_libraries() {
110110
-DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
111111
-DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
112112
-DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
113-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM="$CUSTOM" \
113+
-DEXECUTORCH_BUILD_EXTENSION_LLM="$CUSTOM" \
114114
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
115115
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
116116
-DEXECUTORCH_BUILD_XNNPACK="$XNNPACK" \
@@ -129,7 +129,7 @@ cmake_build_llama_runner() {
129129
retry cmake \
130130
-DCMAKE_INSTALL_PREFIX=cmake-out \
131131
-DCMAKE_BUILD_TYPE=Debug \
132-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM="$CUSTOM" \
132+
-DEXECUTORCH_BUILD_EXTENSION_LLM="$CUSTOM" \
133133
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
134134
-DEXECUTORCH_BUILD_XNNPACK="$XNNPACK" \
135135
-DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" \

.ci/scripts/test_llava.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ EXECUTORCH_COMMON_CMAKE_ARGS=" \
3737
-DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
3838
-DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
3939
-DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
40-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
40+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
4141
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
4242
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
4343
-DEXECUTORCH_BUILD_XNNPACK=ON \
@@ -68,7 +68,7 @@ LLAVA_COMMON_CMAKE_ARGS=" \
6868
-DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" \
6969
-DCMAKE_INSTALL_PREFIX=${BUILD_DIR} \
7070
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
71-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
71+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
7272
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
7373
-DEXECUTORCH_BUILD_XNNPACK=ON"
7474

.ci/scripts/test_phi_3_mini.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ cmake_install_executorch_libraries() {
3232
-DEXECUTORCH_BUILD_XNNPACK=ON \
3333
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
3434
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
35-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
35+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
3636
-B${BUILD_DIR} .
3737

3838
cmake --build ${BUILD_DIR} -j${NPROC} --target install --config ${BUILD_TYPE}
@@ -42,7 +42,7 @@ cmake_build_phi_3_mini() {
4242
cmake -DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \
4343
-DCMAKE_INSTALL_PREFIX=${BUILD_DIR} \
4444
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
45-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
45+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
4646
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
4747
-DEXECUTORCH_BUILD_XNNPACK=ON \
4848
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \

.github/workflows/trunk.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -371,7 +371,7 @@ jobs:
371371
-DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
372372
-DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
373373
-DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
374-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
374+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
375375
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
376376
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
377377
-DEXECUTORCH_BUILD_XNNPACK=ON \
@@ -384,7 +384,7 @@ jobs:
384384
cmake \
385385
-DCMAKE_INSTALL_PREFIX=cmake-out \
386386
-DCMAKE_BUILD_TYPE=Release \
387-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
387+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
388388
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
389389
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
390390
-DEXECUTORCH_BUILD_XNNPACK=ON \

CMakeLists.txt

+11-12
Original file line numberDiff line numberDiff line change
@@ -165,14 +165,14 @@ option(EXECUTORCH_BUILD_ARM_BAREMETAL
165165

166166
option(EXECUTORCH_BUILD_COREML "Build the Core ML backend" OFF)
167167

168-
option(EXECUTORCH_BUILD_KERNELS_CUSTOM "Build the custom kernels" OFF)
169-
170-
option(EXECUTORCH_BUILD_KERNELS_CUSTOM_AOT "Build the custom ops lib for AOT"
168+
option(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER "Build the Data Loader extension"
171169
OFF
172170
)
173171

174-
option(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER "Build the Data Loader extension"
175-
OFF
172+
option(EXECUTORCH_BUILD_EXTENSION_LLM "Build the LLM extension" OFF)
173+
174+
option(EXECUTORCH_BUILD_EXTENSION_LLM_AOT
175+
"Build the LLM extension custom ops lib for AOT" OFF
176176
)
177177

178178
option(EXECUTORCH_BUILD_EXTENSION_MODULE "Build the Module extension" OFF)
@@ -229,12 +229,12 @@ cmake_dependent_option(
229229
"NOT EXECUTORCH_BUILD_ARM_BAREMETAL" OFF
230230
)
231231

232-
if(EXECUTORCH_BUILD_KERNELS_CUSTOM_AOT)
232+
if(EXECUTORCH_BUILD_EXTENSION_LLM_AOT)
233233
set(EXECUTORCH_BUILD_EXTENSION_TENSOR ON)
234-
set(EXECUTORCH_BUILD_KERNELS_CUSTOM ON)
234+
set(EXECUTORCH_BUILD_EXTENSION_LLM ON)
235235
endif()
236236

237-
if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
237+
if(EXECUTORCH_BUILD_EXTENSION_LLM)
238238
set(EXECUTORCH_BUILD_KERNELS_OPTIMIZED ON)
239239
endif()
240240

@@ -786,10 +786,9 @@ if(EXECUTORCH_BUILD_PYBIND)
786786
)
787787
endif()
788788

789-
if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
790-
# TODO: move all custom kernels to ${CMAKE_CURRENT_SOURCE_DIR}/kernels/custom
791-
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/custom_ops)
792-
endif()
789+
if(EXECUTORCH_BUILD_EXTENSION_LLM)
790+
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/custom_ops)
791+
endif()
793792

794793
if(EXECUTORCH_BUILD_KERNELS_QUANTIZED)
795794
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/kernels/quantized)

backends/cadence/build_cadence_xtensa.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ if $STEPWISE_BUILD; then
4646
-DEXECUTORCH_ENABLE_PROGRAM_VERIFICATION=ON \
4747
-DEXECUTORCH_USE_DL=OFF \
4848
-DBUILD_EXECUTORCH_PORTABLE_OPS=ON \
49-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=OFF \
49+
-DEXECUTORCH_BUILD_EXTENSION_LLM=OFF \
5050
-DPYTHON_EXECUTABLE=python3 \
5151
-DEXECUTORCH_NNLIB_OPT=ON \
5252
-DEXECUTORCH_BUILD_GFLAGS=ON \
@@ -74,7 +74,7 @@ else
7474
-DEXECUTORCH_ENABLE_PROGRAM_VERIFICATION=ON \
7575
-DEXECUTORCH_USE_DL=OFF \
7676
-DBUILD_EXECUTORCH_PORTABLE_OPS=ON \
77-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=OFF \
77+
-DEXECUTORCH_BUILD_EXTENSION_LLM=OFF \
7878
-DPYTHON_EXECUTABLE=python3 \
7979
-DEXECUTORCH_NNLIB_OPT=ON \
8080
-DHAVE_FNMATCH_H=OFF \

build/Utils.cmake

+3-3
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,8 @@ function(executorch_print_configuration_summary)
5050
STATUS
5151
" EXECUTORCH_BUILD_COREML : ${EXECUTORCH_BUILD_COREML}"
5252
)
53-
message(STATUS " EXECUTORCH_BUILD_KERNELS_CUSTOM : "
54-
"${EXECUTORCH_BUILD_KERNELS_CUSTOM}"
53+
message(STATUS " EXECUTORCH_BUILD_EXTENSION_LLM : "
54+
"${EXECUTORCH_BUILD_EXTENSION_LLM}"
5555
)
5656
message(STATUS " EXECUTORCH_BUILD_EXECUTOR_RUNNER : "
5757
"${EXECUTORCH_BUILD_EXECUTOR_RUNNER}"
@@ -68,7 +68,7 @@ function(executorch_print_configuration_summary)
6868
message(STATUS " EXECUTORCH_BUILD_EXTENSION_TENSOR : "
6969
"${EXECUTORCH_BUILD_EXTENSION_TENSOR}"
7070
)
71-
message(STATUS " EXECUTORCH_BUILD_EXTENSION_TRAINING : "
71+
message(STATUS " EXECUTORCH_BUILD_EXTENSION_TRAINING : "
7272
"${EXECUTORCH_BUILD_EXTENSION_TRAINING}"
7373
)
7474
message(

build/build_android_llm_demo.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ build_android_native_library() {
4141
-DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
4242
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
4343
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
44-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
44+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
4545
-DEXECUTORCH_BUILD_QNN="${EXECUTORCH_BUILD_QNN}" \
4646
-DQNN_SDK_ROOT="${QNN_SDK_ROOT}" \
4747
-DCMAKE_BUILD_TYPE=Release \
@@ -61,7 +61,7 @@ build_android_native_library() {
6161
-DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
6262
-DEXECUTORCH_ENABLE_LOGGING=ON \
6363
-DEXECUTORCH_LOG_LEVEL=Info \
64-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
64+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
6565
-DEXECUTORCH_BUILD_LLAMA_JNI=ON \
6666
-DCMAKE_BUILD_TYPE=Release \
6767
-B"${CMAKE_OUT}"/extension/android

build/build_apple_frameworks.sh

+1-1
Original file line numberDiff line numberDiff line change
@@ -168,7 +168,7 @@ cmake_build() {
168168
-DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
169169
-DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
170170
-DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
171-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=$CUSTOM \
171+
-DEXECUTORCH_BUILD_EXTENSION_LLM=$CUSTOM \
172172
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=$OPTIMIZED \
173173
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=$QUANTIZED \
174174
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY="$(pwd)" \

docs/source/llm/build-run-llama3-qualcomm-ai-engine-direct-backend.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ llama3/Meta-Llama-3-8B-Instruct/tokenizer.model -p <path_to_params.json> -c <pat
5959
-DQNN_SDK_ROOT=${QNN_SDK_ROOT} \
6060
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
6161
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
62-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
62+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
6363
-Bcmake-android-out .
6464

6565
cmake --build cmake-android-out -j16 --target install --config Release
@@ -75,7 +75,7 @@ llama3/Meta-Llama-3-8B-Instruct/tokenizer.model -p <path_to_params.json> -c <pat
7575
-DEXECUTORCH_BUILD_QNN=ON \
7676
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
7777
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
78-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
78+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
7979
-Bcmake-android-out/examples/models/llama2 examples/models/llama2
8080
8181
cmake --build cmake-android-out/examples/models/llama2 -j16 --config Release

examples/demo-apps/android/LlamaDemo/docs/delegates/qualcomm_README.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ cmake -DPYTHON_EXECUTABLE=python \
6464
-DQNN_SDK_ROOT=${QNN_SDK_ROOT} \
6565
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
6666
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
67-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
67+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
6868
-Bcmake-out .
6969
cmake --build cmake-out -j16 --target install --config Release
7070
```
@@ -81,7 +81,7 @@ cmake -DPYTHON_EXECUTABLE=python \
8181
-DCMAKE_BUILD_TYPE=Release \
8282
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
8383
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
84-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
84+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
8585
-DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
8686
-DEXECUTORCH_BUILD_QNN=ON \
8787
-Bcmake-out/examples/models/llama2 \

examples/demo-apps/android/LlamaDemo/setup-with-qnn.sh

+1-1
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ cmake extension/android \
3737
-DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
3838
-DEXECUTORCH_BUILD_LLAMA_JNI=ON \
3939
-DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
40-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
40+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
4141
-DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON \
4242
-DCMAKE_BUILD_TYPE=Release \
4343
-B"${CMAKE_OUT}"/extension/android

examples/demo-apps/android/LlamaDemo/setup.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ cmake . -DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
2020
-DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
2121
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
2222
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
23-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
23+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
2424
-DCMAKE_BUILD_TYPE=Release \
2525
-B"${CMAKE_OUT}"
2626

@@ -37,7 +37,7 @@ cmake extension/android \
3737
-DANDROID_PLATFORM=android-23 \
3838
-DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
3939
-DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON \
40-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
40+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
4141
-DEXECUTORCH_BUILD_LLAMA_JNI=ON \
4242
-DCMAKE_BUILD_TYPE=Release \
4343
-B"${CMAKE_OUT}"/extension/android

examples/llm_manual/build/schema/include/executorch/backends/xnnpack/serialization/schema_generated.h

Whitespace-only changes.

examples/models/llama2/CMakeLists.txt

+2-2
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ if(CMAKE_TOOLCHAIN_IOS OR ANDROID)
8383
endif()
8484

8585
# custom ops library
86-
if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
86+
if(EXECUTORCH_BUILD_EXTENSION_LLM)
8787
add_subdirectory(
8888
${CMAKE_CURRENT_SOURCE_DIR}/../../../extension/llm/custom_ops
8989
${CMAKE_CURRENT_BINARY_DIR}/../../../extension/llm/custom_ops
@@ -116,7 +116,7 @@ endif()
116116
target_link_options_shared_lib(quantized_ops_lib)
117117
list(APPEND link_libraries quantized_kernels quantized_ops_lib)
118118

119-
if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
119+
if(EXECUTORCH_BUILD_EXTENSION_LLM)
120120
target_link_options_shared_lib(custom_ops)
121121
list(APPEND link_libraries custom_ops)
122122
endif()

examples/models/llama2/README.md

+4-4
Original file line numberDiff line numberDiff line change
@@ -291,7 +291,7 @@ The Wikitext results generated above used: `{max_seq_len: 2048, limit: 1000}`
291291
-DEXECUTORCH_BUILD_XNNPACK=ON \
292292
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
293293
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
294-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
294+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
295295
-Bcmake-out .
296296
297297
cmake --build cmake-out -j16 --target install --config Release
@@ -303,7 +303,7 @@ Note for Mac users: There's a known linking issue with Xcode 15.1. Refer to the
303303
cmake -DPYTHON_EXECUTABLE=python \
304304
-DCMAKE_INSTALL_PREFIX=cmake-out \
305305
-DCMAKE_BUILD_TYPE=Release \
306-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
306+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
307307
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
308308
-DEXECUTORCH_BUILD_XNNPACK=ON \
309309
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
@@ -345,7 +345,7 @@ cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake \
345345
-DEXECUTORCH_BUILD_XNNPACK=ON \
346346
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
347347
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
348-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
348+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
349349
-Bcmake-out-android .
350350

351351
cmake --build cmake-out-android -j16 --target install --config Release
@@ -362,7 +362,7 @@ cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake \
362362
-DEXECUTORCH_BUILD_XNNPACK=ON \
363363
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
364364
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
365-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
365+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
366366
-Bcmake-out-android/examples/models/llama2 \
367367
examples/models/llama2
368368

examples/models/llava/CMakeLists.txt

+2-2
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ if(CMAKE_TOOLCHAIN_IOS OR ANDROID)
9595
endif()
9696

9797
# custom ops library
98-
if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
98+
if(EXECUTORCH_BUILD_EXTENSION_LLM)
9999
add_subdirectory(
100100
${EXECUTORCH_ROOT}/extension/llm/custom_ops
101101
${CMAKE_CURRENT_BINARY_DIR}/../../../extension/llm/custom_ops
@@ -132,7 +132,7 @@ endif()
132132
target_link_options_shared_lib(quantized_ops_lib)
133133
list(APPEND link_libraries quantized_kernels quantized_ops_lib)
134134

135-
if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
135+
if(EXECUTORCH_BUILD_EXTENSION_LLM)
136136
target_link_options_shared_lib(custom_ops)
137137
list(APPEND link_libraries custom_ops)
138138
endif()

examples/models/phi-3-mini/README.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ python -m examples.models.phi-3-mini.export_phi-3-mini -c "4k" -s 128 -o phi-3-m
3232
-DEXECUTORCH_BUILD_XNNPACK=ON \
3333
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
3434
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
35-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
35+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
3636
-Bcmake-out .
3737
3838
cmake --build cmake-out -j16 --target install --config Release
@@ -42,7 +42,7 @@ python -m examples.models.phi-3-mini.export_phi-3-mini -c "4k" -s 128 -o phi-3-m
4242
cmake -DPYTHON_EXECUTABLE=python \
4343
-DCMAKE_INSTALL_PREFIX=cmake-out \
4444
-DCMAKE_BUILD_TYPE=Release \
45-
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
45+
-DEXECUTORCH_BUILD_EXTENSION_LLM=ON \
4646
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
4747
-DEXECUTORCH_BUILD_XNNPACK=ON \
4848
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \

extension/android/CMakeLists.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ if(TARGET vulkan_backend)
8080
list(APPEND link_libraries vulkan_backend)
8181
endif()
8282

83-
if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
83+
if(EXECUTORCH_BUILD_EXTENSION_LLM)
8484
add_subdirectory(
8585
${EXECUTORCH_ROOT}/extension/llm/custom_ops
8686
${CMAKE_CURRENT_BINARY_DIR}/../../extension/llm/custom_ops

extension/llm/custom_ops/CMakeLists.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ target_compile_options(
6969

7070
install(TARGETS custom_ops DESTINATION lib)
7171

72-
if(EXECUTORCH_BUILD_KERNELS_CUSTOM_AOT)
72+
if(EXECUTORCH_BUILD_EXTENSION_LLM_AOT)
7373
# Add a AOT library
7474
find_package(Torch CONFIG REQUIRED)
7575
add_library(

setup.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ def pybindings(cls) -> bool:
8888

8989
@classmethod
9090
def llama_custom_ops(cls) -> bool:
91-
return cls._is_env_enabled("EXECUTORCH_BUILD_KERNELS_CUSTOM_AOT", default=True)
91+
return cls._is_env_enabled("EXECUTORCH_BUILD_EXTENSION_LLM_AOT", default=True)
9292

9393
@classmethod
9494
def flatc(cls) -> bool:
@@ -542,8 +542,8 @@ def run(self):
542542

543543
if ShouldBuild.llama_custom_ops():
544544
cmake_args += [
545-
"-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON", # add llama sdpa ops to pybindings.
546-
"-DEXECUTORCH_BUILD_KERNELS_CUSTOM_AOT=ON",
545+
"-DEXECUTORCH_BUILD_EXTENSION_LLM=ON", # add llama sdpa ops to pybindings.
546+
"-DEXECUTORCH_BUILD_EXTENSION_LLM_AOT=ON",
547547
"-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON", # add quantized ops to pybindings.
548548
"-DEXECUTORCH_BUILD_KERNELS_QUANTIZED_AOT=ON",
549549
]

0 commit comments

Comments (0)