Skip to content

Commit e58ce7e

Browse files
committed
Rename phi-4-mini to phi_4_mini
1 parent 8cd1b93 commit e58ce7e

File tree

9 files changed

+8
-8
lines changed

9 files changed

+8
-8
lines changed

.ci/scripts/gather_test_models.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@
3333
"dl3": "linux.4xlarge.memory",
3434
"emformer_join": "linux.4xlarge.memory",
3535
"emformer_predict": "linux.4xlarge.memory",
36-
"phi-4-mini": "linux.4xlarge.memory",
36+
"phi_4_mini": "linux.4xlarge.memory",
3737
}
3838
}
3939

.ci/scripts/test_model.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -100,11 +100,11 @@ test_model() {
100100
rm "./${MODEL_NAME}.pte"
101101
return # Skip running with portable executor runner since portable doesn't support Qwen's biased linears.
102102
fi
103-
if [[ "${MODEL_NAME}" == "phi-4-mini" ]]; then
103+
if [[ "${MODEL_NAME}" == "phi_4_mini" ]]; then
104104
# Install requirements for export_llama
105105
bash examples/models/llama/install_requirements.sh
106106
# Test export_llama script: python3 -m examples.models.llama.export_llama.
107-
"${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -c examples/models/llama/params/demo_rand_params.pth -p examples/models/phi-4-mini/config.json
107+
"${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -c examples/models/llama/params/demo_rand_params.pth -p examples/models/phi_4_mini/config.json
108108
run_portable_executor_runner
109109
rm "./${MODEL_NAME}.pte"
110110
return

.github/workflows/pull.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,7 @@ jobs:
106106
- model: emformer_join
107107
backend: xnnpack-quantization-delegation
108108
runner: linux.4xlarge.memory
109-
- model: phi-4-mini
109+
- model: phi_4_mini
110110
backend: portable
111111
runner: linux.4xlarge.memory
112112
- model: llama3_2_vision_encoder

.github/workflows/trunk.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ jobs:
7272
backend: portable
7373
- model: softmax
7474
backend: portable
75-
- model: phi-4-mini
75+
- model: phi_4_mini
7676
backend: portable
7777
- model: qwen2_5
7878
backend: portable

examples/models/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ class Model(str, Enum):
3636
Llava = "llava"
3737
EfficientSam = "efficient_sam"
3838
Qwen25 = "qwen2_5"
39-
Phi4Mini = "phi-4-mini"
39+
Phi4Mini = "phi_4_mini"
4040

4141
def __str__(self) -> str:
4242
return self.value
@@ -80,7 +80,7 @@ def __str__(self) -> str:
8080
str(Model.Llava): ("llava", "LlavaModel"),
8181
str(Model.EfficientSam): ("efficient_sam", "EfficientSAM"),
8282
str(Model.Qwen25): ("qwen2_5", "Qwen2_5Model"),
83-
str(Model.Phi4Mini): ("phi-4-mini", "Phi4MiniModel"),
83+
str(Model.Phi4Mini): ("phi_4_mini", "Phi4MiniModel"),
8484
}
8585

8686
__all__ = [

examples/models/llama/export_llama_lib.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@
9595
"llama3_2",
9696
"static_llama",
9797
"qwen2_5",
98-
"phi-4-mini",
98+
"phi_4_mini",
9999
]
100100
TORCHTUNE_DEFINED_MODELS = ["llama3_2_vision"]
101101

0 commit comments

Comments
 (0)