Commit 5dce710

Add vit_little in12k + in12k-ft-in1k weights

1 parent: 3c0283f

File tree: 1 file changed, +18 −0 lines

timm/models/vision_transformer.py

@@ -1872,6 +1872,13 @@ def _cfg(url: str = '', **kwargs) -> Dict[str, Any]:
     'vit_pwee_patch16_reg1_gap_256.sbb_in1k': _cfg(
         hf_hub_id='timm/',
         input_size=(3, 256, 256), crop_pct=0.95),
+    'vit_little_patch16_reg1_gap_256.sbb_in12k_ft_in1k': _cfg(
+        hf_hub_id='timm/',
+        input_size=(3, 256, 256), crop_pct=0.95),
+    'vit_little_patch16_reg1_gap_256.sbb_in12k': _cfg(
+        hf_hub_id='timm/',
+        num_classes=11821,
+        input_size=(3, 256, 256), crop_pct=0.95),
     'vit_little_patch16_reg4_gap_256.sbb_in1k': _cfg(
         hf_hub_id='timm/',
         input_size=(3, 256, 256), crop_pct=0.95),
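
For reference, a minimal usage sketch of the two new pretrained tags, assuming these weights are published on the Hugging Face Hub under the timm org (as the hf_hub_id='timm/' entries imply). The .sbb_in12k checkpoint keeps its 11821-class ImageNet-12k head per the cfg above, while the .sbb_in12k_ft_in1k fine-tune uses the default 1000-class head:

import timm

# ImageNet-12k pretrain -> ImageNet-1k fine-tune (default 1000-class head).
model_ft = timm.create_model(
    'vit_little_patch16_reg1_gap_256.sbb_in12k_ft_in1k', pretrained=True)

# Raw ImageNet-12k pretrain; num_classes=11821 comes from its cfg entry.
model_12k = timm.create_model(
    'vit_little_patch16_reg1_gap_256.sbb_in12k', pretrained=True)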
@@ -2975,6 +2982,17 @@ def vit_pwee_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
     return model


+@register_model
+def vit_little_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
+    model_args = dict(
+        patch_size=16, embed_dim=320, depth=14, num_heads=5, init_values=1e-5, mlp_ratio=5.6,
+        class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg',
+    )
+    model = _create_vision_transformer(
+        'vit_little_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
+    return model
+
+
 @register_model
 def vit_little_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
     model_args = dict(
0 commit comments