@@ -1872,6 +1872,13 @@ def _cfg(url: str = '', **kwargs) -> Dict[str, Any]:
     'vit_pwee_patch16_reg1_gap_256.sbb_in1k': _cfg(
         hf_hub_id='timm/',
         input_size=(3, 256, 256), crop_pct=0.95),
+    'vit_little_patch16_reg1_gap_256.sbb_in12k_ft_in1k': _cfg(
+        hf_hub_id='timm/',
+        input_size=(3, 256, 256), crop_pct=0.95),
+    'vit_little_patch16_reg1_gap_256.sbb_in12k': _cfg(
+        hf_hub_id='timm/',
+        num_classes=11821,
+        input_size=(3, 256, 256), crop_pct=0.95),
     'vit_little_patch16_reg4_gap_256.sbb_in1k': _cfg(
         hf_hub_id='timm/',
         input_size=(3, 256, 256), crop_pct=0.95),
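For context, a minimal sketch of how the new pretrained configs above would be consumed, assuming a timm build that includes this commit (the model name is otherwise not registered). The '.sbb_in12k' tag carries the num_classes=11821 override, and its input_size/crop_pct flow into the standard timm data pipeline via resolve_model_data_config and create_transform:

import timm
from timm.data import resolve_model_data_config, create_transform

# Hypothetical usage; assumes a timm version that ships the configs above.
# The '.sbb_in12k' tag selects the ImageNet-12k pretrained config, whose
# num_classes=11821 override becomes the model's default head size.
model = timm.create_model('vit_little_patch16_reg1_gap_256.sbb_in12k', pretrained=False)
print(model.num_classes)  # 11821

# input_size and crop_pct from the _cfg entry feed the eval transform.
data_config = resolve_model_data_config(model)
transform = create_transform(**data_config, is_training=False)
print(data_config['input_size'], data_config['crop_pct'])  # (3, 256, 256) 0.95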
@@ -2975,6 +2982,17 @@ def vit_pwee_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
     return model


+@register_model
+def vit_little_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
+    model_args = dict(
+        patch_size=16, embed_dim=320, depth=14, num_heads=5, init_values=1e-5, mlp_ratio=5.6,
+        class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg',
+    )
+    model = _create_vision_transformer(
+        'vit_little_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
+    return model
+
+
 @register_model
 def vit_little_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
     model_args = dict(
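A short smoke test of the newly registered entrypoint, again a sketch assuming a timm build that includes this commit. The token count follows from the model args: a 256x256 input at patch_size=16 gives 16x16=256 patch tokens, plus one register token (reg_tokens=1, no class token), each of width embed_dim=320:

import torch
import timm

# Sketch only; assumes the vit_little_patch16_reg1_gap_256 entrypoint
# registered above is available in the installed timm version.
model = timm.create_model('vit_little_patch16_reg1_gap_256', pretrained=False)
model.eval()

x = torch.randn(1, 3, 256, 256)
with torch.no_grad():
    features = model.forward_features(x)  # (1, 257, 320): 256 patches + 1 reg token
    logits = model(x)                     # (1, 1000): default ImageNet-1k head
print(features.shape, logits.shape)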