{
"architectures": [
"SiglipForImageClassification"
],
"id2label": {
"0": "Accessories",
"1": "Apparel Set",
"2": "Bags",
"3": "Bath and Body",
"4": "Beauty Accessories",
"5": "Belts",
"6": "Bottomwear",
"7": "Cufflinks",
"8": "Dress",
"9": "Eyes",
"10": "Eyewear",
"11": "Flip Flops",
"12": "Fragrance",
"13": "Free Gifts",
"14": "Gloves",
"15": "Hair",
"16": "Headwear",
"17": "Home Furnishing",
"18": "Innerwear",
"19": "Jewellery",
"20": "Lips",
"21": "Loungewear and Nightwear",
"22": "Makeup",
"23": "Mufflers",
"24": "Nails",
"25": "Perfumes",
"26": "Sandal",
"27": "Saree",
"28": "Scarves",
"29": "Shoe Accessories",
"30": "Shoes",
"31": "Skin",
"32": "Skin Care",
"33": "Socks",
"34": "Sports Accessories",
"35": "Sports Equipment",
"36": "Stoles",
"37": "Ties",
"38": "Topwear",
"39": "Umbrellas",
"40": "Vouchers",
"41": "Wallets",
"42": "Watches",
"43": "Water Bottle",
"44": "Wristbands"
},
"initializer_factor": 1.0,
"label2id": {
"Accessories": 0,
"Apparel Set": 1,
"Bags": 2,
"Bath and Body": 3,
"Beauty Accessories": 4,
"Belts": 5,
"Bottomwear": 6,
"Cufflinks": 7,
"Dress": 8,
"Eyes": 9,
"Eyewear": 10,
"Flip Flops": 11,
"Fragrance": 12,
"Free Gifts": 13,
"Gloves": 14,
"Hair": 15,
"Headwear": 16,
"Home Furnishing": 17,
"Innerwear": 18,
"Jewellery": 19,
"Lips": 20,
"Loungewear and Nightwear": 21,
"Makeup": 22,
"Mufflers": 23,
"Nails": 24,
"Perfumes": 25,
"Sandal": 26,
"Saree": 27,
"Scarves": 28,
"Shoe Accessories": 29,
"Shoes": 30,
"Skin": 31,
"Skin Care": 32,
"Socks": 33,
"Sports Accessories": 34,
"Sports Equipment": 35,
"Stoles": 36,
"Ties": 37,
"Topwear": 38,
"Umbrellas": 39,
"Vouchers": 40,
"Wallets": 41,
"Watches": 42,
"Water Bottle": 43,
"Wristbands": 44
},
"model_type": "siglip",
"problem_type": "single_label_classification",
"text_config": {
"attention_dropout": 0.0,
"hidden_act": "gelu_pytorch_tanh",
"hidden_size": 768,
"intermediate_size": 3072,
"layer_norm_eps": 1e-06,
"max_position_embeddings": 64,
"model_type": "siglip_text_model",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"projection_size": 768,
"torch_dtype": "float32",
"vocab_size": 256000
},
"torch_dtype": "float32",
"transformers_version": "4.50.3",
"vision_config": {
"attention_dropout": 0.0,
"hidden_act": "gelu_pytorch_tanh",
"hidden_size": 768,
"image_size": 224,
"intermediate_size": 3072,
"layer_norm_eps": 1e-06,
"model_type": "siglip_vision_model",
"num_attention_heads": 12,
"num_channels": 3,
"num_hidden_layers": 12,
"patch_size": 16,
"torch_dtype": "float32"
}
}
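
A minimal usage sketch for this config, assuming the checkpoint it describes is published on the Hugging Face Hub; the repo id below is a placeholder, not something stated in the config. The processor resizes inputs to 224x224 per `vision_config`, and the classification head emits logits over the 45 labels in `id2label`.

```python
# Sketch only: repo id is a placeholder; swap in the actual checkpoint name.
import torch
from PIL import Image
from transformers import AutoImageProcessor, SiglipForImageClassification

repo_id = "<username>/<fashion-product-classifier>"  # placeholder

processor = AutoImageProcessor.from_pretrained(repo_id)
model = SiglipForImageClassification.from_pretrained(repo_id)

image = Image.open("product.jpg").convert("RGB")       # any 3-channel product photo
inputs = processor(images=image, return_tensors="pt")  # resized to 224x224 as in vision_config

with torch.no_grad():
    logits = model(**inputs).logits                    # shape: (1, 45)

predicted_id = logits.argmax(-1).item()
print(model.config.id2label[predicted_id])             # e.g. "Topwear"
```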