AhmedSayeem committed
Commit 92a217e · 1 Parent(s): 4e74fbe

commit files to HF hub
README.md ADDED
@@ -0,0 +1,52 @@
+ ---
+ tags:
+ - image-classification
+ - pytorch
+ - huggingpics
+ metrics:
+ - accuracy
+
+ model-index:
+ - name: VIT_Basic
+   results:
+   - task:
+       name: Image Classification
+       type: image-classification
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.9107142686843872
+ ---
+
+ # VIT_Basic
+
+
+ Autogenerated by HuggingPics🤗🖼️
+
+ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb).
+
+ Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics).
+
+
+ ## Example Images
+
+
+ #### chairs
+
+ ![chairs](images/chairs.jpg)
+
+ #### hot dog
+
+ ![hot dog](images/hot_dog.jpg)
+
+ #### ice cream
+
+ ![ice cream](images/ice_cream.jpg)
+
+ #### ladders
+
+ ![ladders](images/ladders.jpg)
+
+ #### tables
+
+ ![tables](images/tables.jpg)
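
The card above is the standard HuggingPics template. As a minimal usage sketch, assuming this repo resolves on the Hub as `AhmedSayeem/VIT_Basic` (inferred from the commit author and model name, not stated in the diff):

```python
from transformers import pipeline

# "AhmedSayeem/VIT_Basic" is an assumed repo id, inferred from the commit
# author and the model name; substitute the actual Hub path if it differs.
classifier = pipeline("image-classification", model="AhmedSayeem/VIT_Basic")

# Classify one of the example images shipped with the repo.
print(classifier("images/ice_cream.jpg"))
# e.g. [{'label': 'ice cream', 'score': ...}, ...]
```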
config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "chairs",
+     "1": "hot dog",
+     "2": "ice cream",
+     "3": "ladders",
+     "4": "tables"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "chairs": "0",
+     "hot dog": "1",
+     "ice cream": "2",
+     "ladders": "3",
+     "tables": "4"
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.18.0"
+ }
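
The config pins the backbone (`google/vit-base-patch16-224-in21k`, a ViT-Base with 16×16 patches at 224×224 resolution) and carries the five-class label mapping. A short sketch of how that mapping surfaces after loading, again assuming the hypothetical `AhmedSayeem/VIT_Basic` repo id:

```python
from transformers import ViTForImageClassification

# Assumed repo id (see the note above); any checkpoint with this config works.
model = ViTForImageClassification.from_pretrained("AhmedSayeem/VIT_Basic")

# from_pretrained converts the JSON's string keys to ints, so predicted
# class indices decode directly to label names.
print(model.config.id2label)
# {0: 'chairs', 1: 'hot dog', 2: 'ice cream', 3: 'ladders', 4: 'tables'}
```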
images/chairs.jpg ADDED
images/hot_dog.jpg ADDED
images/ice_cream.jpg ADDED
images/ladders.jpg ADDED
images/tables.jpg ADDED
preprocessor_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "do_normalize": true,
+   "do_resize": true,
+   "feature_extractor_type": "ViTFeatureExtractor",
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "size": 224
+ }
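
This preprocessor resizes inputs to 224×224 with bilinear resampling (`"resample": 2` in PIL's numbering) and normalizes each channel to mean 0.5, std 0.5. A hedged sketch of the preprocessing step, using the era-appropriate `ViTFeatureExtractor` API and the same assumed repo id:

```python
from PIL import Image
from transformers import ViTFeatureExtractor

# Assumed repo id, as above.
extractor = ViTFeatureExtractor.from_pretrained("AhmedSayeem/VIT_Basic")

image = Image.open("images/hot_dog.jpg")  # one of the repo's example images
inputs = extractor(images=image, return_tensors="pt")

# Resized to 224x224 and normalized to roughly [-1, 1] per channel.
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```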
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ced9aad59730e16e3d3c88f0556aa4fe3a5a877dc8b34d72eff771535645692
+ size 343273393
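
The checkpoint itself is stored via Git LFS; the tracked file is just a pointer holding the object's SHA-256 and byte size. A minimal sketch for verifying a locally pulled copy against that pointer, assuming `git lfs pull` (or a Hub download) has already materialized the real file:

```python
import hashlib
from pathlib import Path

# Hypothetical local path to the downloaded checkpoint.
path = Path("pytorch_model.bin")

# Stream in 1 MiB chunks so the ~343 MB file is never fully in memory.
digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert path.stat().st_size == 343273393  # size from the pointer file
assert digest.hexdigest() == (
    "5ced9aad59730e16e3d3c88f0556aa4fe3a5a877dc8b34d72eff771535645692"
)  # oid from the pointer file
```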
runs/events.out.tfevents.1649962355.0228f6d7e57c.80.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f576672d77133c0ef993fce56124af0bb8ad2bf0ae086ec11734e83608329ca
+ size 1444
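
The event file under `runs/` is a TensorBoard log from the training run (at 1,444 bytes it can only hold a handful of scalar summaries). A sketch for inspecting it locally, assuming the repo has been cloned so that `runs/` exists:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point at the cloned repo's runs/ directory (assumed local path).
acc = EventAccumulator("runs")
acc.Reload()

# List whatever scalar tags the trainer logged (e.g. loss, accuracy).
print(acc.Tags()["scalars"])
```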