Upload 18 files
- README.md +12 -16
- all_results.json +10 -10
- eval_results.json +6 -6
- pytorch_model.bin +1 -1
- runs/Mar27_20-02-23_aleuse/1679965690.8306394/events.out.tfevents.1679965690.aleuse.8064.2 +3 -0
- runs/Mar27_20-02-23_aleuse/events.out.tfevents.1679965353.aleuse.8064.0 +2 -2
- runs/Mar27_20-02-23_aleuse/events.out.tfevents.1679969426.aleuse.8064.3 +3 -0
- train_results.json +4 -4
README.md
CHANGED
@@ -1,19 +1,13 @@
 ---
 license: apache-2.0
 tags:
-- image-classification
 - generated_from_trainer
 datasets:
 - beans
 metrics:
 - accuracy
-widget:
-- src: https://huggingface.co/platzi/platzi-vit-model-andres-galvis/blob/main/healthy.jpeg
-  example_title: Healthy
-- src: https://huggingface.co/platzi/platzi-vit-model-andres-galvis/blob/main/bean_rust.jpeg
-  example_title: Bean Rust
 model-index:
-- name: platzi-vit-
+- name: platzi-vit-model-andres-galvis
   results:
   - task:
       name: Image Classification
@@ -22,12 +16,12 @@ model-index:
       name: beans
       type: beans
       config: default
-      split:
+      split: validation
       args: default
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.
+      value: 0.9849624060150376
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -37,13 +31,8 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset.
 It achieves the following results on the evaluation set:
--
--
-- eval_runtime: 33.5958
-- eval_samples_per_second: 3.959
-- eval_steps_per_second: 0.506
-- epoch: 0.01
-- step: 1
+- Loss: 0.0227
+- Accuracy: 0.9850
 
 ## Model description
 
@@ -70,6 +59,13 @@ The following hyperparameters were used during training:
 - lr_scheduler_type: linear
 - num_epochs: 4
 
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss | Accuracy |
+|:-------------:|:-----:|:----:|:---------------:|:--------:|
+| 0.1343        | 3.85  | 500  | 0.0227          | 0.9850   |
+
+
 ### Framework versions
 
 - Transformers 4.27.3
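The card above describes a standard ViT image classifier, so the checkpoint can be exercised with the `transformers` image-classification pipeline. A minimal sketch, assuming the repo id `platzi/platzi-vit-model-andres-galvis` taken from the widget URLs and a local leaf photo as input:

```python
# Minimal inference sketch for the checkpoint described in this card.
# The repo id comes from the widget URLs above; "healthy.jpeg" is one of
# the example images formerly referenced by the widget.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="platzi/platzi-vit-model-andres-galvis",
)

preds = classifier("healthy.jpeg")
print(preds)  # e.g. [{"label": "healthy", "score": 0.99...}, ...]
```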
all_results.json
CHANGED
@@ -1,12 +1,12 @@
 {
-    "epoch": 0
-    "eval_accuracy": 0.
-    "eval_loss":
-    "eval_runtime":
-    "eval_samples_per_second": 3.
-    "eval_steps_per_second": 0.
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples_per_second": 1.
-    "train_steps_per_second": 0.
+    "epoch": 4.0,
+    "eval_accuracy": 0.9849624060150376,
+    "eval_loss": 0.02271120436489582,
+    "eval_runtime": 38.2076,
+    "eval_samples_per_second": 3.481,
+    "eval_steps_per_second": 0.445,
+    "train_loss": 0.12937975365381973,
+    "train_runtime": 3342.3263,
+    "train_samples_per_second": 1.237,
+    "train_steps_per_second": 0.156
 }
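These JSON files are the metric summaries that the `transformers` example scripts dump at the end of a run (via the Trainer's `save_metrics` helper, as far as I recall). Reading them back needs only the standard library; a sketch, assuming a local clone of the repo:

```python
# Sketch: read the metric summaries uploaded in this commit.
import json

with open("all_results.json") as f:
    results = json.load(f)

print(f"eval_accuracy: {results['eval_accuracy']:.4f}")  # 0.9850
print(f"train_loss:    {results['train_loss']:.4f}")     # 0.1294
```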
eval_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 0
-    "eval_accuracy": 0.
-    "eval_loss":
-    "eval_runtime":
-    "eval_samples_per_second": 3.
-    "eval_steps_per_second": 0.
+    "epoch": 4.0,
+    "eval_accuracy": 0.9849624060150376,
+    "eval_loss": 0.02271120436489582,
+    "eval_runtime": 38.2076,
+    "eval_samples_per_second": 3.481,
+    "eval_steps_per_second": 0.445
 }
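As a sanity check, the eval numbers are internally consistent: runtime times throughput recovers the size of the beans validation split, and the reported accuracy is exactly 131 correct predictions out of 133.

```python
# Consistency check using only values from eval_results.json.
eval_runtime = 38.2076            # seconds
eval_samples_per_second = 3.481

n_samples = round(eval_runtime * eval_samples_per_second)
print(n_samples)    # 133 (the beans validation split size)
print(131 / 133)    # 0.9849624060150376, matching eval_accuracy exactly
```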
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:989476412983050a4f8f2378f3315d3c07cfa5138cb42782a2e5d3b699bdaa79
 size 343269037
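The weights themselves live in Git LFS, so the commit only rewrites the three-line pointer file (spec version, sha256 of the content, byte size). A sketch of parsing such a pointer, with a hypothetical helper:

```python
# Sketch: parse a Git LFS pointer file such as pytorch_model.bin above.
# Format per the spec URL in the file: one "key value" pair per line.
def parse_lfs_pointer(path: str) -> dict[str, str]:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("pytorch_model.bin")
print(ptr["oid"])   # sha256:9894764129...
print(ptr["size"])  # 343269037
```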
runs/Mar27_20-02-23_aleuse/1679965690.8306394/events.out.tfevents.1679965690.aleuse.8064.2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2d0f690dfa8f9869dfda928edb15394abc929ea4097fd3b2b429ac61465ec3e
+size 5849
runs/Mar27_20-02-23_aleuse/events.out.tfevents.1679965353.aleuse.8064.0
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:02112fd73a0d48bef084453227c394c54c76f220599aeab6b996444c482e9678
+size 9321
runs/Mar27_20-02-23_aleuse/events.out.tfevents.1679969426.aleuse.8064.3
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:566b8176f49d5308c98d89369103b40804b08865dc896d3a8ca8addc995683b4
+size 363
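The files under `runs/` are TensorBoard event logs (also stored as LFS pointers). After a `git lfs pull`, they can be inspected with TensorBoard's event accumulator; a sketch, assuming the `tensorboard` package is installed and that the Trainer logged scalars under its usual tags:

```python
# Sketch: read scalars back out of the TensorBoard logs in runs/.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Mar27_20-02-23_aleuse")
acc.Reload()

print(acc.Tags()["scalars"])                # available scalar tags
for event in acc.Scalars("eval/accuracy"):  # tag name is an assumption
    print(event.step, event.value)
```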
train_results.json
CHANGED
@@ -1,7 +1,7 @@
 {
     "epoch": 4.0,
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples_per_second": 1.
-    "train_steps_per_second": 0.
+    "train_loss": 0.12937975365381973,
+    "train_runtime": 3342.3263,
+    "train_samples_per_second": 1.237,
+    "train_steps_per_second": 0.156
 }
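The training-side numbers line up the same way: throughput times runtime, divided by the four epochs, recovers the per-epoch sample count of the beans train split.

```python
# Consistency check using only values from train_results.json.
train_runtime = 3342.3263          # seconds
train_samples_per_second = 1.237
epochs = 4.0

print(round(train_samples_per_second * train_runtime / epochs))  # 1034 images/epoch
```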