farshadafx committed
Commit 4e7e859
1 Parent(s): 8cf7595

End of training

README.md CHANGED
@@ -1,6 +1,6 @@
  ---
- license: mit
- base_model: gpt2
+ license: apache-2.0
+ base_model: distilgpt2
  tags:
  - generated_from_trainer
  model-index:
@@ -13,9 +13,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  # LEDA-v1
 
- This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the None dataset.
- It achieves the following results on the evaluation set:
- - Loss: 5.3486
+ This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on an unknown dataset.
 
  ## Model description
 
@@ -34,28 +32,25 @@ More information needed
  ### Training hyperparameters
 
  The following hyperparameters were used during training:
- - learning_rate: 0.0005
- - train_batch_size: 16
- - eval_batch_size: 16
+ - learning_rate: 0.002
+ - train_batch_size: 64
+ - eval_batch_size: 32
  - seed: 42
  - gradient_accumulation_steps: 8
- - total_train_batch_size: 128
+ - total_train_batch_size: 512
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: cosine
  - lr_scheduler_warmup_steps: 1000
- - num_epochs: 1
+ - num_epochs: 4
  - mixed_precision_training: Native AMP
 
  ### Training results
 
- | Training Loss | Epoch | Step | Validation Loss |
- |:-------------:|:-----:|:----:|:---------------:|
- | 5.3987        | 0.8   | 5000 | 5.3486          |
 
 
  ### Framework versions
 
- - Transformers 4.35.0
+ - Transformers 4.35.2
  - Pytorch 2.0.0
  - Datasets 2.1.0
- - Tokenizers 0.14.1
+ - Tokenizers 0.15.0
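The updated hyperparameter list maps fairly directly onto a `transformers` `TrainingArguments` object. The sketch below is only an illustration of how such a run might have been configured; `output_dir` and anything not listed in the README are assumptions, not values taken from this commit.

```python
from transformers import TrainingArguments

# Minimal sketch reflecting the hyperparameters listed in the new README.
# output_dir is hypothetical; everything else mirrors the list above.
training_args = TrainingArguments(
    output_dir="LEDA-v1",               # assumed output directory
    learning_rate=2e-3,                 # learning_rate: 0.002
    per_device_train_batch_size=64,
    per_device_eval_batch_size=32,
    gradient_accumulation_steps=8,      # 64 * 8 = 512 total train batch size
    seed=42,
    num_train_epochs=4,
    lr_scheduler_type="cosine",
    warmup_steps=1000,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    fp16=True,                          # "Native AMP" mixed-precision training
)
```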
config.json CHANGED
@@ -1,5 +1,6 @@
  {
- "_name_or_path": "gpt2",
+ "_name_or_path": "distilgpt2",
+ "_num_labels": 1,
  "activation_function": "gelu_new",
  "architectures": [
  "GPT2LMHeadModel"
@@ -8,14 +9,20 @@
  "bos_token_id": 0,
  "embd_pdrop": 0.1,
  "eos_token_id": 0,
+ "id2label": {
+ "0": "LABEL_0"
+ },
  "initializer_range": 0.02,
+ "label2id": {
+ "LABEL_0": 0
+ },
  "layer_norm_epsilon": 1e-05,
  "model_type": "gpt2",
  "n_ctx": 160,
  "n_embd": 768,
  "n_head": 12,
  "n_inner": null,
- "n_layer": 12,
+ "n_layer": 6,
  "n_positions": 1024,
  "reorder_and_upcast_attn": false,
  "resid_pdrop": 0.1,
@@ -33,7 +40,7 @@
  }
  },
  "torch_dtype": "float32",
- "transformers_version": "4.35.0",
+ "transformers_version": "4.35.2",
  "use_cache": true,
  "vocab_size": 3372
  }
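The updated config.json describes a distilgpt2-shaped model: 6 transformer layers instead of 12, 768-dimensional embeddings, 12 attention heads, and a small 3,372-token vocabulary. A rough sketch of instantiating that architecture from scratch, assuming values not shown below keep their defaults:

```python
from transformers import GPT2Config, GPT2LMHeadModel

# Approximate reconstruction of the updated config.json; fields not set here
# (id2label, label2id, attention dropout, etc.) keep their library defaults.
config = GPT2Config(
    vocab_size=3372,
    n_positions=1024,        # the file also records "n_ctx": 160 (training context length)
    n_embd=768,
    n_layer=6,               # halved from the previous 12-layer config
    n_head=12,
    n_inner=None,
    activation_function="gelu_new",
    resid_pdrop=0.1,
    embd_pdrop=0.1,
    layer_norm_epsilon=1e-5,
    initializer_range=0.02,
    bos_token_id=0,
    eos_token_id=0,
)
model = GPT2LMHeadModel(config)
print(f"{model.num_parameters():,} parameters")  # roughly 46M with these settings
```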
generation_config.json CHANGED
@@ -2,5 +2,5 @@
  "_from_model_config": true,
  "bos_token_id": 0,
  "eos_token_id": 0,
- "transformers_version": "4.35.0"
+ "transformers_version": "4.35.2"
  }
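generation_config.json only pins the bos/eos token ids (both 0), so decoding settings have to be supplied at call time. A hypothetical usage sketch; the repo id, prompt, and sampling parameters below are placeholders, not taken from this commit:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "farshadafx/LEDA-v1"  # hypothetical repo id; substitute the real one

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

inputs = tokenizer("your prompt here", return_tensors="pt")
# Sampling settings are illustrative; the generation config does not specify any.
outputs = model.generate(**inputs, max_new_tokens=50, do_sample=True, top_p=0.9)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```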
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:59f5a1093f2bf9e2b6dd92a4a01cd190c9d9a12e641045b28238dc9a18db6740
- size 353743488
+ oid sha256:a4d9989af044c739f698ffdaa3d129f142cc734a374b233d53b6b1291a7a03de
+ size 183627208
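The change in the LFS pointer is consistent with the halved architecture: at 4 bytes per float32 weight, the old and new payloads correspond to roughly 88M and 46M parameters respectively. A quick back-of-the-envelope check:

```python
# safetensors stores float32 weights at 4 bytes each (plus a small header),
# so file size / 4 approximates the parameter count.
old_params = 353_743_488 / 4   # ~88.4M, consistent with the earlier 12-layer config
new_params = 183_627_208 / 4   # ~45.9M, consistent with the new 6-layer config
print(f"old ~ {old_params / 1e6:.1f}M params, new ~ {new_params / 1e6:.1f}M params")
```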
runs/Dec10_17-54-16_92ff5bf1e590/events.out.tfevents.1702230867.92ff5bf1e590.42.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4cc50bbd82364851fd8d8de047a7a1d423a7f4320451abf2e855e359f5f215c
+ size 4882
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:60bc919dced4a5b31c726814e3730056573c17ad28afca8e00414d39cd6a6ae2
+ oid sha256:5f8d0caf08fbdd7d62143470f60d51e8807ae2c5bf81dcac327bbf51e243a3df
  size 4091