EnhancerModel / config.json
{
"_name_or_path": "ayjays132/CustomGPT2Conversational",
"activation_function": "gelu_new",
"advanced_model_options": {
"contextual_embeddings": {
"approaches": [
"contextual_attention_mechanisms",
"semantic_embedding_regularization"
],
"enable": true
},
"dynamic_adaptation": {
"enable": true,
"techniques": [
"adaptive_layer_dropping",
"dynamic_context_window"
]
},
"innovative_neuron_growth": {
"enable": true,
"strategies": [
"selective_neuron_pruning",
"progressive_neuron_expansion"
]
},
"memory_optimization": {
"enable": true,
"methods": [
"gradient_checkpointing",
"memory-efficient_attention"
]
},
"meta_learning": {
"approaches": [
"meta_learning_rate_adjustment",
"online_adaptation"
],
"enable": true
},
"secret_advanced_options": {
"adaptive_token_embedding": {
"enable": true,
"strategies": [
"dynamic_embedding_resizing",
"contextual_embedding_scaling"
]
},
"future_context_prediction": {
"enable": true,
"techniques": [
"lookahead_context_integration",
"predictive_attention_mechanisms"
]
},
"multi_modal_integration": {
"enable": true,
"methods": [
"text_image_alignment",
"cross_modal_attention"
]
}
}
},
"architectures": [
"GPT2LMHeadModel"
],
"attn_pdrop": 0.1,
"bos_token_id": 50267,
"context_window": 20,
"contextual_embedding_dim": 1024,
"device": "cuda",
"dropout_rate": 0.1,
"early_stopping": true,
"embd_pdrop": 0.1,
"embedding_dim": 1024,
"eos_token_id": 50267,
"hidden_dim": 1024,
"initializer_range": 0.02,
"innovative_growth_capacity": 50000,
"integration_settings": {
"config_name": "config.json",
"load_from_transformers": true,
"pytorch_dump_folder_path": "./model_save",
"pytorch_model_bin_name": "pytorch_model.bin"
},
"layer_norm_epsilon": 1e-05,
"max_length": 1024,
"max_memory_size": 100000,
"max_neurons": 100,
"meta_learning_rate": 0.001,
"min_length": 50,
"model_type": "gpt2",
"n_ctx": 1024,
"n_embd": 1024,
"n_head": 16,
"n_inner": null,
"n_layer": 24,
"n_positions": 1024,
"no_repeat_ngram_size": 2,
"num_beams": 5,
"num_embeddings": 50257,
"num_heads": 64,
"num_layers": 24,
"output_attentions": true,
"output_hidden_states": true,
"pad_token_id": 50267,
"reorder_and_upcast_attn": false,
"resid_pdrop": 0.1,
"scale_attn_by_inverse_layer_idx": false,
"scale_attn_weights": true,
"sep_token_id": 50267,
"special_tokens": {
"additional_special_tokens": [
"<greeting>",
"<farewell>",
"<thank>",
"<apology>"
],
"bos_token": "<bos>",
"cls_token": "<cls>",
"eos_token": "<eos>",
"mask_token": "<mask>",
"pad_token": "<pad>",
"sep_token": "<sep>",
"unk_token": "<unk>"
},
"state_shape": null,
"summary_activation": null,
"summary_first_dropout": 0.1,
"summary_proj_to_labels": true,
"summary_type": "cls_index",
"summary_use_proj": true,
"target_q_model": null,
"task_specific_params": {
"text-generation": {
"do_sample": true,
"early_stopping": true,
"length_penalty": 1.0,
"max_length": 2048,
"min_length": 64,
"no_repeat_ngram_size": 2,
"num_beams": 8,
"num_return_sequences": 3,
"repetition_penalty": 1.2,
"temperature": 0.9,
"top_k": 50,
"top_p": 0.95
}
},
"torch_dtype": "float32",
"transformers_version": "4.44.0",
"use_cache": true,
"vocab_size": 50257
}
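
Everything under "advanced_model_options" and "secret_advanced_options" is custom metadata: GPT2LMHeadModel itself interprets none of these keys. transformers carries unrecognized config.json entries through as plain attributes on the loaded config object, so any behavior they describe has to come from the author's own training code. A minimal check in Python (the repo id is taken from "_name_or_path" above):

from transformers import AutoConfig

config = AutoConfig.from_pretrained("ayjays132/CustomGPT2Conversational")

# Unrecognized keys survive loading as ordinary config attributes.
print(config.advanced_model_options["memory_optimization"])
# -> {'enable': True, 'methods': ['gradient_checkpointing', 'memory_efficient_attention']}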
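
The "special_tokens" block declares seven named tokens plus four additional ones, eleven in total beyond GPT-2's base vocabulary of 50257; that is why bos_token_id, eos_token_id, pad_token_id, and sep_token_id all point at id 50267 (note that the "vocab_size" field above still reads 50257, so the resize presumably happens at load time). A sketch of wiring this up with the standard transformers API, assuming the checkpoint follows the usual repository layout:

from transformers import GPT2LMHeadModel, GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("ayjays132/CustomGPT2Conversational")
model = GPT2LMHeadModel.from_pretrained("ayjays132/CustomGPT2Conversational")

# Register the tokens declared in the "special_tokens" block.
tokenizer.add_special_tokens({
    "bos_token": "<bos>",
    "eos_token": "<eos>",
    "pad_token": "<pad>",
    "sep_token": "<sep>",
    "cls_token": "<cls>",
    "mask_token": "<mask>",
    "unk_token": "<unk>",
    "additional_special_tokens": ["<greeting>", "<farewell>", "<thank>", "<apology>"],
})

# Grow the embedding matrix so the new ids (50257-50267) resolve.
model.resize_token_embeddings(len(tokenizer))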
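
Of the listed optimizations, "gradient_checkpointing" maps directly onto a stock transformers call, and the paths in "integration_settings" match what save_pretrained writes. Continuing from the snippet above (safe_serialization=False is an assumption here, used only so the output filename matches the "pytorch_model_bin_name" entry; current transformers versions default to safetensors):

# Trade compute for memory during training, per "memory_optimization.methods".
model.gradient_checkpointing_enable()
model.config.use_cache = False  # the KV cache is incompatible with checkpointing

# Write the checkpoint where "integration_settings" points.
model.save_pretrained("./model_save", safe_serialization=False)  # emits pytorch_model.bin
tokenizer.save_pretrained("./model_save")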
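
The "task_specific_params.text-generation" block matches generate() keyword arguments one for one, so it can be passed through unmodified. One caveat: its max_length of 2048 exceeds the model's n_positions of 1024, so the sketch below (again reusing model and tokenizer from above; the prompt is illustrative) caps it at the position limit:

import torch

gen_params = dict(model.config.task_specific_params["text-generation"])
gen_params["max_length"] = min(gen_params["max_length"], model.config.n_positions)

inputs = tokenizer("<bos>Hello there! How are you today?", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        **gen_params,
        pad_token_id=tokenizer.pad_token_id,
    )

# num_return_sequences = 3, so three candidate replies come back.
for seq in output_ids:
    print(tokenizer.decode(seq, skip_special_tokens=True))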