Upload folder using huggingface_hub
- .gitattributes +5 -32
- README.md +52 -1
- config.json +179 -0
- generator_tokenizer/merges.txt +0 -0
- generator_tokenizer/special_tokens_map.json +1 -0
- generator_tokenizer/tokenizer_config.json +1 -0
- generator_tokenizer/vocab.json +0 -0
- question_encoder_tokenizer/special_tokens_map.json +1 -0
- question_encoder_tokenizer/tokenizer_config.json +1 -0
- question_encoder_tokenizer/vocab.txt +0 -0
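
A folder upload like this one can be reproduced with the `huggingface_hub` client. The sketch below is illustrative only: the local folder path is hypothetical, and pushing to `facebook/rag-sequence-base` requires write access to that repo.

```python
from huggingface_hub import HfApi

api = HfApi()

# Hypothetical local checkout; adjust folder_path and repo_id to your own setup.
api.upload_folder(
    folder_path="./rag-sequence-base",
    repo_id="facebook/rag-sequence-base",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```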
.gitattributes
CHANGED
@@ -1,35 +1,8 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,3 +1,54 @@
 ---
-license:
+license: apache-2.0
+thumbnail: https://huggingface.co/front/thumbnails/facebook.png
 ---
+## RAG
+
+This is a non-finetuned version of the RAG-Sequence model of the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/pdf/2005.11401.pdf)
+by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.
+
+RAG consists of a *question encoder*, a *retriever* and a *generator*. The retriever should be a `RagRetriever` instance. The *question encoder* can be any model that can be loaded with `AutoModel`, and the *generator* can be any model that can be loaded with `AutoModelForSeq2SeqLM`.
+
+This model is a non-finetuned RAG-Sequence model and was created as follows:
+
+```python
+from transformers import RagTokenizer, RagRetriever, RagSequenceForGeneration, AutoTokenizer
+
+model = RagSequenceForGeneration.from_pretrained_question_encoder_generator("facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large")
+
+question_encoder_tokenizer = AutoTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
+generator_tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
+
+tokenizer = RagTokenizer(question_encoder_tokenizer, generator_tokenizer)
+model.config.use_dummy_dataset = True
+model.config.index_name = "exact"
+retriever = RagRetriever(model.config, question_encoder_tokenizer, generator_tokenizer)
+
+model.save_pretrained("./")
+tokenizer.save_pretrained("./")
+retriever.save_pretrained("./")
+```
+
+Note that the model is *uncased*, so all upper-case input letters are converted to lower-case.
+
+## Usage:
+
+*Note*: the model uses the *dummy* retriever by default. Better results are obtained by using the full retriever,
+by setting `config.index_name="legacy"` and `config.use_dummy_dataset=False`.
+The model can be fine-tuned as follows:
+
+```python
+from transformers import RagTokenizer, RagRetriever, RagSequenceForGeneration
+
+tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-base")
+retriever = RagRetriever.from_pretrained("facebook/rag-sequence-base")
+model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-base", retriever=retriever)
+
+input_dict = tokenizer.prepare_seq2seq_batch("who holds the record in 100m freestyle", "michael phelps", return_tensors="pt")
+
+outputs = model(input_dict["input_ids"], labels=input_dict["labels"])
+
+loss = outputs.loss
+
+# train on loss
+```
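
As a complement to the snippets above, here is a minimal, untested sketch of generation with the full retriever enabled. Note that the full `wiki_dpr` legacy index is a very large download, and this checkpoint is non-finetuned, so outputs are not expected to be meaningful before training.

```python
from transformers import RagRetriever, RagSequenceForGeneration, RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-base")

# Full retriever instead of the dummy one; triggers a large index download on first use.
retriever = RagRetriever.from_pretrained(
    "facebook/rag-sequence-base", index_name="legacy", use_dummy_dataset=False
)
model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-base", retriever=retriever)

input_dict = tokenizer.prepare_seq2seq_batch(
    "who holds the record in 100m freestyle", return_tensors="pt"
)
generated = model.generate(input_ids=input_dict["input_ids"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```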
config.json
ADDED
@@ -0,0 +1,179 @@
{
  "architectures": [
    "RagSequenceForGeneration"
  ],
  "dataset": "wiki_dpr",
  "dataset_split": "train",
  "do_deduplication": true,
  "do_marginalize": false,
  "doc_sep": " // ",
  "exclude_bos_score": false,
  "generator": {
    "_num_labels": 3,
    "activation_dropout": 0.0,
    "activation_function": "gelu",
    "add_bias_logits": false,
    "add_cross_attention": false,
    "add_final_layer_norm": false,
    "architectures": [
      "BartModel",
      "BartForMaskedLM",
      "BartForSequenceClassification"
    ],
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "bos_token_id": 0,
    "chunk_size_feed_forward": 0,
    "classif_dropout": 0.0,
    "d_model": 1024,
    "decoder_attention_heads": 16,
    "decoder_ffn_dim": 4096,
    "decoder_layerdrop": 0.0,
    "decoder_layers": 12,
    "decoder_start_token_id": 2,
    "do_sample": false,
    "dropout": 0.1,
    "early_stopping": false,
    "encoder_attention_heads": 16,
    "encoder_ffn_dim": 4096,
    "encoder_layerdrop": 0.0,
    "encoder_layers": 12,
    "eos_token_id": 2,
    "extra_pos_embeddings": 2,
    "finetuning_task": null,
    "force_bos_token_to_be_generated": false,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1",
      "2": "LABEL_2"
    },
    "init_std": 0.02,
    "is_decoder": false,
    "is_encoder_decoder": true,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1,
      "LABEL_2": 2
    },
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 1024,
    "min_length": 0,
    "model_type": "bart",
    "no_repeat_ngram_size": 0,
    "normalize_before": false,
    "normalize_embedding": true,
    "num_beams": 1,
    "num_hidden_layers": 12,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_past": false,
    "pad_token_id": 1,
    "prefix": " ",
    "pruned_heads": {},
    "repetition_penalty": 1.0,
    "return_dict": false,
    "scale_embedding": false,
    "static_position_embeddings": false,
    "task_specific_params": {
      "summarization": {
        "early_stopping": true,
        "length_penalty": 2.0,
        "max_length": 142,
        "min_length": 56,
        "no_repeat_ngram_size": 3,
        "num_beams": 4
      }
    },
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torchscript": false,
    "use_bfloat16": false,
    "use_cache": true,
    "vocab_size": 50265,
    "xla_device": null
  },
  "index_name": "exact",
  "index_path": null,
  "is_encoder_decoder": true,
  "label_smoothing": 0.0,
  "max_combined_length": 300,
  "model_type": "rag",
  "n_docs": 5,
  "output_retrieved": false,
  "passages_path": null,
  "question_encoder": {
    "add_cross_attention": false,
    "architectures": [
      "DPRQuestionEncoder"
    ],
    "attention_probs_dropout_prob": 0.1,
    "bad_words_ids": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "decoder_start_token_id": null,
    "do_sample": false,
    "early_stopping": false,
    "eos_token_id": null,
    "finetuning_task": null,
    "gradient_checkpointing": false,
    "hidden_act": "gelu",
    "hidden_dropout_prob": 0.1,
    "hidden_size": 768,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_range": 0.02,
    "intermediate_size": 3072,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-12,
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 512,
    "min_length": 0,
    "model_type": "dpr",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 12,
    "num_beams": 1,
    "num_hidden_layers": 12,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "pad_token_id": 0,
    "prefix": null,
    "projection_dim": 0,
    "pruned_heads": {},
    "repetition_penalty": 1.0,
    "return_dict": false,
    "task_specific_params": null,
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torchscript": false,
    "type_vocab_size": 2,
    "use_bfloat16": false,
    "use_cache": true,
    "vocab_size": 30522,
    "xla_device": null
  },
  "reduce_loss": false,
  "retrieval_batch_size": 8,
  "retrieval_vector_size": 768,
  "title_sep": " / ",
  "use_dummy_dataset": false,
  "vocab_size": null
}
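
The `generator` block above is a full BART config and the `question_encoder` block a full DPR config. A short sketch for inspecting how they are exposed through `RagConfig` (assuming the checkpoint id resolves as usual):

```python
from transformers import RagConfig

config = RagConfig.from_pretrained("facebook/rag-sequence-base")

# Top-level fields drive retrieval; nested configs describe the two sub-models.
print(config.model_type)                   # rag
print(config.n_docs)                       # 5 passages retrieved per query
print(config.retrieval_vector_size)        # 768, matching the DPR hidden_size
print(config.question_encoder.model_type)  # dpr
print(config.generator.model_type)         # bart
```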
generator_tokenizer/merges.txt
ADDED
The diff for this file is too large to render.
generator_tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
generator_tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"model_max_length": 1024}
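
`RagTokenizer` is just a bundle of the two sub-tokenizers saved under `question_encoder_tokenizer/` and `generator_tokenizer/`. A small sketch of that structure, using the attribute names from `transformers`:

```python
from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-base")

# The two sub-tokenizers map onto the two subfolders added in this commit.
print(type(tokenizer.question_encoder).__name__)  # a BERT-style (DPR) tokenizer
print(type(tokenizer.generator).__name__)         # a BPE (BART) tokenizer
print(tokenizer.generator.model_max_length)       # 1024, from this tokenizer_config.json
```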
generator_tokenizer/vocab.json
ADDED
The diff for this file is too large to render.
question_encoder_tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
question_encoder_tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"do_lower_case": true, "model_max_length": 512}
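
`do_lower_case: true` here is what the README means by *uncased*. A tiny illustration with the underlying DPR question-encoder tokenizer (the exact WordPiece split shown in the comment is indicative, not verified):

```python
from transformers import DPRQuestionEncoderTokenizer

qe_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
    "facebook/dpr-question_encoder-single-nq-base"
)

# do_lower_case=true: mixed-case input is normalized before WordPiece splitting.
print(qe_tokenizer.tokenize("Who Holds The Record In 100m Freestyle?"))
# e.g. ['who', 'holds', 'the', 'record', 'in', '100', '##m', 'freestyle', '?']
```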
question_encoder_tokenizer/vocab.txt
ADDED
The diff for this file is too large to render.