Update modelling_pisco.py — Browse files
modelling_pisco.py (+0 / −9 lines)
modelling_pisco.py
CHANGED
@@ -65,14 +65,6 @@ class PISCO(PreTrainedModel):
 65         self.adapter_keys.append('encoder_adapter')
 66
 67         self.generation_config = GenerationConfig(do_sample=False, top_p=None)
-68
-69         print('a')
-70         # self.decoder = self.decoder.to('cuda')
-71         print('b')
-72         if torch.cuda.is_available():
-73             print('c')
-74             # self.decoder = self.decoder.to('cuda')
-75             print('d')
 76
 77     def create_tokenizer(self, cfg):
 78         self.tokenizer = AutoTokenizer.from_pretrained(cfg.decoder_model_name, use_fast=True, padding_side='left')
|
@@ -236,7 +228,6 @@ class PISCO(PreTrainedModel):
 236         questions: list of string
 237         compressed_documents: torch tensor, its first dimension should be a multiple of len(questions)
 238         """
-239         print(compressed_documents.size(), len(questions))
 240         self.generation_top_k = compressed_documents.size(0) // len(questions)
 241         assert compressed_documents.size(0) % self.generation_top_k == 0, f"{compressed_documents.size(0)} {self.generation_top_k}"
 242
|
|
|
After the change (new file excerpt):

 65         self.adapter_keys.append('encoder_adapter')
 66
 67         self.generation_config = GenerationConfig(do_sample=False, top_p=None)
 68
 69     def create_tokenizer(self, cfg):
 70         self.tokenizer = AutoTokenizer.from_pretrained(cfg.decoder_model_name, use_fast=True, padding_side='left')

 228         questions: list of string
 229         compressed_documents: torch tensor, its first dimension should be a multiple of len(questions)
 230         """
 231         self.generation_top_k = compressed_documents.size(0) // len(questions)
 232         assert compressed_documents.size(0) % self.generation_top_k == 0, f"{compressed_documents.size(0)} {self.generation_top_k}"
 233