update modeling_minicpmo.py

modeling_minicpmo.py (CHANGED, +3 -2)
@@ -637,6 +637,7 @@ class MiniCPMO(MiniCPMOPreTrainedModel):
 
     def _decode(self, inputs_embeds, tokenizer, attention_mask, **kwargs):
         kwargs.pop("output_hidden_states", None)
+        kwargs.pop("return_dict_in_generate", None)
         terminators = [tokenizer.convert_tokens_to_ids(i) for i in self.terminators]
         outputs = self.llm.generate(
             inputs_embeds=inputs_embeds,
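The added pop mirrors the output_hidden_states pop on the previous line: the body of _decode reads outputs.sequences, so the inner self.llm.generate(...) call presumably sets return_dict_in_generate=True itself (an inference from the surrounding code, not shown in this hunk), and a caller forwarding the same key through **kwargs would collide with it. A minimal, self-contained sketch of that failure mode, with stand-in names rather than the repository's API:

def generate(inputs_embeds=None, **kwargs):
    # Stand-in for self.llm.generate; only the keyword collision matters here.
    return kwargs

def decode_without_pop(**kwargs):
    # Raises TypeError: generate() got multiple values for keyword argument
    # 'return_dict_in_generate' whenever the caller also supplied that key.
    return generate(return_dict_in_generate=True, **kwargs)

def decode_with_pop(**kwargs):
    kwargs.pop("return_dict_in_generate", None)  # the committed guard
    return generate(return_dict_in_generate=True, **kwargs)

decode_with_pop(return_dict_in_generate=False)       # fine: caller's copy dropped
# decode_without_pop(return_dict_in_generate=False)  # raises the TypeError above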
@@ -778,7 +779,7 @@ class MiniCPMO(MiniCPMOPreTrainedModel):
         tokenizer=None,
         vision_hidden_states=None,
         stream=False,
-
+        decode_text=True,
         **kwargs,
     ):
         assert input_ids is not None
@@ -817,7 +818,7 @@ class MiniCPMO(MiniCPMOPreTrainedModel):
 
         result = self._decode_text(outputs.sequences, tokenizer)
 
-        if
+        if decode_text is False:
             return outputs
 
         return result, outputs
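Because the new flag defaults to True, callers that don't pass it get the (result, outputs) pair, while decode_text=False opts out of text decoding and returns the raw generate output alone (e.g. to post-process outputs.sequences directly). A runnable sketch of the changed return logic; every name below is a stand-in for illustration, not the repository's API:

from types import SimpleNamespace

def generate_tail(outputs, decode_text=True):
    # Mirrors the tail of MiniCPMO.generate after this commit.
    result = ["decoded text"]  # stand-in for self._decode_text(outputs.sequences, tokenizer)
    if decode_text is False:
        return outputs         # new opt-out path: raw output only
    return result, outputs     # default: decoded text plus raw output

raw = SimpleNamespace(sequences=[[0, 1, 2]])
assert generate_tail(raw) == (["decoded text"], raw)
assert generate_tail(raw, decode_text=False) is raw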
|