Error in Colab

#4
by howWasThisUsernameNotTaken

When I try to use the model in Google Colab, I get this error on the last line:


RuntimeError Traceback (most recent call last)
in <cell line: 1>()
----> 1 print(pipe(prompt_template)[0]['generated_text'].split('|im_start|>assistant')[1].strip())

23 frames
/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py in forward(self, hidden_states, attention_mask, position_ids, past_key_value, output_attentions, use_cache, cache_position, **kwargs)
615 value_states = self.v_proj(hidden_states)
616
--> 617 query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
618 key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
619 value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

RuntimeError: shape '[1, 170, 32, 160]' is invalid for input of size 696320
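
If I'm reading the numbers right, the reshape asks for more elements than the tensor holds: 1 × 170 × 32 × 160 = 870,400, while the tensor only has 696,320 = 170 × 4,096, i.e. 4,096 features per token instead of the 5,120 the view expects (which would imply a head_dim of 128 rather than 160). A quick check of that arithmetic, using only the values from the traceback:

# Sanity check on the numbers in the traceback (all values copied from the error message).
seq_len, num_heads, expected_head_dim = 170, 32, 160
requested = 1 * seq_len * num_heads * expected_head_dim   # elements .view() asks for -> 870400
actual = 696320                                           # elements the tensor really has
print(requested, actual)                                  # 870400 696320
print(actual // seq_len)                                  # 4096 -> per-token width of query_states
print(actual // seq_len // num_heads)                     # 128  -> implied head_dim of q_proj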


This is my code. What could be causing the issue?

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline, GenerationConfig

model_name = "cognitivecomputations/dolphin-2.9.3-mistral-nemo-12b"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=False,
)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    use_cache=False,
    quantization_config=bnb_config,
    trust_remote_code=True
)

model.eval()

tokenizer = AutoTokenizer.from_pretrained(
    model_name,
    trust_remote_code=True
)

tokenizer.add_eos_token = True
tokenizer.add_bos_token = True
tokenizer.pad_token = tokenizer.eos_token

tokenizer.padding_side = "left"
tokenizer.truncation_side = 'left'

print('BOS:', tokenizer.bos_token)
print('EOS:', tokenizer.eos_token)

task = "text-generation"

pipe = pipeline(
    task,
    model=model,
    tokenizer=tokenizer,
    return_text=True,
    batch_size=1,  # Adjusting batch size for debugging
    generation_config=GenerationConfig(
        do_sample=True,
        temperature=0.7,
        max_new_tokens=128
    ),
    framework="pt",
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"}
]

prompt_template = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

print(pipe(prompt_template)[0]['generated_text'].split('|im_start|>assistant')[1].strip())
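
In case it helps narrow this down, here is a small extra cell I can run in the same notebook to report the transformers version and the attention dimensions the loaded model exposes (getattr is used because not every config has a head_dim attribute):

import transformers

# Report library version and the attention geometry of the loaded model.
print("transformers version:", transformers.__version__)
cfg = model.config
print("hidden_size:", cfg.hidden_size)
print("num_attention_heads:", cfg.num_attention_heads)
print("head_dim:", getattr(cfg, "head_dim", cfg.hidden_size // cfg.num_attention_heads))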
