ThomasBlumet committed on
Commit 7a5f668 · 1 Parent(s): 5201e84

change model

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -9,12 +9,12 @@ logger = logging.get_logger("transformers")
 model_name = "TheBloke/Mistral-7B-Instruct-v0.1-GPTQ" #"openai-community/gpt2" or "TheBloke/Mistral-7B-Instruct-v0.1-GPTQ" or "TheBloke/Llama-2-7B-Chat-GGML" or "TheBloke/zephyr-7B-beta-GPTQ"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 #model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True)
 
 # Generate text using the model and tokenizer
 def generate_text(input_text):
     input_ids = tokenizer.encode(input_text, return_tensors="pt")
-    output = model.generate(input_ids, max_length=100, num_return_sequences=1, no_repeat_ngram_size=2, top_k=50, top_p=0.95, temperature=0.7)
+    output = model.generate(input_ids, max_length=100, num_return_sequences=1, no_repeat_ngram_size=2, top_k=50, top_p=0.95, temperature=0.7, do_sample=True, pad_token_id=tokenizer.eos_token_id)
     return tokenizer.decode(output[0], skip_special_tokens=True)
 
 # def generate_text(prompt):
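
For context, a minimal runnable sketch of the post-commit load-and-generate path in app.py. The final prompt is hypothetical, and loading a GPTQ checkpoint through AutoModelForCausalLM additionally assumes the optimum and auto-gptq packages are installed, which this commit does not show:

from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "TheBloke/Mistral-7B-Instruct-v0.1-GPTQ"

tokenizer = AutoTokenizer.from_pretrained(model_name)
# low_cpu_mem_usage=True avoids materializing a second full copy of the
# weights in host RAM while the checkpoint loads.
model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True)

def generate_text(input_text):
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    # do_sample=True is what makes temperature/top_k/top_p take effect;
    # without it, generate() decodes greedily and warns that those flags
    # are ignored. pad_token_id is pinned to eos_token_id because this
    # tokenizer defines no pad token of its own.
    output = model.generate(
        input_ids,
        max_length=100,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        top_k=50,
        top_p=0.95,
        temperature=0.7,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)

print(generate_text("[INST] Explain GPTQ in one sentence. [/INST]"))  # hypothetical prompt

In short, the commit's two added arguments fix two separate runtime warnings (ignored sampling flags and an unset pad_token_id), while low_cpu_mem_usage=True only affects peak memory during loading, not generation behavior.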