Dhahlan2000 committed
Commit a413ffd · verified · 1 Parent(s): b549a09

Update app.py

Files changed (1)
  1. app.py +7 -24
app.py CHANGED
@@ -99,36 +99,19 @@ def transliterate_to_sinhala(text):
     latin_text = transliterate.process(source_script, target_script, text)
     return latin_text
 
+tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
 model = AutoModelForCausalLM.from_pretrained(
-    "microsoft/Phi-3-mini-4k-instruct",
-    device_map="cuda",
-    torch_dtype="auto",
-    trust_remote_code=True,
+    "google/gemma-2b-it",
+    torch_dtype=torch.bfloat16
 )
-tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
 
 
-def conversation_predict(prompt):
-    messages = [
-        {"role": "system", "content": "You are a helpful assistant."},
-        {"role": "user", "content": prompt}
-    ]
-    pipe = pipeline(
-        "text-generation",
-        model=model,
-        tokenizer=tokenizer,
-    )
+def conversation_predict(input_text):
+    input_ids = tokenizer(input_text, return_tensors="pt")
 
-    generation_args = {
-        "max_new_tokens": 500,
-        "return_full_text": False,
-        "temperature": 0.0,
-        "do_sample": False,
-    }
-
-    output = pipe(messages, **generation_args)
+    outputs = model.generate(**input_ids)
 
-    return output[0]['generated_text']
+    return tokenizer.decode(outputs[0])
 
 def ai_predicted(user_input):
     user_input = translate_Singlish_to_sinhala(user_input)
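
For reference, a minimal standalone sketch of the new Gemma code path introduced by this hunk. The import lines and the smoke-test call at the bottom are assumptions added for illustration; the rest mirrors the committed code. Note that google/gemma-2b-it is a gated checkpoint, so the Hub license must be accepted before from_pretrained can download it.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load Gemma in bfloat16. Unlike the removed Phi-3 setup, no device_map is
# given, so the model stays on the default device (CPU) unless moved.
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2b-it",
    torch_dtype=torch.bfloat16,
)

def conversation_predict(input_text):
    # return_tensors="pt" yields a dict with input_ids and attention_mask;
    # **input_ids unpacks both into generate().
    input_ids = tokenizer(input_text, return_tensors="pt")
    # generate() falls back to its default length settings here, so long
    # replies may be cut short (the commit sets no max_new_tokens).
    outputs = model.generate(**input_ids)
    return tokenizer.decode(outputs[0])

# Hypothetical smoke test, not part of the commit:
print(conversation_predict("Hello!"))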