Update app.py
app.py CHANGED
@@ -99,36 +99,19 @@ def transliterate_to_sinhala(text):
     latin_text = transliterate.process(source_script, target_script, text)
     return latin_text
 
+tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
 model = AutoModelForCausalLM.from_pretrained(
-    "microsoft/Phi-3-mini-4k-instruct",
-    device_map="cuda",
-    torch_dtype="auto",
-    trust_remote_code=True,
+    "google/gemma-2b-it",
+    torch_dtype=torch.bfloat16
 )
-tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
 
 
-def conversation_predict(prompt):
-    messages = [
-        {"role": "system", "content": "You are a helpful assistant."},
-        {"role": "user", "content": prompt}
-    ]
-    pipe = pipeline(
-        "text-generation",
-        model=model,
-        tokenizer=tokenizer,
-    )
+def conversation_predict(input_text):
+    input_ids = tokenizer(input_text, return_tensors="pt")
 
-    generation_args = {
-        "max_new_tokens": 500,
-        "return_full_text": False,
-        "temperature": 0.0,
-        "do_sample": False,
-    }
-
-    output = pipe(messages, **generation_args)
+    outputs = model.generate(**input_ids)
 
-    return output[0]['generated_text']
+    return tokenizer.decode(outputs[0])
 
 def ai_predicted(user_input):
     user_input = translate_Singlish_to_sinhala(user_input)