"""Gradio web UI that translates text between languages via a HuggingFace Hub LLM."""
import os

import gradio as gr
from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Model repo comes from the environment so deployments can swap models.
model_repo = os.getenv('HF_MODEL_REPO')

# Marker used to truncate the model output; everything after it is dropped.
# Bug fix: this was "" in the original, which makes str.partition raise
# ValueError("empty separator"). The empty strings here and in `stop` below
# appear to be an HTML-stripped "</s>" (the Llama EOS token).
eos_string = "</s>"

# Llama-style instruction prompt.
# NOTE(review): the bare "<>" pair looks like a stripped "<<SYS>> ... <</SYS>>"
# system block — confirm against the target model's prompt format.
template = """[INST]<>You work as translator. You job is translate user requests from {source} to {target}<> {query}[/INST]\n"""
prompt = PromptTemplate(template=template, input_variables=["source", "target", "query"])

model_kwargs = {
    "max_new_tokens": 2048,
    "temperature": 0.5,
    # Bug fix: first stop sequence was "" (invalid); restored to "</s>".
    "stop": ["</s>", "<|endoftext|>", "<|end|>"],
}
llm = HuggingFaceHub(repo_id=model_repo, task="text-generation", model_kwargs=model_kwargs)
chain = LLMChain(prompt=prompt, llm=llm)

# Languages offered in the UI dropdowns.
# Bug fix: `lang_codes` was referenced by the dropdowns but never defined
# (NameError at import). Extend to whatever the model actually supports.
lang_codes = ['English', 'Korean', 'Japanese', 'Chinese', 'Spanish', 'French', 'German', 'Russian']


def translation(source, target, text):
    """Translate `text` from `source` to `target` language.

    Runs the LLM chain and returns the model output truncated at the
    first occurrence of `eos_string`.
    """
    # Bug fix: the original called chain.run(question) with an undefined
    # name; the chain's prompt needs all three template variables.
    response = chain.run({"source": source, "target": target, "query": text})
    return response.partition(eos_string)[0]


inputs = [
    gr.inputs.Dropdown(lang_codes, default='English', label='Source'),
    gr.inputs.Dropdown(lang_codes, default='Korean', label='Target'),
    gr.inputs.Textbox(lines=5, label="Input text"),
]

# Bug fix: `inputs` was passed positionally AFTER the keyword argument `fn`,
# which is a SyntaxError; pass it by keyword.
gr.Interface(fn=translation, inputs=inputs, outputs="text").launch()