import gradio as gr
from transformers import pipeline
# ๊ฐ์„ฑ ๋ถ„์„ ํŒŒ์ดํ”„๋ผ์ธ ์ดˆ๊ธฐํ™”
sentiment = pipeline("sentiment-analysis")
# ์‚ฌ์šฉ์ž ์ž…๋ ฅ์— ๋Œ€ํ•œ ๊ฐ์„ฑ ๋ถ„์„ ๊ฒฐ๊ณผ๋ฅผ ๋ฐ˜ํ™˜ํ•˜๋Š” ํ•จ์ˆ˜
def get_sentiment(์ž…๋ ฅ):
# ๊ฐ์„ฑ ๋ถ„์„ ์‹คํ–‰
return sentiment(์ž…๋ ฅ)
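# Example call (a sketch, assuming the pipeline's default English model,
# distilbert-base-uncased-finetuned-sst-2-english, is downloaded on first run):
#   get_sentiment("I love this!")  # -> [{'label': 'POSITIVE', 'score': 0.99...}]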

gr.Interface(fn=get_sentiment, inputs="text", outputs="text", title="Sentiment Analysis", description="").launch()
# Previous version (kept for reference): KoAlpaca-355M Q&A interface.
# import gradio as gr
# from transformers import AutoTokenizer, AutoModelForCausalLM
#
# # Load the KoAlpaca model and tokenizer
# def get_pipe():
#     model_name = "heegyu/koalpaca-355m"
#     tokenizer = AutoTokenizer.from_pretrained(model_name)
#     tokenizer.truncation_side = "right"
#     model = AutoModelForCausalLM.from_pretrained(model_name)
#     return model, tokenizer
#
# # Generate a response for the given user prompt
# def get_response(tokenizer, model, context):
#     context = f"<usr>{context}\n<sys>"
#     inputs = tokenizer(
#         context,
#         truncation=True,
#         max_length=512,
#         return_tensors="pt")
#     generation_args = dict(
#         max_length=256,
#         min_length=64,
#         eos_token_id=2,
#         do_sample=True,
#         top_p=1.0,
#         early_stopping=True
#     )
#     outputs = model.generate(**inputs, **generation_args)
#     response = tokenizer.decode(outputs[0])
#     # Strip the prompt and the end-of-sequence token from the decoded text
#     response = response[len(context):].replace("</s>", "")
#     return response
#
# model, tokenizer = get_pipe()
#
# def ask_question(input_):
#     response = get_response(tokenizer, model, input_)
#     return response
#
# gr.Interface(fn=ask_question, inputs="text", outputs="text", title="KoAlpaca-355M", description="Ask a question in Korean.").launch()