suinY00N committed
Commit 0200696 • 1 Parent(s): c2f8b4c

Update app.py

Files changed (1)
  1. app.py +66 -39
app.py CHANGED
@@ -1,42 +1,69 @@
  import gradio as gr
 
- def get_pipe():
-     from transformers import AutoTokenizer, AutoModelForCausalLM
-     model_name = "heegyu/koalpaca-355m"
-     tokenizer = AutoTokenizer.from_pretrained(model_name)
-     tokenizer.truncation_side = "right"
-     model = AutoModelForCausalLM.from_pretrained(model_name)
-     return model, tokenizer
-
- def get_response(tokenizer, model, context):
-     context = f"<usr>{context}\n<sys>"
-     inputs = tokenizer(
-         context,
-         truncation=True,
-         max_length=512,
-         return_tensors="pt")
-
-     generation_args = dict(
-         max_length=256,
-         min_length=64,
-         eos_token_id=2,
-         do_sample=True,
-         top_p=1.0,
-         early_stopping=True
-     )
-
-     outputs = model.generate(**inputs, **generation_args)
-     response = tokenizer.decode(outputs[0])
-     print(context)
-     print(response)
-     response = response[len(context):].replace("</s>", "")
-
-     return response
-
- model, tokenizer = get_pipe()
-
- def ask_question(input_):
-     response = get_response(tokenizer, model, input_)
-     return response
-
- gr.Interface(fn=ask_question, inputs="text", outputs="text", title="KoAlpaca-355M", description="Ask a question in Korean.").launch()
  import gradio as gr
+ from transformers import pipeline
 
+ # Create the sentiment analysis pipeline
+ sentiment = pipeline("sentiment-analysis")
+
+ def get_sentiment(입력):
+     # Run sentiment analysis on the input text
+     result = sentiment(입력)
+     # Return the analysis result
+     return result
+
+ # Configure the Gradio interface
+ interface = gr.Interface(
+     fn=get_sentiment,  # function to call
+     inputs=gr.inputs.Textbox(lines=2, placeholder="Enter text here..."),  # input field
+     outputs="text",  # output format
+     title="Text Sentiment Analysis",  # UI title
+     description="This app analyzes the sentiment of the input text and shows whether it is positive or negative.",  # UI description
+     examples=[["This product is really great!"], ["It fell short of my expectations."]],  # example inputs
+     theme="default",  # UI theme
+     layout="vertical"  # UI layout
+ )
+
+ # Launch the Gradio app
+ interface.launch()
+
+ # import gradio as gr
+
+ # def get_pipe():
+ #     from transformers import AutoTokenizer, AutoModelForCausalLM
+ #     model_name = "heegyu/koalpaca-355m"
+ #     tokenizer = AutoTokenizer.from_pretrained(model_name)
+ #     tokenizer.truncation_side = "right"
+ #     model = AutoModelForCausalLM.from_pretrained(model_name)
+ #     return model, tokenizer
+
+ # def get_response(tokenizer, model, context):
+ #     context = f"<usr>{context}\n<sys>"
+ #     inputs = tokenizer(
+ #         context,
+ #         truncation=True,
+ #         max_length=512,
+ #         return_tensors="pt")
+
+ #     generation_args = dict(
+ #         max_length=256,
+ #         min_length=64,
+ #         eos_token_id=2,
+ #         do_sample=True,
+ #         top_p=1.0,
+ #         early_stopping=True
+ #     )
+
+ #     outputs = model.generate(**inputs, **generation_args)
+ #     response = tokenizer.decode(outputs[0])
+ #     print(context)
+ #     print(response)
+ #     response = response[len(context):].replace("</s>", "")
+
+ #     return response
+
+ # model, tokenizer = get_pipe()
+
+ # def ask_question(input_):
+ #     response = get_response(tokenizer, model, input_)
+ #     return response
+
+ # gr.Interface(fn=ask_question, inputs="text", outputs="text", title="KoAlpaca-355M", description="Ask a question in Korean.").launch()
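
For reference, calling pipeline("sentiment-analysis") with no model argument loads the task's default English checkpoint (distilbert-base-uncased-finetuned-sst-2-english at the time of writing) and returns a list with one {'label', 'score'} dict per input. A minimal sketch of the new app's core call, run outside Gradio; the example string is illustrative:

    from transformers import pipeline

    # app.py pins no model, so this downloads the task's default checkpoint
    sentiment = pipeline("sentiment-analysis")

    # One dict per input string: a POSITIVE/NEGATIVE label plus a confidence score
    print(sentiment("This product is really great!"))
    # e.g. [{'label': 'POSITIVE', 'score': 0.9998}]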