Sakalti committed on
Commit
17c4696
·
verified ·
1 Parent(s): 3957ed6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -9
app.py CHANGED
@@ -1,19 +1,20 @@
 
 
 
1
  from transformers import AutoModelForCausalLM, AutoTokenizer
2
  import torch
3
- import gradio as gr
4
 
5
  # モデルとトークナイザーの読み込み
6
- def load_model():
7
- model_name = "EleutherAI/pythia-1b-deduped"
8
- tokenizer = AutoTokenizer.from_pretrained(model_name)
9
- model = AutoModelForCausalLM.from_pretrained(model_name)
10
- return model, tokenizer
11
-
12
- model, tokenizer = load_model()
13
 
14
  # 応答を生成する関数
15
  def respond(message, history, max_tokens, temperature, top_p):
16
  # 入力履歴と新しいメッセージを連結
 
 
 
17
  input_text = ""
18
  for user_message, bot_response in history:
19
  input_text += f"User: {user_message}\nAssistant: {bot_response}\n"
@@ -55,7 +56,7 @@ with gr.Blocks() as demo:
55
  def clear_history():
56
  return [], []
57
 
58
- send_button.click(respond, inputs=[msg, chatbot, max_tokens, temperature, top_p], outputs=[chatbot, chatbot])
59
  clear.click(clear_history, outputs=[chatbot])
60
 
61
  demo.launch()
 
# Dependencies: torch, transformers, gradio — install them outside this script,
# e.g. `pip install torch transformers gradio`.
# NOTE(review): the original first line was a literal `!pip install torch transformers gradio`,
# which is IPython/notebook shell syntax and a SyntaxError in a plain .py file; removed.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the model and tokenizer once at module import time.
# NOTE(review): this downloads the pythia-1b weights on first run — confirm that
# eager, import-time loading is intended for this Space.
model_name = "EleutherAI/pythia-1b-deduped"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
 
 
 
 
11
 
12
  # 応答を生成する関数
13
  def respond(message, history, max_tokens, temperature, top_p):
14
  # 入力履歴と新しいメッセージを連結
15
+ if history is None:
16
+ history = []
17
+
18
  input_text = ""
19
  for user_message, bot_response in history:
20
  input_text += f"User: {user_message}\nAssistant: {bot_response}\n"
 
56
def clear_history():
    """Reset the conversation: return an empty history list and an empty chatbot display."""
    cleared = []
    return cleared, list(cleared)
58
 
59
# Wire the send button: pass the message, current chat history, and sampling
# parameters to respond(), writing the result back to the chatbot.
# NOTE(review): dropped the original `_js=None` keyword — `_js` was an
# undocumented Gradio 3 kwarg that was removed (renamed `js`) in Gradio 4, so
# passing it raises TypeError on current Gradio, and `None` added nothing anyway.
send_button.click(respond, inputs=[msg, chatbot, max_tokens, temperature, top_p], outputs=[chatbot, chatbot])
60
  clear.click(clear_history, outputs=[chatbot])
61
 
62
  demo.launch()