Sakalti committed
Commit 4aac07e · verified · 1 Parent(s): d0d8458

Update app.py

Files changed (1)
1. app.py (+5, -5)
app.py CHANGED
@@ -3,7 +3,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
 # モデルとトークナイザーの読み込み
-model_name = "Sakalti/iturkaAI-large"
+model_name = "Sakalti/gpt-neox-small"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, ignore_mismatched_sizes=True)
 
@@ -45,10 +45,10 @@ with gr.Blocks() as demo:
     gr.Markdown("## AIチャット")
     chatbot = gr.Chatbot()
     msg = gr.Textbox(label="あなたのメッセージ", placeholder="ここにメッセージを入力...")
-    max_tokens = gr.Slider(1, 2048, value=512, step=1, label="Max new tokens")
-    temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature")
-    top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
-    send_button = gr.Button("送信")
+    max_tokens = gr.Slider(1, 2048, value=512, step=1, label="新規トークン最大")
+    temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="温度")
+    top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (核サンプリング)")
+    send_button = gr.Button("送さんむ信")
     clear = gr.Button("クリア")
 
     def clear_history():
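For context the hunks do not show: the sketch below is a minimal, hypothetical reconstruction of how a Gradio chat app of this shape typically wires the loaded model, the sliders, and the buttons together. Only the names visible in the diff (model_name, tokenizer, model, chatbot, msg, max_tokens, temperature, top_p, send_button, clear, clear_history) come from the commit; the respond callback, its generate arguments, and the event bindings are assumptions, not the actual contents of app.py.

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Names below mirror the diff; everything about respond() is an assumption.
model_name = "Sakalti/gpt-neox-small"  # value introduced by this commit
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, ignore_mismatched_sizes=True)

def respond(message, history, max_new_tokens, temperature, top_p):
    # Hypothetical callback: encode the user message, sample a continuation,
    # and append the (user, bot) pair to the chat history.
    history = history or []
    inputs = tokenizer(message, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=int(max_new_tokens),
            do_sample=True,
            temperature=float(temperature),
            top_p=float(top_p),
            pad_token_id=tokenizer.eos_token_id,
        )
    reply = tokenizer.decode(
        output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )
    return "", history + [(message, reply)]

def clear_history():
    # Matches the clear_history definition that begins where the second hunk ends.
    return []

with gr.Blocks() as demo:
    gr.Markdown("## AIチャット")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="あなたのメッセージ", placeholder="ここにメッセージを入力...")
    max_tokens = gr.Slider(1, 2048, value=512, step=1, label="新規トークン最大")
    temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="温度")
    top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (核サンプリング)")
    send_button = gr.Button("送信")
    clear = gr.Button("クリア")

    # Hypothetical event wiring: the send button feeds the sliders into respond(),
    # the clear button empties the chatbot.
    send_button.click(respond, [msg, chatbot, max_tokens, temperature, top_p], [msg, chatbot])
    clear.click(clear_history, None, chatbot)

demo.launch()

Under these assumptions, the renamed sliders behave as before the commit: max_tokens caps the number of newly generated tokens, temperature scales sampling randomness, and top_p restricts sampling to the smallest set of tokens whose cumulative probability exceeds the threshold.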