Koh Mitsuda committed on
Commit
f5b620a
1 Parent(s): 2591fc9
Files changed (3) hide show
  1. README.md +4 -4
  2. app.py +152 -0
  3. requirements.txt +3 -0
README.md CHANGED
@@ -1,8 +1,8 @@
1
  ---
2
- title: Llama 3 Youko 8b Int
3
- emoji: 🐢
4
- colorFrom: pink
5
- colorTo: pink
6
  sdk: gradio
7
  sdk_version: 4.29.0
8
  app_file: app.py
 
1
  ---
2
+ title: Llama 3 Youko 8b Instruct
3
+ emoji: 🦊
4
+ colorFrom: yellow
5
+ colorTo: yellow
6
  sdk: gradio
7
  sdk_version: 4.29.0
8
  app_file: app.py
app.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ import spaces
4
+ from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
5
+ from threading import Thread
6
+
7
+
8
# Hugging Face access token, read from the environment (None when unset).
HF_TOKEN = os.getenv("HF_TOKEN")

# Hub repository ID of the model served by this demo.
model_id = "rinna/llama-3-youko-8b-instruct"
13
+
14
+
15
# Intro text (Japanese) rendered at the top of the page.
DESCRIPTION = """
<div>
<p>🦊 <a href="https://huggingface.co/rinna/llama-3-youko-8b-instruct"><b>Llama 3 Youko 8B Instruct</b> (rinna/llama-3-youko-8b-instruct)</a>は、<a href="https://rinna.co.jp">rinna株式会社</a>が<a href=https://huggingface.co/meta-llama/Meta-Llama-3-8B>Meta Llama 3 8B</a>に日本語継続事前学習およびインストラクションチューニングを行った大規模言語モデルです.Llama 3 8Bの優れたパフォーマンスを日本語に引き継いでおり、日本語のチャットにおいて高い性能を示しています。</p>
<p>🤖 このデモでは、Llama 3 Youko 8B Instructとチャットを行うことが可能です。</p>
<p>📄 モデルの詳細については、<a href="https://rinna.co.jp/news/2024/07/20240725.html">プレスリリース</a>、および、<a href="https://rinnakk.github.io/research/benchmarks/lm/index.html">ベンチマーク</a>をご覧ください。お問い合わせは<a href="https://rinna.co.jp/inquiry/">こちら</a>までどうぞ。</p>
</div>
"""

# License / attribution footer shown below the chat interface.
LICENSE = """
---
<div>
<p>Built with Meta Llama 3</p>
<p>License: <a href="https://llama.meta.com/llama3/license/">Meta Llama 3 Community License</a><p>
<p>This space is implemented based on <a href="https://huggingface.co/spaces/ysharma/Chat_with_Meta_llama3_8b">ysharma/Chat_with_Meta_llama3_8b</a>.</p>
</div>
"""

# HTML shown inside the empty chatbot before the first message.
PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
<img src="https://huggingface.co/rinna/llama-3-youko-8b/resolve/main/rinna.png" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
<h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Llama 3 Youko</h1>
</div>
"""

# Custom CSS: center the title and style the duplicate-space button.
css = """
h1 {
  text-align: center;
  display: block;
}
#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}
"""
51
+
52
+
53
# Load the tokenizer and model once at startup.
# Fix: HF_TOKEN was read from the environment above but never used; pass it
# through so gated/private repositories work (token=None keeps prior behavior).
tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", token=HF_TOKEN)
# Stop generation at either the model's EOS token or Llama 3's
# end-of-turn marker "<|eot_id|>".
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]
60
+
61
+
62
@spaces.GPU(duration=120)
def chat_llama3_8b(message: str,
                   history: list,
                   temperature: float,
                   max_new_tokens: int
                   ):
    """
    Stream a chat response from the Llama 3 Youko 8B Instruct model.

    Fixes vs. the original: removed leftover debug ``print`` calls that dumped
    the full tokenized prompt to stdout on every request, and corrected the
    return annotation (this is a generator, not a plain ``str``).

    Args:
        message (str): The latest user message.
        history (list): (user, assistant) message pairs kept by gr.ChatInterface.
        temperature (float): Sampling temperature; 0 switches to greedy decoding.
        max_new_tokens (int): The maximum number of new tokens to generate.

    Yields:
        str: The partial response accumulated so far (Gradio renders each yield).
    """
    # Rebuild the full conversation in chat-template format, starting from a
    # fixed Japanese system prompt ("You are a sincere and excellent
    # assistant. Please answer concisely and honestly.").
    conversation = []
    conversation.append({"role": "system", "content": "あなたは誠実で優秀なアシスタントです。どうか、簡潔かつ正直に答えてください。"})
    for user, assistant in history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})

    # add_generation_prompt=True appends the assistant header so the model
    # generates a reply instead of continuing the user turn.
    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device)

    # skip_prompt avoids echoing the input; timeout guards against a stalled
    # generation thread blocking the iterator forever.
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)

    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        repetition_penalty=1.1,
        eos_token_id=terminators,
    )
    # temperature == 0 would crash sampling; enforce greedy decoding instead.
    if temperature == 0:
        generate_kwargs['do_sample'] = False

    # Generate in a background thread so this generator can consume the
    # streamer and yield partial text as it arrives.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
112
+
113
+
114
# --- Gradio UI -------------------------------------------------------------

# Chat display area; PLACEHOLDER is shown until the first message arrives.
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')

with gr.Blocks(fill_height=True, css=css) as demo:

    gr.Markdown(DESCRIPTION)
    gr.ChatInterface(
        fn=chat_llama3_8b,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ パラメータ", open=False, render=False),
        additional_inputs=[
            # Sampling temperature (randomness) control.
            gr.Slider(minimum=0, maximum=1, step=0.05, value=0.9,
                      label="生成時におけるサンプリングの温度(ランダム性)",
                      render=False),
            # Upper bound on generated tokens.
            gr.Slider(minimum=128, maximum=4096, step=1, value=512,
                      label="生成したい最大のトークン数",
                      render=False),
        ],
        examples=[
            ["日本で有名なものと言えば"],
            ["ネコ: 「お腹が減ったニャ」\nイヌ: 「\nで始まる物語を書いて"],
            ["C言語で素数を判定するコードを書いて"],
            ["人工知能とは何ですか"],
        ],
        cache_examples=False,
    )

    gr.Markdown(LICENSE)


if __name__ == "__main__":
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ accelerate
2
+ transformers
3
+ SentencePiece