Sakalti committed
Commit bae9f36 · verified · 1 Parent(s): c9d5831

Update app.py

Files changed (1)
  1. app.py +4 -5
app.py CHANGED
@@ -1,5 +1,3 @@
-#refer llama recipes for more info https://github.com/huggingface/huggingface-llama-recipes/blob/main/inference-api.ipynb
-#huggingface-llama-recipes : https://github.com/huggingface/huggingface-llama-recipes/tree/main
 import gradio as gr
 from openai import OpenAI
 import os
@@ -9,6 +7,7 @@ ACCESS_TOKEN = os.getenv("HF_TOKEN")
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/v1/",
     api_key=ACCESS_TOKEN,
+    # proxies={}  # delete this line
 )
 
 def respond(
@@ -31,7 +30,7 @@ def respond(
 
     response = ""
 
-    for message in client.chat.completions.create(
+    for message in client.chat.completions.create(
         model="Sakalti/model-3",
         max_tokens=max_tokens,
         stream=True,
@@ -43,7 +42,7 @@ def respond(
 
         response += token
         yield response
-
+
 chatbot = gr.Chatbot(height=600)
 
 demo = gr.ChatInterface(
@@ -59,11 +58,11 @@ demo = gr.ChatInterface(
             step=0.05,
             label="Top-P",
         ),
-
     ],
     fill_height=True,
     chatbot=chatbot,
     theme="Nymbo/Nymbo_Theme",
 )
+
 if __name__ == "__main__":
     demo.launch()
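
For reference, below is a minimal sketch of how app.py plausibly reads after this commit. Only the pieces visible in the diff are certain: the imports, the token read from HF_TOKEN, the client pointed at https://api-inference.huggingface.co/v1/, the call to "Sakalti/model-3" with max_tokens and stream=True, the Top-P slider's step and label, fill_height=True, chatbot=chatbot, the Nymbo/Nymbo_Theme theme, and the __main__ launch guard. The respond() signature, the message assembly, the chunk parsing, and the remaining additional_inputs follow the stock Gradio ChatInterface streaming template and are assumptions. The comment added in the second hunk points at a proxies argument being dropped from the OpenAI() call; the current 1.x openai client does not accept a proxies keyword, so leaving it out avoids a TypeError when the app starts.

import os

import gradio as gr
from openai import OpenAI

# As in the diff: the OpenAI SDK is pointed at the Hugging Face Inference API
# and authenticated with the HF_TOKEN secret; no proxies argument is passed.
ACCESS_TOKEN = os.getenv("HF_TOKEN")

client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=ACCESS_TOKEN,
)


def respond(message, history, system_message, max_tokens, temperature, top_p):
    # The parameter list and message assembly are assumptions based on the
    # standard ChatInterface template; only model, max_tokens and stream=True
    # are visible in the diff.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    # Streaming loop as in the diff: each chunk carries a delta with the next
    # piece of text, which is accumulated and yielded so the UI updates live.
    for chunk in client.chat.completions.create(
        model="Sakalti/model-3",
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
        messages=messages,
    ):
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response


chatbot = gr.Chatbot(height=600)

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        # Only the Top-P slider's step and label appear in the diff; the other
        # inputs and their defaults follow the usual template and are assumptions.
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P"),
    ],
    fill_height=True,
    chatbot=chatbot,
    theme="Nymbo/Nymbo_Theme",
)

if __name__ == "__main__":
    demo.launch()

Because respond() is a generator that yields the accumulated text on every chunk, gr.ChatInterface streams the reply into the chat window as it arrives instead of waiting for the full completion. To try it locally, set HF_TOKEN in the environment and run python app.py; on a Hugging Face Space the token would typically come from the Space secrets.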