cetusian committed on
Commit 0719bdb
1 Parent(s): e715acb

Update app.py

Files changed (1):
  1. app.py  +1 -47
app.py CHANGED
@@ -3,53 +3,7 @@ import os
  from huggingface_hub import login
  from transformers import pipeline

- # Login using your Hugging Face API key
  api_key = os.getenv("LLAMA")
  login(api_key)

- # Load the model using the Hugging Face Hub
- llama_model = gr.load("models/meta-llama/Llama-3.1-8B-Instruct")
-
- # Function to handle input and output
- def chat_with_llama(user_input):
-     # Ensure user_input is correctly formatted as a string
-     if isinstance(user_input, str):
-         response = llama_model(user_input)
-         return response[0]  # Return the first element of the output list
-     else:
-         return "Invalid input. Please provide a valid text."
-
- # Customize the Gradio interface
- with gr.Blocks(css=".title {font-size: 3em; font-weight: bold; text-align: center; color: #4A90E2;}") as demo:
-
-     # Header
-     gr.Markdown(
-         """
-         <div class="title">🦙 Llama 3.1 Chatbot 🦙</div>
-         <p style="text-align:center; font-size:1.2em; color:gray;">Ask anything from the advanced Llama 3.1 model!</p>
-         """,
-         elem_classes="header"
-     )
-
-     # Main Input/Output Section
-     with gr.Row():
-         with gr.Column(scale=1):
-             user_input = gr.Textbox(label="Your question", placeholder="Type your question here...", lines=4)
-         with gr.Column(scale=1):
-             response_output = gr.Textbox(label="Llama's response", lines=4)
-
-     # Button to submit input
-     submit_button = gr.Button("Submit", variant="primary")
-
-     # Link the input and output
-     submit_button.click(fn=chat_with_llama, inputs=user_input, outputs=response_output)
-
-     # Footer
-     gr.Markdown(
-         """
-         <div style="text-align:center; font-size:0.8em; color:gray;">Developed with ❤️ using Llama 3.1 and Gradio</div>
-         """
-     )
-
- # Launch the app
- demo.launch()
+ gr.load("models/meta-llama/Llama-3.1-8B-Instruct").launch()
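
For reference, a minimal sketch of app.py after this commit is shown below. The first two lines of the file are outside the hunk and are assumed here to be import gradio as gr and import os, since both gr and os are used in the unchanged code and "import os" appears in the hunk header context.

import gradio as gr  # assumed: gr.load() is called below, so gradio must be imported above the hunk
import os            # assumed: os.getenv() is used below

from huggingface_hub import login
from transformers import pipeline  # still imported, but no longer used after this change

# Authenticate with the Hub using the token stored in the LLAMA environment variable
api_key = os.getenv("LLAMA")
login(api_key)

# Load a ready-made Gradio demo for the hosted model and launch it
gr.load("models/meta-llama/Llama-3.1-8B-Instruct").launch()

With gr.load() building the interface directly from the Hub model, the hand-written Blocks UI, the chat_with_llama wrapper, and the local demo.launch() call removed by this commit are no longer needed.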