Add API key validation; get a working version of the app

#1
Files changed (1) hide show
  1. app.py +105 -34
app.py CHANGED
@@ -1,16 +1,58 @@
1
  import gradio as gr
2
  import os
3
  import requests
 
4
  import google.generativeai as genai
5
  import openai
6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  def generate_text_chatgpt(key, prompt, temperature, top_p):
8
 
9
  openai.api_key = key
10
 
11
  response = openai.chat.completions.create(
12
  model="gpt-4-0613",
13
- messages=[{"role": "system", "content": "Suppose that you are a talented diagnostician"},
14
  {"role": "user", "content": prompt}],
15
  temperature=temperature,
16
  max_tokens=50,
@@ -35,32 +77,33 @@ def generate_text_gemini(key, prompt, temperature, top_p):
35
 
36
 
37
  def generate_text_llama(key, prompt, temperature, top_p):
38
- model_name = "meta-llama/Llama-3.1-8B-Instruct"
39
-
40
- API_URL = f"https://api-inference.huggingface.co/models/{model_name}"
41
- headers = {"Authorization": f"Bearer {key}"}
42
- payload = {
43
- "inputs": prompt,
44
- "parameters": {
45
- "temperature": temperature,
46
- "max_new_tokens": 50,
47
- "top_p": top_p,
48
- }
49
- }
50
- response = requests.post(API_URL, headers=headers, json=payload)
51
- resp_obj = response.json()
52
- if isinstance(resp_obj, list):
53
- resp = resp_obj[0]
54
- if 'generated_text' in resp:
55
- if len(resp['generated_text']) > len(prompt):
56
- return resp['generated_text'][len(prompt):]
57
- return resp['generated_text']
58
- return resp
59
- return resp_obj
60
 
61
 
62
  def diagnose(key, model, top_k, temperature, symptom_prompt):
63
 
 
 
 
 
 
64
  if symptom_prompt:
65
  if "GPT" in model:
66
  message = generate_text_chatgpt(key, symptom_prompt, temperature, top_k)
@@ -75,6 +118,27 @@ def diagnose(key, model, top_k, temperature, symptom_prompt):
75
 
76
  return message
77
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
 
79
 
80
  with gr.Blocks() as ui:
@@ -82,22 +146,29 @@ with gr.Blocks() as ui:
82
  with gr.Row(equal_height=500):
83
  with gr.Column(scale=1, min_width=300):
84
  model = gr.Radio(label="LLM Selection", value="GPT-3.5-Turbo",
85
- choices=["GPT-3.5-Turbo", "Llama-3.1", "Gemini-1.5"])
86
- key = gr.Textbox(label="Please input your LLM key", type="password")
87
- gr.Button(value="Don't have an LLM key? Get one through the below links.")
88
- gr.Button(value="OpenAi Key", link="https://platform.openai.com/account/api-keys")
89
- gr.Button(value="Meta Llama Key", link="https://platform.openai.com/account/api-keys")
90
- gr.Button(value="Gemini Key", link="https://platform.openai.com/account/api-keys")
 
 
 
 
91
  gr.ClearButton(key, variant="primary")
92
 
93
  with gr.Column(scale=2, min_width=600):
94
- gr.Markdown("### Hello, Welcome to the GUI by Team #9.")
95
- temperature = gr.Slider(0.0, 1.0, value=0.7, step = 0.01, label="Temperature", info="Set the Temperature")
96
- top_k = gr.Slider(1, 10, value=3, step = 1, label="top-k value", info="Set the 'k' for top-k LLM responses")
97
  symptoms = gr.Textbox(label="Add the symptom data in the input to receive diagnosis")
98
- llm_btn = gr.Button(value="Diagnose Disease", variant="primary", elem_id="diagnose")
 
 
 
99
  output = gr.Textbox(label="LLM Output Status", interactive=False, placeholder="Output will appear here...")
100
- llm_btn.click(fn=diagnose, inputs=[key, model, top_k, temperature, symptoms], outputs=output, api_name="auditor")
101
 
102
 
103
  ui.launch(share=True)
 
1
  import gradio as gr
2
  import os
3
  import requests
4
+ from huggingface_hub import InferenceClient
5
  import google.generativeai as genai
6
  import openai
7
 
8
def api_check_msg(api_key, selected_model):
    """Return only the HTML status message for an API-key validation check."""
    return validate_api_key(api_key, selected_model)["message"]
11
+
12
def validate_api_key(api_key, selected_model):
    """Check whether *api_key* works for the provider behind *selected_model*.

    Parameters:
        api_key: the user-supplied secret for the selected provider.
        selected_model: the radio-button label; matched by substring
            ("GPT" -> OpenAI, "Llama" -> Hugging Face, "Gemini" -> Google).

    Returns:
        dict with keys "is_valid" (bool) and "message" (small HTML snippet,
        green on success / red on failure, for display in the UI).
    """

    def _check_rest_endpoint(url, provider):
        # Shared probe for providers that expose a cheap authenticated GET.
        headers = {"Authorization": f"Bearer {api_key}"}
        try:
            # Timeout so a hung endpoint cannot freeze the UI callback.
            response = requests.get(url, headers=headers, timeout=10)
            if response.status_code == 200:
                return {"is_valid": True, "message": '<p style="color: green;">API Key is valid!</p>'}
            return {"is_valid": False, "message": f'<p style="color: red;">Invalid {provider} API Key. Status code: {response.status_code}</p>'}
        except requests.exceptions.RequestException as e:
            return {"is_valid": False, "message": f'<p style="color: red;">Invalid {provider} API Key. Error: {e}</p>'}

    if "GPT" in selected_model:
        # Listing models is an auth-only OpenAI endpoint — no tokens consumed.
        return _check_rest_endpoint("https://api.openai.com/v1/models", "OpenAI")

    if "Llama" in selected_model:
        # whoami-v2 verifies a Hugging Face token without running inference.
        return _check_rest_endpoint("https://huggingface.co/api/whoami-v2", "Hugging Face")

    if "Gemini" in selected_model:
        try:
            genai.configure(api_key=api_key)
            model = genai.GenerativeModel("gemini-1.5-flash")
            # google.generativeai exposes no lightweight auth probe, so issue
            # a tiny generation request to exercise the key.
            model.generate_content("Help me diagnose the patient.")
            return {"is_valid": True, "message": '<p style="color: green;">API Key is valid!</p>'}
        except Exception as e:
            return {"is_valid": False, "message": f'<p style="color: red;">Invalid Google API Key. Error: {e}</p>'}

    # Bug fix: previously fell through and returned None for an unrecognized
    # model, which crashed callers that index the result (e.g. api_check_msg).
    return {"is_valid": False, "message": '<p style="color: red;">Unrecognized model selection.</p>'}
48
+
49
  def generate_text_chatgpt(key, prompt, temperature, top_p):
50
 
51
  openai.api_key = key
52
 
53
  response = openai.chat.completions.create(
54
  model="gpt-4-0613",
55
+ messages=[{"role": "system", "content": "You are a talented diagnostician who is diagnosing a patient."},
56
  {"role": "user", "content": prompt}],
57
  temperature=temperature,
58
  max_tokens=50,
 
77
 
78
 
79
def generate_text_llama(key, prompt, temperature, top_p):
    """Generate a short diagnostic reply from Llama 3 via the HF Inference API.

    Parameters:
        key: Hugging Face API token.
        prompt: the user's symptom description.
        temperature: sampling temperature (0.0-1.0).
        top_p: nucleus-sampling parameter (0.0-1.0).

    Returns:
        The assistant message text produced by the model.
    """
    model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
    client = InferenceClient(api_key=key)

    messages = [
        {"role": "system", "content": "You are a talented diagnostician who is diagnosing a patient."},
        {"role": "user", "content": prompt},
    ]

    completion = client.chat.completions.create(
        model=model_name,
        messages=messages,
        # NOTE(review): budget scales with *character* count, not tokens — confirm intent.
        max_tokens=len(prompt) + 50,
        temperature=temperature,
        top_p=top_p,
    )

    # Bug fix: chat-completion responses contain only the assistant message —
    # the prompt is never echoed back — so the previous
    # `response[len(prompt):]` slice truncated the start of any legitimate
    # answer longer than the prompt. Return the message verbatim.
    return completion.choices[0].message.content
 
 
 
 
98
 
99
 
100
  def diagnose(key, model, top_k, temperature, symptom_prompt):
101
 
102
+ model_map = {
103
+ "GPT-3.5-Turbo": "GPT",
104
+ "Llama-3": "Llama",
105
+ "Gemini-1.5": "Gemini"
106
+ }
107
  if symptom_prompt:
108
  if "GPT" in model:
109
  message = generate_text_chatgpt(key, symptom_prompt, temperature, top_k)
 
118
 
119
  return message
120
 
121
def update_model_components(selected_model):
    """Relabel the key textbox and retarget the signup button for *selected_model*."""
    # (short provider name, API-key signup URL) per radio choice.
    provider_info = {
        "GPT-3.5-Turbo": ("GPT", "https://platform.openai.com/account/api-keys"),
        "Llama-3": ("Llama", "https://hf.co/settings/tokens"),
        "Gemini-1.5": ("Gemini", "https://aistudio.google.com/apikey"),
    }
    short_name, signup_link = provider_info[selected_model]
    return (
        gr.update(label=f"Please input the API key for your {short_name} model"),
        gr.update(
            value=f"Don't have an API key? Get one for the {short_name} model here.",
            link=signup_link,
        ),
    )
137
+
138
def toggle_button(symptoms_text, api_key, model):
    """Enable the Diagnose button only when symptoms are non-blank AND the key validates.

    Bug fix: validate_api_key returns a dict, which is always truthy, so the
    old bare-truthiness check enabled the button even for invalid keys;
    inspect the "is_valid" field instead.
    """
    if symptoms_text.strip() and validate_api_key(api_key, model)["is_valid"]:
        return gr.update(interactive=True)
    return gr.update(interactive=False)
142
 
143
 
144
  with gr.Blocks() as ui:
 
146
  with gr.Row(equal_height=500):
147
  with gr.Column(scale=1, min_width=300):
148
  model = gr.Radio(label="LLM Selection", value="GPT-3.5-Turbo",
149
+ choices=["GPT-3.5-Turbo", "Llama-3", "Gemini-1.5"])
150
+ is_valid = False
151
+ key = gr.Textbox(label="Please input the API key for your GPT model", type="password")
152
+ status_message = gr.HTML(label="Validation Status")
153
+ key.input(fn=api_check_msg, inputs=[key, model], outputs=status_message)
154
+ button = gr.Button(value="Don't have an API key? Get one for the GPT model here.", link="https://platform.openai.com/account/api-keys")
155
+ model.change(update_model_components, inputs=model, outputs=[key, button])
156
+ # gr.Button(value="OpenAi Key", link="https://platform.openai.com/account/api-keys")
157
+ # gr.Button(value="Meta Llama Key", link="https://platform.openai.com/account/api-keys")
158
+ # gr.Button(value="Gemini Key", link="https://platform.openai.com/account/api-keys")
159
  gr.ClearButton(key, variant="primary")
160
 
161
  with gr.Column(scale=2, min_width=600):
162
+ gr.Markdown("## Hello, Welcome to the GUI by Team #9.")
163
+ temperature = gr.Slider(0.0, 1.0, value=0.7, step = 0.05, label="Temperature", info="Set the Temperature")
164
+ top_p = gr.Slider(0.0, 1.0, value=0.9, step = 0.05, label="top-p value", info="Set the sampling nucleus parameter")
165
  symptoms = gr.Textbox(label="Add the symptom data in the input to receive diagnosis")
166
+ llm_btn = gr.Button(value="Diagnose Disease", variant="primary", elem_id="diagnose", interactive=False)
167
+ symptoms.input(toggle_button, inputs=[symptoms, key, model], outputs=llm_btn)
168
+ key.input(toggle_button, inputs=[symptoms, key, model], outputs=llm_btn)
169
+ model.change(toggle_button, inputs=[symptoms, key, model], outputs=llm_btn)
170
  output = gr.Textbox(label="LLM Output Status", interactive=False, placeholder="Output will appear here...")
171
+ llm_btn.click(fn=diagnose, inputs=[key, model, top_p, temperature, symptoms], outputs=output, api_name="auditor")
172
 
173
 
174
  ui.launch(share=True)