import gradio as gr
from huggingface_hub import InferenceClient

# Default client; respond() below swaps it for whichever model the user picks.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")


def switch_client(model_name: str) -> InferenceClient:
    """Return a fresh InferenceClient for the given model id."""
    return InferenceClient(model_name)


# Sampling presets per model: max_tokens, temperature, and top_p trade
# response speed for output quality.
presets = {
    "mistralai/Mistral-7B-Instruct-v0.3": {
        "Fast": {"max_tokens": 256, "temperature": 1.0, "top_p": 0.9},
        "Normal": {"max_tokens": 512, "temperature": 0.7, "top_p": 0.95},
        "Quality": {"max_tokens": 1024, "temperature": 0.5, "top_p": 0.90},
        "Unreal Performance": {"max_tokens": 2048, "temperature": 0.6, "top_p": 0.75},
    }
}
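
# Example lookup: presets["mistralai/Mistral-7B-Instruct-v0.3"]["Fast"]
# -> {"max_tokens": 256, "temperature": 1.0, "top_p": 0.9}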

# System prompt: the assistant is named "Lake 1 Base"; the user is addressed as "User".
SYSTEM_MESSAGE = "Your name is Lake 1 Base, and the user's name is User."


def respond(message, history: list, model_name, preset_name):
    """Rebuild the chat transcript and query the selected model."""
    global client
    client = switch_client(model_name)

    # Start with the system prompt, then replay prior turns from the history.
    messages = [{"role": "system", "content": SYSTEM_MESSAGE}]
    for val in history:
        if isinstance(val, dict) and "role" in val and "content" in val:
            messages.append({"role": val["role"], "content": val["content"]})
    messages.append({"role": "user", "content": message})

    # Sampling parameters for the chosen preset.
    preset = presets[model_name][preset_name]

    response = client.chat_completion(
        messages,
        max_tokens=preset["max_tokens"],
        temperature=preset["temperature"],
        top_p=preset["top_p"],
    )

    # chat_completion returns an OpenAI-style completion; take the first choice.
    return response.choices[0].message.content
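
# Quick smoke test (sketch), assuming an HF token with Inference API access is
# configured in the environment:
# >>> respond("Hello!", [], "mistralai/Mistral-7B-Instruct-v0.3", "Fast")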


# (model_id, display_name) pairs; the UI shows only the display names.
model_choices = [
    ("mistralai/Mistral-7B-Instruct-v0.3", "Lake 1 Base"),
]

pseudonyms = [display_name for _, display_name in model_choices]


def respond_with_pseudonym(message, history: list, selected_model, selected_preset):
    """Resolve the display name to its model id, then delegate to respond()."""
    print(f"Selected model: {selected_model}")
    print(f"Available models: {pseudonyms}")

    try:
        model_name = next(
            model_id
            for model_id, display_name in model_choices
            if display_name == selected_model
        )
    except StopIteration:
        return f"Error: the selected model '{selected_model}' is not valid. Please select a valid model."

    return respond(message, history, model_name, selected_preset)
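
# With the single entry above, selecting "Lake 1 Base" in the UI resolves to
# "mistralai/Mistral-7B-Instruct-v0.3" before the API call.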


demo = gr.ChatInterface(
    fn=respond_with_pseudonym,
    type="messages",  # deliver history as {"role": ..., "content": ...} dicts, as respond() expects
    additional_inputs=[
        gr.Dropdown(choices=pseudonyms, label="Select Model", value=pseudonyms[0]),
        gr.Dropdown(
            choices=list(presets["mistralai/Mistral-7B-Instruct-v0.3"].keys()),
            label="Select Preset",
            value="Fast",
        ),
    ],
)
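
# ChatInterface calls fn(message, history, *additional_inputs), so the two
# dropdowns arrive as selected_model and selected_preset.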

if __name__ == "__main__":
    demo.launch()