import gradio as gr
from gradio_huggingfacehub_search import HuggingfaceHubSearch
import requests
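
# Tracks submissions that have already been accepted, keyed by a hash of the
# inputs, so duplicate requests are not sent to the backend again.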
processed_inputs = {}


def process_inputs(model_id, q_method, email, oauth_token: gr.OAuthToken | None, profile: gr.OAuthProfile | None):
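    # Require a valid Hugging Face login before accepting a submission.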
    if oauth_token is None or profile is None or oauth_token.token is None or profile.username is None:
        return "### You must be logged in to use this service."

    if not model_id or not q_method or not email:
        return "### All fields are required!"

    input_hash = hash((model_id, q_method, oauth_token.token, profile.username))
    if input_hash in processed_inputs and processed_inputs[input_hash] == 200:
        return "### Oops! Looks like you've already submitted this task. Please hang tight! We'll send you an email with all the details once it's ready. Thanks for your patience!"
url = "https://sdk.nexa4ai.com/task" | |
data = { | |
"repository_url": f"https://huggingface.co/{model_id}", | |
"username": profile.username, | |
"access_token": oauth_token.token, | |
"email": email, | |
"quantization_option": q_method, | |
} | |
""" | |
# OAuth Token Information: | |
# - This is an OAuth token, not a user's password. | |
# - We need the OAuth token to clone the related repository and access its contents. | |
# - As mentioned in the README.md, only read permission is requested, which includes: | |
# - Read access to your public profile | |
# - Read access to the content of all your public repos | |
# - The token expires after 60 minutes. | |
# - For more information about OAuth, please refer to the official documentation: | |
# https://huggingface.co/docs/hub/en/spaces-oauth | |
""" | |
    response = requests.post(url, json=data)
    if response.status_code == 200:
        processed_inputs[input_hash] = 200
        return "### Your request has been submitted successfully! We'll notify you by email once everything is processed."
    else:
        processed_inputs[input_hash] = response.status_code
        return f"### Failed to submit request: {response.text}"
iface = gr.Interface(
    fn=process_inputs,
    inputs=[
        HuggingfaceHubSearch(
            label="Hub Model ID",
            placeholder="Search for a model ID on Hugging Face",
            search_type="model",
        ),
        gr.Dropdown(
            ["q2_K", "q3_K", "q3_K_S", "q3_K_M", "q3_K_L", "q4_0", "q4_1", "q4_K", "q4_K_S", "q4_K_M", "q5_0", "q5_1", "q5_K", "q5_K_S", "q5_K_M", "q6_K", "q8_0", "f16"],
            label="Quantization Option",
            info="GGML quantization options",
            value="q4_0",
            filterable=False
        ),
        gr.Textbox(label="Email", placeholder="Enter your email here")
    ],
    outputs=gr.Markdown(
        label="output",
        value="### Please enter the model ID, select a quantization method, and provide your email address."
    ),
    allow_flagging="never"
)

theme = gr.themes.Soft(text_size="lg", spacing_size="lg")
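
# Page layout: header row with a login button, usage notes, the form above, and credits.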
with gr.Blocks(theme=theme) as demo:
    with gr.Row(variant="panel"):
        gr.Markdown(value="## Unleash the Power of Custom GGML Quantized Models!")
        gr.LoginButton(min_width=380)

    gr.Markdown(value="**IMPORTANT:** You **MUST** grant access to the model repository before use.")
    gr.Markdown(value="You **MUST** be logged in to use this service.")
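
    # Render the Interface defined above inside this Blocks layout.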
    iface.render()

    gr.Markdown(value="We sincerely thank our community members, [Perry](https://huggingface.co/PerryCheng614), [Brian](https://huggingface.co/JoyboyBrian), [Qi](https://huggingface.co/qiqiWav), [David](https://huggingface.co/Davidqian123), for their extraordinary contributions to this GGUF converter project.")

demo.launch(share=True)