qiqiWav committed on
Commit 70606a9 • 1 Parent(s): 39dc08f

Update app.py

Files changed (1)
  1. app.py +24 -11
app.py CHANGED
@@ -2,19 +2,31 @@ import gradio as gr
 from gradio_huggingfacehub_search import HuggingfaceHubSearch
 import requests
 
-processed_inputs = {}
+"""
+# OAuth Token Information:
+# - This is an OAuth token, not a user's password.
+# - We need the OAuth token to clone the related repository and access its contents.
+# - As mentioned in the README.md, only read permission is requested, which includes:
+#   - Read access to your public profile
+#   - Read access to the content of all your public repos
+# - The token expires after 60 minutes.
+# - For more information about OAuth, please refer to the official documentation:
+#   https://huggingface.co/docs/hub/en/spaces-oauth
+"""
+
 
-def process_inputs(markdown, model_id, q_method, email, oauth_token: gr.OAuthToken | None, profile: gr.OAuthProfile | None):
+processed_inputs = {}
+def process_inputs(model_id, q_method, email, oauth_token: gr.OAuthToken | None, profile: gr.OAuthProfile | None):
     if oauth_token is None or oauth_token.token is None or profile.username is None:
-        return "##### You must be logged in to use this service."
+        return "#### You must be logged in to use this service."
 
     if not model_id or not q_method or not email:
-        return "##### All fields are required!"
+        return "#### All fields are required!"
 
     input_hash = hash((model_id, q_method, oauth_token.token, profile.username))
 
     if input_hash in processed_inputs and processed_inputs[input_hash] == 200:
-        return "##### This request has already been submitted successfully. Please do not submit the same request multiple times."
+        return "#### This request has already been submitted successfully. Please do not submit the same request multiple times."
 
     url = "https://sdk.nexa4ai.com/task"
 
@@ -30,15 +42,14 @@ def process_inputs(markdown, model_id, q_method, email, oauth_token: gr.OAuthTok
 
     if response.status_code == 200:
         processed_inputs[input_hash] = 200
-        return "##### Your request has been submitted successfully. We will notify you by email once processing is complete. There is no need to submit the same request multiple times."
+        return "#### Your request has been submitted successfully. We will notify you by email once processing is complete. There is no need to submit the same request multiple times."
     else:
         processed_inputs[input_hash] = response.status_code
-        return f"##### Failed to submit request: {response.text}"
+        return f"#### Failed to submit request: {response.text}"
 
 iface = gr.Interface(
     fn=process_inputs,
     inputs=[
-        gr.Markdown(value="##### 🔔 You must grant access to the model repository before use."),
         HuggingfaceHubSearch(
             label="Hub Model ID",
             placeholder="Search for model id on Huggingface",
@@ -55,17 +66,19 @@ iface = gr.Interface(
     ],
     outputs = gr.Markdown(
         label="output",
-        value="##### Please enter the model URL, select a quantization method, and provide your email address."
+        value="#### Please enter the model URL, select a quantization method, and provide your email address."
     ),
-    title="Create your own GGUF Quants, blazingly fast ⚡!",
     allow_flagging="never"
 )
 
 theme = gr.themes.Base(text_size="lg")
 with gr.Blocks(theme=theme) as demo:
+    gr.Markdown(value="# 🚀 Unleash the Power of Custom GGML Quantized Models! ⚡")
+    gr.LoginButton(min_width=300)
     gr.Markdown(value="### 🔔 You must be logged in to use this service.")
-    gr.LoginButton(min_width=250)
+    gr.Markdown(value="### 🚨 IMPORTANT: You must grant access to the model repository before use.")
     iface.render()
+    gr.Markdown(value="We sincerely thank our community members, [Perry](https://huggingface.co/PerryCheng614), [Brian](https://huggingface.co/JoyboyBrian), [Qi](https://huggingface.co/qiqiWav), [David](https://huggingface.co/Davidqian123), for their extraordinary contributions to this GGUF converter project.")
 
 demo.launch(share=True)
 
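
Two standalone illustrations follow. Neither is part of the commit above, and the handler names, labels, and sample values in them are invented for demonstration.

The OAuth comment added at the top of app.py describes how the Space uses the visitor's Hugging Face login. The sketch below shows that mechanism in isolation, following the pattern documented at https://huggingface.co/docs/hub/en/spaces-oauth: once a Space enables OAuth in its README metadata (hf_oauth: true), Gradio injects gr.OAuthProfile and gr.OAuthToken into an event handler based on the parameter type annotations after the user signs in through gr.LoginButton, and both stay None for anonymous visitors.

import gradio as gr

def whoami(profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToken | None) -> str:
    # Gradio fills these parameters from the login session in an OAuth-enabled
    # Space; they remain None until the visitor clicks the LoginButton.
    if profile is None or oauth_token is None:
        return "#### You must be logged in to use this service."
    # profile.username identifies the user; oauth_token.token is the short-lived,
    # read-only token that the app.py comment notes expires after 60 minutes.
    return f"#### Logged in as {profile.username}"

with gr.Blocks() as demo:
    gr.LoginButton()
    status = gr.Markdown()
    gr.Button("Check login").click(whoami, inputs=None, outputs=status)

demo.launch()

The same injection is why process_inputs can receive oauth_token and profile without them being listed among the gr.Interface inputs.

The duplicate-submission guard this commit keeps (processed_inputs keyed by hash(...)) can be illustrated with made-up values: an exact repeat of the same request by the same user is skipped, any changed field produces a new key, and because the dict lives in process memory the guard resets whenever the Space restarts.

processed_inputs = {}

def is_duplicate(model_id, q_method, token, username):
    # Same key construction as app.py: all four fields must match to count as a repeat.
    input_hash = hash((model_id, q_method, token, username))
    return processed_inputs.get(input_hash) == 200

# Pretend one request already succeeded (hypothetical model id, method, token, username).
processed_inputs[hash(("org/some-model", "Q4_K_M", "hf_xxx", "alice"))] = 200

print(is_duplicate("org/some-model", "Q4_K_M", "hf_xxx", "alice"))  # True: identical request
print(is_duplicate("org/some-model", "Q8_0", "hf_xxx", "alice"))    # False: different method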