prithivMLmods committed (verified) · Commit 8e47763 · 1 Parent(s): 1124539

Update app.py

Files changed (1): app.py (+22, -1)
app.py CHANGED
@@ -24,6 +24,14 @@ from transformers import (
 from transformers.image_utils import load_image
 from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
 
+# Load the reasoning model interface from sambanova_gradio
+try:
+    import sambanova_gradio
+    reasoning_interface = gr.load("DeepSeek-R1-Distill-Llama-70B", src=sambanova_gradio.registry, accept_token=True)
+except Exception as e:
+    reasoning_interface = None
+    print("Reasoning model could not be loaded:", e)
+
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
@@ -186,8 +194,8 @@ def generate(
 ):
     text = input_dict["text"]
     files = input_dict.get("files", [])
-
     lower_text = text.lower().strip()
+
     # Check if the prompt is an image generation command using model flags.
     if (lower_text.startswith("@lightningv5") or
         lower_text.startswith("@lightningv4") or
@@ -240,6 +248,18 @@ def generate(
         yield gr.Image(image_path)
         return
 
+    # New reasoning feature.
+    elif lower_text.startswith("@reasoning"):
+        prompt_clean = re.sub(r"@reasoning", "", text, flags=re.IGNORECASE).strip()
+        if reasoning_interface is None:
+            yield "Reasoning model is not available."
+            return
+        yield "Reasoning in progress..."
+        # Call the reasoning model's prediction.
+        result = reasoning_interface.predict(prompt_clean)
+        yield result
+        return
+
     # Otherwise, handle text/chat (and TTS) generation.
     tts_prefix = "@tts"
     is_tts = any(text.strip().lower().startswith(f"{tts_prefix}{i}") for i in range(1, 3))
@@ -345,6 +365,7 @@ demo = gr.ChatInterface(
         ['@turbov3 "Abstract art, colorful and vibrant"'],
         ["Write a Python function to check if a number is prime."],
         ["@tts2 What causes rainbows to form?"],
+        ["@reasoning Explain the significance of Gödel's incompleteness theorems."],
     ],
     cache_examples=False,
     type="messages",

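For reference, the loading pattern this commit relies on is gr.load pointed at a third-party registry. A minimal standalone sketch, assuming sambanova-gradio is installed and a SambaNova API token is supplied through the UI; the demo variable name is illustrative, not part of the committed app.py:

    # Minimal sketch of the registry-loading pattern used above.
    # Assumes `pip install sambanova-gradio`; accept_token=True asks the
    # user for an API token in the rendered interface.
    import gradio as gr
    import sambanova_gradio

    demo = gr.load(
        name="DeepSeek-R1-Distill-Llama-70B",  # model name from the commit
        src=sambanova_gradio.registry,         # resolves the name to a SambaNova-backed app
        accept_token=True,
    )

    if __name__ == "__main__":
        demo.launch()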
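The new @reasoning branch follows the same prefix-flag dispatch already used for @tts and the image-generation flags: lower-case the input, test the prefix, strip the flag, and route to a handler. A self-contained sketch of that dispatch, with hypothetical handler names standing in for the real pipelines:

    # Sketch of the prefix-flag dispatch used in generate(); the handler
    # names handle_reasoning/handle_chat are hypothetical stand-ins.
    import re

    def dispatch(text: str) -> str:
        lower_text = text.lower().strip()
        if lower_text.startswith("@reasoning"):
            # Strip the flag the same way the commit does (case-insensitive).
            prompt_clean = re.sub(r"@reasoning", "", text, flags=re.IGNORECASE).strip()
            return f"handle_reasoning({prompt_clean!r})"
        return f"handle_chat({text!r})"

    print(dispatch("@reasoning Explain Gödel's incompleteness theorems."))
    # -> handle_reasoning("Explain Gödel's incompleteness theorems.")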