Spaces: Running on Zero
Upload 2 files
Browse files
- llmdolphin.py +3 -0
- requirements.txt +2 -1
llmdolphin.py
CHANGED
@@ -6,6 +6,7 @@ from llama_cpp_agent.providers import LlamaCppPythonProvider
 from llama_cpp_agent.chat_history import BasicChatHistory
 from llama_cpp_agent.chat_history.messages import Roles
 from ja_to_danbooru.ja_to_danbooru import jatags_to_danbooru_tags
+import timeout_decorator
 
 
 llm_models_dir = "./llm_models"
@@ -429,6 +430,7 @@ def dolphin_respond(
     yield [(outputs, None)]
 
 
+@timeout_decorator.timeout(15, use_signals=False)
 def dolphin_parse(
     history: list[tuple[str, str]],
 ):
@@ -526,6 +528,7 @@ def dolphin_respond_auto(
     yield [(outputs, None)]
 
 
+@timeout_decorator.timeout(15, use_signals=False)
 def dolphin_parse_simple(
     message: str,
     history: list[tuple[str, str]],
requirements.txt
CHANGED
@@ -14,4 +14,5 @@ httpx==0.13.3
 httpcore
 googletrans==4.0.0rc1
 git+https://github.com/huggingface/diffusers
-rapidfuzz
+rapidfuzz
+timeout-decorator