Spaces: Running on Zero

Upload 4 files
app.py
CHANGED
@@ -384,6 +384,35 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
     copy_prompt_btn.click(gradio_copy_prompt, inputs=[output_text], outputs=[prompt], show_api=False)
     copy_prompt_btn_pony.click(gradio_copy_prompt, inputs=[output_text_pony], outputs=[prompt], show_api=False)

+    with gr.Tab("PNG Info"):
+        def extract_exif_data(image):
+            if image is None: return ""
+
+            try:
+                metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
+
+                for key in metadata_keys:
+                    if key in image.info:
+                        return image.info[key]
+
+                return str(image.info)
+
+            except Exception as e:
+                return f"Error extracting metadata: {str(e)}"
+
+        with gr.Row():
+            with gr.Column():
+                image_metadata = gr.Image(label="Image with metadata", type="pil", sources=["upload"])
+
+            with gr.Column():
+                result_metadata = gr.Textbox(label="Metadata", show_label=True, show_copy_button=True, interactive=False, container=True, max_lines=99)
+
+        image_metadata.change(
+            fn=extract_exif_data,
+            inputs=[image_metadata],
+            outputs=[result_metadata],
+        )
+
     gr.LoginButton()
     gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
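The new PNG Info tab relies on Pillow exposing PNG text chunks through Image.info. Below is a minimal standalone sketch of the same lookup extract_exif_data performs, assuming a PNG whose generation parameters live in a "parameters" text chunk (the file name and sample text are hypothetical):

    from PIL import Image
    from PIL.PngImagePlugin import PngInfo

    # Write a PNG carrying an A1111-style "parameters" text chunk (hypothetical sample).
    meta = PngInfo()
    meta.add_text("parameters", "1girl, solo\nNegative prompt: lowres\nSteps: 28")
    Image.new("RGB", (8, 8)).save("sample.png", pnginfo=meta)

    # Read it back the way extract_exif_data does: scan known keys in image.info.
    img = Image.open("sample.png")
    for key in ("parameters", "metadata", "prompt", "Comment"):
        if key in img.info:
            print(img.info[key])  # prints the stored generation parameters
            break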
dc.py
CHANGED
@@ -23,6 +23,9 @@ from stablepy import (
 )
 import time
 #import urllib.parse
+
+print(os.getenv("SPACES_ZERO_GPU"))
+
 import gradio as gr
 import logging
 logging.getLogger("diffusers").setLevel(logging.ERROR)
@@ -52,14 +55,14 @@ PREPROCESSOR_CONTROLNET = {
     ],
     "scribble": [
         "HED",
-        "
+        "PidiNet",
         "None",
     ],
     "softedge": [
-        "
+        "PidiNet",
         "HED",
         "HED safe",
-        "
+        "PidiNet safe",
         "None",
     ],
     "segmentation": [
@@ -295,18 +298,21 @@ def extract_parameters(input_string):
     input_string = input_string.replace("\n", "")

     if "Negative prompt:" not in input_string:
-        print("Invalid metadata")
-        parameters["prompt"] = input_string
-        return parameters
+        if "Steps:" in input_string:
+            input_string = input_string.replace("Steps:", "Negative prompt: Steps:")
+        else:
+            print("Invalid metadata")
+            parameters["prompt"] = input_string
+            return parameters

     parm = input_string.split("Negative prompt:")
-    parameters["prompt"] = parm[0]
+    parameters["prompt"] = parm[0].strip()
     if "Steps:" not in parm[1]:
         print("Steps not detected")
-        parameters["neg_prompt"] = parm[1]
+        parameters["neg_prompt"] = parm[1].strip()
         return parameters
     parm = parm[1].split("Steps:")
-    parameters["neg_prompt"] = parm[0]
+    parameters["neg_prompt"] = parm[0].strip()
     input_string = "Steps:" + parm[1]

     # Extracting Steps
@@ -355,12 +361,13 @@ class GuiSD:
             retain_task_model_in_cache=False,
             device="cpu",
         )
-        self.model.
+        self.model.load_beta_styles()
+        #self.model.device = torch.device("cpu") #

     def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
-        progress(0, desc="Start inference...")
+        #progress(0, desc="Start inference...")
         images, seed, image_list, metadata = model(**pipe_params)
-        progress(1, desc="Inference completed.")
+        #progress(1, desc="Inference completed.")
         if not isinstance(images, list): images = [images]
         images = save_images(images, metadata)
         img = []
@@ -500,7 +507,7 @@ class GuiSD:
         pag_scale,
         progress=gr.Progress(track_tqdm=True),
     ):
-        progress(0, desc="Preparing inference...")
+        #progress(0, desc="Preparing inference...")

         vae_model = vae_model if vae_model != "None" else None
         loras_list = [lora1, lora2, lora3, lora4, lora5]
@@ -670,7 +677,7 @@ class GuiSD:
         self.model.pipe.transformer.to(self.model.device)
         print("transformer to cuda")

-        progress(1, desc="Inference preparation completed. Starting inference...")
+        #progress(1, desc="Inference preparation completed. Starting inference...")

         info_state = "" # for yield version
         return self.infer_short(self.model, pipe_params, progress), info_state
@@ -805,9 +812,12 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     lora3 = get_valid_lora_path(lora3)
     lora4 = get_valid_lora_path(lora4)
     lora5 = get_valid_lora_path(lora5)
-    progress(1, desc="Preparation completed. Starting inference
+    progress(1, desc="Preparation completed. Starting inference...")

-
+    progress(0, desc="Loading model...")
+    sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0])
+    progress(1, desc="Model loaded.")
+    progress(0, desc="Starting Inference...")
     images, info = sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
         guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
         lora4, lora4_wt, lora5, lora5_wt, sampler,
@@ -820,7 +830,6 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, 0.0,
     load_lora_cpu, verbose_info, gpu_duration
     )
-
     progress(1, desc="Inference completed.")
     output_image = images[0][0] if images else None
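The extract_parameters hunk makes the parser tolerate metadata that has a "Steps:" field but no "Negative prompt:" field by injecting an empty negative prompt, and it now strips whitespace from both prompts. A standalone sketch of just that split logic, assuming A1111-style parameter text (the real function goes on to parse Steps and the remaining key-value pairs):

    def split_prompts(input_string: str) -> dict:
        parameters = {}
        input_string = input_string.replace("\n", "")

        # New behavior: metadata with "Steps:" but no explicit negative prompt
        # gets an empty "Negative prompt:" injected so the split below still works.
        if "Negative prompt:" not in input_string:
            if "Steps:" in input_string:
                input_string = input_string.replace("Steps:", "Negative prompt: Steps:")
            else:
                parameters["prompt"] = input_string
                return parameters

        parm = input_string.split("Negative prompt:")
        parameters["prompt"] = parm[0].strip()
        if "Steps:" not in parm[1]:
            parameters["neg_prompt"] = parm[1].strip()
            return parameters
        parameters["neg_prompt"] = parm[1].split("Steps:")[0].strip()
        return parameters

    print(split_prompts("1girl, solo\nSteps: 28, Sampler: Euler a"))
    # -> {'prompt': '1girl, solo', 'neg_prompt': ''}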
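In dc.py the per-step progress calls inside GuiSD are commented out, while infer gains explicit staging: report model loading, report completion, then report inference start. A minimal sketch of that gr.Progress pattern in an ordinary Gradio event handler (the handler and the sleep stand-ins are hypothetical, not this Space's code):

    import time
    import gradio as gr

    def staged_task(progress=gr.Progress(track_tqdm=True)):
        progress(0, desc="Loading model...")
        time.sleep(1)  # stand-in for sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0])
        progress(1, desc="Model loaded.")
        progress(0, desc="Starting Inference...")
        time.sleep(1)  # stand-in for the sd_gen_generate_pipeline(...) call
        progress(1, desc="Inference completed.")
        return "done"

    with gr.Blocks() as demo:
        btn = gr.Button("Run")
        out = gr.Textbox()
        btn.click(staged_task, outputs=[out])

    demo.launch()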
env.py
CHANGED
@@ -39,6 +39,11 @@ load_diffusers_format_model = [
     'votepurchase/juggernautXL_hyper_8step_sfw',
     'votepurchase/ponyRealism_v21MainVAE',
     'stabilityai/stable-diffusion-xl-base-1.0',
+    'John6666/blue-pencil-flux1-v021-fp8-flux',
+    'John6666/wai-ani-flux-v10forfp8-fp8-flux',
+    'John6666/xe-anime-flux-v04-fp8-flux',
+    'John6666/lyh-anime-flux-v2a1-fp8-flux',
+    'John6666/carnival-unchained-v10-fp8-flux',
     'cagliostrolab/animagine-xl-3.1',
     'John6666/epicrealism-xl-v8kiss-sdxl',
     'misri/epicrealismXL_v7FinalDestination',
@@ -54,15 +59,30 @@ load_diffusers_format_model = [
     'WhiteAiZ/autismmixSDXL_autismmixConfetti_diffusers',
     'kitty7779/ponyDiffusionV6XL',
     'GraydientPlatformAPI/aniverse-pony',
+    'John6666/ras-real-anime-screencap-v1-sdxl',
+    'John6666/duchaiten-pony-xl-no-score-v60-sdxl',
     'John6666/mistoon-anime-ponyalpha-sdxl',
+    'John6666/3x3x3mixxl-v2-sdxl',
+    'John6666/3x3x3mixxl-3dv01-sdxl',
     'John6666/ebara-mfcg-pony-mix-v12-sdxl',
     'John6666/t-ponynai3-v51-sdxl',
+    'John6666/t-ponynai3-v65-sdxl',
+    'John6666/prefect-pony-xl-v3-sdxl',
     'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
     'John6666/wai-real-mix-v11-sdxl',
+    'John6666/wai-c-v6-sdxl',
+    'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
+    'John6666/photo-realistic-pony-v5-sdxl',
+    'John6666/pony-realism-v21main-sdxl',
+    'John6666/pony-realism-v22main-sdxl',
     'John6666/cyberrealistic-pony-v63-sdxl',
+    'John6666/cyberrealistic-pony-v64-sdxl',
     'GraydientPlatformAPI/realcartoon-pony-diffusion',
     'John6666/nova-anime-xl-pony-v5-sdxl',
     'John6666/autismmix-sdxl-autismmix-pony-sdxl',
+    'John6666/aimz-dream-real-pony-mix-v3-sdxl',
+    'John6666/duchaiten-pony-real-v11fix-sdxl',
+    'John6666/duchaiten-pony-real-v20-sdxl',
     'yodayo-ai/kivotos-xl-2.0',
     'yodayo-ai/holodayo-xl-2.1',
     'yodayo-ai/clandestine-xl-1.0',
@@ -81,6 +101,8 @@ load_diffusers_format_model = [
     'GraydientPlatformAPI/picx-real',
     'GraydientPlatformAPI/perfectworld6',
     'emilianJR/epiCRealism',
+    'votepurchase/counterfeitV30_v30',
+    'votepurchase/ChilloutMix',
     'Meina/MeinaMix_V11',
     'Meina/MeinaUnreal_V5',
     'Meina/MeinaPastel_V7',
llmdolphin.py
CHANGED
@@ -20,6 +20,7 @@ llm_models = {
     #"": ["", MessagesFormatterType.PHI_3],
     "mn-12b-lyra-v2a1-q5_k_m.gguf": ["HalleyStarbun/MN-12B-Lyra-v2a1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
     "L3-8B-Tamamo-v1.i1-Q5_K_M.gguf": ["mradermacher/L3-8B-Tamamo-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Mahou-1.5-mistral-nemo-12B.i1-Q4_K_M.gguf": ["mradermacher/Mahou-1.5-mistral-nemo-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
     "MN-12B-Mag-Mell-Q4_K_M.gguf": ["inflatebot/MN-12B-Mag-Mell-R1-GGUF", MessagesFormatterType.MISTRAL],
     "Qwen2.5-14B-Instruct-Q4_K_M.gguf": ["bartowski/Qwen2.5-14B-Instruct-GGUF", MessagesFormatterType.OPEN_CHAT],
     "Instant-RP-Noodles-12B-v1.3.Q4_K_M.gguf": ["mradermacher/Instant-RP-Noodles-12B-v1.3-GGUF", MessagesFormatterType.MISTRAL],
@@ -62,6 +63,30 @@ llm_models = {
     "ChatWaifu_22B_v2.0_preview.Q4_K_S.gguf": ["mradermacher/ChatWaifu_22B_v2.0_preview-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_v1.4.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.4-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_v1.3.1.Q4_K_M.gguf": ["mradermacher/ChatWaifu_v1.3.1-GGUF", MessagesFormatterType.MISTRAL],
+    "Humanish-Qwen2.5-7B-Instruct-Q5_K_M.gguf": ["bartowski/Humanish-Qwen2.5-7B-Instruct-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "FactAlign-LLaMA-3-8B.i1-Q4_K_M.gguf": ["mradermacher/FactAlign-LLaMA-3-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Magot-v3-Gemma2-8k-9B.i1-Q4_K_M.gguf": ["mradermacher/Magot-v3-Gemma2-8k-9B-i1-GGUF", MessagesFormatterType.ALPACA],
+    "EVA-Qwen2.5-14B-v0.1-Q4_K_M.gguf": ["bartowski/EVA-Qwen2.5-14B-v0.1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Flammades-Mistral-Nemo-12B.i1-Q4_K_M.gguf": ["mradermacher/Flammades-Mistral-Nemo-12B-i1-GGUF", MessagesFormatterType.CHATML],
+    "Humanish-LLama3-8B-Instruct-Q5_K_M.gguf": ["bartowski/Humanish-LLama3-8B-Instruct-GGUF", MessagesFormatterType.LLAMA_3],
+    "Humanish-Mistral-Nemo-Instruct-2407-Q4_K_M.gguf": ["bartowski/Humanish-Mistral-Nemo-Instruct-2407-GGUF", MessagesFormatterType.MISTRAL],
+    "L3.1-Clouded-Uchtave-v0.1-8B.Q5_K_M.gguf": ["mradermacher/L3.1-Clouded-Uchtave-v0.1-8B-GGUF", MessagesFormatterType.LLAMA_3],
+    "Baldur-Q5_K_M.gguf": ["Delta-Vector/Baldur-8B-GGUF", MessagesFormatterType.LLAMA_3],
+    "llama-jpsft-q6_k.gguf": ["ai-net/Llama-JPSFT-GGUF", MessagesFormatterType.LLAMA_3],
+    "Gemma-2-Ataraxy-v3i-9B.Q5_K_M.gguf": ["mradermacher/Gemma-2-Ataraxy-v3i-9B-GGUF", MessagesFormatterType.ALPACA],
+    "EVA-Qwen2.5-7B-v0.1.i1-Q5_K_M.gguf": ["mradermacher/EVA-Qwen2.5-7B-v0.1-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "EZO-Llama-3.2-3B-Instruct-dpoE.Q4_K_S.gguf": ["mradermacher/EZO-Llama-3.2-3B-Instruct-dpoE-GGUF", MessagesFormatterType.LLAMA_3],
+    "Bezaliel_p1.1-12B.i1-Q4_K_M.gguf": ["mradermacher/Bezaliel_p1.1-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Ice0.7-29.09-RP.i1-Q5_K_M.gguf": ["mradermacher/Ice0.7-29.09-RP-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "IceWhiskeyRP-7b.i1-Q5_K_M.gguf": ["mradermacher/IceWhiskeyRP-7b-i1-GGUF", MessagesFormatterType.ALPACA],
+    "L3.1-Purosani-2-8B.i1-Q4_K_M.gguf": ["mradermacher/L3.1-Purosani-2-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "llama-3.2-3b-instruct-abliterated-q4_k_m.gguf": ["darkc0de/Llama-3.2-3B-Instruct-abliterated-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
+    "Daredevil-Aura-8B_uncensored_OAS_abliterated.i1-Q4_K_S.gguf": ["mradermacher/Daredevil-Aura-8B_uncensored_OAS_abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Qwen-2.5-7b-TitanFusion-v3.Q5_K_M.gguf": ["mradermacher/Qwen-2.5-7b-TitanFusion-v3-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "MixTAO-7Bx2-MoE-v8.1.Q4_K_M.gguf": ["mradermacher/MixTAO-7Bx2-MoE-v8.1-GGUF", MessagesFormatterType.ALPACA],
+    "Promissum_Mane-8B-LINEAR.i1-Q5_K_M.gguf": ["mradermacher/Promissum_Mane-8B-LINEAR-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "L3.1-Purosani-1.5-8B.Q5_K_M.gguf": ["mradermacher/L3.1-Purosani-1.5-8B-GGUF", MessagesFormatterType.LLAMA_3],
+    "SzilviaB-Daredevil-LongWriter-8B_abliterated.i1-Q5_K_M.gguf": ["mradermacher/SzilviaB-Daredevil-LongWriter-8B_abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
     "eidolon-v1-14b-q4_k_m.gguf": ["Lambent/Eidolon-v1-14B-Q4_K_M-GGUF", MessagesFormatterType.OPEN_CHAT],
     "Mistral-Nemo-Gutenberg-Doppel-12B-v2.Q4_K_M.gguf": ["QuantFactory/Mistral-Nemo-Gutenberg-Doppel-12B-v2-GGUF", MessagesFormatterType.MISTRAL],
     "MN-Dark-Planet-TITAN-12B-D_AU-Q4_k_s.gguf": ["DavidAU/MN-Dark-Planet-TITAN-12B-GGUF", MessagesFormatterType.CHATML],
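Each llm_models entry maps a GGUF file name to its Hugging Face repo and the chat formatter the llama-cpp-agent layer should apply. A sketch of how one of the newly added entries could be resolved and loaded; the wiring below (including the import paths) is an assumption for illustration, not this Space's actual loader:

    from huggingface_hub import hf_hub_download
    from llama_cpp import Llama
    from llama_cpp_agent import MessagesFormatterType

    llm_models = {
        "Mahou-1.5-mistral-nemo-12B.i1-Q4_K_M.gguf":
            ["mradermacher/Mahou-1.5-mistral-nemo-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
    }

    filename = "Mahou-1.5-mistral-nemo-12B.i1-Q4_K_M.gguf"
    repo_id, formatter = llm_models[filename]

    # Download the quantized weights, then load them; `formatter` selects the
    # chat template applied when building prompts for this model.
    model_path = hf_hub_download(repo_id=repo_id, filename=filename)
    llm = Llama(model_path=model_path, n_ctx=4096)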