"""Download Stable Diffusion checkpoints from Hugging Face and quantize them
to GGUF with the stable-diffusion.cpp ``sd`` binary (expected at ./bin/sd)."""
import os
import shlex
import subprocess
from typing import List

from huggingface_hub import hf_hub_download
def QuantizeModel(model_name: str, lvl: str = 'q4_0') -> str:
    """
    Quantize a model with the stable-diffusion.cpp ``sd`` binary and save it
    in GGUF format next to the input file.

    :param model_name: Path to the model file (.safetensors, .ckpt, ...)
    :param lvl: Quantization level (e.g., 'q4_0')
    :return: Path of the GGUF file the converter was asked to write
    """
    # splitext handles ANY extension; the old `.replace('.safetensors', ...)`
    # silently left `.ckpt` inputs with an output name equal to the input.
    base, _ext = os.path.splitext(model_name)
    gguf_name = f"{base}_{lvl}.gguf"
    # Quote every interpolated value so paths with spaces/metacharacters
    # cannot break (or inject into) the shell command.
    os.system(
        f'./bin/sd -M convert -m {shlex.quote(model_name)} '
        f'-o {shlex.quote(gguf_name)} -v --type {shlex.quote(lvl)}'
    )
    return gguf_name
def download_models(models: List[str]) -> None:
    """
    Download models from Hugging Face using their direct-resolve URLs.

    Each file is saved into the current directory under the URL's final
    path segment (curl's ``-O`` behavior).

    :param models: List of Hugging Face model URLs
    """
    for model_url in models:
        # List-form subprocess call: no shell, so URL metacharacters
        # cannot be interpreted or injected.  check=False keeps the old
        # os.system best-effort behavior (a failed download is not fatal).
        subprocess.run(['curl', '-L', '-O', model_url], check=False)
# Example usage
def main() -> None:
    """Download the reference checkpoints, then quantize each to q4_0 GGUF."""
    models_to_download = [
        "https://huggingface.co/stabilityai/stable-diffusion-3-medium/resolve/main/sd3_medium_incl_clips_t5xxlfp16.safetensors",
        "https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt",
        "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors",
        "https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-nonema-pruned.safetensors"
    ]
    download_models(models_to_download)

    # Quantize models.  curl -O saves each file under the URL's last path
    # segment, so derive the local name from the URL instead of keeping a
    # second hand-written filename list that can drift out of sync.
    for model_url in models_to_download:
        local_name = model_url.rsplit('/', 1)[-1]
        QuantizeModel(local_name, lvl='q4_0')


if __name__ == "__main__":
    main()
|