Commit e7620c4
Parent(s): 6078866
Linux support for BF16 (#36)
- Linux support: gguf-imat-lossless-for-BF16-linux.py (9e58050ed7fc995caff97ad81f63f8c7dc99416d)
Co-authored-by: Virt <[email protected]>
gguf-imat-lossless-for-BF16-linux.py
ADDED
@@ -0,0 +1,207 @@
import os
import requests
import zipfile
import subprocess
import shutil
import platform
from huggingface_hub import snapshot_download

def clone_or_update_llama_cpp():
    print("Preparing...")
    base_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(base_dir)
    if not os.path.exists("llama.cpp"):
        subprocess.run(["git", "clone", "--depth", "1", "https://github.com/ggerganov/llama.cpp"])
    else:
        os.chdir("llama.cpp")
        subprocess.run(["git", "pull"])
    os.chdir(base_dir)
    print("The 'llama.cpp' repository is ready.")

def build_or_rebuild_llama_cpp():
    answer = input("Build or Rebuild? yes/no\n")
    answer = answer.strip().lower()
    if answer == "yes":
        os.chdir("llama.cpp")
        subprocess.run(["make", "clean"])
        subprocess.run(["make", "-j", "8", "GGML_CUDA=1"])
    elif answer == "no":
        print("No action")

def download_llama_release():
    base_dir = os.path.dirname(os.path.abspath(__file__))
    dl_dir = os.path.join(base_dir, "bin", "dl")
    if not os.path.exists(dl_dir):
        os.makedirs(dl_dir)

    os.chdir(dl_dir)
    latest_release_url = "https://github.com/ggerganov/llama.cpp/releases/latest"
    response = requests.get(latest_release_url)
    if response.status_code == 200:
        # GitHub redirects /latest to the tagged release; recover the tag from the final URL.
        latest_release_tag = response.url.split("/")[-1]
        download_url = f"https://github.com/ggerganov/llama.cpp/releases/download/{latest_release_tag}/llama-{latest_release_tag}-bin-win-cuda-cu12.2.0-x64.zip"
        print("Downloading latest 'llama.cpp' prebuilt Windows binaries...")
        response = requests.get(download_url)
        if response.status_code == 200:
            with open(f"llama-{latest_release_tag}-bin-win-cuda-cu12.2.0-x64.zip", "wb") as f:
                f.write(response.content)
            with zipfile.ZipFile(f"llama-{latest_release_tag}-bin-win-cuda-cu12.2.0-x64.zip", "r") as zip_ref:
                zip_ref.extractall(os.path.join(base_dir, "bin"))
            print("Download and extraction completed successfully.")
            return latest_release_tag
        else:
            print("Failed to download the release file.")
    else:
        print("Failed to fetch the latest release information.")

def download_cudart_if_necessary(latest_release_tag):
    base_dir = os.path.dirname(os.path.abspath(__file__))
    cudart_dl_dir = os.path.join(base_dir, "bin", "dl")
    if not os.path.exists(cudart_dl_dir):
        os.makedirs(cudart_dl_dir)

    cudart_zip_file = os.path.join(cudart_dl_dir, "cudart-llama-bin-win-cu12.2.0-x64.zip")
    cudart_extracted_files = ["cublas64_12.dll", "cublasLt64_12.dll", "cudart64_12.dll"]

    # Skip the download when the CUDA runtime DLLs are already in place.
    if all(os.path.exists(os.path.join(base_dir, "bin", file)) for file in cudart_extracted_files):
        print("Cuda resources already exist. Skipping download.")
    else:
        cudart_download_url = f"https://github.com/ggerganov/llama.cpp/releases/download/{latest_release_tag}/cudart-llama-bin-win-cu12.2.0-x64.zip"
        print("Preparing 'cuda' resources...")
        response = requests.get(cudart_download_url)
        if response.status_code == 200:
            with open(cudart_zip_file, "wb") as f:
                f.write(response.content)
            with zipfile.ZipFile(cudart_zip_file, "r") as zip_ref:
                zip_ref.extractall(os.path.join(base_dir, "bin"))
            print("Download and extraction of cudart completed successfully.")
        else:
            print("Failed to download the cudart release file.")

def download_model_repo():
    base_dir = os.path.dirname(os.path.abspath(__file__))
    models_dir = os.path.join(base_dir, "models")

    if not os.path.exists(models_dir):
        os.makedirs(models_dir)

    model_id = input("Enter the model ID to download (e.g., huggingface/transformers): ")
    model_name = model_id.split("/")[-1]
    model_dir = os.path.join(models_dir, model_name)

    gguf_dir = os.path.join(base_dir, "models", f"{model_name}-GGUF")
    gguf_model_path = os.path.join(gguf_dir, f"{model_name}-F16.gguf")
    imatrix_file_name = input("Enter the name of the imatrix.txt file (default: imatrix.txt): ").strip() or "imatrix.txt"
    delete_model_dir = input("Remove HF model folder after converting original model to GGUF? (yes/no) (default: no): ").strip().lower()

    if os.path.exists(gguf_model_path):
        create_imatrix(base_dir, gguf_dir, gguf_model_path, model_name, imatrix_file_name)
    else:
        if os.path.exists(model_dir):
            print("Model repository already exists. Using existing repository.")

            convert_model_to_gguf_bf16(base_dir, model_dir, model_name, delete_model_dir, imatrix_file_name)

        else:
            revision = input("Enter the revision (branch, tag, or commit) to download (default: main): ") or "main"

            print("Downloading model repository...")
            snapshot_download(repo_id=model_id, local_dir=model_dir, revision=revision)
            print("Model repository downloaded successfully.")

            convert_model_to_gguf_bf16(base_dir, model_dir, model_name, delete_model_dir, imatrix_file_name)

def convert_model_to_gguf_bf16(base_dir, model_dir, model_name, delete_model_dir, imatrix_file_name):
    convert_script = os.path.join(base_dir, "llama.cpp", "convert_hf_to_gguf.py")
    gguf_dir = os.path.join(base_dir, "models", f"{model_name}-GGUF")
    gguf_model_path = os.path.join(gguf_dir, f"{model_name}-BF16.gguf")

    if not os.path.exists(gguf_dir):
        os.makedirs(gguf_dir)

    if not os.path.exists(gguf_model_path):
        subprocess.run(["python", convert_script, model_dir, "--outfile", gguf_model_path, "--outtype", "bf16"])

    convert_model_to_gguf_f16(base_dir, model_dir, model_name, delete_model_dir, imatrix_file_name)

def convert_model_to_gguf_f16(base_dir, model_dir, model_name, delete_model_dir, imatrix_file_name):
    convert_script = os.path.join(base_dir, "llama.cpp", "convert_hf_to_gguf.py")
    gguf_dir = os.path.join(base_dir, "models", f"{model_name}-GGUF")
    gguf_model_path = os.path.join(gguf_dir, f"{model_name}-F16.gguf")

    if not os.path.exists(gguf_dir):
        os.makedirs(gguf_dir)

    if not os.path.exists(gguf_model_path):
        subprocess.run(["python", convert_script, model_dir, "--outfile", gguf_model_path, "--outtype", "f16"])

    if delete_model_dir in ("yes", "y"):
        shutil.rmtree(model_dir)
        print(f"Original model directory '{model_dir}' deleted.")
    else:
        print(f"Original model directory '{model_dir}' was not deleted. You can remove it manually.")

    create_imatrix(base_dir, gguf_dir, gguf_model_path, model_name, imatrix_file_name)

def create_imatrix(base_dir, gguf_dir, gguf_model_path, model_name, imatrix_file_name):
    if platform.system() == "Linux":
        imatrix_exe = os.path.join(base_dir, "llama.cpp", "llama-imatrix")
    elif platform.system() == "Windows":
        imatrix_exe = os.path.join(base_dir, "bin", "llama-imatrix.exe")
    # llama-imatrix writes imatrix.dat into its working directory, which is
    # already the GGUF folder here, so no move is needed afterwards.
    imatrix_output = os.path.join(gguf_dir, "imatrix.dat")
    if not os.path.exists(imatrix_output):
        try:
            subprocess.run([imatrix_exe, "-m", gguf_model_path, "-f", os.path.join(base_dir, "imatrix", imatrix_file_name), "-ngl", "7"], cwd=gguf_dir)
            print("imatrix.dat created successfully.")
        except Exception as e:
            print("Error occurred while creating imatrix.dat:", e)
    else:
        print("imatrix.dat already exists in the GGUF folder.")

    quantize_models(base_dir, model_name)

def quantize_models(base_dir, model_name):
    gguf_dir = os.path.join(base_dir, "models", f"{model_name}-GGUF")
    bf16_gguf_path = os.path.join(gguf_dir, f"{model_name}-BF16.gguf")
    imatrix_path = os.path.join(gguf_dir, "imatrix.dat")

    if platform.system() == "Linux":
        quantize_command = os.path.join(base_dir, "llama.cpp", "llama-quantize")
    elif platform.system() == "Windows":
        quantize_command = os.path.join(base_dir, "bin", "llama-quantize.exe")

    quantization_options = [
        "IQ3_M", "IQ3_XXS",
        "Q4_K_M", "Q4_K_S", "IQ4_XS",
        "Q5_K_M", "Q5_K_S",
        "Q6_K",
        "Q8_0"
    ]

    for quant_option in quantization_options:
        quantized_gguf_name = f"{model_name}-{quant_option}-imat.gguf"
        quantized_gguf_path = os.path.join(gguf_dir, quantized_gguf_name)

        subprocess.run([quantize_command, "--imatrix", imatrix_path,
                        bf16_gguf_path, quantized_gguf_path, quant_option], cwd=gguf_dir)
        print(f"Model quantized with {quant_option} option.")

# main() is defined per platform: Linux builds llama.cpp from source, while
# Windows downloads the prebuilt CUDA binaries instead.
if platform.system() == "Linux":
    def main():
        clone_or_update_llama_cpp()
        build_or_rebuild_llama_cpp()
        download_model_repo()
        print("Finished preparing resources.")
elif platform.system() == "Windows":
    def main():
        clone_or_update_llama_cpp()
        latest_release_tag = download_llama_release()
        download_cudart_if_necessary(latest_release_tag)
        download_model_repo()
        print("Finished preparing resources.")

if __name__ == "__main__":
    main()
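For orientation, a sketch of the directory layout a Linux run of this script produces. Paths are taken from the code above; <model> and <quant> are placeholders for the chosen model name and quantization type, and imatrix/imatrix.txt is assumed to be supplied by the user (the script reads it but does not create it):

    <script-dir>/
    ├── llama.cpp/                        # cloned and (optionally) built with make GGML_CUDA=1
    ├── imatrix/
    │   └── imatrix.txt                   # user-supplied calibration text fed to llama-imatrix
    └── models/
        ├── <model>/                      # downloaded HF snapshot (optionally deleted after conversion)
        └── <model>-GGUF/
            ├── <model>-BF16.gguf         # conversion output; also the quantization source
            ├── <model>-F16.gguf
            ├── imatrix.dat
            └── <model>-<quant>-imat.gguf # one per option, e.g. Q4_K_M, Q8_0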