pcuenq (HF staff) committed
Commit 09babdf
1 Parent(s): 3d7b108

Place each download in a separate temp dir


This is a workaround for https://github.com/huggingface/huggingface_hub/issues/2607
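
Concretely, the fix is to hand every download its own fresh TemporaryDirectory instead of a shared, reused local_dir. A minimal sketch of the pattern (the helper name and structure below are illustrative, not the app's actual code):

import tempfile
from pathlib import Path
from huggingface_hub import snapshot_download

def download_to_fresh_dir(repo_id: str, parent: str = "downloads"):
    # One brand-new directory per download; `parent` must already exist.
    with tempfile.TemporaryDirectory(dir=parent) as tmpdir:
        # Name the subdirectory after the model, so tools that read the
        # directory name (e.g. convert_hf_to_gguf.py) pick up the model name.
        local_dir = Path(tmpdir) / repo_id.split("/")[-1]
        snapshot_download(repo_id=repo_id, local_dir=local_dir)
        ...  # do everything that needs the downloaded files inside the block
    # tmpdir (and everything under it) is gone here, success or failure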

Files changed (2)
  1. app.py +21 -20
  2. downloads/.keep +0 -0
app.py CHANGED
@@ -1,17 +1,16 @@
 import os
-import shutil
 import subprocess
 import signal
 os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
 import gradio as gr
+import tempfile
 
 from huggingface_hub import HfApi, ModelCard, whoami
-
 from gradio_huggingfacehub_search import HuggingfaceHubSearch
-
+from pathlib import Path
+from textwrap import dedent
 from apscheduler.schedulers.background import BackgroundScheduler
 
-from textwrap import dedent
 
 HF_TOKEN = os.environ.get("HF_TOKEN")
 
@@ -109,19 +108,23 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
 
         dl_pattern += [pattern]
 
-        api.snapshot_download(repo_id=model_id, local_dir=model_name, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
-        print("Model downloaded successfully!")
-        print(f"Current working directory: {os.getcwd()}")
-        print(f"Model directory contents: {os.listdir(model_name)}")
-
-        conversion_script = "convert_hf_to_gguf.py"
-        fp16_conversion = f"python llama.cpp/{conversion_script} {model_name} --outtype f16 --outfile {fp16}"
-        result = subprocess.run(fp16_conversion, shell=True, capture_output=True)
-        print(result)
-        if result.returncode != 0:
-            raise Exception(f"Error converting to fp16: {result.stderr}")
-        print("Model converted to fp16 successfully!")
-        print(f"Converted model path: {fp16}")
+        with tempfile.TemporaryDirectory(dir="downloads") as tmpdir:
+            # Keep the model name as the dirname so the model name metadata is populated correctly
+            local_dir = Path(tmpdir)/model_name
+            print(local_dir)
+            api.snapshot_download(repo_id=model_id, local_dir=local_dir, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
+            print("Model downloaded successfully!")
+            print(f"Current working directory: {os.getcwd()}")
+            print(f"Model directory contents: {os.listdir(local_dir)}")
+
+            conversion_script = "convert_hf_to_gguf.py"
+            fp16_conversion = f"python llama.cpp/{conversion_script} {local_dir} --outtype f16 --outfile {fp16}"
+            result = subprocess.run(fp16_conversion, shell=True, capture_output=True)
+            print(result)
+            if result.returncode != 0:
+                raise Exception(f"Error converting to fp16: {result.stderr}")
+            print("Model converted to fp16 successfully!")
+            print(f"Converted model path: {fp16}")
 
         imatrix_path = "llama.cpp/imatrix.dat"
 
@@ -254,9 +257,7 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
         )
     except Exception as e:
         return (f"Error: {e}", "error.png")
-    finally:
-        shutil.rmtree(model_name, ignore_errors=True)
-        print("Folder cleaned up successfully!")
+
 
 css="""/* Custom CSS to allow scrolling */
 .gradio-container {overflow-y: auto;}
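
A side effect of using tempfile.TemporaryDirectory as a context manager is that it also replaces the old finally: shutil.rmtree(...) cleanup (and the shutil import): the directory is removed on context exit even when an exception escapes the block. A self-contained illustration of that behavior, not part of the Space's code:

import os
import tempfile

os.makedirs("downloads", exist_ok=True)  # dir= needs an existing parent
try:
    with tempfile.TemporaryDirectory(dir="downloads") as tmpdir:
        open(os.path.join(tmpdir, "model.bin"), "wb").close()
        raise RuntimeError("simulated conversion failure")
except RuntimeError:
    pass
print(os.path.exists(tmpdir))  # False: cleaned up despite the exception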
downloads/.keep ADDED
Empty placeholder file, committed so the downloads/ directory exists in the repo: tempfile.TemporaryDirectory(dir="downloads") requires that parent directory to be present.