Download fixes

#127
by pcuenq (HF staff) · opened
Files changed (2)
  1. app.py +24 -26
  2. downloads/.keep +0 -0
app.py CHANGED
@@ -1,20 +1,16 @@
 import os
-import shutil
 import subprocess
 import signal
 os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
 import gradio as gr
+import tempfile
 
-from huggingface_hub import create_repo, HfApi
-from huggingface_hub import snapshot_download
-from huggingface_hub import whoami
-from huggingface_hub import ModelCard
-
+from huggingface_hub import HfApi, ModelCard, whoami
 from gradio_huggingfacehub_search import HuggingfaceHubSearch
-
+from pathlib import Path
+from textwrap import dedent
 from apscheduler.schedulers.background import BackgroundScheduler
 
-from textwrap import dedent
 
 HF_TOKEN = os.environ.get("HF_TOKEN")
 
@@ -110,21 +106,25 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
             else "*.bin"
         )
 
-        dl_pattern += pattern
-
-        api.snapshot_download(repo_id=model_id, local_dir=model_name, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
-        print("Model downloaded successfully!")
-        print(f"Current working directory: {os.getcwd()}")
-        print(f"Model directory contents: {os.listdir(model_name)}")
-
-        conversion_script = "convert_hf_to_gguf.py"
-        fp16_conversion = f"python llama.cpp/{conversion_script} {model_name} --outtype f16 --outfile {fp16}"
-        result = subprocess.run(fp16_conversion, shell=True, capture_output=True)
-        print(result)
-        if result.returncode != 0:
-            raise Exception(f"Error converting to fp16: {result.stderr}")
-        print("Model converted to fp16 successfully!")
-        print(f"Converted model path: {fp16}")
+        dl_pattern += [pattern]
+
+        with tempfile.TemporaryDirectory(dir="downloads") as tmpdir:
+            # Keep the model name as the dirname so the model name metadata is populated correctly
+            local_dir = Path(tmpdir)/model_name
+            print(local_dir)
+            api.snapshot_download(repo_id=model_id, local_dir=local_dir, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
+            print("Model downloaded successfully!")
+            print(f"Current working directory: {os.getcwd()}")
+            print(f"Model directory contents: {os.listdir(local_dir)}")
+
+            conversion_script = "convert_hf_to_gguf.py"
+            fp16_conversion = f"python llama.cpp/{conversion_script} {local_dir} --outtype f16 --outfile {fp16}"
+            result = subprocess.run(fp16_conversion, shell=True, capture_output=True)
+            print(result)
+            if result.returncode != 0:
+                raise Exception(f"Error converting to fp16: {result.stderr}")
+            print("Model converted to fp16 successfully!")
+            print(f"Converted model path: {fp16}")
 
         imatrix_path = "llama.cpp/imatrix.dat"
 
@@ -257,9 +257,7 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
         )
     except Exception as e:
         return (f"Error: {e}", "error.png")
-    finally:
-        shutil.rmtree(model_name, ignore_errors=True)
-        print("Folder cleaned up successfully!")
+
 
 css="""/* Custom CSS to allow scrolling */
 .gradio-container {overflow-y: auto;}
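Why the dl_pattern += [pattern] fix matters: assuming dl_pattern is a list of glob patterns handed to allow_patterns (as its usage suggests), += with a bare string extends the list with the string's individual characters, while += with a one-element list appends the whole pattern. A minimal sketch of the difference, with hypothetical pattern values:

dl_pattern = ["*.md", "*.json"]
pattern = "*.safetensors"

broken = list(dl_pattern)
broken += pattern      # list += str iterates the string:
                       # ['*.md', '*.json', '*', '.', 's', 'a', ...]

fixed = list(dl_pattern)
fixed += [pattern]     # appends the pattern as one element:
                       # ['*.md', '*.json', '*.safetensors']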
downloads/.keep ADDED
Empty file.
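The cleanup change is the heart of the PR: the old code deleted the download folder in a finally: block with shutil.rmtree, while the new code scopes the download inside tempfile.TemporaryDirectory(dir="downloads"), which removes the directory automatically when the with block exits, even if an exception is raised mid-download or mid-conversion. The empty downloads/.keep file is added so the downloads/ parent directory exists in the repo, since the dir= argument must point at an existing directory. A minimal sketch of that behavior, using a hypothetical model name:

import tempfile
from pathlib import Path

Path("downloads").mkdir(exist_ok=True)  # stand-in for downloads/.keep

try:
    with tempfile.TemporaryDirectory(dir="downloads") as tmpdir:
        # Keep the model name as the dirname, as in the PR.
        local_dir = Path(tmpdir) / "my-model"  # hypothetical name
        local_dir.mkdir()
        (local_dir / "model.bin").write_bytes(b"\x00")
        raise RuntimeError("simulated mid-conversion failure")
except RuntimeError:
    pass

# The temporary tree is gone despite the exception, with no
# explicit finally/shutil.rmtree needed.
print(Path(tmpdir).exists())  # False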