Spaces: Running on A10G
Update app.py #132
opened by velyan

app.py CHANGED
@@ -117,10 +117,17 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
     print(f"Current working directory: {os.getcwd()}")
     print(f"Model directory contents: {os.listdir(local_dir)}")
 
-    config_dir = local_dir/"config.json"
-    adapter_config_dir = local_dir/"adapter_config.json"
-    if os.path.exists(adapter_config_dir) and not os.path.exists(config_dir):
-        raise Exception('adapter_config.json is present.<br/><br/>If you are converting a LoRA adapter to GGUF, please use <a href="https://huggingface.co/spaces/ggml-org/gguf-my-lora" target="_blank" style="text-decoration:underline">GGUF-my-lora</a>.')
+    config_dir = local_dir / "config.json"
+    params_dir = local_dir / "params.json" # Adding params.json as a fallback
+    adapter_config_dir = local_dir / "adapter_config.json"
+
+    # Check if adapter_config.json is present without config.json or params.json
+    if os.path.exists(adapter_config_dir) and not os.path.exists(config_dir) and not os.path.exists(params_dir):
+        raise Exception(
+            'adapter_config.json is present.<br/><br/>If you are converting a LoRA adapter to GGUF, please use '
+            '<a href="https://huggingface.co/spaces/ggml-org/gguf-my-lora" target="_blank" style="text-decoration:underline">'
+            'GGUF-my-lora</a>.'
+        )
 
     conversion_script = "convert_hf_to_gguf.py"
     fp16_conversion = f"python llama.cpp/{conversion_script} {local_dir} --outtype f16 --outfile {fp16}"
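For readers following along: the guard added in this PR distinguishes a bare LoRA adapter download from a full model checkpoint purely by file layout. Below is a minimal, self-contained sketch of the same check; the function name, directory path, and usage are illustrative assumptions, not part of the PR itself:

    from pathlib import Path

    def is_lora_adapter_only(local_dir: Path) -> bool:
        # Same heuristic as the guard above: a download that ships
        # adapter_config.json but neither config.json nor params.json
        # is a bare LoRA adapter, not a full model checkpoint.
        has_adapter = (local_dir / "adapter_config.json").exists()
        has_base = (local_dir / "config.json").exists() or (local_dir / "params.json").exists()
        return has_adapter and not has_base

    # Hypothetical usage with a placeholder directory name: fail fast
    # before attempting an HF-to-GGUF conversion that cannot succeed.
    if is_lora_adapter_only(Path("downloads/my-model")):
        raise SystemExit("LoRA adapter detected; convert it with GGUF-my-lora instead.")

Treating params.json as a fallback (as the PR comment notes) keeps full model repos that publish params.json instead of config.json from being misclassified as adapters.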