fix url
app.py
CHANGED
@@ -54,7 +54,7 @@ def process_model(peft_model_id: str, q_method: str, private_repo, oauth_token:
 
     adapter_config_dir = local_dir/"adapter_config.json"
     if not os.path.exists(adapter_config_dir):
-        raise Exception("adapter_config.json not found. Please ensure the selected repo is a PEFT LoRA model.<br/><br/>If you are converting a model (not a LoRA adapter), please use
+        raise Exception("adapter_config.json not found. Please ensure the selected repo is a PEFT LoRA model.<br/><br/>If you are converting a model (not a LoRA adapter), please use <a href=\"https://huggingface.co/spaces/ggml-org/gguf-my-repo\" target=\"_blank\">GGUF-my-repo</a> instead.")
 
     fp16_conversion = f"python llama.cpp/{CONVERSION_SCRIPT} {local_dir} --outtype {q_method.lower()} --outfile {gguf_output_name}"
     result = subprocess.run(fp16_conversion, shell=True, capture_output=True)
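For context on the code being patched: the guard works because a PEFT LoRA adapter repo ships an adapter_config.json at its root, while a full model repo does not, so its absence is a reliable signal that the user should be using GGUF-my-repo instead of this Space. Below is a minimal standalone sketch of the same guard-then-convert flow; the concrete values assigned to local_dir, CONVERSION_SCRIPT, q_method, and gguf_output_name are illustrative assumptions, not the Space's actual configuration, and the returncode check is an assumed pattern since the hunk ends before the Space's own error handling.

    import os
    import subprocess
    from pathlib import Path

    # Illustrative assumptions; the Space derives these from user input
    # and constants defined elsewhere in app.py.
    local_dir = Path("downloaded_adapter")
    CONVERSION_SCRIPT = "convert_lora_to_gguf.py"  # assumed script name
    q_method = "F16"
    gguf_output_name = "adapter-f16.gguf"

    # A PEFT LoRA adapter always carries adapter_config.json; full models do not.
    adapter_config_dir = local_dir / "adapter_config.json"
    if not os.path.exists(adapter_config_dir):
        raise Exception("adapter_config.json not found. Please ensure the selected repo is a PEFT LoRA model.")

    # Shell out to llama.cpp's conversion script, capturing stdout/stderr.
    fp16_conversion = f"python llama.cpp/{CONVERSION_SCRIPT} {local_dir} --outtype {q_method.lower()} --outfile {gguf_output_name}"
    result = subprocess.run(fp16_conversion, shell=True, capture_output=True)
    if result.returncode != 0:
        raise Exception(f"Error converting to fp16: {result.stderr.decode()}")

Passing capture_output=True is what makes the converter's stderr available for an error message instead of letting it scroll by in the Space's logs.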