ngxson (HF staff) committed
Commit 57c7ce1
1 parent: 30549f3
Files changed (6)
  1. .dockerignore +3 -0
  2. .gitignore +167 -0
  3. Dockerfile +44 -0
  4. app.py +177 -0
  5. docker-compose.yml +16 -0
  6. start.sh +10 -0
.dockerignore ADDED
@@ -0,0 +1,3 @@
+ /downloads
+ /llama.cpp
+ *.gguf
.gitignore ADDED
@@ -0,0 +1,167 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ /downloads
+ !/downloads/.keep
+ /llama.cpp
+ *.gguf
Dockerfile ADDED
@@ -0,0 +1,44 @@
+ FROM python:3.10.13-slim-bullseye
+
+ ENV DEBIAN_FRONTEND=noninteractive
+ RUN apt-get update && \
+     apt-get upgrade -y && \
+     apt-get install -y --no-install-recommends \
+     git \
+     git-lfs \
+     wget \
+     curl \
+     # python build dependencies \
+     build-essential
+
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:${PATH}
+ WORKDIR ${HOME}/app
+
+ RUN pip install --no-cache-dir -U pip setuptools wheel && \
+     pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.7" "APScheduler"
+
+ COPY --chown=1000 . ${HOME}/app
+ # TODO: revert once the PR is merged
+ # RUN git clone https://github.com/ggerganov/llama.cpp --depth 1
+ RUN git clone https://github.com/ngxson/llama.cpp -b xsn/lora_convert_base_is_optional --depth 1
+ RUN pip install -r llama.cpp/requirements.txt
+
+ ENV PYTHONPATH=${HOME}/app \
+     PYTHONUNBUFFERED=1 \
+     HF_HUB_ENABLE_HF_TRANSFER=1 \
+     GRADIO_ALLOW_FLAGGING=never \
+     GRADIO_NUM_PORTS=1 \
+     GRADIO_SERVER_NAME=0.0.0.0 \
+     GRADIO_THEME=huggingface \
+     TQDM_POSITION=-1 \
+     TQDM_MININTERVAL=1 \
+     SYSTEM=spaces \
+     LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH} \
+     PATH=/usr/local/nvidia/bin:${PATH}
+
+
+ ENTRYPOINT /bin/bash start.sh
+
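For local testing outside of Spaces, the image can also be built and run by hand. A minimal sketch, assuming Docker is available; the image tag and token value are placeholders, not part of this commit:

```bash
# Build the image from this directory and run the Gradio app on port 7860.
# GRADIO_SERVER_NAME=0.0.0.0 is set in the Dockerfile, so the UI is reachable from the host.
docker build -t gguf-my-lora .
docker run --rm -p 7860:7860 -e HF_TOKEN=hf_xxx gguf-my-lora
```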
app.py ADDED
@@ -0,0 +1,177 @@
+ import os
+ import subprocess
+ import signal
+ os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
+ import gradio as gr
+ import tempfile
+
+ from huggingface_hub import HfApi, ModelCard, whoami
+ from gradio_huggingfacehub_search import HuggingfaceHubSearch
+ from pathlib import Path
+ from textwrap import dedent
+ from apscheduler.schedulers.background import BackgroundScheduler
+
+
+ HF_TOKEN = os.environ.get("HF_TOKEN")
+ CONVERSION_SCRIPT = "convert_lora_to_gguf.py"
+
+ def process_model(peft_model_id: str, q_method: str, private_repo, oauth_token: gr.OAuthToken | None):
+     if oauth_token.token is None:
+         raise ValueError("You must be logged in to use GGUF-my-lora")
+     model_name = peft_model_id.split('/')[-1]
+     gguf_output_name = f"{model_name}-{q_method.lower()}.gguf"
+
+     try:
+         api = HfApi(token=oauth_token.token)
+
+         dl_pattern = ["*.md", "*.json", "*.model"]
+
+         pattern = (
+             "*.safetensors"
+             if any(
+                 file.path.endswith(".safetensors")
+                 for file in api.list_repo_tree(
+                     repo_id=peft_model_id,
+                     recursive=True,
+                 )
+             )
+             else "*.bin"
+         )
+
+         dl_pattern += [pattern]
+
+         if not os.path.exists("downloads"):
+             os.makedirs("downloads")
+
+         with tempfile.TemporaryDirectory(dir="downloads") as tmpdir:
+             # Keep the model name as the dirname so the model name metadata is populated correctly
+             local_dir = Path(tmpdir)/model_name
+             print(local_dir)
+             api.snapshot_download(repo_id=peft_model_id, local_dir=local_dir, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
+             print("Model downloaded successfully!")
+             print(f"Current working directory: {os.getcwd()}")
+             print(f"Model directory contents: {os.listdir(local_dir)}")
+
+             adapter_config_dir = local_dir/"adapter_config.json"
+             if not os.path.exists(adapter_config_dir):
+                 raise Exception("adapter_config.json not found. Please ensure the selected repo is a PEFT LoRA model.")
+
+             fp16_conversion = f"python llama.cpp/{CONVERSION_SCRIPT} {local_dir} --outtype {q_method.lower()} --outfile {gguf_output_name}"
+             result = subprocess.run(fp16_conversion, shell=True, capture_output=True)
+             print(result)
+             if result.returncode != 0:
+                 raise Exception(f"Error converting to GGUF {q_method}: {result.stderr}")
+             print("Model converted to GGUF successfully!")
+             print(f"Converted model path: {gguf_output_name}")
+
+         # Create empty repo
+         username = whoami(oauth_token.token)["name"]
+         new_repo_url = api.create_repo(repo_id=f"{username}/{model_name}-{q_method}-GGUF", exist_ok=True, private=private_repo)
+         new_repo_id = new_repo_url.repo_id
+         print("Repo created successfully!", new_repo_url)
+
+         # Upload the GGUF model
+         api.upload_file(
+             path_or_fileobj=gguf_output_name,
+             path_in_repo=gguf_output_name,
+             repo_id=new_repo_id,
+         )
+         print("Uploaded", gguf_output_name)
+
+         try:
+             card = ModelCard.load(peft_model_id, token=oauth_token.token)
+         except:
+             card = ModelCard("")
+         if card.data.tags is None:
+             card.data.tags = []
+         card.data.tags.append("llama-cpp")
+         card.data.tags.append("gguf-my-lora")
+         card.data.base_model = peft_model_id
+         card.text = dedent(
+             f"""
+             # {new_repo_id}
+             This LoRA adapter was converted to GGUF format from [`{peft_model_id}`](https://huggingface.co/{peft_model_id}) via the ggml.ai's [GGUF-my-lora](https://huggingface.co/spaces/ggml-org/gguf-my-lora) space.
+             Refer to the [original adapter repository](https://huggingface.co/{peft_model_id}) for more details.
+
+             ## Use with llama.cpp
+
+             ```bash
+             # with cli
+             llama-cli -m base_model.gguf --lora {gguf_output_name} (...other args)
+
+             # with server
+             llama-server -m base_model.gguf --lora {gguf_output_name} (...other args)
+             ```
+
+             To know more about LoRA usage with llama.cpp server, refer to the [llama.cpp server documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md).
+             """
+         )
+         card.save(f"README.md")
+
+         api.upload_file(
+             path_or_fileobj=f"README.md",
+             path_in_repo=f"README.md",
+             repo_id=new_repo_id,
+         )
+
+         return (
+             f'<h1>✅ DONE</h1><br/><br/>Find your repo here: <a href="{new_repo_url}" target="_blank" style="text-decoration:underline">{new_repo_id}</a>'
+         )
+     except Exception as e:
+         return (f"<h1>❌ ERROR</h1><br/><br/>{e}")
+
+
+ css="""/* Custom CSS to allow scrolling */
+ .gradio-container {overflow-y: auto;}
+ """
+ # Create Gradio interface
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown("You must be logged in to use GGUF-my-lora.")
+     gr.LoginButton(min_width=250)
+
+     peft_model_id = HuggingfaceHubSearch(
+         label="PEFT LoRA repository",
+         placeholder="Search for repository on Huggingface",
+         search_type="model",
+     )
+
+     q_method = gr.Dropdown(
+         ["F32", "F16", "Q8_0"],
+         label="Quantization Method",
+         info="(Note: Quantization less than Q8 produces very poor results)",
+         value="F16",
+         filterable=False,
+         visible=True
+     )
+
+     private_repo = gr.Checkbox(
+         value=False,
+         label="Private Repo",
+         info="Create a private repo under your username."
+     )
+
+     iface = gr.Interface(
+         fn=process_model,
+         inputs=[
+             peft_model_id,
+             q_method,
+             private_repo,
+         ],
+         outputs=[
+             gr.Markdown(label="output"),
+         ],
+         title="Convert PEFT LoRA adapters to GGUF, blazingly fast ⚡!",
+         description="The space takes a PEFT LoRA (stored on a HF repo) as an input, converts it to GGUF and creates a Public repo under your HF user namespace.",
+         api_name=False
+     )
+
+
+ def restart_space():
+     HfApi().restart_space(repo_id="ggml-org/gguf-my-lora", token=HF_TOKEN, factory_reboot=True)
+
+ scheduler = BackgroundScheduler()
+ scheduler.add_job(restart_space, "interval", seconds=21600)
+ scheduler.start()
+
+ # Launch the interface
+ demo.queue(default_concurrency_limit=1, max_size=5).launch(debug=True, show_api=False)
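For reference, the conversion step in `process_model` simply shells out to llama.cpp's `convert_lora_to_gguf.py`. A hypothetical, hand-written equivalent of the command it builds; the adapter path and file names below are illustrative only, not values from this commit:

```bash
# Roughly what fp16_conversion expands to for a LoRA named "my-adapter" with the F16 option:
# the input directory is downloads/<tmpdir>/<model_name> and --outtype is the lowercased
# quantization choice (f32, f16, or q8_0).
python llama.cpp/convert_lora_to_gguf.py downloads/tmpabc123/my-adapter \
    --outtype f16 \
    --outfile my-adapter-f16.gguf
```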
docker-compose.yml ADDED
@@ -0,0 +1,16 @@
+ # Docker compose file for LOCAL development
+
+ services:
+   gguf-my-lora:
+     build:
+       context: .
+       dockerfile: Dockerfile
+     image: gguf-my-lora
+     container_name: gguf-my-lora
+     ports:
+       - "7860:7860"
+     volumes:
+       - .:/home/user/app
+     environment:
+       - RUN_LOCALLY=1
+       - HF_TOKEN=${HF_TOKEN}
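As the `RUN_LOCALLY` and `HF_TOKEN` variables suggest, this compose file targets local development. A sketch of a typical invocation, assuming Docker Compose v2 and a Hugging Face token exported in the shell (the token value is a placeholder):

```bash
# Build and start the Space locally; HF_TOKEN is passed through from the host environment.
export HF_TOKEN=hf_xxx
docker compose up --build
# The Gradio UI should then be reachable at http://localhost:7860 (port mapped above).
```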
start.sh ADDED
@@ -0,0 +1,10 @@
+ #!/bin/bash
+
+ if [ ! -d "llama.cpp" ]; then
+     # only run in dev env
+     # TODO: revert once the PR is merged
+     # git clone https://github.com/ggerganov/llama.cpp --depth 1
+     git clone https://github.com/ngxson/llama.cpp -b xsn/lora_convert_base_is_optional --depth 1
+ fi
+
+ python app.py
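`start.sh` can also be run directly in a dev environment. A hedged sketch, assuming the Python packages listed in the Dockerfile are installed first; note that llama.cpp's requirements are installed by the Dockerfile at build time, not by `start.sh`:

```bash
# Install the app dependencies from the Dockerfile.
pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" \
    "gradio_huggingfacehub_search==0.0.7" "APScheduler"
# Clone the pinned llama.cpp branch and install its conversion requirements.
git clone https://github.com/ngxson/llama.cpp -b xsn/lora_convert_base_is_optional --depth 1
pip install -r llama.cpp/requirements.txt
# start.sh skips the clone (llama.cpp already exists) and launches app.py.
bash start.sh
```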