github-actions[bot] committed on
Commit
e4fa8c5
0 Parent(s):

Sync to HuggingFace Spaces

Browse files
Files changed (9) hide show
  1. .gitattributes +2 -0
  2. .github/workflows/sync.yml +26 -0
  3. .gitignore +160 -0
  4. LICENSE +21 -0
  5. README.md +15 -0
  6. app.py +138 -0
  7. audiosr_utils.py +42 -0
  8. headers.yaml +9 -0
  9. requirements.txt +7 -0
.gitattributes ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Auto detect text files and perform LF normalization
2
+ * text=auto
.github/workflows/sync.yml ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Sync to Hugging Face Spaces
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+
8
+ jobs:
9
+ sync:
10
+ name: Sync
11
+ runs-on: ubuntu-latest
12
+
13
+ steps:
14
+ - name: Checkout Repository
15
+ uses: actions/checkout@v4
16
+ with:
17
+ lfs: true
18
+
19
+ - name: Sync to Hugging Face Spaces
20
+ uses: JacobLinCool/huggingface-sync@v1
21
+ with:
22
+ github: ${{ secrets.GITHUB_TOKEN }}
23
+ user: jacoblincool # Hugging Face username or organization name
24
+ space: audio-super-resolution # Hugging Face space name
25
+ token: ${{ secrets.HF_TOKEN }} # Hugging Face token
26
+ configuration: headers.yaml
.gitignore ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/#use-with-ide
110
+ .pdm.toml
111
+
112
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113
+ __pypackages__/
114
+
115
+ # Celery stuff
116
+ celerybeat-schedule
117
+ celerybeat.pid
118
+
119
+ # SageMath parsed files
120
+ *.sage.py
121
+
122
+ # Environments
123
+ .env
124
+ .venv
125
+ env/
126
+ venv/
127
+ ENV/
128
+ env.bak/
129
+ venv.bak/
130
+
131
+ # Spyder project settings
132
+ .spyderproject
133
+ .spyproject
134
+
135
+ # Rope project settings
136
+ .ropeproject
137
+
138
+ # mkdocs documentation
139
+ /site
140
+
141
+ # mypy
142
+ .mypy_cache/
143
+ .dmypy.json
144
+ dmypy.json
145
+
146
+ # Pyre type checker
147
+ .pyre/
148
+
149
+ # pytype static type analyzer
150
+ .pytype/
151
+
152
+ # Cython debug symbols
153
+ cython_debug/
154
+
155
+ # PyCharm
156
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
159
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160
+ #.idea/
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2024 JacobLinCool
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Audio Super Resolution
3
+ emoji: 🎧
4
+ colorFrom: red
5
+ colorTo: gray
6
+ sdk: gradio
7
+ sdk_version: 4.37.2
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ ---
12
+
13
+ # Audio Super Resolution
14
+
15
+ A Gradio app that enhances uploaded audio to higher fidelity using the AudioSR super-resolution model.
app.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import List
3
+ import gradio as gr
4
+ import tempfile
5
+ import numpy as np
6
+ import librosa
7
+ import soundfile as sf
8
+ import spaces
9
+ from audiosr import super_resolution
10
+ from audiosr_utils import load_audiosr
11
+
12
+
13
# Load the AudioSR model once at module import so it is shared by all requests.
audiosr_model = load_audiosr()
14
+
15
+
16
def split_audio_to_chunks(y, sr=48000, chunk_duration=5.12) -> List[str]:
    """Split a waveform into fixed-length chunks saved as temporary WAV files.

    Args:
        y: 1-D array of audio samples to split.
        sr: Sample rate of ``y`` in Hz.
        chunk_duration: Length of each chunk in seconds; the final chunk may
            be shorter.

    Returns:
        Paths of the temporary ``.wav`` files, in chronological order. The
        caller is responsible for deleting them.
    """
    # Number of samples per chunk.
    chunk_samples = int(chunk_duration * sr)

    # Split the audio into consecutive chunks.
    chunks = [y[i : i + chunk_samples] for i in range(0, len(y), chunk_samples)]

    temp_files = []
    for chunk in chunks:
        # Bug fix: the original used NamedTemporaryFile(delete=False) and never
        # closed the handle, leaking a file descriptor per chunk (and blocking
        # a re-open of the same path on Windows). mkstemp lets us close the
        # OS-level handle immediately and keep only the path.
        fd, path = tempfile.mkstemp(suffix=".wav")
        os.close(fd)
        temp_files.append(path)

        # Write the chunk to the temporary file.
        sf.write(path, chunk, sr)

    return temp_files
34
+
35
+
36
@spaces.GPU(duration=180)
def run_audiosr(
    chunks: List[str], guidance_scale: float, ddim_steps: int
) -> np.ndarray:
    """Run AudioSR super-resolution on each chunk file and stitch the results.

    Args:
        chunks: Paths of the WAV chunk files, in chronological order.
        guidance_scale: Classifier-free guidance scale for the model.
        ddim_steps: Number of DDIM sampling steps.

    Returns:
        The concatenated output waveform with singleton dimensions removed.
    """
    total = len(chunks)
    pieces = []
    for index, path in enumerate(chunks, start=1):
        print(f"Processing chunk {index}/{total}")
        pieces.append(
            super_resolution(
                audiosr_model,
                path,
                guidance_scale=guidance_scale,
                ddim_steps=ddim_steps,
            )
        )
    # Each piece is shaped (1, 1, N); join along the sample axis, then drop
    # the leading singleton dimensions.
    stitched = np.concatenate(pieces, axis=-1)
    return stitched.squeeze()
53
+
54
+
55
def audiosr_infer(audio: str) -> str:
    """Super-resolve an audio file with AudioSR and return the result as MP3.

    Args:
        audio: Path to the input audio file.

    Returns:
        Path to a temporary ``.mp3`` file holding the super-resolved audio.
    """
    guidance_scale = 3.5
    ddim_steps = 100

    y, sr = librosa.load(audio, sr=48000)
    if len(y) > 60 * sr:
        # Cap the input length to bound GPU time on the shared Space.
        y = y[: 60 * sr]
        gr.Info("Audio is too long, only the first 60 seconds will be processed")

    chunk_files = split_audio_to_chunks(y, sr=sr, chunk_duration=5.12)
    print(f"Splited audio chunks: {chunk_files}")

    try:
        waveform = run_audiosr(chunk_files, guidance_scale, ddim_steps)
    finally:
        # Bug fix: delete the chunk files even when inference raises, so
        # aborted runs do not leak temporary WAVs.
        for chunk_file in chunk_files:
            os.remove(chunk_file)

    # NOTE(review): input is loaded at 48 kHz but the output is written at
    # 44.1 kHz — confirm this matches AudioSR's actual output sample rate.
    sr = 44100

    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as f:
        sf.write(f.name, waveform, sr)
        return f.name
76
+
77
+
78
# Registry mapping the model name shown in the UI to its inference callable.
models = dict(AudioSR=audiosr_infer)
81
+
82
+
83
def infer(audio: str, model: str, sr: int) -> str:
    """Dispatch inference to the selected model, optionally resampling first.

    Args:
        audio: Path to the input audio file.
        model: Key into the module-level ``models`` registry.
        sr: Target sample rate for pre-inference resampling; 0 means no
            resampling.

    Returns:
        Path to the model's output audio file.
    """
    if sr > 0:
        # Resample the audio and hand the model a temporary copy.
        y, _ = librosa.load(audio, sr=sr)
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
            sf.write(f.name, y, sr)
        try:
            return models[model](f.name)
        finally:
            # Bug fix: the resampled copy is only an intermediate; the
            # original code leaked one temp WAV per call.
            os.remove(f.name)
    else:
        return models[model](audio)
93
+
94
+
95
# Gradio UI: README header, input controls, model picker, and output player.
with gr.Blocks() as app:
    # Render the repository README at the top of the app.
    with open(os.path.join(os.path.dirname(__file__), "README.md"), "r") as f:
        README = f.read()
    # remove yaml front matter
    blocks = README.split("---")
    if len(blocks) > 1:
        # The front matter sits between the first two "---" markers; keep
        # everything after them.
        README = "---".join(blocks[2:])

    gr.Markdown(README)

    with gr.Row():
        with gr.Column():
            gr.Markdown("## Upload an audio file")
            audio = gr.Audio(label="Upload an audio file", type="filepath")
            # 0 disables resampling (see infer()).
            sr = gr.Slider(
                value=0,
                label="Resample audio to sample rate before inference, 0 means no resampling",
                minimum=0,
                maximum=48000,
                step=1000,
            )

            with gr.Row():
                model = gr.Radio(
                    label="Select a model",
                    choices=[s for s in models.keys()],
                    value="AudioSR",
                )
                btn = gr.Button("Infer")

    with gr.Row():
        with gr.Column():
            out = gr.Audio(
                label="Output", format="mp3", type="filepath", interactive=False
            )

    # Wire the button to the dispatcher and expose it as the "infer" API
    # endpoint.
    btn.click(
        fn=infer,
        inputs=[audio, model, sr],
        outputs=[out],
        api_name="infer",
    )

app.launch(show_error=True)
audiosr_utils.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import yaml
3
+ from audiosr import download_checkpoint, default_audioldm_config, LatentDiffusion
4
+
5
+
6
def load_audiosr(ckpt_path=None, config=None, device=None, model_name="basic"):
    """Build an AudioSR LatentDiffusion model and load its checkpoint.

    Args:
        ckpt_path: Path to a model checkpoint. When None, the checkpoint for
            ``model_name`` is downloaded.
        config: Optional path to a YAML config file; when None, the bundled
            AudioLDM config for ``model_name`` is used.
        device: Torch device (or name). None or "auto" selects CUDA, then
            MPS, then CPU.
        model_name: Which pretrained AudioSR variant to load.

    Returns:
        The LatentDiffusion model in eval mode, moved to ``device``.
    """
    if device is None or device == "auto":
        if torch.cuda.is_available():
            device = torch.device("cuda:0")
        elif torch.backends.mps.is_available():
            device = torch.device("mps")
        else:
            device = torch.device("cpu")

    print("Loading AudioSR: %s" % model_name)
    print("Loading model on %s" % device)

    # Bug fix: the original unconditionally overwrote ckpt_path, so a
    # caller-supplied checkpoint was silently ignored.
    if ckpt_path is None:
        ckpt_path = download_checkpoint(model_name)

    if config is not None:
        assert isinstance(config, str)
        # Close the config file deterministically instead of leaking the
        # handle from a bare open().
        with open(config, "r") as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
    else:
        config = default_audioldm_config(model_name)

    # The model constructs its submodules on this device.
    config["model"]["params"]["device"] = device

    latent_diffusion = LatentDiffusion(**config["model"]["params"])

    # Load on CPU first, then move the assembled model to the target device.
    checkpoint = torch.load(ckpt_path, map_location="cpu")
    latent_diffusion.load_state_dict(checkpoint["state_dict"], strict=True)

    latent_diffusion.eval()
    latent_diffusion = latent_diffusion.to(device)

    return latent_diffusion
headers.yaml ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ title: Audio Super Resolution
2
+ emoji: 🎧
3
+ colorFrom: red
4
+ colorTo: gray
5
+ sdk: gradio
6
+ sdk_version: 4.37.2
7
+ app_file: app.py
8
+ pinned: false
9
+ license: mit
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ gradio
2
+ numpy
3
+ librosa
4
+ soundfile
5
+ spaces
6
+ huggingface_hub
7
+ audiosr