fixed it breaking on some files, sorry!
- app.py +17 -9
- beat_manipulator/__pycache__/__init__.cpython-310.pyc +0 -0
- beat_manipulator/__pycache__/analyze.cpython-310.pyc +0 -0
- beat_manipulator/__pycache__/beatmap.cpython-310.pyc +0 -0
- beat_manipulator/__pycache__/effect.cpython-310.pyc +0 -0
- beat_manipulator/__pycache__/generate.cpython-310.pyc +0 -0
- beat_manipulator/__pycache__/image.cpython-310.pyc +0 -0
- beat_manipulator/__pycache__/main.cpython-310.pyc +0 -0
- beat_manipulator/__pycache__/mix.cpython-310.pyc +0 -0
- beat_manipulator/__pycache__/wrapper.cpython-310.pyc +0 -0
- beat_manipulator/image.py +7 -4
- beat_manipulator/main.py +3 -1
app.py
CHANGED
@@ -7,19 +7,27 @@ def _safer_eval(string:str) -> float:
     string = eval(''.join([i for i in string if i.isdecimal() or i in '.+-*/']))
     return string
 
-def BeatSwap(audiofile, pattern: str, scale:float, shift:float, caching:bool, variableBPM:bool):
+def BeatSwap(audiofile, pattern: str = 'test', scale:float = 1, shift:float = 0, caching:bool = True, variableBPM:bool = False):
+    print()
     print(f'___ PATH = {audiofile} ___')
+    if scale == '' or scale is None: scale = 1
+    if shift == '' or shift is None: shift = 0
+    if pattern == '' or pattern is None: pattern = 'test'
     scale=_safer_eval(scale)
     shift=_safer_eval(shift)
     if audiofile is not None:
         try:
             song=bm.song(path=audiofile, filename=audiofile.split('.')[-2][:-8]+'.'+audiofile.split('.')[-1], caching=caching)
         except Exception as e:
-            print(e)
+            print(f'Failed to load audio, retrying: {e}')
             song=bm.song(path=audiofile, caching=caching)
-    else:
+    else:
+        print(f'Audiofile is {audiofile}')
+        return
     lib = 'madmom.BeatDetectionProcessor' if variableBPM is False else 'madmom.BeatTrackingProcessor'
     song.beatmap.generate(lib=lib, caching=caching)
+    song.beatmap.shift(shift)
+    song.beatmap.scale(scale)
     try:
         song.beat_image.generate()
         image = song.beat_image.combined
@@ -28,17 +36,17 @@ def BeatSwap(audiofile, pattern: str, scale:float, shift:float, caching:bool, va
         image = np.clip(cv2.resize(image, (y,y), interpolation=cv2.INTER_NEAREST).T/255, -1, 1)
         #print(image)
     except Exception as e:
-        print(e)
-        image = [[0
-    song.quick_beatswap(output=None, pattern=pattern, scale=
-    song.audio = (np.clip(np.asarray(song.audio), -1, 1) *
+        print(f'Image generation failed: {e}')
+        image = np.asarray([[0.5,-0.5],[-0.5,0.5]])
+    song.quick_beatswap(output=None, pattern=pattern, scale=1, shift=0, lib=lib)
+    song.audio = (np.clip(np.asarray(song.audio), -1, 1) * 32766).astype(np.int16).T
     #song.write_audio(output=bm.outputfilename('',song.filename, suffix=' (beatswap)'))
     print('___ SUCCESS ___')
     return ((song.samplerate, song.audio), image)
 
 audiofile=Audio(source='upload', type='filepath')
-patternbox = Textbox(label="Pattern, comma separated:", placeholder="1, 3, 2, 4!", value="1,
-scalebox = Textbox(value=
+patternbox = Textbox(label="Pattern, comma separated:", placeholder="1, 3, 2, 4!", value="1, 2!", lines=1)
+scalebox = Textbox(value=1, label="Beatmap scale, beatmap's beats per minute will be multiplied by this:", placeholder=1, lines=1)
 shiftbox = Textbox(value=0, label="Beatmap shift, in beats (applies before scaling):", placeholder=0, lines=1)
 cachebox = Checkbox(value=True, label="""Enable caching beatmaps. If enabled, a text file with the beatmap will be saved to the server (your PC if you are running locally), so that beatswapping for the second time doesn't have to generate the beatmap again.
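For context, a minimal standalone sketch of what the new defaults and empty-string guards accomplish, assuming Gradio passes '' (or None) when a Textbox is cleared; _safer_eval is the helper already defined at the top of app.py, while normalize_inputs is a hypothetical name used only for illustration:

# Hypothetical illustration (not part of the commit): mirrors the guards added
# to BeatSwap so that cleared Gradio inputs fall back to usable defaults
# instead of crashing _safer_eval on an empty string.
def _safer_eval(string: str) -> float:
    # keep only digits and basic arithmetic characters before evaluating
    string = eval(''.join([i for i in string if i.isdecimal() or i in '.+-*/']))
    return string

def normalize_inputs(pattern, scale, shift):
    if scale == '' or scale is None: scale = 1
    if shift == '' or shift is None: shift = 0
    if pattern == '' or pattern is None: pattern = 'test'
    return pattern, _safer_eval(str(scale)), _safer_eval(str(shift))

print(normalize_inputs('', '', ''))              # ('test', 1, 0)
print(normalize_inputs('1, 2!', '2*0.5', '-1'))  # ('1, 2!', 1.0, -1)

The `* 32766).astype(np.int16).T` conversion on song.audio presumably puts the result into the (samplerate, int16 samples-by-channels array) tuple that Gradio's Audio output component accepts; the transpose moves channels to the last axis.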
beat_manipulator/__pycache__/__init__.cpython-310.pyc DELETED (binary file, 322 Bytes)
beat_manipulator/__pycache__/analyze.cpython-310.pyc DELETED (binary file, 1.65 kB)
beat_manipulator/__pycache__/beatmap.cpython-310.pyc DELETED (binary file, 13.5 kB)
beat_manipulator/__pycache__/effect.cpython-310.pyc DELETED (binary file, 3.5 kB)
beat_manipulator/__pycache__/generate.cpython-310.pyc DELETED (binary file, 1.39 kB)
beat_manipulator/__pycache__/image.cpython-310.pyc DELETED (binary file, 7.19 kB)
beat_manipulator/__pycache__/main.cpython-310.pyc DELETED (binary file, 27.4 kB)
beat_manipulator/__pycache__/mix.cpython-310.pyc DELETED (binary file, 1.28 kB)
beat_manipulator/__pycache__/wrapper.cpython-310.pyc DELETED (binary file, 6.36 kB)
beat_manipulator/image.py
CHANGED
@@ -134,23 +134,26 @@ class beat_image(image):
         # maximum is needed to make the array homogeneous
         maximum=self.beatmap[0]
         values=[]
+        #print(self.beatmap)
         values.append(self.beatmap[0])
         for i in range(len(self.beatmap)-1):
             self.image[0].append(self.audio[0][self.beatmap[i]:self.beatmap[i+1]])
             self.image[1].append(self.audio[1][self.beatmap[i]:self.beatmap[i+1]])
             maximum = max(self.beatmap[i+1]-self.beatmap[i], maximum)
             values.append(self.beatmap[i+1]-self.beatmap[i])
-        if 'max' in mode: norm=maximum
-        elif 'med' in mode: norm=numpy.median(values)
-        elif 'av' in mode: norm=numpy.average(values)
+        if 'max' in mode: norm=int(maximum)
+        elif 'med' in mode: norm=int(numpy.median(values))
+        elif 'av' in mode: norm=int(numpy.average(values))
         for i in range(len(self.image[0])):
-            beat_diff=
+            beat_diff=norm-len(self.image[0][i])
             if beat_diff>0:
                 self.image[0][i].extend([numpy.nan]*beat_diff)
                 self.image[1][i].extend([numpy.nan]*beat_diff)
+                #print(0, len(self.image[0][i]), len(self.image[1][i]))
             elif beat_diff<0:
                 self.image[0][i]=self.image[0][i][:beat_diff]
                 self.image[1][i]=self.image[1][i][:beat_diff]
+                #print(1, len(self.image[0][i]), len(self.image[1][i]))
         self.image=numpy.asarray(self.image)*255
         self.mask = self.image == numpy.nan
         self.image=numpy.nan_to_num(self.image)
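As a sanity check on this hunk, here is a minimal standalone sketch of the padding/truncation it touches, assuming each row is a per-beat slice of samples; pad_rows_to_norm is an illustrative name, not part of the library. Casting norm to int matters because numpy.median and numpy.average return floats, which cannot be used to repeat the NaN padding list or to slice:

import numpy

# Hypothetical standalone sketch: bring every per-beat slice to the same
# length `norm` so the rows can be stacked into one homogeneous array.
def pad_rows_to_norm(rows, mode='max'):
    values = [len(r) for r in rows]
    if 'max' in mode:   norm = int(max(values))
    elif 'med' in mode: norm = int(numpy.median(values))
    elif 'av' in mode:  norm = int(numpy.average(values))
    out = []
    for r in rows:
        beat_diff = norm - len(r)                  # int norm keeps this usable below
        if beat_diff > 0:
            r = list(r) + [numpy.nan] * beat_diff  # pad short beats with NaN
        elif beat_diff < 0:
            r = list(r)[:beat_diff]                # truncate long beats
        out.append(r)
    return numpy.nan_to_num(numpy.asarray(out))

print(pad_rows_to_norm([[1, 2, 3], [4, 5], [6, 7, 8, 9]], mode='med').shape)  # (3, 3)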
beat_manipulator/main.py
CHANGED
@@ -768,10 +768,12 @@ def beatswap(pattern: str, audio = None, scale: float = 1, shift: float = 0, out
     audio.quick_beatswap(pattern = pattern, scale=scale, shift=shift, output=output)
     return audio.path
 
-def generate_beat_image(audio = None, output='', samplerate = None, bmap = None, log = True, ext='png', maximum=4096):
+def generate_beat_image(audio = None, scale: float = 1, shift: float = 0, output='', samplerate = None, bmap = None, log = True, ext='png', maximum=4096):
     audio = _tosong(audio=audio, bmap=bmap, samplerate=samplerate, log=log)
     output = _outputfilename(output=output, filename=audio.path, ext=ext, suffix = '')
     audio.beatmap.generate()
+    audio.beatmap.scale(scale)
+    audio.beatmap.shift(shift)
     audio.beat_image.generate()
     audio.beat_image.write(output=output, maximum = maximum)
     return output
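A hedged usage sketch of the extended wrapper, assuming beat_manipulator re-exports main.py's helpers at package level (as app.py's use of bm.song suggests) and that the signature stays as shown above; 'song.mp3' is a placeholder path:

import beat_manipulator as bm

# Illustrative only: write a beat image for a track, first halving the
# beatmap's BPM (scale=0.5) and then shifting the grid by one beat (shift=1).
out = bm.generate_beat_image('song.mp3', scale=0.5, shift=1, ext='png', maximum=4096)
print(out)  # path of the written image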