Spaces: fffiloni / SplitTrack2MusicGen
Update app.py
app.py CHANGED
```diff
@@ -92,8 +92,7 @@ with gr.Blocks(css=css) as demo:
     Upload an audio file, split audio tracks with Demucs, choose a track as conditional sound for MusicGen, get a remix !
     *** Careful, MusicGen model loaded here can only handle up to 30 second audio, please use the audio component gradio feature to edit your audio before conditioning ***
     <br/>
-
-    <img style="margin-bottom: 0em;display: inline;margin-top: -.25em;" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
+    [![Duplicate Space](https://bit.ly/3gLdBN6)](https://huggingface.co/spaces/fffiloni/SplitTrack2MusicGen?duplicate=true)
     for longer audio, more control and no queue.</p>
     """
     )
@@ -102,14 +101,14 @@ with gr.Blocks(css=css) as demo:
     uploaded_sound = gr.Audio(type="numpy", label="Input", source="upload")
     with gr.Row():
         chosen_track = gr.Radio(["vocals", "bass", "drums", "other", "all-in"], label="Track", info="Which track from your audio do you want to mashup ?", value="vocals")
-        load_sound_btn = gr.Button('Load your
+        load_sound_btn = gr.Button('Load your chosen track')
     #split_vocals = gr.Audio(type="filepath", label="Vocals")
     #split_bass = gr.Audio(type="filepath", label="Bass")
     #split_drums = gr.Audio(type="filepath", label="Drums")
     #split_others = gr.Audio(type="filepath", label="Other")

     with gr.Row():
-        music_prompt = gr.Textbox(label="Musical Prompt", info="Describe what kind of music you wish for", interactive=True)
+        music_prompt = gr.Textbox(label="Musical Prompt", info="Describe what kind of music you wish for", interactive=True, placeholder="lofi slow bpm electro chill with organic samples")
         melody = gr.Audio(source="upload", type="numpy", label="Track Condition (from previous step)", interactive=False)
     with gr.Row():
         model = gr.Radio(["melody", "medium", "small", "large"], label="MusicGen Model", value="melody", interactive=True)
@@ -123,6 +122,40 @@ with gr.Blocks(css=css) as demo:
     # cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)

     output = gr.Audio(label="Generated Music")
+
+    gr.Examples(
+        fn=predict,
+        examples=[
+            [
+                "An 80s driving pop song with heavy drums and synth pads in the background",
+                None,
+                10,
+                "melody"
+            ],
+            [
+                "A cheerful country song with acoustic guitars",
+                None,
+                10,
+                "melody"
+            ],
+            [
+                "90s rock song with electric guitar and heavy drums",
+                None,
+            ],
+            [
+                "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130",
+                None,
+                10,
+                "melody"
+            ],
+            [
+                "lofi slow bpm electro chill with organic samples",
+                None,
+            ],
+        ],
+        inputs=[music_prompt, melody, duration, model],
+        outputs=[output]
+    )
     load_sound_btn.click(split_process, inputs=[uploaded_sound, chosen_track], outputs=[melody])
     submit.click(predict, inputs=[music_prompt, melody, duration, model], outputs=[output])
```
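For orientation, here is a minimal, self-contained sketch of how the interface looks after this commit: the renamed "Load your chosen track" button feeds `split_process`, and the new `gr.Examples` block replays canned prompts through `predict`. Both functions below are hypothetical stubs standing in for the real Demucs split and MusicGen generation, which live in the unchanged parts of app.py; the `duration` slider and `submit` button are likewise assumptions, since the wiring references them but they are defined outside the diffed hunks. Component arguments follow the Gradio 3.x style used in the diff.

```python
# Sketch of the post-commit wiring (Gradio 3.x style, matching the diff).
# split_process and predict are hypothetical stubs, not the app's real code.
import numpy as np
import gradio as gr

def split_process(audio, chosen_track):
    # Real app: run Demucs on the upload and return the selected stem.
    # Stub: pass the uploaded (sample_rate, data) tuple straight through.
    return audio

def predict(prompt, melody, duration, model):
    # Real app: condition MusicGen on `melody` and generate `duration` seconds.
    # Stub: return `duration` seconds of silence at 32 kHz.
    sr = 32000
    return (sr, np.zeros(int(sr * duration), dtype=np.float32))

with gr.Blocks() as demo:
    uploaded_sound = gr.Audio(type="numpy", label="Input", source="upload")
    with gr.Row():
        chosen_track = gr.Radio(["vocals", "bass", "drums", "other", "all-in"], label="Track", value="vocals")
        load_sound_btn = gr.Button("Load your chosen track")
    with gr.Row():
        music_prompt = gr.Textbox(label="Musical Prompt", placeholder="lofi slow bpm electro chill with organic samples")
        melody = gr.Audio(source="upload", type="numpy", label="Track Condition (from previous step)", interactive=False)
    with gr.Row():
        model = gr.Radio(["melody", "medium", "small", "large"], label="MusicGen Model", value="melody")
        duration = gr.Slider(minimum=1, maximum=30, value=10, label="Duration (s)")  # assumed; not shown in the diff
    submit = gr.Button("Submit")  # assumed; defined outside the diffed hunks
    output = gr.Audio(label="Generated Music")

    gr.Examples(
        fn=predict,
        examples=[["An 80s driving pop song with heavy drums and synth pads in the background", None, 10, "melody"]],
        inputs=[music_prompt, melody, duration, model],
        outputs=[output],
    )

    load_sound_btn.click(split_process, inputs=[uploaded_sound, chosen_track], outputs=[melody])
    submit.click(predict, inputs=[music_prompt, melody, duration, model], outputs=[output])

demo.launch()
```

Note that two of the example rows added by the commit ("90s rock…" and "lofi…") list only a prompt and a `None` melody; the sketch keeps each row the same length as `inputs`, which is the shape `gr.Examples` handles most reliably.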