Spaces:
Build error
Update app.py
app.py CHANGED
@@ -104,19 +104,6 @@ def selectall(text):
         doclist += r
     return doclist
 
-# image generator
-image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
-
-# video generator
-os.system("git clone https://github.com/google-research/frame-interpolation")
-sys.path.append("frame-interpolation")
-from eval import interpolator, util
-
-ffmpeg_path = util.get_ffmpeg_path()
-mediapy.set_ffmpeg(ffmpeg_path)
-model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
-interpolator = interpolator.Interpolator(model, None)
-
 # story gen
 def generate_story(choice, input_text):
     query = "<BOS> <{0}> {1}".format(choice, input_text)
@@ -160,8 +147,21 @@ def generate_interpolation(gallery):
     mediapy.write_video("out.mp4", frames, fps=15)
     return "out.mp4"
 
-demo = gr.Blocks()
+# image generator
+image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
+
+# video generator
+os.system("git clone https://github.com/google-research/frame-interpolation")
+sys.path.append("frame-interpolation")
+from eval import interpolator, util
+
+ffmpeg_path = util.get_ffmpeg_path()
+mediapy.set_ffmpeg(ffmpeg_path)
+model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
+interpolator = interpolator.Interpolator(model, None)
+
+demo = gr.Blocks()
 
 with demo:
 
     audio_file = gr.inputs.Audio(source="microphone", type="filepath")
@@ -169,7 +169,7 @@ with demo:
     label = gr.Label()
     saved = gr.Textbox()
     savedAll = gr.Textbox()
-    TTSchoice = gr.inputs.Radio( label="Pick a TTS Model", choices=MODEL_NAMES
+    TTSchoice = gr.inputs.Radio( label="Pick a TTS Model", choices=MODEL_NAMES )
     audio = gr.Audio(label="Output", interactive=False)
 
     b1 = gr.Button("Recognize Speech")
@@ -184,43 +184,17 @@ with demo:
     b4.click(selectall, inputs=text, outputs=savedAll)
     b5.click(tts, inputs=[text,TTSchoice], outputs=audio)
 
-
-
-
-
-
-
-
-
-
-
-
-    gr.Markdown("1. Select a type of story, then write some starting text! Then hit the 'Generate Story' button to generate a story! Feel free to edit the generated story afterwards!")
-    button_gen_story = gr.Button("Generate Story")
-    with gr.Row():
-        gr.Markdown("2. After generating a story, hit the 'Generate Images' button to create some visuals for your story! (Can re-run multiple times!)")
-        button_gen_images = gr.Button("Generate Images")
-    with gr.Row():
-        gr.Markdown("3. After generating some images, hit the 'Generate Video' button to create a short video by interpolating the previously generated visuals!")
-        button_gen_video = gr.Button("Generate Video")
-
-    # Rows of references
-    with gr.Row():
-        gr.Markdown("--Models Used--")
-    with gr.Row():
-        gr.Markdown("Story Generation: [GPT-J](https://huggingface.co/pranavpsv/gpt2-genre-story-generator)")
-    with gr.Row():
-        gr.Markdown("Image Generation Conditioned on Text: [Latent Diffusion](https://huggingface.co/spaces/multimodalart/latentdiffusion) | [Github Repo](https://github.com/CompVis/latent-diffusion)")
-    with gr.Row():
-        gr.Markdown("Interpolations: [FILM](https://huggingface.co/spaces/akhaliq/frame-interpolation) | [Github Repo](https://github.com/google-research/frame-interpolation)")
-    with gr.Row():
-        gr.Markdown("")
-
-    # Right column (outputs)
-    with gr.Column():
-        output_generated_story = gr.Textbox(label="Generated Story")
-        output_gallery = gr.Gallery(label="Generated Story Images")
-        output_interpolation = gr.Video(label="Generated Video")
+    input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], value='sci_fi', label="Genre")
+    input_start_text = gr.Textbox(placeholder='A teddy bear outer space', label="Starting Text")
+    gr.Markdown("1. Select a type of story, then write some starting text! Then hit the 'Generate Story' button to generate a story! Feel free to edit the generated story afterwards!")
+    button_gen_story = gr.Button("Generate Story")
+    gr.Markdown("2. After generating a story, hit the 'Generate Images' button to create some visuals for your story! (Can re-run multiple times!)")
+    button_gen_images = gr.Button("Generate Images")
+    gr.Markdown("3. After generating some images, hit the 'Generate Video' button to create a short video by interpolating the previously generated visuals!")
+    button_gen_video = gr.Button("Generate Video")
+    output_generated_story = gr.Textbox(label="Generated Story")
+    output_gallery = gr.Gallery(label="Generated Story Images")
+    output_interpolation = gr.Video(label="Generated Video")
 
     # Bind functions to buttons
     button_gen_story.click(fn=generate_story, inputs=[input_story_type , input_start_text], outputs=output_generated_story)
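For reference, the pattern the new `with demo:` block follows is: declare the input components, the buttons, and the output components directly in the Blocks context, then bind each button to its handler with `.click()` (note the closing parenthesis added to the `gr.inputs.Radio(...)` call). A minimal, self-contained sketch of that wiring is below; the component names and choices mirror the diff, while the stand-in `generate_story` body, the `demo.launch()` call, and the use of the current `gr.Radio`/`gr.Textbox` API (rather than this file's older `gr.inputs.*` calls) are illustrative assumptions, not the Space's actual code.

import gradio as gr

# Stand-in for the app's real story generator; the diff builds a
# "<BOS> <genre> text" prompt and references pranavpsv/gpt2-genre-story-generator.
def generate_story(choice, input_text):
    return "<BOS> <{0}> {1}".format(choice, input_text)

demo = gr.Blocks()
with demo:
    # Inputs, as declared in the diff
    input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], value='sci_fi', label="Genre")
    input_start_text = gr.Textbox(placeholder='A teddy bear outer space', label="Starting Text")
    button_gen_story = gr.Button("Generate Story")

    # Output component
    output_generated_story = gr.Textbox(label="Generated Story")

    # Bind the button to the handler, matching the diff's binding
    button_gen_story.click(fn=generate_story, inputs=[input_story_type, input_start_text], outputs=output_generated_story)

demo.launch()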