Commit: Update

Files changed:
- README.md (+1, -1)
- app.py (+15, -14)
- requirements.txt (+6, -6)
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 😻
colorFrom: purple
colorTo: red
sdk: gradio
-sdk_version:
+sdk_version: 4.36.0
app_file: app.py
pinned: false
suggested_hardware: t4-small
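In a Space's README front matter, sdk_version pins the Gradio release the Space is built with, so this bump to 4.36.0 is what drives the Gradio 4.x API changes in app.py below. A minimal sketch of a sanity check one could run inside the Space (the expected version string is an assumption tied to this pin):

import gradio as gr

# Assumed check: the running Gradio version should match the sdk_version pin above.
print(gr.__version__)  # expected to start with "4.36"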
app.py
CHANGED
@@ -2,7 +2,6 @@

from __future__ import annotations

-import argparse
import pathlib

import gradio as gr

@@ -44,29 +43,29 @@ def update_slider(choice: str) -> dict:
        "pixar": 121,
        "slamdunk": 119,
    }
-    return gr.Slider
+    return gr.Slider(maximum=max_vals[choice])


def update_style_image(style_name: str) -> dict:
    text = get_style_image_markdown_text(style_name)
-    return gr.Markdown
+    return gr.Markdown(value=text)


def set_example_image(example: list) -> dict:
-    return gr.Image
+    return gr.Image(value=example[0])


def set_example_styles(example: list) -> list[dict]:
    return [
-        gr.Radio
-        gr.Slider
+        gr.Radio(value=example[0]),
+        gr.Slider(value=example[1]),
    ]


def set_example_weights(example: list) -> list[dict]:
    return [
-        gr.Slider
-        gr.Slider
+        gr.Slider(value=example[0]),
+        gr.Slider(value=example[1]),
    ]


@@ -75,7 +74,7 @@ model = Model()
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)

-    with gr.
+    with gr.Group():
        gr.Markdown(
            """## Step 1 (Preprocess Input Image)

@@ -99,13 +98,13 @@ with gr.Blocks(css="style.css") as demo:
                reconstruct_button = gr.Button("Reconstruct Face")
            with gr.Column():
                reconstructed_face = gr.Image(label="Reconstructed Face", type="numpy")
-                instyle = gr.
+                instyle = gr.State()

        with gr.Row():
            paths = sorted(pathlib.Path("images").glob("*.jpg"))
            gr.Examples(examples=[[path.as_posix()] for path in paths], inputs=input_image)

-    with gr.
+    with gr.Group():
        gr.Markdown(
            """## Step 2 (Select Style Image)

@@ -115,7 +114,7 @@ with gr.Blocks(css="style.css") as demo:
        )
        with gr.Row():
            with gr.Column():
-                style_type = gr.Radio(label="Style Type", choices=model.style_types)
+                style_type = gr.Radio(label="Style Type", choices=model.style_types, value="cartoon")
                text = get_style_image_markdown_text("cartoon")
                style_image = gr.Markdown(value=text)
                style_index = gr.Slider(label="Style Image Index", minimum=0, maximum=316, step=1, value=26)

@@ -131,7 +130,7 @@ with gr.Blocks(css="style.css") as demo:
            inputs=[style_type, style_index],
        )

-    with gr.
+    with gr.Group():
        gr.Markdown(
            """## Step 3 (Generate Style Transferred Image)

@@ -181,4 +180,6 @@ with gr.Blocks(css="style.css") as demo:
        ],
        outputs=result,
    )
-
+
+if __name__ == "__main__":
+    demo.queue(max_size=10).launch()
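The app.py edits follow the Gradio 4 convention for updating components from event handlers: a handler returns a freshly constructed component carrying only the properties to change (gr.Slider(maximum=...), gr.Markdown(value=...), and so on), and gr.Group() is used as the grouping container around each step. A minimal, self-contained sketch of that pattern, with an illustrative choice-to-maximum mapping rather than the Space's full mapping:

import gradio as gr

# Illustrative subset of the style-type -> highest-style-index mapping.
MAX_INDEX = {"cartoon": 316, "caricature": 198, "anime": 173}


def update_slider(choice: str) -> gr.Slider:
    # Returning a component instance applies the given properties (here, the
    # slider's maximum) to the bound output component.
    return gr.Slider(maximum=MAX_INDEX[choice])


with gr.Blocks() as demo:
    with gr.Group():
        style_type = gr.Radio(label="Style Type", choices=list(MAX_INDEX), value="cartoon")
        style_index = gr.Slider(label="Style Image Index", minimum=0, maximum=316, step=1, value=26)
    # Changing the radio selection re-runs the handler and updates the slider.
    style_type.change(fn=update_slider, inputs=style_type, outputs=style_index)

if __name__ == "__main__":
    demo.queue(max_size=10).launch()

The if __name__ == "__main__" guard with demo.queue(max_size=10).launch() mirrors the new entry point in the diff: the queue caps how many pending requests are accepted, and the guard keeps the module importable without launching the app.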
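instyle = gr.State() gives the Blocks app an invisible, per-session slot: the reconstruction step can write the encoded style latent into it, and the later generation step can read it back as an input without any UI component being rendered. A hedged sketch of that wiring, with placeholder functions standing in for the Space's real encoder and generator:

import gradio as gr


def reconstruct_face(image_path: str) -> tuple[str, str]:
    # Placeholder for the real preprocessing/encoding step: echo the image back
    # and stash a fake "latent" in the session state.
    return image_path, f"latent-for-{image_path}"


def generate(latent: str, style_index: int) -> str:
    # Placeholder for the real style transfer: reads the stashed latent back.
    return f"styled({latent}, style_index={style_index})"


with gr.Blocks() as demo:
    input_image = gr.Image(type="filepath")
    reconstructed_face = gr.Image(label="Reconstructed Face")
    instyle = gr.State()  # per-session storage, never shown in the UI
    style_index = gr.Slider(label="Style Image Index", minimum=0, maximum=316, step=1, value=26)
    result = gr.Textbox(label="Result")

    gr.Button("Reconstruct Face").click(
        fn=reconstruct_face, inputs=input_image, outputs=[reconstructed_face, instyle]
    )
    gr.Button("Generate").click(fn=generate, inputs=[instyle, style_index], outputs=result)

if __name__ == "__main__":
    demo.launch()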
requirements.txt
CHANGED
@@ -1,7 +1,7 @@
-dlib==19.24.
-numpy==1.
-opencv-python-headless==4.
+dlib==19.24.4
+numpy==1.26.4
+opencv-python-headless==4.10.0.82
Pillow==9.5.0
-scipy==1.
-torch==2.0
-torchvision==0.
+scipy==1.13.1
+torch==2.2.0
+torchvision==0.17.0
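requirements.txt now pins each dependency to an exact version; torch 2.2.0 and torchvision 0.17.0 are the matching pair of releases, and opencv-python-headless keeps OpenCV usable in a server container without a GUI stack. A small sanity check, assuming it is run inside the rebuilt Space environment:

# Assumed check: imports resolve to the pinned versions after the rebuild.
import numpy
import scipy
import torch
import torchvision

print(numpy.__version__, scipy.__version__)        # expected 1.26.4 and 1.13.1
print(torch.__version__, torchvision.__version__)  # expected 2.2.0 (possibly with a +cpu/+cu tag) and 0.17.0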