adding comments and logs
app.py CHANGED
@@ -7,109 +7,166 @@ from externalmod import gr_Interface_load, randomize_seed
import asyncio
import os
from threading import RLock

# Create a lock to ensure thread safety when accessing shared resources
lock = RLock()
# Load Hugging Face token from environment variable, if available
HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None  # If private or gated models aren't used, ENV setting is unnecessary.
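# Side note: os.environ.get already returns None when the variable is missing, so the
# conditional above mainly guards against an empty string. A minimal equivalent, assuming
# an empty value should also count as unset:
# HF_TOKEN = os.environ.get("HF_TOKEN") or None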
|
# Function to load all models specified in the 'models' list
def load_fn(models):
    global models_load
    models_load = {}

    # Iterate through all models to load them
    for model in models:
        if model not in models_load.keys():
            try:
                # Log model loading attempt
                print(f"Attempting to load model: {model}")
                # Load model interface using externalmod function
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
                print(f"Successfully loaded model: {model}")
            except Exception as error:
                # In case of an error, print it and create a placeholder interface
                print(f"Error loading model {model}: {error}")
                m = gr.Interface(lambda: None, ['text'], ['image'])
            # Update the models_load dictionary with the loaded model
            models_load.update({model: m})
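
# The try/except above gives every requested model an entry even when loading fails, so
# downstream code can index models_load without KeyError checks. A minimal sketch of the
# same load-with-placeholder pattern in isolation (loader/placeholder are hypothetical
# stand-ins for the Gradio pieces; defined here for illustration only, never called):
def load_all_with_fallback(names, loader, placeholder):
    loaded = {}
    for name in names:
        try:
            loaded[name] = loader(name)
        except Exception as error:
            print(f"Falling back for {name}: {error}")
            loaded[name] = placeholder
    return loaded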

# Load all models defined in the 'models' list
print("Loading models...")
load_fn(models)
print("Models loaded successfully.")

num_models = 6

# Set the default models to use for inference
default_models = models[:num_models]
inference_timeout = 600
MAX_SEED = 3999999999
# Generate a starting seed randomly between 1941 and 2024
starting_seed = randint(1941, 2024)
print(f"Starting seed: {starting_seed}")
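
# randomize_seed is imported from externalmod, whose source is not shown in this diff.
# A plausible minimal implementation, assuming it only needs to return a fresh value for
# the seed slider in [0, MAX_SEED] (hypothetical sketch, not the actual helper):
# def randomize_seed():
#     return randint(0, MAX_SEED)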

# Extend the choices list to ensure it contains 'num_models' elements
def extend_choices(choices):
    print(f"Extending choices: {choices}")
    extended = choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']
    print(f"Extended choices: {extended}")
    return extended
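
# For example, with num_models = 6, padding and truncation behave like this:
#   extend_choices(['a', 'b'])                          -> ['a', 'b', 'NA', 'NA', 'NA', 'NA']
#   extend_choices(['a', 'b', 'c', 'd', 'e', 'f', 'g']) -> ['a', 'b', 'c', 'd', 'e', 'f']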

# Update the image boxes based on selected models
def update_imgbox(choices):
    print(f"Updating image boxes with choices: {choices}")
    choices_plus = extend_choices(choices[:num_models])
    imgboxes = [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]
    print(f"Updated image boxes: {imgboxes}")
    return imgboxes
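
# Returning fresh gr.Image components replaces each output slot wholesale. An equivalent
# that only patches the changed properties, assuming a Gradio version where gr.update is
# available (illustrative sketch, not wired into the app):
def update_imgbox_light(choices):
    choices_plus = extend_choices(choices[:num_models])
    return [gr.update(label=m, visible=(m != 'NA'), value=None) for m in choices_plus]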

# Asynchronous function to perform inference on a given model
async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
    from pathlib import Path
    kwargs = {}
    noise = ""
    kwargs["seed"] = seed
    # Create an asynchronous task to run the model inference
    print(f"Starting inference for model: {model_str} with prompt: '{prompt}' and seed: {seed}")
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
                                                 prompt=f'{prompt} {noise}', **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)  # Allow other tasks to run
    try:
        # Wait for the task to complete within the specified timeout
        result = await asyncio.wait_for(task, timeout=timeout)
        print(f"Inference completed for model: {model_str}")
    except (Exception, asyncio.TimeoutError) as e:
        # Handle any exceptions or timeout errors
        print(f"Error during inference for model {model_str}: {e}")
        if not task.done():
            task.cancel()
            print(f"Task cancelled for model: {model_str}")
        result = None
    # If the task completed successfully, save the result as an image
    if task.done() and result is not None:
        with lock:
            png_path = "image.png"
            result.save(png_path)
            image = str(Path(png_path).resolve())
        print(f"Result saved as image: {image}")
        return image
    print(f"No result for model: {model_str}")
    return None
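
# The core trick in infer is wrapping a blocking call in asyncio.to_thread so it can be
# awaited with a deadline. A self-contained sketch of the same pattern, with a stand-in
# blocking function instead of a model call (illustration only, never called by the app):
async def _run_with_deadline(blocking_fn, *args, timeout=5.0):
    task = asyncio.create_task(asyncio.to_thread(blocking_fn, *args))
    try:
        return await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError:
        # wait_for cancels the awaited task; the worker thread itself still runs to completion
        return None
# Example (with import time): asyncio.run(_run_with_deadline(time.sleep, 10, timeout=1.0)) -> None after ~1s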

# Function to generate an image based on the given model, prompt, and seed
def gen_fnseed(model_str, prompt, seed=1):
    if model_str == 'NA':
        print("Model is 'NA', skipping generation.")
        return None
    try:
        # Create a new event loop to run the asynchronous inference function
        print(f"Generating image for model: {model_str} with prompt: '{prompt}' and seed: {seed}")
        loop = asyncio.new_event_loop()
        result = loop.run_until_complete(infer(model_str, prompt, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        # Handle any exceptions or cancelled tasks
        print(f"Error during generation for model {model_str}: {e}")
        result = None
    finally:
        # Close the event loop
        loop.close()
        print(f"Event loop closed for model: {model_str}")
    return result
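
# Gradio typically runs sync handlers like gen_fnseed in worker threads, so there is no
# running loop to reuse and the function spins up its own. asyncio.run is the more concise
# equivalent, creating and closing a fresh loop per call (sketch, not wired into the app):
def gen_fnseed_alt(model_str, prompt, seed=1):
    if model_str == 'NA':
        return None
    try:
        return asyncio.run(infer(model_str, prompt, seed, inference_timeout))
    except Exception as e:
        print(f"Error during generation for model {model_str}: {e}")
        return None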
|
# Create the Gradio Blocks interface with a custom theme
print("Creating Gradio interface...")
with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
    gr.HTML("<center><h1>Compare-6</h1></center>")
    with gr.Tab('Compare-6'):
        # Text input for user prompt
        txt_input = gr.Textbox(label='Your prompt:', lines=4)
        # Button to generate images
        gen_button = gr.Button('Generate up to 6 images in up to 3 minutes total')
        with gr.Row():
            # Slider to select a seed for reproducibility
            seed = gr.Slider(label="Use a seed to replicate the same image later (maximum 3999999999)", minimum=0, maximum=MAX_SEED, step=1, value=starting_seed, scale=3)
            # Button to randomize the seed
            seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary", scale=1)
            # Set up click event to randomize the seed
            seed_rand.click(randomize_seed, None, [seed], queue=False)
            print("Seed randomization button set up.")
        # Button click to start generation
        gen_button.click(lambda: gr.update(interactive=True), None)
        print("Generation button set up.")
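        # Note: with inputs=None, Gradio calls the handler with no arguments, so the
        # lambda above must take none. The usual enable/disable pattern also names the
        # button as the output (hypothetical wiring, not active in this app):
        # gen_button.click(lambda: gr.update(interactive=False), None, gen_button)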
|
        with gr.Row():
            # Create image output components for each model
            output = [gr.Image(label=m, min_width=480) for m in default_models]
            # Create hidden textboxes to store the current models
            current_models = [gr.Textbox(m, visible=False) for m in default_models]

        # Set up generation events for each model and output image
        for m, o in zip(current_models, output):
            print(f"Setting up generation event for model: {m.value}")
            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fnseed,
                              inputs=[m, txt_input, seed], outputs=[o], concurrency_limit=None, queue=False)
            # The commented stop button could be used to cancel the generation event
            #stop_button.click(lambda s: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
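            # gr.on above fans several triggers into one handler, so clicking gen_button
            # and pressing Enter in txt_input both start generation. Minimal standalone
            # illustration of the wiring (assumes Gradio 4.x; btn/box/out are hypothetical):
            # gr.on(triggers=[btn.click, box.submit], fn=lambda t: t.upper(), inputs=[box], outputs=[out])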
        # Accordion to allow model selection
        with gr.Accordion('Model selection'):
            # Checkbox group to select up to 'num_models' different models
            model_choice = gr.CheckboxGroup(models, label=f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
            # Update image boxes and current models based on model selection
            model_choice.change(update_imgbox, model_choice, output)
            model_choice.change(extend_choices, model_choice, current_models)
            print("Model selection setup complete.")
    with gr.Row():
        # Placeholder HTML to add additional UI elements if needed
        gr.HTML()

# Queue settings for handling multiple concurrent requests
print("Setting up queue...")
demo.queue(default_concurrency_limit=200, max_size=200)
print("Launching Gradio interface...")
demo.launch(show_api=False, max_threads=400)
print("Gradio interface launched successfully.")
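
# Note: demo.launch() blocks until the server is shut down, so the final log line above
# only appears on exit. If the message is meant to confirm startup, launch can be told to
# return immediately (assuming the server should keep running in the background):
# demo.launch(show_api=False, max_threads=400, prevent_thread_lock=True)
# print("Gradio interface launched successfully.")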