import pickle
import gradio as gr
from datasets import load_dataset
from transformers import AutoModel, AutoFeatureExtractor
from PIL import Image
# Load the pre-computed nearest-neighbour index over the butterfly embeddings.
# This runs once, when the Space starts up.
with open("butts_1024_new.pickle", "rb") as handle:
    index = pickle.load(handle)
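# NOTE: the unpickled object is assumed to expose query(embeddings, k) and to
# return an (indices, distances) pair, as used in query() below. An index with
# that interface could be built offline along these lines (hypothetical sketch,
# pynndescent assumed):
#
#   from pynndescent import NNDescent
#   candidate_embeddings = ...  # model embeddings of every butterfly image
#   nn_index = NNDescent(candidate_embeddings)
#   with open("butts_1024_new.pickle", "wb") as f:
#       pickle.dump(nn_index, f)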
# Load model for computing embeddings.
feature_extractor = AutoFeatureExtractor.from_pretrained(
    "sasha/autotrain-butterfly-similarity-2490576840"
)
model = AutoModel.from_pretrained("sasha/autotrain-butterfly-similarity-2490576840")
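# The model's pooled output (pooler_output) is used as the image embedding in
# query() below; the pickled index is assumed to have been built from
# embeddings computed the same way.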
# Candidate images.
dataset = load_dataset("sasha/butterflies_10k_names_multiple")
ds = dataset["train"]
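# Only the "image" column is used below; going by the dataset name, the rows
# presumably also carry butterfly names.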
def query(image, top_k=1):
    # Embed the query image with the similarity model.
    inputs = feature_extractor(image, return_tensors="pt")
    model_output = model(**inputs)
    embedding = model_output.pooler_output.detach()
    # results is an (indices, distances) pair; only the single best match is
    # used, even when top_k > 1.
    results = index.query(embedding, k=top_k)
    indices = results[0][0].tolist()
    butterfly = ds.select(indices)["image"][0]
    return overlay_png_on_side_by_side_images(
        image, butterfly, "cadre_hq.png", png_position=(0, 0)
    )
def overlay_png_on_side_by_side_images(
    person_image, insect_image, overlay_image_path, png_position=(0, 0)
):
    """
    Places two images side by side and overlays a PNG frame on top.

    Args:
        person_image (numpy.ndarray): The query image, as handed over by Gradio.
        insect_image (PIL.Image.Image): The matched butterfly image.
        overlay_image_path (str): Path to the PNG frame to overlay.
        png_position (tuple): (x, y) of the top-left corner of the overlay.

    Returns:
        PIL.Image.Image: The composited image, flattened to RGB.
    """
    # Convert everything to RGBA so alpha compositing works.
    img1 = Image.fromarray(person_image).convert("RGBA")
    img2 = insect_image.convert("RGBA")
    png_img = Image.open(overlay_image_path).convert("RGBA")

    # Force both images to a fixed 1800x5400 portrait tile so they line up
    # exactly. Note that this does not preserve their aspect ratios.
    target_width, target_height = 1800, 5400
    img1 = img1.resize((target_width, target_height), Image.LANCZOS)
    img2 = img2.resize((target_width, target_height), Image.LANCZOS)
    combined_width = img1.width + img2.width

    # Paste the two tiles side by side on a transparent canvas.
    combined_image = Image.new("RGBA", (combined_width, target_height), (0, 0, 0, 0))
    combined_image.paste(img1, (0, 0))
    combined_image.paste(img2, (img1.width, 0))

    # Stretch the PNG frame to cover the whole canvas and composite it,
    # using its own alpha channel as the paste mask.
    png_img = png_img.resize((combined_width, target_height), Image.LANCZOS)
    combined_image.paste(png_img, png_position, png_img)

    # Flatten to RGB so the result can be encoded as JPEG by the output widget.
    return combined_image.convert("RGB")
with gr.Blocks() as demo:
    gr.Markdown("# Find my Butterfly 🦋 Trouver mon papillon 🦋")
    with gr.Row():
        with gr.Column(scale=1):
            inputs = gr.Image(width=288, height=384)
            btn = gr.Button("🦋")
            description = gr.Markdown()
        with gr.Column(scale=2):
            outputs = gr.Image(format="jpg")
            gr.Markdown("### Image Examples")
            gr.Examples(
                examples=["elton.jpg", "ken.jpg", "gaga.jpg", "taylor.jpg"],
                inputs=inputs,
                outputs=outputs,
                fn=query,
                cache_examples=True,
            )
    btn.click(query, inputs, outputs)

demo.launch()