Spaces: Running on Zero
Commit · 1f7a9ce
Parent(s): 69c66b9
delete
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +0 -37
- README.md +0 -10
- app.py +0 -24
- blend_processor.py +0 -262
- color_match_processor.py +0 -210
- cube_luts/35mm Portrait.cube +0 -0
- cube_luts/Agfa Optima.cube +0 -0
- cube_luts/Agfa Ultra 100.cube +0 -0
- cube_luts/Agfa Ultra.cube +0 -0
- cube_luts/Agfa Vista.cube +0 -0
- cube_luts/Bold Film.cube +0 -0
- cube_luts/Cali Vibes.cube +0 -0
- cube_luts/City Vista.cube +0 -0
- cube_luts/Classic Chrome.cube +0 -0
- cube_luts/Coastal Film.cube +0 -0
- cube_luts/Color 400.cube +0 -0
- cube_luts/Color Film.cube +0 -0
- cube_luts/Desert Heat.cube +0 -0
- cube_luts/Elite Chrome.cube +0 -0
- cube_luts/Fashion Film.cube +0 -0
- cube_luts/Fast Film.cube +0 -0
- cube_luts/Film Fade.cube +0 -0
- cube_luts/Filmmaker.cube +0 -0
- cube_luts/Flashback.cube +0 -0
- cube_luts/Free Spirit.cube +0 -0
- cube_luts/Fuji Astia.cube +0 -0
- cube_luts/Fuji Provia.cube +0 -0
- cube_luts/Gold 200.cube +0 -0
- cube_luts/Golden Light.cube +0 -0
- cube_luts/Good Vibes.cube +0 -0
- cube_luts/Kodachrome.cube +0 -0
- cube_luts/Kodacrome 64.cube +0 -0
- cube_luts/Kyoto.cube +0 -0
- cube_luts/Lomography.cube +0 -0
- cube_luts/Moody Blue.cube +0 -0
- cube_luts/Moody Film.cube +0 -0
- cube_luts/Moody Grade.cube +0 -0
- cube_luts/Moody Stock.cube +0 -0
- cube_luts/Nomad.cube +0 -0
- cube_luts/Pastel Morning.cube +0 -0
- cube_luts/Pastry.cube +0 -0
- cube_luts/Polaroid Color.cube +0 -0
- cube_luts/Portra 800.cube +0 -0
- cube_luts/Presetpro Color Film.cube +0 -0
- cube_luts/Rich Tones.cube +0 -0
- cube_luts/Santorini.cube +0 -0
- cube_luts/Top Gun.cube +0 -0
- cube_luts/Velvia 100.cube +0 -0
- cube_luts/Warm Tones.cube +0 -0
- histogram_processor.py +0 -119
.gitattributes
DELETED
@@ -1,37 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-assets/teaser.png filter=lfs diff=lfs merge=lfs -text
-demos/example_003.png filter=lfs diff=lfs merge=lfs -text
README.md
DELETED
@@ -1,10 +0,0 @@
----
-title: Image Procesing
-emoji: 😻
-colorFrom: purple
-colorTo: red
-sdk: gradio
-sdk_version: 5.3.0
-app_file: app.py
-pinned: false
----
app.py
DELETED
@@ -1,24 +0,0 @@
-import gradio as gr
-from pixelize_processor import create_pixelize_tab
-from lut_processor import create_lut_tab
-from sharpen_processor import create_sharpen_tab
-from color_match_processor import create_color_match_tab
-from simple_effects_processor import create_effects_tab
-from histogram_processor import create_histogram_tab
-from blend_processor import create_blend_tab
-from matte_processor import create_matte_tab
-
-with gr.Blocks(title="Image Processing Suite") as demo:
-    gr.Markdown("# Image Processing Suite")
-
-    create_pixelize_tab()
-    create_lut_tab()
-    create_sharpen_tab()
-    create_color_match_tab()
-    create_effects_tab()
-    create_histogram_tab()
-    create_blend_tab()
-    create_matte_tab()  # Add this line
-
-if __name__ == "__main__":
-    demo.launch(share=True)
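Note: app.py composes the UI from per-effect modules, each exposing a create_*_tab() function that registers its own gr.Tab inside the shared gr.Blocks context. A minimal sketch of that convention, with a hypothetical module and names that are not part of this commit:

import gradio as gr
import numpy as np

def invert_image(image: np.ndarray) -> np.ndarray:
    # Toy effect standing in for a real processor
    return 255 - image

def create_invert_tab():
    # Must run inside an active gr.Blocks context, as in app.py above
    with gr.Tab("Invert"):
        inp = gr.Image(label="Input Image")
        out = gr.Image(label="Output Image")
        btn = gr.Button("Apply")
        btn.click(fn=invert_image, inputs=inp, outputs=out)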
blend_processor.py
DELETED
@@ -1,262 +0,0 @@
-import gradio as gr
-import numpy as np
-import cv2
-from PIL import Image, ImageChops
-from skimage import img_as_float, img_as_ubyte
-import copy
-from typing import Optional, Union
-
-def cv22ski(cv2_image: np.ndarray) -> np.ndarray:
-    """Convert CV2 image to skimage float format"""
-    return img_as_float(cv2_image)
-
-def ski2cv2(ski: np.ndarray) -> np.ndarray:
-    """Convert skimage float format to CV2 image"""
-    return img_as_ubyte(ski)
-
-def cv22pil(cv2_img: np.ndarray) -> Image.Image:
-    """Convert CV2 image to PIL Image"""
-    cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)
-    return Image.fromarray(cv2_img)
-
-def pil2cv2(pil_img: Image.Image) -> np.ndarray:
-    """Convert PIL Image to CV2 image"""
-    np_img_array = np.asarray(pil_img)
-    return cv2.cvtColor(np_img_array, cv2.COLOR_RGB2BGR)
-
-def blend_color_burn(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply color burn blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    img = 1 - (1 - img_2) / (img_1 + 0.001)
-    mask_1 = img < 0
-    mask_2 = img > 1
-    img = img * (1 - mask_1)
-    img = img * (1 - mask_2) + mask_2
-    return cv22pil(ski2cv2(img))
-
-def blend_color_dodge(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply color dodge blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    img = img_2 / (1.0 - img_1 + 0.001)
-    mask_2 = img > 1
-    img = img * (1 - mask_2) + mask_2
-    return cv22pil(ski2cv2(img))
-
-def blend_linear_burn(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply linear burn blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    img = img_1 + img_2 - 1
-    mask_1 = img < 0
-    img = img * (1 - mask_1)
-    return cv22pil(ski2cv2(img))
-
-def blend_linear_dodge(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply linear dodge blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    img = img_1 + img_2
-    mask_2 = img > 1
-    img = img * (1 - mask_2) + mask_2
-    return cv22pil(ski2cv2(img))
-
-def blend_lighten(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply lighten blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    img = img_1 - img_2
-    mask = img > 0
-    img = img_1 * mask + img_2 * (1 - mask)
-    return cv22pil(ski2cv2(img))
-
-def blend_dark(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply darken blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    img = img_1 - img_2
-    mask = img < 0
-    img = img_1 * mask + img_2 * (1 - mask)
-    return cv22pil(ski2cv2(img))
-
-def blend_screen(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply screen blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    img = 1 - (1 - img_1) * (1 - img_2)
-    return cv22pil(ski2cv2(img))
-
-def blend_overlay(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply overlay blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    mask = img_2 < 0.5
-    img = 2 * img_1 * img_2 * mask + (1 - mask) * (1 - 2 * (1 - img_1) * (1 - img_2))
-    return cv22pil(ski2cv2(img))
-
-def blend_soft_light(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply soft light blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    mask = img_1 < 0.5
-    T1 = (2 * img_1 - 1) * (img_2 - img_2 * img_2) + img_2
-    T2 = (2 * img_1 - 1) * (np.sqrt(img_2) - img_2) + img_2
-    img = T1 * mask + T2 * (1 - mask)
-    return cv22pil(ski2cv2(img))
-
-def blend_hard_light(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply hard light blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    mask = img_1 < 0.5
-    T1 = 2 * img_1 * img_2
-    T2 = 1 - 2 * (1 - img_1) * (1 - img_2)
-    img = T1 * mask + T2 * (1 - mask)
-    return cv22pil(ski2cv2(img))
-
-def blend_vivid_light(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply vivid light blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    mask = img_1 < 0.5
-    T1 = 1 - (1 - img_2) / (2 * img_1 + 0.001)
-    T2 = img_2 / (2 * (1 - img_1) + 0.001)
-    mask_1 = T1 < 0
-    mask_2 = T2 > 1
-    T1 = T1 * (1 - mask_1)
-    T2 = T2 * (1 - mask_2) + mask_2
-    img = T1 * mask + T2 * (1 - mask)
-    return cv22pil(ski2cv2(img))
-
-def blend_pin_light(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply pin light blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    mask_1 = img_2 < (img_1 * 2 - 1)
-    mask_2 = img_2 > 2 * img_1
-    T1 = 2 * img_1 - 1
-    T2 = img_2
-    T3 = 2 * img_1
-    img = T1 * mask_1 + T2 * (1 - mask_1) * (1 - mask_2) + T3 * mask_2
-    return cv22pil(ski2cv2(img))
-
-def blend_linear_light(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply linear light blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    img = img_2 + img_1 * 2 - 1
-    mask_1 = img < 0
-    mask_2 = img > 1
-    img = img * (1 - mask_1)
-    img = img * (1 - mask_2) + mask_2
-    return cv22pil(ski2cv2(img))
-
-def blend_hard_mix(background_image: Image.Image, layer_image: Image.Image) -> Image.Image:
-    """Apply hard mix blend mode"""
-    img_1 = cv22ski(pil2cv2(background_image))
-    img_2 = cv22ski(pil2cv2(layer_image))
-    img = img_1 + img_2
-    mask = img_1 + img_2 > 1
-    img = img * (1 - mask) + mask
-    img = img * mask
-    return cv22pil(ski2cv2(img))
-
-def chop_image(background_image: Image.Image, layer_image: Image.Image, blend_mode: str, opacity: int) -> Image.Image:
-    """Apply blend mode and opacity to images"""
-    ret_image = background_image
-
-    blend_functions = {
-        'normal': lambda: copy.deepcopy(layer_image),
-        'multiply': lambda: ImageChops.multiply(background_image, layer_image),
-        'screen': lambda: ImageChops.screen(background_image, layer_image),
-        'add': lambda: ImageChops.add(background_image, layer_image, 1, 0),
-        'subtract': lambda: ImageChops.subtract(background_image, layer_image, 1, 0),
-        'difference': lambda: ImageChops.difference(background_image, layer_image),
-        'darker': lambda: ImageChops.darker(background_image, layer_image),
-        'lighter': lambda: ImageChops.lighter(background_image, layer_image),
-        'color_burn': lambda: blend_color_burn(background_image, layer_image),
-        'color_dodge': lambda: blend_color_dodge(background_image, layer_image),
-        'linear_burn': lambda: blend_linear_burn(background_image, layer_image),
-        'linear_dodge': lambda: blend_linear_dodge(background_image, layer_image),
-        'overlay': lambda: blend_overlay(background_image, layer_image),
-        'soft_light': lambda: blend_soft_light(background_image, layer_image),
-        'hard_light': lambda: blend_hard_light(background_image, layer_image),
-        'vivid_light': lambda: blend_vivid_light(background_image, layer_image),
-        'pin_light': lambda: blend_pin_light(background_image, layer_image),
-        'linear_light': lambda: blend_linear_light(background_image, layer_image),
-        'hard_mix': lambda: blend_hard_mix(background_image, layer_image)
-    }
-
-    if blend_mode in blend_functions:
-        ret_image = blend_functions[blend_mode]()
-
-    # Apply opacity
-    if opacity == 0:
-        ret_image = background_image
-    elif opacity < 100:
-        alpha = 1.0 - float(opacity) / 100
-        ret_image = Image.blend(ret_image, background_image, alpha)
-
-    return ret_image
-
-def process_images(background: Optional[np.ndarray],
-                   layer: Optional[np.ndarray],
-                   blend_mode: str,
-                   opacity: float) -> Optional[np.ndarray]:
-    """Process images with selected blend mode and opacity"""
-    if background is None or layer is None:
-        return None
-
-    # Convert numpy arrays to PIL Images
-    background_pil = Image.fromarray(background)
-    layer_pil = Image.fromarray(layer)
-
-    # Ensure images are in RGB mode
-    background_pil = background_pil.convert('RGB')
-    layer_pil = layer_pil.convert('RGB')
-
-    # Apply blend mode
-    result = chop_image(background_pil, layer_pil, blend_mode, int(opacity * 100))
-
-    # Convert back to numpy array
-    return np.array(result)
-
-def create_blend_tab():
-    """Create the blend modes tab interface"""
-    with gr.Tab("Blend Modes"):
-        with gr.Row():
-            with gr.Column():
-                background_image = gr.Image(label="Background Image", height=256)
-                layer_image = gr.Image(label="Layer Image", height=256)
-
-                blend_mode = gr.Dropdown(
-                    choices=[
-                        "normal", "multiply", "screen", "overlay",
-                        "soft_light", "hard_light", "color_burn", "color_dodge",
-                        "linear_burn", "linear_dodge", "vivid_light", "linear_light",
-                        "pin_light", "hard_mix", "difference", "add", "subtract",
-                        "darker", "lighter"
-                    ],
-                    value="normal",
-                    label="Blend Mode"
-                )
-
-                opacity = gr.Slider(
-                    minimum=0.0,
-                    maximum=1.0,
-                    value=1.0,
-                    step=0.01,
-                    label="Opacity"
-                )
-
-                blend_btn = gr.Button("Apply Blend")
-
-            with gr.Column():
-                output_image = gr.Image(label="Blended Image")
-
-        blend_btn.click(
-            fn=process_images,
-            inputs=[background_image, layer_image, blend_mode, opacity],
-            outputs=output_image
-        )
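Note: the blend functions operate on float images in [0, 1]; screen, for instance, computes 1 - (1 - a) * (1 - b) per channel, so screening 0.5 over 0.5 gives 0.75. A usage sketch of the deleted entry point, assuming two same-sized uint8 RGB arrays (opacity is passed in 0-1 and scaled to percent inside process_images):

import numpy as np
from blend_processor import process_images  # module removed by this commit

background = np.full((64, 64, 3), 200, dtype=np.uint8)  # light gray
layer = np.zeros((64, 64, 3), dtype=np.uint8)
layer[:, :32] = (255, 0, 0)                             # red left half

# 'multiply' darkens; opacity=0.5 blends the result halfway back toward the background
result = process_images(background, layer, "multiply", 0.5)
print(result.shape, result.dtype)  # (64, 64, 3) uint8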
color_match_processor.py
DELETED
@@ -1,210 +0,0 @@
-import gradio as gr
-import torch
-import torch.nn.functional as F
-import kornia
-import numpy as np
-
-def compute_mean_std(tensor, mask=None):
-    if mask is not None:
-        # Apply mask to the tensor
-        masked_tensor = tensor * mask
-        mask_sum = mask.sum(dim=[2, 3], keepdim=True)
-        mask_sum = torch.clamp(mask_sum, min=1e-6)
-
-        mean = torch.nan_to_num(masked_tensor.sum(dim=[2, 3], keepdim=True) / mask_sum)
-        std = torch.sqrt(torch.nan_to_num(((masked_tensor - mean) ** 2 * mask).sum(dim=[2, 3], keepdim=True) / mask_sum))
-    else:
-        mean = tensor.mean(dim=[2, 3], keepdim=True)
-        std = tensor.std(dim=[2, 3], keepdim=True)
-    return mean, std
-
-def apply_color_match(image, reference, color_space, factor, device='cpu'):
-    if image is None or reference is None:
-        return None
-
-    # Convert to torch tensors and normalize
-    image = torch.from_numpy(image).float() / 255.0
-    reference = torch.from_numpy(reference).float() / 255.0
-
-    # Add batch dimension and rearrange to BCHW
-    image = image.unsqueeze(0).permute(0, 3, 1, 2)
-    reference = reference.unsqueeze(0).permute(0, 3, 1, 2)
-
-    # Convert to target color space
-    if color_space == "LAB":
-        image_conv = kornia.color.rgb_to_lab(image)
-        reference_conv = kornia.color.rgb_to_lab(reference)
-        back_conversion = kornia.color.lab_to_rgb
-    elif color_space == "YCbCr":
-        image_conv = kornia.color.rgb_to_ycbcr(image)
-        reference_conv = kornia.color.rgb_to_ycbcr(reference)
-        back_conversion = kornia.color.ycbcr_to_rgb
-    elif color_space == "LUV":
-        image_conv = kornia.color.rgb_to_luv(image)
-        reference_conv = kornia.color.rgb_to_luv(reference)
-        back_conversion = kornia.color.luv_to_rgb
-    elif color_space == "YUV":
-        image_conv = kornia.color.rgb_to_yuv(image)
-        reference_conv = kornia.color.rgb_to_yuv(reference)
-        back_conversion = kornia.color.yuv_to_rgb
-    elif color_space == "XYZ":
-        image_conv = kornia.color.rgb_to_xyz(image)
-        reference_conv = kornia.color.rgb_to_xyz(reference)
-        back_conversion = kornia.color.xyz_to_rgb
-    else:  # RGB
-        image_conv = image
-        reference_conv = reference
-        back_conversion = lambda x: x
-
-    # Compute statistics
-    reference_mean, reference_std = compute_mean_std(reference_conv)
-    image_mean, image_std = compute_mean_std(image_conv)
-
-    # Apply color matching
-    matched = torch.nan_to_num((image_conv - image_mean) / image_std) * reference_std + reference_mean
-    matched = factor * matched + (1 - factor) * image_conv
-
-    # Convert back to RGB
-    matched = back_conversion(matched)
-
-    # Convert back to HWC format and to uint8
-    output = matched.squeeze(0).permute(1, 2, 0)
-    output = (output.clamp(0, 1).numpy() * 255).astype(np.uint8)
-
-    return output
-
-def analyze_color_statistics(image):
-    l, a, b = kornia.color.rgb_to_lab(image).chunk(3, dim=1)
-    mean_l = l.mean()
-    std_l = l.std()
-    mean_a = a.mean()
-    mean_b = b.mean()
-    std_ab = torch.sqrt(a.var() + b.var())
-    return mean_l, std_l, mean_a, mean_b, std_ab
-
-def apply_adobe_color_match(image, reference, color_space, luminance_factor, color_intensity_factor, fade_factor, neutralization_factor):
-    if image is None or reference is None:
-        return None
-
-    # Convert to torch tensors and normalize
-    image = torch.from_numpy(image).float() / 255.0
-    reference = torch.from_numpy(reference).float() / 255.0
-
-    # Add batch dimension and rearrange to BCHW
-    image = image.unsqueeze(0).permute(0, 3, 1, 2)
-    reference = reference.unsqueeze(0).permute(0, 3, 1, 2)
-
-    # Analyze color statistics
-    source_stats = analyze_color_statistics(reference)
-    dest_stats = analyze_color_statistics(image)
-
-    # Convert to LAB
-    l, a, b = kornia.color.rgb_to_lab(image).chunk(3, dim=1)
-
-    # Unpack statistics
-    src_mean_l, src_std_l, src_mean_a, src_mean_b, src_std_ab = source_stats
-    dest_mean_l, dest_std_l, dest_mean_a, dest_mean_b, dest_std_ab = dest_stats
-
-    # Apply transformations
-    l_new = (l - dest_mean_l) * (src_std_l / dest_std_l) * luminance_factor + src_mean_l
-
-    # Neutralize color cast
-    a = a - neutralization_factor * dest_mean_a
-    b = b - neutralization_factor * dest_mean_b
-
-    # Adjust color intensity
-    a_new = a * (src_std_ab / dest_std_ab) * color_intensity_factor
-    b_new = b * (src_std_ab / dest_std_ab) * color_intensity_factor
-
-    # Combine channels
-    lab_new = torch.cat([l_new, a_new, b_new], dim=1)
-
-    # Convert back to RGB
-    rgb_new = kornia.color.lab_to_rgb(lab_new)
-
-    # Apply fade factor
-    result = fade_factor * rgb_new + (1 - fade_factor) * image
-
-    # Convert back to HWC format and to uint8
-    output = result.squeeze(0).permute(1, 2, 0)
-    output = (output.clamp(0, 1).numpy() * 255).astype(np.uint8)
-
-    return output
-
-def create_color_match_tab():
-    with gr.Tab("Color Matching"):
-
-        with gr.Row():
-            with gr.Column():
-                input_image = gr.Image(label="Input Image", height=256)
-                reference_image = gr.Image(label="Reference Image", height=256)
-
-                with gr.Tabs():
-                    with gr.Tab("Standard"):
-                        color_space = gr.Dropdown(
-                            choices=["LAB", "YCbCr", "RGB", "LUV", "YUV", "XYZ"],
-                            value="LAB",
-                            label="Color Space"
-                        )
-                        factor = gr.Slider(
-                            minimum=0.0,
-                            maximum=1.0,
-                            value=1.0,
-                            step=0.05,
-                            label="Factor"
-                        )
-                        standard_btn = gr.Button("Apply Standard Color Match")
-
-                    with gr.Tab("Adobe Style"):
-                        adobe_color_space = gr.Dropdown(
-                            choices=["RGB", "LAB"],
-                            value="LAB",
-                            label="Color Space"
-                        )
-                        luminance_factor = gr.Slider(
-                            minimum=0.0,
-                            maximum=2.0,
-                            value=1.0,
-                            step=0.05,
-                            label="Luminance Factor"
-                        )
-                        color_intensity_factor = gr.Slider(
-                            minimum=0.0,
-                            maximum=2.0,
-                            value=1.0,
-                            step=0.05,
-                            label="Color Intensity Factor"
-                        )
-                        fade_factor = gr.Slider(
-                            minimum=0.0,
-                            maximum=1.0,
-                            value=1.0,
-                            step=0.05,
-                            label="Fade Factor"
-                        )
-                        neutralization_factor = gr.Slider(
-                            minimum=0.0,
-                            maximum=1.0,
-                            value=0.0,
-                            step=0.05,
-                            label="Neutralization Factor"
-                        )
-                        adobe_btn = gr.Button("Apply Adobe Style Color Match")
-
-            with gr.Column():
-                output_image = gr.Image(label="Color Matched Image")
-
-        standard_btn.click(
-            fn=apply_color_match,
-            inputs=[input_image, reference_image, color_space, factor],
-            outputs=output_image
-        )
-
-        adobe_btn.click(
-            fn=apply_adobe_color_match,
-            inputs=[
-                input_image, reference_image, adobe_color_space,
-                luminance_factor, color_intensity_factor, fade_factor, neutralization_factor
-            ],
-            outputs=output_image
-        )
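Note: the standard matcher is a Reinhard-style statistics transfer: in the chosen color space each channel is remapped as (x - mean_img) / std_img * std_ref + mean_ref, then blended with the input by factor. A usage sketch, assuming two uint8 RGB arrays:

import numpy as np
from color_match_processor import apply_color_match  # module removed by this commit

rng = np.random.default_rng(0)
image = rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8)
reference = rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8)

# Full-strength LAB transfer; factor < 1.0 fades toward the original
out = apply_color_match(image, reference, color_space="LAB", factor=1.0)
print(out.shape, out.dtype)  # (64, 64, 3) uint8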
cube_luts/35mm Portrait.cube DELETED (diff too large to render; see raw diff)
cube_luts/Agfa Optima.cube DELETED (diff too large to render; see raw diff)
cube_luts/Agfa Ultra 100.cube DELETED (diff too large to render; see raw diff)
cube_luts/Agfa Ultra.cube DELETED (diff too large to render; see raw diff)
cube_luts/Agfa Vista.cube DELETED (diff too large to render; see raw diff)
cube_luts/Bold Film.cube DELETED (diff too large to render; see raw diff)
cube_luts/Cali Vibes.cube DELETED (diff too large to render; see raw diff)
cube_luts/City Vista.cube DELETED (diff too large to render; see raw diff)
cube_luts/Classic Chrome.cube DELETED (diff too large to render; see raw diff)
cube_luts/Coastal Film.cube DELETED (diff too large to render; see raw diff)
cube_luts/Color 400.cube DELETED (diff too large to render; see raw diff)
cube_luts/Color Film.cube DELETED (binary file, 542 Bytes)
cube_luts/Desert Heat.cube DELETED (diff too large to render; see raw diff)
cube_luts/Elite Chrome.cube DELETED (diff too large to render; see raw diff)
cube_luts/Fashion Film.cube DELETED (diff too large to render; see raw diff)
cube_luts/Fast Film.cube DELETED (diff too large to render; see raw diff)
cube_luts/Film Fade.cube DELETED (diff too large to render; see raw diff)
cube_luts/Filmmaker.cube DELETED (diff too large to render; see raw diff)
cube_luts/Flashback.cube DELETED (diff too large to render; see raw diff)
cube_luts/Free Spirit.cube DELETED (diff too large to render; see raw diff)
cube_luts/Fuji Astia.cube DELETED (diff too large to render; see raw diff)
cube_luts/Fuji Provia.cube DELETED (diff too large to render; see raw diff)
cube_luts/Gold 200.cube DELETED (diff too large to render; see raw diff)
cube_luts/Golden Light.cube DELETED (diff too large to render; see raw diff)
cube_luts/Good Vibes.cube DELETED (diff too large to render; see raw diff)
cube_luts/Kodachrome.cube DELETED (diff too large to render; see raw diff)
cube_luts/Kodacrome 64.cube DELETED (diff too large to render; see raw diff)
cube_luts/Kyoto.cube DELETED (diff too large to render; see raw diff)
cube_luts/Lomography.cube DELETED (diff too large to render; see raw diff)
cube_luts/Moody Blue.cube DELETED (diff too large to render; see raw diff)
cube_luts/Moody Film.cube DELETED (diff too large to render; see raw diff)
cube_luts/Moody Grade.cube DELETED (diff too large to render; see raw diff)
cube_luts/Moody Stock.cube DELETED (diff too large to render; see raw diff)
cube_luts/Nomad.cube DELETED (diff too large to render; see raw diff)
cube_luts/Pastel Morning.cube DELETED (diff too large to render; see raw diff)
cube_luts/Pastry.cube DELETED (diff too large to render; see raw diff)
cube_luts/Polaroid Color.cube DELETED (diff too large to render; see raw diff)
cube_luts/Portra 800.cube DELETED (diff too large to render; see raw diff)
cube_luts/Presetpro Color Film.cube DELETED (diff too large to render; see raw diff)
cube_luts/Rich Tones.cube DELETED (diff too large to render; see raw diff)
cube_luts/Santorini.cube DELETED (diff too large to render; see raw diff)
cube_luts/Top Gun.cube DELETED (diff too large to render; see raw diff)
cube_luts/Velvia 100.cube DELETED (diff too large to render; see raw diff)
cube_luts/Warm Tones.cube DELETED (diff too large to render; see raw diff)
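Note: the deleted presets are standard Adobe .cube 3D LUTs: a LUT_3D_SIZE N header followed by N^3 RGB rows with red varying fastest. A minimal, illustrative loader and nearest-neighbor application (this is not the app's lut_processor, which the commit does not show; production code would interpolate trilinearly):

import numpy as np

def load_cube(path: str) -> np.ndarray:
    size, rows = 0, []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            if line.upper().startswith("LUT_3D_SIZE"):
                size = int(line.split()[-1])
            elif line[0].isdigit() or line[0] in "+-.":
                rows.append([float(v) for v in line.split()])
    # Red varies fastest in .cube data, so C-order reshape yields axes [blue][green][red]
    return np.asarray(rows, dtype=np.float32).reshape(size, size, size, 3)

def apply_lut(image: np.ndarray, lut: np.ndarray) -> np.ndarray:
    n = lut.shape[0]
    idx = np.rint(image.astype(np.float32) / 255.0 * (n - 1)).astype(int)
    out = lut[idx[..., 2], idx[..., 1], idx[..., 0]]  # look up as [b, g, r]
    return (out * 255).astype(np.uint8)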
histogram_processor.py
DELETED
@@ -1,119 +0,0 @@
-import gradio as gr
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import numpy as np
-from skimage.exposure import match_histograms
-
-class HistogramMatcher(nn.Module):
-    def __init__(self, differentiable=False):
-        super(HistogramMatcher, self).__init__()
-        self.differentiable = differentiable
-
-    def forward(self, dst, ref):
-        B, C, H, W = dst.size()
-        hist_dst = self.cal_hist(dst)
-        hist_ref = self.cal_hist(ref)
-        tables = self.cal_trans_batch(hist_dst, hist_ref)
-
-        rst = dst.clone()
-        for b in range(B):
-            for c in range(C):
-                rst[b,c] = tables[b*c, (dst[b,c] * 255).long()]
-
-        return rst / 255.
-
-    def cal_hist(self, img):
-        B, C, H, W = img.size()
-        if self.differentiable:
-            hists = self.soft_histc_batch(img * 255, bins=256, min=0, max=256, sigma=75)
-        else:
-            hists = torch.stack([torch.histc(img[b,c] * 255, bins=256, min=0, max=255)
-                                 for b in range(B) for c in range(C)])
-
-        hists = hists.float()
-        hists = F.normalize(hists, p=1)
-        bc, n = hists.size()
-        triu = torch.ones(bc, n, n, device=hists.device).triu()
-        hists = torch.bmm(hists[:,None,:], triu)[:,0,:]
-        return hists
-
-    def soft_histc_batch(self, x, bins=256, min=0, max=256, sigma=75):
-        B, C, H, W = x.size()
-        x = x.view(B*C, -1)
-        delta = float(max - min) / float(bins)
-        centers = float(min) + delta * (torch.arange(bins, device=x.device) + 0.5)
-
-        x = torch.unsqueeze(x, 1)
-        centers = centers[None,:,None]
-        x = x - centers
-        x = torch.sigmoid(sigma * (x + delta/2)) - torch.sigmoid(sigma * (x - delta/2))
-        x = x.sum(dim=2)
-        return x
-
-    def cal_trans_batch(self, hist_dst, hist_ref):
-        hist_dst = hist_dst[:,None,:].repeat(1,256,1)
-        hist_ref = hist_ref[:,:,None].repeat(1,1,256)
-        table = hist_dst - hist_ref
-        table = torch.where(table>=0, 1., 0.)
-        table = torch.sum(table, dim=1) - 1
-        table = torch.clamp(table, min=0, max=255)
-        return table
-
-def apply_histogram_matching(image, reference, factor):
-    if image is None or reference is None:
-        return None
-
-    # Convert to torch tensors and normalize
-    image = torch.from_numpy(image).float() / 255.0
-    reference = torch.from_numpy(reference).float() / 255.0
-
-    # Add batch dimension and rearrange to BCHW
-    image = image.unsqueeze(0).permute(0, 3, 1, 2)
-    reference = reference.unsqueeze(0).permute(0, 3, 1, 2)
-
-    matched = match_histograms(
-        image.permute(0, 2, 3, 1).numpy(),
-        reference.permute(0, 2, 3, 1).numpy(),
-        channel_axis=3
-    )
-    matched = torch.from_numpy(matched).permute(0, 3, 1, 2)
-
-    # Apply factor blending
-    result = factor * matched + (1 - factor) * image
-
-    # Convert back to HWC format and to uint8
-    output = result.squeeze(0).permute(1, 2, 0)
-    output = (output.clamp(0, 1).numpy() * 255).astype(np.uint8)
-
-    return output
-
-def create_histogram_tab():
-    with gr.Tab("Histogram Matching"):
-
-        with gr.Row():
-            with gr.Column():
-                input_image = gr.Image(label="Input Image", height=256)
-                reference_image = gr.Image(label="Reference Image", height=256)
-
-                factor = gr.Slider(
-                    minimum=0.0,
-                    maximum=1.0,
-                    value=0.5,
-                    step=0.05,
-                    label="Blend Factor"
-                )
-
-                match_btn = gr.Button("Apply Histogram Matching")
-
-            with gr.Column():
-                output_image = gr.Image(label="Matched Image")
-
-        match_btn.click(
-            fn=apply_histogram_matching,
-            inputs=[input_image, reference_image, factor],
-            outputs=output_image
-        )
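Note: histogram matching maps each intensity v through F_ref^{-1}(F_src(v)), the composition of the source CDF with the inverse reference CDF; cal_trans_batch approximates that lookup table by comparing cumulative histograms, while the tab itself delegates to skimage's match_histograms. A usage sketch, assuming two uint8 RGB arrays:

import numpy as np
from histogram_processor import apply_histogram_matching  # module removed by this commit

rng = np.random.default_rng(1)
image = rng.integers(0, 128, size=(64, 64, 3), dtype=np.uint8)        # dark input
reference = rng.integers(128, 256, size=(64, 64, 3), dtype=np.uint8)  # bright reference

# factor=0.5 moves halfway from the input toward the matched result
out = apply_histogram_matching(image, reference, factor=0.5)
print(image.mean(), out.mean())  # mean brightness pulled toward the reference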