Update app.py
app.py (CHANGED)
@@ -20,7 +20,6 @@ oneFormer_model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/on
 # classification = pipeline("image-classification", model="google/vit-base-patch16-224")
 # upscaling_client = InferenceClient(model="stabilityai/stable-diffusion-x4-upscaler")
 # inpainting_client = InferenceClient(model="stabilityai/stable-diffusion-inpainting")
-
 # Функции для обработки изображений
 def segment_image(image):
     image = Image.fromarray(image)
@@ -93,7 +92,7 @@ def merge_segments_by_labels(gallery_images, labels_input):
     return gallery_images
 
 
-def set_hunyuan_client(request: gr.Request):
+def hunyuan_client(request: gr.Request):
     try:
         client = Client("tencent/Hunyuan3D-2", headers={"X-IP-Token": request.headers['x-ip-token']})
         print("tencent/Hunyuan3D-2 Ip token")
@@ -102,7 +101,7 @@ def set_hunyuan_client(request: gr.Request):
         print("tencent/Hunyuan3D-2 no token")
         return Client("tencent/Hunyuan3D-2")
 
-def set_vFusion_client(request: gr.Request):
+def vFusion_client(request: gr.Request):
     try:
         client = Client("facebook/VFusion3D", headers={"X-IP-Token": request.headers['x-ip-token']})
         print("facebook/VFusion3D Ip token")
@@ -111,9 +110,10 @@ def set_vFusion_client(request: gr.Request):
         print("facebook/VFusion3D no token")
         return Client("facebook/VFusion3D")
 
-def generate_3d_model(client, segment_output, segment_name):
+def generate_3d_model(segment_output, request: gr.Request):
     for i, (image_path, label) in enumerate(segment_output):
         if label == segment_name:
+            client = hunyuan_client(request)
             result = client.predict(
                 caption="",
                 image=handle_file(image_path),
@@ -127,11 +127,10 @@ def generate_3d_model(client, segment_output, segment_name):
     print(result)
     return result[0]
 
-def generate_3d_model_texture(client, segment_output, segment_name, request: gr.Request):
+def generate_3d_model_texture(segment_output, segment_name, request: gr.Request):
     for i, (image_path, label) in enumerate(segment_output):
         if label == segment_name:
-            client =
-            print("tencent/Hunyuan3D-2 new client token")
+            client = hunyuan_client(request)
             result = client.predict(
                 caption="",
                 image=handle_file(image_path),
@@ -145,9 +144,10 @@ def generate_3d_model_texture(client, segment_output, segment_name, request: gr.
     print(result)
     return result[1]
 
-def generate_3d_model2(client, segment_output, segment_name):
+def generate_3d_model2(segment_output, segment_name, request: gr.Request):
     for i, (image_path, label) in enumerate(segment_output):
         if label == segment_name:
+            client = vFusion_client(request)
             result = client.predict(
                 image=handle_file(image_path),
                 api_name="/step_1_generate_obj"
@@ -172,10 +172,11 @@ def generate_3d_model2(client, segment_output, segment_name):
 #     return inpainted
 
 
-with gr.Blocks() as demo:
-    hunyuan_client = gr.State()
-    vFusion_client = gr.State()
 
+
+########## GRADIO ##########
+
+with gr.Blocks() as demo:
     gr.Markdown("# Анализ и редактирование помещений")
 
     with gr.Tab("Сканирование"):
@@ -197,9 +198,9 @@ with gr.Blocks() as demo:
         with gr.Column(scale=5):
             trellis_output = gr.Model3D(label="3D Model")
 
-        hunyuan_button.click(generate_3d_model, inputs=[
-        hunyuan_button_texture.click(generate_3d_model_texture, inputs=[
-        vFusion_button.click(generate_3d_model2, inputs=[
+        hunyuan_button.click(generate_3d_model, inputs=[segment_output, trellis_input], outputs=trellis_output)
+        hunyuan_button_texture.click(generate_3d_model_texture, inputs=[segment_output, trellis_input], outputs=trellis_output)
+        vFusion_button.click(generate_3d_model2, inputs=[segment_output, trellis_input], outputs=trellis_output)
 
         segment_button.click(segment_image, inputs=image_input, outputs=segment_output)
         # segment_button.click(segment_full_image, inputs=image_input, outputs=segment_output)
@@ -222,7 +223,4 @@ with gr.Blocks() as demo:
         # model_button = gr.Button("Создать 3D модель")
         # model_button.click(generate_3d_model, inputs=segment_input_3d, outputs=model_output)
 
-
-    demo.load(set_vFusion_client, None, vFusion_client)
-
-demo.launch(debug=True, show_error=True)
+demo.launch(debug=True, show_error=True)
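For reference, the pattern this commit moves to can be boiled down to the sketch below: the handler is annotated with request: gr.Request, so Gradio injects the incoming request without it being listed in inputs, and the visitor's ZeroGPU x-ip-token header (when present) is forwarded to the remote Space through the Client headers. The Space name, header key, and api_name are taken from the diff; the component names, the helper and handler names, the segment_name input, and the return handling are illustrative assumptions rather than the app's exact code.

import gradio as gr
from gradio_client import Client, handle_file

def vfusion_client(request: gr.Request):
    # Forward the caller's ZeroGPU quota token when available; otherwise fall back to an anonymous client.
    try:
        return Client("facebook/VFusion3D", headers={"X-IP-Token": request.headers["x-ip-token"]})
    except (KeyError, TypeError):
        return Client("facebook/VFusion3D")

def generate_3d(segment_output, segment_name, request: gr.Request):
    # segment_output arrives as the gallery value: (image_path, label) pairs.
    for image_path, label in segment_output or []:
        if label == segment_name:
            client = vfusion_client(request)  # a fresh client per user request
            result = client.predict(
                image=handle_file(image_path),
                api_name="/step_1_generate_obj",  # endpoint name as it appears in the diff
            )
            return result  # assumption: adjust indexing to whatever the endpoint actually returns

with gr.Blocks() as demo:
    gallery = gr.Gallery(label="Segments")       # stand-in for segment_output
    name_box = gr.Textbox(label="Segment name")  # stand-in for trellis_input
    model_view = gr.Model3D(label="3D Model")
    run = gr.Button("Generate 3D")
    # gr.Request is not listed in inputs; Gradio injects it because of the type annotation.
    run.click(generate_3d, inputs=[gallery, name_box], outputs=model_view)

demo.launch()

Creating the client inside the handler means every invocation carries the current visitor's token instead of reusing one client built at startup, which is why the gr.State components and the demo.load(set_vFusion_client, ...) hook could be dropped.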