updates
app.py CHANGED

@@ -19,7 +19,7 @@ model, transform = depth_pro.create_model_and_transforms()
 model = model.to(device)
 model.eval()
 
-def resize_image(image_path, max_size=
+def resize_image(image_path, max_size=1536):
     with Image.open(image_path) as img:
         # Calculate the new size while maintaining aspect ratio
         ratio = max_size / max(img.size)
@@ -67,7 +67,7 @@ def predict_depth(input_image):
     inverse_depth_clipped = np.clip(inverse_depth, 0, 10)
 
     # Create a color map
-    plt.figure(figsize=(
+    plt.figure(figsize=(15.36, 15.36), dpi=100)  # Set figure size to 1536x1536 pixels
     plt.imshow(inverse_depth_clipped, cmap='viridis')
     plt.colorbar(label='Inverse Depth')
     plt.title('Predicted Inverse Depth Map')
@@ -75,7 +75,7 @@ def predict_depth(input_image):
 
     # Save the plot to a file
     output_path = "inverse_depth_map.png"
-    plt.savefig(output_path)
+    plt.savefig(output_path, dpi=100, bbox_inches='tight', pad_inches=0)
     plt.close()
 
     return output_path, f"Focal length: {focallength_px:.2f} pixels"
@@ -90,9 +90,12 @@ def predict_depth(input_image):
 iface = gr.Interface(
     fn=predict_depth,
     inputs=gr.Image(type="filepath"),
-    outputs=[
+    outputs=[
+        gr.Image(type="filepath", label="Inverse Depth Map", height=768, width=768),  # Set a reasonable display size
+        gr.Textbox(label="Focal Length or Error Message")
+    ],
     title="DepthPro Demo",
-    description="[DepthPro](https://huggingface.co/apple/DepthPro) is a fast metric depth prediction model. Simply upload an image to predict its inverse depth map and focal length. Large images will be automatically resized."
+    description="[DepthPro](https://huggingface.co/apple/DepthPro) is a fast metric depth prediction model. Simply upload an image to predict its inverse depth map and focal length. Large images will be automatically resized to 1536x1536 pixels."
 )
 
 # Launch the interface
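
Note on the resize helper: only the opening lines of resize_image appear in this diff. A minimal sketch of how the helper might be completed, assuming it downscales the longer side to max_size and writes the result to a temporary PNG (the temp-file handling and return value are assumptions, not part of this commit):

import tempfile
from PIL import Image

def resize_image(image_path, max_size=1536):
    # Sketch only: everything past the ratio computation is assumed, not shown in the diff.
    with Image.open(image_path) as img:
        # Calculate the new size while maintaining aspect ratio
        ratio = max_size / max(img.size)
        if ratio < 1:  # assumed: only downscale, never upscale
            new_size = (int(img.width * ratio), int(img.height * ratio))
            img = img.resize(new_size, Image.LANCZOS)
        # Assumed: save to a temporary file so predict_depth can reload it by path.
        tmp = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
        img.save(tmp.name)
        return tmp.name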
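
Note on the figure settings: matplotlib sizes the canvas as figsize times dpi, so 15.36 in at 100 dpi gives a 1536x1536-pixel figure, and bbox_inches='tight' with pad_inches=0 then trims the margins of the saved PNG. A self-contained sketch of that plotting path, using a random array as a stand-in for the model's clipped inverse depth:

import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless backend, as on a hosted demo
import matplotlib.pyplot as plt

# Stand-in for the clipped inverse depth produced by the model.
inverse_depth_clipped = np.clip(np.random.rand(768, 768) * 12, 0, 10)

plt.figure(figsize=(15.36, 15.36), dpi=100)  # 15.36 in x 100 dpi = 1536 px per side
plt.imshow(inverse_depth_clipped, cmap='viridis')
plt.colorbar(label='Inverse Depth')
plt.title('Predicted Inverse Depth Map')

# Tight cropping removes surrounding whitespace, so the saved image
# ends up somewhat smaller than the nominal 1536x1536 canvas.
plt.savefig("inverse_depth_map.png", dpi=100, bbox_inches='tight', pad_inches=0)
plt.close()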
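
Note on the new outputs list: pairing gr.Image with a textbox labelled "Focal Length or Error Message" implies predict_depth returns a (path, text) tuple and reports failures through the same textbox. A sketch of that contract, with the try/except wrapper and placeholder focal length as assumptions (the diff only shows the success path):

import gradio as gr

def predict_depth(input_image):
    try:
        # ... resize the input, run DepthPro, plot and save the map as in app.py ...
        output_path = "inverse_depth_map.png"
        focallength_px = 1234.56  # placeholder for illustration
        return output_path, f"Focal length: {focallength_px:.2f} pixels"
    except Exception as e:
        # Assumed: errors are surfaced through the textbox component.
        return None, f"Error: {e}"

iface = gr.Interface(
    fn=predict_depth,
    inputs=gr.Image(type="filepath"),
    outputs=[
        gr.Image(type="filepath", label="Inverse Depth Map", height=768, width=768),
        gr.Textbox(label="Focal Length or Error Message"),
    ],
    title="DepthPro Demo",
)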