| """ | |
| Universal Contrast Analyzer for detecting low contrast between ALL adjacent objects. | |
| Optimized for Alzheimer's/dementia care environments. | |
| """ | |
| import numpy as np | |
| import cv2 | |
| from typing import Dict, List, Tuple, Optional | |
| import logging | |
| from scipy.spatial import distance | |
| from skimage.segmentation import find_boundaries | |
| from sklearn.cluster import DBSCAN | |
| import colorsys | |
| logger = logging.getLogger(__name__) | |


class UniversalContrastAnalyzer:
    """
    Analyzes contrast between ALL adjacent objects in a room.
    Ensures proper visibility for elderly individuals with Alzheimer's or dementia.
    """

    def __init__(self, wcag_threshold: float = 4.5):
        self.wcag_threshold = wcag_threshold

        # Comprehensive ADE20K semantic class mappings
        self.semantic_classes = {
            # Floors and ground surfaces
            'floor': [3, 4, 13, 28, 78],  # floor, wood floor, rug, carpet, mat
            # Walls and vertical surfaces
            'wall': [0, 1, 9, 21],  # wall, building, brick, house
            # Ceiling
            'ceiling': [5, 16],  # ceiling, sky (for rooms with skylights)
            # Furniture
            'furniture': [
                10, 19, 15, 7, 18, 23, 30, 33, 34, 36, 44, 45, 57, 63, 64, 65, 75,
                # sofa, chair, table, bed, armchair, cabinet, desk, counter, stool,
                # bench, nightstand, coffee table, ottoman, wardrobe, dresser, shelf,
                # chest of drawers
            ],
            # Doors and openings
            'door': [25, 14, 79],  # door, windowpane, screen door
            # Windows
            'window': [8, 14],  # window, windowpane
            # Stairs and steps
            'stairs': [53, 59],  # stairs, step
            # Small objects that might be on floors/furniture
            'objects': [
                17, 20, 24, 37, 38, 39, 42, 62, 68, 71, 73, 80, 82, 84, 89, 90, 92, 93,
                # curtain, book, picture, towel, clothes, pillow, box, bag, lamp, fan,
                # cushion, basket, bottle, plate, clock, vase, tray, bowl
            ],
            # Kitchen/bathroom fixtures
            'fixtures': [
                32, 46, 49, 50, 54, 66, 69, 70, 77, 94, 97, 98, 99, 117, 118, 119, 120,
                # sink, toilet, bathtub, shower, dishwasher, oven, microwave,
                # refrigerator, stove, washer, dryer, range hood, kitchen island
            ],
            # Decorative elements
            'decorative': [
                6, 12, 56, 60, 61, 72, 83, 91, 96, 100, 102, 104, 106, 110, 112,
                # painting, mirror, sculpture, chandelier, sconce, poster, tapestry
            ],
        }

        # Create reverse mapping for quick lookup
        self.class_to_category = {}
        for category, class_ids in self.semantic_classes.items():
            for class_id in class_ids:
                self.class_to_category[class_id] = category
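        # Example lookups, assuming the ADE20K-style IDs above:
        #   self.class_to_category[3]  -> 'floor'
        #   self.class_to_category[25] -> 'door'
        # Note: an ID listed under two categories (e.g. 14, windowpane, under
        # both 'door' and 'window') resolves to whichever category comes last.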

    def calculate_wcag_contrast(self, color1: np.ndarray, color2: np.ndarray) -> float:
        """Calculate the WCAG 2.0 contrast ratio between two RGB colors."""
        def relative_luminance(rgb):
            # Normalize to 0-1
            rgb_norm = rgb / 255.0
            # Apply gamma correction (linearize)
            rgb_linear = np.where(
                rgb_norm <= 0.03928,
                rgb_norm / 12.92,
                ((rgb_norm + 0.055) / 1.055) ** 2.4
            )
            # Calculate luminance using ITU-R BT.709 coefficients
            return np.dot(rgb_linear, [0.2126, 0.7152, 0.0722])

        lum1 = relative_luminance(color1)
        lum2 = relative_luminance(color2)

        # Ensure the lighter color is in the numerator
        lighter = max(lum1, lum2)
        darker = min(lum1, lum2)

        return (lighter + 0.05) / (darker + 0.05)
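
    # Worked example (a quick sanity check, not part of the pipeline):
    # white (255, 255, 255) against the mid-gray #767676 (118, 118, 118)
    # comes out at ~4.54:1, just above the 4.5:1 WCAG AA threshold:
    #   analyzer = UniversalContrastAnalyzer()
    #   analyzer.calculate_wcag_contrast(
    #       np.array([255, 255, 255]), np.array([118, 118, 118]))  # ~4.54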

    def calculate_hue_difference(self, color1: np.ndarray, color2: np.ndarray) -> float:
        """Calculate circular hue difference in OpenCV hue units (0-90; one unit = 2 degrees)."""
        # Convert RGB to HSV
        hsv1 = cv2.cvtColor(color1.reshape(1, 1, 3).astype(np.uint8), cv2.COLOR_RGB2HSV)[0, 0]
        hsv2 = cv2.cvtColor(color2.reshape(1, 1, 3).astype(np.uint8), cv2.COLOR_RGB2HSV)[0, 0]

        # Cast to int before subtracting: the HSV channels are uint8, and
        # unsigned subtraction would wrap around instead of going negative.
        hue_diff = abs(int(hsv1[0]) - int(hsv2[0]))

        # OpenCV hue is circular over 0-180, so fold differences above 90
        if hue_diff > 90:
            hue_diff = 180 - hue_diff

        return hue_diff
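
    # Example: pure red (255, 0, 0) maps to H=0 and pure green (0, 255, 0)
    # to H=60 in OpenCV units, so their circular difference is 60
    # (equivalent to 120 degrees).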

    def calculate_saturation_difference(self, color1: np.ndarray, color2: np.ndarray) -> float:
        """Calculate saturation difference (0-255)"""
        hsv1 = cv2.cvtColor(color1.reshape(1, 1, 3).astype(np.uint8), cv2.COLOR_RGB2HSV)[0, 0]
        hsv2 = cv2.cvtColor(color2.reshape(1, 1, 3).astype(np.uint8), cv2.COLOR_RGB2HSV)[0, 0]
        return abs(int(hsv1[1]) - int(hsv2[1]))

    def extract_dominant_color(self, image: np.ndarray, mask: np.ndarray,
                               sample_size: int = 1000) -> np.ndarray:
        """Extract the dominant color from a masked region using robust statistics."""
        if not np.any(mask):
            return np.array([128, 128, 128])  # Default gray

        # Get masked pixels
        masked_pixels = image[mask]
        if len(masked_pixels) == 0:
            return np.array([128, 128, 128])

        # Sample if there are too many pixels (for efficiency)
        if len(masked_pixels) > sample_size:
            indices = np.random.choice(len(masked_pixels), sample_size, replace=False)
            masked_pixels = masked_pixels[indices]

        # Use DBSCAN clustering to find the dominant color cluster
        if len(masked_pixels) > 50:
            try:
                clustering = DBSCAN(eps=30, min_samples=10).fit(masked_pixels)
                labels = clustering.labels_

                # Take the median of the largest cluster (label -1 is noise)
                unique_labels, counts = np.unique(labels[labels >= 0], return_counts=True)
                if len(unique_labels) > 0:
                    dominant_label = unique_labels[np.argmax(counts)]
                    dominant_colors = masked_pixels[labels == dominant_label]
                    return np.median(dominant_colors, axis=0).astype(int)
            except Exception:
                pass

        # Fall back to the median of all masked pixels
        return np.median(masked_pixels, axis=0).astype(int)
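
    # Design note: the median of the largest DBSCAN cluster resists
    # highlights and shadows that would skew a plain mean. A minimal
    # synthetic sketch:
    #   img = np.full((10, 100, 3), (200, 30, 30), dtype=np.uint8)
    #   img[:, :10] = (250, 250, 250)               # simulated highlight
    #   mask = np.ones((10, 100), dtype=bool)
    #   analyzer.extract_dominant_color(img, mask)  # ~[200, 30, 30]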

    def find_adjacent_segments(self, segmentation: np.ndarray) -> Dict[Tuple[int, int], np.ndarray]:
        """
        Find all pairs of adjacent segments and their boundaries.
        Returns a dict mapping (seg1_id, seg2_id) to a boundary mask.
        """
        adjacencies = {}

        # Mark inner boundary pixels, then inspect their neighborhoods
        boundaries = find_boundaries(segmentation, mode='inner')

        h, w = segmentation.shape
        for y in range(1, h - 1):
            for x in range(1, w - 1):
                if boundaries[y, x]:
                    center_id = segmentation[y, x]

                    # Check 8-connected neighbors for more complete boundaries
                    neighbors = [
                        segmentation[y - 1, x],      # top
                        segmentation[y + 1, x],      # bottom
                        segmentation[y, x - 1],      # left
                        segmentation[y, x + 1],      # right
                        segmentation[y - 1, x - 1],  # top-left
                        segmentation[y - 1, x + 1],  # top-right
                        segmentation[y + 1, x - 1],  # bottom-left
                        segmentation[y + 1, x + 1],  # bottom-right
                    ]

                    for neighbor_id in neighbors:
                        if neighbor_id != center_id and neighbor_id != 0:  # Different segment, not background
                            # Create an ordered pair (smaller id first)
                            pair = tuple(sorted([center_id, neighbor_id]))

                            # Add this boundary pixel to the adjacency map
                            if pair not in adjacencies:
                                adjacencies[pair] = np.zeros((h, w), dtype=bool)
                            adjacencies[pair][y, x] = True

        # Filter out small boundaries (noise)
        min_boundary_pixels = 20
        filtered_adjacencies = {}
        for pair, boundary in adjacencies.items():
            if np.sum(boundary) >= min_boundary_pixels:
                filtered_adjacencies[pair] = boundary

        return filtered_adjacencies
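
    # Example: two vertical half-planes meet along a single seam, keyed by
    # the sorted id pair:
    #   seg = np.zeros((40, 40), dtype=int)
    #   seg[:, :20] = 1
    #   seg[:, 20:] = 2
    #   adj = analyzer.find_adjacent_segments(seg)
    #   sorted(adj.keys())  # [(1, 2)] - a single adjacent pair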

    def is_contrast_sufficient(self, color1: np.ndarray, color2: np.ndarray,
                               category1: str, category2: str) -> Tuple[bool, Optional[str]]:
        """
        Determine whether contrast is sufficient based on WCAG and perceptual guidelines.
        Returns (is_sufficient, severity_if_not).
        """
        wcag_ratio = self.calculate_wcag_contrast(color1, color2)
        hue_diff = self.calculate_hue_difference(color1, color2)
        sat_diff = self.calculate_saturation_difference(color1, color2)

        # Relationship pairs are stored alphabetically sorted so they match
        # the tuple(sorted(...)) lookup below.

        # Critical relationships requiring the highest contrast
        critical_pairs = [
            ('door', 'floor'),
            ('floor', 'stairs'),
            ('stairs', 'wall'),
        ]

        # High priority relationships
        high_priority_pairs = [
            ('door', 'wall'),
            ('floor', 'furniture'),
            ('floor', 'objects'),
            ('furniture', 'wall'),
        ]

        # Check relationship type
        relationship = tuple(sorted([category1, category2]))

        # Determine thresholds based on the relationship
        if relationship in critical_pairs:
            # Critical: require a 7:1 contrast ratio
            if wcag_ratio < 7.0:
                return False, 'critical'
            if hue_diff < 30 and sat_diff < 50:
                return False, 'critical'
        elif relationship in high_priority_pairs:
            # High priority: require the configured WCAG ratio (4.5:1 by default)
            if wcag_ratio < self.wcag_threshold:
                return False, 'high'
            if wcag_ratio < 7.0 and hue_diff < 20 and sat_diff < 40:
                return False, 'high'
        else:
            # Standard: require a 3:1 contrast ratio at minimum
            if wcag_ratio < 3.0:
                return False, 'medium'
            if wcag_ratio < 4.5 and hue_diff < 15 and sat_diff < 30:
                return False, 'medium'

        return True, None
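
    # Example: a ~4.5:1 pair passes for standard neighbors but fails the
    # 7:1 rule for a door against the floor:
    #   white, gray = np.array([255, 255, 255]), np.array([118, 118, 118])
    #   analyzer.is_contrast_sufficient(white, gray, 'floor', 'door')
    #   # -> (False, 'critical')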

    def analyze_contrast(self, image: np.ndarray, segmentation: np.ndarray) -> Dict:
        """
        Perform comprehensive contrast analysis between ALL adjacent objects.

        Args:
            image: RGB image
            segmentation: Segmentation mask with class IDs

        Returns:
            Dictionary containing analysis results and visualizations
        """
        results = {
            'issues': [],
            'visualization': image.copy(),
            'statistics': {
                'total_segments': 0,
                'analyzed_pairs': 0,
                'low_contrast_pairs': 0,
                'critical_issues': 0,
                'high_priority_issues': 0,
                'medium_priority_issues': 0,
                'floor_object_issues': 0
            }
        }

        # Get unique segments
        unique_segments = np.unique(segmentation)
        unique_segments = unique_segments[unique_segments != 0]  # Remove background
        results['statistics']['total_segments'] = len(unique_segments)

        # Build segment information
        segment_info = {}
        logger.info(f"Building segment information for {len(unique_segments)} segments...")

        for seg_id in unique_segments:
            mask = segmentation == seg_id
            area = np.sum(mask)

            if area < 50:  # Skip very small segments
                continue

            category = self.class_to_category.get(seg_id, 'unknown')
            color = self.extract_dominant_color(image, mask)

            segment_info[seg_id] = {
                'category': category,
                'mask': mask,
                'color': color,
                'area': area,
                'class_id': seg_id
            }

        # Find all adjacent segment pairs
        logger.info("Finding adjacent segments...")
        adjacencies = self.find_adjacent_segments(segmentation)
        logger.info(f"Found {len(adjacencies)} adjacent segment pairs")

        # Analyze each adjacent pair
        for (seg1_id, seg2_id), boundary in adjacencies.items():
            if seg1_id not in segment_info or seg2_id not in segment_info:
                continue

            info1 = segment_info[seg1_id]
            info2 = segment_info[seg2_id]

            # Skip if both categories are unknown
            if info1['category'] == 'unknown' and info2['category'] == 'unknown':
                continue

            results['statistics']['analyzed_pairs'] += 1

            # Check contrast sufficiency
            is_sufficient, severity = self.is_contrast_sufficient(
                info1['color'], info2['color'],
                info1['category'], info2['category']
            )

            if not is_sufficient:
                results['statistics']['low_contrast_pairs'] += 1

                # Calculate detailed metrics
                wcag_ratio = self.calculate_wcag_contrast(info1['color'], info2['color'])
                hue_diff = self.calculate_hue_difference(info1['color'], info2['color'])
                sat_diff = self.calculate_saturation_difference(info1['color'], info2['color'])

                # Check whether this is a floor-object issue
                is_floor_object = (
                    (info1['category'] == 'floor' and info2['category'] in ['furniture', 'objects']) or
                    (info2['category'] == 'floor' and info1['category'] in ['furniture', 'objects'])
                )
                if is_floor_object:
                    results['statistics']['floor_object_issues'] += 1

                # Count by severity
                if severity == 'critical':
                    results['statistics']['critical_issues'] += 1
                elif severity == 'high':
                    results['statistics']['high_priority_issues'] += 1
                elif severity == 'medium':
                    results['statistics']['medium_priority_issues'] += 1

                # Record the issue
                issue = {
                    'segment_ids': (seg1_id, seg2_id),
                    'categories': (info1['category'], info2['category']),
                    'colors': (info1['color'].tolist(), info2['color'].tolist()),
                    'wcag_ratio': float(wcag_ratio),
                    'hue_difference': float(hue_diff),
                    'saturation_difference': float(sat_diff),
                    'boundary_pixels': int(np.sum(boundary)),
                    'severity': severity,
                    'is_floor_object': is_floor_object,
                    'boundary_mask': boundary
                }
                results['issues'].append(issue)

                # Visualize on the output image
                self._visualize_issue(results['visualization'], boundary, severity)

        # Sort issues by severity
        severity_order = {'critical': 0, 'high': 1, 'medium': 2}
        results['issues'].sort(key=lambda x: severity_order.get(x['severity'], 3))

        logger.info(f"Contrast analysis complete: {results['statistics']['low_contrast_pairs']} issues found")
        return results

    def _visualize_issue(self, image: np.ndarray, boundary: np.ndarray, severity: str):
        """Overlay visual indicators for contrast issues (modifies image in place)."""
        # Color coding by severity
        colors = {
            'critical': (255, 0, 0),   # Red
            'high': (255, 128, 0),     # Orange
            'medium': (255, 255, 0),   # Yellow
        }
        color = colors.get(severity, (255, 255, 255))

        # Dilate the boundary for better visibility
        kernel = np.ones((3, 3), np.uint8)
        dilated = cv2.dilate(boundary.astype(np.uint8), kernel, iterations=2)

        # Apply a semi-transparent color overlay
        overlay = image.copy()
        overlay[dilated > 0] = color
        cv2.addWeighted(overlay, 0.5, image, 0.5, 0, image)

        return image

    def generate_report(self, results: Dict) -> str:
        """Generate a detailed text report of the contrast analysis."""
        stats = results['statistics']
        issues = results['issues']

        report = []
        report.append("=== Universal Contrast Analysis Report ===\n")

        # Summary statistics
        report.append(f"Total segments analyzed: {stats['total_segments']}")
        report.append(f"Adjacent pairs analyzed: {stats['analyzed_pairs']}")
        report.append(f"Low contrast pairs found: {stats['low_contrast_pairs']}")
        report.append(f"- Critical issues: {stats['critical_issues']}")
        report.append(f"- High priority issues: {stats['high_priority_issues']}")
        report.append(f"- Medium priority issues: {stats['medium_priority_issues']}")
        report.append(f"Floor-object contrast issues: {stats['floor_object_issues']}\n")

        # Detailed issues
        if issues:
            report.append("=== Contrast Issues (sorted by severity) ===\n")
            for i, issue in enumerate(issues[:10], 1):  # Show the top 10 issues
                cat1, cat2 = issue['categories']
                wcag = issue['wcag_ratio']
                hue_diff = issue['hue_difference']
                sat_diff = issue['saturation_difference']
                severity = issue['severity'].upper()

                report.append(f"{i}. [{severity}] {cat1} ↔ {cat2}")
                report.append(f"   - WCAG Contrast Ratio: {wcag:.2f}:1 (minimum: {self.wcag_threshold}:1)")
                report.append(f"   - Hue Difference: {hue_diff:.1f} OpenCV units (recommended: >30)")
                report.append(f"   - Saturation Difference: {sat_diff:.0f} (recommended: >50)")
                if issue['is_floor_object']:
                    report.append("   - ⚠️ Object on floor - requires high visibility!")
                report.append(f"   - Boundary size: {issue['boundary_pixels']} pixels")
                report.append("")
        else:
            report.append("✅ No contrast issues detected!")

        return "\n".join(report)