import copy
import json
import os
from typing import Any
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from lychsim.api import LychSim
from lychsim.utils.camera_projection_utils import project_3d_to_2d, get_bbox3d
from dataclasses import dataclass
from typing import List, Optional, Dict, Tuple
from scipy.spatial import cKDTree
from collections import defaultdict
class EasyDict(dict):
"""Convenience class that behaves like a dict but allows access with the attribute syntax."""
def __getattr__(self, name: str) -> Any:
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name: str, value: Any) -> None:
self[name] = value
def __delattr__(self, name: str) -> None:
del self[name]
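# A minimal sketch of how EasyDict is used below: the mutable scene "state"
# is an EasyDict, so fields can be read and written with either key or
# attribute syntax interchangeably.
def _demo_easydict():
    state = EasyDict()
    state.cam_locations = []           # attribute-style write
    state["scene_name"] = "demo"       # dict-style write
    assert state.scene_name == state["scene_name"]
    return state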
def init_sampling_params(state):
# list of table and floor objects
# will be provided by Xingrui and Siyi
state.floor_objects = [
"/Game/ManagerOffice/Meshes/Props/SM_AmchairTreadle.SM_AmchairTreadle",
"/Game/ManagerOffice/Meshes/Props/SM_ArmchairManager.SM_ArmchairManager",
"/Game/ManagerOffice/Meshes/Props/SM_ColumnTable.SM_ColumnTable",
"/Game/ManagerOffice/Meshes/Props/SM_Decorative17.SM_Decorative17",
"/Game/ManagerOffice/Meshes/Props/SM_Komod.SM_Komod",
"/Game/ManagerOffice/Meshes/Props/SM_KomodB.SM_KomodB",
"/Game/ManagerOffice/Meshes/Props/SM_Plant2.SM_Plant2",
"/Game/ManagerOffice/Meshes/Props/SM_Plant1.SM_Plant1",
"/Game/ManagerOffice/Meshes/Props/SM_TeaTable.SM_TeaTable",
]
state.table_objects = [
"/Game/ManagerOffice/Meshes/Props/SM_Ashtray.SM_Ashtray",
"/Game/ManagerOffice/Meshes/Props/SM_Award3.SM_Award3",
"/Game/ManagerOffice/Meshes/Props/SM_Award9.SM_Award9",
"/Game/ManagerOffice/Meshes/Props/SM_Book2.SM_Book2",
"/Game/ManagerOffice/Meshes/Props/SM_CalendarDesk.SM_CalendarDesk",
"/Game/ManagerOffice/Meshes/Props/SM_Decorative10.SM_Decorative10",
"/Game/ManagerOffice/Meshes/Props/SM_Decorative37.SM_Decorative37",
"/Game/ManagerOffice/Meshes/Props/SM_Fruits.SM_Fruits",
"/Game/ManagerOffice/Meshes/Props/SM_PC.SM_PC",
"/Game/ManagerOffice/Meshes/Props/SM_MarkerMug.SM_MarkerMug",
]
mesh_extents = state.sim.get_mesh_extent(state.floor_objects + state.table_objects)[
"outputs"
]
state.mesh_extents = {
x["mesh_path"]: x["extent"] for x in mesh_extents if x["status"] == "ok"
}
for x in state.floor_objects:
if x not in state.mesh_extents:
print(f"Warning: Floor object {x} not found in the scene.")
for x in state.table_objects:
if x not in state.mesh_extents:
print(f"Warning: Table object {x} not found in the scene.")
state.floor_objects = [x for x in state.floor_objects if x in state.mesh_extents]
state.table_objects = [x for x in state.table_objects if x in state.mesh_extents]
state.table_height_margin_low, state.table_height_margin_high = (
-30.0,
50.0,
) # table object hit box [top-30, top+50]
state.table_object_threshold = (
0.75 # IoA threshold: intersection over object volume
)
# number of trials to sample floor objects
state.max_floor_sampling_trials = 20
# IoU threshold for floor object collision detection
state.floor_object_collision_iou_thr = 0.1
# threshold for worst addition on floor: if worse than this, skip adding floor objects
state.worst_floor_addition = -10
# number of trials to sample table objects
state.max_table_sampling_trials = 20
# IoU threshold for table object collision detection
state.table_object_collision_iou_thr = 0.1
# threshold for worst addition on table: if worse than this, skip adding table objects
state.worst_table_addition = -10
def add_selection_as_floor(state, num_objects):
objects = state.sim.list_selected()
if objects["status"] != "ok":
raise RuntimeError(f"Failed to get selected objects. Response: {objects}")
new_floors = []
for obj in objects["outputs"]:
obj_id = obj["object_id"]
new_floors.append((obj_id, num_objects))
before_count = len(state.floors)
state.floors.update(new_floors)
print(
f"Added {len(new_floors)} object(s) to the floor list (prev={before_count} "
f"-> now={len(state.floors)}):\n{state.floors}"
)
def add_selection_as_table(state, num_objects):
objects = state.sim.list_selected()
if objects["status"] != "ok":
raise RuntimeError(f"Failed to get selected objects. Response: {objects}")
new_tables = []
for obj in objects["outputs"]:
obj_id = obj["object_id"]
new_tables.append((obj_id, num_objects))
before_count = len(state.tables)
state.tables.update(new_tables)
print(
f"Added {len(new_tables)} object(s) to the table list (prev={before_count} "
f"-> now={len(state.tables)}):\n{state.tables}"
)
def add_camera_location(state):
    cam_id = state.cam_id
    loc = state.sim.get_cam_loc(cam_id)
    before_count = len(state.cam_locations)
    state.cam_locations.append(loc)
    print(f"New location added (prev={before_count} -> {len(state.cam_locations)}):")
    for saved_loc in state.cam_locations:
        print(f"\t{saved_loc}")
def get_objects_on_aabb(state, table_aabb, objs_aabb):
table_aabb, objs_aabb = copy.deepcopy(table_aabb), copy.deepcopy(objs_aabb)
object_list = []
target_center, target_extent = table_aabb["center"], table_aabb["extent"]
    # we compute the space above the table, using the height margins
    # configured in init_sampling_params
target_center[2] = (
target_center[2]
+ target_extent[2]
+ (state.table_height_margin_low + state.table_height_margin_high) / 2.0
)
target_extent[2] = (
state.table_height_margin_high - state.table_height_margin_low
) / 2.0
tgt_min = np.array(target_center) - np.array(target_extent)
tgt_max = np.array(target_center) + np.array(target_extent)
for aabb in objs_aabb:
if aabb["status"] != "ok" or aabb["object_id"] == table_aabb["object_id"]:
continue
aabb["extent"] = [max(x, 1e-6) for x in aabb["extent"]]
obj_min = np.array(aabb["center"]) - np.array(aabb["extent"])
obj_max = np.array(aabb["center"]) + np.array(aabb["extent"])
inter_min = np.maximum(obj_min, tgt_min)
inter_max = np.minimum(obj_max, tgt_max)
inter_extent = np.maximum(0.0, inter_max - inter_min)
inter_vol = np.prod(inter_extent)
obj_vol = np.prod(2 * np.array(aabb["extent"]))
if inter_vol / obj_vol >= state.table_object_threshold:
object_list.append(aabb["object_id"])
return object_list
def clear_table_objects(state, table_id, objs_aabb):
objs_aabb = copy.deepcopy(objs_aabb)
table_aabb = [x for x in objs_aabb if x["object_id"] == table_id][0]
objects_on_table = get_objects_on_aabb(state, table_aabb, objs_aabb)
for obj_id in objects_on_table:
state.sim.del_obj(obj_id)
def collide(center1, extent1, center2, extent2, thr):
center1, extent1 = np.array(center1), np.array(extent1)
center2, extent2 = np.array(center2), np.array(extent2)
min1, max1 = center1 - extent1, center1 + extent1
min2, max2 = center2 - extent2, center2 + extent2
inter_min = np.maximum(min1, min2)
inter_max = np.minimum(max1, max2)
inter_extent = np.maximum(0.0, inter_max - inter_min)
inter_vol = np.prod(inter_extent)
vol1, vol2 = np.prod(2 * extent1), np.prod(2 * extent2)
union_vol = vol1 + vol2 - inter_vol
iou = inter_vol / union_vol if union_vol > 0 else 0.0
return iou >= thr
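# Hedged sketch of collide(): boxes are (center, half-extent) AABBs compared
# by IoU against a threshold. Two unit cubes offset by half a side share an
# intersection volume of 0.5, so IoU = 0.5 / (1 + 1 - 0.5) = 1/3; the numbers
# below are illustrative only.
def _demo_collide():
    c1, e1 = [0.0, 0.0, 0.0], [0.5, 0.5, 0.5]    # unit cube at the origin
    c2, e2 = [0.5, 0.0, 0.0], [0.5, 0.5, 0.5]    # shifted half a side in x
    assert collide(c1, e1, c2, e2, thr=0.1)      # IoU ~ 0.333 >= 0.1
    assert not collide(c1, e1, c2, e2, thr=0.5)  # IoU ~ 0.333 < 0.5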
def compute_addition_from_collision(state, objs_aabb, sampling):
addition = len(sampling)
# first check mutual collisions
for obj1 in sampling:
for obj2 in sampling:
if obj1 >= obj2:
continue
if collide(
sampling[obj1]["center"],
sampling[obj1]["extent"],
sampling[obj2]["center"],
sampling[obj2]["extent"],
state.floor_object_collision_iou_thr,
):
return -1e5, []
tables = [x[0] for x in state.tables]
all_collided_objects = []
for obj in sampling:
collided_objects = [
x
for x in objs_aabb
if collide(
x["center"],
x["extent"],
sampling[obj]["center"],
sampling[obj]["extent"],
state.floor_object_collision_iou_thr,
)
]
for x in collided_objects:
if x["object_id"] in tables:
return -1e5, []
addition -= len(collided_objects)
all_collided_objects.extend([x["object_id"] for x in collided_objects])
return addition, all_collided_objects
def sample_floor_objects(state, floor_id, num_objects, objs_aabb):
floor_aabb = state.sim.get_obj_aabb(floor_id)["outputs"][0]
target_center, target_extent = np.array(floor_aabb["center"]), np.array(
floor_aabb["extent"]
)
target_extent[0] *= 0.9
target_extent[1] *= 0.9
best_sampling, best_addition, best_collisions = None, -1e6, None
for _ in range(state.max_floor_sampling_trials):
sampling = {}
sampled_object_ids = [
state.floor_objects[i]
for i in np.random.choice(
len(state.floor_objects), num_objects, replace=False
)
]
for soi in sampled_object_ids:
horizontal_location = target_center[:2] + np.random.uniform(
-target_extent[:2] * 0.5, target_extent[:2] * 0.5
)
vertical_location = target_center[2] + target_extent[2]
sampling[soi] = dict(
center=list(horizontal_location) + [vertical_location],
extent=state.mesh_extents[soi],
)
addition, collisions = compute_addition_from_collision(
state, objs_aabb, sampling
)
if addition > best_addition:
best_addition = addition
best_sampling = sampling
best_collisions = collisions
if best_addition < state.worst_floor_addition:
# print(f"Best addition: {best_addition}, collisions: {best_collisions}")
return None
for obj_id in best_collisions:
state.sim.del_obj(obj_id)
# print(f"del {obj_id}")
for obj_id in best_sampling:
loc = best_sampling[obj_id]["center"]
rot = [0.0, float(np.random.uniform(0, 360)), 0.0]
state.sim.add_obj(f"{obj_id.split('.')[-1]}_{random_uuid()}", obj_id, loc, rot)
# print(f"add {obj_id}, {loc}, {rot}")
def sample_table_objects(state, table_id, num_objects, objs_aabb):
table_aabb = state.sim.get_obj_aabb(table_id)["outputs"][0]
target_center, target_extent = np.array(table_aabb["center"]), np.array(
table_aabb["extent"]
)
target_extent[0] *= 0.9
target_extent[1] *= 0.9
best_sampling, best_addition, best_collisions = None, -1e6, None
for _ in range(state.max_table_sampling_trials):
sampling = {}
sampled_object_ids = [
state.table_objects[i]
for i in np.random.choice(
len(state.table_objects), num_objects, replace=False
)
]
for soi in sampled_object_ids:
horizontal_location = target_center[:2] + np.random.uniform(
-target_extent[:2] * 0.5, target_extent[:2] * 0.5
)
vertical_location = target_center[2] + target_extent[2]
sampling[soi] = dict(
center=list(horizontal_location) + [vertical_location],
extent=state.mesh_extents[soi],
)
addition, collisions = compute_addition_from_collision(
state, objs_aabb, sampling
)
if addition > best_addition:
best_addition = addition
best_sampling = sampling
best_collisions = collisions
if best_addition < state.worst_table_addition:
print(f"Best addition: {best_addition}, collisions: {best_collisions}")
return None
for obj_id in best_collisions:
state.sim.del_obj(obj_id)
print(f"del {obj_id}")
for obj_id in best_sampling:
loc = best_sampling[obj_id]["center"]
rot = [0.0, float(np.random.uniform(0, 360)), 0.0]
state.sim.add_obj(f"{obj_id.split('.')[-1]}_{random_uuid()}", obj_id, loc, rot)
print(f"add {obj_id}, {loc}, {rot}")
def sample_random_placement(state):
objs_aabb = state.sim.get_obj_aabb()["outputs"]
for floor_id, num_objects in state.floors:
sample_floor_objects(state, floor_id, num_objects, objs_aabb)
for table_id, num_objects in state.tables:
clear_table_objects(state, table_id, objs_aabb)
sample_table_objects(state, table_id, num_objects, objs_aabb)
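# Hedged end-to-end sketch of the placement workflow above. It assumes a
# simulator reachable via LychSim() with default arguments, and that the
# relevant actors are selected in the editor when each add_selection_as_*
# call runs (re-select between the two calls in practice); all other names
# are defined in this file.
def _demo_random_placement():
    state = EasyDict(sim=LychSim(), floors=set(), tables=set())
    init_sampling_params(state)                   # asset lists + thresholds
    add_selection_as_floor(state, num_objects=3)  # uses the editor selection
    add_selection_as_table(state, num_objects=4)
    sample_random_placement(state)                # clear tables, then sample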
def get_random_camera_rotations(state):
def sample_rotation():
pitch = float(np.random.uniform(state.min_pitch, state.max_pitch))
yaw = float(np.random.uniform(0, 360))
roll = 0.0
return [pitch, yaw, roll]
return [sample_rotation() for _ in range(state.random_viewpoints_per_location)]
def get_random_camera_rotations_fixed_yaw(state):
yaw_list = np.arange(0, 360, 60)
def sample_rotation(i):
pitch = 0.0
yaw = yaw_list[i]
roll = 0.0
return [pitch, yaw, roll]
return [sample_rotation(i) for i in range(len(yaw_list))]
def add_random_camera_height_offset(loc, state):
offset = float(
np.random.uniform(
-state.random_camera_height_offset, state.random_camera_height_offset
)
)
new_loc = loc.copy()
new_loc[2] += offset
return new_loc
def set_camera_location_and_rotation(scene_state, cam_loc_final, cam_rot):
cam_id = scene_state.cam_id
sim = scene_state.sim
sim.set_cam_loc(cam_id, cam_loc_final)
sim.set_cam_rot(cam_id, cam_rot)
def save_state(scene_state):
    # Build a JSON-serializable copy of the state (named so the local dict
    # does not shadow this function)
    serializable_state = {}
    for k in scene_state:
        if isinstance(scene_state[k], LychSim):
            serializable_state[k] = str(type(scene_state[k]))
        elif isinstance(scene_state[k], set):
            serializable_state[k] = list(scene_state[k])
        else:
            serializable_state[k] = scene_state[k]
    save_path = os.path.join(scene_state.save_path, scene_state.scene_name)
    os.makedirs(save_path, exist_ok=True)
    with open(os.path.join(save_path, "state.json"), "w") as f:
        json.dump(serializable_state, f, indent=4)
def capture_and_save(scene_state, view_name, camera_warmup_steps=10):
scene_output_path = os.path.join(
scene_state.save_path, scene_state.scene_name, view_name
)
os.makedirs(scene_output_path, exist_ok=True)
scene_state.sim.warmup_cam(scene_state.cam_id, camera_warmup_steps)
image = scene_state.sim.get_cam_lit(scene_state.cam_id)
image.save(os.path.join(scene_output_path, "lit.png"))
seg = scene_state.sim.get_cam_seg(scene_state.cam_id)
seg.save(os.path.join(scene_output_path, "seg.png"))
depth = scene_state.sim.get_cam_depth(scene_state.cam_id)
np.save(os.path.join(scene_output_path, "depth.npy"), depth)
normal = scene_state.sim.get_cam_normal(scene_state.cam_id)
normal.save(os.path.join(scene_output_path, "normal.png"))
annots_obj = scene_state.sim.get_obj_annots()
with open(os.path.join(scene_output_path, "object_annots.json"), "w") as f:
json.dump(annots_obj, f)
annots_cam = scene_state.sim.get_cam_annots(scene_state.cam_id)
fov = annots_cam["outputs"]["fov"]
w = annots_cam["outputs"]["width"]
h = annots_cam["outputs"]["height"]
fovx = np.deg2rad(fov)
fx = 0.5 * w / np.tan(0.5 * fovx)
fovy = 2.0 * np.arctan((h / float(w)) * np.tan(0.5 * fovx))
fy = 0.5 * h / np.tan(0.5 * fovy)
annots_cam["outputs"]["fxfycxcy"] = [fx, fy, w / 2.0, h / 2.0]
with open(os.path.join(scene_output_path, "camera_annots.json"), "w") as f:
json.dump(annots_cam, f)
scene_state.sim.clear_annot_comps()
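# The intrinsics block above derives fy by converting the horizontal FoV to a
# vertical FoV through the aspect ratio. A quick, self-contained sanity check
# of that math with illustrative numbers (90 degree FoV at 1920x1080):
def _demo_intrinsics():
    w, h, fov = 1920, 1080, 90.0
    fovx = np.deg2rad(fov)
    fx = 0.5 * w / np.tan(0.5 * fovx)  # 960.0 for a 90 degree FoV
    fovy = 2.0 * np.arctan((h / float(w)) * np.tan(0.5 * fovx))
    fy = 0.5 * h / np.tan(0.5 * fovy)  # equals fx for square pixels
    assert abs(fx - 960.0) < 1e-6 and abs(fx - fy) < 1e-6
    return [fx, fy, w / 2.0, h / 2.0]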
def capture_and_save_filter(scene_state, view_name, camera_warmup_steps=10):
scene_output_path = os.path.join(
scene_state.save_path, scene_state.scene_name, view_name
)
os.makedirs(scene_output_path, exist_ok=True)
seg = scene_state.sim.get_cam_seg(scene_state.cam_id)
seg.save(os.path.join(scene_output_path, "seg.png"))
depth = scene_state.sim.get_cam_depth(scene_state.cam_id)
np.save(os.path.join(scene_output_path, "depth.npy"), depth)
annots_obj = scene_state.sim.get_obj_annots()
with open(os.path.join(scene_output_path, "object_annots.json"), "w") as f:
json.dump(annots_obj, f)
annots_cam = scene_state.sim.get_cam_annots(scene_state.cam_id)
fov = annots_cam["outputs"]["fov"]
w = annots_cam["outputs"]["width"]
h = annots_cam["outputs"]["height"]
fovx = np.deg2rad(fov)
fx = 0.5 * w / np.tan(0.5 * fovx)
fovy = 2.0 * np.arctan((h / float(w)) * np.tan(0.5 * fovx))
fy = 0.5 * h / np.tan(0.5 * fovy)
annots_cam["outputs"]["fxfycxcy"] = [fx, fy, w / 2.0, h / 2.0]
with open(os.path.join(scene_output_path, "camera_annots.json"), "w") as f:
json.dump(annots_cam, f)
scene_state.sim.clear_annot_comps()
def capture_and_save_image(scene_state, view_name, camera_warmup_steps=10):
scene_output_path = os.path.join(
scene_state.save_path, scene_state.scene_name, view_name
)
os.makedirs(scene_output_path, exist_ok=True)
scene_state.sim.warmup_cam(scene_state.cam_id, camera_warmup_steps)
image = scene_state.sim.get_cam_lit(scene_state.cam_id)
image.save(os.path.join(scene_output_path, "lit.png"))
def visualize_bbox(img, corners_2d, edges, color=(255, 255, 0, 255), thickness=2):
for i, j in edges:
pt1 = (int(corners_2d[i, 0]), int(corners_2d[i, 1]))
pt2 = (int(corners_2d[j, 0]), int(corners_2d[j, 1]))
cv2.line(img, pt1, pt2, color, thickness)
plt.imshow(img)
return img
def draw_bbox_3d(img, center, extent, c2w, fov):
    # Accept either a PIL image or a numpy array; always work on a copy
    vis_img = np.array(img).copy()
    corners, edges = get_bbox3d(center=center, extent=extent)
    pts2d, in_front = project_3d_to_2d(corners, c2w, fov, 1920, 1080)
    vis_img = visualize_bbox(vis_img, pts2d, edges, color=(0, 255, 0, 255))
    return Image.fromarray(vis_img)
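# Hedged usage sketch for the bbox-overlay helpers: given a lit capture plus
# an object AABB, the camera-to-world matrix, and the horizontal FoV (all
# recoverable from the annotation files written by capture_and_save), overlay
# the projected 3D box. The argument names here are assumptions.
def _demo_draw_bbox(lit_image, obj_aabb, c2w, fov):
    return draw_bbox_3d(
        lit_image,                 # PIL image or numpy array
        center=obj_aabb["center"],
        extent=obj_aabb["extent"],
        c2w=c2w,                   # 4x4 camera-to-world matrix
        fov=fov,                   # horizontal FoV in degrees
    )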
def random_uuid(length=4):
return "".join(
np.random.choice(list("abcdefghijklmnopqrstuvwxyz0123456789"), size=length)
)
class CameraPositionEvaluator:
    """
    Camera-position quality evaluator: decides whether a viewpoint's depth map
    and segmentation mask meet quality requirements.
    Score weighting: depth 40% + segmentation 60%.
    Segmentation requirements (very strict):
    - ⚠️ Fewer than 6 objects in total: segmentation score is 0, always fails.
    - ⚠️ Any object covering >50% of the frame: segmentation score is 0, always fails.
    - >=20 objects is full marks; 12-20 earns partial credit.
    - At least 6 small objects (coverage <5% each) are required.
    - An average object coverage of 2-8% is ideal.
    - The ideal range for the largest object's coverage is 10%-30%.
    Depth outlier detection strategy (extremely strict):
    - Automatically filters common invalid depth values (65504, 65535, 0, ...).
    - ⚠️ If valid depth covers <90% of pixels (i.e. >10% invalid), the depth score is 0, always fails.
    - Detects depth uniformity: overly concentrated depth (e.g. a single wall) is penalized.
    - Adaptive: if depth varies enough (high std/entropy, i.e. objects in front of the wall), the concentration requirement is relaxed.
    - Uses the median rather than the mean for ratios (more robust to extreme values).
    - Max depth / median ratio: catches a single extreme outlier.
    - Outlier fraction: catches many abnormally large depths (open outdoor areas).
    """
    def __init__(self, threshold: float = 0.6, background_color: Tuple[int, int, int] = (0, 0, 0)):
        """
        Args:
            threshold: pass/fail threshold in [0, 1], default 0.6
            background_color: RGB value of the background color, default black (0, 0, 0)
        """
        self.threshold = threshold
        self.depth_weight = 0.4  # depth weight
        self.seg_weight = 0.6  # segmentation weight
        self.background_color = background_color
    def evaluate(self, depth_map: np.ndarray, seg_mask: np.ndarray) -> Dict:
        """
        Evaluate whether a camera position is acceptable.
        Args:
            depth_map: depth map (H, W), in meters
            seg_mask: segmentation mask (H, W, 4) of RGBA colors, formatted (r, g, b, 255)
        Returns:
            dict with the evaluation result:
            {
                'is_qualified': bool,   # whether the view passes
                'score': float,         # total score, 0-1
                'depth_score': float,   # depth score
                'seg_score': float,     # segmentation score
                'details': dict         # detailed metrics
            }
        """
        # Validate the segmentation mask shape
        if len(seg_mask.shape) != 3 or seg_mask.shape[2] != 4:
            raise ValueError(f"Segmentation mask should have shape (H, W, 4), got {seg_mask.shape}")
        # Depth evaluation
        depth_metrics = self._evaluate_depth(depth_map)
        depth_score = self._score_depth(depth_metrics)
        # Segmentation evaluation
        seg_metrics = self._evaluate_segmentation(seg_mask)
        seg_score = self._score_segmentation(seg_metrics)
        # Combined score (depth 40%, segmentation 60%)
        total_score = (depth_score * self.depth_weight +
                       seg_score * self.seg_weight)
        # Pass/fail decision
        is_qualified = total_score >= self.threshold
        return {
            'is_qualified': is_qualified,
            'score': round(total_score, 3),
            'depth_score': round(depth_score, 3),
            'seg_score': round(seg_score, 3),
            'details': {
                'depth': depth_metrics,
                'segmentation': seg_metrics
            }
        }
    def _evaluate_depth(self, depth_map: np.ndarray) -> Dict[str, float]:
        """Evaluate depth-map statistics."""
        # Common markers for invalid depth values
        INVALID_DEPTH_VALUES = [65504.0, 65535.0, 0.0]
        # Filter out invalid depth values
        valid_mask = depth_map > 0
        for invalid_val in INVALID_DEPTH_VALUES:
            valid_mask = valid_mask & (np.abs(depth_map - invalid_val) > 1.0)
        valid_depth = depth_map[valid_mask]
        if len(valid_depth) == 0:
            return {
                'coverage': 0.0,
                'range_mean_ratio': 0.0,
                'std_mean_ratio': 0.0,
                'entropy': 0.0,
                'max_depth': 0.0,
                'far_pixel_ratio': 0.0,
                'max_median_ratio': 0.0,
                'outlier_ratio': 0.0,
                'valid_depth_ratio': 0.0,
                'depth_concentration': 0.0
            }
        # 1. Valid-depth coverage: fraction of pixels with truly valid depth
        valid_depth_ratio = len(valid_depth) / depth_map.size
        # 2. Depth coverage (kept for backward compatibility)
        coverage = valid_depth_ratio
        # 3. Depth range / mean ratio
        depth_range = float(np.max(valid_depth) - np.min(valid_depth))
        mean_depth = float(np.mean(valid_depth))
        range_mean_ratio = depth_range / mean_depth if mean_depth > 0 else 0.0
        # 4. Depth standard deviation / mean ratio
        std_depth = float(np.std(valid_depth))
        std_mean_ratio = std_depth / mean_depth if mean_depth > 0 else 0.0
        # 5. Depth histogram entropy
        hist, _ = np.histogram(valid_depth, bins=20)
        hist = hist / hist.sum()
        hist = hist[hist > 0]
        entropy = -np.sum(hist * np.log(hist))
        # 6. Maximum depth value
        max_depth = float(np.max(valid_depth))
        # 7. Median-based detection of far pixels
        median_depth = float(np.median(valid_depth))
        # Fraction of far pixels
        far_threshold = median_depth * 5.0
        far_pixels = valid_depth > far_threshold
        far_pixel_ratio = float(np.sum(far_pixels) / len(valid_depth))
        # 8. Max depth / median ratio
        max_median_ratio = max_depth / median_depth if median_depth > 0 else 0.0
        # 9. Outlier fraction
        percentile_75 = float(np.percentile(valid_depth, 75))
        outlier_threshold = percentile_75 * 10.0
        outliers = valid_depth > outlier_threshold
        outlier_ratio = float(np.sum(outliers) / len(valid_depth))
        # 10. Depth concentration: detect overly uniform depth (e.g. mostly a single wall)
        # Fraction of pixels within +/-15% of the median
        median_threshold_low = median_depth * 0.85
        median_threshold_high = median_depth * 1.15
        concentrated_pixels = (valid_depth >= median_threshold_low) & (valid_depth <= median_threshold_high)
        depth_concentration = float(np.sum(concentrated_pixels) / len(valid_depth))
        return {
            'coverage': float(coverage),
            'range_mean_ratio': float(range_mean_ratio),
            'std_mean_ratio': float(std_mean_ratio),
            'entropy': float(entropy),
            'max_depth': float(max_depth),
            'far_pixel_ratio': float(far_pixel_ratio),
            'max_median_ratio': float(max_median_ratio),
            'outlier_ratio': float(outlier_ratio),
            'valid_depth_ratio': float(valid_depth_ratio),
            'depth_concentration': float(depth_concentration)
        }
    def _evaluate_segmentation(self, seg_mask: np.ndarray) -> Dict[str, float]:
        """
        Evaluate segmentation-mask statistics.
        Args:
            seg_mask: segmentation mask (H, W, 4), RGBA format
        """
        # Extract the RGB channels (ignore the alpha channel)
        rgb_mask = seg_mask[:, :, :3]
        # Reshape to (H*W, 3) for processing
        h, w = rgb_mask.shape[:2]
        total_pixels = h * w
        rgb_flat = rgb_mask.reshape(-1, 3)
        # Count pixels per unique color (vectorized; a per-pixel Python loop
        # is far too slow on full-resolution masks)
        unique_colors, counts = np.unique(rgb_flat, axis=0, return_counts=True)
        color_counts = {
            tuple(int(c) for c in color): int(n)
            for color, n in zip(unique_colors, counts)
        }
        # Drop the background color
        color_counts.pop(self.background_color, None)
        # Number of unique colors (objects)
        num_objects = len(color_counts)
        if num_objects == 0:
            return {
                'num_objects': 0,
                'num_small_objects': 0,
                'max_coverage': 0.0,
                'min_coverage': 0.0,
                'avg_coverage': 0.0,
                'has_large_object': False,
                'color_distribution': {}
            }
        # Per-object coverage
        coverages = []
        small_object_threshold = 0.05  # objects covering <5% count as small
        large_object_threshold = 0.5   # objects covering >50% count as large
        num_small_objects = 0
        has_large_object = False
        color_distribution = {}
        for color, count in color_counts.items():
            coverage = count / total_pixels
            coverages.append(coverage)
            # Count small objects
            if coverage < small_object_threshold:
                num_small_objects += 1
            # Detect large objects
            if coverage > large_object_threshold:
                has_large_object = True
            # Record the color distribution (optional, for debugging)
            color_str = f"RGB{color}"
            color_distribution[color_str] = round(coverage, 4)
        # Sort by coverage so the distribution is easy to read
        color_distribution = dict(sorted(color_distribution.items(),
                                         key=lambda x: x[1], reverse=True))
        return {
            'num_objects': float(num_objects),
            'num_small_objects': float(num_small_objects),
            'max_coverage': float(max(coverages)) if coverages else 0.0,
            'min_coverage': float(min(coverages)) if coverages else 0.0,
            'avg_coverage': float(np.mean(coverages)) if coverages else 0.0,
            'has_large_object': has_large_object,     # large-object flag
            'color_distribution': color_distribution  # color distribution info
        }
    def _score_segmentation(self, metrics: Dict[str, float]) -> float:
        """Segmentation score (0-1): strict on object counts and small-object counts, penalizes very large objects."""
        num_objects = metrics['num_objects']
        num_small_objects = metrics['num_small_objects']
        max_coverage = metrics['max_coverage']
        # Hard requirement 1: fewer than 6 objects fails outright
        if num_objects < 6:
            return 0.0
        # Hard requirement 2: any object covering more than 50% fails outright
        if max_coverage > 0.5:
            return 0.0
        score = 0.0
        # Object-count score (partial credit 12-20, full marks at >=20) - weight 30%
        if num_objects >= 20:
            score += 0.3
        elif num_objects >= 12:
            # Linear from 0.15 at 12 objects to 0.3 at 20 (keeps the score
            # continuous with the 6-12 branch below)
            score += 0.15 + ((num_objects - 12) / 8) * 0.15
        else:
            # Reduced credit between 6 and 12 objects
            score += ((num_objects - 6) / 6) * 0.15
        # Small-object count score (full marks at >=6, proportional below) - weight 30%
        if num_small_objects >= 6:
            score += 0.3
        else:
            score += (num_small_objects / 6) * 0.3
        # Largest-object coverage score (ideal range 10%-30%) - weight 20%
        # The hard 50% gate is handled above; this grades the 30%-50% band
        if max_coverage <= 0.1:
            # Too small is also undesirable (overly fragmented segmentation)
            score += max_coverage / 0.1 * 0.1
        elif max_coverage <= 0.3:
            # 10%-30% is the ideal range
            score += 0.2
        else:
            # Linear falloff between 30% and 50%
            score += (0.5 - max_coverage) / 0.2 * 0.2
        # Smallest-object coverage (at least 0.3%) - weight 10%
        min_coverage = metrics['min_coverage']
        if min_coverage >= 0.003:
            score += 0.1
        else:
            score += min_coverage / 0.003 * 0.1
        # Average object coverage (2-8% ideal; many objects means each should be small) - weight 10%
        avg_coverage = metrics['avg_coverage']
        if 0.02 <= avg_coverage <= 0.08:
            score += 0.1
        elif avg_coverage < 0.02:
            score += avg_coverage / 0.02 * 0.1
        else:
            score += max(0, (1 - (avg_coverage - 0.08) / 0.12)) * 0.1
        return min(score, 1.0)
    def _score_depth(self, metrics: Dict[str, float]) -> float:
        """Depth score (0-1): strictly penalizes invalid values and single-depth scenes."""
        # Strict check on the valid-depth fraction: >10% invalid fails outright
        valid_depth_ratio = metrics['valid_depth_ratio']
        if valid_depth_ratio < 0.9:
            # Valid depth below 90% (i.e. more than 10% invalid): score 0
            return 0.0
        score = 0.0
        # Valid-depth coverage score (>98% is good) - weight 15%
        if valid_depth_ratio >= 0.98:
            score += 0.15
        else:
            # Linear between 90% and 98%
            score += ((valid_depth_ratio - 0.9) / 0.08) * 0.15
        # Depth range / mean score (0.5-2.0 ideal) - weight 10%
        range_mean_ratio = metrics['range_mean_ratio']
        if 0.5 <= range_mean_ratio <= 2.0:
            score += 0.1
        elif range_mean_ratio < 0.5:
            score += range_mean_ratio / 0.5 * 0.1
        else:
            score += max(0, (1 - (range_mean_ratio - 2.0) / 3.0)) * 0.1
        # Depth std / mean score (0.2-0.6 ideal) - weight 10%
        std_mean_ratio = metrics['std_mean_ratio']
        if 0.2 <= std_mean_ratio <= 0.6:
            score += 0.1
        elif std_mean_ratio < 0.2:
            score += std_mean_ratio / 0.2 * 0.1
        else:
            score += max(0, (1 - (std_mean_ratio - 0.6) / 0.6)) * 0.1
        # Depth histogram entropy score (higher is better) - weight 10%
        entropy = metrics['entropy']
        max_entropy = 3.0
        score += min(entropy / max_entropy, 1.0) * 0.1
        # Depth concentration penalty - weight 15% (detects single-depth scenes such as a wall)
        depth_concentration = metrics['depth_concentration']
        # High std/entropy suggests objects are present, so relax the concentration requirement
        has_variation = (std_mean_ratio >= 0.25) or (entropy >= 2.0)
        if has_variation:
            # Enough depth variation (objects in front of the wall): lenient requirement
            if depth_concentration <= 0.6:
                score += 0.15
            elif depth_concentration <= 0.8:
                score += (0.8 - depth_concentration) / 0.2 * 0.15
            else:
                score += 0.05  # even with variation, excessive concentration loses points
        else:
            # Insufficient depth variation: strict concentration requirement
            if depth_concentration <= 0.5:
                score += 0.15
            elif depth_concentration <= 0.7:
                score += (0.7 - depth_concentration) / 0.2 * 0.15
            else:
                # Concentration >70% with no variation: heavy penalty
                score += 0.0
        # Max depth / median ratio penalty - weight 20%
        max_median_ratio = metrics['max_median_ratio']
        if max_median_ratio <= 5.0:
            score += 0.2
        elif max_median_ratio <= 10.0:
            score += (10.0 - max_median_ratio) / 5.0 * 0.2
        else:
            penalty = max(0, 1 - (max_median_ratio - 10.0) / 50.0)
            score += penalty * 0.2
        # Outlier-fraction penalty - weight 20%
        outlier_ratio = metrics['outlier_ratio']
        if outlier_ratio <= 0.01:
            score += 0.2
        elif outlier_ratio <= 0.05:
            score += (0.05 - outlier_ratio) / 0.04 * 0.2
        else:
            penalty = max(0, 1 - (outlier_ratio - 0.05) / 0.15)
            score += penalty * 0.2
        return min(score, 1.0)
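# Hedged, self-contained sketch of the evaluator on synthetic inputs: a depth
# ramp (plenty of variation, no invalid markers) and a striped RGBA mask with
# 24 distinct object colors. The exact pass/fail outcome depends on the
# thresholds above; this mainly illustrates the input formats and result keys.
def _demo_camera_evaluator():
    h, w = 120, 160
    depth = np.linspace(2.0, 10.0, h * w, dtype=np.float32).reshape(h, w)
    seg = np.zeros((h, w, 4), dtype=np.uint8)
    seg[..., 3] = 255
    for i in range(24):  # 24 vertical color stripes, each a distinct "object"
        seg[:, i * (w // 24):(i + 1) * (w // 24), 0] = 10 * (i + 1)
    result = CameraPositionEvaluator(threshold=0.6).evaluate(depth, seg)
    print(result['is_qualified'], result['score'])
    return result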
@dataclass
class CameraConfig:
    """Camera configuration: a single place to manage the camera collision-box parameters."""
    width: float = 40.0
    height: float = 40.0
    depth: float = 40.0
    @property
    def size(self) -> List[float]:
        return [self.width, self.height, self.depth]
    @property
    def half_extents(self) -> List[float]:
        return [self.width / 2, self.height / 2, self.depth / 2]
# Global default camera configuration
DEFAULT_CAMERA = CameraConfig()
def compute_aabb_from_vertices(vertices):
    """
    Compute the center and half-extent of the AABB (axis-aligned bounding box) of a vertex set.
    Args:
        vertices: (N, 3) array of object vertices
    Returns:
        dict: {
            'center': (3,) array,
            'extent': (3,) array (half-extent),
            'radius': float (bounding-sphere radius, for fast rejection)
        }
    """
    min_point = vertices.min(axis=0)
    max_point = vertices.max(axis=0)
    center = (min_point + max_point) / 2
    extent = (max_point - min_point) / 2
    # Bounding-sphere radius (used for fast rejection)
    radius = np.linalg.norm(extent)
    return {
        'center': center,
        'extent': extent,
        'radius': radius,
        'min': min_point,
        'max': max_point
    }
def estimate_aabb_distance(aabb1_info, aabb2_info):
    """
    Estimate the distance between two AABBs.
    Uses the center distance minus both bounding-sphere radii as a lower bound.
    Args:
        aabb1_info, aabb2_info: AABB info dicts
    Returns:
        float: estimated minimum distance (negative means possible overlap)
    """
    center_dist = np.linalg.norm(aabb2_info['center'] - aabb1_info['center'])
    return center_dist - (aabb1_info['radius'] + aabb2_info['radius'])
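# Hedged numeric sketch: compute_aabb_from_vertices reduces a vertex set to an
# AABB, and estimate_aabb_distance gives a conservative lower bound on the gap
# between two such boxes (negative means the bounding spheres already overlap).
def _demo_aabb_distance():
    box_a = compute_aabb_from_vertices(np.array([[0.0, 0, 0], [2, 2, 2]]))
    box_b = compute_aabb_from_vertices(np.array([[10.0, 0, 0], [12, 2, 2]]))
    dist = estimate_aabb_distance(box_a, box_b)  # 10 - 2*sqrt(3) ~ 6.54
    assert dist <= 10.0  # never exceeds the true center-to-center distance
    return dist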
def create_camera_aabb_vertices(position, camera_config=None):
    """
    Create the AABB vertices of the camera collision box.
    Args:
        position: [x, y, z] camera center position
        camera_config: CameraConfig instance; None uses the default config
    Returns:
        (8, 3) array: the 8 corner coordinates
    """
    if camera_config is None:
        camera_config = DEFAULT_CAMERA
    x, y, z = position
    w, h, d = camera_config.half_extents
    # Build the 8 corners of the AABB
    vertices = np.array([
        [x - w, y - h, z - d],  # 0: bottom face, lower-left
        [x + w, y - h, z - d],  # 1: bottom face, lower-right
        [x + w, y + h, z - d],  # 2: bottom face, upper-right
        [x - w, y + h, z - d],  # 3: bottom face, upper-left
        [x - w, y - h, z + d],  # 4: top face, lower-left
        [x + w, y - h, z + d],  # 5: top face, lower-right
        [x + w, y + h, z + d],  # 6: top face, upper-right
        [x - w, y + h, z + d],  # 7: top face, upper-left
    ])
    return vertices
def check_camera_collision(camera_position,
                           object_vertices_list,
                           camera_config=None,
                           check_nearest=10,
                           collision_threshold=0.0,
                           use_improved_search=True):
    """
    Check whether a camera position collides with any object in the scene.
    Args:
        camera_position: [x, y, z] camera position
        object_vertices_list: list of (N, 3) arrays, vertices of every object in the scene
        camera_config: CameraConfig instance; None uses the default config
        check_nearest: how many of the nearest objects to check precisely
        collision_threshold: IoU collision threshold; default 0 (any overlap counts as a collision)
        use_improved_search: whether to use the improved search method
    Returns:
        dict: {
            'collision': bool,
            'colliding_indices': list,
            'collision_ious': list,   # IoU of each collision
            'nearest_indices': list,
            'nearest_distances': list,
            'checked_count': int
        }
    """
    if camera_config is None:
        camera_config = DEFAULT_CAMERA
    # Build the camera AABB
    camera_center = np.array(camera_position)
    camera_extent = np.array(camera_config.half_extents)
    # Precompute AABB info for every object
    object_aabb_infos = [compute_aabb_from_vertices(verts)
                         for verts in object_vertices_list]
    if use_improved_search:
        # Improved method: estimate distances via bounding spheres
        camera_aabb_info = {
            'center': camera_center,
            'extent': camera_extent,
            'radius': np.linalg.norm(camera_extent)
        }
        distances = []
        for i, aabb_info in enumerate(object_aabb_infos):
            # Bounding-sphere distance as a cheap estimate
            dist = estimate_aabb_distance(camera_aabb_info, aabb_info)
            distances.append((dist, i))
        # Sort by distance
        distances.sort(key=lambda x: x[0])
        # Pick the nearest objects for precise checking
        indices_to_check = [idx for _, idx in distances[:check_nearest]]
        nearest_distances = [dist for dist, _ in distances[:check_nearest]]
    else:
        # Original method: center-point distance via a KD-tree
        object_centers = np.array([info['center'] for info in object_aabb_infos])
        kdtree = cKDTree(object_centers)
        center_distances, indices = kdtree.query(camera_position,
                                                 k=min(check_nearest, len(object_centers)))
        if not isinstance(center_distances, np.ndarray):
            center_distances = np.array([center_distances])
            indices = np.array([indices])
        indices_to_check = indices
        nearest_distances = center_distances.tolist()
    # Collision checks
    colliding_indices = []
    collision_ious = []
    checked_count = 0
    for idx in indices_to_check:
        if not 0 <= idx < len(object_aabb_infos):
            continue
        checked_count += 1
        obj_info = object_aabb_infos[idx]
        # IoU between the camera box and the object box
        iou = compute_iou(camera_center, camera_extent,
                          obj_info['center'], obj_info['extent'])
        # Note: collide() tests `iou >= thr`, which at thr=0 would flag every
        # object; a strict comparison makes thr=0 mean "any actual overlap".
        if iou > collision_threshold:
            colliding_indices.append(int(idx))
            collision_ious.append(float(iou))
    return {
        'collision': len(colliding_indices) > 0,
        'colliding_indices': colliding_indices,
        'collision_ious': collision_ious,
        'nearest_indices': [int(idx) for idx in indices_to_check],
        'nearest_distances': nearest_distances,
        'checked_count': checked_count
    }
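# Hedged sketch of the collision query with two synthetic objects: one box
# around the candidate camera position (collides) and one far away (does not).
# Coordinates use the same centimeter units as the rest of this file.
def _demo_camera_collision():
    objects = [
        np.array([[-30.0, -30, -30], [30, 30, 30]]),     # box around origin
        np.array([[500.0, 500, 500], [560, 560, 560]]),  # far-away box
    ]
    info = check_camera_collision([0.0, 0.0, 0.0], objects)
    assert info['collision'] and info['colliding_indices'] == [0]
    return info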
def compute_iou(center1, extent1, center2, extent2):
    """
    Compute the IoU of two AABBs.
    Args:
        center1, extent1: center and half-extent of the first AABB
        center2, extent2: center and half-extent of the second AABB
    Returns:
        float: IoU value (between 0 and 1)
    """
    center1, extent1 = np.array(center1), np.array(extent1)
    center2, extent2 = np.array(center2), np.array(extent2)
    min1, max1 = center1 - extent1, center1 + extent1
    min2, max2 = center2 - extent2, center2 + extent2
    inter_min = np.maximum(min1, min2)
    inter_max = np.minimum(max1, max2)
    inter_extent = np.maximum(0.0, inter_max - inter_min)
    inter_vol = np.prod(inter_extent)
    vol1, vol2 = np.prod(2 * extent1), np.prod(2 * extent2)
    union_vol = vol1 + vol2 - inter_vol
    iou = inter_vol / union_vol if union_vol > 0 else 0.0
    return iou
def compute_scene_bounds(object_vertices_list,
                         margin=30,
                         trim_percent=10,
                         camera_config=None):
    """
    Compute a bounding box containing all objects, trimming extreme vertices.
    Args:
        object_vertices_list: list of (N, 3) arrays
        margin: inward shrink of the bounds (cm)
        trim_percent: percentage of extreme vertices to trim away (0-50)
        camera_config: CameraConfig instance, used to keep the bounds large enough for the camera
    Returns:
        dict: bounds info
    """
    if camera_config is None:
        camera_config = DEFAULT_CAMERA
    # Collect all vertices
    all_vertices = np.vstack(object_vertices_list)
    total_vertices = len(all_vertices)
    # Percentiles to trim
    lower_percentile = trim_percent
    upper_percentile = 100 - trim_percent
    # Trimmed range per axis
    x_min = np.percentile(all_vertices[:, 0], lower_percentile)
    x_max = np.percentile(all_vertices[:, 0], upper_percentile)
    y_min = np.percentile(all_vertices[:, 1], lower_percentile)
    y_max = np.percentile(all_vertices[:, 1], upper_percentile)
    z_min = np.percentile(all_vertices[:, 2], lower_percentile)
    z_max = np.percentile(all_vertices[:, 2], upper_percentile)
    # Minimum extents so the bounds can at least contain the camera
    min_width = camera_config.width + 2 * margin
    min_height = camera_config.height + 2 * margin
    min_depth = camera_config.depth + 2 * margin
    # Shrink the bounds inward by the margin
    x_min += margin
    x_max -= margin
    y_min += margin
    y_max -= margin
    z_min += margin
    z_max -= margin
    # Grow the bounds back if they became too small
    if x_max - x_min < min_width:
        center_x = (x_min + x_max) / 2
        x_min = center_x - min_width / 2
        x_max = center_x + min_width / 2
    if y_max - y_min < min_height:
        center_y = (y_min + y_max) / 2
        y_min = center_y - min_height / 2
        y_max = center_y + min_height / 2
    if z_max - z_min < min_depth:
        center_z = (z_min + z_max) / 2
        z_min = center_z - min_depth / 2
        z_max = center_z + min_depth / 2
    # Center and size
    center = [(x_min + x_max) / 2, (y_min + y_max) / 2, (z_min + z_max) / 2]
    size = [x_max - x_min, y_max - y_min, z_max - z_min]
    # Count the vertices that fell outside the trimmed bounds
    trimmed_mask = (
        (all_vertices[:, 0] < x_min - margin) | (all_vertices[:, 0] > x_max + margin) |
        (all_vertices[:, 1] < y_min - margin) | (all_vertices[:, 1] > y_max + margin) |
        (all_vertices[:, 2] < z_min - margin) | (all_vertices[:, 2] > z_max + margin)
    )
    trimmed_count = trimmed_mask.sum()
    return {
        'x_min': x_min,
        'x_max': x_max,
        'y_min': y_min,
        'y_max': y_max,
        'z_min': z_min,
        'z_max': z_max,
        'center': center,
        'size': size,
        'trimmed_vertices_count': int(trimmed_count),
        'total_vertices': total_vertices,
        'trim_percent': trim_percent,
        'camera_config': camera_config
    }
def sample_positions_fixed_heights(bounds, num_samples_per_height=5, num_heights=3, min_distance=None):
    """
    Sample camera positions at fixed heights (Poisson-disk sampling in the XY plane).
    Args:
        bounds: dict, scene bounds info
        num_samples_per_height: how many positions to sample per height layer
        num_heights: how many height layers to use (default 3)
        min_distance: minimum XY distance between points (cm); None computes it automatically
    Returns:
        list of [x, y, z]: all sampled positions (plain Python floats)
    """
    # Pick the heights
    z_min = bounds['z_min']
    z_max = bounds['z_max']
    z_levels = np.linspace(z_min, z_max, num_heights + 2)
    selected_heights = z_levels[1:-1]
    print(f"Z range: [{z_min:.1f}, {z_max:.1f}] cm")
    print(f"Selected {num_heights} heights: {[f'{z:.1f}' for z in selected_heights]}")
    # Compute the minimum distance automatically if not given
    if min_distance is None:
        area = (bounds['x_max'] - bounds['x_min']) * (bounds['y_max'] - bounds['y_min'])
        avg_area_per_sample = area / num_samples_per_height
        min_distance = np.sqrt(avg_area_per_sample) * 0.8
        print(f"Auto-computed minimum distance: {min_distance:.1f} cm")
    # Sample XY positions at each height
    all_positions = []
    for i, height in enumerate(selected_heights):
        print(f"Sampling height layer {i + 1}/{num_heights}: Z={height:.1f} cm...", end=" ")
        xy_positions = sample_xy_poisson(
            bounds,
            float(height),  # cast to float
            num_samples_per_height,
            min_distance
        )
        all_positions.extend(xy_positions)
        print(f"done ({len(xy_positions)} points)")
    print(f"Total sampled points: {len(all_positions)}")
    return all_positions
def sample_xy_poisson(bounds, z_height, num_samples, min_distance):
    """
    Poisson-disk sampling in the XY plane at a fixed Z height.
    Args:
        bounds: dict, scene bounds
        z_height: the fixed Z height
        num_samples: target number of samples
        min_distance: minimum XY distance between points (cm)
    Returns:
        list of [x, y, z]: sampled positions (plain Python floats)
    """
    positions = []
    max_attempts = num_samples * 100
    attempts = 0
    # First point: chosen uniformly at random inside the bounds
    first_pos = [
        float(np.random.uniform(bounds['x_min'], bounds['x_max'])),
        float(np.random.uniform(bounds['y_min'], bounds['y_max'])),
        float(z_height)
    ]
    positions.append(first_pos)
    # Active point list
    active_list = [0]
    while len(positions) < num_samples and attempts < max_attempts:
        attempts += 1
        if len(active_list) == 0:
            break
        # Pick a random active point
        idx = np.random.randint(0, len(active_list))
        active_idx = active_list[idx]
        base_pos = np.array(positions[active_idx][:2])  # XY coordinates only
        # Try to spawn a new point around it
        found = False
        for _ in range(30):
            # Random distance between min_distance and 2*min_distance
            angle = np.random.uniform(0, 2 * np.pi)
            distance = np.random.uniform(min_distance, 2 * min_distance)
            # New candidate point (XY plane only)
            new_xy = base_pos + distance * np.array([np.cos(angle), np.sin(angle)])
            new_pos = [float(new_xy[0]), float(new_xy[1]), float(z_height)]
            # Reject candidates outside the bounds
            if not (bounds['x_min'] <= new_pos[0] <= bounds['x_max'] and
                    bounds['y_min'] <= new_pos[1] <= bounds['y_max']):
                continue
            # Check the XY distance to every existing point
            if len(positions) > 0:
                existing_xy = np.array([p[:2] for p in positions])
                distances = np.linalg.norm(existing_xy - new_xy, axis=1)
                if np.all(distances >= min_distance):
                    positions.append(new_pos)
                    active_list.append(len(positions) - 1)
                    found = True
                    break
        # Retire the active point if it cannot spawn new points
        if not found:
            active_list.pop(idx)
    # If Poisson sampling fell short, top up with plain rejection sampling
    # (bounded by max_attempts so a tight min_distance cannot loop forever)
    fill_attempts = 0
    while len(positions) < num_samples and fill_attempts < max_attempts:
        fill_attempts += 1
        candidate = [
            float(np.random.uniform(bounds['x_min'], bounds['x_max'])),
            float(np.random.uniform(bounds['y_min'], bounds['y_max'])),
            float(z_height)
        ]
        existing_xy = np.array([p[:2] for p in positions])
        candidate_xy = np.array(candidate[:2])
        distances = np.linalg.norm(existing_xy - candidate_xy, axis=1)
        if np.all(distances >= min_distance * 0.5):
            positions.append(candidate)
    return positions[:num_samples]
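# Hedged, self-contained sketch of the fixed-height sampler on a toy bounds
# dict (normally produced by compute_scene_bounds). Poisson-disk spacing is
# enforced per height layer, in the XY plane only.
def _demo_fixed_height_sampling():
    bounds = {'x_min': 0.0, 'x_max': 400.0,
              'y_min': 0.0, 'y_max': 300.0,
              'z_min': 100.0, 'z_max': 250.0}
    positions = sample_positions_fixed_heights(
        bounds, num_samples_per_height=5, num_heights=3)
    assert len(positions) <= 15 and all(len(p) == 3 for p in positions)
    return positions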
# Sample look-at cameras for placement
from math import pi, cos, sin
@dataclass
class CameraPose:
    """A camera pose: position plus the look-at target point."""
    position: List[float]
    look_at: List[float]
def _generate_points_on_sphere(num_points: int, target_center: np.ndarray, distance: float, z_min_ratio=-0.2) -> List[np.ndarray]:
    """
    Generate evenly distributed points on a sphere using a Fibonacci lattice.
    Args:
        num_points: number of points to generate.
        target_center: sphere center (i.e. the target object's center).
        distance: sphere radius (i.e. camera-to-object distance).
        z_min_ratio: minimum cosine along the vertical (Z) axis, used to restrict the
            sampling range (e.g. to avoid sampling from directly below).
            -1.0 is the full sphere, 0.0 the upper hemisphere. Default -0.2,
            reaching slightly below the equator.
    Returns:
        List of np.ndarray: point coordinates on the sphere.
    """
    points = []
    phi = pi * (3. - np.sqrt(5.))  # golden angle
    for i in range(num_points):
        # Evenly spaced in [-1, 1] (guard the division for num_points == 1)
        y = 1 - (i / float(max(num_points - 1, 1))) * 2
        # Restrict the vertical range
        if y < z_min_ratio:
            continue
        radius = np.sqrt(1 - y * y)  # circle radius at this height
        theta = phi * i  # golden-angle increment
        x = cos(theta) * radius
        z = sin(theta) * radius
        # Map the unit vector to world coordinates; the vertically constrained
        # coordinate (y) goes into the world Z slot, since Z is up everywhere
        # else in this file
        point_on_sphere = np.array([x, z, y]) * distance + target_center
        points.append(point_on_sphere)
    return points
def sample_cameras_around_targets(
    target_object_indices: List[int],
    object_vertices_list: List[np.ndarray],
    scene_bounds: Dict,
    samples_per_target: int = 20,
    dist_factor_min: float = 2.0,
    dist_factor_max: float = 3.5,
    camera_config: Optional[CameraConfig] = None,
    collision_threshold: float = 0.0
) -> List[CameraPose]:
    """
    Sample camera poses around the given target objects.
    Args:
        target_object_indices: indices of the target objects.
        object_vertices_list: vertex arrays of every object in the scene.
        scene_bounds: scene bounds info (produced by compute_scene_bounds).
        samples_per_target: how many candidate cameras to try per target.
        dist_factor_min: minimum camera-distance factor (times the object's AABB diagonal).
        dist_factor_max: maximum camera-distance factor.
        camera_config: camera configuration.
        collision_threshold: IoU threshold for collision detection.
    Returns:
        List[CameraPose]: all valid camera poses.
    """
    if camera_config is None:
        camera_config = DEFAULT_CAMERA
    print(f"Sampling around {len(target_object_indices)} target object(s)...")
    # Precompute AABB info for every object
    all_object_aabbs = [compute_aabb_from_vertices(verts) for verts in object_vertices_list]
    valid_camera_poses = []
    for target_idx in target_object_indices:
        if not 0 <= target_idx < len(all_object_aabbs):
            print(f"Warning: target index {target_idx} is out of range, skipping.")
            continue
        target_aabb = all_object_aabbs[target_idx]
        target_center = target_aabb['center']
        # Derive a suitable camera distance from the AABB diagonal length
        aabb_diagonal = np.linalg.norm(np.array(target_aabb['extent']) * 2)
        cam_distance = np.random.uniform(
            aabb_diagonal * dist_factor_min,
            aabb_diagonal * dist_factor_max
        )
        print(f"\nProcessing target object {target_idx}:")
        print(f" - center: [{target_center[0]:.1f}, {target_center[1]:.1f}, {target_center[2]:.1f}]")
        print(f" - AABB diagonal length: {aabb_diagonal:.1f} cm")
        print(f" - sampling distance: {cam_distance:.1f} cm")
        # Generate candidate points on a sphere around the target
        candidate_positions = _generate_points_on_sphere(
            samples_per_target,
            target_center,
            cam_distance
        )
        valid_count = 0
        for pos in candidate_positions:
            # 1. Reject positions outside the scene bounds
            if not (scene_bounds['x_min'] <= pos[0] <= scene_bounds['x_max'] and
                    scene_bounds['y_min'] <= pos[1] <= scene_bounds['y_max'] and
                    scene_bounds['z_min'] <= pos[2] <= scene_bounds['z_max']):
                continue
            # 2. Check for collisions against every object in the scene
            collision_info = check_camera_collision(
                camera_position=pos,
                object_vertices_list=object_vertices_list,
                camera_config=camera_config,
                collision_threshold=collision_threshold,
                use_improved_search=True  # the improved search is recommended
            )
            # Keep the pose if there is no collision
            if not collision_info['collision']:
                pose = CameraPose(
                    position=[float(p) for p in pos],
                    look_at=[float(c) for c in target_center]
                )
                valid_camera_poses.append(pose)
                valid_count += 1
        print(f" - generated {len(candidate_positions)} candidate positions, {valid_count} valid.")
    print(f"\nSampling finished. Collected {len(valid_camera_poses)} valid camera poses in total.")
    return valid_camera_poses
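# Hedged end-to-end sketch of the look-at sampler: given per-object vertex
# arrays (how they are fetched from the simulator is left as an assumption),
# compute trimmed scene bounds and sample collision-free cameras around a
# chosen target object; everything called here is defined in this file.
def _demo_lookat_sampling(object_vertices_list):
    bounds = compute_scene_bounds(object_vertices_list, margin=30, trim_percent=10)
    poses = sample_cameras_around_targets(
        target_object_indices=[0],
        object_vertices_list=object_vertices_list,
        scene_bounds=bounds,
        samples_per_target=20,
    )
    for pose in poses:
        print(pose.position, "->", pose.look_at)
    return poses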