# multi_points_pano2world.py
# Back-projects panoramic RGB-D pairs into world-space point clouds and
# merges the per-sweep .ply outputs into a single cloud.
  1. import os
  2. import json
  3. from tqdm import tqdm
  4. import cv2
  5. import numpy as np
  6. import open3d as o3d
  7. import torch
  8. from camera_spherical import Intrinsic_Spherical
  9. from tqdm import tqdm
  10. import open3d as o3d
  11. import numpy as np
  12. import glob
  13. import os
  14. import pathlib
  15. def merge_ply_files(directory_path, output_filename):
  16. directory_path = pathlib.Path(directory_path)
  17. ply_files = list(directory_path.rglob('*.ply'))
  18. if not ply_files:
  19. print(f"⚠️ 警告:在目录 '{directory_path}' 中没有找到任何 .ply 文件。")
  20. return None
  21. # 初始化一个空的点云对象用于存储合并后的数据
  22. combined_cloud = o3d.geometry.PointCloud()
  23. print(f"--- 找到 {len(ply_files)} 个 .ply 文件,开始合并... ---")
  24. for i, file_path in enumerate(ply_files):
  25. print(f"✅ 正在读取文件 {i + 1}/{len(ply_files)}: {os.path.basename(file_path)}")
  26. try:
  27. # 1. 读取点云文件
  28. current_cloud = o3d.io.read_point_cloud(file_path)
  29. # current_cloud = current_cloud.voxel_down_sample(voxel_size=0.03)
  30. # 2. 合并点云数据 (使用 '+' 运算符实现简单的连接)
  31. combined_cloud += current_cloud
  32. except Exception as e:
  33. print(f"❌ 读取文件 {file_path} 时发生错误: {e}")
  34. continue
  35. # 3. 可选:保存合并后的点云文件
  36. if len(combined_cloud.points) > 0:
  37. o3d.io.write_point_cloud(output_filename, combined_cloud)
  38. print(f"\n🎉 合并完成!总共包含 {len(combined_cloud.points)} 个点。")
  39. print(f"文件已保存为: {output_filename}")
  40. # 4. 可选:显示合并后的点云
  41. # o3d.visualization.draw_geometries([combined_cloud])
  42. return combined_cloud
  43. else:
  44. print("\n⚠️ 警告:合并后的点云为空,没有数据可保存。")
  45. return None
  46. def rgbd_vis(image0, depthmap0, save_ply_path, pose, vis=False):
  47. mask_flatten = (depthmap0 > 0.02).flatten()
  48. depthmap0 = torch.from_numpy(depthmap0)
  49. pixel_x, pixel_y = np.meshgrid(range(image0.shape[1]), range(image0.shape[0]))
  50. sph_cam = Intrinsic_Spherical(image0.shape[1], image0.shape[0])
  51. bearing = sph_cam.bearing([pixel_x, pixel_y])
  52. point_cloud_0 = [depthmap0 * bearing[0], depthmap0 * bearing[1], depthmap0 * bearing[2]]
  53. point_cloud_0 = np.transpose(
  54. [point_cloud_0[0].flatten().numpy(), point_cloud_0[1].flatten().numpy(), point_cloud_0[2].flatten().numpy()])
  55. point_cloud_0 = np.transpose(point_cloud_0)
  56. x0 = point_cloud_0[0].flatten()
  57. y0 = point_cloud_0[1].flatten()
  58. z0 = point_cloud_0[2].flatten()
  59. r0 = image0[pixel_y, pixel_x, 0].flatten()
  60. g0 = image0[pixel_y, pixel_x, 1].flatten()
  61. b0 = image0[pixel_y, pixel_x, 2].flatten()
  62. point_cloud_data0 = np.stack([x0, y0, z0, r0, g0, b0], axis=1)
  63. # open3d显示保存
  64. points = np.stack([x0, y0, z0], axis=1)
  65. color = np.stack([r0, g0, b0], axis=1) / 255.0
  66. points = points[mask_flatten]
  67. color = color[mask_flatten]
  68. pc = o3d.geometry.PointCloud()
  69. pc.points = o3d.utility.Vector3dVector(points)
  70. pc.colors = o3d.utility.Vector3dVector(color)
  71. if pose:
  72. # Apply 180-degree rotation around Z-axis locally first
  73. R_z_180 = np.array([[-1, 0, 0],
  74. [0, -1, 0],
  75. [0, 0, 1]])
  76. T_z_180 = np.eye(4)
  77. T_z_180[:3, :3] = R_z_180
  78. pc.transform(T_z_180)
  79. # Then apply the pose transformation from the file
  80. rotation_q = pose['rotation']
  81. translation_v = pose['translation']
  82. quat = np.array([rotation_q['w'], rotation_q['x'], rotation_q['y'], rotation_q['z']])
  83. rotation_m = o3d.geometry.get_rotation_matrix_from_quaternion(quat)
  84. trans = np.eye(4)
  85. trans[:3, :3] = rotation_m
  86. trans[:3, 3] = [translation_v['x'], translation_v['y'], translation_v['z']]
  87. pc.transform(trans)
  88. pc = pc.voxel_down_sample(voxel_size=0.03)
  89. o3d.io.write_point_cloud(save_ply_path, pc, write_ascii=True)
  90. if vis:
  91. o3d.visualization.draw_geometries([pc])
  92. if __name__ == '__main__':
  93. pano_folder = "pano"
  94. rgb_folder = os.path.join(pano_folder, 'rgb')
  95. depth_folder = os.path.join(pano_folder, 'depth')
  96. ply_folder = os.path.join(pano_folder, 'ply')
  97. json_path = os.path.join(pano_folder, 'inf.json')
  98. merge_ply_name = os.path.join(pano_folder, "merge.ply")
  99. with open(json_path, 'r') as f:
  100. pose_data = json.load(f)
  101. pose_lookup = {item['uuid']: item['pose'] for item in pose_data['sweepLocations']}
  102. os.makedirs(ply_folder, exist_ok=True)
  103. rgb_names = os.listdir(rgb_folder)
  104. for rgb_name in tqdm(rgb_names):
  105. flag = rgb_name.split('.')[0]
  106. pose = pose_lookup.get(flag)
  107. if not pose:
  108. print(f"Warning: Pose not found for {rgb_name}. Skipping transformation.")
  109. depth_name = flag + ".png"
  110. rgb_name_path = os.path.join(rgb_folder, rgb_name)
  111. save_ply_path = os.path.join(ply_folder, f"{flag}.ply")
  112. depth_name_path = os.path.join(depth_folder, depth_name)
  113. image0 = cv2.imread(rgb_name_path)
  114. image0 = cv2.resize(image0, (2048, 1024))
  115. image0 = cv2.cvtColor(image0, cv2.COLOR_BGR2RGB)
  116. depthmap0 = (cv2.imread(depth_name_path, cv2.IMREAD_UNCHANGED)) / 256.
  117. rgbd_vis(image0, depthmap0, save_ply_path, pose, vis=False)
  118. merge_ply_files(ply_folder, merge_ply_name)