import glob
import json
import os
import pathlib

import cv2
import numpy as np
import open3d as o3d
import torch
from tqdm import tqdm

from camera_spherical import Intrinsic_Spherical
def merge_ply_files(directory_path, output_filename):
    """Recursively collect every ``.ply`` file under *directory_path* and
    merge them into a single point cloud.

    Parameters
    ----------
    directory_path : str | pathlib.Path
        Root directory searched recursively for ``*.ply`` files.
    output_filename : str
        Path the merged cloud is written to.

    Returns
    -------
    o3d.geometry.PointCloud | None
        The merged cloud, or ``None`` when no files were found or every
        file failed to load / was empty.
    """
    directory_path = pathlib.Path(directory_path)
    # Sort so the merge order (and therefore the output file) is
    # deterministic across runs and filesystems.
    ply_files = sorted(directory_path.rglob('*.ply'))
    if not ply_files:
        print(f"⚠️ 警告:在目录 '{directory_path}' 中没有找到任何 .ply 文件。")
        return None
    # Empty accumulator cloud; '+=' concatenates points/colors below.
    combined_cloud = o3d.geometry.PointCloud()
    print(f"--- 找到 {len(ply_files)} 个 .ply 文件,开始合并... ---")
    for i, file_path in enumerate(ply_files):
        print(f"✅ 正在读取文件 {i + 1}/{len(ply_files)}: {file_path.name}")
        try:
            # str(): older Open3D releases reject os.PathLike arguments.
            current_cloud = o3d.io.read_point_cloud(str(file_path))
            combined_cloud += current_cloud
        except Exception as e:
            # Best-effort merge: one corrupt file must not abort the run.
            print(f"❌ 读取文件 {file_path} 时发生错误: {e}")
            continue
    if len(combined_cloud.points) > 0:
        o3d.io.write_point_cloud(output_filename, combined_cloud)
        print(f"\n🎉 合并完成!总共包含 {len(combined_cloud.points)} 个点。")
        print(f"文件已保存为: {output_filename}")
        # Optional visual check:
        # o3d.visualization.draw_geometries([combined_cloud])
        return combined_cloud
    print("\n⚠️ 警告:合并后的点云为空,没有数据可保存。")
    return None
def rgbd_vis(image0, depthmap0, save_ply_path, pose, vis=False):
    """Back-project an equirectangular RGB-D pair into a colored point
    cloud, optionally transform it by *pose*, down-sample it, and save it
    as an ASCII ``.ply`` file.

    Parameters
    ----------
    image0 : np.ndarray
        RGB panorama, shape (H, W, 3) — assumes uint8 (colors are
        divided by 255 below).
    depthmap0 : np.ndarray
        Depth map, shape (H, W); presumably meters — confirm with the
        depth-encoding used by the caller.
    save_ply_path : str
        Destination ``.ply`` path.
    pose : dict | None
        Optional ``{'rotation': {w,x,y,z}, 'translation': {x,y,z}}``
        world pose; falsy skips the transform.
    vis : bool
        When True, open an interactive Open3D viewer after saving.
    """
    # Depths at/below 2 cm are treated as invalid and masked out.
    mask_flatten = (depthmap0 > 0.02).flatten()
    # Torch tensor so the products with sph_cam.bearing() broadcast the
    # same way the original pipeline expects (.numpy() converts back).
    depthmap0 = torch.from_numpy(depthmap0)
    pixel_x, pixel_y = np.meshgrid(range(image0.shape[1]), range(image0.shape[0]))
    sph_cam = Intrinsic_Spherical(image0.shape[1], image0.shape[0])
    # Per-pixel ray directions; scaling each by its depth yields 3D points.
    bearing = sph_cam.bearing([pixel_x, pixel_y])
    # NOTE(review): the original built an (N, 3) array, transposed it back
    # to (3, N) and re-flattened — a no-op round trip; it also computed an
    # unused point_cloud_data0. Both removed; coordinates are identical.
    x0 = (depthmap0 * bearing[0]).flatten().numpy()
    y0 = (depthmap0 * bearing[1]).flatten().numpy()
    z0 = (depthmap0 * bearing[2]).flatten().numpy()
    r0 = image0[pixel_y, pixel_x, 0].flatten()
    g0 = image0[pixel_y, pixel_x, 1].flatten()
    b0 = image0[pixel_y, pixel_x, 2].flatten()
    points = np.stack([x0, y0, z0], axis=1)
    color = np.stack([r0, g0, b0], axis=1) / 255.0
    points = points[mask_flatten]
    color = color[mask_flatten]
    pc = o3d.geometry.PointCloud()
    pc.points = o3d.utility.Vector3dVector(points)
    pc.colors = o3d.utility.Vector3dVector(color)
    if pose:
        # Apply a local 180° rotation about Z first, then the world pose
        # from the JSON file — order matters.
        R_z_180 = np.array([[-1, 0, 0],
                            [0, -1, 0],
                            [0, 0, 1]])
        T_z_180 = np.eye(4)
        T_z_180[:3, :3] = R_z_180
        pc.transform(T_z_180)
        rotation_q = pose['rotation']
        translation_v = pose['translation']
        # Open3D expects quaternions in (w, x, y, z) order.
        quat = np.array([rotation_q['w'], rotation_q['x'], rotation_q['y'], rotation_q['z']])
        rotation_m = o3d.geometry.get_rotation_matrix_from_quaternion(quat)
        trans = np.eye(4)
        trans[:3, :3] = rotation_m
        trans[:3, 3] = [translation_v['x'], translation_v['y'], translation_v['z']]
        pc.transform(trans)
    # 3 cm voxel grid keeps the per-sweep files small.
    pc = pc.voxel_down_sample(voxel_size=0.03)
    o3d.io.write_point_cloud(save_ply_path, pc, write_ascii=True)
    if vis:
        o3d.visualization.draw_geometries([pc])
if __name__ == '__main__':
    # Expected layout: pano/rgb/<uuid>.<ext>, pano/depth/<uuid>.png, and
    # pano/inf.json with sweep poses. Outputs: pano/ply/<uuid>.ply plus a
    # merged pano/merge.ply.
    pano_folder = "pano"
    rgb_folder = os.path.join(pano_folder, 'rgb')
    depth_folder = os.path.join(pano_folder, 'depth')
    ply_folder = os.path.join(pano_folder, 'ply')
    json_path = os.path.join(pano_folder, 'inf.json')
    merge_ply_name = os.path.join(pano_folder, "merge.ply")
    with open(json_path, 'r') as f:
        pose_data = json.load(f)
    # Map sweep uuid -> pose for O(1) lookup per image.
    pose_lookup = {item['uuid']: item['pose'] for item in pose_data['sweepLocations']}
    os.makedirs(ply_folder, exist_ok=True)
    rgb_names = os.listdir(rgb_folder)
    for rgb_name in tqdm(rgb_names):
        # splitext only strips the final extension, unlike split('.')[0]
        # which would truncate uuids that contain dots.
        flag = os.path.splitext(rgb_name)[0]
        pose = pose_lookup.get(flag)
        if not pose:
            # Deliberate: the cloud is still written, just untransformed.
            print(f"Warning: Pose not found for {rgb_name}. Skipping transformation.")
        depth_name = flag + ".png"
        rgb_name_path = os.path.join(rgb_folder, rgb_name)
        save_ply_path = os.path.join(ply_folder, f"{flag}.ply")
        depth_name_path = os.path.join(depth_folder, depth_name)
        image0 = cv2.imread(rgb_name_path)
        depthmap0 = cv2.imread(depth_name_path, cv2.IMREAD_UNCHANGED)
        # cv2.imread returns None (no exception) for unreadable or missing
        # files; the original crashed later in resize / division.
        if image0 is None or depthmap0 is None:
            print(f"Warning: could not read {rgb_name_path} or {depth_name_path}. Skipping.")
            continue
        image0 = cv2.resize(image0, (2048, 1024))
        image0 = cv2.cvtColor(image0, cv2.COLOR_BGR2RGB)
        # Depth stored as fixed-point value*256 — presumably meters after
        # the division; confirm against the depth exporter.
        depthmap0 = depthmap0 / 256.
        rgbd_vis(image0, depthmap0, save_ply_path, pose, vis=False)
    merge_ply_files(ply_folder, merge_ply_name)