diff --git a/.gitmodules b/.gitmodules
index 6d541e49..8b137891 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1 @@
-[submodule "ORB_SLAM3_ros"]
- path = ORB_SLAM3_ros
- url = git@github.com:joeaortiz/ORB_SLAM3_ros.git
+
diff --git a/ORB_SLAM3_ros b/ORB_SLAM3_ros
deleted file mode 160000
index 113b554c..00000000
--- a/ORB_SLAM3_ros
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 113b554cfe93033645484053f5d4f3e4849cb285
diff --git a/isdf/CMakeLists.txt b/isdf/CMakeLists.txt
deleted file mode 100644
index b58320ed..00000000
--- a/isdf/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-cmake_minimum_required(VERSION 3.0.2)
-project(isdf)
-
-find_package(catkin REQUIRED COMPONENTS
- rospy
-)
-
-catkin_package(
-# INCLUDE_DIRS include
-# LIBRARIES isdf
-# CATKIN_DEPENDS orb_slam3_ros_wrapper rospy
-# DEPENDS system_lib
-)
-
-include_directories(
-# include
- ${catkin_INCLUDE_DIRS}
-)
-
diff --git a/isdf/__init__.py b/isdf/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/isdf/datasets/dataset.py b/isdf/datasets/dataset.py
index c8cc2872..f52f2cea 100644
--- a/isdf/datasets/dataset.py
+++ b/isdf/datasets/dataset.py
@@ -315,6 +315,100 @@ def __getitem__(self, idx):
while data is None:
data = node.get_latest_frame(self.queue)
+ if data is not None:
+ image, depth, Twc = data
+
+ if self.rgb_transform:
+ image = self.rgb_transform(image)
+ if self.depth_transform:
+ depth = self.depth_transform(depth)
+
+ # undistort depth with nearest-neighbour (rather than linear) interpolation,
+ # so depth values are not blended across object boundaries
+
+ img_size = (depth.shape[1], depth.shape[0])
+ map1, map2 = cv2.initUndistortRectifyMap(
+ self.camera_matrix, self.distortion_coeffs, np.eye(3),
+ self.camera_matrix, img_size, cv2.CV_32FC1)
+ depth = cv2.remap(depth, map1, map2, cv2.INTER_NEAREST)
+
+ sample = {
+ "image": image,
+ "depth": depth,
+ "T": Twc,
+ }
+ return sample
+
+
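For reference, a self-contained sketch of the `initUndistortRectifyMap`/`remap` pattern used above; the intrinsics and distortion values here are placeholders, not taken from this repo:

```python
import cv2
import numpy as np

# placeholder intrinsics and distortion, for illustration only
K = np.array([[600.0, 0.0, 320.0],
              [0.0, 600.0, 240.0],
              [0.0, 0.0, 1.0]])
dist = np.array([0.1, -0.05, 0.0, 0.0, 0.0])

depth = np.random.rand(480, 640).astype(np.float32)  # stand-in depth image
h, w = depth.shape

# maps from rectified pixel coordinates back into the distorted source image
map1, map2 = cv2.initUndistortRectifyMap(
    K, dist, np.eye(3), K, (w, h), cv2.CV_32FC1)

# nearest-neighbour lookup keeps every output pixel a real measured depth;
# linear interpolation would average foreground and background at edges
undistorted = cv2.remap(depth, map1, map2, cv2.INTER_NEAREST)
```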
+class ROS2Subscriber(Dataset):
+ def __init__(
+ self,
+ extrinsic_calib=None,
+ root_dir=None,
+ traj_file=None,
+ keep_ixs=None,
+ rgb_transform=None,
+ depth_transform=None,
+ noisy_depth=False,
+ col_ext=None,
+ distortion_coeffs=None,
+ camera_matrix=None,
+ ):
+ crop = False
+ self.rgb_transform = rgb_transform
+ self.depth_transform = depth_transform
+
+ self.distortion_coeffs = np.array(distortion_coeffs)
+ self.camera_matrix = camera_matrix
+
+ self.img = []
+ self.depth = []
+ self.Twc = []
+
+ self.max_size = 5
+
+ # Test: fall back to a hardcoded offline sequence for debugging
+ root_dir = '../../data/realsense_franka/6'
+ traj_file = root_dir + "/traj.txt"
+ self.Ts = None
+ if traj_file is not None:
+ self.Ts = np.loadtxt(traj_file)
+ self.Ts = self.Ts[:, 1:].reshape(-1, 4, 4)
+
+ self.rgb_dir = os.path.join(root_dir, "rgb")
+ self.depth_dir = os.path.join(root_dir, "depth")
+
+ for idx in range(10):
+ depth_file = os.path.join(self.depth_dir, str(idx).zfill(5) + ".npy")
+ rgb_file = os.path.join(self.rgb_dir, str(idx).zfill(5) + '.jpg')
+
+ depth = np.load(depth_file)
+ image = cv2.imread(rgb_file)
+
+ self.img.append(image)
+ self.depth.append(depth)
+
+ if self.Ts is not None:
+ self.Twc.append(self.Ts[idx])
+
+ def __len__(self):
+ # frames stream in indefinitely, so report an effectively infinite length
+ return 1000000000
+
+ def add_frame(self, img, depth, Twc):
+ if len(self.img) >= self.max_size:
+ self.img.pop(0)
+ self.depth.pop(0)
+ self.Twc.pop(0)
+
+ self.img.append(img)
+ self.depth.append(depth)
+ self.Twc.append(Twc)
+
+ def __getitem__(self, idx):
+ data = None
+ while data is None:
+ if idx >= len(self.img):
+ idx = -1
+ data = (self.img[idx], self.depth[idx], self.Twc[idx])
+
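A minimal usage sketch for the new class: `add_frame` and the index-wrapping `__getitem__` are from the diff above; the callback wiring is a hypothetical stand-in for whatever node produces frames (note that, as committed, `__init__` also preloads ten frames from a hardcoded test sequence):

```python
import numpy as np

# construct with placeholder calibration; real values come from the config
ds = ROS2Subscriber(
    distortion_coeffs=[0.0, 0.0, 0.0, 0.0, 0.0],
    camera_matrix=np.eye(3),
)

def on_frame(img, depth, Twc):
    # ring buffer of max_size=5: the oldest frame is dropped on overflow
    ds.add_frame(img, depth, Twc)

# any out-of-range index falls back to the most recent frame (idx = -1);
# the sample dict follows the class above: "image", "depth", "T"
sample = ds[10 ** 6]
```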
if data is not None:
image, depth, Twc = data
diff --git a/isdf/modules/trainer.py b/isdf/modules/trainer.py
index 8998fdc3..653cc58e 100644
--- a/isdf/modules/trainer.py
+++ b/isdf/modules/trainer.py
@@ -30,6 +30,7 @@
from isdf.visualisation import draw, draw3D
from isdf.eval.metrics import start_timing, end_timing
+import open3d as o3d
class Trainer():
def __init__(
@@ -94,11 +95,15 @@ def __init__(
if self.sdf_transf_file is not None:
self.load_gt_sdf()
self.cosSim = torch.nn.CosineSimilarity(dim=-1, eps=1e-6)
+ self.nfid = 0
# Init functions ---------------------------------------
def get_latest_frame_id(self):
- return int(self.tot_step_time * self.fps)
+ # frames now arrive sequentially from a live source, so use a running
+ # counter instead of deriving the frame id from elapsed wall-clock time
+ self.nfid += 1
+ return self.nfid - 1
+
+ # return int(self.tot_step_time * self.fps)
def set_scene_properties(self, scene_mesh = None):
# if self.live:
@@ -145,8 +150,8 @@ def set_scene_properties(self, scene_mesh = None):
transform=self.bounds_transform,
scale=self.scene_scale,
)
+
self.grid_pc = self.grid_pc.view(-1, 3).to(self.device)
-
self.up_ix = np.argmax(np.abs(np.matmul(
self.up, self.bounds_transform_np[:3, :3])))
self.grid_up = self.bounds_transform_np[:3, self.up_ix]
@@ -493,6 +498,13 @@ def load_data(self):
ims_file = self.ext_calib # extrinsic calib
self.traj_file = None
camera_matrix = np.array([[self.fx, 0.0, self.cx], [0.0, self.fy, self.cy], [0.0, 0.0, 1.0]])
+ elif self.dataset_format == "ROS2":
+ dataset_class = dataset.ROS2Subscriber
+ col_ext = None
+ self.up = np.array([0., 0., 1.])
+ ims_file = self.ext_calib # extrinsic calib
+ self.traj_file = None
+ camera_matrix = np.array([[self.fx, 0.0, self.cx], [0.0, self.fy, self.cy], [0.0, 0.0, 1.0]])
elif self.dataset_format == "realsense_franka_offline":
dataset_class = dataset.RealsenseFrankaOffline
col_ext = ".jpg"
@@ -500,16 +512,16 @@ def load_data(self):
ims_file = self.ims_file
camera_matrix = np.array([[self.fx, 0.0, self.cx], [0.0, self.fy, self.cy], [0.0, 0.0, 1.0]])
- self.scene_dataset = dataset_class(
- ims_file,
- traj_file=self.traj_file,
- rgb_transform=rgb_transform,
- depth_transform=depth_transform,
- col_ext=col_ext,
- noisy_depth=noisy_depth,
- distortion_coeffs=self.distortion_coeffs,
- camera_matrix=camera_matrix,
- )
+ # self.scene_dataset = dataset_class(
+ # ims_file,
+ # traj_file=self.traj_file,
+ # rgb_transform=rgb_transform,
+ # depth_transform=depth_transform,
+ # col_ext=col_ext,
+ # noisy_depth=noisy_depth,
+ # distortion_coeffs=self.distortion_coeffs,
+ # camera_matrix=camera_matrix,
+ # )
if self.incremental is False:
if "im_indices" not in self.config["dataset"]:
@@ -636,12 +648,15 @@ def check_keyframe_latest(self):
depth_gt = self.frames.depth_batch[-1].unsqueeze(0)
self.last_is_keyframe = self.is_keyframe(T_WC, depth_gt)
+ '''
time_since_kf = self.tot_step_time - self.frames.frame_id[-2] / 30.
if time_since_kf > 5. and not self.live:
print("More than 5 seconds since last kf, so add new")
self.last_is_keyframe = True
+ '''
if self.last_is_keyframe:
+
self.optim_frames = self.iters_per_kf
self.noise_std = self.noise_kf
else:
@@ -679,6 +694,75 @@ def clear_keyframes(self):
self.gt_im_vis = None
# Main training methods ----------------------------------
+ def sample_points_pc(
+ self,
+ data,
+ n_rays=None,
+ dist_behind_surf=None,
+ n_strat_samples=None,
+ n_surf_samples=None,
+ ):
+
+ if n_rays is None:
+ n_rays = self.n_rays
+ if dist_behind_surf is None:
+ dist_behind_surf = self.dist_behind_surf
+ if n_strat_samples is None:
+ n_strat_samples = self.n_strat_samples
+ if n_surf_samples is None:
+ n_surf_samples = self.n_surf_samples
+
+ Twc = data['twc']
+ origin = Twc[:3, 3]
+ dirs_C = data['dirs_C']
+ pcd = data['pcd']
+ depth = data['depth']
+ norm_batch = np.asarray(pcd.normals)
+
+ size = 1000
+ random_index = torch.tensor(np.random.choice(depth.shape[0], size = size, replace = False), dtype = torch.long)
+
+ depth_sample = torch.tensor(depth, dtype=torch.float)
+ dirs_C_sample = torch.tensor(dirs_C, dtype=torch.float)
+ norm_sample = torch.tensor(norm_batch, dtype=torch.float)
+ T_WC_sample = torch.tensor(np.expand_dims(Twc, axis = 0), dtype = torch.float)
+
+ depth_sample = depth_sample[random_index].to(self.device)
+ dirs_C_sample = dirs_C_sample[random_index].to(self.device)
+ norm_sample = norm_sample[random_index].to(self.device)
+
+ expand_dimension = torch.tensor(np.zeros(size), dtype = torch.long)
+ T_WC_sample = T_WC_sample[expand_dimension].to(self.device)
+ max_depth = depth_sample + dist_behind_surf
+
+ pc, z_vals = sample.sample_along_rays(
+ T_WC_sample,
+ self.min_depth,
+ max_depth,
+ n_strat_samples,
+ n_surf_samples,
+ dirs_C_sample,
+ gt_depth=depth_sample,
+ grad=False,
+ )
+
+ #print(">>>>>>", pc.shape, pc.type())
+ #print(">>>>>>", z_vals.shape, z_vals.type())
+ #print(">>>>>>", dirs_C_sample.shape, dirs_C_sample.type())
+ #print(">>>>>>", depth_sample.shape, depth_sample.type())
+ #print(">>>>>>", T_WC_sample.shape, T_WC_sample.type())
+ #print(">>>>>>", norm_sample.shape, norm_sample.type())
+
+ sample_pts = {
+ "pc": pc,
+ "z_vals": z_vals,
+ "dirs_C_sample": dirs_C_sample,
+ "depth_sample": depth_sample,
+ "T_WC_sample": T_WC_sample,
+ "norm_sample": norm_sample,
+ }
+ return sample_pts
+
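`sample_points_pc` consumes a caller-built `data` dict rather than the keyframe buffer. A sketch of the expected schema, mirroring how `ramp.py` (added later in this diff) builds it, with random points standing in for a real cloud:

```python
import numpy as np
import open3d as o3d

pc_c = np.random.rand(2000, 3) + [0.0, 0.0, 1.0]  # (N, 3) points, camera frame
Twc = np.eye(4)                                    # camera-to-world pose

pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pc_c)
# normals feed norm_sample for the gradient/orientation losses
pcd.estimate_normals(
    search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))

data = {
    "pcd": pcd,                                                   # cloud + normals
    "twc": Twc,                                                   # ray origins
    "dirs_C": pc_c / np.linalg.norm(pc_c, axis=1, keepdims=True), # unit ray dirs
    "depth": pc_c[:, 2],                                          # per-point depth
}
```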
def sample_points(
self,
@@ -737,7 +821,7 @@ def sample_points(
norm_batch=norm_batch,
get_masks=get_masks,
)
-
+
max_depth = depth_sample + dist_behind_surf
pc, z_vals = sample.sample_along_rays(
T_WC_sample,
@@ -749,7 +833,8 @@ def sample_points(
gt_depth=depth_sample,
grad=False,
)
-
+
+
sample_pts = {
"depth_batch": depth_batch,
"pc": pc,
@@ -763,6 +848,14 @@ def sample_points(
"norm_sample": norm_sample,
"binary_masks": binary_masks,
}
+ # print(">>>>>>", T_WC_batch.shape)
+ # print(">>>>>>", pc.shape, pc.type())
+ # print(">>>>>>", z_vals.shape, z_vals.type())
+ # print(">>>>>>", dirs_C_sample.shape, dirs_C_sample.type())
+ # print(">>>>>>", depth_sample.shape, depth_sample.type())
+ # print(">>>>>>", T_WC_sample.shape, T_WC_sample.type())
+ # print(">>>>>>", norm_sample.shape, norm_sample.type())
+
return sample_pts
def sdf_eval_and_loss(
@@ -772,15 +865,15 @@ def sdf_eval_and_loss(
):
pc = sample["pc"]
z_vals = sample["z_vals"]
- indices_b = sample["indices_b"]
- indices_h = sample["indices_h"]
- indices_w = sample["indices_w"]
+ #indices_b = sample["indices_b"]
+ #indices_h = sample["indices_h"]
+ #indices_w = sample["indices_w"]
dirs_C_sample = sample["dirs_C_sample"]
depth_sample = sample["depth_sample"]
T_WC_sample = sample["T_WC_sample"]
norm_sample = sample["norm_sample"]
- binary_masks = sample["binary_masks"]
- depth_batch = sample["depth_batch"]
+ #binary_masks = sample["binary_masks"]
+ #depth_batch = sample["depth_batch"]
do_sdf_grad = self.eik_weight != 0 or self.grad_weight != 0
if do_sdf_grad:
@@ -793,7 +886,6 @@ def sdf_eval_and_loss(
sdf_grad = fc_map.gradient(pc, sdf)
# compute bounds
-
bounds, grad_vec = loss.bounds(
self.bounds_method,
dirs_C_sample,
@@ -836,11 +928,12 @@ def sdf_eval_and_loss(
)
loss_approx, frame_avg_loss = None, None
+ '''
if do_avg_loss:
loss_approx, frame_avg_loss = loss.frame_avg(
total_loss_mat, depth_batch, indices_b, indices_h, indices_w,
self.W, self.H, self.loss_approx_factor, binary_masks)
-
+ '''
# # # for plot
# z_to_euclidean_depth = dirs_C_sample.norm(dim=-1)
# ray_target = depth_sample[:, None] - z_vals
@@ -948,35 +1041,41 @@ def check_gt_sdf(self, depth_sample, z_vals, dirs_C_sample, pc,
scene.show()
- def step(self):
+ def step(self, data = None):
start, end = start_timing()
- depth_batch = self.frames.depth_batch
- T_WC_batch = self.frames.T_WC_batch
- norm_batch = self.frames.normal_batch if self.do_normal else None
+ if not isinstance(data, dict):
- if len(self.frames) > self.window_size and self.incremental:
- idxs = self.select_keyframes()
- # print("selected frame ids", self.frames.frame_id[idxs[:-1]])
- else:
- idxs = np.arange(T_WC_batch.shape[0])
- self.active_idxs = idxs
+ depth_batch = self.frames.depth_batch
+ T_WC_batch = self.frames.T_WC_batch
+ norm_batch = self.frames.normal_batch if self.do_normal else None
- depth_batch = depth_batch[idxs]
- T_WC_select = T_WC_batch[idxs]
+ if len(self.frames) > self.window_size and self.incremental:
+ idxs = self.select_keyframes()
+ # print("selected frame ids", self.frames.frame_id[idxs[:-1]])
+ else:
+ idxs = np.arange(T_WC_batch.shape[0])
+ self.active_idxs = idxs
+
+ depth_batch = depth_batch[idxs]
+ T_WC_select = T_WC_batch[idxs]
+
+ sample_pts = self.sample_points(
+ depth_batch, T_WC_select, norm_batch=norm_batch)
+
+ self.active_pixels = {
+ 'indices_b': sample_pts['indices_b'],
+ 'indices_h': sample_pts['indices_h'],
+ 'indices_w': sample_pts['indices_w'],
+ }
+ else:
+ sample_pts = self.sample_points_pc(data)
- sample_pts = self.sample_points(
- depth_batch, T_WC_select, norm_batch=norm_batch)
- self.active_pixels = {
- 'indices_b': sample_pts['indices_b'],
- 'indices_h': sample_pts['indices_h'],
- 'indices_w': sample_pts['indices_w'],
- }
total_loss, losses, active_loss_approx, frame_avg_loss = \
self.sdf_eval_and_loss(sample_pts, do_avg_loss=True)
- self.frames.frame_avg_losses[idxs] = frame_avg_loss
+ #self.frames.frame_avg_losses[idxs] = frame_avg_loss
total_loss.backward()
self.optimiser.step()
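After this change `step` runs in two modes. A sketch of both call sites, assuming `isdf_trainer` is an already-constructed `trainer.Trainer`; the `losses, step_time` return follows the existing interface, as used by `ramp.py` below:

```python
# keyframe mode (original behaviour): sample rays from the stored frames
losses, step_time = isdf_trainer.step()

# point-cloud mode (new): skip the frame buffer and sample along rays
# through an externally supplied cloud, via sample_points_pc
losses, step_time = isdf_trainer.step(data)  # data dict as sketched above
```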
@@ -1423,7 +1522,12 @@ def draw_obj_3D(self, show_gt_mesh=True):
return scene
return None
- def get_sdf_grid(self):
+ def get_sdf_grid(self, grid_pc = None, dim = None):
+
+ if torch.is_tensor(grid_pc):
+ # flatten to (N, 3) and keep it on the trainer's device, matching
+ # how set_scene_properties stores self.grid_pc
+ self.grid_pc = grid_pc.view(-1, 3).to(self.device)
+ self.grid_dim = dim
+
with torch.set_grad_enabled(False):
# gt_dist = sdf_util.eval_sdf_interp(
@@ -1443,8 +1547,8 @@ def get_sdf_grid(self):
return sdf
- def get_sdf_grid_pc(self, include_gt=False, mask_near_pc=False):
- sdf_grid = self.get_sdf_grid()
+ def get_sdf_grid_pc(self, include_gt=False, mask_near_pc=False, grid_pc = None, dim=None):
+ sdf_grid = self.get_sdf_grid(grid_pc, dim)
grid_pc = self.grid_pc.reshape(
self.grid_dim, self.grid_dim, self.grid_dim, 3)
sdf_grid_pc = torch.cat((grid_pc, sdf_grid[..., None]), dim=-1)
@@ -1516,14 +1620,12 @@ def mesh_rec(self, crop_mesh_with_pc=True):
self.set_scene_properties(pc_tm)
sdf = self.get_sdf_grid()
-
sdf_mesh = draw3D.draw_mesh(
sdf,
self.scene_scale_np,
self.bounds_transform_np,
color_by="none",
)
-
if crop_mesh_with_pc:
tree = KDTree(pc)
dists, _ = tree.query(sdf_mesh.vertices, k=1)
diff --git a/isdf/package.xml b/isdf/package.xml
deleted file mode 100644
index 054adef3..00000000
--- a/isdf/package.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<package format="2">
- <name>isdf</name>
- <version>0.0.0</version>
- <description>The iSDF package</description>
-
- <maintainer>joe</maintainer>
-
- <license>MIT</license>
-
- <buildtool_depend>catkin</buildtool_depend>
- <depend>orb_slam3_ros_wrapper</depend>
- <depend>rospy</depend>
-
- <export>
- </export>
-</package>
diff --git a/isdf/train/configs/realsense_franka_offline1.json b/isdf/train/configs/realsense_franka_offline1.json
new file mode 100644
index 00000000..a5ff6cc3
--- /dev/null
+++ b/isdf/train/configs/realsense_franka_offline1.json
@@ -0,0 +1,137 @@
+{
+ "dataset": {
+ "format": "realsense_franka_offline",
+ "seq_dir": "../../data/realsense_franka/6",
+ "depth_scale": 1000.0,
+ "fps": 1,
+ "camera": {
+ "w": 640,
+ "h": 480,
+ "fx": 603.82641602,
+ "fy": 603.8012085,
+ "cx": 325.3427124,
+ "cy": 244.17706299,
+ "k1": 0.0,
+ "k2": 0.0,
+ "p1": 0.0,
+ "p2": 0.0,
+ "k3,": 0.0
+ },
+ "n_views": 20,
+ "random_views": 0
+ },
+ "eval": {
+ "do_vox_comparison": 0,
+ "eval_pts_root": "/mnt/datasets/data/eval_pts/",
+ "do_eval": 0,
+ "eval_freq_s": 1,
+ "sdf_eval": 1,
+ "mesh_eval": 0
+ },
+ "save": {
+ "save_period": 10,
+ "save_checkpoints": 0,
+ "save_slices": 0,
+ "save_meshes": 0
+ },
+
+ "optimiser": {
+ "lr": 0.0004,
+ "weight_decay": 0.012
+ },
+ "trainer": {
+ "steps": 5001
+ },
+ "sample": {
+ "n_rays": 200,
+ "n_rays_is_kf": 400,
+ "n_strat_samples": 19,
+ "n_surf_samples": 8,
+ "_depth_range": "[0.6, 6.0] for D455, [0.3, 3.0] for D435 ",
+ "depth_range": [0.1, 2.0],
+ "dist_behind_surf": 0.01
+ },
+ "model": {
+ "refine_poses": 0,
+ "do_active": 0,
+ "frac_time_perception": 1.0,
+ "scale_output": 0.14,
+ "noise_std": 0.025,
+ "noise_kf": 0.08,
+ "noise_frame": 0.04,
+ "window_size": 5,
+ "hidden_layers_block": 3,
+ "hidden_feature_size": 256,
+ "iters_per_kf": 100,
+ "iters_per_frame": 50,
+ "kf_dist_th": 0.12,
+ "kf_pixel_ratio": 0.65,
+ "embedding": {
+ "scale_input": 0.04,
+ "n_embed_funcs": 10,
+ "gauss_embed": 0,
+ "gauss_embed_std": 11,
+ "optim_embedding": 0
+ }
+ },
+ "loss": {
+ "bounds_method": "ray",
+ "loss_type": "L1",
+ "trunc_weight": 30.0,
+ "trunc_distance": 0.1,
+ "eik_weight": 0.268,
+ "eik_apply_dist": 0.1,
+ "grad_weight": 0.018,
+ "orien_loss": 0
+ },
+ "pose_refine": {
+ "pose_lr": 0.0004
+ },
+ "ext_calib" : [
+ {
+ "num_marker_seen": 57,
+ "stage2_retry": 11,
+ "pixel_error": 1.4073816361356746,
+ "proj_func": "world_marker_proj_hand_camera",
+ "camera_ee_ori": [
+ [
+ -0.7331957803068133,
+ -0.6784676819155312,
+ 0.04588629791613664
+ ],
+ [
+ 0.6797903691368288,
+ -0.7330245046025822,
+ 0.02366705898400078
+ ],
+ [
+ 0.017578446151389475,
+ 0.04854565117807486,
+ 0.9986662695728726
+ ]
+ ],
+ "camera_ee_ori_rotvec": [
+ 0.04384503983152567,
+ 0.04988863029258953,
+ 2.393739882277834
+ ],
+ "camera_ee_pos": [
+ 0.0630458124429231,
+ 0.024003126126725796,
+ 0.031195365016893357
+ ],
+ "marker_base_pos": [
+ 0.47837507566569787,
+ 0.04519535777928266,
+ -0.168236188751606
+ ]
+ }
+ ],
+ "workspace": {
+ "_comment": "Robot based is (0, 0, 0), X: breadth of table, Y: length of table, Z: height, _rotate_z: 0 for normal, 180 for flipped, _offset: transformation to the base [-0.5, 0.0, 0.0] for both _center: [0.5, 0.0, 0.0] for normal, [-0.5, 0.0, 0.0] for flipped, _extents: size of the table",
+ "rotate_z": 0,
+ "offset": [-0.5, 0.0, 0.0],
+ "center": [0.5, 0.0, 0.0],
+ "extents": [1.0, 1.2, 0.5]
+ }
+}
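For reference, a sketch of consuming this config: the path is from this diff and the intrinsics assembly mirrors the `camera_matrix` lines in `load_data`; the assumption that `camera_ee_ori`/`camera_ee_pos` compose into a camera-in-end-effector pose is mine, not stated in the file:

```python
import json
import numpy as np

with open("isdf/train/configs/realsense_franka_offline1.json") as f:
    config = json.load(f)

cam = config["dataset"]["camera"]
camera_matrix = np.array([[cam["fx"], 0.0, cam["cx"]],
                          [0.0, cam["fy"], cam["cy"]],
                          [0.0, 0.0, 1.0]])
distortion_coeffs = [cam["k1"], cam["k2"], cam["p1"], cam["p2"], cam["k3"]]

# hand-eye calibration entry: rotation matrix plus translation
calib = config["ext_calib"][0]
T = np.eye(4)
T[:3, :3] = np.array(calib["camera_ee_ori"])
T[:3, 3] = np.array(calib["camera_ee_pos"])
```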
diff --git a/isdf/train/ramp.py b/isdf/train/ramp.py
new file mode 100755
index 00000000..855cfa01
--- /dev/null
+++ b/isdf/train/ramp.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+import numpy as np
+import json
+import os
+from datetime import datetime
+import argparse
+import cv2
+
+import open3d as o3d
+import matplotlib.pyplot as plt
+import matplotlib
+from mpl_toolkits.mplot3d import Axes3D
+
+from isdf.modules import trainer
+
+import pickle
+from isdf.visualisation import draw, draw3D
+from scipy.spatial import KDTree
+
+def pickleLoader(pklFile):
+ try:
+ while True:
+ yield pickle.load(pklFile)
+ except EOFError:
+ pass
+
+pc = []
+robot_T_camera = []
+
+with open("dump1.pkl", "rb") as f:
+ for i, data in enumerate(pickleLoader(f)):
+ print(data.keys())
+ pc.append(data['pc'][:,:3])
+ robot_T_camera.append(data['robot_T_camera'])
+
+def plot_costmap(pos, cost):
+
+ xyz = pos
+ pcd = o3d.geometry.PointCloud()
+ pcd.points = o3d.utility.Vector3dVector(xyz)
+ # norm = plt.Normalize(vmin=norm_min, vmax=norm_max)
+ norm = plt.Normalize(vmin = np.min(cost), vmax = np.max(cost))
+ # norm = cost /(np.max(cost)-np.min(cost))
+ cmap = matplotlib.cm.get_cmap('Spectral')
+ colors = cmap(norm(cost))
+ pcd.colors = o3d.utility.Vector3dVector(colors[:,:3])
+ # pcd.paint_uniform_color([1, 0.706, 0])
+ o3d.visualization.draw_geometries([pcd])
+
+def plot_costmap1(pos, cost):
+
+
+ fig = plt.figure(figsize=(10, 10))
+ ax = fig.add_subplot(111, projection='3d')
+
+ norm = plt.Normalize(vmin = np.min(cost), vmax = np.max(cost))
+
+ cmap = matplotlib.cm.get_cmap('Spectral')
+ colors = cmap(norm(cost))
+ img = ax.scatter(pos[:,0], pos[:,1], pos[:,2], marker='s',
+ s=10, c = colors, alpha=0.05)
+ ax.set_title("3D Heatmap")
+ ax.set_xlabel('X-axis')
+ ax.set_ylabel('Y-axis')
+ ax.set_zlabel('Z-axis')
+ #plt.colorbar()
+ plt.show()
+
+
+
+if __name__ == "__main__":
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ seed = 1
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+
+ parser = argparse.ArgumentParser(description="iSDF.")
+ parser.add_argument("--config", type=str, required=True, help="input json config")
+ parser.add_argument(
+ "-ni",
+ "--no_incremental",
+ action="store_false",
+ help="disable incremental SLAM option",
+ )
+ args, _ = parser.parse_known_args()  # ROS adds extra unrecognised args
+ config_file = args.config
+ incremental = args.no_incremental
+
+ # init trainer-------------------------------------------------------------
+ isdf_trainer = trainer.Trainer(
+ device,
+ config_file,
+ incremental=incremental,
+ )
+
+ for i in range(len(pc)):
+
+ tcw = np.linalg.inv(robot_T_camera[i])
+ pc_w = np.ones((pc[i].shape[0], 4))
+ pc_w[:, :3] = pc[i]
+ pc_c = (tcw @ pc_w.T).T
+ pc_c = pc_c[:, :3]
+
+ dirs_C = pc_c/np.expand_dims(np.linalg.norm(pc_c, axis = 1), axis = -1)
+ pcd = o3d.geometry.PointCloud()
+ pcd.points = o3d.utility.Vector3dVector(pc_c)
+ pcd.estimate_normals(
+ search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
+
+
+ data = {
+ 'pcd' : pcd,
+ 'twc' : robot_T_camera[i],
+ 'dirs_C': dirs_C,
+ 'depth': pc_c[:,2]
+ }
+
+ for x in range(100):
+ losses, step_time = isdf_trainer.step(data)
+ status = [k + ': {:.6f} '.format(losses[k]) for k in losses.keys()]
+ status = "".join(status) + '-- Step time: {:.2f} '.format(step_time)
+ print("STATUS :", status)
+
+ x_min = np.min(pc[i][:, 0])
+ x_max = np.max(pc[i][:, 0])
+
+ y_min = np.min(pc[i][:, 1])
+ y_max = np.max(pc[i][:, 1])
+
+ z_min = np.min(pc[i][:, 2])
+ z_max = np.max(pc[i][:, 2])
+ dim = 100
+ x = torch.linspace(x_min,x_max, steps=dim, device='cpu')
+ y = torch.linspace(y_min,y_max, steps=dim, device='cpu')
+ z = torch.linspace(z_min-0.1,z_max+0.6, steps=dim, device='cpu')
+
+ grid = torch.meshgrid(x, y, z)
+
+ grid_3d = torch.cat(
+ (grid[0][..., None],
+ grid[1][..., None],
+ grid[2][..., None]), dim=3
+ )
+
+ print(grid_3d.shape)
+ sdf = isdf_trainer.get_sdf_grid(grid_3d, dim)
+ print(sdf.shape)
+
+
+ isdf_pc, _ = isdf_trainer.get_sdf_grid_pc(grid_pc = grid_3d, dim = dim)
+ #isdf_pc = isdf_pc[::2,::2,::2,:]
+ isdf_pc = np.reshape(isdf_pc, (-1, 4))
+
+
+ print(z_max)
+
+ x_idx = np.logical_and(isdf_pc[:, 0] < x_max, isdf_pc[:, 0] > x_min)
+ y_idx = np.logical_and(isdf_pc[:, 1] < y_max, isdf_pc[:, 1] > y_min)
+ z_idx = np.logical_and(isdf_pc[:, 2] > z_min, isdf_pc[:, 2] < z_max)
trainer.last_is_keyframe = True
trainer.optim_frames = 200
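The tail of the `ramp.py` hunk appears truncated in this patch (the stray `trainer.*` lines above read like context from elsewhere). Judging from the otherwise-unused `plot_costmap` helper defined earlier in the file, a plausible continuation — an assumption, not recovered text — is:

```python
# assumed continuation: keep only SDF samples inside the observed box
# and visualise them with the plot_costmap helper defined above
idx = x_idx & y_idx & z_idx
plot_costmap(isdf_pc[idx, :3], isdf_pc[idx, 3])
```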
diff --git a/setup.py b/setup.py
index 7904379f..0f96e220 100644
--- a/setup.py
+++ b/setup.py
@@ -5,22 +5,24 @@
#!/usr/bin/env python
-from setuptools import setup
+from setuptools import setup, find_packages
import shlex
import subprocess
+print(find_packages(where="."))
def git_version():
cmd = 'git log --format="%h" -n 1'
return subprocess.check_output(shlex.split(cmd)).decode()
-version = git_version()
-
+#version = git_version()
+version = '1.1.0'
setup(
- name='incSDF',
+ name='isdf',
version=version,
author='Joe Ortiz',
author_email='joeaortiz16@gmail.com',
+ packages= find_packages(where="."),
py_modules=[]
)
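With `find_packages` plus the new empty `isdf/__init__.py`, the project installs as a regular package. A quick smoke test (run `pip install -e .` from the repo root first):

```python
# after an editable install, the package and the new class resolve:
import isdf
from isdf.datasets import dataset

print(isdf.__file__)            # points into the source tree
print(dataset.ROS2Subscriber)   # added in this diff
```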