diff --git a/.gitignore b/.gitignore
index eff634ad..28e00602 100644
--- a/.gitignore
+++ b/.gitignore
@@ -43,6 +43,8 @@
# Python
*.pyc
+**/*.egg-info
+**/__pycache__
# Reconstructions
*.ply
diff --git a/Jenkinsfile b/Jenkinsfile
index b764229b..9402f779 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -23,6 +23,11 @@ pipeline {
sh '''cd nvblox/build && cmake .. -DCMAKE_INSTALL_PREFIX=../install && make clean && make -j8 && make install'''
}
}
+ stage('Lint') {
+ steps {
+ sh '''bash nvblox/lint/lint_nvblox_h.sh'''
+ }
+ }
stage('Test x86') {
steps {
sh '''cd nvblox/build/tests && ctest -T test --no-compress-output'''
@@ -69,10 +74,10 @@ pipeline {
}
}
}
- stage("Jetson 5.0.2") {
+ stage("Jetson 5.1.1") {
agent {
dockerfile {
- label 'jetson-5.0.2'
+ label 'jp-5.1.1'
reuseNode true
filename 'docker/Dockerfile.jetson_deps'
args '-u root --runtime nvidia --gpus all -v /var/run/docker.sock:/var/run/docker.sock:rw'
@@ -86,6 +91,11 @@ pipeline {
sh '''cd nvblox/build && cmake .. -DCMAKE_INSTALL_PREFIX=../install && make clean && make -j8 && make install'''
}
}
+ stage('Lint') {
+ steps {
+ sh '''bash nvblox/lint/lint_nvblox_h.sh'''
+ }
+ }
stage('Test Jetson') {
steps {
sh '''cd nvblox/build/tests && ctest -T test --no-compress-output'''
diff --git a/README.md b/README.md
index 3fee1530..59200b77 100644
--- a/README.md
+++ b/README.md
@@ -1,30 +1,75 @@
-# nvblox
+# nvblox 
+
Signed Distance Functions (SDFs) on NVIDIA GPUs.
-

+
+
-An SDF library which offers
-* Support for storage of various voxel types
-* GPU accelerated agorithms such as:
+A GPU SDF library which offers
+* GPU accelerated algorithms such as:
* TSDF construction
+ * Occupancy mapping
* ESDF construction
* Meshing
-* ROS2 interface (see [isaac_ros_nvblox](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nvblox))
-* ~~Python bindings~~ (coming soon)
+* ROS 2 interface (see [isaac_ros_nvblox](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nvblox))
+* Support for storage of various voxel types, with easy extension to custom voxel types.
+
+Above we show reconstruction using data from the [3DMatch dataset](https://3dmatch.cs.princeton.edu/), specifically the [Sun3D](http://sun3d.cs.princeton.edu/) `mit_76_studyroom` scene.
+
+## Table of Contents
+
+- [nvblox ](#nvblox-)
+ - [Table of Contents](#table-of-contents)
+- [Why nvblox?](#why-nvblox)
+- [How to use nvblox](#how-to-use-nvblox)
+ - [Out-of-the-box Reconstruction/ROS 2 Interface](#out-of-the-box-reconstructionros-2-interface)
+ - [Public Datasets](#public-datasets)
+ - [C++ Interface](#c-interface)
+- [Native Installation](#native-installation)
+ - [Install dependencies](#install-dependencies)
+ - [Build and run tests](#build-and-run-tests)
+ - [Run an example](#run-an-example)
+- [Docker](#docker)
+- [Additional instructions for Jetson Xavier](#additional-instructions-for-jetson-xavier)
+ - [Open3D on Jetson](#open3d-on-jetson)
+- [Building for multiple GPU architectures](#building-for-multiple-gpu-architectures)
+- [Building redistributable binaries, with static dependencies](#building-redistributable-binaries-with-static-dependencies)
+- [License](#license)
+
+# Why nvblox?
Do we need another SDF library? That depends on your use case. If you're interested in:
-* **Path planning**: We provide GPU accelerated, incremental algorithms for calculating the Euclidian Signed Distance Field (ESDF) which is useful for colision checking and therefore robotic pathplanning. In contrast, existing GPU-accelerated libraries target reconstruction only, and are therefore generally not useful in a robotics context.
-* **GPU acceleration**: Our previous works [voxblox](https://github.com/ethz-asl/voxblox) and [voxgraph](https://github.com/ethz-asl/voxgraph) are used for path planning, however utilize CPU compute only, which limits the speed of these toolboxes (and therefore the resolution of the maps they can build in real-time).
+* **Path planning**: We provide GPU accelerated, incremental algorithms for calculating the Euclidean Signed Distance Field (ESDF), which is useful for collision checking in robotic path-planning.
+* **GPU acceleration**: Our previous works [voxblox](https://github.com/ethz-asl/voxblox) and [voxgraph](https://github.com/ethz-asl/voxgraph) are used for path-planning; however, they utilize CPU compute only, which limits the speed of these toolboxes, and therefore the resolution of the maps they can build in real-time. nvblox is *much* faster.
+* **Jetson Platform**: nvblox is written with the [NVIDIA Jetson](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/) in mind. If you want to run reconstruction on an embedded GPU, you're in the right place.
-Here we show slices through a distance function generated from *nvblox* using data from the [3DMatch dataset](https://3dmatch.cs.princeton.edu/), specifically the [Sun3D](http://sun3d.cs.princeton.edu/) `mit_76_studyroom` scene:
+Below we visualize slices through a distance function (ESDF):

-# Note from the authors
-This package is under active development. Feel free to make an issue for bugs or feature requests, and we always welcome pull requests!
-# ROS2 Interface
-This repo contains the core library which can be linked into users' projects. If you want to use nvblox on a robot out-of-the-box, please see our [ROS2 interface](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nvblox), which downloads and builds the core library during installation.
+# How to use nvblox
+How you use nvblox depends on what you want to do.
+
+## Out-of-the-box Reconstruction/ROS 2 Interface
+
+For users who would like to use nvblox in a robotic system or connect easily to a sensor, we suggest using our [ROS 2 interface](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nvblox).
+
+The ROS 2 interface includes examples which allow you to:
+* Build a reconstruction from a realsense camera using nvblox and NVIDIA VSLAM [here](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nvblox/blob/main/docs/tutorial-nvblox-vslam-realsense.md).
+* Navigate a robot in Isaac Sim [here](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nvblox/blob/main/docs/tutorial-isaac-sim.md).
+* Combine 3D reconstruction with image segmentation with [realsense data](https://gitlab-master.nvidia.com/isaac_ros/isaac_ros_nvblox/-/blob/envoy-dev/docs/tutorial-human-reconstruction-realsense.md) and in [simulation](https://gitlab-master.nvidia.com/isaac_ros/isaac_ros_nvblox/-/blob/envoy-dev/docs/tutorial-human-reconstruction-isaac-sim.md).
+
+The ROS 2 interface downloads and builds the library contained in this repository during installation, so you don't need to clone and build this repository at all.
+
+## Public Datasets
+
+If you would like to run nvblox on public datasets, we include some executables for running reconstructions on the [3DMatch](https://3dmatch.cs.princeton.edu/), [Replica](https://github.com/facebookresearch/Replica-Dataset), and [Redwood](http://redwood-data.org/indoor_lidar_rgbd/index.html) datasets. Please see our [tutorial](./docs/pages/tutorial_public_datasets.md) on running these.
+
+## C++ Interface
+
+If you want to build nvblox into a larger project without ROS, or you would like to make modifications to nvblox's core reconstruction features, this repository contains the code you need. Our [tutorial](./docs/pages/tutorial_library_interface.md) provides some brief details of how to interact with the reconstruction in C++.
+
# Native Installation
If you want to build natively, please follow these instructions. Instructions for docker are [further below](#docker).
@@ -33,10 +78,11 @@ If you want to build natively, please follow these instructions. Instructions fo
We depend on:
- gtest
- glog
-- gflags (to run experiments)
-- CUDA 11.0 - 11.6 (others might work but are untested)
+- gflags
+- SQLite 3
+- CUDA 11.0 - 11.8 (others might work but are untested)
- Eigen (no need to explicitly install, a recent version is built into the library)
-- SQLite 3 (for serialization)
+- stdgpu (downloaded during compilation)
Please run
```
sudo apt-get install -y libgoogle-glog-dev libgtest-dev libgflags-dev python3-dev libsqlite3-dev
@@ -60,12 +106,11 @@ unzip ~/datasets/3dmatch/sun3d-mit_76_studyroom-76-1studyroom2.zip -d ~/datasets
Navigate to and run the `fuse_3dmatch` binary. From the nvblox base folder run
```
cd nvblox/build/executables
-./fuse_3dmatch ~/datasets/3dmatch/sun3d-mit_76_studyroom-76-1studyroom2/ --esdf_frame_subsampling 3000 --mesh_output_path mesh.ply
+./fuse_3dmatch ~/datasets/3dmatch/sun3d-mit_76_studyroom-76-1studyroom2/ mesh.ply
```
-Once it's done we can view the output mesh using the Open3D viewer.
+Once it's done, we can view the output mesh using the Open3D viewer. Instructions for installing the Open3D viewer can be found below.
```
-pip3 install open3d
-python3 ../../visualization/visualize_mesh.py mesh.ply
+Open3D mesh.ply
```
you should see a mesh of a room:

@@ -88,7 +133,7 @@ We have several dockerfiles (in the `docker` subfolder) which layer on top of on
* * Runs ours tests.
* * Useful for checking if things are likely to pass the tests in CI.
-We are reliant on nvidia docker. Install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) following the instructions on that website.
+We rely on nvidia docker. Install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) following the instructions on that website.
We use the GPU during build, not only at run time. In the default configuration the GPU is only used at at runtime. One must therefore set the default runtime. Add `"default-runtime": "nvidia"` to `/etc/docker/daemon.json` such that it looks like:
```
@@ -106,10 +151,12 @@ Restart docker
```
sudo systemctl restart docker
```
-Now Let's build Dockerfile.deps docker image. This image install contains our dependencies. (In case you are running this on the Jetson, simply substitute docker/`Dockerfile.jetson_deps` below and the rest of the instructions remain the same.
+Now let's build the Dockerfile.deps docker image. This image contains our dependencies.
```
docker build -t nvblox_deps -f docker/Dockerfile.deps .
```
+> In case you are running this on the Jetson, substitute the dockerfile: `docker/Dockerfile.jetson_deps`
+
Now let's build the Dockerfile.build. This image layers on the last, and actually builds the nvblox library.
```
docker build -t nvblox -f docker/Dockerfile.build .
@@ -125,15 +172,17 @@ apt-get update
apt-get install unzip
wget http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/sun3d-mit_76_studyroom-76-1studyroom2.zip -P ~/datasets/3dmatch
unzip ~/datasets/3dmatch/sun3d-mit_76_studyroom-76-1studyroom2.zip -d ~/datasets/3dmatch
-cd nvblox/nvblox/build/executables
-./fuse_3dmatch ~/datasets/3dmatch/sun3d-mit_76_studyroom-76-1studyroom2/ --esdf_frame_subsampling 3000 --mesh_output_path mesh.ply
+cd nvblox/nvblox/build/executables/
+./fuse_3dmatch ~/datasets/3dmatch/sun3d-mit_76_studyroom-76-1studyroom2/ mesh.ply
```
Now let's visualize. From the same executable folder run:
```
-apt-get install python3-pip libgl1-mesa-glx
-pip3 install open3d
-python3 ../../visualization/visualize_mesh.py mesh.ply
+apt-get install libgl1-mesa-glx libc++1 libc++1-10 libc++abi1-10 libglfw3 libpng16-16
+wget https://github.com/isl-org/Open3D/releases/download/v0.13.0/open3d-app-0.13.0-Ubuntu_20.04.deb
+dpkg -i open3d-app-0.13.0-Ubuntu_20.04.deb
+Open3D mesh.ply
```
+To visualize on the Jetson, see [below](#open3d-on-jetson).
# Additional instructions for Jetson Xavier
These instructions are for a native build on the Jetson Xavier. You can see the instructions above for running in docker.
@@ -149,7 +198,7 @@ wget -qO - https://apt.kitware.com/keys/kitware-archive-latest.asc |
```
2. Add the repository to your sources list and update.
```
-sudo apt-add-repository 'deb https://apt.kitware.com/ubuntu/ bionic main'
+sudo apt-add-repository 'deb https://apt.kitware.com/ubuntu/ focal main'
sudo apt-get update
```
3. Update!
@@ -161,6 +210,19 @@ sudo apt-get install cmake
export OPENBLAS_CORETYPE=ARMV8
```
+## Open3D on Jetson
+Open3D is available pre-compiled for the Jetson ([details here](http://www.open3d.org/docs/release/arm.html)). Install via pip:
+```
+apt-get install python3-pip
+pip3 install open3d==0.16.0
+```
+> If version `0.16.0` is not available you need to upgrade your pip with `pip3 install -U pip`. You may additionally need to add the upgraded pip version to your path.
+
+View the mesh via:
+```
+open3d draw mesh.ply
+```
+
# Building for multiple GPU architectures
By default, the library builds ONLY for the compute capability (CC) of the machine it's being built on. To build binaries that can be used across multiple machines (i.e., pre-built binaries for CI, for example), you can use the `BUILD_FOR_ALL_ARCHS` flag and set it to true. Example:
```
@@ -168,7 +230,7 @@ cmake .. -DBUILD_FOR_ALL_ARCHS=True -DCMAKE_INSTALL_PREFIX=../install/ && make -
```
# Building redistributable binaries, with static dependencies
-If you want to include nvblox in another CMake project, simply `find_package(nvblox)` should bring in the correct libraries and headers. However, if you want to include it in a different build system such as Bazel, you can see the instructions here: [docs/redistibutable.md].
+If you want to include nvblox in another CMake project, a simple `find_package(nvblox)` should bring in the correct libraries and headers. However, if you want to include it in a different build system such as Bazel, you can see the instructions [here](./docs/pages/redistibutable.md).
# License
This code is under an [open-source license](LICENSE) (Apache 2.0). :)
diff --git a/docs/conf.py b/docs/conf.py
index e8a29822..af2a69e2 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -19,34 +19,15 @@
}
extensions = [
- 'sphinx.ext.autosectionlabel', 'myst_parser', #'breathe', 'exhale',
+ 'sphinx.ext.autosectionlabel'
]
project = name
master_doc = 'root'
-# html_theme_options = {'logo_only': True}
html_extra_path = ['doxyoutput/html']
-
-# # Setup the breathe extension
-# breathe_projects = {"project": "./doxyoutput/xml"}
-# breathe_default_project = "project"
-
-# # Setup the exhale extension
-# exhale_args = {
-# "verboseBuild": False,
-# "containmentFolder": "./api",
-# "rootFileName": "library_root.rst",
-# "rootFileTitle": "Library API",
-# "doxygenStripFromPath": "..",
-# "createTreeView": True,
-# "exhaleExecutesDoxygen": True, # SWITCH TO TRUE
-# "exhaleUseDoxyfile": True, # SWITCH TO TRUE
-# "pageLevelConfigMeta": ":github_url: https://github.com/nvidia-isaac/" + name
-# }
-
-source_suffix = ['.rst', '.md']
+source_suffix = ['.md']
# Tell sphinx what the primary language being documented is.
primary_domain = 'cpp'
diff --git a/docs/images/3dmatch.gif b/docs/images/3dmatch.gif
new file mode 100644
index 00000000..3efce544
Binary files /dev/null and b/docs/images/3dmatch.gif differ
diff --git a/docs/images/redwood_apartment.png b/docs/images/redwood_apartment.png
new file mode 100644
index 00000000..746560cd
Binary files /dev/null and b/docs/images/redwood_apartment.png differ
diff --git a/docs/images/replica_office0.png b/docs/images/replica_office0.png
new file mode 100644
index 00000000..425fff45
Binary files /dev/null and b/docs/images/replica_office0.png differ
diff --git a/docs/pages/technical.md b/docs/pages/technical.md
index c918f260..ef0d3d84 100644
--- a/docs/pages/technical.md
+++ b/docs/pages/technical.md
@@ -2,7 +2,7 @@
## Input/Outputs
-Here we discuss the inputs you have to provide to nvblox, and the outputs it produces for downstream tasks. This is the default setup within ROS2 for 2D navigation, but note that other outputs are possible (such as the full 3D distance map).
+Here we discuss the inputs you have to provide to nvblox, and the outputs it produces for downstream tasks. This is the default setup within ROS 2 for 2D navigation, but note that other outputs are possible (such as the full 3D distance map).
_Inputs_:
* **Depth Images**: (@ref nvblox::Image) We require input from a sensor supplying depth per pixel. Examples of such sensors are the Intel Realsense series and Kinect cameras.
diff --git a/docs/pages/tutorial_library_interface.md b/docs/pages/tutorial_library_interface.md
new file mode 100644
index 00000000..88b149d9
--- /dev/null
+++ b/docs/pages/tutorial_library_interface.md
@@ -0,0 +1,142 @@
+# Library interface
+
+On this page we give some brief details of how to interact with nvblox at the library level. For doxygen-generated API docs, see [our readthedocs page](https://nvblox.readthedocs.io/en/latest/index.html).
+
+## High-level Interface
+
+The top level interface is the `Mapper` class.
+
+```cpp
+const float voxel_size_m = 0.05;
+const MemoryType memory_type = MemoryType::kDevice;
+Mapper mapper(voxel_size_m, memory_type);
+```
+
+This creates a mapper, which also allocates an empty map. Here we specify that voxels will be 5cm in size, and will be stored on the GPU (device).
+
+The mapper has methods for adding depth and color images to the reconstruction.
+
+```cpp
+mapper.integrateDepth(depth_image, T_L_C, camera);
+```
+
+The input image `depth_image`, the camera pose `T_L_C`, and the camera intrinsic model `camera` need to be supplied by the user of nvblox.
+
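+Constructing these inputs is application specific. Below is a rough illustrative sketch; it is not taken verbatim from nvblox, and the exact constructors of `DepthImage`, `Transform`, and `Camera` may differ between versions.
+
+```cpp
+// A 480x640 depth image stored in GPU (device) memory. In a real application
+// the pixel values come from your depth sensor.
+DepthImage depth_image(480, 640, MemoryType::kDevice);
+
+// Pose of the camera (C) in the layer frame (L). Identity is just a placeholder.
+Transform T_L_C = Transform::Identity();
+
+// Pinhole intrinsics: focal lengths (fu, fv), principal point (cu, cv),
+// and image size (width, height).
+Camera camera(525.0f, 525.0f, 320.0f, 240.0f, 640, 480);
+
+// The inputs are then integrated as shown above:
+mapper.integrateDepth(depth_image, T_L_C, camera);
+```
+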
+The function call above integrates the observations into a 3D TSDF voxel grid.
+The TSDF is rarely the final desired output; usually we would like to generate, from the TSDF, a Euclidean Signed Distance Function (ESDF) for path planning, or a mesh to view the reconstruction.
+Mapper includes methods for doing this:
+
+```cpp
+mapper.updateEsdf();
+mapper.updateMesh();
+```
+
+The word "update" here indicates that these functions don't generate the mesh or ESDF from scratch, but only update what's needed.
+
+We could then save the mesh to disk as a `.ply` file.
+
+```cpp
+io::outputMeshLayerToPly(mapper.mesh_layer(), "/path/to/my/cool/mesh.ply");
+```
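+
+Putting these calls together, a minimal incremental mapping loop could look like the sketch below (reusing the inputs constructed in the earlier sketch). Note that `getNextFrame()` is a hypothetical placeholder for however your application receives depth frames and poses; it is not part of nvblox.
+
+```cpp
+while (getNextFrame(&depth_image, &T_L_C, &camera)) {
+  mapper.integrateDepth(depth_image, T_L_C, camera);
+  // The "update" calls only touch the blocks affected by new observations.
+  mapper.updateEsdf();
+  mapper.updateMesh();
+}
+
+io::outputMeshLayerToPly(mapper.mesh_layer(), "mesh.ply");
+```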
+
+## Accessing Voxels
+
+If you're using nvblox as a library you likely want to work with voxels directly.
+
+Voxels are stored in the class `Layer`. A map is composed of multiple layers, which are co-located voxel grids that store voxels of different types.
+A typical map has, for example, TSDF and Color layers.
+
+Layer provides voxel accessor methods.
+
+
+```cpp
+void getVoxels(const std::vector<Vector3f>& positions_L,
+               std::vector<VoxelType>* voxels_ptr,
+               std::vector<bool>* success_flags_ptr) const;
+
+void getVoxelsGPU(const device_vector<Vector3f>& positions_L,
+                  device_vector<VoxelType>* voxels_ptr,
+                  device_vector<bool>* success_flags_ptr) const;
+```
+These return a vector of voxels to the caller, on either the CPU or the GPU.
+The flags indicate whether the relevant voxel could be found (we only allocate voxels in memory when that area of space is observed).
+If you request a voxel in unobserved space the lookup will fail and write a `false` to that entry in the `success_flags` vector.
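+
+For example, a CPU-side query through the mapper's TSDF layer might look like the sketch below (assuming the `mapper` from the previous section, and that it exposes its TSDF layer via a `tsdf_layer()` accessor):
+
+```cpp
+std::vector<Vector3f> positions_L = {Vector3f(0.0f, 0.0f, 0.0f),
+                                     Vector3f(1.0f, 0.5f, 0.25f)};
+std::vector<TsdfVoxel> voxels;
+std::vector<bool> success_flags;
+
+// Internally launches a GPU kernel and copies the results back to the host.
+mapper.tsdf_layer().getVoxels(positions_L, &voxels, &success_flags);
+
+for (size_t i = 0; i < voxels.size(); ++i) {
+  if (success_flags[i]) {
+    // distance holds the (truncated) signed distance stored in the voxel.
+    std::cout << voxels[i].distance << std::endl;
+  }
+}
+```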
+
+Calling these functions requires the GPU to run a kernel to retrieve voxels from the voxel grid and copy their values into the output vector.
+In `getVoxels` we additionally copy the voxels back from the GPU to host (CPU) memory.
+
+Getting voxels using the functions above is a multistep process internally.
+The function has to:
+* call a kernel which translates query positions into voxel memory locations,
+* copy the voxels into an output vector, and
+* optionally (in `getVoxels`) copy the output vector from device to host memory.
+
+Therefore, advanced users who want maximum query speed should access voxels directly inside a GPU kernel.
+The next section discusses this process.
+
+## Accessing Voxels on GPU
+
+If you want to write high performance code which uses voxel values directly, you'll likely want to access voxels in GPU kernels.
+
+We illustrate how this is done using a slightly simplified version of the `getVoxelsGPU` function described in the last section.
+
+```cpp
+__global__ void queryVoxelsKernel(
+    int num_queries, Index3DDeviceHashMapType<TsdfBlock> block_hash,
+ float block_size, const Vector3f* query_locations_ptr,
+ TsdfVoxel* voxels_ptr, bool* success_flags_ptr) {
+ const int idx = threadIdx.x + blockIdx.x * blockDim.x;
+ if (idx >= num_queries) {
+ return;
+ }
+ const Vector3f query_location = query_locations_ptr[idx];
+
+ TsdfVoxel* voxel;
+ if (!getVoxelAtPosition(block_hash, query_location, block_size,
+ &voxel)) {
+ success_flags_ptr[idx] = false;
+ } else {
+ success_flags_ptr[idx] = true;
+ voxels_ptr[idx] = *voxel;
+ }
+}
+
+void getVoxelsGPU(
+    const TsdfLayer& layer,
+    const device_vector<Vector3f>& positions_L,
+    device_vector<TsdfVoxel>* voxels_ptr,
+    device_vector<bool>* success_flags_ptr) {
+
+ const int num_queries = positions_L.size();
+
+ voxels_ptr->resize(num_queries);
+ success_flags_ptr->resize(num_queries);
+
+ constexpr int kNumThreads = 512;
+ const int num_blocks = num_queries / kNumThreads + 1;
+
+  GPULayerView<TsdfBlock> gpu_layer_view = layer.getGpuLayerView();
+
+  queryVoxelsKernel<<<num_blocks, kNumThreads>>>(
+ num_queries, gpu_layer_view.getHash().impl_, layer.block_size(),
+ positions_L.data(), voxels_ptr->data(), success_flags_ptr->data());
+  checkCudaErrors(cudaDeviceSynchronize());
+ checkCudaErrors(cudaPeekAtLastError());
+}
+```
+
+The first critical thing that happens in the code above is that we get a GPU view of the hash table representing the map.
+
+```cpp
+GPULayerView<TsdfBlock> gpu_layer_view = layer.getGpuLayerView();
+```
+The hash table is used in the kernel to transform 3D query locations into memory locations for voxels.
+
+Inside the kernel we have
+```cpp
+TsdfVoxel* voxel;
+getVoxelAtPosition(block_hash, query_location, block_size, &voxel);
+```
+which places a pointer to the voxel in `voxel` and returns true if the voxel has been allocated.
+
+For a small example application which queries voxels on the GPU see `/nvblox/examples/src/esdf_query.cu`.
diff --git a/docs/pages/tutorial_public_datasets.md b/docs/pages/tutorial_public_datasets.md
new file mode 100644
index 00000000..b910b4ec
--- /dev/null
+++ b/docs/pages/tutorial_public_datasets.md
@@ -0,0 +1,61 @@
+# Public Datasets Tutorial
+
+If you would like to run nvblox on public datasets, we include some executables for fusing the [3DMatch](https://3dmatch.cs.princeton.edu/), [Replica](https://github.com/facebookresearch/Replica-Dataset), and [Redwood](http://redwood-data.org/indoor_lidar_rgbd/index.html) datasets.
+
+The executables are run by pointing the respective binary to a folder containing the dataset. We give details for each dataset below.
+
+## 3DMatch
+
+Instructions to run 3DMatch are given on the front page of the README [here](https://github.com/nvidia-isaac/nvblox#run-an-example).
+
+## Replica
+
+We use [Replica](https://github.com/facebookresearch/Replica-Dataset) sequences from the [NICE-SLAM](https://github.com/cvg/nice-slam) project.
+
+First download the dataset:
+
+```bash
+cd ~/datasets
+wget https://cvg-data.inf.ethz.ch/nice-slam/data/Replica.zip
+unzip Replica.zip
+```
+
+Now run nvblox and output a mesh.
+
+```bash
+cd nvblox/build/executables
+./fuse_replica ~/datasets/Replica/office0 --voxel_size=0.02 --color_frame_subsampling=20 mesh.ply
+```
+Note that here we specify via command line flags to run the reconstruction with 2cm voxels, and only to integrate 1 in 20 color frames.
+
+View the reconstruction in Open3D
+```bash
+Open3D mesh.ply
+```
+
+
+## Redwood
+
+The Redwood RGB-D datasets are available [here](http://redwood-data.org/indoor_lidar_rgbd/download.html).
+
+Download the "RGB-D sequence" and "Our camera poses" at the link above.
+
+Extract the data into a common folder. For example, for the apartment sequence, the resulting folder structure looks like:
+```bash
+~/datasets/redwood/apartment
+~/datasets/redwood/apartment/pose_apartment/...
+~/datasets/redwood/apartment/rgbd_apartment/...
+```
+
+Now we run the reconstruction
+```bash
+cd nvblox/build/executables
+./fuse_redwood ~/datasets/redwood/apartment --voxel_size=0.02 --color_frame_subsampling=20 mesh.ply
+```
+Note this dataset is large (~30000 images) so the reconstruction can take a couple of minutes.
+
+View the reconstruction in Open3D
+```bash
+Open3D mesh.ply
+```
+
diff --git a/nvblox/evaluation/replica/evaluation_utils/__init__.py b/docs/root.md
similarity index 100%
rename from nvblox/evaluation/replica/evaluation_utils/__init__.py
rename to docs/root.md
diff --git a/docs/root.rst b/docs/root.rst
deleted file mode 100644
index 50cba87b..00000000
--- a/docs/root.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-=======
-Table of Contents
-=======
-
-.. toctree::
- :maxdepth: 1
- :glob:
- root
diff --git a/docs/rst/examples/core_example.rst b/docs/rst/examples/core_example.rst
deleted file mode 100644
index 49eb237e..00000000
--- a/docs/rst/examples/core_example.rst
+++ /dev/null
@@ -1,63 +0,0 @@
-====================
-Core Library Example
-====================
-
-In this example we fuse data from the `3DMatch dataset `_. The commands to run the example are slightly different depending on if you've installed :ref:`natively ` or in a :ref:`docker container `.
-
-Core Library Example - Native
-=============================
-
-In this example we fuse data from the `3DMatch dataset `_. First let's grab the dataset. Here I'm downloading it to my dataset folder ``~/dataset/3dmatch``. ::
-
- wget http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets//datasets/3dmatch/sun3d-mit_76_studyroom-76-1studyroom2.zip -P ~/datasets/3dmatch
- unzip ~/datasets/3dmatch//datasets/3dmatch/sun3d-mit_76_studyroom-76-1studyroom2.zip -d ~/datasets/3dmatch
-
-Navigate to and run the ``fuse_3dmatch`` binary. From the nvblox base folder run::
-
- cd nvblox/build/experiments
- ./fuse_3dmatch ~/datasets/3dmatch//datasets/3dmatch/sun3d-mit_76_studyroom-76-1studyroom2/ --esdf_frame_subsampling 3000 --mesh_output_path mesh.ply
-
-Once it's done we can view the output mesh using the Open3D viewer. ::
-
- pip3 install open3d
- python3 ../../visualization/visualize_mesh.py mesh.ply
-
-you should see a mesh of a room:
-
-.. _example result:
-.. figure:: ../../images/reconstruction_in_docker_trim.png
- :align: center
-
- The result of running the core library example.
-
-
-
-
-Core Library Example - Docker
-=============================
-
-Now let's run the 3DMatch example inside the docker. Note there's some additional complexity in the ``docker run`` command such that we can forward X11 to the host (we're going to be viewing a reconstruction in a GUI). Run the container using::
-
- xhost local:docker
- docker run -it --net=host --env="DISPLAY" -v $HOME/.Xauthority:/root/.Xauthority:rw -v /tmp/.X11-unix:/tmp/.X11-unix:rw nvblox
-
-Let's download a dataset and run the example::
-
- apt-get update
- apt-get install unzip
- wget http://vision.princeton.edu/projects/2016/3DMatch/downloads/rgbd-datasets/sun3d-mit_76_studyroom-76-1studyroom2.zip -P ~/datasets/3dmatch
- unzip ~/datasets/3dmatch//datasets/3dmatch/sun3d-mit_76_studyroom-76-1studyroom2.zip -d ~/datasets/3dmatch
- cd nvblox/nvblox/build/experiments/
- ./fuse_3dmatch ~/datasets/3dmatch//datasets/3dmatch/sun3d-mit_76_studyroom-76-1studyroom2/ --esdf_frame_subsampling 3000 --mesh_output_path mesh.ply
-
-Now let's visualize. From the same experiments folder run::
-
- apt-get install python3-pip libgl1-mesa-glx
- pip3 install open3d
- python3 ../../visualization/visualize_mesh.py mesh.ply
-
-You should see the :ref:`image above `.
-
-
-
-
diff --git a/docs/rst/examples/index.rst b/docs/rst/examples/index.rst
deleted file mode 100644
index 4ceec22b..00000000
--- a/docs/rst/examples/index.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-========
-Examples
-========
-
-.. toctree::
- :maxdepth: 1
-
- core_example
- ros_example
diff --git a/docs/rst/examples/ros_example.rst b/docs/rst/examples/ros_example.rst
deleted file mode 100644
index ff7a36dd..00000000
--- a/docs/rst/examples/ros_example.rst
+++ /dev/null
@@ -1,86 +0,0 @@
-============
-ROS2 Example
-============
-
-In this example, we will use nvblox to build a reconstruction from simulation data streamed from `Isaac Sim `_. Data will flow from the simulator to nvblox using ROS2 and the `isaac_ros_nvblox `_ interface.
-
-.. _example result:
-.. figure:: ../../images/nvblox_navigation_trim.gif
- :align: center
-
-There are two ways to run nvblox in this example:
-
-* Inside a Docker container
-* In a ROS2 workspace installed directly on your machine
-
-This example treats running docker as the default choice.
-
-Example Description
-===================
-
-In this example, Isaac Sim will run natively on your machine and communicate with nvblox running inside a Docker container. Running in Isaac Sim is referred to as running on the *host* machine, differentiating it from running inside the *Docker*. If using the native setup, both will run on the host machine.
-
-Isaac Sim Setup (Host Machine)
-==============================
-
-Follow the standard instructions to install `Isaac Sim `_
-on the host machine.
-
-As part of the set-up, make sure to install a local Nucleus server (Nucleus manages simulation assets such as maps and objects), following the instructions `here `_. Mounting the Isaac share will give you access to the latest Isaac Sim samples, which these instructions use. Please also use the `latest URL for the mount `_ (rather than what's listed in the linked tutorial)::
-
- Name: Isaac
- Type: Amazon S3
- Host: d28dzv1nop4bat.cloudfront.net
- Service: s3
- Redirection: https://d28dzv1nop4bat.cloudfront.net
-
-You will launch Isaac Sim from Python scripts that automate the setup of the robot and environment. Isaac Sim uses its own python binary,
-which pulls in the modules that are dependencies. To run the Isaac Sim simulation launch scripts, you will use the Isaac Sim Python binary,
-which is located at ``~/.local/share/ov/pkg/{YOUR_ISAAC_SIM_VERSION}/python.sh``
-
-For convenience, you can create an alias to this Python binary in your ``~/.bashrc``. Using the Isaac Sim version ``isaac_sim-2021.2.1-release.1``
-as an example, add the following line to ``~/.bashrc``::
-
- alias omni_python='~/.local/share/ov/pkg/isaac_sim-2021.2.1-release.1/python.sh'
-
-.. note::
- Ensure ``isaac_sim-2021.2.1-release.1`` is the name of the Isaac Sim version installed on your system ::
-
-Now ``source`` the ``.bashrc`` to have access to this alias. ::
-
- source ~/.bashrc
-
-Running the Simulation (on the Host) and the Reconstruction (in the Docker)
-===========================================================================
-
-For this example, you will need two terminals. In the first terminal, you will run Isaac Sim.
-
-**Terminal 1**: Start up Isaac Sim with the correct sensors on the host machine::
-
- omni_python ~/workspaces/isaac_ros-dev/ros_ws/src/isaac_ros_nvblox/nvblox_isaac_sim/omniverse_scripts/carter_warehouse.py
-
-.. note::
- Ensure there is no ROS workspace sourced in this terminal.
-
-.. note::
- If Isaac Sim reports not finding a Nucleus server, follow the instructions `here `_ to download the required assets.
-
-**Terminal 2:** In another terminal, start the ``isaac_ros-dev`` Docker ::
-
- ~/workspaces/isaac_ros-dev/scripts/run_dev.sh
-
-Source the ``ros_ws`` ::
-
- source /workspaces/isaac_ros-dev/ros_ws/install/setup.bash
-
-Run nvblox and ROS2 Nav2::
-
- ros2 launch nvblox_nav2 carter_sim.launch.py
-
-You should see the robot reconstructing a mesh, with a costmap overlaid on top. To give it a command, you can select "2D Goal Pose"
-in the command window at the top and select a goal in the main window. You should then see the robot plan a green path toward the
-goal and navigate there, both in rviz and in simulation.
-
-.. _example result:
-.. figure:: ../../images/readme_nav2.gif
- :align: center
diff --git a/docs/rst/index.rst b/docs/rst/index.rst
deleted file mode 100644
index 384ed59d..00000000
--- a/docs/rst/index.rst
+++ /dev/null
@@ -1,30 +0,0 @@
-=======
-Introduction to nvblox
-=======
-
-Nvblox is a package for building a 3D reconstruction of the environment around your robot from sensor observations in real-time. The reconstruction is intended to be used by path planners to generate collision-free paths. Under the hood, nvblox uses NVIDIA CUDA to accelerate this task to allow operation at real-time rates. This repository contains ROS2 integration for the nvblox core library.
-
-|pic1| |pic2|
-
-.. |pic1| image:: ./images/reconstruction_in_docker_trim.png
- :width: 45%
-
-.. |pic2| image:: /images/nvblox_navigation_trim.gif
- :width: 45%
-
-**Left**: nvblox used for reconstruction on a scan from the `Sun3D Dataset `_.
-**Right**: the nvblox ROS2 wrapper used to construct a costmap for `ROS2 Nav2 `_ for navigating of a robot inside `Isaac Sim `_.
-
-Nvblox is composed of two packages
-
-* `nvblox Core Library `_ Contains the core C++/CUDA reconstruction library.
-* `nvblox ROS2 Interface `_ Contains a ROS2 wrapper and integrations for simulation and path planning. Internally builds the core library.
-
-
-
-
-.. .. figure:: ./images/reconstruction_in_docker_trim.png
-.. :width: 50 %
-.. :align: center
-
-.. nvblox used for reconstruction on a scan from the `Sun3D Dataset http://sun3d.cs.princeton.edu/`_
diff --git a/docs/rst/installation/core.rst b/docs/rst/installation/core.rst
deleted file mode 100644
index b96abfd2..00000000
--- a/docs/rst/installation/core.rst
+++ /dev/null
@@ -1,119 +0,0 @@
-=========================
-Core Library Installation
-=========================
-
-There are two ways to install the nvblox core library: :ref:`natively ` on your system or inside a :ref:`docker container `.
-
-Native Installation
-===================
-
-If you want to build natively, please follow these instructions. Instructions for docker are :ref:`further below `.
-
-Install dependencies
---------------------
-
-We depend on:
-
-* gtest
-* glog
-* gflags
-* CUDA 10.2 - 11.5 (others might work but are untested)
-* Eigen (no need to explicitly install, a recent version is built into the library)
-
-Please run::
-
- sudo apt-get install -y libgoogle-glog-dev libgtest-dev libgflags-dev python3-dev
- cd /usr/src/googletest && sudo cmake . && sudo cmake --build . --target install
-
-Build and run tests
--------------------
-Build and run with::
-
- cd nvblox/nvblox
- mkdir build
- cd build
- cmake .. && make && ctest
-
-All tests should pass.
-
-Now you can run :ref:`core library example `
-
-
-Docker Installation
-===================
-
-We have several dockerfiles, each of which layers on top of the preceding one for the following purposes:
-
-* **Docker.deps**
-
- - This sets up the environment and installs our dependencies.
- - This is used in our CI, where the later steps (building and testing) are taken care of by Jenkins (and not docker).
-* **Docker.build**
-
- - Layers on top of Docker.deps.
- - This builds our package.
- - This is where you get off the layer train if you wanna run stuff (and don't care if it's tested).
-
-* **Docker.test**
-
- - Layers on top of Docker.build.
- - Runs ours tests.
- - Useful for checking, on your machine, if things are likely to pass the tests in CI.
-
-Install NVIDIA Container Toolkit
---------------------------------
-
-We are reliant on nvidia docker. Install the `NVIDIA Container Toolkit `_ following the instructions on that website.
-
-We use the GPU during build, not only at run time. In the default configuration the GPU is only used at at runtime. One must therefore set the default runtime. Add `"default-runtime": "nvidia"` to `/etc/docker/daemon.json` such that it looks like::
-
- {
- "runtimes": {
- "nvidia": {
- "path": "/usr/bin/nvidia-container-runtime",
- "runtimeArgs": []
- }
- },
- "default-runtime": "nvidia"
- }
-
-Restart docker::
-
- sudo systemctl restart docker
-
-Build the Image
----------------
-
-Now Let's build Dockerfile.deps docker image. This image install contains our dependencies. ::
-
- docker build -t nvblox_deps -f Dockerfile.deps .
-
-Now let's build the Dockerfile.build. This image layers on the last, and actually builds the nvblox library. ::
-
- docker build -t nvblox -f Dockerfile.build .
-
-Now you can run :ref:`core library example `
-
-
-Additional instructions for Jetson Xavier
-=========================================
-
-These instructions are for a **native** build on the Jetson Xavier. A Docker based build is coming soon.
-
-The instructions for the native build above work, with one exception:
-
-We build using CMake's modern CUDA integration and therefore require a more modern version of CMAKE than (currently) ships with jetpack. Luckily the cmake developer team provide a means obtaining recent versions of CMake through apt.
-
-1. Obtain a copy of the signing key::
-
- wget -qO - https://apt.kitware.com/keys/kitware-archive-latest.asc |
- sudo apt-key add -
-
-2. Add the repository to your sources list::
-
- sudo apt-add-repository 'deb https://apt.kitware.com/ubuntu/ bionic main'
- sudo apt-get update
-
-3. Update::
-
- sudo apt-get install cmake
diff --git a/docs/rst/installation/index.rst b/docs/rst/installation/index.rst
deleted file mode 100644
index 134e5753..00000000
--- a/docs/rst/installation/index.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-============
-Installation
-============
-
-There are two catagories of installation:
-
-
-.. toctree::
- :maxdepth: 1
-
- core
- ros
diff --git a/docs/rst/installation/ros.rst b/docs/rst/installation/ros.rst
deleted file mode 100644
index 27c572fa..00000000
--- a/docs/rst/installation/ros.rst
+++ /dev/null
@@ -1,165 +0,0 @@
-=================
-ROS2 Installation
-=================
-
-If you want to use nvblox for navigation, or out-of-the-box in a robotic system the best way to do that is to use our `ROS2 wrappers `_. There's no need to install the core library if installing this way, ROS2 downloads and builds the core library before bundling it into the wrapper.
-
-Below is an example of the nvblox being used with ROS2 Nav2 for real-time reconstruction and navigation in Isaac Sim.
-
-.. _example navigation:
-.. figure:: ../../images/nvblox_navigation_trim.gif
- :align: center
-
-Packages in this repository
-===========================
-
-+------------------------------------+---------------+------------------------------------------------------+
-| nvblox ROS2 package | Description |
-+====================================+===============+======================================================+
-| isaac_ros_nvblox | A meta-package. (Just a build target that builds a nvblox_ros and |
-| | it's dependencies) |
-+------------------------------------+---------------+------------------------------------------------------+
-| nvblox_isaac_sim | Contains scripts for launching Isaac Sim configured for use with |
-| | nvblox. |
-+------------------------------------+---------------+------------------------------------------------------+
-| nvblox_msgs | Custom messages for transmitting the output distance map slice and |
-| | mesh over ROS2. |
-+------------------------------------+---------------+------------------------------------------------------+
-| nvblox_nav2 | Contains a custom plugin that allows ROS2 Nav2 to consume nvblox |
-| | distance map outputs, as well as launch files for launching a |
-| | navigation solution for use in simulation. |
-+------------------------------------+---------------+------------------------------------------------------+
-| nvblox_ros | The ROS2 wrapper for the core reconstruction library and the nvblox |
-| | node. |
-+------------------------------------+---------------+------------------------------------------------------+
-| nvblox_rviz_plugin | A plugin for displaying nvblox's (custom) mesh type in RVIZ. |
-+------------------------------------+---------------+------------------------------------------------------+
-| [submodule] nvblox | The core (ROS independent) reconstruction library. |
-+------------------------------------+---------------+------------------------------------------------------+
-
-System Requirements
-===================
-This Isaac ROS package is designed and tested to be compatible with ROS2 Foxy on x86 and Jetson hardware.
-
-
-Jetson
-------
-- `Jetson AGX Xavier or Xavier NX `_
-- `JetPack 4.6.1 `_
-
-x86_64
-------
-- Ubuntu 20.04+
-- CUDA 11.4+ supported discrete GPU with 2+ GB of VRAM
-
-.. note::
- If running `Isaac Sim `_, more VRAM will be required to store the simulated world.
-
-.. note::
- For best performance on Jetson, ensure that power settings are configured appropriately (`Power Management for Jetson `_).
-
-
-Installation Options
-====================
-
-There are two ways to build the ROS2 interface. Either :ref:`natively ` or inside a :ref:`Docker container `. Note that because precompiled ROS2 Foxy packages are not available for JetPack 4.6.1 (it's based on Ubuntu 18.04 Bionic), we recommend following the docker-based instructions if building on Jetson.
-
-Native Installation
-===================
-
-First, follow the instal the dependencies of the the core library::
-
- sudo apt-get install -y libgoogle-glog-dev libgtest-dev libgflags-dev python3-dev
- cd /usr/src/googletest && sudo cmake . && sudo cmake --build . --target install
-
-Additionally, you need `CUDA `_ version 10.2 - 11.5 installed. To make sure Linux finds CUDA on your machine, make sure something like the following is present in your `~/.bashrc`::
-
- export PATH=/usr/local/cuda/bin${PATH:+:${PATH}}
- export LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/lib:${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
-
-Install ROS2 foxy using the `Debian instructions `_.
-
-.. caution::
- Sourcing ROS2 in your workspace automatically (i.e., in your ``.bashrc``) will cause Isaac Sim to break. We recommend creating an alias for sourcing your ROS2 workspace instead.
-
-To create an alias::
-
- alias source_ros2="source /opt/ros/foxy/setup.bash;source ~/ros_ws/install/local_setup.bash"
-
-Check out the nvblox repo to a path like ``~/ros_ws/src``::
-
- mkdir -p ~/ros_ws/src
- git clone --recurse-submodules https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nvblox
-
-Then, build the entire workspace::
-
- cd ~/ros_ws/ && colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release
-
-Again, we recommend creating an alias for the build command::
-
- alias cn="sh -c 'cd ~/ros_ws/ && colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release'"
-
-Now that nvblox is installed you can run the :ref:`navigation example `.
-
-
-
-Docker Installation
-===================
-
-A docker based build can be used on both x86 and Jetson platforms. However, there is a particular impetus to consider it for building on Jetson platforms.
-
-JetPack 4.6.1, which currently ships with Jetson, is based on Ubuntu 18.04, and nvblox requires ROS2 Foxy, which is targeted at Ubuntu 20.04. Therefore, to use nvblox on jetson you have two options:
-
-* manually compile ROS2 Foxy and required dependent packages from source
-* or use the Isaac ROS development Docker image from `Isaac ROS Common `_.
-
-We recommend the second option.
-
-Nvidia Container Toolkit Setup
-------------------------------
-
-The Jetson issue aside, to use the Isaac ROS development Docker image, you must first install the `Nvidia Container Toolkit `__ to make use of the Docker container development/runtime environment.
-
-Configure ``nvidia-container-runtime`` as the default runtime for Docker by editing ``/etc/docker/daemon.json`` to include the following::
-
- "runtimes": {
- "nvidia": {
- "path": "nvidia-container-runtime",
- "runtimeArgs": []
- }
- },
- "default-runtime": "nvidia"
-
-Then restart Docker: ``sudo systemctl daemon-reload && sudo systemctl restart docker``
-
-
-Isaac ROS Docker Setup
-----------------------
-
-Clone the ``isaac_ros_common`` repo into a folder on your system at ``~/workspaces/isaac_ros-dev/ros_ws/src``::
-
- mkdir -p ~/workspaces/isaac_ros-dev/ros_ws/src
- cd ~/workspaces/isaac_ros-dev/ros_ws/src
- git clone --recurse-submodules https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_common.git
-
-Clone the nvblox into ``~/workspaces/isaac_ros-dev/ros_ws/src``. This folder will be mapped by the docker container as a ROS workspace. ::
-
- cd ~/workspaces/isaac_ros-dev/ros_ws/src
- git clone --recurse-submodules https://gitlab-master.nvidia.com/isaac_ros/isaac_ros_nvblox.git
-
-Start the Docker instance by running the start script::
-
- ~/workspaces/isaac_ros-dev/ros_ws/src/isaac_ros_common/scripts/run_dev.sh
-
-Install the dependencies for your ROS workspace::
-
- cd /workspaces/isaac_ros-dev/ros_ws
- rosdep install -i -r --from-paths src --rosdistro foxy -y --skip-keys "libopencv-dev libopencv-contrib-dev libopencv-imgproc-dev python-opencv python3-opencv"
-
-To build the code, first navigate to ``/workspaces/isaac_ros-dev/ros_ws`` inside the Docker container, then use the following command::
-
- colcon build --packages-up-to nvblox_nav2 nvblox_ros nvblox_msgs nvblox_rviz_plugin
-
-The build should pass.
-
-Now that nvblox is installed you can run the :ref:`navigation example `.
diff --git a/docs/rst/integrators.rst b/docs/rst/integrators.rst
deleted file mode 100644
index de3b60c8..00000000
--- a/docs/rst/integrators.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-===========
-Integrators
-===========
-
-An integrator is, very generally, a class that modifies the content of a layers.
-
-The integrators currently offered can be split into two types:
-
-* Those which fuse incoming sensor data into a layer. For example, the :ref:`TsdfIntegrator ` and :ref:`ColorIntegrator ` fused depth images and color images into ``TsdfLayer`` and ``ColorLayer`` respectively.
-* Those which transform the contents of one layer to update the data in another layer. For example the :ref:`EsdfIntegrator ` transforms an TSDF in a ``TsdfLayer`` into an ESDF in a ``EsdfLayer``.
-
-API
-===
-
-The API for the currently available integrators are here:
-
-* :ref:`TsdfIntegrator `
-* :ref:`ColorIntegrator `
-* :ref:`EsdfIntegrator `
-* :ref:`MeshIntegrator `
-
diff --git a/docs/rst/map.rst b/docs/rst/map.rst
deleted file mode 100644
index bcc12d90..00000000
--- a/docs/rst/map.rst
+++ /dev/null
@@ -1,61 +0,0 @@
-===
-Map
-===
-
-We implement a hierarchical sparse voxel grid for storing data. At the top level we have the ``LayerCake``, which contains several layers, each of which contains a different type of mapped quantity (eg TSDF and ESDF). A layer is a collection of sparsely allocated blocks. Each block is in charge of mapping a cubular small region of space. Most blocks are composed of many voxels, each of which captures a single value of the mapped quantity (eg the TSDF).
-
-.. image:: ../images/map_structure.png
- :align: center
-
-The API for the various classes implementing a map nvblox map:
-
-Voxels
-======
-
-* :ref:`TsdfVoxel `
-* :ref:`EsdfVoxel `
-* :ref:`ColorVoxel `
-
-Blocks
-======
-
-A template for a block containing voxels.
-
-* :ref:`VoxelBlock `
-
-Blocks containing specific voxel types. These are just typedefs of the above voxel blocks.
-
-* :ref:`TsdfBlock `
-* :ref:`EsdfBlock `
-* :ref:`ColorBlock `
-
-We store the mesh of the environment as a collection of smaller meshes. In particular, a ``MeshLayer`` contains ``MeshBlocks``, each of which contains a mesh of the surfaces in that region contained within that block. Note that we store a mesh per *block*, there are no mesh *voxels*.
-Storage of the mesh in this way allows us to perform incremental updates (we only update the mesh in blocks where the underlying TSDF has changed).
-
-* :ref:`MeshBlock `
-
-Layers
-======
-
-Template classes for layers containing blocks, and the child class for layers containing ``VoxelBlock`` s.
-
-* :ref:`BlockLayer `
-* :ref:`VoxelBlockLayer `
-
-Typedefs for layers containing specific block types and voxel block types.
-
-* :ref:`TsdfLayer `
-* :ref:`EsdfLayer `
-* :ref:`ColorLayer `
-* :ref:`MeshLayer `
-
-Layer Cake
-==========
-
-The ``LayerCake`` combines several layers into a single object.
-
-* :ref:`LayerCake `
-
-
-
-
diff --git a/docs/rst/mapper.rst b/docs/rst/mapper.rst
deleted file mode 100644
index c007972c..00000000
--- a/docs/rst/mapper.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-======
-Mapper
-======
-
-The mapper class wraps together a number of layers (stored as a :ref:`LayerCake `) along with integrators which operate on these layers.
-Included with the nvblox core library is the :ref:`RgbdMapper ` which offers what we consider to be the default (but extensible) mapping behaviour in nvblox.
-
-The structure of the :ref:`RgbdMapper ` and how it fits into a system is shown below:
-
-.. image:: ../images/mapper.png
- :align: center
-
-The :ref:`RgbdMapper ` class contains **Layers**,
-
-* :ref:`TsdfLayer `
-* :ref:`ColorLayer `
-* :ref:`EsdfLayer `
-* :ref:`MeshLayer `
-
-and **integrators**
-
-* :ref:`ProjectiveTsdfIntegrator `
-* :ref:`ProjectiveColorIntegrator `
-* :ref:`EsdfIntegrator `
-* :ref:`MeshIntegrator `
-
-API
-===
-
-* :ref:`MapperBase `
-* :ref:`RgbdMapper `
diff --git a/docs/rst/rays.rst b/docs/rst/rays.rst
deleted file mode 100644
index 61d9e30e..00000000
--- a/docs/rst/rays.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-====
-Rays
-====
-
-We implement several classes for working with rays
-
-API
-===
-
-* :ref:`SphereTracer `
diff --git a/nvblox/CMakeLists.txt b/nvblox/CMakeLists.txt
index 9dc56a97..b592b478 100644
--- a/nvblox/CMakeLists.txt
+++ b/nvblox/CMakeLists.txt
@@ -29,6 +29,13 @@ set(GFLAGS_BASE_PATH "" CACHE STRING "Base path to static output for gflags, bui
# The penalty for doing this is increased build times.
option(BUILD_FOR_ALL_ARCHS "Build for all GPU architectures" OFF)
+# This option avoids any implementations using std::string in their signature in header files
+# Useful for Nvblox PyTorch wrapper, which requires the old Pre-CXX11 ABI
+option(PRE_CXX11_ABI_LINKABLE "Better support pre-C++11 ABI library users" OFF)
+IF(PRE_CXX11_ABI_LINKABLE)
+ ADD_DEFINITIONS(-DPRE_CXX11_ABI_LINKABLE)
+ENDIF(PRE_CXX11_ABI_LINKABLE)
+
# specify the C++ standard
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED True)
@@ -62,7 +69,7 @@ endif()
# "display_error_number" shows a warning number with all warnings, and the
# rest is just suppressing specific warnings from Eigen. Note that the numbers
# keep changing with every CUDA release so this list is a bit arbitrary.
-set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr --disable-warnings --generate-line-info -lineinfo -Xcudafe --display_error_number")
+set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --extended-lambda --expt-relaxed-constexpr --disable-warnings --generate-line-info -lineinfo -Xcudafe --display_error_number")
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} --compiler-options -fPIC")
################
@@ -83,14 +90,16 @@ find_package(CUDA REQUIRED)
# Treat redistributable and non-redistributable builds differently for
# the other dependencies.
-if (BUILD_REDISTRIBUTABLE)
- if (SQLITE3_BASE_PATH STREQUAL "")
+if(BUILD_REDISTRIBUTABLE)
+ if(SQLITE3_BASE_PATH STREQUAL "")
message(FATAL_ERROR "No SQLite3 static libraries specified! Set the SQLITE3_BASE_PATH variable!")
endif()
- if (GLOG_BASE_PATH STREQUAL "")
+
+ if(GLOG_BASE_PATH STREQUAL "")
message(FATAL_ERROR "No Glog static libraries specified! Set the GLOG_BASE_PATH variable!")
endif()
- if (GFLAGS_BASE_PATH STREQUAL "")
+
+ if(GFLAGS_BASE_PATH STREQUAL "")
message(FATAL_ERROR "No gflags static libraries specified! Set the GFLAGS_BASE_PATH variable!")
endif()
@@ -113,9 +122,11 @@ else()
# In the case of ROS builds, glog will likely be found at a higher level.
# We want to link against that version in that case.
set(GLOG_PREFER_EXPORTED_GLOG_CMAKE_CONFIGURATION FALSE)
+
if(NOT Glog_FOUND)
find_package(Glog REQUIRED)
endif()
+
find_package(gflags REQUIRED)
find_package(SQLite3 REQUIRED)
@@ -126,13 +137,6 @@ endif()
############
# INCLUDES #
############
-message(INFO "SQLite3_LIBRARIES: ${SQLite3_LIBRARIES}\n"
- "GLOG_LIBRARIES: ${GLOG_LIBRARIES}\n"
- "gflags_LIBRARIES: ${gflags_LIBRARIES}\n"
- "SQLite3_INCLUDE_DIRS: ${SQLite3_INCLUDE_DIRS}\n"
- "GLOG_INCLUDE_DIRS: ${GLOG_INCLUDE_DIRS}\n"
- "gflags_INCLUDE_DIRS: ${gflags_INCLUDE_DIRS}")
-
# Include dirs
include_directories(${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
include_directories(include)
@@ -147,50 +151,53 @@ include_directories(${GLOG_INCLUDE_DIRS} ${gflags_INCLUDE_DIRS})
# We compile the first *without* the separable code flag, and the second with. This is the only way
# I've managed to get things working so far.
add_library(nvblox_gpu_hash STATIC
- src/gpu_hash/cuda/gpu_layer_view.cu
- src/gpu_hash/cuda/gpu_set.cu
+ src/core/error_check.cu
src/utils/timing.cpp
src/utils/nvtx_ranges.cpp
- src/core/cuda/error_check.cu
+ src/gpu_hash/gpu_layer_view.cu
+ src/gpu_hash/gpu_set.cu
)
add_dependencies(nvblox_gpu_hash nvblox_eigen stdgpu)
-target_link_libraries(nvblox_gpu_hash PUBLIC
+target_link_libraries(nvblox_gpu_hash
+ PUBLIC
stdgpu
nvblox_eigen
${CUDA_nvToolsExt_LIBRARY}
+ PRIVATE
+ ${GLOG_LIBRARIES}
+ ${gflags_LIBRARIES}
)
target_link_options(nvblox_gpu_hash PUBLIC ${nvblox_link_options})
add_library(nvblox_lib SHARED
- src/core/bounding_boxes.cpp
- src/core/bounding_spheres.cpp
- src/core/camera.cpp
- src/core/color.cpp
- src/core/cuda/blox.cu
- src/core/cuda/image_cuda.cu
- src/core/cuda/layer.cu
- src/core/cuda/warmup.cu
- src/core/image.cpp
- src/core/interpolation_3d.cpp
- src/core/mapper.cpp
- src/core/cuda/pointcloud.cu
- src/integrators/cuda/view_calculator.cu
- src/integrators/cuda/projective_tsdf_integrator.cu
- src/integrators/cuda/projective_color_integrator.cu
- src/integrators/cuda/esdf_integrator.cu
- src/integrators/esdf_integrator.cpp
- src/integrators/view_calculator.cpp
- src/integrators/projective_integrator_base.cpp
- src/rays/cuda/sphere_tracer.cu
- src/io/csv.cpp
+ src/core/warmup.cu
+ src/core/error_check.cu
+ src/map/blox.cu
+ src/map/layer.cu
+ src/sensors/camera.cpp
+ src/sensors/color.cpp
+ src/sensors/pointcloud.cu
+ src/sensors/image.cu
+ src/geometry/bounding_boxes.cpp
+ src/geometry/bounding_spheres.cpp
+ src/mapper/mapper.cpp
+ src/mapper/multi_mapper.cpp
+ src/integrators/view_calculator.cu
+ src/integrators/occupancy_decay_integrator.cu
+ src/integrators/projective_occupancy_integrator.cu
+ src/integrators/projective_tsdf_integrator.cu
+ src/integrators/projective_color_integrator.cu
+ src/integrators/esdf_integrator.cu
+ src/rays/sphere_tracer.cu
+ src/interpolation/interpolation_3d.cpp
src/io/mesh_io.cpp
src/io/ply_writer.cpp
src/io/layer_cake_io.cpp
src/io/pointcloud_io.cpp
+ src/io/image_io.cpp
src/mesh/marching_cubes.cu
src/mesh/mesh_block.cu
src/mesh/mesh_integrator_color.cu
- src/mesh/mesh_integrator.cpp
src/mesh/mesh_integrator.cu
src/mesh/mesh.cpp
src/primitives/primitives.cpp
@@ -200,11 +207,10 @@ add_library(nvblox_lib SHARED
src/serialization/serializer.cpp
src/serialization/sqlite_database.cpp
src/serialization/layer_type_register.cpp
- src/semantics/cuda/image_masker.cu
- src/semantics/cuda/image_projector.cu
- src/core/cuda/error_check.cu
+ src/semantics/image_masker.cu
+ src/semantics/image_projector.cu
)
-target_link_libraries(nvblox_lib
+target_link_libraries(nvblox_lib
PUBLIC
${GLOG_LIBRARIES}
${gflags_LIBRARIES}
@@ -229,6 +235,7 @@ if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.18)
set_property(TARGET nvblox_lib APPEND PROPERTY CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES})
set_property(TARGET nvblox_lib APPEND PROPERTY EXPORT_PROPERTIES CUDA_ARCHITECTURES)
endif()
+
set_property(TARGET nvblox_lib PROPERTY CMAKE_CUDA_FLAGS ${CMAKE_CUDA_FLAGS})
set_property(TARGET nvblox_lib APPEND PROPERTY EXPORT_PROPERTIES CMAKE_CUDA_FLAGS)
@@ -251,6 +258,7 @@ add_subdirectory(examples)
# TESTS #
#########
include(CTest)
+
if(BUILD_TESTING)
find_package(GTest REQUIRED)
enable_testing()
@@ -260,9 +268,7 @@ endif()
###############
# EXPERIMENTS #
###############
-if(BUILD_EXPERIMENTS)
- add_subdirectory(experiments)
-endif()
+add_subdirectory(experiments)
##########
# EXPORT #
diff --git a/nvblox/experiments/experiments/compare_branches/__init__.py b/nvblox/evaluation/COLCON_IGNORE
similarity index 100%
rename from nvblox/experiments/experiments/compare_branches/__init__.py
rename to nvblox/evaluation/COLCON_IGNORE
diff --git a/nvblox/evaluation/README.md b/nvblox/evaluation/README.md
new file mode 100644
index 00000000..a8a22a57
--- /dev/null
+++ b/nvblox/evaluation/README.md
@@ -0,0 +1,5 @@
+## Nvblox Evaluation Scripts
+
+Scripts for performing evaluation on nvblox.
+
+See the sub-folder READMEs for more info.
\ No newline at end of file
diff --git a/nvblox/experiments/experiments/threaded_image_loading/__init__.py b/nvblox/evaluation/nvblox_evaluation/__init__.py
similarity index 100%
rename from nvblox/experiments/experiments/threaded_image_loading/__init__.py
rename to nvblox/evaluation/nvblox_evaluation/__init__.py
diff --git a/nvblox/evaluation/replica/.gitignore b/nvblox/evaluation/nvblox_evaluation/compare_branch_timings/.gitignore
similarity index 100%
rename from nvblox/evaluation/replica/.gitignore
rename to nvblox/evaluation/nvblox_evaluation/compare_branch_timings/.gitignore
diff --git a/nvblox/evaluation/nvblox_evaluation/compare_branch_timings/README.md b/nvblox/evaluation/nvblox_evaluation/compare_branch_timings/README.md
new file mode 100644
index 00000000..4bd171b1
--- /dev/null
+++ b/nvblox/evaluation/nvblox_evaluation/compare_branch_timings/README.md
@@ -0,0 +1,10 @@
+# Compare Branch Timings
+
+This folder contains a script, `compare_branch_timings.py`, which generates a graph comparing nvblox timings on the 3DMatch dataset between the current branch and another branch.
+
+Typically this is used to assess how the changes on the current branch affect timing, relative to the state of the repo on `main`.
+
+An example of how to use this script to compare the timings across 5 runs of the MIT studyroom sequence:
+```bash
+./compare_branch_timings.py ~/datasets/3dmatch/sun3d-mit_76_studyroom-76-1studyroom2/ main --num_runs 5
+```
diff --git a/nvblox/experiments/python/nvblox_experiments/__init__.py b/nvblox/evaluation/nvblox_evaluation/compare_branch_timings/__init__.py
similarity index 100%
rename from nvblox/experiments/python/nvblox_experiments/__init__.py
rename to nvblox/evaluation/nvblox_evaluation/compare_branch_timings/__init__.py
diff --git a/nvblox/experiments/experiments/compare_branches/compare_branches.py b/nvblox/evaluation/nvblox_evaluation/compare_branch_timings/compare_branch_timings.py
similarity index 91%
rename from nvblox/experiments/experiments/compare_branches/compare_branches.py
rename to nvblox/evaluation/nvblox_evaluation/compare_branch_timings/compare_branch_timings.py
index c31762a0..296af494 100755
--- a/nvblox/experiments/experiments/compare_branches/compare_branches.py
+++ b/nvblox/evaluation/nvblox_evaluation/compare_branch_timings/compare_branch_timings.py
@@ -24,9 +24,8 @@
import git
-import nvblox_experiments.threedmatch as threedmatch
-
-from plot_comparison import plot_timings
+import helpers.run_threedmatch as threedmatch
+from helpers.plot_timing_comparison import plot_timings
def generate_timings(dataset_path: str, other_branch_or_hash: str, num_runs: int) -> None:
@@ -73,9 +72,9 @@ def generate_timings(dataset_path: str, other_branch_or_hash: str, num_runs: int
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
threedmatch_binary_path = os.path.join(
- build_dir, 'experiments/fuse_3dmatch')
+ build_dir, 'executables/fuse_3dmatch')
threedmatch.run_multiple(num_runs, threedmatch_binary_path, dataset_path,
- output_dir)
+ output_dir, warmup_run=True)
# Reset to the original branch
print('Checking out: ' + current_branch_name)
@@ -91,7 +90,7 @@ def generate_timings(dataset_path: str, other_branch_or_hash: str, num_runs: int
if __name__ == '__main__':
parser = argparse.ArgumentParser(
- description="Generate timing results comparing the current and another branch/hash.")
+ description="Generate a graph which compares the timers between the current and another branch/hash.")
parser.add_argument("dataset_base_path", metavar="dataset_base_path", type=str,
help="Path to the 3DMatch dataset root directory.")
parser.add_argument("other_branch_or_hash", metavar="other_branch_or_hash", type=str,
diff --git a/nvblox/evaluation/nvblox_evaluation/compare_branch_timings/helpers/__init__.py b/nvblox/evaluation/nvblox_evaluation/compare_branch_timings/helpers/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/nvblox/experiments/experiments/compare_branches/plot_comparison.py b/nvblox/evaluation/nvblox_evaluation/compare_branch_timings/helpers/plot_timing_comparison.py
similarity index 98%
rename from nvblox/experiments/experiments/compare_branches/plot_comparison.py
rename to nvblox/evaluation/nvblox_evaluation/compare_branch_timings/helpers/plot_timing_comparison.py
index fa8968ee..cefe87df 100644
--- a/nvblox/experiments/experiments/compare_branches/plot_comparison.py
+++ b/nvblox/evaluation/nvblox_evaluation/compare_branch_timings/helpers/plot_timing_comparison.py
@@ -26,7 +26,7 @@
import matplotlib.pyplot as plt
from matplotlib import rcParams
-from nvblox_experiments.timing import get_timings_as_dataframe
+from nvblox_evaluation.evaluation_utils.parse_nvblox_timing import get_timings_as_dataframe
rcParams.update({'figure.autolayout': True})
diff --git a/nvblox/experiments/python/nvblox_experiments/threedmatch.py b/nvblox/evaluation/nvblox_evaluation/compare_branch_timings/helpers/run_threedmatch.py
similarity index 100%
rename from nvblox/experiments/python/nvblox_experiments/threedmatch.py
rename to nvblox/evaluation/nvblox_evaluation/compare_branch_timings/helpers/run_threedmatch.py
diff --git a/nvblox/evaluation/nvblox_evaluation/evaluation_utils/__init__.py b/nvblox/evaluation/nvblox_evaluation/evaluation_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/nvblox/evaluation/replica/evaluation_utils/esdf_evaluation.py b/nvblox/evaluation/nvblox_evaluation/evaluation_utils/esdf_evaluation.py
similarity index 98%
rename from nvblox/evaluation/replica/evaluation_utils/esdf_evaluation.py
rename to nvblox/evaluation/nvblox_evaluation/evaluation_utils/esdf_evaluation.py
index 3688a12f..4ce955b6 100644
--- a/nvblox/evaluation/replica/evaluation_utils/esdf_evaluation.py
+++ b/nvblox/evaluation/nvblox_evaluation/evaluation_utils/esdf_evaluation.py
@@ -19,7 +19,7 @@
import open3d as o3d
-from evaluation_utils.voxel_grid import VoxelGrid
+from nvblox_evaluation.evaluation_utils.voxel_grid import VoxelGrid
def generate_esdf_from_mesh(gt_mesh: o3d.geometry.TriangleMesh, points_xyz: np.ndarray) -> VoxelGrid:
diff --git a/nvblox/evaluation/replica/timing.py b/nvblox/evaluation/nvblox_evaluation/evaluation_utils/parse_nvblox_timing.py
similarity index 100%
rename from nvblox/evaluation/replica/timing.py
rename to nvblox/evaluation/nvblox_evaluation/evaluation_utils/parse_nvblox_timing.py
diff --git a/nvblox/evaluation/replica/evaluation_utils/quad_mesh.py b/nvblox/evaluation/nvblox_evaluation/evaluation_utils/quad_mesh.py
similarity index 100%
rename from nvblox/evaluation/replica/evaluation_utils/quad_mesh.py
rename to nvblox/evaluation/nvblox_evaluation/evaluation_utils/quad_mesh.py
diff --git a/nvblox/evaluation/replica/evaluation_utils/surface_evaluation.py b/nvblox/evaluation/nvblox_evaluation/evaluation_utils/surface_evaluation.py
similarity index 100%
rename from nvblox/evaluation/replica/evaluation_utils/surface_evaluation.py
rename to nvblox/evaluation/nvblox_evaluation/evaluation_utils/surface_evaluation.py
diff --git a/nvblox/evaluation/replica/evaluation_utils/voxel_grid.py b/nvblox/evaluation/nvblox_evaluation/evaluation_utils/voxel_grid.py
similarity index 100%
rename from nvblox/evaluation/replica/evaluation_utils/voxel_grid.py
rename to nvblox/evaluation/nvblox_evaluation/evaluation_utils/voxel_grid.py
diff --git a/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/.gitignore b/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/.gitignore
new file mode 100644
index 00000000..6caf68af
--- /dev/null
+++ b/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/.gitignore
@@ -0,0 +1 @@
+output
\ No newline at end of file
diff --git a/nvblox/evaluation/replica/README.md b/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/README.md
similarity index 100%
rename from nvblox/evaluation/replica/README.md
rename to nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/README.md
diff --git a/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/__init__.py b/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/nvblox/evaluation/replica/replica.py b/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/replica.py
similarity index 98%
rename from nvblox/evaluation/replica/replica.py
rename to nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/replica.py
index 792ad80e..06440121 100755
--- a/nvblox/evaluation/replica/replica.py
+++ b/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/replica.py
@@ -54,7 +54,7 @@ def get_default_fuse_replica_binary_path() -> Path:
Path: Path to the default fuse_replica binary.
"""
script_dir = Path(__file__).resolve().parent
- return script_dir.parents[1] / 'build' / 'executables' / 'fuse_replica'
+ return script_dir.parents[2] / 'build' / 'executables' / 'fuse_replica'
def get_dataset_name_from_groundtruth_mesh_path(groundtruth_mesh_path: Path) -> str:
diff --git a/nvblox/evaluation/replica/replica_esdf_evaluation.py b/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/replica_esdf_evaluation.py
similarity index 97%
rename from nvblox/evaluation/replica/replica_esdf_evaluation.py
rename to nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/replica_esdf_evaluation.py
index 18b6942b..d0e61f9c 100755
--- a/nvblox/evaluation/replica/replica_esdf_evaluation.py
+++ b/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/replica_esdf_evaluation.py
@@ -25,9 +25,9 @@
import plotly.express as px
import open3d as o3d
-from evaluation_utils.voxel_grid import VoxelGrid
-from evaluation_utils import esdf_evaluation
-from evaluation_utils import quad_mesh
+from nvblox_evaluation.evaluation_utils.voxel_grid import VoxelGrid
+from nvblox_evaluation.evaluation_utils import esdf_evaluation
+from nvblox_evaluation.evaluation_utils import quad_mesh
from replica_reconstruction import replica_reconstruction
import replica
diff --git a/nvblox/evaluation/replica/replica_reconstruction.py b/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/replica_reconstruction.py
similarity index 97%
rename from nvblox/evaluation/replica/replica_reconstruction.py
rename to nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/replica_reconstruction.py
index 795c21f8..0314b74f 100755
--- a/nvblox/evaluation/replica/replica_reconstruction.py
+++ b/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/replica_reconstruction.py
@@ -25,7 +25,7 @@
import pandas as pd
import replica
-import timing
+from nvblox_evaluation.evaluation_utils.parse_nvblox_timing import get_timings_as_dataframe
def replica_reconstruction(dataset_path: Path,
@@ -89,7 +89,7 @@ def replica_reconstruction(dataset_path: Path,
mesh_frame_subsampling_flag, f"{mesh_frame_subsampling}"])
# Extract the means of the timers
- timings_df = timing.get_timings_as_dataframe(timing_path)
+ timings_df = get_timings_as_dataframe(timing_path)
means_series = timings_df['mean']
means_series.index = ['mean/' + row_name for row_name in means_series.index]
total_series = timings_df['total_time']
diff --git a/nvblox/evaluation/replica/replica_surface_evaluation.py b/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/replica_surface_evaluation.py
similarity index 98%
rename from nvblox/evaluation/replica/replica_surface_evaluation.py
rename to nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/replica_surface_evaluation.py
index 25065cbc..fdfa0bc7 100755
--- a/nvblox/evaluation/replica/replica_surface_evaluation.py
+++ b/nvblox/evaluation/nvblox_evaluation/replica_reconstruction_evaluation/replica_surface_evaluation.py
@@ -23,8 +23,8 @@
import numpy as np
import open3d as o3d
-from evaluation_utils import quad_mesh
-from evaluation_utils import surface_evaluation
+from nvblox_evaluation.evaluation_utils import quad_mesh
+from nvblox_evaluation.evaluation_utils import surface_evaluation
from replica_reconstruction import replica_reconstruction
import replica
diff --git a/nvblox/evaluation/requirements.txt b/nvblox/evaluation/requirements.txt
new file mode 100644
index 00000000..7dbdc2b8
--- /dev/null
+++ b/nvblox/evaluation/requirements.txt
@@ -0,0 +1,7 @@
+matplotlib
+numpy
+plotly
+open3d
+pandas
+scipy
+trimesh
diff --git a/nvblox/evaluation/setup.py b/nvblox/evaluation/setup.py
new file mode 100644
index 00000000..3e9f4ed7
--- /dev/null
+++ b/nvblox/evaluation/setup.py
@@ -0,0 +1,17 @@
+from setuptools import setup, find_packages
+
+with open('requirements.txt', 'r') as f:
+ install_requires = [line.strip() for line in f.readlines() if line.strip()]
+
+setup(
+ name='nvblox_evaluation',
+ version='0.0.0',
+ description='Scripts for evaluating nvblox.',
+ author='nvblox team.',
+ author_email='amillane@nvidia.com + remos@nvidia.com',
+ long_description=open('README.md').read(),
+ long_description_content_type='text/markdown',
+ install_requires=install_requires,
+ include_package_data=True,
+ packages=find_packages()
+)
diff --git a/nvblox/examples/src/esdf_query.cu b/nvblox/examples/src/esdf_query.cu
index 1e959279..5d2e6a96 100644
--- a/nvblox/examples/src/esdf_query.cu
+++ b/nvblox/examples/src/esdf_query.cu
@@ -13,17 +13,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
-#include
#include
+#include
#include "nvblox/core/indexing.h"
-#include "nvblox/core/layer.h"
-#include "nvblox/core/mapper.h"
#include "nvblox/core/types.h"
-#include "nvblox/core/voxels.h"
-#include "nvblox/gpu_hash/cuda/gpu_indexing.cuh"
+#include "nvblox/gpu_hash/internal/cuda/gpu_indexing.cuh"
#include "nvblox/io/ply_writer.h"
#include "nvblox/io/pointcloud_io.h"
+#include "nvblox/map/layer.h"
+#include "nvblox/map/voxels.h"
+#include "nvblox/mapper/mapper.h"
#include "nvblox/primitives/scene.h"
#include "nvblox/utils/timing.h"
@@ -40,14 +40,14 @@ class EsdfQueryExample {
float voxel_size_ = 0.10;
// Mapping class which contains all the relevant layers and integrators.
- std::unique_ptr<RgbdMapper> mapper_;
+ std::unique_ptr<Mapper> mapper_;
// A simulation scene, used in place of real data.
primitives::Scene scene_;
};
void EsdfQueryExample::createMap() {
- mapper_.reset(new RgbdMapper(voxel_size_, MemoryType::kDevice));
+ mapper_.reset(new Mapper(voxel_size_, MemoryType::kDevice));
// Create a map that's a box with a sphere in the middle.
scene_.aabb() = AxisAlignedBoundingBox(Vector3f(-5.5f, -5.5f, -0.5f),
@@ -64,7 +64,7 @@ void EsdfQueryExample::createMap() {
// frames).
// We need to create a temp layer unfortunately.
TsdfLayer gt_tsdf(voxel_size_, MemoryType::kHost);
- scene_.generateSdfFromScene(4 * voxel_size_, &gt_tsdf);
+ scene_.generateLayerFromScene(4 * voxel_size_, &gt_tsdf);
mapper_->tsdf_layer() = std::move(gt_tsdf);
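To illustrate the renamed API used in this example (`Mapper` replacing `RgbdMapper`, `generateLayerFromScene` replacing `generateSdfFromScene`), here is a minimal sketch, not part of the patch, of building a synthetic TSDF and updating the ESDF; the voxel size and bounding box values simply mirror the example above:

```cpp
#include <utility>

#include "nvblox/nvblox.h"
#include "nvblox/primitives/scene.h"

using namespace nvblox;

void buildSyntheticMap() {
  const float voxel_size_m = 0.10f;
  Mapper mapper(voxel_size_m, MemoryType::kDevice);

  // A simulated scene, used in place of real data (values mirror the example).
  primitives::Scene scene;
  scene.aabb() = AxisAlignedBoundingBox(Vector3f(-5.5f, -5.5f, -0.5f),
                                        Vector3f(5.5f, 5.5f, 5.5f));

  // Generate a ground-truth TSDF layer on the host and hand it to the mapper.
  TsdfLayer gt_tsdf(voxel_size_m, MemoryType::kHost);
  scene.generateLayerFromScene(4.0f * voxel_size_m, &gt_tsdf);
  mapper.tsdf_layer() = std::move(gt_tsdf);

  // Update the ESDF from the TSDF (the same entry point the Fuser uses).
  mapper.updateEsdf();
}
```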
diff --git a/nvblox/examples/src/load_map_and_mesh.cpp b/nvblox/examples/src/load_map_and_mesh.cpp
index 5186b31d..21ce4127 100644
--- a/nvblox/examples/src/load_map_and_mesh.cpp
+++ b/nvblox/examples/src/load_map_and_mesh.cpp
@@ -15,11 +15,11 @@ limitations under the License.
*/
#include "gflags/gflags.h"
-#include "glog/logging.h"
+#include "nvblox/utils/logging.h"
#include "nvblox/nvblox.h"
-#include "nvblox/serialization/layer_type_register.h"
+#include "nvblox/serialization/internal/layer_type_register.h"
using namespace nvblox;
diff --git a/nvblox/executables/include/nvblox/datasets/3dmatch.h b/nvblox/executables/include/nvblox/datasets/3dmatch.h
index 2086d29e..82c4c508 100644
--- a/nvblox/executables/include/nvblox/datasets/3dmatch.h
+++ b/nvblox/executables/include/nvblox/datasets/3dmatch.h
@@ -34,10 +34,22 @@ std::unique_ptr createFuser(const std::string base_path,
///@brief A class for loading 3DMatch data
class DataLoader : public RgbdDataLoaderInterface {
public:
+ /// Constructors not intended to be called directly; use the factory
+ /// DataLoader::create();
DataLoader(const std::string& base_path, const int seq_id,
- bool multithreaded = true);
+ bool multithreaded = true);
virtual ~DataLoader() = default;
+ /// Builds a DataLoader
+ ///@param base_path Path to the 3DMatch dataset sequence base folder.
+ ///@param seq_id Sequence index of the dataset to be loaded.
+ ///@param multithreaded Whether or not to multi-thread image loading
+ ///@return std::unique_ptr<DataLoader> The dataset loader. May be nullptr if
+ /// construction fails.
+ static std::unique_ptr<DataLoader> create(const std::string& base_path,
+ const int seq_id,
+ bool multithreaded = true);
+
/// Interface for a function that loads the next frames in a dataset
///@param[out] depth_frame_ptr The loaded depth frame.
///@param[out] T_L_C_ptr Transform from Camera to the Layer frame.
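As an illustrative sketch (not part of the patch) of how the new factory is intended to be used by callers, assuming `DataLoader` lives in the `datasets::threedmatch` namespace alongside `createFuser()`, and with a placeholder dataset path; error handling mirrors the updated `createFuser()` implementations later in this diff:

```cpp
#include "nvblox/datasets/3dmatch.h"
#include "nvblox/utils/logging.h"

using namespace nvblox;

int main() {
  // The factory returns nullptr if the loader could not be set up,
  // e.g. if the camera intrinsics file is missing.
  auto loader = datasets::threedmatch::DataLoader::create(
      "/datasets/3dmatch/sun3d-mit_76_studyroom-76-1studyroom2",  // placeholder path
      /*seq_id=*/1);
  if (!loader) {
    LOG(FATAL) << "Dataset loader construction failed.";
  }
  return 0;
}
```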
diff --git a/nvblox/executables/include/nvblox/datasets/data_loader.h b/nvblox/executables/include/nvblox/datasets/data_loader.h
index ce3492d7..682cd701 100644
--- a/nvblox/executables/include/nvblox/datasets/data_loader.h
+++ b/nvblox/executables/include/nvblox/datasets/data_loader.h
@@ -15,10 +15,10 @@ limitations under the License.
*/
#pragma once
-#include "nvblox/core/camera.h"
-#include "nvblox/core/image.h"
#include "nvblox/core/types.h"
#include "nvblox/datasets/image_loader.h"
+#include "nvblox/sensors/camera.h"
+#include "nvblox/sensors/image.h"
namespace nvblox {
namespace datasets {
@@ -27,8 +27,9 @@ enum class DataLoadResult { kSuccess, kBadFrame, kNoMoreData };
class RgbdDataLoaderInterface {
public:
- RgbdDataLoaderInterface(std::unique_ptr<ImageLoader<DepthImage>>&& depth_image_loader,
- std::unique_ptr<ImageLoader<ColorImage>>&& color_image_loader)
+ RgbdDataLoaderInterface(
+ std::unique_ptr<ImageLoader<DepthImage>>&& depth_image_loader,
+ std::unique_ptr<ImageLoader<ColorImage>>&& color_image_loader)
: depth_image_loader_(std::move(depth_image_loader)),
color_image_loader_(std::move(color_image_loader)) {}
virtual ~RgbdDataLoaderInterface() = default;
@@ -48,6 +49,11 @@ class RgbdDataLoaderInterface {
// Objects which do (multithreaded) image loading.
std::unique_ptr<ImageLoader<DepthImage>> depth_image_loader_;
std::unique_ptr<ImageLoader<ColorImage>> color_image_loader_;
+
+ // Indicates if the dataset loader was constructed in a state that was good to
+ // go. Initializes to true, so child class constructors indicate failure by
+ // setting it to false.
+ bool setup_success_ = true;
};
} // namespace datasets
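As a sketch of the contract this interface now implies (not part of the patch): a derived loader's constructor records failure in `setup_success_`, its static `create()` turns that into a null return, and callers drive the loader via `loadNext()` until the data runs out. The image, transform and camera types are assumed default-constructible here, as in the existing executables:

```cpp
#include "nvblox/datasets/replica.h"
#include "nvblox/utils/logging.h"

using namespace nvblox;

void fuseAllFrames(const std::string& base_path) {
  auto loader = datasets::replica::DataLoader::create(base_path);
  if (!loader) {
    LOG(FATAL) << "Loader setup failed (setup_success_ was false).";
  }
  DepthImage depth_frame;  // assumption: default-constructible image types
  ColorImage color_frame;
  Transform T_L_C;
  Camera camera;
  // Stop on kBadFrame or kNoMoreData; a real caller may want to skip bad frames.
  while (loader->loadNext(&depth_frame, &T_L_C, &camera, &color_frame) ==
         datasets::DataLoadResult::kSuccess) {
    // Integrate the frame here (e.g. via Mapper::integrateDepth).
  }
}
```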
diff --git a/nvblox/executables/include/nvblox/datasets/image_loader.h b/nvblox/executables/include/nvblox/datasets/image_loader.h
index d90b625d..d71fd05a 100644
--- a/nvblox/executables/include/nvblox/datasets/image_loader.h
+++ b/nvblox/executables/include/nvblox/datasets/image_loader.h
@@ -19,8 +19,8 @@ limitations under the License.
#include
#include
-#include "nvblox/core/image.h"
#include "nvblox/core/types.h"
+#include "nvblox/sensors/image.h"
namespace nvblox {
namespace datasets {
@@ -72,10 +72,10 @@ using ImageOptional = std::pair;
template <typename ImageType>
class MultiThreadedImageLoader : public ImageLoader<ImageType> {
public:
- MultiThreadedImageLoader(IndexToFilepathFunction index_to_filepath,
- int num_threads,
- MemoryType memory_type = kDefaultImageMemoryType,
- float depth_image_scaling_factor = kDefaultUintDepthScaleFactor);
+ MultiThreadedImageLoader(
+ IndexToFilepathFunction index_to_filepath, int num_threads,
+ MemoryType memory_type = kDefaultImageMemoryType,
+ float depth_image_scaling_factor = kDefaultUintDepthScaleFactor);
~MultiThreadedImageLoader();
bool getNextImage(ImageType* image_ptr) override;
diff --git a/nvblox/executables/include/nvblox/datasets/redwood.h b/nvblox/executables/include/nvblox/datasets/redwood.h
index 2ed1b342..31f29647 100644
--- a/nvblox/executables/include/nvblox/datasets/redwood.h
+++ b/nvblox/executables/include/nvblox/datasets/redwood.h
@@ -34,9 +34,19 @@ std::unique_ptr createFuser(const std::string base_path);
///@brief A class for loading Redwood data
class DataLoader : public RgbdDataLoaderInterface {
public:
+ /// Constructors not intended to be called directly; use the factory
+ /// DataLoader::create();
DataLoader(const std::string& base_path, bool multithreaded = true);
virtual ~DataLoader() = default;
+ /// Builds a DataLoader
+ ///@param base_path Path to the Redwood dataset sequence base folder.
+ ///@param multithreaded Whether or not to multi-thread image loading
+ ///@return std::unique_ptr<DataLoader> The dataset loader. May be nullptr if
+ /// construction fails.
+ static std::unique_ptr<DataLoader> create(const std::string& base_path,
+ bool multithreaded = true);
+
/// Interface for a function that loads the next frames in a dataset
///@param[out] depth_frame_ptr The loaded depth frame.
///@param[out] T_L_C_ptr Transform from Camera to the Layer frame.
diff --git a/nvblox/executables/include/nvblox/datasets/replica.h b/nvblox/executables/include/nvblox/datasets/replica.h
index 24015c4d..939647aa 100644
--- a/nvblox/executables/include/nvblox/datasets/replica.h
+++ b/nvblox/executables/include/nvblox/datasets/replica.h
@@ -15,9 +15,9 @@ limitations under the License.
*/
#pragma once
+#include
#include
#include
-#include
#include "nvblox/core/types.h"
#include "nvblox/datasets/data_loader.h"
@@ -34,9 +34,19 @@ std::unique_ptr createFuser(const std::string base_path);
///@brief A class for loading Replica data
class DataLoader : public RgbdDataLoaderInterface {
public:
+ /// Constructors not intended to be called directly; use the factory
+ /// DataLoader::create();
DataLoader(const std::string& base_path, bool multithreaded = true);
virtual ~DataLoader() = default;
+ /// Builds a DataLoader
+ ///@param base_path Path to the Replica dataset sequence base folder.
+ ///@param multithreaded Whether or not to multi-thread image loading
+ ///@return std::unique_ptr<DataLoader> The dataset loader. May be nullptr if
+ /// construction fails.
+ static std::unique_ptr<DataLoader> create(const std::string& base_path,
+ bool multithreaded = true);
+
/// Interface for a function that loads the next frames in a dataset
///@param[out] depth_frame_ptr The loaded depth frame.
///@param[out] T_L_C_ptr Transform from Camera to the Layer frame.
@@ -49,6 +59,7 @@ class DataLoader : public RgbdDataLoaderInterface {
ColorImage* color_frame_ptr = nullptr) override;
protected:
+ // Base path of the dataset
const std::string base_path_;
// Cached camera
diff --git a/nvblox/executables/include/nvblox/executables/fuser.h b/nvblox/executables/include/nvblox/executables/fuser.h
index 7ac2a6f2..ffac81aa 100644
--- a/nvblox/executables/include/nvblox/executables/fuser.h
+++ b/nvblox/executables/include/nvblox/executables/fuser.h
@@ -15,24 +15,25 @@ limitations under the License.
*/
#pragma once
+#include
+
#include
#include
-#include
-
-#include "nvblox/core/blox.h"
-#include "nvblox/core/layer.h"
-#include "nvblox/core/layer_cake.h"
-#include "nvblox/core/mapper.h"
-#include "nvblox/core/voxels.h"
#include "nvblox/datasets/data_loader.h"
#include "nvblox/gpu_hash/gpu_layer_view.h"
#include "nvblox/integrators/esdf_integrator.h"
#include "nvblox/integrators/projective_color_integrator.h"
#include "nvblox/integrators/projective_tsdf_integrator.h"
+#include "nvblox/map/blox.h"
+#include "nvblox/map/layer.h"
+#include "nvblox/map/layer_cake.h"
+#include "nvblox/map/voxels.h"
+#include "nvblox/mapper/mapper.h"
#include "nvblox/mesh/mesh_block.h"
#include "nvblox/mesh/mesh_integrator.h"
#include "nvblox/rays/sphere_tracer.h"
+#include "nvblox/utils/logging.h"
namespace nvblox {
@@ -49,19 +50,23 @@ class Fuser {
// Set various settings.
void setVoxelSize(float voxel_size);
- void setTsdfFrameSubsampling(int subsample);
+ void setProjectiveFrameSubsampling(int subsample);
void setColorFrameSubsampling(int subsample);
void setMeshFrameSubsampling(int subsample);
void setEsdfFrameSubsampling(int subsample);
- void setEsdfMode(RgbdMapper::EsdfMode esdf_mode);
+ void setEsdfMode(Mapper::EsdfMode esdf_mode);
// Integrate certain layers.
bool integrateFrame(const int frame_number);
bool integrateFrames();
void updateEsdf();
+ // Output a pointcloud tsdf as PLY file.
+ bool outputTsdfPointcloudPly();
+ // Output a pointcloud occupancy as PLY file.
+ bool outputOccupancyPointcloudPly();
// Output a pointcloud ESDF as PLY file.
- bool outputPointcloudPly();
+ bool outputESDFPointcloudPly();
// Output a file with the mesh.
bool outputMeshPly();
// Output timings to a file
@@ -70,7 +75,7 @@ class Fuser {
bool outputMapToFile();
// Get the mapper (useful for experiments where we modify mapper settings)
- RgbdMapper& mapper();
+ Mapper& mapper();
// Dataset settings.
int num_frames_to_integrate_ = std::numeric_limits<int>::max();
@@ -78,9 +83,11 @@ class Fuser {
// Params
float voxel_size_m_ = 0.05;
- int tsdf_frame_subsampling_ = 1;
+ ProjectiveLayerType projective_layer_type_ = ProjectiveLayerType::kTsdf;
+ int projective_frame_subsampling_ = 1;
int color_frame_subsampling_ = 1;
- // By default we just do the mesh and esdf once at the end (if output paths exist)
+ // By default we just do the mesh and esdf once at the end
+ // (if output paths exist)
int mesh_frame_subsampling_ = -1;
int esdf_frame_subsampling_ = -1;
@@ -90,14 +97,16 @@ class Fuser {
float z_slice_ = 0.75f;
// ESDF mode
- RgbdMapper::EsdfMode esdf_mode_ = RgbdMapper::EsdfMode::k3D;
+ Mapper::EsdfMode esdf_mode_ = Mapper::EsdfMode::k3D;
// Mapper - Contains map layers and integrators
- std::unique_ptr<RgbdMapper> mapper_;
+ std::unique_ptr<Mapper> mapper_;
// Output paths
std::string timing_output_path_;
+ std::string tsdf_output_path_;
std::string esdf_output_path_;
+ std::string occupancy_output_path_;
std::string mesh_output_path_;
std::string map_output_path_;
};
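To show how the renamed Fuser entry points fit together, a hedged sketch (not part of the patch; the dataset path is a placeholder and the setter values are illustrative), mirroring the flag handling added to fuser.cpp later in this diff:

```cpp
#include "gflags/gflags.h"
#include "nvblox/datasets/3dmatch.h"
#include "nvblox/executables/fuser.h"
#include "nvblox/utils/logging.h"

using namespace nvblox;

int main(int argc, char* argv[]) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);

  std::unique_ptr<Fuser> fuser =
      datasets::threedmatch::createFuser("/datasets/3dmatch", /*seq_id=*/1);  // placeholder path
  if (!fuser) {
    LOG(FATAL) << "Creation of the Fuser failed";
  }

  // Programmatic equivalents of the new/renamed command-line flags:
  fuser->setProjectiveFrameSubsampling(1);    // was setTsdfFrameSubsampling
  fuser->setEsdfMode(Mapper::EsdfMode::k3D);  // was RgbdMapper::EsdfMode::k3D

  // run() integrates all frames and writes any requested outputs
  // (mesh, ESDF/TSDF/occupancy pointclouds, serialized map).
  return fuser->run();
}
```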
diff --git a/nvblox/executables/src/datasets/3dmatch.cpp b/nvblox/executables/src/datasets/3dmatch.cpp
index 70aa508a..3d8869fb 100644
--- a/nvblox/executables/src/datasets/3dmatch.cpp
+++ b/nvblox/executables/src/datasets/3dmatch.cpp
@@ -15,7 +15,7 @@ limitations under the License.
*/
#include "nvblox/datasets/3dmatch.h"
-#include
+#include "nvblox/utils/logging.h"
#include
#include
@@ -116,10 +116,26 @@ std::unique_ptr> createColorImageLoader(
std::unique_ptr<Fuser> createFuser(const std::string base_path,
const int seq_id) {
- auto data_loader = std::make_unique<DataLoader>(base_path, seq_id);
+ auto data_loader = DataLoader::create(base_path, seq_id);
+ if (!data_loader) {
+ return std::unique_ptr<Fuser>();
+ }
return std::make_unique<Fuser>(std::move(data_loader));
}
+std::unique_ptr<DataLoader> DataLoader::create(const std::string& base_path,
+ const int seq_id,
+ bool multithreaded) {
+ // Construct a dataset loader but only return it if everything worked.
+ auto dataset_loader =
+ std::make_unique<DataLoader>(base_path, seq_id, multithreaded);
+ if (dataset_loader->setup_success_) {
+ return dataset_loader;
+ } else {
+ return std::unique_ptr<DataLoader>();
+ }
+}
+
DataLoader::DataLoader(const std::string& base_path, const int seq_id,
bool multithreaded)
: RgbdDataLoaderInterface(threedmatch::internal::createDepthImageLoader(
@@ -140,6 +156,7 @@ DataLoader::DataLoader(const std::string& base_path, const int seq_id,
DataLoadResult DataLoader::loadNext(DepthImage* depth_frame_ptr,
Transform* T_L_C_ptr, Camera* camera_ptr,
ColorImage* color_frame_ptr) {
+ CHECK(setup_success_);
CHECK_NOTNULL(depth_frame_ptr);
CHECK_NOTNULL(T_L_C_ptr);
CHECK_NOTNULL(camera_ptr);
@@ -194,7 +211,7 @@ DataLoadResult DataLoader::loadNext(DepthImage* depth_frame_ptr,
return DataLoadResult::kNoMoreData;
}
- // Rotate the world frame since Y is up in the normal 3D match dasets.
+ // Rotate the world frame since Y is up in the normal 3D match datasets.
Eigen::Quaternionf q_L_O =
Eigen::Quaternionf::FromTwoVectors(Vector3f(0, 1, 0), Vector3f(0, 0, 1));
*T_L_C_ptr = q_L_O * T_O_C;
diff --git a/nvblox/executables/src/datasets/image_loader.cpp b/nvblox/executables/src/datasets/image_loader.cpp
index b0eccef3..38172d8b 100644
--- a/nvblox/executables/src/datasets/image_loader.cpp
+++ b/nvblox/executables/src/datasets/image_loader.cpp
@@ -70,7 +70,8 @@ bool load8BitColorImage(const std::string& filename,
if (image_data == nullptr) {
return false;
}
- // Currently we only support loading 3 channel (rgb) or 4 channel (rgba) images.
+ // Currently we only support loading 3 channel (rgb) or 4 channel (rgba)
+ // images.
CHECK(num_channels == 3 || num_channels == 4);
CHECK_EQ(sizeof(Color), 4 * sizeof(uint8_t))
diff --git a/nvblox/executables/src/datasets/redwood.cpp b/nvblox/executables/src/datasets/redwood.cpp
index 69347010..146e49bc 100644
--- a/nvblox/executables/src/datasets/redwood.cpp
+++ b/nvblox/executables/src/datasets/redwood.cpp
@@ -15,7 +15,7 @@ limitations under the License.
*/
#include "nvblox/datasets/redwood.h"
-#include
+#include "nvblox/utils/logging.h"
#include
#include
@@ -105,10 +105,24 @@ std::unique_ptr> createColorImageLoader(
} // namespace internal
std::unique_ptr<Fuser> createFuser(const std::string base_path) {
- auto data_loader = std::make_unique<DataLoader>(base_path);
+ auto data_loader = DataLoader::create(base_path);
+ if (!data_loader) {
+ return std::unique_ptr<Fuser>();
+ }
return std::make_unique<Fuser>(std::move(data_loader));
}
+std::unique_ptr<DataLoader> DataLoader::create(const std::string& base_path,
+ bool multithreaded) {
+ // Construct a dataset loader but only return it if everything worked.
+ auto dataset_loader = std::make_unique<DataLoader>(base_path, multithreaded);
+ if (dataset_loader->setup_success_) {
+ return dataset_loader;
+ } else {
+ return std::unique_ptr<DataLoader>();
+ }
+}
+
DataLoader::DataLoader(const std::string& base_path, bool multithreaded)
: RgbdDataLoaderInterface(
internal::createDepthImageLoader(
@@ -137,6 +151,7 @@ DataLoader::DataLoader(const std::string& base_path, bool multithreaded)
DataLoadResult DataLoader::loadNext(DepthImage* depth_frame_ptr,
Transform* T_L_C_ptr, Camera* camera_ptr,
ColorImage* color_frame_ptr) {
+ CHECK(setup_success_);
CHECK_NOTNULL(depth_frame_ptr);
CHECK_NOTNULL(T_L_C_ptr);
CHECK_NOTNULL(camera_ptr);
diff --git a/nvblox/executables/src/datasets/replica.cpp b/nvblox/executables/src/datasets/replica.cpp
index 371f0ecb..085ee8db 100644
--- a/nvblox/executables/src/datasets/replica.cpp
+++ b/nvblox/executables/src/datasets/replica.cpp
@@ -15,7 +15,7 @@ limitations under the License.
*/
#include "nvblox/datasets/replica.h"
-#include
+#include "nvblox/utils/logging.h"
#include
#include
@@ -142,7 +142,10 @@ std::unique_ptr> createColorImageLoader(
} // namespace internal
std::unique_ptr<Fuser> createFuser(const std::string base_path) {
- auto data_loader = std::make_unique<DataLoader>(base_path);
+ auto data_loader = DataLoader::create(base_path);
+ if (!data_loader) {
+ return std::unique_ptr<Fuser>();
+ }
return std::make_unique<Fuser>(std::move(data_loader));
}
@@ -157,11 +160,26 @@ DataLoader::DataLoader(const std::string& base_path, bool multithreaded)
// We load the scale from camera file and reset the depth image loader to
// include it.
float inv_depth_image_scaling_factor;
- CHECK(replica::internal::parseCameraFromFile(
- replica::internal::getPathForCameraIntrinsics(base_path_), &camera_,
- &inv_depth_image_scaling_factor));
- depth_image_loader_ = replica::internal::createDepthImageLoader(
- base_path, 1.0f / inv_depth_image_scaling_factor, multithreaded);
+ if (replica::internal::parseCameraFromFile(
+ replica::internal::getPathForCameraIntrinsics(base_path_), &camera_,
+ &inv_depth_image_scaling_factor)) {
+ depth_image_loader_ = replica::internal::createDepthImageLoader(
+ base_path, 1.0f / inv_depth_image_scaling_factor, multithreaded);
+ setup_success_ = true;
+ } else {
+ setup_success_ = false;
+ }
+}
+
+std::unique_ptr<DataLoader> DataLoader::create(const std::string& base_path,
+ bool multithreaded) {
+ // Construct a dataset loader but only return it if everything worked.
+ auto dataset_loader = std::make_unique<DataLoader>(base_path, multithreaded);
+ if (dataset_loader->setup_success_) {
+ return dataset_loader;
+ } else {
+ return std::unique_ptr<DataLoader>();
+ }
}
/// Interface for a function that loads the next frames in a dataset
@@ -173,6 +191,7 @@ DataLoader::DataLoader(const std::string& base_path, bool multithreaded)
DataLoadResult DataLoader::loadNext(DepthImage* depth_frame_ptr,
Transform* T_L_C_ptr, Camera* camera_ptr,
ColorImage* color_frame_ptr) {
+ CHECK(setup_success_);
CHECK_NOTNULL(depth_frame_ptr);
CHECK_NOTNULL(T_L_C_ptr);
CHECK_NOTNULL(camera_ptr);
@@ -212,7 +231,7 @@ DataLoadResult DataLoader::loadNext(DepthImage* depth_frame_ptr,
} else {
if (!replica::internal::parseCameraFromFile(
replica::internal::getPathForCameraIntrinsics(base_path_), &camera_,
- &scale)) {
+ &scale)) {
LOG(INFO) << "Couldn't find camera params file";
return DataLoadResult::kNoMoreData;
}
diff --git a/nvblox/executables/src/fuse_3dmatch.cpp b/nvblox/executables/src/fuse_3dmatch.cpp
index b5edc03a..ef6aadaf 100644
--- a/nvblox/executables/src/fuse_3dmatch.cpp
+++ b/nvblox/executables/src/fuse_3dmatch.cpp
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
#include
-#include
+#include "nvblox/utils/logging.h"
#include "nvblox/datasets/3dmatch.h"
#include "nvblox/executables/fuser.h"
@@ -46,6 +46,9 @@ int main(int argc, char* argv[]) {
constexpr int seq_id = 1;
std::unique_ptr<Fuser> fuser =
datasets::threedmatch::createFuser(base_path, seq_id);
+ if (!fuser) {
+ LOG(FATAL) << "Creation of the Fuser failed";
+ }
// Mesh location (optional)
if (argc >= 3) {
diff --git a/nvblox/executables/src/fuse_redwood.cpp b/nvblox/executables/src/fuse_redwood.cpp
index cb5f8f07..e817fff4 100644
--- a/nvblox/executables/src/fuse_redwood.cpp
+++ b/nvblox/executables/src/fuse_redwood.cpp
@@ -14,7 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-
#include
#include
#include
@@ -23,13 +22,13 @@ limitations under the License.
#include
#include
-#include
+#include "nvblox/utils/logging.h"
-#include "nvblox/core/image.h"
#include "nvblox/core/types.h"
#include "nvblox/datasets/image_loader.h"
#include "nvblox/datasets/redwood.h"
#include "nvblox/executables/fuser.h"
+#include "nvblox/sensors/image.h"
using namespace nvblox;
@@ -49,6 +48,9 @@ int main(int argc, char* argv[]) {
// Fuser
std::unique_ptr<Fuser> fuser = datasets::redwood::createFuser(base_path);
+ if (!fuser) {
+ LOG(FATAL) << "Creation of the Fuser failed";
+ }
// Mesh location (optional)
if (argc >= 3) {
diff --git a/nvblox/executables/src/fuse_replica.cpp b/nvblox/executables/src/fuse_replica.cpp
index 2a9029ae..339d0cb1 100644
--- a/nvblox/executables/src/fuse_replica.cpp
+++ b/nvblox/executables/src/fuse_replica.cpp
@@ -14,7 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-
#include
#include
#include
@@ -23,13 +22,13 @@ limitations under the License.
#include
#include
-#include
+#include "nvblox/utils/logging.h"
-#include "nvblox/core/image.h"
#include "nvblox/core/types.h"
#include "nvblox/datasets/image_loader.h"
#include "nvblox/datasets/replica.h"
#include "nvblox/executables/fuser.h"
+#include "nvblox/sensors/image.h"
using namespace nvblox;
@@ -49,6 +48,9 @@ int main(int argc, char* argv[]) {
// Fuser
std::unique_ptr<Fuser> fuser = datasets::replica::createFuser(base_path);
+ if (!fuser) {
+ LOG(FATAL) << "Creation of the Fuser failed";
+ }
// Mesh location (optional)
if (argc >= 3) {
diff --git a/nvblox/executables/src/fuser.cpp b/nvblox/executables/src/fuser.cpp
index 0d2f16de..4f7d2680 100644
--- a/nvblox/executables/src/fuser.cpp
+++ b/nvblox/executables/src/fuser.cpp
@@ -16,7 +16,7 @@ limitations under the License.
#include "nvblox/executables/fuser.h"
#include
-#include
+#include "nvblox/utils/logging.h"
#include "nvblox/executables/fuser.h"
#include "nvblox/io/mesh_io.h"
@@ -26,6 +26,9 @@ limitations under the License.
// Layer params
DEFINE_double(voxel_size, 0.0f, "Voxel resolution in meters.");
+DEFINE_bool(use_occupancy_layer, false,
+ "Whether to use an occupancy grid for projective integration. If "
+ "the flag is set to false a tsdf layer is used.");
// Dataset flags
DEFINE_int32(num_frames, -1,
@@ -34,15 +37,19 @@ DEFINE_int32(num_frames, -1,
// The output paths
DEFINE_string(timing_output_path, "",
"File in which to save the timing results.");
+DEFINE_string(tsdf_output_path, "",
+ "File in which to save the TSDF pointcloud.");
+DEFINE_string(occupancy_output_path, "",
+ "File in which to save the occupancy pointcloud.");
DEFINE_string(esdf_output_path, "",
"File in which to save the ESDF pointcloud.");
DEFINE_string(mesh_output_path, "", "File in which to save the surface mesh.");
DEFINE_string(map_output_path, "", "File in which to save the serialize map.");
// Subsampling
-DEFINE_int32(tsdf_frame_subsampling, 0,
- "By what amount to subsample the TSDF frames. A subsample of 3 "
- "means only every 3rd frame is taken.");
+DEFINE_int32(projective_frame_subsampling, 0,
+ "By what amount to subsample the TSDF or occupancy frames. A "
+ "subsample of 3 means only every 3rd frame is taken.");
DEFINE_int32(color_frame_subsampling, 0,
"How much to subsample the color integration by.");
DEFINE_int32(mesh_frame_subsampling, 0,
@@ -50,15 +57,26 @@ DEFINE_int32(mesh_frame_subsampling, 0,
DEFINE_int32(esdf_frame_subsampling, 0,
"How much to subsample the ESDF integration by.");
-// TSDF Integrator settings
-DEFINE_double(tsdf_integrator_max_integration_distance_m, -1.0,
+// Projective Integrator settings (TSDF and occupancy)
+DEFINE_double(projective_integrator_max_integration_distance_m, -1.0,
"Maximum distance (in meters) from the camera at which to "
- "integrate data into the TSDF.");
-DEFINE_double(tsdf_integrator_truncation_distance_vox, -1.0,
+ "integrate data into the TSDF or occupancy grid.");
+DEFINE_double(projective_integrator_truncation_distance_vox, -1.0,
"Truncation band (in voxels).");
DEFINE_double(
tsdf_integrator_max_weight, -1.0,
"The maximum weight that a tsdf voxel can accumulate through integration.");
+DEFINE_double(free_region_occupancy_probability, -1.0,
+ "The inverse sensor model occupancy probability for voxels "
+ "observed as free space.");
+DEFINE_double(occupied_region_occupancy_probability, -1.0,
+ "The inverse sensor model occupancy probability for voxels "
+ "observed as occupied.");
+DEFINE_double(
+ unobserved_region_occupancy_probability, -1.0,
+ "The inverse sensor model occupancy probability for unobserved voxels.");
+DEFINE_double(occupied_region_half_width_m, -1.0,
+ "Half the width of the region which is consided as occupied.");
// Mesh integrator settings
DEFINE_double(mesh_integrator_min_weight, -1.0,
@@ -79,12 +97,36 @@ DEFINE_double(esdf_integrator_max_site_distance_vox, -1.0,
DEFINE_double(esdf_integrator_max_distance_m, -1.0,
"The maximum distance which we integrate ESDF distances out to.");
+// Integrator weighting scheme
+// NOTE(alexmillane): Only one of these should be true at once (we'll check for
+// that). By default all are false and we use the internal defaults.
+DEFINE_bool(weighting_scheme_constant, false,
+ "Integration weighting scheme: constant");
+DEFINE_bool(weighting_scheme_constant_dropoff, false,
+ "Integration weighting scheme: constant + dropoff");
+DEFINE_bool(weighting_scheme_inverse_square, false,
+ "Integration weighting scheme: inverse square");
+DEFINE_bool(weighting_scheme_inverse_square_dropoff, false,
+ "Integration weighting scheme: inverse square + dropoff");
+
namespace nvblox {
Fuser::Fuser(std::unique_ptr&& data_loader)
: data_loader_(std::move(data_loader)) {
- // NOTE(alexmillane): We require the voxel size before we construct the
- // mapper, so we grab this parameter first and separately.
+ // NOTE(alexmillane): We require the voxel size and projective layer variant
+ // before we construct the mapper, so we grab these parameters first and
+ // separately.
+ if (FLAGS_use_occupancy_layer) {
+ projective_layer_type_ = ProjectiveLayerType::kOccupancy;
+ LOG(INFO) << "Projective layer variant = Occupancy\n"
+ "Attention: ESDF and Mesh integration is not yet implemented "
+ "for occupancy.";
+ } else {
+ projective_layer_type_ = ProjectiveLayerType::kTsdf;
+ LOG(INFO) << "Projective layer variant = TSDF"
+ " (for occupancy set the use_occupancy_layer flag)";
+ }
+
if (!gflags::GetCommandLineFlagInfoOrDie("voxel_size").is_default) {
LOG(INFO) << "Command line parameter found: voxel_size = "
<< FLAGS_voxel_size;
@@ -92,13 +134,16 @@ Fuser::Fuser(std::unique_ptr&& data_loader)
}
// Initialize the mapper
- mapper_ = std::make_unique<RgbdMapper>(voxel_size_m_);
+ mapper_ = std::make_unique<Mapper>(voxel_size_m_, MemoryType::kDevice,
+ projective_layer_type_);
// Default parameters
- mapper_->mesh_integrator().min_weight(2.0f);
mapper_->color_integrator().max_integration_distance_m(5.0f);
mapper_->tsdf_integrator().max_integration_distance_m(5.0f);
mapper_->tsdf_integrator().view_calculator().raycast_subsampling_factor(4);
+ mapper_->occupancy_integrator().max_integration_distance_m(5.0f);
+ mapper_->occupancy_integrator().view_calculator().raycast_subsampling_factor(
+ 4);
mapper_->esdf_integrator().max_distance_m(4.0f);
mapper_->esdf_integrator().min_weight(2.0f);
@@ -119,11 +164,22 @@ void Fuser::readCommandLineFlags() {
<< FLAGS_timing_output_path;
timing_output_path_ = FLAGS_timing_output_path;
}
+ if (!gflags::GetCommandLineFlagInfoOrDie("tsdf_output_path").is_default) {
+ LOG(INFO) << "Command line parameter found: tsdf_output_path = "
+ << FLAGS_tsdf_output_path;
+ tsdf_output_path_ = FLAGS_tsdf_output_path;
+ }
+ if (!gflags::GetCommandLineFlagInfoOrDie("occupancy_output_path")
+ .is_default) {
+ LOG(INFO) << "Command line parameter found: occupancy_output_path = "
+ << FLAGS_occupancy_output_path;
+ occupancy_output_path_ = FLAGS_occupancy_output_path;
+ }
if (!gflags::GetCommandLineFlagInfoOrDie("esdf_output_path").is_default) {
LOG(INFO) << "Command line parameter found: esdf_output_path = "
<< FLAGS_esdf_output_path;
esdf_output_path_ = FLAGS_esdf_output_path;
- setEsdfMode(RgbdMapper::EsdfMode::k3D);
+ setEsdfMode(Mapper::EsdfMode::k3D);
}
if (!gflags::GetCommandLineFlagInfoOrDie("mesh_output_path").is_default) {
LOG(INFO) << "Command line parameter found: mesh_output_path = "
@@ -136,11 +192,11 @@ void Fuser::readCommandLineFlags() {
map_output_path_ = FLAGS_map_output_path;
}
// Subsampling flags
- if (!gflags::GetCommandLineFlagInfoOrDie("tsdf_frame_subsampling")
+ if (!gflags::GetCommandLineFlagInfoOrDie("projective_frame_subsampling")
.is_default) {
- LOG(INFO) << "Command line parameter found: tsdf_frame_subsampling = "
- << FLAGS_tsdf_frame_subsampling;
- setTsdfFrameSubsampling(FLAGS_tsdf_frame_subsampling);
+ LOG(INFO) << "Command line parameter found: projective_frame_subsampling = "
+ << FLAGS_projective_frame_subsampling;
+ setProjectiveFrameSubsampling(FLAGS_projective_frame_subsampling);
}
if (!gflags::GetCommandLineFlagInfoOrDie("color_frame_subsampling")
.is_default) {
@@ -160,24 +216,28 @@ void Fuser::readCommandLineFlags() {
<< FLAGS_esdf_frame_subsampling;
setEsdfFrameSubsampling(FLAGS_esdf_frame_subsampling);
}
- // TSDF integrator
+ // Projective integrator
if (!gflags::GetCommandLineFlagInfoOrDie(
- "tsdf_integrator_max_integration_distance_m")
+ "projective_integrator_max_integration_distance_m")
.is_default) {
LOG(INFO) << "Command line parameter found: "
- "tsdf_integrator_max_integration_distance_m= "
- << FLAGS_tsdf_integrator_max_integration_distance_m;
+ "projective_integrator_max_integration_distance_m= "
+ << FLAGS_projective_integrator_max_integration_distance_m;
mapper_->tsdf_integrator().max_integration_distance_m(
- FLAGS_tsdf_integrator_max_integration_distance_m);
+ FLAGS_projective_integrator_max_integration_distance_m);
+ mapper_->occupancy_integrator().max_integration_distance_m(
+ FLAGS_projective_integrator_max_integration_distance_m);
}
if (!gflags::GetCommandLineFlagInfoOrDie(
- "tsdf_integrator_truncation_distance_vox")
+ "projective_integrator_truncation_distance_vox")
.is_default) {
LOG(INFO) << "Command line parameter found: "
- "tsdf_integrator_truncation_distance_vox = "
- << FLAGS_tsdf_integrator_truncation_distance_vox;
+ "projective_integrator_truncation_distance_vox = "
+ << FLAGS_projective_integrator_truncation_distance_vox;
mapper_->tsdf_integrator().truncation_distance_vox(
- FLAGS_tsdf_integrator_truncation_distance_vox);
+ FLAGS_projective_integrator_truncation_distance_vox);
+ mapper_->occupancy_integrator().truncation_distance_vox(
+ FLAGS_projective_integrator_truncation_distance_vox);
}
if (!gflags::GetCommandLineFlagInfoOrDie("tsdf_integrator_max_weight")
.is_default) {
@@ -185,6 +245,39 @@ void Fuser::readCommandLineFlags() {
<< FLAGS_tsdf_integrator_max_weight;
mapper_->tsdf_integrator().max_weight(FLAGS_tsdf_integrator_max_weight);
}
+ if (!gflags::GetCommandLineFlagInfoOrDie("free_region_occupancy_probability")
+ .is_default) {
+ LOG(INFO)
+ << "Command line parameter found: free_region_occupancy_probability = "
+ << FLAGS_free_region_occupancy_probability;
+ mapper_->occupancy_integrator().free_region_occupancy_probability(
+ FLAGS_free_region_occupancy_probability);
+ }
+ if (!gflags::GetCommandLineFlagInfoOrDie(
+ "occupied_region_occupancy_probability")
+ .is_default) {
+ LOG(INFO) << "Command line parameter found: "
+ "occupied_region_occupancy_probability = "
+ << FLAGS_occupied_region_occupancy_probability;
+ mapper_->occupancy_integrator().occupied_region_occupancy_probability(
+ FLAGS_occupied_region_occupancy_probability);
+ }
+ if (!gflags::GetCommandLineFlagInfoOrDie(
+ "unobserved_region_occupancy_probability")
+ .is_default) {
+ LOG(INFO) << "Command line parameter found: "
+ "unobserved_region_occupancy_probability = "
+ << FLAGS_unobserved_region_occupancy_probability;
+ mapper_->occupancy_integrator().unobserved_region_occupancy_probability(
+ FLAGS_unobserved_region_occupancy_probability);
+ }
+ if (!gflags::GetCommandLineFlagInfoOrDie("occupied_region_half_width_m")
+ .is_default) {
+ LOG(INFO) << "Command line parameter found: occupied_region_half_width_m = "
+ << FLAGS_occupied_region_half_width_m;
+ mapper_->occupancy_integrator().occupied_region_half_width_m(
+ FLAGS_occupied_region_half_width_m);
+ }
// Mesh integrator
if (!gflags::GetCommandLineFlagInfoOrDie("mesh_integrator_min_weight")
.is_default) {
@@ -234,6 +327,55 @@ void Fuser::readCommandLineFlags() {
mapper_->esdf_integrator().max_distance_m(
FLAGS_esdf_integrator_max_distance_m);
}
+
+ // Weighting scheme
+ int num_weighting_schemes_requested = 0;
+ if (!gflags::GetCommandLineFlagInfoOrDie("weighting_scheme_constant")
+ .is_default) {
+ LOG(INFO) << "Command line parameter found: weighting_scheme_constant = "
+ << FLAGS_weighting_scheme_constant;
+ mapper_->tsdf_integrator().weighting_function_type(
+ WeightingFunctionType::kConstantWeight);
+ mapper_->color_integrator().weighting_function_type(
+ WeightingFunctionType::kConstantWeight);
+ ++num_weighting_schemes_requested;
+ }
+ if (!gflags::GetCommandLineFlagInfoOrDie("weighting_scheme_constant_dropoff")
+ .is_default) {
+ LOG(INFO)
+ << "Command line parameter found: weighting_scheme_constant_dropoff = "
+ << FLAGS_weighting_scheme_constant_dropoff;
+ mapper_->tsdf_integrator().weighting_function_type(
+ WeightingFunctionType::kConstantDropoffWeight);
+ mapper_->color_integrator().weighting_function_type(
+ WeightingFunctionType::kConstantDropoffWeight);
+ ++num_weighting_schemes_requested;
+ }
+ if (!gflags::GetCommandLineFlagInfoOrDie("weighting_scheme_inverse_square")
+ .is_default) {
+ LOG(INFO) << "Command line parameter found: weighting_scheme_square = "
+ << FLAGS_weighting_scheme_inverse_square;
+ mapper_->tsdf_integrator().weighting_function_type(
+ WeightingFunctionType::kInverseSquareWeight);
+ mapper_->color_integrator().weighting_function_type(
+ WeightingFunctionType::kInverseSquareWeight);
+ ++num_weighting_schemes_requested;
+ }
+ if (!gflags::GetCommandLineFlagInfoOrDie(
+ "weighting_scheme_inverse_square_dropoff")
+ .is_default) {
+ LOG(INFO) << "Command line parameter found: "
+ "weighting_scheme_inverse_square_dropoff = "
+ << FLAGS_weighting_scheme_inverse_square_dropoff;
+ mapper_->tsdf_integrator().weighting_function_type(
+ WeightingFunctionType::kInverseSquareDropoffWeight);
+ mapper_->color_integrator().weighting_function_type(
+ WeightingFunctionType::kInverseSquareDropoffWeight);
+ ++num_weighting_schemes_requested;
+ }
+ CHECK_LT(num_weighting_schemes_requested, 2)
+ << "You requested two weighting schemes on the command line. Maximum "
+ "one.";
}
int Fuser::run() {
@@ -244,6 +386,34 @@ int Fuser::run() {
return 1;
}
+ if (!occupancy_output_path_.empty()) {
+ if (projective_layer_type_ == ProjectiveLayerType::kOccupancy) {
+ LOG(INFO) << "Outputting occupancy pointcloud ply file to "
+ << occupancy_output_path_;
+ outputOccupancyPointcloudPly();
+ } else {
+ LOG(ERROR)
+ << "Occupancy pointcloud can not be stored to "
+ << occupancy_output_path_
+ << " because occupancy wasn't selected as projective layer variant.\n"
+ "Please set the use_occupancy_layer flag for an occupancy output.";
+ }
+ }
+
+ if (!tsdf_output_path_.empty()) {
+ if (projective_layer_type_ == ProjectiveLayerType::kTsdf) {
+ LOG(INFO) << "Outputting tsdf pointcloud ply file to "
+ << tsdf_output_path_;
+ outputTsdfPointcloudPly();
+ } else {
+ LOG(ERROR)
+ << "TSDF pointcloud can not be stored to " << tsdf_output_path_
+ << " because tsdf wasn't selected as projective layer variant.\n"
+ "Please leave/set the use_occupancy_layer flag false for an tsdf "
+ "output.";
+ }
+ }
+
if (!mesh_output_path_.empty()) {
LOG(INFO) << "Generating the mesh.";
mapper_->updateMesh();
@@ -255,7 +425,7 @@ int Fuser::run() {
LOG(INFO) << "Generating the ESDF.";
updateEsdf();
LOG(INFO) << "Outputting ESDF pointcloud ply file to " << esdf_output_path_;
- outputPointcloudPly();
+ outputESDFPointcloudPly();
}
if (!map_output_path_.empty()) {
@@ -271,12 +441,12 @@ int Fuser::run() {
return 0;
}
-RgbdMapper& Fuser::mapper() { return *mapper_; }
+Mapper& Fuser::mapper() { return *mapper_; }
void Fuser::setVoxelSize(float voxel_size) { voxel_size_m_ = voxel_size; }
-void Fuser::setTsdfFrameSubsampling(int subsample) {
- tsdf_frame_subsampling_ = subsample;
+void Fuser::setProjectiveFrameSubsampling(int subsample) {
+ projective_frame_subsampling_ = subsample;
}
void Fuser::setColorFrameSubsampling(int subsample) {
@@ -291,9 +461,9 @@ void Fuser::setEsdfFrameSubsampling(int subsample) {
esdf_frame_subsampling_ = subsample;
}
-void Fuser::setEsdfMode(RgbdMapper::EsdfMode esdf_mode) {
- if (esdf_mode_ != RgbdMapper::EsdfMode::kUnset) {
- LOG(WARNING) << "EsdfMode already set. Cannot change once set once. Not "
+void Fuser::setEsdfMode(Mapper::EsdfMode esdf_mode) {
+ if (esdf_mode_ != Mapper::EsdfMode::kUnset) {
+ LOG(WARNING) << "EsdfMode already set. Cannot change once set. Not "
"doing anything.";
}
esdf_mode_ = esdf_mode;
@@ -317,8 +487,8 @@ bool Fuser::integrateFrame(const int frame_number) {
}
timing::Timer per_frame_timer("fuser/time_per_frame");
- if ((frame_number + 1) % tsdf_frame_subsampling_ == 0) {
- timing::Timer timer_integrate("fuser/integrate_tsdf");
+ if ((frame_number + 1) % projective_frame_subsampling_ == 0) {
+ timing::Timer timer_integrate("fuser/projective_integration");
mapper_->integrateDepth(depth_frame, T_L_C, camera);
timer_integrate.Stop();
}
@@ -362,18 +532,29 @@ bool Fuser::integrateFrames() {
void Fuser::updateEsdf() {
switch (esdf_mode_) {
- case RgbdMapper::EsdfMode::kUnset:
+ case Mapper::EsdfMode::kUnset:
break;
- case RgbdMapper::EsdfMode::k3D:
+ case Mapper::EsdfMode::k3D:
mapper_->updateEsdf();
break;
- case RgbdMapper::EsdfMode::k2D:
+ case Mapper::EsdfMode::k2D:
mapper_->updateEsdfSlice(z_min_, z_max_, z_slice_);
break;
}
}
-bool Fuser::outputPointcloudPly() {
+bool Fuser::outputTsdfPointcloudPly() {
+ timing::Timer timer_write("fuser/tsdf/write");
+ return io::outputVoxelLayerToPly(mapper_->tsdf_layer(), tsdf_output_path_);
+}
+
+bool Fuser::outputOccupancyPointcloudPly() {
+ timing::Timer timer_write("fuser/occupancy/write");
+ return io::outputVoxelLayerToPly(mapper_->occupancy_layer(),
+ occupancy_output_path_);
+}
+
+bool Fuser::outputESDFPointcloudPly() {
timing::Timer timer_write("fuser/esdf/write");
return io::outputVoxelLayerToPly(mapper_->esdf_layer(), esdf_output_path_);
}
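The new occupancy and weighting-scheme flags above ultimately just call setters on the mapper's integrators. As a condensed sketch of the equivalent programmatic configuration (the numeric values are illustrative only, not defaults), using calls that appear verbatim in this diff:

```cpp
#include "nvblox/nvblox.h"

using namespace nvblox;

void configureMapper(Mapper& mapper) {
  // Inverse sensor model for the occupancy integrator.
  mapper.occupancy_integrator().free_region_occupancy_probability(0.3f);
  mapper.occupancy_integrator().occupied_region_occupancy_probability(0.7f);
  mapper.occupancy_integrator().unobserved_region_occupancy_probability(0.5f);
  mapper.occupancy_integrator().occupied_region_half_width_m(0.1f);

  // Shared projective-integrator limits (applied to both TSDF and occupancy).
  mapper.tsdf_integrator().max_integration_distance_m(5.0f);
  mapper.occupancy_integrator().max_integration_distance_m(5.0f);

  // Select one weighting scheme at a time, for both TSDF and color integration.
  mapper.tsdf_integrator().weighting_function_type(
      WeightingFunctionType::kInverseSquareDropoffWeight);
  mapper.color_integrator().weighting_function_type(
      WeightingFunctionType::kInverseSquareDropoffWeight);
}
```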
diff --git a/nvblox/experiments/.gitignore b/nvblox/experiments/.gitignore
deleted file mode 100644
index f32081e2..00000000
--- a/nvblox/experiments/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*output*
\ No newline at end of file
diff --git a/nvblox/experiments/CMakeLists.txt b/nvblox/experiments/CMakeLists.txt
index 6b40559f..c8c7776a 100644
--- a/nvblox/experiments/CMakeLists.txt
+++ b/nvblox/experiments/CMakeLists.txt
@@ -1,41 +1,10 @@
-# Common experiments library
-add_library(nvblox_experiments_common SHARED
- src/integrators/cuda/depth_frame_texture.cu
- src/integrators/cuda/experimental_integrator_input_frames.cu
- src/integrators/cuda/experimental_projective_tsdf_integrators.cu
+add_executable(test_bench
+ src/test_bench.cpp
)
-target_include_directories(nvblox_experiments_common PUBLIC
- include
+target_include_directories(test_bench PUBLIC
+ $
+ $
+)
+target_link_libraries(test_bench
+ nvblox_lib nvblox_datasets
)
-add_dependencies(nvblox_experiments_common nvblox_datasets)
-target_link_libraries(nvblox_experiments_common nvblox_lib nvblox_datasets)
-set_target_properties(nvblox_experiments_common PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
-
-# Macro for installing python script SYMBOLICALLY to the buildspace
-macro(makeLink src dest target)
- add_custom_command(TARGET ${target} PRE_BUILD
- COMMAND ln -sf ${src} ${dest} DEPENDS ${dest} COMMENT "mklink ${src} -> ${dest}")
-endmacro()
-
-# Python module for experiments
-set(PYTHON_EXPERIMENTS_MODULE_SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}/python")
-set(PYTHON_EXPERIMENTS_MODULE_DST_DIR "${CMAKE_CURRENT_BINARY_DIR}/python")
-file(MAKE_DIRECTORY ${PYTHON_EXPERIMENTS_MODULE_DST_DIR})
-file(COPY ${PYTHON_EXPERIMENTS_MODULE_SRC_DIR}/
- DESTINATION ${PYTHON_EXPERIMENTS_MODULE_DST_DIR})
-
-# Script for comparing branches
-add_subdirectory(experiments/compare_branches)
-
-# Experiments
-# NOTE(alexmillane): Experiments disabled with "EXCLUDE_FROM_ALL" are no longer
-# maintained/building but are kept around for posterity
-add_subdirectory(experiments/texture_vs_global_memory_interpolation EXCLUDE_FROM_ALL)
-add_subdirectory(experiments/unified_vs_device_memory)
-add_subdirectory(experiments/threaded_image_loading)
-add_subdirectory(experiments/realistic_timings)
-add_subdirectory(experiments/vector_copies)
-add_subdirectory(experiments/isolate_tsdf_block_update)
-add_subdirectory(experiments/stream_compaction)
-add_subdirectory(experiments/layer_cake_interface)
-add_subdirectory(experiments/ratio_of_freespace)
diff --git a/nvblox/experiments/README.md b/nvblox/experiments/README.md
new file mode 100644
index 00000000..ee36687a
--- /dev/null
+++ b/nvblox/experiments/README.md
@@ -0,0 +1,2 @@
+# nvblox Experiments Folder
+Often in the early stages of developing a new feature for nvblox, it is useful to have a space to try things out *in the source tree* but not mixed with the library code. This folder is a space to do that in. Simply create a new executable, or modify `test_bench.cpp` and try things out.
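The new experiments CMakeLists.txt builds a single `test_bench` executable from `src/test_bench.cpp`, linked against nvblox_lib and nvblox_datasets. As a hedged illustration of the kind of scratch code this folder is for (the actual contents of `test_bench.cpp` are not shown in this diff), a minimal placeholder might look like:

```cpp
#include <iostream>

#include "nvblox/nvblox.h"

// Scratch space: replace the body with whatever you are prototyping.
int main(int /*argc*/, char** /*argv*/) {
  constexpr float kVoxelSizeM = 0.05f;
  nvblox::Mapper mapper(kVoxelSizeM, nvblox::MemoryType::kDevice);
  std::cout << "nvblox test bench: constructed a Mapper with " << kVoxelSizeM
            << " m voxels." << std::endl;
  return 0;
}
```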
diff --git a/nvblox/experiments/experiments/compare_branches/CMakeLists.txt b/nvblox/experiments/experiments/compare_branches/CMakeLists.txt
deleted file mode 100644
index 1de54cce..00000000
--- a/nvblox/experiments/experiments/compare_branches/CMakeLists.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-add_custom_target( compare_branches )
-makeLink("${CMAKE_CURRENT_SOURCE_DIR}/compare_branches.py" "${CMAKE_CURRENT_BINARY_DIR}/" compare_branches)
-makeLink("${CMAKE_CURRENT_SOURCE_DIR}/plot_comparison.py" "${CMAKE_CURRENT_BINARY_DIR}/" compare_branches)
diff --git a/nvblox/experiments/experiments/isolate_tsdf_block_update/CMakeLists.txt b/nvblox/experiments/experiments/isolate_tsdf_block_update/CMakeLists.txt
deleted file mode 100644
index 3714f920..00000000
--- a/nvblox/experiments/experiments/isolate_tsdf_block_update/CMakeLists.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-add_executable(isolate_tsdf_block_update main.cpp)
-target_link_libraries(isolate_tsdf_block_update nvblox_lib nvblox_datasets)
diff --git a/nvblox/experiments/experiments/isolate_tsdf_block_update/main.cpp b/nvblox/experiments/experiments/isolate_tsdf_block_update/main.cpp
deleted file mode 100644
index 1691123b..00000000
--- a/nvblox/experiments/experiments/isolate_tsdf_block_update/main.cpp
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
-Copyright 2022 NVIDIA CORPORATION
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-#include
-#include
-
-#include
-#include
-
-#include "nvblox/core/camera.h"
-#include "nvblox/core/common_names.h"
-#include "nvblox/core/cuda/warmup.h"
-#include "nvblox/core/types.h"
-#include "nvblox/datasets/3dmatch.h"
-#include "nvblox/integrators/internal/integrators_common.h"
-#include "nvblox/integrators/projective_tsdf_integrator.h"
-#include "nvblox/utils/timing.h"
-
-DECLARE_bool(alsologtostderr);
-
-using namespace nvblox;
-
-// Just a class so we can access integrator internals.
-class ProjectiveTsdfIntegratorExperiment : public ProjectiveTsdfIntegrator {
- public:
-  ProjectiveTsdfIntegratorExperiment() : ProjectiveTsdfIntegrator() {}
-  virtual ~ProjectiveTsdfIntegratorExperiment(){};
-
-  // Expose this publicly
-  template <typename SensorType>
-  void integrateBlocksTemplate(const std::vector<Index3D>& block_indices,
-                               const DepthImage& depth_frame,
-                               const Transform& T_L_C, const SensorType& sensor,
-                               const float truncation_distance_m,
-                               TsdfLayer* layer) {
-    ProjectiveTsdfIntegrator::integrateBlocksTemplate(
-        block_indices, depth_frame, T_L_C, sensor, layer);
-  }
-
-  // Expose this publicly
-  // std::vector<Index3D> getBlocksInViewUsingRaycasting(
-  //     const DepthImage& depth_frame, const Transform& T_L_C,
-  //     const Camera& camera, const float block_size) const {
-  //   return ProjectiveTsdfIntegrator::getBlocksInViewUsingRaycasting(
-  //       depth_frame, T_L_C, camera, block_size);
-  // }
-};
-
-int main(int argc, char* argv[]) {
- gflags::ParseCommandLineFlags(&argc, &argv, true);
- FLAGS_alsologtostderr = true;
- google::InstallFailureSignalHandler();
-
- const std::string dataset_base_path = "../../../tests/data/3dmatch";
- constexpr int kSeqNum = 1;
-
- constexpr float kVoxelSize = 0.05;
- TsdfLayer tsdf_layer(kVoxelSize, MemoryType::kDevice);
-
- ProjectiveTsdfIntegratorExperiment tsdf_integrator;
-
- const unsigned int frustum_raycast_subsampling_rate = 4;
- tsdf_integrator.view_calculator().raycast_subsampling_factor(
- frustum_raycast_subsampling_rate);
-
- const float truncation_distance_m =
- tsdf_integrator.truncation_distance_vox() * kVoxelSize;
-
- // Update identified blocks (many times)
- constexpr int kNumIntegrations = 1000;
- for (int i = 0; i < kNumIntegrations; i++) {
- // Load images
- auto image_loader_ptr =
- datasets::threedmatch::internal::createDepthImageLoader(
- dataset_base_path, kSeqNum);
-
- DepthImage depth_frame;
- CHECK(image_loader_ptr->getNextImage(&depth_frame));
-
- Eigen::Matrix3f camera_intrinsics;
- CHECK(datasets::threedmatch::internal::parseCameraFromFile(
- datasets::threedmatch::internal::getPathForCameraIntrinsics(
- dataset_base_path),
- &camera_intrinsics));
- const auto camera = Camera::fromIntrinsicsMatrix(
- camera_intrinsics, depth_frame.width(), depth_frame.height());
-
- Transform T_L_C;
- CHECK(datasets::threedmatch::internal::parsePoseFromFile(
- datasets::threedmatch::internal::getPathForFramePose(dataset_base_path,
- kSeqNum, 0),
- &T_L_C));
-
- // Identify blocks we can (potentially) see (CPU)
- timing::Timer blocks_in_view_timer("tsdf/integrate/get_blocks_in_view");
-  const std::vector<Index3D> block_indices =
- tsdf_integrator.view_calculator().getBlocksInImageViewRaycast(
- depth_frame, T_L_C, camera, tsdf_layer.block_size(),
- truncation_distance_m,
- tsdf_integrator.max_integration_distance_m());
- blocks_in_view_timer.Stop();
-
- // Allocate blocks (CPU)
- timing::Timer allocate_blocks_timer("tsdf/integrate/allocate_blocks");
- allocateBlocksWhereRequired(block_indices, &tsdf_layer);
- allocate_blocks_timer.Stop();
-
- timing::Timer update_blocks_timer("tsdf/integrate/update_blocks");
- tsdf_integrator.integrateBlocksTemplate(block_indices, depth_frame, T_L_C,
- camera, truncation_distance_m,
- &tsdf_layer);
- update_blocks_timer.Stop();
-
- // Reset the layer such that we do TsdfBlock allocation.
- // tsdf_layer.clear();
- }
-
- std::cout << timing::Timing::Print() << std::endl;
-
- return 0;
-}
\ No newline at end of file
diff --git a/nvblox/experiments/experiments/layer_cake_interface/CMakeLists.txt b/nvblox/experiments/experiments/layer_cake_interface/CMakeLists.txt
deleted file mode 100644
index b51552ba..00000000
--- a/nvblox/experiments/experiments/layer_cake_interface/CMakeLists.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-add_executable(layer_cake_interface_dynamic
- main_dynamic.cpp
- src/cuda/user_defined_block.cu
-)
-target_include_directories(layer_cake_interface_dynamic PUBLIC
-include
-)
-target_link_libraries(layer_cake_interface_dynamic nvblox_lib)
-
-add_executable(layer_cake_interface_static
- main_static.cpp
-)
-target_include_directories(layer_cake_interface_static PUBLIC
-include
-)
-target_link_libraries(layer_cake_interface_static nvblox_lib)
-
-add_executable(layer_cake_interface_type_erasure
- main_type_erasure.cpp
-)
-target_include_directories(layer_cake_interface_type_erasure PUBLIC
-include
-)
-target_link_libraries(layer_cake_interface_type_erasure nvblox_lib)
-
diff --git a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/cake_common.h b/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/cake_common.h
deleted file mode 100644
index bc16d20a..00000000
--- a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/cake_common.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
-Copyright 2022 NVIDIA CORPORATION
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-#pragma once
-
-namespace nvblox {
-namespace experiments {
-
-// Unique types
-template <typename... Ts>
-struct unique_types;
-
-// Count occurrences
-template <typename T, typename... Ts>
-struct count_type_occurrence;
-
-} // namespace experiments
-} // namespace nvblox
-
-#include "nvblox/experiments/impl/cake_common_impl.h"
diff --git a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/cake_dynamic.h b/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/cake_dynamic.h
deleted file mode 100644
index d9a941ed..00000000
--- a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/cake_dynamic.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
-Copyright 2022 NVIDIA CORPORATION
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-#pragma once
-
-#include <memory>
-#include <typeindex>
-#include <unordered_map>
-
-#include
-#include
-
-#include "nvblox/experiments/cake_common.h"
-
-namespace nvblox {
-namespace experiments {
-
-class LayerCakeDynamic {
- public:
-  LayerCakeDynamic(float voxel_size, MemoryType memory_type)
-      : voxel_size_(voxel_size), memory_type_(memory_type) {}
-
-  template <typename LayerType>
-  LayerType* add();
-
-  template <typename LayerType>
-  LayerType* getPtr();
-
-  template <typename LayerType>
-  bool exists() const;
-
-  // Factory accepting a list of LayerTypes
-  template <typename... LayerTypes>
-  static LayerCakeDynamic create(float voxel_size, MemoryType memory_type);
-
- private:
-  // Params
-  const float voxel_size_;
-  const MemoryType memory_type_;
-
-  // NOTE(alexmillane): Could move to multimap if we want more than one layer
-  // with the same type
-  std::unordered_map<std::type_index, std::unique_ptr<BaseLayer>> layers_;
-};
-
-} // namespace experiments
-} // namespace nvblox
-
-#include "nvblox/experiments/impl/cake_dynamic_impl.h"
diff --git a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/cake_static.h b/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/cake_static.h
deleted file mode 100644
index 5b2e9105..00000000
--- a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/cake_static.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright 2022 NVIDIA CORPORATION
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-#pragma once
-
-namespace nvblox {
-namespace experiments {
-
-template <typename... LayerTypes>
-class LayerCakeStatic {
- public:
-  LayerCakeStatic(float voxel_size, MemoryType memory_type)
-      : layers_(std::move(
-            LayerTypes(sizeArgumentFromVoxelSize<LayerTypes>(voxel_size),
-                       memory_type))...) {}
-
-  // Access by LayerType.
-  // This requires that the list of types contained in this LayerCakeStatic are
-  // unique with respect to one another.
-  template <typename LayerType>
-  LayerType* getPtr();
-  template <typename LayerType>
-  const LayerType& get() const;
-
-  template <typename LayerType>
-  int count() const;
-
- private:
-  std::tuple<LayerTypes...> layers_;
-};
-
-} // namespace experiments
-} // namespace nvblox
-
-#include "nvblox/experiments/impl/cake_static_impl.h"
diff --git a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/cake_type_erasure.h b/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/cake_type_erasure.h
deleted file mode 100644
index 550eb062..00000000
--- a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/cake_type_erasure.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
-Copyright 2022 NVIDIA CORPORATION
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-#pragma once
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "nvblox/core/layer.h"
-#include "nvblox/core/types.h"
-
-namespace nvblox {
-namespace experiments {
-
-// Note: A good resource for type erasure can be found here:
-// https://github.com/alecjacobson/better-code-runtime-polymorphism/blob/master/library.hpp
-
-template <typename LayerType>
-bool loadLayer(const std::string& filename, LayerType* layer_ptr) {
- LOG(INFO) << "Loading Layer of type: " << typeid(*layer_ptr).name();
- return true;
-}
-
-class LayerInterface {
- public:
-  template <typename LayerType>
-  LayerInterface(LayerType&& layer)
-      : layer_(std::make_unique<LayerModel<LayerType>>(std::move(layer))) {}
-
- bool load(const std::string& filename) { return layer_->load(filename); }
-
- private:
- struct LayerConcept {
- virtual ~LayerConcept() = default;
- virtual bool load(const std::string& filename) = 0;
- };
-
-  template <typename LayerType>
- struct LayerModel : LayerConcept {
- LayerModel(LayerType&& layer) : layer_(std::move(layer)) {}
-
- bool load(const std::string& filename) override {
- return loadLayer(filename, &layer_);
- }
-
- // Where the layer is actually stored
- LayerType layer_;
- };
-
-  std::unique_ptr<LayerConcept> layer_;
-};
-
-class LayerCakeTypeErasure {
- public:
- LayerCakeTypeErasure(float voxel_size, MemoryType memory_type)
- : voxel_size_(voxel_size), memory_type_(memory_type){};
-
-  template <typename LayerType>
-  void add() {
-    layers_.push_back(LayerType(
-        sizeArgumentFromVoxelSize<LayerType>(voxel_size_), memory_type_));
-  }
-
- bool load(const std::string& filename) {
- bool success = true;
- for (auto& layer : layers_) {
- success &= layer.load(filename);
- }
- return success;
- }
-
- private:
- // Params
- const float voxel_size_;
- const MemoryType memory_type_;
-
- // Data
-  std::vector<LayerInterface> layers_;
-};
-
-} // namespace experiments
-} // namespace nvblox
\ No newline at end of file
diff --git a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/impl/cake_common_impl.h b/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/impl/cake_common_impl.h
deleted file mode 100644
index 9c81fba8..00000000
--- a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/impl/cake_common_impl.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
-Copyright 2022 NVIDIA CORPORATION
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-#pragma once
-
-namespace nvblox {
-namespace experiments {
-
-// Unique types
-template <typename T1, typename T2, typename... Ts>
-struct unique_types<T1, T2, Ts...> {
-  static constexpr bool value = unique_types<T1, T2>::value &&
-                                unique_types<T1, Ts...>::value &&
-                                unique_types<T2, Ts...>::value;
-};
-
-template <typename T1, typename T2>
-struct unique_types<T1, T2> {
-  static constexpr bool value = !std::is_same<T1, T2>::value;
-};
-
-template <typename T1>
-struct unique_types<T1> {
-  static constexpr bool value = true;
-};
-
-template <>
-struct unique_types<> {
-  static constexpr bool value = true;
-};
-
-// Count occurrences
-template <typename T, typename T2, typename... Ts>
-struct count_type_occurrence<T, T2, Ts...> {
-  static constexpr int value = count_type_occurrence<T, T2>::value +
-                               count_type_occurrence<T, Ts...>::value;
-};
-
-template <typename T, typename T2>
-struct count_type_occurrence<T, T2> {
-  static constexpr int value =
-      static_cast<int>(std::is_same<T, T2>::value);
-};
-
-template <typename T>
-struct count_type_occurrence<T> {
-  static constexpr int value = 0;
-};
-
-} // namespace experiments
-} // namespace nvblox
\ No newline at end of file
diff --git a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/impl/cake_dynamic_impl.h b/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/impl/cake_dynamic_impl.h
deleted file mode 100644
index c2be15ae..00000000
--- a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/impl/cake_dynamic_impl.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
-Copyright 2022 NVIDIA CORPORATION
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-#pragma once
-
-namespace nvblox {
-namespace experiments {
-
-template <typename LayerType>
-LayerType* LayerCakeDynamic::add() {
-  if (layers_.count(typeid(LayerType)) == 0) {
-    // Allocate
-    auto layer_ptr = std::make_unique<LayerType>(
-        sizeArgumentFromVoxelSize<LayerType>(voxel_size_), memory_type_);
-    LayerType* return_ptr = layer_ptr.get();
-    // Store (as BaseLayer ptr)
-    layers_.emplace(std::type_index(typeid(LayerType)), std::move(layer_ptr));
-    LOG(INFO) << "Adding Layer with type: " << typeid(LayerType).name()
-              << " to LayerCake.";
-    return return_ptr;
-  } else {
-    LOG(WARNING) << "Request to add a LayerType that's already in the cake. "
-                    "Currently we only support single layers of each "
-                    "LayerType. So we did nothing.";
-    return nullptr;
-  }
-}
-
-template <typename LayerType>
-LayerType* LayerCakeDynamic::getPtr() {
-  const auto it = layers_.find(std::type_index(typeid(LayerType)));
-  if (it != layers_.end()) {
-    BaseLayer* base_ptr = it->second.get();
-    LayerType* ptr = dynamic_cast<LayerType*>(base_ptr);
-    CHECK_NOTNULL(ptr);
-    return ptr;
-  } else {
-    LOG(WARNING) << "Request for a LayerType which is not in the cake.";
-    return nullptr;
-  }
-}
-
-template <typename LayerType>
-bool LayerCakeDynamic::exists() const {
-  const auto it = layers_.find(std::type_index(typeid(LayerType)));
-  return (it != layers_.end());
-}
-
-template <typename... LayerTypes>
-LayerCakeDynamic LayerCakeDynamic::create(float voxel_size,
-                                          MemoryType memory_type) {
-  static_assert(unique_types<LayerTypes...>::value,
-                "At the moment we only support LayerCakes containing unique "
-                "LayerTypes.");
-  LayerCakeDynamic cake(voxel_size, memory_type);
-  BaseLayer* ignored[] = {cake.add<LayerTypes>()...};
-  return cake;
-}
-
-} // namespace experiments
-} // namespace nvblox
diff --git a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/impl/cake_static_impl.h b/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/impl/cake_static_impl.h
deleted file mode 100644
index 55ef5aa7..00000000
--- a/nvblox/experiments/experiments/layer_cake_interface/include/nvblox/experiments/impl/cake_static_impl.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-Copyright 2022 NVIDIA CORPORATION
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-#pragma once
-
-#include "nvblox/experiments/cake_common.h"
-
-namespace nvblox {
-namespace experiments {
-
-template <typename... LayerTypes>
-template <typename LayerType>
-LayerType* LayerCakeStatic<LayerTypes...>::getPtr() {
-  static_assert(count_type_occurrence<LayerType, LayerTypes...>::value > 0,
-                "LayerCake does not contain requested layer");
-  static_assert(count_type_occurrence<LayerType, LayerTypes...>::value < 2,
-                "To get a Layer by type, the LayerCake must contain "
-                "only a single matching LayerType");
-  return &std::get<LayerType>(layers_);
-}
-
-template <typename... LayerTypes>
-template <typename LayerType>
-const LayerType& LayerCakeStatic<LayerTypes...>::get() const {
-  static_assert(count_type_occurrence<LayerType, LayerTypes...>::value > 0,
-                "LayerCake does not contain requested layer");
-  static_assert(count_type_occurrence<LayerType, LayerTypes...>::value < 2,
-                "To get a Layer by type, the LayerCake must contain "
-                "only a single matching LayerType");
- return std::get