Binary file added .DS_Store
Binary file not shown.
50 changes: 50 additions & 0 deletions CMakeLists.txt
@@ -0,0 +1,50 @@
cmake_minimum_required(VERSION 3.10)
project(nerf_cpp)

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Find required packages
find_package(Torch REQUIRED)
find_package(OpenCV REQUIRED)
find_package(Eigen3 REQUIRED)
find_package(nlohmann_json REQUIRED)

# Include directories
include_directories(
${CMAKE_CURRENT_SOURCE_DIR}/include
${TORCH_INCLUDE_DIRS}
${OpenCV_INCLUDE_DIRS}
${EIGEN3_INCLUDE_DIR}
${nlohmann_json_INCLUDE_DIRS}
)

# Add source files
file(GLOB_RECURSE SOURCES
"src/*.cpp"
)

# Create executable
add_executable(nerf_train examples/train.cpp ${SOURCES})

# Link libraries
target_link_libraries(nerf_train
${TORCH_LIBRARIES}
${OpenCV_LIBS}
Eigen3::Eigen
nlohmann_json::nlohmann_json
)

# Set compiler flags
if(MSVC)
target_compile_options(nerf_train PRIVATE /W4)
else()
target_compile_options(nerf_train PRIVATE -Wall -Wextra -Wpedantic)
endif()

# Set CUDA properties if available
if(TORCH_CUDA_AVAILABLE)
set_target_properties(nerf_train PROPERTIES
CUDA_SEPARABLE_COMPILATION ON
)
endif()
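
A note on LibTorch integration: `find_package(Torch)` also exports `TORCH_CXX_FLAGS`, which LibTorch's CMake documentation recommends folding into the compile flags so the target builds with the same ABI settings as the library. A minimal sketch of the extra line (not part of this PR):

```cmake
# Fold LibTorch's exported compile flags (ABI settings, etc.) into ours;
# this must run after find_package(Torch REQUIRED).
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
```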
191 changes: 191 additions & 0 deletions README.md
@@ -161,3 +161,194 @@ However, if you find this implementation or pre-trained models helpful, please consider citing:
year={2020}
}
```

# NeRF C++ Implementation

This is a C++ implementation of [NeRF](http://www.matthewtancik.com/nerf) (Neural Radiance Fields), a method that achieves state-of-the-art results for synthesizing novel views of complex scenes.

## Dependencies

You can install the dependencies either through Conda or your system package manager.

### Using Conda (Recommended)

1. Install Miniconda or Anaconda if you haven't already.

2. Create and activate a new conda environment:
```bash
# Create new environment
conda create -n nerf-cpp python=3.8
conda activate nerf-cpp

# Install dependencies
conda install -c pytorch pytorch torchvision cudatoolkit=11.8
conda install -c conda-forge opencv eigen nlohmann_json
conda install -c conda-forge cmake ninja
```

3. Set environment variables for CMake:
```bash
export CMAKE_PREFIX_PATH=$CONDA_PREFIX
export Torch_DIR=$CONDA_PREFIX/lib/python3.8/site-packages/torch/share/cmake/Torch
```
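
If the hard-coded `python3.8` path does not match your environment, the installed `torch` package can report its own CMake prefix; a sketch assuming `python` resolves to the conda environment's interpreter:

```bash
# Ask torch itself where its CMake config lives
export Torch_DIR="$(python -c 'import torch; print(torch.utils.cmake_prefix_path)')/Torch"
```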

### Using System Package Manager (Alternative)

#### Ubuntu/Debian

```bash
# Install system dependencies
sudo apt-get update
sudo apt-get install -y \
build-essential \
cmake \
git \
libopencv-dev \
libeigen3-dev \
nlohmann-json3-dev

# Install LibTorch (PyTorch C++ API)
wget https://download.pytorch.org/libtorch/cu118/libtorch-cxx11-abi-shared-with-deps-2.1.0%2Bcu118.zip
unzip libtorch-cxx11-abi-shared-with-deps-2.1.0+cu118.zip
sudo mv libtorch /usr/local/
```

#### macOS

```bash
# Install system dependencies using Homebrew
brew install cmake opencv eigen nlohmann-json

# Install LibTorch
wget https://download.pytorch.org/libtorch/cpu/libtorch-macos-2.1.0.zip
unzip libtorch-macos-2.1.0.zip
sudo mv libtorch /usr/local/
```

## Building the Project

1. Clone the repository:
```bash
git clone https://github.com/yourusername/nerf-cpp.git
cd nerf-cpp
```

2. Create a build directory and build the project:
```bash
mkdir build && cd build

# If using Conda
cmake -DCMAKE_PREFIX_PATH=$CONDA_PREFIX -DTorch_DIR=$CONDA_PREFIX/lib/python3.8/site-packages/torch/share/cmake/Torch ..

# If using system packages (LibTorch unpacked to /usr/local/libtorch as above)
cmake -DCMAKE_PREFIX_PATH=/usr/local/libtorch ..

make -j$(nproc)   # on macOS: make -j$(sysctl -n hw.ncpu)
```

## Running the Project

### Download Example Data

First, download the example datasets:

```bash
bash download_example_data.sh
```

This will download the `lego` and `fern` datasets to the `data` directory.

### Training

To train a NeRF model on the lego dataset:

```bash
./nerf_train configs/lego.txt
```

The training process will:
1. Load the dataset from `data/nerf_synthetic/lego`
2. Train the model for 100,000 iterations
3. Save checkpoints every 1,000 iterations
4. Save the final model as `final_model.pt`

### Configuration

The configuration files in the `configs` directory control various aspects of training (an example config follows the list):

- `expname`: Name of the experiment
- `basedir`: Directory to save logs and checkpoints
- `datadir`: Directory containing the dataset
- `dataset_type`: Type of dataset ("blender" or "llff")
- `N_samples`: Number of samples per ray
- `N_importance`: Number of importance samples
- `use_viewdirs`: Whether to use view-dependent effects
- `white_bkgd`: Whether to use white background

## Project Structure

```
nerf-cpp/
├── CMakeLists.txt
├── include/
│ └── nerf/
│ ├── model.hpp
│ ├── renderer.hpp
│ └── dataset.hpp
├── src/
│ ├── model.cpp
│ ├── renderer.cpp
│ └── dataset.cpp
├── examples/
│ └── train.cpp
├── configs/
│ ├── lego.txt
│ └── fern.txt
└── data/
├── nerf_synthetic/
└── nerf_llff_data/
```

## Troubleshooting

### Common Issues

1. **CUDA not found**
- Make sure you have CUDA installed
- If using Conda, make sure you installed the correct CUDA toolkit version
- Set `TORCH_CUDA_VERSION` in CMake if needed

2. **OpenCV not found**
- If using Conda, make sure you activated the environment
- If using system packages, install OpenCV development packages
- Set `OpenCV_DIR` in CMake if needed

3. **Eigen3 not found**
- If using Conda, make sure you activated the environment
- If using system packages, install Eigen3 development packages
   - Set `EIGEN3_INCLUDE_DIR` in CMake if needed (see the example invocation below)
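
All three can also be pointed at explicit locations on the CMake command line; a sketch with placeholder paths (substitute your actual install locations):

```bash
# Placeholder paths -- adjust to your system
cmake \
  -DOpenCV_DIR=/path/to/opencv/lib/cmake/opencv4 \
  -DEIGEN3_INCLUDE_DIR=/path/to/include/eigen3 \
  -DTorch_DIR=/usr/local/libtorch/share/cmake/Torch \
  ..
```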

### Memory Usage

The default configuration uses:
- Batch size: 1024 rays
- Samples per ray: 64
- Network width: 256
- Network depth: 8

You can adjust these parameters in the configuration files to reduce memory usage if needed.
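
For a rough sense of scale: at 1,024 rays per batch and 64 coarse samples per ray, the MLP is evaluated 1,024 × 64 = 65,536 times per iteration before importance samples are added, and each evaluation runs through an 8-layer, 256-wide network. Memory therefore scales roughly linearly in both batch size and samples per ray, so halving either roughly halves activation memory.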

## Citation

If you find this implementation helpful, please consider citing:

```bibtex
@misc{mildenhall2020nerf,
title={NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis},
author={Ben Mildenhall and Pratul P. Srinivasan and Matthew Tancik and Jonathan T. Barron and Ravi Ramamoorthi and Ren Ng},
year={2020},
eprint={2003.08934},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
18 changes: 14 additions & 4 deletions configs/lego.txt
@@ -7,13 +7,23 @@ no_batching = True

use_viewdirs = True
white_bkgd = True

# Key parameters for preserving rendering quality
N_samples = 64
N_importance = 128
multires = 10
multires_views = 4
netdepth = 8
netwidth = 256

# Parameters for speeding up training
N_rand = 2048
chunk = 32768
netchunk = 65536

# Learning-rate schedule tuning
lrate = 1e-3
lrate_decay = 250

precrop_iters = 500
precrop_frac = 0.5

half_res = True
109 changes: 109 additions & 0 deletions examples/train.cpp
@@ -0,0 +1,109 @@
#include "nerf/model.hpp"
#include "nerf/renderer.hpp"
#include "nerf/dataset.hpp"
#include <torch/torch.h>
#include <iostream>
#include <chrono>
#include <filesystem>

using namespace nerf;

int main(int argc, char* argv[]) {
try {
// Parse command line arguments
if (argc < 2) {
std::cerr << "Usage: " << argv[0] << " <config_file>" << std::endl;
return 1;
}

// Load configuration
std::string config_file = argv[1];
// TODO: Implement config loading

// Set device
        // Prefer CUDA when available, otherwise fall back to CPU
        torch::Device device(torch::cuda::is_available() ? torch::kCUDA : torch::kCPU);

// Create model
auto model = std::make_shared<NeRFModel>(
8, // netdepth
256, // netwidth
8, // netdepth_fine
256, // netwidth_fine
10, // multires
4, // multires_views
true // use_viewdirs
);
model->to(device);

// Create renderer
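        // Hierarchical sampling: N_samples coarse points per ray, then
        // N_importance extra points concentrated where the coarse pass
        // found density (as in the NeRF paper)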
auto renderer = std::make_shared<Renderer>(
model,
64, // N_samples
64, // N_importance
true, // use_viewdirs
1.0f, // raw_noise_std
true // white_bkgd
);

// Create dataset
auto dataset = std::make_shared<Dataset>(
"./data/nerf_synthetic/lego", // datadir
"blender", // dataset_type
8, // factor
true, // use_viewdirs
true // white_bkgd
);

// Create optimizer
torch::optim::Adam optimizer(
model->parameters(),
torch::optim::AdamOptions(1e-3)
);

// Training loop
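        // Note: each "epoch" below is a single iteration over one random ray
        // batch, so num_epochs corresponds to the README's 100,000 iterations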
int num_epochs = 100000;
int batch_size = 1024;

for (int epoch = 0; epoch < num_epochs; ++epoch) {
// Get batch of rays
auto [rays_o, rays_d, target_rgb] = dataset->get_data();
rays_o = rays_o.to(device);
rays_d = rays_d.to(device);
target_rgb = target_rgb.to(device);

// Forward pass
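            // The third argument appears to reuse rays_d as the view
            // directions; a separately normalized viewdirs tensor may be
            // what the renderer expects when use_viewdirs is enabled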
auto [rgb_map, depth_map, acc_map, _] = renderer->render_rays(
rays_o, rays_d, rays_d,
dataset->get_near().to(device),
dataset->get_far().to(device)
);

// Compute loss
auto loss = torch::mse_loss(rgb_map, target_rgb);

// Backward pass
optimizer.zero_grad();
loss.backward();
optimizer.step();

// Print progress
if (epoch % 100 == 0) {
std::cout << "Epoch " << epoch << ", Loss: " << loss.item<float>() << std::endl;
}

// Save checkpoint
if (epoch % 1000 == 0) {
torch::save(model, "checkpoint_" + std::to_string(epoch) + ".pt");
}
}

// Save final model
torch::save(model, "final_model.pt");

} catch (const std::exception& e) {
std::cerr << "Error: " << e.what() << std::endl;
return 1;
}

return 0;
}