Commit 8b3d0a6

Author: Mike Lambeta (committed)

Added 3D surface reconstruction from the sensor image, added point cloud and 3D visualization, and added the 3D surface model to the zoo.

1 parent 413b914 · commit 8b3d0a6

Note: this is a large commit, so some of the changed files are hidden from the listing below.
41 files changed (+2946 −19 lines)

.flake8 (+2)

@@ -3,6 +3,8 @@ exclude =
     .git
     ,.nox
     ,__init__.py
+    ,pytouch/models/pix2pix/thirdparty
+    ,train
 max-line-length = 119
 copyright-check = True
 select = E,F,W,C

AUTHORS.md (+16)

@@ -0,0 +1,16 @@
+# PyTouch Team
+
+* Mike Lambeta
+* Huazhe Xu
+* Jingwei Xu
+* Po-Wei Chou
+* Shaoxiong Wang
+* Roberto Calandra
+
+# Additional Authors
+
+* Paloma Sodhi - Surface 3D patch graph algorithms
+
+# Special Thanks To
+
+* Haozhou Wang - for kindly providing the PyPi repository PyTouch.

CONTRIBUTING.md (+1 −1)

@@ -4,7 +4,7 @@ We want to make contributing to this project as easy and transparent as possible
 ## Pull Requests
 We actively welcome your pull requests.
 
-1. Fork the repo and create your branch from `master`.
+1. Fork the repo and create your branch from `main`.
 2. If you have added code that should be tested, add tests.
 3. If you have changed APIs, update the documentation.
 4. Ensure the test suite passes.

README.md (+2 −1)

@@ -56,4 +56,5 @@ If you use PyTouch in your research please cite the corresponding [paper](https:
 ```
 
 ## Acknowledgments
-We would like to thank Haozhou Wang for kindly providing the PyPi repository PyTouch.
+
+PyTouch would like to acknowledge the [list of contributors](AUTHORS.md).

examples/configs/digit_surface3d.yaml (+36)

@@ -0,0 +1,36 @@
+## examples/surface_3d.py
+
+dataset:
+  path: "/path/to/dataset"
+  batch_size: 1
+  shuffle: False
+  num_workers: 8
+
+sensor:
+  max_depth: 0.0198  # max depth value (close to avg gel depth)
+  remove_background_depth: True  # sets depth > max_depth to 0, which is filtered out in points3d
+
+  T_cam_offset_sim: [[2.22e-16, 2.22e-16, -1.00e+00, 0.00e+00],
+                     [-1.00e+00, 0.00e+00, -2.22e-16, 0.00e+00],
+                     [0.00e+00, 1.00e+00, 2.22e-16, 1.50e-02],
+                     [0.00e+00, 0.00e+00, 0.00e+00, 1.00e+00]]
+
+  # rotation of +90 degrees about x-axis
+  T_cam_offset_real: [[1., 0., 0., 0.],
+                      [0., 0., -1, 0.],
+                      [0., 1., 0., 0.],
+                      [0., 0., 0., 1.]]
+
+  P: [[2.30940108e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
+      [0.00000000e+00, 1.73205081e+00, 0.00000000e+00, 0.00000000e+00],
+      [0.00000000e+00, 0.00000000e+00, -1.04081633e+00, -2.04081633e-03],
+      [0.00000000e+00, 0.00000000e+00, -1.00000000e+00, 0.00000000e+00]]
+
+  z_near: 0.001
+  z_far: 0.05
+
+  gel_min_depth: 0.01910434
+  gel_depth_offset: 1e-4
+
+  gel_width: 0.02   # gel width (y-axis) in meters
+  gel_height: 0.03  # gel height (x-axis) in meters
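The sensor block carries the geometry used to back-project a predicted depth map into a point cloud: camera offset transforms for the simulated and real DIGIT, an OpenGL-style 4x4 projection matrix P (the 1.73205081 entry equals 1/tan(30°), i.e. a 60° vertical field of view, and its third row matches the standard depth terms for z_near = 0.001 and z_far = 0.05), plus the gel extents. The nesting under dataset: and sensor: shown above is inferred from how cfg.dataset.path and cfg.sensor are consumed in examples/surface_3d.py. As a hedged sketch, the file can also be inspected outside of Hydra with OmegaConf (the path below is illustrative):

# Sketch only: inspect the surface-3D config without running the example.
from omegaconf import OmegaConf

cfg = OmegaConf.load("examples/configs/digit_surface3d.yaml")  # illustrative path
print(cfg.sensor.z_near, cfg.sensor.z_far)          # clipping planes
print(OmegaConf.to_container(cfg.sensor.P))         # 4x4 projection matrix as nested lists
print(cfg.sensor.gel_width, cfg.sensor.gel_height)  # gel extent in meters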

examples/surface_3d.py (+46)

@@ -0,0 +1,46 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
+
+import hydra
+
+from pytouch.common.visualizer import Visualizer3D, Visualizer3DViewParams
+from pytouch.datasets.sequence import ImageSequenceDataset
+from pytouch.sensors import DigitSensor
+from pytouch.tasks import Surface3D
+
+
+@hydra.main(config_path="configs", config_name="digit_surface3d.yaml")
+def visualize_surface_3d(cfg):
+    # load touch sequence dataset
+    img_seq_ds = ImageSequenceDataset(cfg.dataset.path)
+
+    # define a custom camera view
+    view_params = Visualizer3DViewParams(
+        fov=60,  # field of view
+        front=[0.4257, -0.2125, -0.8795],  # front vector
+        lookat=[0.02, 0.0, 0.0],  # look at vector
+        up=[0.9768, -0.0694, 0.2024],  # up vector
+        zoom=0.25,  # zoom
+    )
+
+    # initialize point cloud visualizer
+    visualizer = Visualizer3D(view_params=view_params)
+
+    # initialize surface 3d model
+    surface3d = Surface3D(
+        DigitSensor,
+        sensor_params=cfg.sensor,
+    )
+
+    # get first sequence
+    sequence = img_seq_ds[0]
+    for img in sequence:
+        output = surface3d.point_cloud_3d(img_color=img)
+        visualizer.render(output.points_3d)
+        # you may also plot the color, predicted normal, and predicted depth images
+        # color = output.color
+        # depth = output.depth
+        # normal = output.normal
+
+
+if __name__ == "__main__":
+    visualize_surface_3d()
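The commented-out lines at the end of the loop indicate that point_cloud_3d also returns the input color image and the predicted depth and normal maps. A hedged sketch for plotting them side by side (it assumes these attributes are image-shaped arrays; matplotlib is not a stated dependency of the example):

# Sketch only: visualize the intermediate Surface3D outputs alongside the point cloud.
import matplotlib.pyplot as plt


def show_outputs(output):
    fig, axes = plt.subplots(1, 3, figsize=(9, 3))
    images = (output.color, output.depth, output.normal)
    for ax, img, title in zip(axes, images, ("color", "predicted depth", "predicted normal")):
        ax.imshow(img)  # color is the raw frame; depth/normal are model predictions
        ax.set_title(title)
        ax.axis("off")
    plt.show()

Because the script is a Hydra entry point, the placeholder dataset path can also be overridden at launch, e.g. python surface_3d.py dataset.path=/data/digit_sequences (path illustrative), instead of editing the YAML.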

examples/zoo_models.py (+1 −1)

@@ -12,7 +12,7 @@ def main():
 
     # load DIGIT sensor touch detect model from pytouch zoo
     touch_detect_model = pytouch_zoo.load_model_from_zoo(  # noqa: F841
-        "touchdetect_resnet18", sensors.DigitSensor
+        "touchdetect_resnet", sensors.DigitSensor
    )
 
     # load custom PyTorch-Lightning saved model
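The zoo entry is renamed from "touchdetect_resnet18" to "touchdetect_resnet", so any script still pinned to the old name will fail to resolve the model. A hedged guard around the call (the wrapper and its error handling are illustrative; only load_model_from_zoo appears in the diff above):

# Sketch only: surface a clear error if a zoo model name has gone stale.
def load_touch_detect(zoo, sensor_cls, name="touchdetect_resnet"):
    try:
        return zoo.load_model_from_zoo(name, sensor_cls)
    except Exception as exc:  # the exact exception type raised by the zoo is not shown here
        raise RuntimeError(
            f"Could not load zoo model '{name}' for {sensor_cls.__name__}: {exc}"
        ) from exc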

noxfile.py (+1 −1)

@@ -5,7 +5,7 @@
 
 BASE = os.path.abspath(os.path.dirname(__file__))
 
-DEFAULT_PYTHON_VERSIONS = ["3.7", "3.8", "3.9"]
+DEFAULT_PYTHON_VERSIONS = ["3.7", "3.8"]
 
 LINT_SETUP_DEPS = ["black", "flake8", "flake8-copyright", "isort"]

pytouch/__init__.py (+1 −1)

@@ -5,4 +5,4 @@
 from pytouch.common import SensorDataSources
 from pytouch.models import PyTouchZoo
 
-__version__ = "0.4.0"
+__version__ = "0.4.2"

pytouch/common/__init__.py (+1)

@@ -1,3 +1,4 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
 
 from .constants import SensorDataSources
+from .visualizer import Visualizer3D

pytouch/common/constants.py (+2)

@@ -14,3 +14,5 @@ class SensorDataSources(Enum):
     VIDEO = "video"
     # Direct input from sensor
     SENSOR = "sensor"
+    # Simulator input
+    SIM = "simulator"
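With the new SIM member, callers can branch on whether frames come from a recorded video, a live sensor, or a simulator. An illustrative use (the helper below is not part of the library; the import path is confirmed by the pytouch/__init__.py diff above):

# Sketch only: branch on the data source, including the new simulator entry.
from pytouch.common import SensorDataSources


def describe_source(source):
    if source is SensorDataSources.SIM:
        return "frames rendered by a tactile simulator"
    return f"frames from a {source.value} input"


print(describe_source(SensorDataSources.SIM))     # frames rendered by a tactile simulator
print(describe_source(SensorDataSources.SENSOR))  # frames from a sensor input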
