diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml
new file mode 100644
index 00000000..5f5dcf1d
--- /dev/null
+++ b/.github/workflows/ruff.yml
@@ -0,0 +1,22 @@
+name: Ruff linter
+
+on:
+  pull_request:
+  push:
+    branches:
+      - main
+
+jobs:
+  ruff:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-python@v4
+        with:
+          python-version: 3.11.5
+
+      - uses: chartboost/ruff-action@v1
+        with:
+          version: 0.5.4
+          args: check --output-format github
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 4d217e4a..f6f614be 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,3 +1,6 @@
+ci:
+  skip: [ruff]
+
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v4.6.0
@@ -10,5 +13,9 @@ repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.5.4
     hooks:
+      - id: ruff
+        types_or: [ python, pyi ]
+        args: [ --fix ]
+
       - id: ruff-format
         types_or: [ python, pyi ]
diff --git a/demos/differentiable_renderer/patch_tracking/demo_utils.py b/demos/differentiable_renderer/patch_tracking/demo_utils.py
index e67a9ec8..81ed9d9a 100644
--- a/demos/differentiable_renderer/patch_tracking/demo_utils.py
+++ b/demos/differentiable_renderer/patch_tracking/demo_utils.py
@@ -67,6 +67,27 @@ def transform_from_axis_angle(axis, angle):
 vec_transform_axis_angle = jax.vmap(transform_from_axis_angle, (None, 0))
 
 
+def unproject_depth(depth, intrinsics):
+    """Unprojects a depth image into a point cloud.
+
+    Args:
+        depth (jnp.ndarray): The depth image. Shape (H, W)
+        intrinsics (b.camera.Intrinsics): The camera intrinsics.
+    Returns:
+        jnp.ndarray: The point cloud. Shape (H, W, 3)
+    """
+    mask = (depth < intrinsics.far) * (depth > intrinsics.near)
+    depth = depth * mask + intrinsics.far * (1.0 - mask)
+    y, x = jnp.mgrid[: depth.shape[0], : depth.shape[1]]
+    x = (x - intrinsics.cx) / intrinsics.fx
+    y = (y - intrinsics.cy) / intrinsics.fy
+    point_cloud_image = jnp.stack([x, y, jnp.ones_like(x)], axis=-1) * depth[:, :, None]
+    return point_cloud_image
+
+
+unproject_depth_vec = jax.vmap(unproject_depth, (0, None))
+
+
 ### Convenience wrapper for common code used in demos ###
 def get_renderer_boxdata_and_patch():
     width = 100
diff --git a/demos/sparse_model/sparse_model_cotracker.py b/demos/sparse_model/sparse_model_cotracker.py
index 860bfa8f..bb691512 100644
--- a/demos/sparse_model/sparse_model_cotracker.py
+++ b/demos/sparse_model/sparse_model_cotracker.py
@@ -382,8 +382,6 @@ def viz_params(params, start_t, end_t):
     ),
 )
 
-print(loss_function(SECOND_T, params, cluster_assignments, gt_info))
-
 cluster_assignments = cluster_assignments.at[top_indices].set(
     cluster_assignments.max() + 1
 )
diff --git a/demos/tracking_online_learning.py b/demos/tracking_online_learning.py
index fea1cb1e..2bde0719 100644
--- a/demos/tracking_online_learning.py
+++ b/demos/tracking_online_learning.py
@@ -244,14 +244,14 @@ def enumerative_proposal(trace, addressses, key, all_deltas):
     # )
 
     # Outliers are AND of the RGB and Depth outlier masks
-    outler_mask = outliers
-    rr.log("outliers", rr.Image(jnp.tile((outler_mask * 1.0)[..., None], (1, 1, 3))))
+    outlier_mask = outliers
+    rr.log("outliers", rr.Image(jnp.tile((outlier_mask * 1.0)[..., None], (1, 1, 3))))
 
     # Get the point cloud corresponding to the outliers
     point_cloud = b3d.xyz_from_depth(trace["observed_rgb_depth"][1], fx, fy, cx, cy)[
-        outler_mask
+        outlier_mask
     ]
-    point_cloud_colors = trace["observed_rgb_depth"][0][outler_mask]
+    point_cloud_colors = trace["observed_rgb_depth"][0][outlier_mask]
 
     # Segment the outlier cloud.
     assignment = b3d.segment_point_cloud(point_cloud)
@@ -334,9 +334,10 @@ def enumerative_proposal(trace, addressses, key, all_deltas):
     )[0]
     b3d.rerun_visualize_trace_t(trace, t)
     rr.set_time_sequence("frame", t)
-    outler_mask = jnp.logical_and(rgb_outliers, depth_outliers)
 
     rgb_inliers, rgb_outliers = b3d.get_rgb_inlier_outlier_from_trace(trace)
     depth_inliers, depth_outliers = b3d.get_depth_inlier_outlier_from_trace(trace)
-    rr.log("outliers", rr.Image(jnp.tile((outler_mask * 1.0)[..., None], (1, 1, 3))))
+    outlier_mask = jnp.logical_and(rgb_outliers, depth_outliers)
+
+    rr.log("outliers", rr.Image(jnp.tile((outlier_mask * 1.0)[..., None], (1, 1, 3))))
 
diff --git a/src/b3d/io/utils.py b/src/b3d/io/utils.py
index 7e7829f1..e36dd512 100644
--- a/src/b3d/io/utils.py
+++ b/src/b3d/io/utils.py
@@ -172,10 +172,15 @@ def video_input_from_mp4(
     downsize=1,
     reverse_color_channel=False,
 ):
+    info = load_video_info(video_fname)
+
+    if info is None:
+        return None
+
     if times is None:
+        T = info.timesteps
         times = np.arange(T, step=step)
-    info = load_video_info(video_fname)
 
     intr = np.load(intrinsics_fname, allow_pickle=True)
     vid = load_video_to_numpy(
         video_fname,
diff --git a/src/b3d/renderer/renderer_original.py b/src/b3d/renderer/renderer_original.py
index 6a4358c2..161bd471 100644
--- a/src/b3d/renderer/renderer_original.py
+++ b/src/b3d/renderer/renderer_original.py
@@ -75,9 +75,14 @@ def interpolate_fwd(self, attr, rast, faces):
     return output, (attr, rast, faces)
 
 
+# def rasterize_bwd(self, saved_tensors, diffs):
+#     pos, tri = saved_tensors
+#     return jnp.zeros_like(pos), jnp.zeros_like(tri)
+
+
 def interpolate_bwd(self, saved_tensors, diffs):
-    _attr, _rast, _faces = saved_tensors
-    return jnp.zeros_like(pos), jnp.zeros_like(tri)
+    attr, rast, faces = saved_tensors
+    return jnp.zeros_like(attr), jnp.zeros_like(rast), jnp.zeros_like(faces)
 
 
 interpolate_prim.defvjp(interpolate_fwd, interpolate_bwd)