Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 51 additions & 0 deletions .github/workflows/gpu-tests.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
# CI workflow: run the GPU-marked test suite on a self-hosted GPU runner,
# once per pixi environment in the matrix.
# (Indentation restored — the scraped diff had flattened the YAML structure.)
name: GPU Tests

on:
  push:
    branches: [main]
    paths:
      - 'src/**'
      - 'tests/**'
      - 'pyproject.toml'
      - 'pixi.lock'
      - '.github/workflows/gpu-tests.yml'
  pull_request:
    branches: [main]
    paths:
      - 'src/**'
      - 'tests/**'
      - 'pyproject.toml'
      - 'pixi.lock'
      - '.github/workflows/gpu-tests.yml'
  workflow_dispatch:

# Cancel any in-flight run for the same ref when new commits arrive,
# so the (scarce) GPU runner is not tied up by stale runs.
concurrency:
  group: gpu-tests-${{ github.ref }}
  cancel-in-progress: true

jobs:
  gpu-tests:
    runs-on: gpu-1  # self-hosted runner label — presumably a machine with a CUDA GPU; confirm runner registration
    timeout-minutes: 30
    environment: gpu-testing
    permissions:
      contents: read  # least privilege: the job only needs to check out code
    strategy:
      fail-fast: false  # let every environment report results even if one fails
      matrix:
        environment: [boltz-dev, protenix-dev, rf3-dev]

    name: ${{ matrix.environment }}

    steps:
      - name: Checkout code
        uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4

      - name: Install pixi
        uses: prefix-dev/setup-pixi@19eac09b398e3d0c747adc7921926a6d802df4da # v0.8.8

      # Importing the extension module triggers its (JIT) build, so compile
      # failures surface in this dedicated step rather than mid-test-run.
      - name: Build CUDA extensions
        run: pixi run -e ${{ matrix.environment }} python3 -c "from sampleworks.core.forward_models.xray.real_space_density_deps.ops.csrc import dilate_points_cuda"

      - name: Run tests
        run: pixi run -e ${{ matrix.environment }} gpu-tests
Comment thread
coderabbitai[bot] marked this conversation as resolved.
3,303 changes: 3,235 additions & 68 deletions pixi.lock

Large diffs are not rendered by default.

11 changes: 10 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -46,9 +46,11 @@ warn_return_any = true
warn_unused_configs = true

[tool.pixi.activation.env]
CUDA_HOME = "$CONDA_PREFIX"
PYTHONNOUSERSITE = "1"

[tool.pixi.dependencies]
cuda-toolkit = ">=12,<13"
gcc_linux-64 = ">=9,<13"
gxx_linux-64 = ">=9,<13"
ninja = "*"
Expand Down Expand Up @@ -83,6 +85,12 @@ cuda = "12"
args = [{arg = "flags", default = ""}]
cmd = "pytest {{ flags }}"

[tool.pixi.tasks.cpu-tests]
cmd = "pytest -m 'not gpu'"

[tool.pixi.tasks.gpu-tests]
cmd = "pytest -m gpu"

# Cross-env tasks use bash to orchestrate across environments because pixi's
# depends-on/alias mechanism only works within a single environment.
# This continues past failures, exits 1 if any env failed.
Expand All @@ -105,7 +113,8 @@ platforms = ["linux-64"]
[tool.pytest.ini_options]
addopts = "-v --strict-markers"
markers = [
"slow: marks tests as slow (deselect with '-m \"not slow\"')"
"slow: marks tests as slow (deselect with '-m \"not slow\"')",
"gpu: marks tests that require a GPU runner"
Comment thread
coderabbitai[bot] marked this conversation as resolved.
]
python_classes = ["Test*"]
python_files = ["test_*.py"]
Expand Down
4 changes: 4 additions & 0 deletions tests/integration/test_pipeline_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -904,6 +904,7 @@ def test_sampling_determinism_with_seed(self, device: torch.device):
# ============================================================================


@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize("wrapper_type", get_slow_wrappers(), ids=lambda w: w.value)
@pytest.mark.parametrize("structure_fixture", STRUCTURES, ids=lambda s: s.replace("structure_", ""))
Expand Down Expand Up @@ -1021,6 +1022,7 @@ def test_device_consistency(
assert step_output.state.device == device


@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize("wrapper_type", get_slow_wrappers(), ids=lambda w: w.value)
@pytest.mark.parametrize(
Expand Down Expand Up @@ -1069,6 +1071,7 @@ def test_trajectory_scaler_returns_guidance_output(
assert len(result.trajectory) == 3


@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize("wrapper_type", get_slow_wrappers(), ids=lambda w: w.value)
@pytest.mark.parametrize("structure_fixture", STRUCTURES, ids=lambda s: s.replace("structure_", ""))
Expand Down Expand Up @@ -1128,6 +1131,7 @@ def test_reward_inputs_are_valid(
)


@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize("wrapper_type", get_slow_wrappers(), ids=lambda w: w.value)
class TestRealWrapperNumericalStability:
Expand Down
2 changes: 2 additions & 0 deletions tests/metrics/test_lddt_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ def altlocB_backbone(structure_6b8x_with_altlocs) -> AtomArrayStack:
return ensure_atom_array_stack(altlocB_bb)


@pytest.mark.gpu
def test_all_atom_lddt_end_to_end(altlocA_backbone, altlocB_backbone):
selection_string = "res_id > 179 and res_id < 190"
allatom = AllAtomLDDT()
Expand Down Expand Up @@ -79,6 +80,7 @@ def test_all_atom_lddt_end_to_end(altlocA_backbone, altlocB_backbone):
)


@pytest.mark.gpu
def test_selected_lddt_end_to_end(altlocA_backbone, altlocB_backbone):
import numpy as np

Expand Down
6 changes: 6 additions & 0 deletions tests/models/boltz/test_boltz_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -217,6 +217,7 @@ def test_load_model_atom_array_from_structures_dir(self, temp_output_dir: Path):
assert len(arr) == 2


@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize("wrapper_type", BOLTZ_WRAPPER_TYPES, ids=lambda w: w.value)
class TestBoltzWrapperInitialization:
Expand All @@ -234,6 +235,7 @@ def test_model_on_correct_device(self, wrapper_type: StructurePredictor, device,
assert next(wrapper.model.parameters()).device == device


@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize("wrapper_type", BOLTZ_WRAPPER_TYPES, ids=lambda w: w.value)
@pytest.mark.parametrize("structure_fixture", STRUCTURES, ids=lambda s: s.replace("structure_", ""))
Expand Down Expand Up @@ -289,6 +291,7 @@ def test_featurize_with_ensemble_size(
assert features.x_init.shape[0] == ensemble_size


@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize("wrapper_type", BOLTZ_WRAPPER_TYPES, ids=lambda w: w.value)
@pytest.mark.parametrize("structure_fixture", STRUCTURES, ids=lambda s: s.replace("structure_", ""))
Expand Down Expand Up @@ -423,6 +426,7 @@ def test_step_denoises_input(
assert not torch.allclose(result, x_init)


@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize("wrapper_type", BOLTZ_WRAPPER_TYPES, ids=lambda w: w.value)
@pytest.mark.parametrize("structure_fixture", STRUCTURES, ids=lambda s: s.replace("structure_", ""))
Expand Down Expand Up @@ -483,6 +487,7 @@ def test_initialize_from_prior_batch_dimension(
assert result.shape[0] == batch_size


@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize("wrapper_type", BOLTZ_WRAPPER_TYPES, ids=lambda w: w.value)
class TestBoltzWrapperInitializeFromPriorValidation:
Expand Down Expand Up @@ -516,6 +521,7 @@ def test_initialize_from_prior_requires_features_or_shape(
wrapper.initialize_from_prior(batch_size=2)


@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize("wrapper_type", BOLTZ_WRAPPER_TYPES, ids=lambda w: w.value)
@pytest.mark.parametrize("structure_fixture", STRUCTURES, ids=lambda s: s.replace("structure_", ""))
Expand Down
6 changes: 6 additions & 0 deletions tests/models/test_model_wrapper_protocol.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ def test_isinstance_flow_model_wrapper(self, wrapper_info: ComponentInfo, reques
f"{wrapper_info.name} does not implement FlowModelWrapper protocol"
)

@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize(
"structure_fixture", STRUCTURES, ids=lambda s: s.replace("structure_", "")
Expand Down Expand Up @@ -73,6 +74,7 @@ def test_featurize_returns_generative_model_input(
f"got {type(features.conditioning)}"
)

@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize(
"structure_fixture", STRUCTURES, ids=lambda s: s.replace("structure_", "")
Expand Down Expand Up @@ -103,6 +105,7 @@ def test_featurize_x_init_shape(
f"got {features.x_init.shape[2]}"
)

@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize(
"structure_fixture", STRUCTURES, ids=lambda s: s.replace("structure_", "")
Expand Down Expand Up @@ -132,6 +135,7 @@ def test_step_returns_tensor(
f"{features.x_init.shape}"
)

@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize(
"structure_fixture", STRUCTURES, ids=lambda s: s.replace("structure_", "")
Expand All @@ -152,6 +156,7 @@ def test_step_with_float_t(

assert torch.is_tensor(result), f"{wrapper_info.name}.step must return Tensor with float t"

@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize(
"structure_fixture", STRUCTURES, ids=lambda s: s.replace("structure_", "")
Expand Down Expand Up @@ -182,6 +187,7 @@ def test_initialize_from_prior_with_features(
f"{result.shape[-1]}"
)

@pytest.mark.gpu
@pytest.mark.slow
def test_initialize_from_prior_with_shape(self, wrapper_info: ComponentInfo, request):
"""Test initialize_from_prior with explicit shape."""
Expand Down
6 changes: 6 additions & 0 deletions tests/rewards/test_real_space_density_reward.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
)


@pytest.mark.gpu
@pytest.mark.slow
class TestRewardFunctionBasics:
"""Test basic functionality of the RealSpaceRewardFunction class."""
Expand Down Expand Up @@ -131,6 +132,7 @@ def test_reward_function_deterministic(
torch.testing.assert_close(loss1, loss2)


@pytest.mark.gpu
@pytest.mark.slow
class TestDensityCorrelation:
"""Test that reward function correlates with underlying electron density."""
Expand Down Expand Up @@ -270,6 +272,7 @@ def test_loss_monotonic_with_perturbation(
assert losses[i + 1] >= losses[i]


@pytest.mark.gpu
@pytest.mark.slow
class TestVmapCompatibility:
"""Test vmap functionality for use in FK steering and particle methods."""
Expand Down Expand Up @@ -466,6 +469,7 @@ def test_vmap_consistency(self, reward_function_1vme, test_coordinates_1vme, dev
torch.testing.assert_close(result_vmap, result_sequential, rtol=1e-5, atol=1e-6)


@pytest.mark.gpu
@pytest.mark.slow
class TestGradientFlow:
"""Test gradient computation for coordinate optimization."""
Expand Down Expand Up @@ -614,6 +618,7 @@ def test_gradient_magnitudes_reasonable(
assert grad_norm < 1e6


@pytest.mark.gpu
@pytest.mark.slow
@pytest.mark.parametrize(
"shape",
Expand Down Expand Up @@ -659,6 +664,7 @@ def test_batch_shape(self, reward_function_1vme, test_coordinates_1vme, device,
assert torch.isfinite(loss)


@pytest.mark.gpu
@pytest.mark.slow
class TestEdgeCases:
"""Test edge cases and error handling."""
Expand Down
1 change: 1 addition & 0 deletions tests/utils/test_atom_reconciler.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,7 @@ def test_arbitrary_batch_dims(self, mismatch_rec):
assert result.shape == (3, 4, 5, 3)


@pytest.mark.gpu
class TestDeviceHandling:
"""Reconciler.to() moves index tensors to match coordinate device."""

Expand Down
3 changes: 3 additions & 0 deletions tests/utils/test_density_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,7 @@ def test_empty_array_initialized(self, synthetic_grid):
assert np.all(xmap.array == 0.0)


@pytest.mark.gpu
class TestComputeDensityFromAtomarray:
"""Tests for compute_density_from_atomarray function."""

Expand Down Expand Up @@ -335,6 +336,7 @@ def test_with_real_structure(
assert density_no_xmap.shape != density_xmap.shape # different resolutions


@pytest.mark.gpu
class TestComputeDensityErrors:
"""Tests for error handling in compute_density_from_atomarray."""

Expand Down Expand Up @@ -418,6 +420,7 @@ def test_single_array_shapes_unchanged(
assert occupancies.shape == (1, n_atoms)


@pytest.mark.gpu
class TestComputeDensityFromAtomArrayStack:
"""Tests for compute_density_from_atomarray with AtomArrayStack input."""

Expand Down
Loading