Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ jobs:
# needs: format_and_lint
strategy:
matrix:
python-version: ["3.11", "3.12"]
python-version: ["3.13"]
extractor: [
"ctranspath",
"chief-ctranspath",
Expand Down Expand Up @@ -59,7 +59,7 @@ jobs:
# needs: format_and_lint
strategy:
matrix:
python-version: ["3.11", "3.12"]
python-version: ["3.13"]

steps:
- uses: actions/checkout@v4
Expand Down Expand Up @@ -95,12 +95,12 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
python-version: '3.13'
- name: Install the code linting and formatting tool Ruff
run: pipx install ruff
- name: Lint code with Ruff
run: ruff check --output-format=github --target-version=py311
run: ruff check --output-format=github --target-version=py313
- name: Check code formatting with Ruff
run: ruff format --diff --target-version=py311
run: ruff format --diff --target-version=py313
# continue-on-error: true

2 changes: 1 addition & 1 deletion .python-version
Original file line number Diff line number Diff line change
@@ -1 +1 @@
3.12
3.13
30 changes: 26 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -64,20 +64,42 @@ cd STAMP
```

```bash
# GPU (CUDA) Installation (Using flash-attn on CUDA systems for gigapath and other models)
# GPU (CUDA) Installation (excluding conchv1_5, gigapath and musk)

# And then this for all models:
uv sync --extra build --extra gpu
uv sync --extra gpu
source .venv/bin/activate
```

```bash
# CPU-only Installation (excluding COBRA, Gigapath (and flash-attn))
# CPU-only Installation (excluding conchv1_5, gigapath and musk)

uv sync --extra cpu
source .venv/bin/activate
```

> [!CAUTION]
> In the next step we will build [flash-attn](https://github.com/dao-ailab/flash-attention), this might take an extended amount of time and consume a lot of RAM and CPU time!
>
> Please make sure you have [Nvidia CUDA Toolkit 13.0](https://developer.nvidia.com/cuda-13-0-2-download-archive) installed! You must use Nvidia Driver version 580 or newer!
>
> The `nvcc --version` command must indicate that 13.0 is installed and is currently in PATH: `Cuda compilation tools, release 13.0, V13.0.88`.
>
> If you get another version or `Command 'nvcc' not found`, add it to the PATH:
> ```bash
> export CUDA_HOME=/usr/local/cuda-13.0
> export PATH="${CUDA_HOME}/bin:$PATH"
> ```
>
> Run `nvcc --version` again to confirm that flash-attn will be built against CUDA 13.0.


```bash
# GPU (CUDA) Installation - building flash-attn for supporting conchv1_5, gigapath and musk

MAX_JOBS=2 uv sync --extra gpu_all # to speed up the build, increase MAX_JOBS — but note that this uses more RAM!
source .venv/bin/activate
```

If you encounter errors during installation, please read the Installation Troubleshooting section [below](#installation-troubleshooting).

### Additional Dependencies
Expand Down
148 changes: 79 additions & 69 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ authors = [
]
description = "A protocol for Solid Tumor Associative Modeling in Pathology"
readme = "README.md"
requires-python = ">=3.11"
requires-python = ">=3.13,<3.14"

classifiers = [
"Programming Language :: Python :: 3",
Expand All @@ -23,31 +23,30 @@ classifiers = [
]

dependencies = [
"beartype>=0.21.0",
"einops>=0.8.1",
"h5py>=3.14.0",
"jaxtyping>=0.3.2",
"lightning>=2.5.2",
"matplotlib>=3.10.5",
"numpy>=2.2.2",
"opencv-python>=4.12.0.88",
"openpyxl>=3.1.5",
"openslide-bin>=4.0.0.8",
"openslide-python>=1.4.2",
"packaging>=25.0",
"pandas>=2.3.1",
"pillow>=11.3.0",
"pydantic>=2.11.7",
"pyyaml>=6.0.2",
"scikit-learn>=1.6.1",
"scipy>=1.16.1",
"torch>=2.7.1,<2.8.0",
"torchmetrics>=1.6.0",
"torchvision>=0.22.1",
"tqdm>=4.67.1",
"timm>=1.0.19",
"transformers>=4.55.0",
"lifelines>=0.28.0",
"beartype~=0.22.9",
"einops~=0.8.2",
"h5py~=3.16.0",
"jaxtyping~=0.3.9",
"lightning~=2.6.1",
"matplotlib~=3.10.8",
"numpy~=2.4.3",
"opencv-python~=4.13.0.92",
"openpyxl~=3.1.5",
"openslide-bin~=4.0.0.13",
"openslide-python~=1.4.3",
"packaging~=26.0",
"pandas~=2.3.3",
"pillow~=12.1.1",
"pydantic~=2.12.5",
"pyyaml~=6.0.3",
"scikit-learn~=1.8.0",
"scipy~=1.17.1",
"torchmetrics~=1.9.0",
"tqdm~=4.67.3",
"timm~=1.0.25",
"transformers~=4.57.6",
"lifelines~=0.30.3",
"huggingface-hub~=0.36.2"
]

[project.optional-dependencies]
Expand All @@ -58,14 +57,13 @@ build = [
"ninja"
]
flash-attention = [
"flash-attn>=2.8.3",
"flash-attn==2.8.3",
]
conch = [
"huggingface-hub>=0.26.2",
"conch @ git+https://github.com/KatherLab/CONCH",
]
conch1_5_cpu = [
"transformers>=4.45.2",
"einops-exts==0.0.4",
]
conch1_5 = [
Expand Down Expand Up @@ -99,19 +97,21 @@ virchow2 = [
"torch>=2.0.0",
]
cobra = [
"stamp[flash-attention]",
"causal-conv1d>=1.5.3.post1",
"mamba-ssm>=2.2.6.post3",
"causal-conv1d @ https://github.com/Dao-AILab/causal-conv1d/releases/download/v1.6.1.post4/causal_conv1d-1.6.1+cu13torch2.10cxx11abiTRUE-cp313-cp313-linux_x86_64.whl ; sys_platform == 'linux' and platform_machine == 'x86_64' and python_version == '3.13' and extra == 'cobra'",
"causal-conv1d @ https://github.com/Dao-AILab/causal-conv1d/releases/download/v1.6.1.post4/causal_conv1d-1.6.1+cu13torch2.10cxx11abiTRUE-cp313-cp313-linux_aarch64.whl ; sys_platform == 'linux' and platform_machine == 'aarch64' and python_version == '3.13' and extra == 'cobra'",
"mamba-ssm @ https://github.com/state-spaces/mamba/releases/download/v2.3.1/mamba_ssm-2.3.1+cu13torch2.10cxx11abiTRUE-cp313-cp313-linux_x86_64.whl ; sys_platform == 'linux' and platform_machine == 'x86_64' and python_version == '3.13'",
"mamba-ssm @ https://github.com/state-spaces/mamba/releases/download/v2.3.1/mamba_ssm-2.3.1+cu13torch2.10cxx11abiTRUE-cp313-cp313-linux_aarch64.whl ; sys_platform == 'linux' and platform_machine == 'aarch64' and python_version == '3.13'",

"cobra @ git+http://github.com/KatherLab/COBRA.git@49d231191db5a9c2ea37a2398dd922c8c9ee9cdb",
"jinja2>=3.1.4",
"triton"
"jinja2>=3.1.4",
"triton",
]
prism_cpu = [
"sacremoses==0.1.1",
"environs==11.0.0",
]
prism = [
"stamp[prism_cpu, flash-attention]",
"stamp[prism_cpu]",
]
madeleine = [
"madeleine @ git+https://github.com/mahmoodlab/MADELEINE.git@de7c85acc2bdad352e6df8eee5694f8b6f288012"
Expand All @@ -127,16 +127,29 @@ plip_cpu = [
"transformers>=4.45.2"
]
plip = [
"stamp[plip_cpu, flash-attention]",
"stamp[plip_cpu]",
]
mcp = [
"fastmcp>=2.11.2",
]


# Blanket target
cpu = ["stamp[conch,ctranspath,uni,virchow2,chief_ctranspath,conch1_5_cpu,prism_cpu,madeleine,musk_cpu,plip_cpu]"]
gpu = ["stamp[conch,ctranspath,uni,virchow2,chief_ctranspath,conch1_5,prism,madeleine,musk,plip,gigapath,cobra]"]
cpu = [
"torch~=2.10.0",
"torchvision~=0.25.0",
"stamp[conch,ctranspath,uni,virchow2,chief_ctranspath,conch1_5_cpu,prism_cpu,madeleine,musk_cpu,plip_cpu]"
]
gpu = [
"torch~=2.10.0",
"torchvision~=0.25.0",
"stamp[conch,ctranspath,uni,virchow2,chief_ctranspath,conch1_5_cpu,prism,madeleine,plip,musk_cpu,cobra]"
]
gpu_all = [
"torch~=2.10.0",
"torchvision~=0.25.0",
"stamp[conch,ctranspath,uni,virchow2,chief_ctranspath,musk,gigapath,conch1_5,prism,madeleine,plip,cobra]"
]
all = ["stamp[cpu]"]

[project.scripts]
Expand All @@ -148,11 +161,11 @@ all = ["stamp[cpu]"]

[dependency-groups]
dev = [
"huggingface-hub>=0.34.3",
"ipykernel>=6.30.1",
"pyright>=1.1.403",
"pytest>=8.4.1",
"ruff>=0.12.7",
"huggingface-hub~=0.36.2",
"ipykernel~=7.2.0",
"pyright~=1.1.408",
"pytest~=9.0.2",
"ruff>=0.15.7",
]

[build-system]
Expand All @@ -173,28 +186,41 @@ lint.ignore = ["F722"] # https://docs.kidger.site/jaxtyping/faq/#flake8-or-ruff-

[tool.uv]
conflicts = [
[
{ extra = "cpu" },
{ extra = "gpu" }
]
[
{ extra = "cpu" },
{ extra = "gpu" },
],
[
{ extra = "cpu" },
{ extra = "gpu_all" },
],
]
build-constraint-dependencies = [
"torch<2.8",
"torchvision<0.23",
"torch==2.10.0",
"torchvision==0.25.0",
]


[tool.uv.sources]
torch = [
{ index = "pytorch-cu128", marker = "sys_platform != 'darwin'" }
{ index = "pytorch-cpu", extra = "cpu" },
{ index = "pytorch-cu130", extra = "gpu" },
{ index = "pytorch-cu130", extra = "gpu_all" },
]
torchvision = [
{ index = "pytorch-cu128", marker = "sys_platform != 'darwin'" }
{ index = "pytorch-cpu", extra = "cpu" },
{ index = "pytorch-cu130", extra = "gpu" },
{ index = "pytorch-cu130", extra = "gpu_all" },
]

[[tool.uv.index]]
name = "pytorch-cu128"
url = "https://download.pytorch.org/whl/cu128"
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
explicit = true

[[tool.uv.index]]
name = "pytorch-cu130"
url = "https://download.pytorch.org/whl/cu130"
explicit = true


Expand All @@ -210,7 +236,7 @@ requires-dist = [
"scikit-learn",
"tqdm",
"transformers",
"xformers; sys_platform != 'darwin'" # xformers is not supported on macOS
"xformers; (sys_platform == 'linux' and platform_machine == 'x86_64') or (sys_platform == 'win32' and platform_machine == 'AMD64')"
]


Expand All @@ -221,23 +247,7 @@ requires-dist = [
"einops",
]


[[tool.uv.dependency-metadata]]
name = "mamba-ssm"
requires-dist = [
"setuptools",
]

[[tool.uv.dependency-metadata]]
name = "causal-conv1d"
requires-dist = [
"setuptools",
"torch"
]

[tool.uv.extra-build-dependencies]
cobra = [{ requirement = "torch", match-runtime = true }]
flash-attn = [{ requirement = "torch", match-runtime = true }]
mamba-ssm = [{ requirement = "torch", match-runtime = true }]
gigapath = [{ requirement = "torch", match-runtime = true }]
conch = [{ requirement = "torch", match-runtime = true }]
4 changes: 3 additions & 1 deletion src/stamp/modeling/deploy.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,9 @@ def load_model_from_ckpt(path: Union[str, Path]):
hparams["task"], hparams["supported_features"], ModelName(hparams["model_name"])
)

return LitModelClass.load_from_checkpoint(path, model_class=ModelClass)
return LitModelClass.load_from_checkpoint(
path, model_class=ModelClass, weights_only=False
)


def deploy_categorical_model_(
Expand Down
4 changes: 3 additions & 1 deletion src/stamp/modeling/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -557,7 +557,9 @@ def train_model_(

# Reload the best model using the same class as the input model
ModelClass = type(model)
return ModelClass.load_from_checkpoint(model_checkpoint.best_model_path)
return ModelClass.load_from_checkpoint(
model_checkpoint.best_model_path, weights_only=False
)


def _compute_class_weights_and_check_categories(
Expand Down
Loading
Loading