Commit

Merge branch 'branch-25.02' into bc_with_nodes

eriknw authored Jan 31, 2025
2 parents a4370d5 + edd96f0 commit 49716ce
Showing 18 changed files with 293 additions and 24 deletions.
2 changes: 1 addition & 1 deletion .devcontainer/cuda11.8-pip/devcontainer.json
@@ -5,7 +5,7 @@
"args": {
"CUDA": "11.8",
"PYTHON_PACKAGE_MANAGER": "pip",
"BASE": "rapidsai/devcontainers:25.02-cpp-cuda11.8-ucx1.17.0-openmpi-ubuntu22.04"
"BASE": "rapidsai/devcontainers:25.02-cpp-cuda11.8-ucx1.18.0-openmpi-ubuntu22.04"
}
},
"runArgs": [
@@ -3,15 +3,15 @@
"context": "${localWorkspaceFolder}/.devcontainer",
"dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile",
"args": {
"CUDA": "12.5",
"CUDA": "12.8",
"PYTHON_PACKAGE_MANAGER": "conda",
"BASE": "rapidsai/devcontainers:25.02-cpp-mambaforge-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.02-cuda12.5-conda"
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.02-cuda12.8-conda"
],
"hostRequirements": {"gpu": "optional"},
"features": {
@@ -20,7 +20,7 @@
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda12.5-envs}"],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda12.8-envs}"],
"postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"],
"workspaceFolder": "/home/coder",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/nx-cugraph,type=bind,consistency=consistent",
@@ -29,7 +29,7 @@
"source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.conda/pkgs,target=/home/coder/.conda/pkgs,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda12.5-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent"
"source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda12.8-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent"
],
"customizations": {
"vscode": {
@@ -3,20 +3,20 @@
"context": "${localWorkspaceFolder}/.devcontainer",
"dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile",
"args": {
"CUDA": "12.5",
"CUDA": "12.8",
"PYTHON_PACKAGE_MANAGER": "pip",
"BASE": "rapidsai/devcontainers:25.02-cpp-cuda12.5-ucx1.17.0-openmpi-ubuntu22.04"
"BASE": "rapidsai/devcontainers:25.02-cpp-cuda12.8-ucx1.18.0-openmpi-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.02-cuda12.5-pip"
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.02-cuda12.8-pip"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/cuda:25.2": {
"version": "12.5",
"version": "12.8",
"installcuBLAS": true,
"installcuSOLVER": true,
"installcuRAND": true,
@@ -28,15 +28,15 @@
"ghcr.io/rapidsai/devcontainers/features/cuda",
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda12.5-venvs}"],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda12.8-venvs}"],
"postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"],
"workspaceFolder": "/home/coder",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/nx-cugraph,type=bind,consistency=consistent",
"mounts": [
"source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda12.5-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent"
"source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda12.8-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent"
],
"customizations": {
"vscode": {
2 changes: 1 addition & 1 deletion .github/workflows/pr.yaml
@@ -45,7 +45,7 @@ jobs:
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
arch: '["amd64"]'
cuda: '["12.5"]'
cuda: '["12.8"]'
build_command: |
sccache -z;
build-all --verbose -j$(nproc --ignore=1);
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -85,7 +85,7 @@ repos:
# Don't have strict linting for miscellaneous code
args: [--extend-exclude, "benchmarks/,ci/,docs/,notebooks/"]
- repo: https://github.com/rapidsai/dependency-file-generator
rev: v1.16.0
rev: v1.17.0
hooks:
- id: rapids-dependency-file-generator
args: ["--clean"]
2 changes: 2 additions & 0 deletions README.md
@@ -154,6 +154,8 @@ Below is the list of algorithms that are currently supported in nx-cugraph.
│ └─ <a href="https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.link_analysis.hits_alg.hits.html#networkx.algorithms.link_analysis.hits_alg.hits">hits</a>
└─ <a href="https://networkx.org/documentation/stable/reference/algorithms/link_analysis.html#module-networkx.algorithms.link_analysis.pagerank_alg">pagerank_alg</a>
└─ <a href="https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.link_analysis.pagerank_alg.pagerank.html#networkx.algorithms.link_analysis.pagerank_alg.pagerank">pagerank</a>
<a href="https://networkx.org/documentation/stable/reference/algorithms/link_prediction.html#module-networkx.algorithms.link_prediction">link_prediction</a>
└─ <a href="https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.link_prediction.jaccard_coefficient.html#networkx.algorithms.link_prediction.jaccard_coefficient">jaccard_coefficient</a>
<a href="https://networkx.org/documentation/stable/reference/algorithms/operators.html">operators</a>
└─ <a href="https://networkx.org/documentation/stable/reference/algorithms/operators.html#module-networkx.algorithms.operators.unary">unary</a>
├─ <a href="https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.operators.unary.complement.html#networkx.algorithms.operators.unary.complement">complement</a>
7 changes: 6 additions & 1 deletion _nx_cugraph/__init__.py
@@ -1,4 +1,4 @@
# Copyright (c) 2023-2024, NVIDIA CORPORATION.
# Copyright (c) 2023-2025, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -110,11 +110,13 @@
"is_tree",
"is_weakly_connected",
"isolates",
"jaccard_coefficient",
"k_truss",
"karate_club_graph",
"katz_centrality",
"krackhardt_kite_graph",
"ladder_graph",
"leiden_communities",
"les_miserables_graph",
"lollipop_graph",
"louvain_communities",
@@ -235,6 +237,9 @@
"katz_centrality": {
"dtype : dtype or None, optional": "The data type (np.float32, np.float64, or None) to use for the edge weights in the algorithm. If None, then dtype is determined by the edge values.",
},
"leiden_communities": {
"dtype : dtype or None, optional": "The data type (np.float32, np.float64, or None) to use for the edge weights in the algorithm. If None, then dtype is determined by the edge values.",
},
"louvain_communities": {
"dtype : dtype or None, optional": "The data type (np.float32, np.float64, or None) to use for the edge weights in the algorithm. If None, then dtype is determined by the edge values.",
},
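These dtype entries document a backend-only keyword that nx-cugraph accepts on top of the standard NetworkX signatures. A minimal sketch of how such a backend-only argument can be passed through NetworkX dispatch (not part of this commit; assumes nx-cugraph is installed and registered as the "cugraph" backend and a CUDA GPU is available):

# Sketch only, not from the commit: forwarding the backend-only dtype argument
# to nx-cugraph through NetworkX's backend dispatch.
import networkx as nx
import numpy as np

G = nx.karate_club_graph()
# Backend-specific keywords such as dtype are forwarded to the backend implementation.
communities = nx.community.louvain_communities(G, backend="cugraph", dtype=np.float32)
print(len(communities))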
56 changes: 55 additions & 1 deletion benchmarks/pytest-based/bench_algos.py
@@ -197,7 +197,11 @@ def get_graph_obj_for_benchmark(graph_obj, backend_wrapper):
"""
G = graph_obj
if backend_wrapper.backend_name == "cugraph-preconverted":
G = nxcg.from_networkx(G, preserve_all_attrs=True)
G = nxcg.from_networkx(
G,
preserve_all_attrs=True,
use_compat_graph=True,
)
return G
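The added use_compat_graph=True argument appears to request nx-cugraph's NetworkX-compatible wrapper class rather than a bare GPU graph (my reading of the flag, not stated in the diff), so the preconverted benchmark object still behaves like a regular nx.Graph. A minimal sketch of the same pre-conversion outside the benchmark harness (assumes nx-cugraph is installed and a CUDA GPU is available):

# Sketch only, not from the commit: pre-converting a NetworkX graph the way the
# "cugraph-preconverted" benchmark backend does.
import networkx as nx
import nx_cugraph as nxcg

G = nx.karate_club_graph()
G_pre = nxcg.from_networkx(G, preserve_all_attrs=True, use_compat_graph=True)
# The wrapper is intended to be usable wherever an nx.Graph is expected.
print(type(G_pre).__name__)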


@@ -319,6 +323,27 @@ def bench_louvain_communities(benchmark, graph_obj, backend_wrapper):
    assert type(result) is list


@pytest.mark.skipif("not hasattr(nx.community, 'leiden_communities')")
def bench_leiden_communities(benchmark, graph_obj, backend_wrapper):
    G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
    # DiGraphs are not supported
    if G.is_directed():
        G = G.to_undirected()
    if G.__networkx_backend__ not in nx.community.leiden_communities.backends:
        pytest.skip(
            reason=f"leiden_communities not implemented by {G.__networkx_backend__!r}"
        )
        return
    result = benchmark.pedantic(
        target=backend_wrapper(nx.community.leiden_communities),
        args=(G,),
        rounds=rounds,
        iterations=iterations,
        warmup_rounds=warmup_rounds,
    )
    assert type(result) is list


def bench_degree_centrality(benchmark, graph_obj, backend_wrapper):
    G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
    result = benchmark.pedantic(
@@ -898,6 +923,35 @@ def bench_bipartite_BC_n1000_m3000_k100000(benchmark, backend_wrapper):
    assert type(result) is dict


def bench_jaccard(benchmark, graph_obj, backend_wrapper):
    G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)

    # ebunch is a list of node pairs to limit the jaccard run.
    nodes = list(G.nodes)
    start = nodes[0]
    ebunch = [(start, n) for n in nodes[1:]]
    start = nodes[1]
    ebunch += [(start, n) for n in nodes[2:]]
    start = nodes[2]
    ebunch += [(start, n) for n in nodes[3:]]

    # DiGraphs are not supported
    if G.is_directed():
        G = G.to_undirected()

    result = benchmark.pedantic(
        target=backend_wrapper(nx.jaccard_coefficient, force_unlazy_eval=True),
        args=(G,),
        kwargs=dict(
            ebunch=ebunch,
        ),
        rounds=rounds,
        iterations=iterations,
        warmup_rounds=warmup_rounds,
    )
    assert type(result) is list


@pytest.mark.skip(reason="benchmark not implemented")
def bench_complete_bipartite_graph(benchmark, graph_obj, backend_wrapper):
    pass
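For reference, the call pattern that bench_jaccard exercises maps onto the standard NetworkX dispatch API. A minimal sketch outside the benchmark harness (not part of this commit; assumes nx-cugraph is installed as the "cugraph" backend; jaccard_coefficient is lazy in NetworkX, so the result is materialized with list()):

# Sketch only, not from the commit: scoring a few node pairs with the
# GPU-backed jaccard_coefficient via NetworkX backend dispatch.
import networkx as nx

G = nx.karate_club_graph()
pairs = [(0, 1), (0, 2), (1, 2)]
scores = list(nx.jaccard_coefficient(G, ebunch=pairs, backend="cugraph"))
for u, v, score in scores:
    print(u, v, score)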
6 changes: 3 additions & 3 deletions benchmarks/pytest-based/run-main-benchmarks.sh
@@ -1,5 +1,5 @@
#!/bin/bash
# Copyright (c) 2024, NVIDIA CORPORATION.
# Copyright (c) 2024-2025, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -58,8 +58,8 @@ fi

for algo in $algos; do
for dataset in $datasets; do
# this script can be used to download benchmarking datasets by name via cugraph.datasets
python get_graph_bench_dataset.py $dataset
# this script can be used to download benchmarking datasets by name via cugraph.datasets
python get_graph_bench_dataset.py $dataset
for backend in $backends; do
name="${backend}__${algo}__${dataset}"
echo "Running: $backend, $dataset, bench_$algo"
@@ -6,7 +6,7 @@ channels:
- conda-forge
- nvidia
dependencies:
- cuda-version=12.5
- cuda-version=12.8
- cudf==25.2.*,>=0.0.0a0
- cupy>=12.0.0
- graphviz
@@ -33,4 +33,4 @@ dependencies:
- sphinx-markdown-tables
- sphinxcontrib-websupport
- wheel
name: all_cuda-125_arch-x86_64
name: all_cuda-128_arch-x86_64
6 changes: 5 additions & 1 deletion dependencies.yaml
@@ -3,7 +3,7 @@ files:
all:
output: [conda]
matrix:
cuda: ["11.8", "12.5"]
cuda: ["11.8", "12.8"]
arch: [x86_64]
includes:
- checks
@@ -113,6 +113,10 @@ dependencies:
cuda: "12.5"
packages:
- cuda-version=12.5
- matrix:
cuda: "12.8"
packages:
- cuda-version=12.8
docs:
common:
- output_types: [conda]
4 changes: 3 additions & 1 deletion nx_cugraph/algorithms/__init__.py
@@ -1,4 +1,4 @@
# Copyright (c) 2023-2024, NVIDIA CORPORATION.
# Copyright (c) 2023-2025, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -17,6 +17,7 @@
community,
components,
link_analysis,
link_prediction,
operators,
shortest_paths,
traversal,
@@ -30,6 +31,7 @@
from .dag import *
from .isolate import *
from .link_analysis import *
from .link_prediction import *
from .operators import *
from .reciprocity import *
from .shortest_paths import *
3 changes: 2 additions & 1 deletion nx_cugraph/algorithms/community/__init__.py
@@ -1,4 +1,4 @@
# Copyright (c) 2023, NVIDIA CORPORATION.
# Copyright (c) 2023-2025, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -10,4 +10,5 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .leiden import *
from .louvain import *
52 changes: 52 additions & 0 deletions nx_cugraph/algorithms/community/leiden.py
@@ -0,0 +1,52 @@
# Copyright (c) 2024-2025, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import pylibcugraph as plc

from nx_cugraph.convert import _to_undirected_graph
from nx_cugraph.utils import (
    _dtype_param,
    _get_float_dtype,
    _groupby,
    _seed_to_int,
    networkx_algorithm,
    not_implemented_for,
)

__all__ = ["leiden_communities"]


@not_implemented_for("directed")
@networkx_algorithm(extra_params=_dtype_param, version_added="25.02", _plc="leiden")
def leiden_communities(
    G, weight="weight", resolution=1, max_level=None, seed=None, *, dtype=None
):
    # Warning: this API is experimental and may change. It is not yet in NetworkX.
    # See: https://github.com/networkx/networkx/pull/7743
    seed = _seed_to_int(seed)
    G = _to_undirected_graph(G, weight, 1, np.float32)
    dtype = _get_float_dtype(dtype, graph=G, weight=weight)
    if max_level is None or max_level < 0:
        max_level = 500
    node_ids, clusters, modularity = plc.leiden(
        resource_handle=plc.ResourceHandle(),
        random_state=seed,
        graph=G._get_plc_graph(weight, 1, dtype),
        max_level=max_level,
        resolution=resolution,
        theta=1,  # TODO: expose theta as a backend-only parameter once it's used
        do_expensive_check=False,
    )
    groups = _groupby(clusters, node_ids, groups_are_canonical=True)
    return [set(G._nodearray_to_list(ids)) for ids in groups.values()]
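For orientation, the new algorithm can be called directly from the package, since community/__init__.py re-exports it; it is not yet reachable through the NetworkX API. A minimal usage sketch (not part of this commit; assumes nx-cugraph is installed and a CUDA GPU is available):

# Sketch only, not from the commit: running the experimental Leiden
# implementation on a small NetworkX graph.
import networkx as nx
from nx_cugraph.algorithms.community import leiden_communities

G = nx.karate_club_graph()
communities = leiden_communities(G, resolution=1.0, seed=42)
print(communities)  # a list of sets of node ids

Because the implementation is marked experimental (see the NetworkX pull request linked in the code comment), the exact signature may change in later releases.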