diff --git a/.github/workflows/ci_linux.yml b/.github/workflows/ci_linux.yml
index 2e548ec9..1e74d754 100644
--- a/.github/workflows/ci_linux.yml
+++ b/.github/workflows/ci_linux.yml
@@ -163,7 +163,6 @@ jobs:
             --exclude cust
           '
-      # Exclude cust_raw because it triggers hundreds of warnings.
 
       - name: Check documentation
         run: |
           docker exec "$CONTAINER_NAME" bash -lc 'set -euo pipefail
diff --git a/.github/workflows/ci_windows.yml b/.github/workflows/ci_windows.yml
index 63082087..7923fb66 100644
--- a/.github/workflows/ci_windows.yml
+++ b/.github/workflows/ci_windows.yml
@@ -106,7 +106,6 @@ jobs:
         run: cargo test --workspace --exclude blastoff --exclude cudnn --exclude cudnn-sys --exclude cust
         # Exclude crates that require cuDNN, not available on Windows CI: cudnn, cudnn-sys.
-        # Exclude cust_raw because it triggers hundreds of warnings.
 
       - name: Check documentation
         env:
           RUSTDOCFLAGS: -Dwarnings
diff --git a/crates/nvvm/src/lib.rs b/crates/nvvm/src/lib.rs
index cb19c6e7..c6706ea0 100644
--- a/crates/nvvm/src/lib.rs
+++ b/crates/nvvm/src/lib.rs
@@ -312,8 +312,10 @@ pub enum NvvmArch {
     /// This default value of 7.5 corresponds to Turing and later devices. We default to this
     /// because it is the minimum supported by CUDA 13.0 while being in the middle of the range
     /// supported by CUDA 12.x.
-    // WARNING: If you change the default, consider updating the `--target-arch` values used for
-    // compiletests in `ci_linux.yml` and `.github/workflows/ci_{linux,windows}.yml`.
+    // WARNING: If you change the default, consider updating:
+    // - The `--target-arch` values used for compiletests in `ci_linux.yml` and
+    //   `.github/workflows/ci_{linux,windows}.yml`.
+    // - The CUDA versions used in `setup_cuda_environment` in `compiletests`.
     #[default]
     Compute75,
     Compute80,
diff --git a/tests/compiletests/src/main.rs b/tests/compiletests/src/main.rs
index 0e345813..b6888e95 100644
--- a/tests/compiletests/src/main.rs
+++ b/tests/compiletests/src/main.rs
@@ -576,30 +576,41 @@ fn setup_cuda_environment() {
     // Set library path to include CUDA NVVM libraries
     let lib_path_var = dylib_path_envvar();
 
-    // Try to find CUDA installation
-    let cuda_paths = vec![
-        "/usr/local/cuda/nvvm/lib64",
-        "/usr/local/cuda-12/nvvm/lib64",
-        "/usr/local/cuda-11/nvvm/lib64",
-        "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v12.8\\nvvm\\lib\\x64",
-        "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v12.0\\nvvm\\lib\\x64",
-        "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.8\\nvvm\\lib\\x64",
-    ];
-
     let mut found_cuda_paths = Vec::new();
 
     // Check CUDA_PATH environment variable
     if let Ok(cuda_path) = env::var("CUDA_PATH") {
-        let nvvm_path = Path::new(&cuda_path).join("nvvm").join("lib64");
-        if nvvm_path.exists() {
-            found_cuda_paths.push(nvvm_path.to_string_lossy().to_string());
+        #[cfg(unix)]
+        {
+            let nvvm_path = Path::new(&cuda_path).join("nvvm").join("lib64");
+            if nvvm_path.exists() {
+                found_cuda_paths.push(nvvm_path.to_string_lossy().to_string());
+            }
         }
-        let nvvm_path_win = Path::new(&cuda_path).join("nvvm").join("lib").join("x64");
-        if nvvm_path_win.exists() {
-            found_cuda_paths.push(nvvm_path_win.to_string_lossy().to_string());
+        #[cfg(windows)]
+        {
+            let nvvm_path = Path::new(&cuda_path).join("nvvm").join("lib").join("x64");
+            if nvvm_path.exists() {
+                found_cuda_paths.push(nvvm_path.to_string_lossy().to_string());
+            }
         }
     }
 
+    // Try to find CUDA installation
+    #[cfg(unix)]
+    let cuda_paths = vec![
+        "/usr/local/cuda/nvvm/lib64",
+        "/usr/local/cuda-13/nvvm/lib64",
+        "/usr/local/cuda-12/nvvm/lib64",
+    ];
+    #[cfg(windows)]
+    let cuda_paths = vec![
+        "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v13.0\\nvvm\\lib\\x64",
+        "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v12.9\\nvvm\\lib\\x64",
+        "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v12.8\\nvvm\\lib\\x64",
+        "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v12.0\\nvvm\\lib\\x64",
+    ];
+
     // Check standard paths
     for path in &cuda_paths {
         if Path::new(path).exists() {