diff --git a/.github/actions/get-cudaq-version/action.yaml b/.github/actions/get-cudaq-version/action.yaml index 907f122..dea4ef0 100644 --- a/.github/actions/get-cudaq-version/action.yaml +++ b/.github/actions/get-cudaq-version/action.yaml @@ -14,11 +14,13 @@ runs: - name: Install jq run: | - if [ -x "$(command -v apt-get)" ]; then - apt-get update - apt-get install -y --no-install-recommends jq - elif [ -x "$(command -v dnf)" ]; then - dnf install -y --nobest --setopt=install_weak_deps=False jq + if [ ! -x "$(command -v jq)" ]; then + if [ -x "$(command -v apt-get)" ]; then + apt-get update + apt-get install -y --no-install-recommends jq + elif [ -x "$(command -v dnf)" ]; then + dnf install -y --nobest --setopt=install_weak_deps=False jq + fi fi shell: bash diff --git a/.github/actions/get-cudaq-wheels/action.yaml b/.github/actions/get-cudaq-wheels/action.yaml new file mode 100644 index 0000000..475499a --- /dev/null +++ b/.github/actions/get-cudaq-wheels/action.yaml @@ -0,0 +1,155 @@ +name: Get CUDAQ wheels +description: 'Either restore CUDAQ wheels from cache or build them' + +inputs: + repo: + description: 'CUDAQ repository.' + required: true + ref: + description: 'The branch, tag or SHA to checkout.' + required: true + token: + description: 'CUDAQ repository access token.' + default: '' + required: false + pr-number: + description: 'Unique pull request identifier.' + default: '' + required: false + save-build: + description: 'Indicates whether to save the build' + default: 'false' + required: false + lookup-only: + description: 'Check if a cache entry exists without downloading the cache' + default: 'false' + required: false + platform: + description: 'Platform (amd64 or arm64)' + default: '' + required: true +outputs: + found-cache: + description: 'A boolean value to indicate that a cache entry was found.' 
+ value: ${{ steps.check-cache.outputs.valid }} + +runs: + using: "composite" + steps: + # ========================================================================== + # Try to restore from cache + # ========================================================================== + + - name: Create CUDAQ wheel cache key + id: cudaq-wheels-key + env: + # This is a list of files that, when changed, should require a new cudaq build + to_hash: | + .github/actions/get-cudaq-wheels/** + .cudaq_version + run: | + hash=${{ hashFiles(format('{0}', env.to_hash)) }} + echo "main=cudaq-wheels-${{ inputs.platform }}-${{ inputs.ref }}-$hash" >> $GITHUB_OUTPUT + if [[ -n "${{ inputs.pr-number }}" ]]; then + echo "pr=-pr${{ inputs.pr-number }}" >> $GITHUB_OUTPUT + fi + sudo mkdir /cudaq-wheels && sudo chmod 777 /cudaq-wheels + shell: bash --noprofile --norc -euo pipefail {0} + + - name: Try to restore CUDAQ wheels from cache + id: restore-cudaq-wheels + uses: actions/cache/restore@v4 + with: + fail-on-cache-miss: false + path: /cudaq-wheels + key: ${{ steps.cudaq-wheels-key.outputs.main }}${{ steps.cudaq-wheels-key.outputs.pr }} + restore-keys: ${{ steps.cudaq-wheels-key.outputs.main }} + lookup-only: ${{ inputs.lookup-only }} + + # The restore action could find a partial match using the `restore-keys`. In such cases + # it would still report `cache-hit` as false, but would load the cache from the partial + # one. Thus, we need to check whether the cache is valid by other means.
+ - name: Check if cache is valid + id: check-cache + run: | + if [[ "${{ steps.restore-cudaq-wheels.outputs.cache-matched-key }}" == "" ]]; then + echo "valid=false" >> $GITHUB_OUTPUT + else + echo "valid=true" >> $GITHUB_OUTPUT + fi + shell: bash --noprofile --norc -euo pipefail {0} + + # ========================================================================== + # Build CUDAQ wheels + # ========================================================================== + + - name: Login to GitHub CR + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ github.token }} + + - name: Get CUDAQ code + if: steps.check-cache.outputs.valid == 'false' && inputs.lookup-only == 'false' + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repo }} + ref: ${{ inputs.ref }} + path: cudaq + set-safe-directory: true + + - name: Set up context for buildx + run: | + docker context create builder_context + shell: bash --noprofile --norc -euo pipefail {0} + + - name: Set up buildx runner + uses: docker/setup-buildx-action@v3 + with: + endpoint: builder_context + driver-opts: network=host + + - name: Build 3.10 wheel + uses: docker/build-push-action@v5 + with: + context: cudaq + file: ./cudaq/docker/release/cudaq.wheel.Dockerfile + build-args: | + base_image=ghcr.io/nvidia/cuda-quantum-devdeps:manylinux-${{ inputs.platform }}-cu12.0-gcc11-main + release_version=0.99.99 + python_version=3.10 + outputs: /cudaq-wheels + + - name: Build 3.11 wheel + uses: docker/build-push-action@v5 + with: + context: cudaq + file: ./cudaq/docker/release/cudaq.wheel.Dockerfile + build-args: | + base_image=ghcr.io/nvidia/cuda-quantum-devdeps:manylinux-${{ inputs.platform }}-cu12.0-gcc11-main + release_version=0.99.99 + python_version=3.11 + outputs: /cudaq-wheels + + - name: Build 3.12 wheel + uses: docker/build-push-action@v5 + with: + context: cudaq + file: ./cudaq/docker/release/cudaq.wheel.Dockerfile + build-args: | + 
base_image=ghcr.io/nvidia/cuda-quantum-devdeps:manylinux-${{ inputs.platform }}-cu12.0-gcc11-main + release_version=0.99.99 + python_version=3.12 + outputs: /cudaq-wheels + + # ========================================================================== + # Store CUDAQ wheels cache + # ========================================================================== + + - name: Store CUDAQ wheels in the cache + if: steps.check-cache.outputs.valid == 'false' && inputs.save-build == 'true' && inputs.lookup-only == 'false' + uses: actions/cache/save@v4 + with: + path: /cudaq-wheels + key: ${{ steps.cudaq-wheels-key.outputs.main }}${{ steps.cudaq-wheels-key.outputs.pr }} diff --git a/.github/workflows/build_wheels.yaml b/.github/workflows/build_wheels.yaml index 687ec38..190cf11 100644 --- a/.github/workflows/build_wheels.yaml +++ b/.github/workflows/build_wheels.yaml @@ -6,20 +6,32 @@ on: build_type: type: choice required: true - description: 'Build Type' + description: 'Build Type (ignored if using artifacts from prior run)' default: 'Release' options: - 'Release' - 'Debug' - + cudaq_wheels: + type: choice + required: true + description: 'CUDA-Q wheel source (released version from PyPI or Custom built using .cudaq_version in repo)' + default: 'Custom' + options: + - 'Custom' + - 'PyPI' + artifacts_from_run: + type: string + description: Optional argument to take artifacts from a prior run of this workflow; facilitates rerunning a failed workflow without re-building the artifacts. + required: false concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: - linux-build: - name: Linux build + build-cudaqx-wheels: + name: Build CUDA-QX wheels + if: ${{ !inputs.artifacts_from_run }} runs-on: linux-${{ matrix.platform }}-cpu8 # CUDAQ requires a highly specialized environment to build. Thus, it is much # easier to rely on their's devdeps images to do the building. 
@@ -60,16 +72,186 @@ jobs: - name: Build CUDAQ toolchain run: | - .github/workflows/scripts/build_cudaq.sh + .github/workflows/scripts/build_cudaq.sh ${{ matrix.python }} - - name: Build wheels + - name: Build CUDA-QX wheels run: | .github/workflows/scripts/build_wheels.sh \ --cudaq-prefix $HOME/.cudaq \ - --build-type ${{ inputs.build_type }} + --build-type ${{ inputs.build_type }} \ + --python-version ${{ matrix.python }} - name: Upload artifact uses: actions/upload-artifact@v4 with: name: wheels-py${{ matrix.python }}-${{ matrix.platform }} path: /wheels/** + retention-days: 14 + + # Building the CUDA-Q wheels must be done outside of a container context, so + # this is a separate job. + build-cudaq-wheels: + name: Build CUDA-Q wheels + if: ${{ !inputs.artifacts_from_run && inputs.cudaq_wheels == 'Custom'}} + strategy: + fail-fast: false + matrix: + platform: ['amd64', 'arm64'] + # Use 32 CPUs rather than 8 (above) because we are only spawning one job per + # platform rather than one job per Python version per platform. 
+ runs-on: ${{ startsWith(github.repository, 'NVIDIA/cudaqx') && format('linux-{0}-cpu32', matrix.platform) || 'ubuntu-latest' }} + permissions: + actions: write + contents: read + pull-requests: read + steps: + - name: Get code + uses: actions/checkout@v4 + with: + set-safe-directory: true + + - name: Get required CUDAQ version + id: get-cudaq-version + uses: ./.github/actions/get-cudaq-version + + - name: Get CUDAQ wheels + uses: ./.github/actions/get-cudaq-wheels + with: + repo: ${{ steps.get-cudaq-version.outputs.repo }} + ref: ${{ steps.get-cudaq-version.outputs.ref }} + token: ${{ secrets.CUDAQ_ACCESS_TOKEN }} + save-build: true + platform: ${{ matrix.platform }} + + - name: Upload CUDAQ wheels + uses: actions/upload-artifact@v4 + with: + name: cudaq-wheels-${{ matrix.platform }} + path: /cudaq-wheels + retention-days: 14 + if-no-files-found: error + + test-cudaqx-wheels: + name: Test CUDA-QX wheels (CPU) + needs: [build-cudaqx-wheels, build-cudaq-wheels] + if: ${{ !failure() && !cancelled() }} + runs-on: linux-${{ matrix.platform }}-cpu4 + container: ubuntu:22.04 + permissions: + actions: write + contents: read + strategy: + fail-fast: false + matrix: + python: ['3.10', '3.11', '3.12'] + platform: ['amd64', 'arm64'] + + steps: + - name: Get code + uses: actions/checkout@v4 + with: + set-safe-directory: true + + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + + - name: Install requirements + run: | + bash .github/workflows/scripts/install_git_cli.sh + apt install -y --no-install-recommends libgfortran5 unzip + + - name: Download CUDA-Q wheels + if: ${{ inputs.cudaq_wheels == 'Custom' }} + uses: actions/download-artifact@v4 + with: + name: cudaq-wheels-${{ matrix.platform }} + path: /cudaq-wheels + run-id: ${{ inputs.artifacts_from_run || github.run_id }} + github-token: ${{ inputs.artifacts_from_run && secrets.WORKFLOW_TOKEN || github.token }} + + - name: Download CUDA-QX wheels + uses: 
actions/download-artifact@v4 + with: + name: wheels-py${{ matrix.python }}-${{ matrix.platform }} + path: /wheels + run-id: ${{ inputs.artifacts_from_run || github.run_id }} + github-token: ${{ inputs.artifacts_from_run && secrets.WORKFLOW_TOKEN || github.token }} + + - name: Test wheels + run: | + ls /wheels + bash scripts/ci/test_wheels.sh ${{ matrix.python }} + + test-wheels-gpu: + name: Test CUDA-QX wheels (GPU) + needs: [build-cudaqx-wheels, build-cudaq-wheels] + if: ${{ !failure() && !cancelled() }} + runs-on: linux-${{ matrix.runner.arch }}-gpu-${{ matrix.runner.gpu }}-latest-1 + container: + image: nvidia/cuda:12.0.0-base-ubuntu22.04 + # Enable this if you want to collect core files. (You may might to enable + # Debug builds if you're doing this.) + #options: --privileged --ulimit core=-1 --security-opt seccomp=unconfined + env: + NVIDIA_VISIBLE_DEVICES: ${{ env.NVIDIA_VISIBLE_DEVICES }} + permissions: + actions: write + contents: read + strategy: + fail-fast: false + matrix: + runner: [ + { arch: arm64, gpu: a100 }, + { arch: amd64, gpu: v100 }, + ] + python: ['3.10', '3.11', '3.12'] + + steps: + - name: Get code + uses: actions/checkout@v4 + with: + set-safe-directory: true + + - name: Install Python ${{ matrix.python }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + + - name: Install requirements + run: | + bash .github/workflows/scripts/install_git_cli.sh + apt install -y --no-install-recommends libgfortran5 unzip + #echo 'core.%p' | tee /proc/sys/kernel/core_pattern + #echo "Running cat /proc/sys/kernel/core_pattern" + #cat /proc/sys/kernel/core_pattern + + - name: Download CUDA-Q wheels + if: ${{ inputs.cudaq_wheels == 'Custom' }} + uses: actions/download-artifact@v4 + with: + name: cudaq-wheels-${{ matrix.runner.arch }} + path: /cudaq-wheels + run-id: ${{ inputs.artifacts_from_run || github.run_id }} + github-token: ${{ inputs.artifacts_from_run && secrets.WORKFLOW_TOKEN || github.token }} + + - name: Download 
CUDA-QX wheels + uses: actions/download-artifact@v4 + with: + name: wheels-py${{ matrix.python }}-${{ matrix.runner.arch }} + path: /wheels + run-id: ${{ inputs.artifacts_from_run || github.run_id }} + github-token: ${{ inputs.artifacts_from_run && secrets.WORKFLOW_TOKEN || github.token }} + + - name: Test wheels + run: | + ls /wheels + bash scripts/ci/test_wheels.sh ${{ matrix.python }} + + # - name: Upload any core files + # if: success() || failure() + # uses: actions/upload-artifact@v4 + # with: + # name: core-files-${{ matrix.python }}-arm64 + # path: core.* diff --git a/.github/workflows/pr_sanity_checks.yaml b/.github/workflows/pr_sanity_checks.yaml index dd1ff29..7761d84 100644 --- a/.github/workflows/pr_sanity_checks.yaml +++ b/.github/workflows/pr_sanity_checks.yaml @@ -107,6 +107,7 @@ jobs: with: name: clang-format-patch path: clang-*.patch + retention-days: 14 check-python: name: Check Python code formatting @@ -165,6 +166,7 @@ jobs: with: name: yapf-format-patch path: yapf-*.patch + retention-days: 14 # This job is used for branch protection checks. 
verify: diff --git a/.github/workflows/scripts/build_cudaq.sh b/.github/workflows/scripts/build_cudaq.sh index 2f207c1..6fc49a2 100755 --- a/.github/workflows/scripts/build_cudaq.sh +++ b/.github/workflows/scripts/build_cudaq.sh @@ -16,7 +16,8 @@ source /opt/rh/gcc-toolset-11/enable export CC=gcc export CXX=g++ -python_version=3.10 +python_version=$1 +python_version=${python_version:-3.10} python=python${python_version} ${python} -m pip install --no-cache-dir numpy auditwheel diff --git a/.github/workflows/scripts/build_wheels.sh b/.github/workflows/scripts/build_wheels.sh index 6bbe097..1d8ec59 100755 --- a/.github/workflows/scripts/build_wheels.sh +++ b/.github/workflows/scripts/build_wheels.sh @@ -19,6 +19,7 @@ show_help() { echo " --build-type Build type (e.g., Release)" echo " --cudaq-prefix Path to CUDA-Q's install prefix" echo " (default: \$HOME/.cudaq)" + echo " --python-version Python version to build wheel for (e.g. 3.10)" } parse_options() { @@ -42,6 +43,15 @@ parse_options() { exit 1 fi ;; + --python-version) + if [[ -n "$2" && "$2" != -* ]]; then + python_version=("$2") + shift 2 + else + echo "Error: Argument for $1 is missing" >&2 + exit 1 + fi + ;; -*) echo "Error: Unknown option $1" >&2 show_help @@ -56,18 +66,20 @@ parse_options() { done } -# Initialize an empty array to store libs names +# Defaults cudaq_prefix=$HOME/.cudaq build_type=Release +python_version=3.10 # Parse options parse_options "$@" +echo "Building in $build_type mode for Python $python_version" + # ============================================================================== # Helpers # ============================================================================== -python_version=3.10 python=python${python_version} ARCH=$(uname -m) diff --git a/.gitignore b/.gitignore index 506d28e..ccec909 100644 --- a/.gitignore +++ b/.gitignore @@ -82,6 +82,7 @@ __pycache__/ docs/sphinx/_doxygen docs/sphinx/_mdgen **/_build/* +**/_skbuild/* _version.py # third party integrations diff --git 
a/libs/solvers/lib/operators/molecule/drivers/library_utils.h b/libs/core/include/cuda-qx/core/library_utils.h similarity index 77% rename from libs/solvers/lib/operators/molecule/drivers/library_utils.h rename to libs/core/include/cuda-qx/core/library_utils.h index 54da7bb..b0404d3 100644 --- a/libs/solvers/lib/operators/molecule/drivers/library_utils.h +++ b/libs/core/include/cuda-qx/core/library_utils.h @@ -20,9 +20,12 @@ namespace cudaqx::__internal__ { +enum class CUDAQXLibraryType { Solvers, QEC }; + /// @brief Structure to hold CUDAQX library data. struct CUDAQXLibraryData { - std::string path; ///< The path to the CUDAQX library. + std::string path; // The path to the CUDAQX library + std::string libName; // The name to search for }; #if defined(__APPLE__) && defined(__MACH__) @@ -31,11 +34,11 @@ struct CUDAQXLibraryData { /// path. inline static void getCUDAQXLibraryPath(CUDAQXLibraryData *data) { auto nLibs = _dyld_image_count(); + auto casted = static_cast(data); for (uint32_t i = 0; i < nLibs; i++) { auto ptr = _dyld_get_image_name(i); std::string libName(ptr); - if (libName.find("cudaq-core") != std::string::npos) { - auto casted = static_cast(data); + if (libName.find(casted->libName) != std::string::npos) { casted->path = std::string(ptr); } } @@ -51,8 +54,8 @@ inline static void getCUDAQXLibraryPath(CUDAQXLibraryData *data) { inline static int getCUDAQXLibraryPath(struct dl_phdr_info *info, size_t size, void *data) { std::string libraryName(info->dlpi_name); - if (libraryName.find("cudaq-solvers") != std::string::npos) { - auto casted = static_cast(data); + auto casted = static_cast(data); + if (libraryName.find(casted->libName) != std::string::npos) { casted->path = std::string(info->dlpi_name); } return 0; @@ -61,8 +64,18 @@ inline static int getCUDAQXLibraryPath(struct dl_phdr_info *info, size_t size, /// @brief Retrieves the path of the CUDAQX library. /// @return A string containing the path to the CUDAQX library. 
-inline static std::string getCUDAQXLibraryPath() { +inline static std::string getCUDAQXLibraryPath(const CUDAQXLibraryType lib) { __internal__::CUDAQXLibraryData data; + data.libName = [&]() -> std::string { + switch (lib) { + case CUDAQXLibraryType::QEC: + return "/libcudaq-qec."; + case CUDAQXLibraryType::Solvers: + return "/libcudaq-solvers."; + } + return "UNKNOWN"; + }(); + #if defined(__APPLE__) && defined(__MACH__) getCUDAQXLibraryPath(&data); #else diff --git a/libs/qec/include/cudaq/qec/plugin_loader.h b/libs/qec/include/cudaq/qec/plugin_loader.h index 280b1f1..4a7cdd4 100644 --- a/libs/qec/include/cudaq/qec/plugin_loader.h +++ b/libs/qec/include/cudaq/qec/plugin_loader.h @@ -6,14 +6,15 @@ * the terms of the Apache License 2.0 which accompanies this distribution. * ******************************************************************************/ -#ifndef PLUGIN_LOADER_H -#define PLUGIN_LOADER_H +#pragma once #include #include #include #include +namespace cudaq::qec { + /// @brief Enum to define different types of plugins enum class PluginType { DECODER, // Decoder plugins @@ -21,11 +22,21 @@ enum class PluginType { // Add other plugin types here as needed }; +struct PluginDeleter // deleter +{ + void operator()(void *h) const { + if (h) + dlclose(h); + }; +}; + /// @brief A struct to store plugin handle with its type struct PluginHandle { - std::shared_ptr handle; // Pointer to the shared library handle. This is - // the result of dlopen() function. - PluginType type; // Type of the plugin (e.g., decoder, code, etc) + // Pointer to the shared library handle. This is the result of dlopen() + // function. + std::unique_ptr handle; + // Type of the plugin (e.g., decoder, code, etc) + PluginType type; }; /// @brief Function to load plugins from a directory based on type @@ -39,4 +50,4 @@ void load_plugins(const std::string &plugin_dir, PluginType type); /// be cleaned up. 
void cleanup_plugins(PluginType type); -#endif // PLUGIN_LOADER_H +} // namespace cudaq::qec diff --git a/libs/qec/lib/CMakeLists.txt b/libs/qec/lib/CMakeLists.txt index 55b5433..bdbe043 100644 --- a/libs/qec/lib/CMakeLists.txt +++ b/libs/qec/lib/CMakeLists.txt @@ -9,7 +9,6 @@ set(LIBRARY_NAME cudaq-qec) add_compile_options(-Wno-attributes) -add_compile_definitions(DECODER_PLUGIN_DIR="${CMAKE_INSTALL_PREFIX}/lib/decoder-plugins") # FIXME?: This must be a shared library. Trying to build a static one will fail. add_library(${LIBRARY_NAME} SHARED diff --git a/libs/qec/lib/decoder.cpp b/libs/qec/lib/decoder.cpp index 9f4cc56..f7833d5 100644 --- a/libs/qec/lib/decoder.cpp +++ b/libs/qec/lib/decoder.cpp @@ -7,6 +7,7 @@ ******************************************************************************/ #include "cudaq/qec/decoder.h" +#include "cuda-qx/core/library_utils.h" #include "cudaq/qec/plugin_loader.h" #include #include @@ -73,16 +74,19 @@ std::unique_ptr get_decoder(const std::string &name, const cudaqx::heterogeneous_map options) { return decoder::get(name, H, options); } -} // namespace cudaq::qec // Constructor function for auto-loading plugins __attribute__((constructor)) void load_decoder_plugins() { // Load plugins from the decoder-specific plugin directory - load_plugins(DECODER_PLUGIN_DIR, PluginType::DECODER); + std::filesystem::path libPath{cudaqx::__internal__::getCUDAQXLibraryPath( + cudaqx::__internal__::CUDAQXLibraryType::QEC)}; + auto pluginPath = libPath.parent_path() / "decoder-plugins"; + load_plugins(pluginPath.string(), PluginType::DECODER); } // Destructor function to clean up only decoder plugins __attribute__((destructor)) void cleanup_decoder_plugins() { // Clean up decoder-specific plugins cleanup_plugins(PluginType::DECODER); -} \ No newline at end of file +} +} // namespace cudaq::qec diff --git a/libs/qec/lib/decoders/plugins/example/CMakeLists.txt b/libs/qec/lib/decoders/plugins/example/CMakeLists.txt index 2cb1ea3..9399faf 100644 --- 
a/libs/qec/lib/decoders/plugins/example/CMakeLists.txt +++ b/libs/qec/lib/decoders/plugins/example/CMakeLists.txt @@ -47,18 +47,18 @@ set_target_properties(${MODULE_NAME} PROPERTIES # ============================================================================== if (NOT SKBUILD) - set_target_properties(${LIBRARY_NAME} PROPERTIES + set_target_properties(${MODULE_NAME} PROPERTIES BUILD_RPATH "$ORIGIN" INSTALL_RPATH "$ORIGIN:$ORIGIN/.." ) # Let CMake automatically add paths of linked libraries to the RPATH: - set_target_properties(${LIBRARY_NAME} PROPERTIES + set_target_properties(${MODULE_NAME} PROPERTIES INSTALL_RPATH_USE_LINK_PATH TRUE) else() # CUDA-Q install its libraries in site-packages/lib (or dist-packages/lib) # Thus, we need the $ORIGIN/../lib - set_target_properties(${LIBRARY_NAME} PROPERTIES + set_target_properties(${MODULE_NAME} PROPERTIES INSTALL_RPATH "$ORIGIN/../../lib" ) endif() diff --git a/libs/qec/lib/plugin_loader.cpp b/libs/qec/lib/plugin_loader.cpp index 35a69f5..c84dd69 100644 --- a/libs/qec/lib/plugin_loader.cpp +++ b/libs/qec/lib/plugin_loader.cpp @@ -12,6 +12,8 @@ namespace fs = std::filesystem; +namespace cudaq::qec { + static std::map &get_plugin_handles() { static std::map plugin_handles; return plugin_handles; @@ -28,15 +30,11 @@ void load_plugins(const std::string &plugin_dir, PluginType type) { if (entry.path().extension() == ".so") { void *raw_handle = dlopen(entry.path().c_str(), RTLD_NOW); if (raw_handle) { - // Custom deleter ensures dlclose is called - auto deleter = [](void *h) { - if (h) - dlclose(h); - }; - get_plugin_handles().emplace( entry.path().filename().string(), - PluginHandle{std::shared_ptr(raw_handle, deleter), type}); + PluginHandle{std::unique_ptr(raw_handle, + PluginDeleter()), + type}); } else { std::cerr << "ERROR: Failed to load plugin: " << entry.path() << " Error: " << dlerror() << std::endl; @@ -57,3 +55,5 @@ void cleanup_plugins(PluginType type) { } } } + +} // namespace cudaq::qec diff --git 
a/libs/qec/pyproject.toml b/libs/qec/pyproject.toml index 1c37ac5..ea809cd 100644 --- a/libs/qec/pyproject.toml +++ b/libs/qec/pyproject.toml @@ -11,7 +11,7 @@ maintainers = [{name = "NVIDIA Corporation & Affiliates"}] requires-python = ">=3.10" readme = "README.md" dependencies = [ - 'cuda-quantum-cu12 ~= 0.9.0', + 'cuda-quantum-cu12 >= 0.9', ] classifiers = [ 'Intended Audience :: Science/Research', @@ -37,7 +37,7 @@ build-dir = "_skbuild" build.verbose = true cmake.version = ">=3.28" cmake.build-type = "Release" -install.components = ["qec-python", "qec-lib"] +install.components = ["qec-python", "qec-lib", "qec-lib-plugins"] wheel.packages = [] logging.level = "DEBUG" ninja.version = ">=1.10" diff --git a/libs/solvers/lib/operators/molecule/drivers/pyscf_driver.cpp b/libs/solvers/lib/operators/molecule/drivers/pyscf_driver.cpp index a5bb696..205c07b 100644 --- a/libs/solvers/lib/operators/molecule/drivers/pyscf_driver.cpp +++ b/libs/solvers/lib/operators/molecule/drivers/pyscf_driver.cpp @@ -8,8 +8,8 @@ #include "nlohmann/json.hpp" +#include "cuda-qx/core/library_utils.h" #include "cuda-qx/core/tensor.h" -#include "library_utils.h" #include "process.h" #include "cudaq/solvers/operators/molecule/fermion_compiler.h" #include "cudaq/solvers/operators/molecule/molecule_package_driver.h" @@ -62,7 +62,8 @@ class RESTPySCFDriver : public MoleculePackageDriver { std::unique_ptr make_available() const override { // Start up the web service, if failed, return nullptr - std::filesystem::path libPath{cudaqx::__internal__::getCUDAQXLibraryPath()}; + std::filesystem::path libPath{cudaqx::__internal__::getCUDAQXLibraryPath( + cudaqx::__internal__::CUDAQXLibraryType::Solvers)}; auto cudaqLibPath = libPath.parent_path(); auto cudaqPySCFTool = cudaqLibPath.parent_path() / "bin" / "cudaq-pyscf"; auto argString = cudaqPySCFTool.string() + " --server-mode"; diff --git a/libs/solvers/pyproject.toml b/libs/solvers/pyproject.toml index ebe26a3..dd75f0b 100644 --- 
a/libs/solvers/pyproject.toml +++ b/libs/solvers/pyproject.toml @@ -11,7 +11,7 @@ maintainers = [{name = "NVIDIA Corporation & Affiliates"}] requires-python = ">=3.10" readme = "README.md" dependencies = [ - 'cuda-quantum-cu12 ~= 0.9.0', + 'cuda-quantum-cu12 >= 0.9', 'fastapi', 'networkx', 'pyscf', diff --git a/scripts/ci/test_wheels.sh b/scripts/ci/test_wheels.sh index 6983b41..9f91a0b 100644 --- a/scripts/ci/test_wheels.sh +++ b/scripts/ci/test_wheels.sh @@ -11,28 +11,35 @@ # Exit immediately if any command returns a non-zero status set -e +# Uncomment these lines to enable core files +#set +e +#ulimit -c unlimited + # Installing dependencies -python_version=3.10 +python_version=$1 +python_version_no_dot=$(echo $python_version | tr -d '.') # 3.10 --> 310 python=python${python_version} -apt-get update && apt-get install -y --no-install-recommends \ - libgfortran5 python${python_version} python$(echo ${python_version} | cut -d . -f 1)-pip - -${python} -m pip install --no-cache-dir pytest nvidia-cublas-cu11 +${python} -m pip install --no-cache-dir pytest -cd /cuda-qx - -${python} -m pip install wheels/cuda_quantum_cu12-0.0.0-cp310-cp310-manylinux_2_28_x86_64.whl +# If special CUDA-Q wheels have been built for this test, install them here. This will +if [ -d /cudaq-wheels ]; then + echo "Custom CUDA-Q wheels directory found; installing ..." 
+ echo "First ls /cudaq-wheels" + ls /cudaq-wheels + echo "Now show what will be pip installed" + ls -1 /cudaq-wheels/cuda_quantum_*-cp${python_version_no_dot}-cp${python_version_no_dot}-*.whl + ${python} -m pip install /cudaq-wheels/cuda_quantum_*-cp${python_version_no_dot}-cp${python_version_no_dot}-*.whl +fi # QEC library # ====================================== -${python} -m pip install wheels/cudaq_qec-0.0.1-cp310-cp310-*.whl -${python} -m pytest libs/qec/python/tests/ +${python} -m pip install /wheels/cudaq_qec-*-cp${python_version_no_dot}-cp${python_version_no_dot}-*.whl +${python} -m pytest -s libs/qec/python/tests/ # Solvers library # ====================================== -${python} -m pip install wheels/cudaq_solvers-0.0.1-cp310-cp310-*.whl +${python} -m pip install /wheels/cudaq_solvers-*-cp${python_version_no_dot}-cp${python_version_no_dot}-*.whl ${python} -m pytest libs/solvers/python/tests/ -