Skip to content

Add benchmarks #491

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 24 commits into from
May 21, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
56 changes: 23 additions & 33 deletions .github/workflows/manual_benchmark.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
name: Run Benchmark
name: Benchmark

on:
workflow_dispatch:
Expand All @@ -9,17 +9,16 @@ on:
default: "All"
type: choice
options:
- "Manually Selected"
- "All"
- "Default"
synthetic_2C1D_1C:
description: "synthetic_2C1D_1C benchmark"
required: false
default: false
type: boolean
- "Transfer Learning"
- "Synthetic"
- "Non Transfer Learning"

env:
DEFAULT_BENCHMARKS: '["synthetic_2C1D_1C"]'
TRANSFER_LEARNING_BENCHMARKS: '["aryl_halide_CT_IM_tl","aryl_halide_IP_CP_tl","aryl_halide_CT_I_BM_tl","direct_arylation_tl_temperature","easom_tl_47_negate_noise5","hartmann_tl_3_20_15","michalewicz_tl_continuous"]'
SYNTHETIC_BENCHMARKS: '["synthetic_2C1D_1C","hartmann_3d_discretized","hartmann_6d","hartmann_3d"]'
ALL_BENCHMARKS: '["direct_arylation_multi_batch","direct_arylation_single_batch","aryl_halide_CT_IM_tl","aryl_halide_IP_CP_tl","aryl_halide_CT_I_BM_tl","direct_arylation_tl_temperature","easom_tl_47_negate_noise5","hartmann_tl_3_20_15","michalewicz_tl_continuous","synthetic_2C1D_1C","hartmann_3d_discretized","hartmann_6d","hartmann_3d"]'
NON_TL_BENCHMARKS: '["direct_arylation_multi_batch","direct_arylation_single_batch","synthetic_2C1D_1C","hartmann_3d_discretized","hartmann_6d","hartmann_3d"]'

permissions:
contents: read
Expand All @@ -31,39 +30,28 @@ jobs:
outputs:
benchmarks_to_execute: ${{ steps.set_benchmarks.outputs.benchmarks_to_execute }}
steps:
- name: Build matrix from inputs
if: ${{ github.event.inputs.group_selection == 'Manually Selected' || github.event.inputs.group_selection == 'All' }}
id: build_matrix_from_inputs
run: |
benchmarks_to_execute='{"benchmark_list": ['
run_all_benchmarks=${{ github.event.inputs.group_selection == 'All' }}

for key in $(echo '${{ toJson(github.event.inputs) }}' | jq -r 'keys_unsorted[]'); do
if [ "$key" != "group_selection" ]; then
value=$(echo '${{ toJson(github.event.inputs) }}' | jq -r --arg k "$key" '.[$k]')
if [ "$value" = "true" ] || [ "$run_all_benchmarks" = "true" ]; then
benchmarks_to_execute="$benchmarks_to_execute \"$key\","
fi
fi
done
benchmarks_to_execute=$(echo "$benchmarks_to_execute" | sed 's/,$//')
benchmarks_to_execute="$benchmarks_to_execute ]}"

echo "benchmarks_to_execute=$benchmarks_to_execute" >> "$GITHUB_ENV"

- name: Build matrix from group
if: ${{ github.event.inputs.group_selection != 'Manually Selected' && github.event.inputs.group_selection != 'All' }}
id: build_matrix_from_group
run: |
benchmarks_to_execute='{"benchmark_list": []}'
run_all_benchmarks="${{ github.event.inputs.group_selection }}"

if [ "$run_all_benchmarks" = "Default" ]; then
benchmarks_to_execute='{"benchmark_list": ${{ env.DEFAULT_BENCHMARKS }} }'
if [ "$run_all_benchmarks" = "Transfer Learning" ]; then
benchmarks_to_execute='{"benchmark_list": ${{ env.TRANSFER_LEARNING_BENCHMARKS }} }'
fi
if [ "$run_all_benchmarks" = "Non Transfer Learning" ]; then
benchmarks_to_execute='{"benchmark_list": ${{ env.NON_TL_BENCHMARKS }} }'
fi
if [ "$run_all_benchmarks" = "Synthetic" ]; then
benchmarks_to_execute='{"benchmark_list": ${{ env.SYNTHETIC_BENCHMARKS }} }'
fi
if [ "$run_all_benchmarks" = "All" ]; then
benchmarks_to_execute='{"benchmark_list": ${{ env.ALL_BENCHMARKS }} }'
fi

echo "benchmarks_to_execute=$benchmarks_to_execute" >> "$GITHUB_ENV"


- name: Set benchmarks output
id: set_benchmarks
run: |
Expand Down Expand Up @@ -107,6 +95,7 @@ jobs:
fi

benchmark-test:
name: run
needs: [prepare, add-runner]
runs-on: self-hosted
strategy:
Expand All @@ -115,6 +104,7 @@ jobs:
timeout-minutes: 1440
env:
BAYBE_BENCHMARKING_PERSISTENCE_PATH: ${{ secrets.TEST_RESULT_S3_BUCKET }}
BAYBE_PARALLEL_SIMULATION_RUNS: false
steps:
- uses: actions/checkout@v4
with:
Expand All @@ -126,4 +116,4 @@ jobs:
- name: Benchmark
run: |
pip install '.[benchmarking]'
python -m benchmarks --benchmark-list "${{ matrix.benchmark_list }}"
python -W ignore -m benchmarks --benchmark-list "${{ matrix.benchmark_list }}"
Loading
Loading