diff --git a/tests/CMake/BoostUT.cmake b/tests/CMake/BoostUT.cmake new file mode 100644 index 0000000..1abd5b2 --- /dev/null +++ b/tests/CMake/BoostUT.cmake @@ -0,0 +1,85 @@ +FetchContent_Declare(boost_ut + GIT_REPOSITORY https://github.com/boost-ext/ut.git + GIT_TAG v2.3.1) +FetchContent_MakeAvailable(boost_ut) + + + +# this target builds an executable that contains all the tests +# this can be faster than building each separately for local development +add_executable(jumbo_test EXCLUDE_FROM_ALL test_runner.cpp) +set_target_properties(jumbo_test PROPERTIES + CXX_STANDARD 20 + UNITY_BUILD ON + ) +target_link_libraries(jumbo_test + PRIVATE + nuts::nuts + Eigen3::Eigen + Boost::ut +) + +# We build a 'main' object that is linked into each test +add_library(boost_ut_runner OBJECT test_runner.cpp) +target_link_libraries(boost_ut_runner PUBLIC Boost::ut) + +function(add_boost_ut_test TEST_NAME) + add_executable(${TEST_NAME} ${TEST_NAME}.cpp) + # boost.UT requires C++20, but our project overall is only C++17 + set_property(TARGET ${TEST_NAME} PROPERTY CXX_STANDARD 20) + + target_link_libraries(${TEST_NAME} + PRIVATE + boost_ut_runner + Eigen3::Eigen + nuts::nuts + ) + + # alternative to add_test() that discovers each test in the executable + discover_boost_ut_test(${TEST_NAME}) + + target_sources(jumbo_test PRIVATE ${TEST_NAME}.cpp) +endfunction() + + +# Based on https://gitlab.kitware.com/cmake/cmake/-/blob/master/Modules/GoogleTest.cmake#L545 +# This defines a function that is an alternative to add_test() +# that adds each individual test in the executable as its own ctest test +# A nice overview of this feature is described here: https://www.kitware.com/dynamic-google-test-discovery-in-cmake-3-10/ +function (discover_boost_ut_test TARGET) + + set(ctest_file_base "${CMAKE_CURRENT_BINARY_DIR}/${TARGET}") + set(ctest_include_file "${ctest_file_base}_include.cmake") + set(ctest_tests_file "${ctest_file_base}_tests.cmake") + + file(WRITE "${ctest_include_file}" + "if(EXISTS 
\"${ctest_tests_file}\")\n" + " include(\"${ctest_tests_file}\")\n" + "else()\n" + " add_test(${TARGET}_NOT_BUILT ${TARGET}_NOT_BUILT)\n" + "endif()\n" + ) + + add_custom_command( + TARGET ${TARGET} POST_BUILD + BYPRODUCTS "${ctest_tests_file}" + COMMAND "${CMAKE_COMMAND}" + -D "TEST_TARGET=${TARGET}" + -D "TEST_EXECUTABLE=$<TARGET_FILE:${TARGET}>" + -D "CTEST_FILE=${ctest_tests_file}" + -P "${_UT_DISCOVER_TESTS_SCRIPT}" + VERBATIM + ) + + + # Add discovered tests to directory TEST_INCLUDE_FILES + set_property(DIRECTORY + APPEND PROPERTY TEST_INCLUDE_FILES "${ctest_include_file}" + ) + +endfunction() + + +set(_UT_DISCOVER_TESTS_SCRIPT + ${CMAKE_CURRENT_LIST_DIR}/BoostUTAddTests.cmake +) diff --git a/tests/CMake/BoostUTAddTests.cmake b/tests/CMake/BoostUTAddTests.cmake new file mode 100644 index 0000000..537fc87 --- /dev/null +++ b/tests/CMake/BoostUTAddTests.cmake @@ -0,0 +1,64 @@ +# based on https://gitlab.kitware.com/cmake/cmake/-/blob/master/Modules/GoogleTestAddTests.cmake?ref_type=heads +# this file defines a script that lists all the tests in a given executable +# and adds each one individually as a ctest test +# A nice overview of this feature is described here: https://www.kitware.com/dynamic-google-test-discovery-in-cmake-3-10/ + +set(script) +set(suite) +set(tests) + +function(add_command NAME) + set(_args "") + foreach(_arg ${ARGN}) + if(_arg MATCHES "[^-./:a-zA-Z0-9_]") + set(_args "${_args} [==[${_arg}]==]") + else() + set(_args "${_args} ${_arg}") + endif() + endforeach() + set(script "${script}${NAME}(${_args})\n" PARENT_SCOPE) +endfunction() + +# Run test executable to get list of available tests +if(NOT EXISTS "${TEST_EXECUTABLE}") + message(FATAL_ERROR + "Specified test executable '${TEST_EXECUTABLE}' does not exist" + ) +endif() +execute_process( + COMMAND "${TEST_EXECUTABLE}" --list-test-names-only --use-colour no + OUTPUT_VARIABLE output + RESULT_VARIABLE result +) +if(NOT ${result} EQUAL 0) + message(FATAL_ERROR + "Error running test executable 
'${TEST_EXECUTABLE}':\n" + " Result: ${result}\n" + " Output: ${output}\n" + ) +endif() + +string(REPLACE "\n" ";" output "${output}") + +foreach(test ${output}) + if (test MATCHES "^Suite '") + continue() # Skip suite names + endif() + add_command(add_test + "${TEST_TARGET}:${test}" + "${TEST_EXECUTABLE}" + "${test}" + "--success" + "--durations" + ) + message(CONFIGURE_LOG "Discovered test: ${TEST_TARGET}:${test}") + list(APPEND tests "${test}") + +endforeach() + +# Create a list of all discovered tests, which users may use to e.g. set +# properties on the tests +add_command(set ${TEST_TARGET}_TESTS ${tests}) + +# Write CTest script +file(WRITE "${CTEST_FILE}" "${script}") diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 225ec7d..05419c1 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,34 +1,6 @@ -if(MSVC) - set(gtest_force_shared_crt on) -endif() +include(CMake/BoostUT.cmake) -FetchContent_Declare(googletest - DOWNLOAD_EXTRACT_TIMESTAMP ON - GIT_REPOSITORY https://github.com/google/googletest.git - GIT_TAG main) -FetchContent_MakeAvailable(googletest) - -include(GoogleTest) - -# Set up testing framework, then add tests -# define a function to add a gtest‐based nuts test -function(add_nuts_test TEST_NAME TEST_SOURCE) - # create the executable - add_executable(${TEST_NAME} ${TEST_SOURCE}) - - # link in GoogleTest, Eigen, and your nuts lib - target_link_libraries(${TEST_NAME} - PRIVATE - gtest_main - Eigen3::Eigen - nuts::nuts - ) - - # register it with CTest - gtest_discover_tests(${TEST_NAME}) -endfunction() - -add_nuts_test(combine_span_test combine_span_test.cpp) -add_nuts_test(online_moments_test online_moments_test.cpp) -add_nuts_test(dual_average_test dual_average_test.cpp) -add_nuts_test(util_test util_test.cpp) +add_boost_ut_test(combine_span_test) +add_boost_ut_test(dual_average_test) +add_boost_ut_test(online_moments_test) +add_boost_ut_test(util_test) diff --git a/tests/combine_span_test.cpp b/tests/combine_span_test.cpp index 
9a33dba..b71dda2 100644 --- a/tests/combine_span_test.cpp +++ b/tests/combine_span_test.cpp @@ -1,6 +1,6 @@ #include -#include +#include #include #include @@ -44,7 +44,7 @@ struct MockRandom { double uniform_real_01() { return value; } }; -// Googletest allows you to parametrize tests over _types_, +// Boost.UT allows you to parametrize tests over _types_, // but to use non-type template parameters you must first // wrap them in a type like std::integral_constant using Forward = @@ -60,226 +60,214 @@ using BackwardAndMetropolis = std::tuple; using ForwardAndBarker = std::tuple; using BackwardAndBarker = std::tuple; -} // namespace span_test - -// macros to compare parts of Spans -#define EXPECT_FORWARD_ENDPOINT_EQUAL(span_expected, span_received) \ - EXPECT_EQ(span_expected.theta_fw_, span_received.theta_fw_); \ - EXPECT_EQ(span_expected.rho_fw_, span_received.rho_fw_); \ - EXPECT_EQ(span_expected.grad_theta_fw_, span_received.grad_theta_fw_); \ - EXPECT_EQ(span_expected.logp_fw_, span_received.logp_fw_); - -#define EXPECT_BACKWARD_ENDPOINT_EQUAL(span_expected, span_received) \ - EXPECT_EQ(span_expected.theta_bk_, span_received.theta_bk_); \ - EXPECT_EQ(span_expected.rho_bk_, span_received.rho_bk_); \ - EXPECT_EQ(span_expected.grad_theta_bk_, span_received.grad_theta_bk_); \ - EXPECT_EQ(span_expected.logp_bk_, span_received.logp_bk_); - -#define EXPECT_SELECTED_POINT_EQUAL(span_expected, logp_expected, \ - span_received) \ - EXPECT_EQ(span_expected.theta_select_, span_received.theta_select_); \ - EXPECT_EQ(span_expected.grad_select_, span_received.grad_select_); \ - EXPECT_FLOAT_EQ(logp_expected, span_received.logp_); - -// Tests in the CombineSpansUpdateAgnostic suite hold regardless of the update -// rule used (Metropolis or Barker). They can still be sensitive to the -// direction. 
-template -class CombineSpansUpdateAgnostic : public ::testing::Test {}; - -using BothUpdates = ::testing::Types; -TYPED_TEST_SUITE(CombineSpansUpdateAgnostic, BothUpdates); - -TYPED_TEST(CombineSpansUpdateAgnostic, EndpointsCorrectWhenGoingForward) { - constexpr auto update = TypeParam::value; - - span_test::MockRandom rng{0}; - nuts::SpanW span_from = span_test::dummy_span(0.3); - nuts::SpanW span_to = span_test::dummy_span(0.1); - - nuts::SpanW span_old = span_from; - nuts::SpanW span_new = span_to; - nuts::SpanW combined = - nuts::combine(rng, std::move(span_old), - std::move(span_new)); - - EXPECT_BACKWARD_ENDPOINT_EQUAL(span_from, combined); - EXPECT_FORWARD_ENDPOINT_EQUAL(span_to, combined); -} - -TYPED_TEST(CombineSpansUpdateAgnostic, EndpointsCorrectWhenGoingBackward) { - constexpr auto update = TypeParam::value; - - span_test::MockRandom rng{0}; - nuts::SpanW span_from = span_test::dummy_span(0.3); - nuts::SpanW span_to = span_test::dummy_span(0.1); +// now we can actually start testing - nuts::SpanW span_old = span_from; - nuts::SpanW span_new = span_to; - nuts::SpanW combined = - nuts::combine(rng, std::move(span_old), - std::move(span_new)); +using namespace boost::ut; - EXPECT_BACKWARD_ENDPOINT_EQUAL(span_to, combined); - EXPECT_FORWARD_ENDPOINT_EQUAL(span_from, combined); +void expect_forward_endpoint_equal(auto& span_expected, auto& span_received, + const reflection::source_location& location = + reflection::source_location::current()) { + expect(span_expected.theta_fw_ == span_received.theta_fw_, location); + expect(span_expected.rho_fw_ == span_received.rho_fw_, location); + expect(span_expected.grad_theta_fw_ == span_received.grad_theta_fw_, + location); + expect(span_expected.logp_fw_ == span_received.logp_fw_, location); } -// Tests in the CombineSpansSymmetric suite hold regardless of going forwards or -// backwards in time. 
-template -class CombineSpansSymmetric : public ::testing::Test {}; - -using BothDirections = - ::testing::Types; -TYPED_TEST_SUITE(CombineSpansSymmetric, BothDirections); - -TYPED_TEST(CombineSpansSymmetric, MetropolisAcceptsBarkerRejects) { - constexpr auto direction = TypeParam::value; - - span_test::MockRandom rng{0.5}; - nuts::SpanW span_prev = span_test::dummy_span(1.2); - nuts::SpanW span_next = span_test::dummy_span(1.0); - double new_logp = nuts::log_sum_exp(span_prev.logp_, span_next.logp_); - - // log(0.5) = -0.3 - // metropolis: 1.2 - 1.0 = 0.2 > -0.3, accepts - { - nuts::SpanW span_old = span_prev; - nuts::SpanW span_new = span_next; - nuts::SpanW combined = - nuts::combine( - rng, std::move(span_old), std::move(span_new)); - - EXPECT_SELECTED_POINT_EQUAL(span_next, new_logp, combined); - } - - // barker: 1.2 - logsumexp(1.2, 1.0) = 1.2 - 1.78 = -0.58 < -0.3, rejects - { - nuts::SpanW span_old = span_prev; - nuts::SpanW span_new = span_next; - nuts::SpanW combined = - nuts::combine(rng, std::move(span_old), - std::move(span_new)); - - EXPECT_SELECTED_POINT_EQUAL(span_prev, new_logp, combined); - } +void expect_backward_endpoint_equal( + auto& span_expected, auto& span_received, + const reflection::source_location& location = + reflection::source_location::current()) { + expect(span_expected.theta_bk_ == span_received.theta_bk_, location); + expect(span_expected.rho_bk_ == span_received.rho_bk_, location); + expect(span_expected.grad_theta_bk_ == span_received.grad_theta_bk_, + location); + expect(span_expected.logp_bk_ == span_received.logp_bk_, location); } -// metropolis is always strightly higher than barker, so no case where barker -// accepts and metropolis rejects for a given random uniform draw +void expect_selected_point_equal(auto& span_expected, double logp_expected, + auto& span_received, + const reflection::source_location& location = + reflection::source_location::current()) { + expect(span_expected.theta_select_ == 
span_received.theta_select_, location); + expect(span_expected.grad_select_ == span_received.grad_select_, location); + expect(approx(logp_expected, span_received.logp_, 1e-6), location); +} -TYPED_TEST(CombineSpansSymmetric, BarkerAndMetropolisBothReject) { - constexpr auto direction = TypeParam::value; +suite<"combine_spans"> tests = [] { + "endpoints_correct_going_forward"_test = [] { + constexpr nuts::Update update = U::value; + MockRandom rng{0.5}; - span_test::MockRandom rng{0.5}; + nuts::SpanW span_from = dummy_span(0.3); + nuts::SpanW span_to = dummy_span(0.1); - nuts::SpanW span_prev = span_test::dummy_span(1.8); - nuts::SpanW span_next = span_test::dummy_span(0.3); - double new_logp = nuts::log_sum_exp(span_prev.logp_, span_next.logp_); + nuts::SpanW span_old = span_from; + nuts::SpanW span_new = span_to; - // metropolis: 0.3 - 1.8 = -1.5 < -0.3, rejects - { - nuts::SpanW span_old = span_prev; - nuts::SpanW span_new = span_next; nuts::SpanW combined = - nuts::combine( + nuts::combine( rng, std::move(span_old), std::move(span_new)); - EXPECT_SELECTED_POINT_EQUAL(span_prev, new_logp, combined); - } + expect_forward_endpoint_equal(span_to, combined); + expect_backward_endpoint_equal(span_from, combined); + } | std::tuple(); - // barker: 0.3 - logsumexp(0.3, 1.8) = 0.3 - 2.00 = -2.00 < -0.3, rejects - { - nuts::SpanW span_old = span_prev; - nuts::SpanW span_new = span_next; - nuts::SpanW combined = - nuts::combine(rng, std::move(span_old), - std::move(span_new)); + "endpoints_correct_going_backward"_test = [] { + constexpr nuts::Update update = U::value; + MockRandom rng{0.5}; - EXPECT_SELECTED_POINT_EQUAL(span_prev, new_logp, combined); - } -} + nuts::SpanW span_from = dummy_span(0.3); + nuts::SpanW span_to = dummy_span(0.1); -TYPED_TEST(CombineSpansSymmetric, BarkerAndMetropolisBothAccept) { - constexpr auto direction = TypeParam::value; + nuts::SpanW span_old = span_from; + nuts::SpanW span_new = span_to; - span_test::MockRandom rng{0.5}; - nuts::SpanW 
span_prev = span_test::dummy_span(0.3); - nuts::SpanW span_next = span_test::dummy_span(1.8); - double new_logp = nuts::log_sum_exp(span_prev.logp_, span_next.logp_); - - // metropolis: 1.8 - 0.3 = 1.5 > -0.3, accepts - { - nuts::SpanW span_old = span_prev; - nuts::SpanW span_new = span_next; nuts::SpanW combined = - nuts::combine( + nuts::combine( rng, std::move(span_old), std::move(span_new)); - EXPECT_SELECTED_POINT_EQUAL(span_next, new_logp, combined); - } - - // barker: 1.8 - logsumexp(0.3, 1.8) = 1.8 - 2.0 = -.2 > -0.3, accepts - { - nuts::SpanW span_old = span_prev; - nuts::SpanW span_new = span_next; - nuts::SpanW combined = - nuts::combine(rng, std::move(span_old), - std::move(span_new)); - - EXPECT_SELECTED_POINT_EQUAL(span_next, new_logp, combined); - } -} - -// Tests in the CombineSpansUniversal suite hold regardless of going forwards or -// backwards in time, *and* regardless of the update rule used (Metropolis or -// Barker) -template -class CombineSpansUniversal : public ::testing::Test {}; - -using AllCombinations = - ::testing::Types; -TYPED_TEST_SUITE(CombineSpansUniversal, AllCombinations); - -TYPED_TEST(CombineSpansUniversal, SelectedPointCameFromSecondSpanOnAcceptance) { - constexpr auto direction = std::tuple_element_t<0, TypeParam>::value; - constexpr auto update = std::tuple_element_t<1, TypeParam>::value; - - nuts::SpanW span_prev = span_test::dummy_span(0.3); - nuts::SpanW span_next = span_test::dummy_span(0.1); - double new_logp = nuts::log_sum_exp(span_prev.logp_, span_next.logp_); - - span_test::MockRandom rng_accept{0}; // always accept - - nuts::SpanW span_old = span_prev; - nuts::SpanW span_new = span_next; - nuts::SpanW combined = nuts::combine( - rng_accept, std::move(span_old), std::move(span_new)); - - EXPECT_SELECTED_POINT_EQUAL(span_next, new_logp, combined); -} - -TYPED_TEST(CombineSpansUniversal, SelectedPointCameFromFirstSpanOnRejection) { - constexpr auto direction = std::tuple_element_t<0, TypeParam>::value; - constexpr 
auto update = std::tuple_element_t<1, TypeParam>::value; - - nuts::SpanW span_prev = span_test::dummy_span(0.3); - nuts::SpanW span_next = span_test::dummy_span(0.1); - double new_logp = nuts::log_sum_exp(span_prev.logp_, span_next.logp_); - - span_test::MockRandom rng_reject{1}; // always reject - - nuts::SpanW span_old = span_prev; - nuts::SpanW span_new = span_next; - nuts::SpanW combined = nuts::combine( - rng_reject, std::move(span_old), std::move(span_new)); - - EXPECT_SELECTED_POINT_EQUAL(span_prev, new_logp, combined); -} + expect_backward_endpoint_equal(span_to, combined); + expect_forward_endpoint_equal(span_from, combined); + } | std::tuple(); + + "metropolis_accepts_barker_rejects"_test = [] { + constexpr nuts::Direction direction = D::value; + + MockRandom rng{0.5}; + nuts::SpanW span_prev = dummy_span(1.2); + nuts::SpanW span_next = dummy_span(1.0); + double new_logp = nuts::log_sum_exp(span_prev.logp_, span_next.logp_); + + // log(0.5) = -0.3 + // metropolis: 1.2 - 1.0 = 0.2 > -0.3, accepts + { + nuts::SpanW span_old = span_prev; + nuts::SpanW span_new = span_next; + nuts::SpanW combined = + nuts::combine( + rng, std::move(span_old), std::move(span_new)); + + expect_selected_point_equal(span_next, new_logp, combined); + } + + // barker: 1.2 - logsumexp(1.2, 1.0) = 1.2 - 1.78 = -0.58 < -0.3, rejects + { + nuts::SpanW span_old = span_prev; + nuts::SpanW span_new = span_next; + nuts::SpanW combined = + nuts::combine( + rng, std::move(span_old), std::move(span_new)); + + expect_selected_point_equal(span_prev, new_logp, combined); + } + } | std::tuple(); + + // metropolis is always strightly higher than barker, so no case where barker + // accepts and metropolis rejects for a given random uniform draw + + "barker_and_metropolis_both_reject"_test = [] { + constexpr nuts::Direction direction = D::value; + + MockRandom rng{0.5}; + + nuts::SpanW span_prev = dummy_span(1.8); + nuts::SpanW span_next = dummy_span(0.3); + double new_logp = 
nuts::log_sum_exp(span_prev.logp_, span_next.logp_); + + // metropolis: 0.3 - 1.8 = -1.5 < -0.3, rejects + { + nuts::SpanW span_old = span_prev; + nuts::SpanW span_new = span_next; + nuts::SpanW combined = + nuts::combine( + rng, std::move(span_old), std::move(span_new)); + + expect_selected_point_equal(span_prev, new_logp, combined); + } + + // barker: 0.3 - logsumexp(0.3, 1.8) = 0.3 - 2.00 = -2.00 < -0.3, rejects + { + nuts::SpanW span_old = span_prev; + nuts::SpanW span_new = span_next; + nuts::SpanW combined = + nuts::combine( + rng, std::move(span_old), std::move(span_new)); + + expect_selected_point_equal(span_prev, new_logp, combined); + } + } | std::tuple(); + + "barker_and_metropolis_both_accept"_test = [] { + constexpr nuts::Direction direction = D::value; + + MockRandom rng{0.5}; + nuts::SpanW span_prev = dummy_span(0.3); + nuts::SpanW span_next = dummy_span(1.8); + double new_logp = nuts::log_sum_exp(span_prev.logp_, span_next.logp_); + + // metropolis: 1.8 - 0.3 = 1.5 > -0.3, accepts + { + nuts::SpanW span_old = span_prev; + nuts::SpanW span_new = span_next; + nuts::SpanW combined = + nuts::combine( + rng, std::move(span_old), std::move(span_new)); + + expect_selected_point_equal(span_next, new_logp, combined); + } + + // barker: 1.8 - logsumexp(0.3, 1.8) = 1.8 - 2.0 = -.2 > -0.3, accepts + { + nuts::SpanW span_old = span_prev; + nuts::SpanW span_new = span_next; + nuts::SpanW combined = + nuts::combine( + rng, std::move(span_old), std::move(span_new)); + + expect_selected_point_equal(span_next, new_logp, combined); + } + } | std::tuple(); + + "selected_point_came_from_second_span_on_acceptance"_test = + [] { + constexpr auto direction = std::tuple_element_t<0, T>::value; + constexpr auto update = std::tuple_element_t<1, T>::value; + + nuts::SpanW span_prev = dummy_span(0.3); + nuts::SpanW span_next = dummy_span(0.1); + double new_logp = nuts::log_sum_exp(span_prev.logp_, span_next.logp_); + + MockRandom rng_accept{0}; // always accept + + nuts::SpanW 
span_old = span_prev; + nuts::SpanW span_new = span_next; + nuts::SpanW combined = nuts::combine( + rng_accept, std::move(span_old), std::move(span_new)); + + expect_selected_point_equal(span_next, new_logp, combined); + } | + std::tuple(); + + "selected_point_came_from_first_span_on_rejection"_test = + [] { + constexpr auto direction = std::tuple_element_t<0, T>::value; + constexpr auto update = std::tuple_element_t<1, T>::value; + + nuts::SpanW span_prev = dummy_span(0.3); + nuts::SpanW span_next = dummy_span(0.1); + double new_logp = nuts::log_sum_exp(span_prev.logp_, span_next.logp_); + MockRandom rng_reject{1}; // always reject + + nuts::SpanW span_old = span_prev; + nuts::SpanW span_new = span_next; + nuts::SpanW combined = nuts::combine( + rng_reject, std::move(span_old), std::move(span_new)); + expect_selected_point_equal(span_prev, new_logp, combined); + } | + std::tuple(); +}; -// macro clean up -#undef EXPECT_FORWARD_ENDPOINT_EQUAL -#undef EXPECT_BACKWARD_ENDPOINT_EQUAL -#undef EXPECT_SELECTED_POINT_EQUAL +} // namespace span_test diff --git a/tests/dual_average_test.cpp b/tests/dual_average_test.cpp index 5807525..a594db4 100644 --- a/tests/dual_average_test.cpp +++ b/tests/dual_average_test.cpp @@ -1,14 +1,13 @@ -#include #include -#include #include -#include -#include #include +#include #include +namespace dual_average_test { + static double std_normal_lpdf(double x) { return -0.5 * x * x; } template @@ -25,30 +24,37 @@ static double sim_metropolis_accept(G& rng, double step_size) { return std::fmin(1.0, std::exp(std_normal_lpdf(x1) - std_normal_lpdf(x0))); } -TEST(DualAverage, Metropolis1D) { - // theory says that if we target 0.44 accept rate, the step size will be 2.4 - unsigned int seed = 7635445; - std::mt19937 rng(seed); - - double delta = 0.44; // optimal acceptance probability for D=1 - double init = 1.0; - double t0 = 10.0; // equal to default from Stan's NUTS - double gamma = 0.1; // equal to default from Stan's NUTS - double kappa = 0.9; // 
higher than default from Stan's NUTS - nuts::DualAverage da(init, delta, t0, gamma, kappa); - int N = 100000; // large N to account for different random behavior on - // different OSes - double total = 0; - double count = 0; - for (int n = 0; n < N; ++n) { +using namespace boost::ut; + +suite<"dual_average"> tests = [] { + "metropolis_1d"_test = [] { + // theory says that if we target 0.44 accept rate, the step size will + // be 2.4 + unsigned int seed = 7635445; + std::mt19937 rng(seed); + + double delta = 0.44; // optimal acceptance probability for D=1 + double init = 1.0; + double t0 = 10.0; // equal to default from Stan's NUTS + double gamma = 0.1; // equal to default from Stan's NUTS + double kappa = 0.9; // higher than default from Stan's NUTS + nuts::DualAverage da(init, delta, t0, gamma, kappa); + int N = 100000; // large N to account for different random behavior on + // different OSes + double total = 0; + double count = 0; + for (int n = 0; n < N; ++n) { + double step_size_hat = da.step_size(); + double alpha = sim_metropolis_accept(rng, step_size_hat); + da.observe(alpha); + total += alpha; + count += 1.0; + } double step_size_hat = da.step_size(); - double alpha = sim_metropolis_accept(rng, step_size_hat); - da.observe(alpha); - total += alpha; - count += 1.0; - } - double step_size_hat = da.step_size(); - EXPECT_NEAR(2.4, step_size_hat, 0.2); // step size not so accurate - double accept_hat = total / count; - EXPECT_NEAR(delta, accept_hat, 0.01); // achieved acceptance very accurate -} + expect(approx(2.4, step_size_hat, 0.2)); // step size not so accurate + double accept_hat = total / count; + expect( + approx(delta, accept_hat, 0.01)); // achieved acceptance very accurate + }; +}; +} // namespace dual_average_test diff --git a/tests/online_moments_test.cpp b/tests/online_moments_test.cpp index 4c4edd7..c24fb1a 100644 --- a/tests/online_moments_test.cpp +++ b/tests/online_moments_test.cpp @@ -1,15 +1,14 @@ -#include #include #include -#include 
-#include #include -#include #include +#include #include +namespace online_moments_test { + static Eigen::VectorXd discounted_mean(const std::vector& ys, double alpha) { std::size_t N = ys.size(); @@ -40,103 +39,109 @@ static Eigen::VectorXd discounted_variance( return weighted_sq_diff_sum / weight_sum; } -TEST(Welford, test_zero_observations) { - double alpha = 0.95; - long D = 2; - nuts::OnlineMoments acc(alpha, D); - - Eigen::VectorXd m = acc.mean(); - Eigen::VectorXd v = acc.variance(); - EXPECT_EQ(2, m.size()); - EXPECT_FLOAT_EQ(0.0, m(0)); - EXPECT_FLOAT_EQ(0.0, m(1)); - EXPECT_EQ(2, v.size()); - EXPECT_FLOAT_EQ(1.0, v(0)); - EXPECT_FLOAT_EQ(1.0, v(1)); -} - -TEST(Welford, test_one_observation) { - double alpha = 0.95; - long D = 2; - nuts::OnlineMoments acc(alpha, D); - - Eigen::VectorXd y(2); - y << 0.2, -1.3; - acc.observe(y); - - Eigen::VectorXd m = acc.mean(); - Eigen::VectorXd v = acc.variance(); - - EXPECT_EQ(2, m.size()); - EXPECT_FLOAT_EQ(0.2, m(0)); - EXPECT_FLOAT_EQ(-1.3, m(1)); - EXPECT_EQ(2, v.size()); - EXPECT_FLOAT_EQ(0.0, v(0)); - EXPECT_FLOAT_EQ(0.0, v(1)); -} - -TEST(Welford, test_no_discounting) { - long D = 2; - std::size_t N = 100; - std::vector ys(N); - for (std::size_t n = 0; n < N; ++n) { - ys[n] = Eigen::VectorXd::Zero(D); - } - for (std::size_t n = 0; n < N; ++n) { - double x = static_cast(n); - ys[n] << x, std::sqrt(x); - } - - Eigen::VectorXd sum = Eigen::VectorXd::Zero(D); - for (auto y : ys) { - sum += y; - } - Eigen::VectorXd mean_expected = sum / N; - - Eigen::VectorXd sum_sq_diffs = Eigen::VectorXd::Zero(D); - for (auto y : ys) { - sum_sq_diffs += - ((y - mean_expected).array() * (y - mean_expected).array()).matrix(); - } - Eigen::VectorXd variance_expected = sum_sq_diffs / N; - - double alpha = 1.0; - nuts::OnlineMoments acc(alpha, D); - - for (std::size_t n = 0; n < N; ++n) { - acc.observe(ys[n]); - } - Eigen::VectorXd m = acc.mean(); - Eigen::VectorXd v = acc.variance(); - - EXPECT_TRUE(m.isApprox(mean_expected, 1e-8)); - 
EXPECT_TRUE(v.isApprox(variance_expected, 1e-8)); -} - -TEST(Welford, test_ten_observations) { - long D = 3; - std::size_t N = 10; - std::vector ys(N); - for (std::size_t n = 0; n < N; ++n) { - ys[n] = Eigen::VectorXd::Zero(D); - } - for (std::size_t n = 0; n < N; ++n) { - double x = static_cast(n); - ys[n] << x, x * x, std::exp(x); - } - - double alpha = 0.95; - nuts::OnlineMoments acc(alpha, D); - - for (std::size_t n = 0; n < N; ++n) { - acc.observe(ys[n]); - } - Eigen::VectorXd m = acc.mean(); - Eigen::VectorXd v = acc.variance(); - - Eigen::VectorXd mean_expected = discounted_mean(ys, alpha); - EXPECT_TRUE(m.isApprox(mean_expected, 1e-8)); - - Eigen::VectorXd variance_expected = discounted_variance(ys, alpha); - EXPECT_TRUE(v.isApprox(variance_expected, 1e-8)); -} +using namespace boost::ut; + +suite<"welford"> tests = [] { + "zero_observations"_test = [] { + double alpha = 0.95; + long D = 2; + nuts::OnlineMoments acc(alpha, D); + + Eigen::VectorXd m = acc.mean(); + Eigen::VectorXd v = acc.variance(); + expect(m.size() == 2_u); + expect(m(0) == 0.0_d); + expect(m(1) == 0.0_d); + expect(v.size() == 2_u); + expect(v(0) == 1.0_d); + expect(v(1) == 1.0_d); + }; + + "one_observation"_test = [] { + double alpha = 0.95; + long D = 2; + nuts::OnlineMoments acc(alpha, D); + + Eigen::VectorXd y(2); + y << 0.2, -1.3; + acc.observe(y); + + Eigen::VectorXd m = acc.mean(); + Eigen::VectorXd v = acc.variance(); + + expect(m.size() == 2_u); + expect(m(0) == 0.2_d); + expect(m(1) == -1.3_d); + expect(v.size() == 2_u); + expect(v(0) == 0.0_d); + expect(v(1) == 0.0_d); + }; + + "no_discounting"_test = [] { + long D = 2; + std::size_t N = 100; + std::vector ys(N); + for (std::size_t n = 0; n < N; ++n) { + ys[n] = Eigen::VectorXd::Zero(D); + } + for (std::size_t n = 0; n < N; ++n) { + double x = static_cast(n); + ys[n] << x, std::sqrt(x); + } + + Eigen::VectorXd sum = Eigen::VectorXd::Zero(D); + for (auto y : ys) { + sum += y; + } + Eigen::VectorXd mean_expected = sum / N; + + 
Eigen::VectorXd sum_sq_diffs = Eigen::VectorXd::Zero(D); + for (auto y : ys) { + sum_sq_diffs += + ((y - mean_expected).array() * (y - mean_expected).array()).matrix(); + } + Eigen::VectorXd variance_expected = sum_sq_diffs / N; + + double alpha = 1.0; + nuts::OnlineMoments acc(alpha, D); + + for (std::size_t n = 0; n < N; ++n) { + acc.observe(ys[n]); + } + Eigen::VectorXd m = acc.mean(); + Eigen::VectorXd v = acc.variance(); + + expect(m.isApprox(mean_expected, 1e-8)); + expect(v.isApprox(variance_expected, 1e-8)); + }; + + "ten_observations"_test = [] { + long D = 2; + std::size_t N = 10; + std::vector ys(N); + for (std::size_t n = 0; n < N; ++n) { + ys[n] = Eigen::VectorXd::Zero(D); + } + for (std::size_t n = 0; n < N; ++n) { + double x = static_cast(n); + ys[n] << x, std::sqrt(x); + } + + double alpha = 0.95; + nuts::OnlineMoments acc(alpha, D); + + for (std::size_t n = 0; n < N; ++n) { + acc.observe(ys[n]); + } + Eigen::VectorXd m = acc.mean(); + Eigen::VectorXd v = acc.variance(); + + Eigen::VectorXd mean_expected = discounted_mean(ys, alpha); + expect(m.isApprox(mean_expected, 1e-8)); + + Eigen::VectorXd variance_expected = discounted_variance(ys, alpha); + expect(v.isApprox(variance_expected, 1e-8)); + }; +}; + +} // namespace online_moments_test diff --git a/tests/test_runner.cpp b/tests/test_runner.cpp new file mode 100644 index 0000000..3dfbebd --- /dev/null +++ b/tests/test_runner.cpp @@ -0,0 +1,6 @@ +#include + +int main(int argc, const char* argv[]) { + namespace ut = boost::ut; + return ut::cfg.run({.argc = argc, .argv = argv}); +} diff --git a/tests/util_test.cpp b/tests/util_test.cpp index ed1da3e..4fd9a2e 100644 --- a/tests/util_test.cpp +++ b/tests/util_test.cpp @@ -2,12 +2,14 @@ #include -#include +#include #include #include #include +namespace util_test { + using S = double; using Vec = Eigen::Matrix; @@ -17,111 +19,115 @@ static Vec vec(S x1, S x2) { return y; } -TEST(Util, Walnuts) { - EXPECT_EQ(2 + 2, 4); - Vec thetabk1 = vec(-3, 0); - Vec 
thetafw1 = vec(-1, 0); - Vec thetabk2 = vec(1, 0); - Vec thetafw2 = vec(3, 0); - - Vec rhobk1 = vec(1, -1); - Vec rhofw1 = vec(0, 1); - Vec rhobk2 = vec(0, 1); - Vec rhofw2 = vec(-1, -1); - - // unused for U-turn, but needed for span - Vec gradbk1 = vec(0, 0); - Vec gradfw1 = vec(0, 0); - Vec gradbk2 = vec(0, 0); - Vec gradfw2 = vec(0, 0); - - S logpbk1 = 0; - S logpfw1 = 0; - S logpbk2 = 0; - S logpfw2 = 0; - - Vec theta1 = vec(0, 0); - Vec theta2 = vec(0, 0); - Vec grad1 = vec(0, 0); - Vec grad2 = vec(0, 0); - - S logp1 = 0; - S logp2 = 0; - - Vec inv_mass = vec(1, 1); - - nuts::SpanW span1bk(std::move(thetabk1), std::move(rhobk1), - std::move(gradbk1), logpbk1); - nuts::SpanW span1fw(std::move(thetafw1), std::move(rhofw1), - std::move(gradfw1), logpfw1); - nuts::SpanW span2bk(std::move(thetabk2), std::move(rhobk2), - std::move(gradbk2), logpbk2); - nuts::SpanW span2fw(std::move(thetafw2), std::move(rhofw2), - std::move(gradfw2), logpfw2); - - nuts::SpanW span1(std::move(span1bk), std::move(span1fw), - std::move(theta1), std::move(grad1), logp1); - nuts::SpanW span2(std::move(span2bk), std::move(span2fw), - std::move(theta2), std::move(grad2), logp2); - - EXPECT_TRUE((nuts::uturn>( - span1, span2, inv_mass))); - EXPECT_FALSE((nuts::uturn>( - span2, span1, inv_mass))); - - EXPECT_TRUE((nuts::uturn>( - span2, span1, inv_mass))); - EXPECT_FALSE((nuts::uturn>( - span1, span2, inv_mass))); -} - -TEST(Util, WalnutsRegression) { - Vec thetabk1 = vec(3, 0); - Vec thetafw1 = vec(0, 0); - Vec thetabk2 = vec(1, 0); - Vec thetafw2 = vec(3, 0); - - Vec rhobk1 = vec(-1, 1); - Vec rhofw1 = vec(0, 1); - Vec rhobk2 = vec(0, 1); - Vec rhofw2 = vec(1, -1); - - // unused for U-turn, but needed for span - Vec gradbk1 = vec(0, 0); - Vec gradfw1 = vec(0, 0); - Vec gradbk2 = vec(0, 0); - Vec gradfw2 = vec(0, 0); - - S logpbk1 = 0; - S logpfw1 = 0; - S logpbk2 = 0; - S logpfw2 = 0; - - Vec theta1 = vec(0, 0); - Vec theta2 = vec(0, 0); - Vec grad1 = vec(0, 0); - Vec grad2 = vec(0, 0); - - 
S logp1 = 0; - S logp2 = 0; - - Vec inv_mass = vec(1, 1); - - nuts::SpanW span1bk(std::move(thetabk1), std::move(rhobk1), - std::move(gradbk1), logpbk1); - nuts::SpanW span1fw(std::move(thetafw1), std::move(rhofw1), - std::move(gradfw1), logpfw1); - nuts::SpanW span2bk(std::move(thetabk2), std::move(rhobk2), - std::move(gradbk2), logpbk2); - nuts::SpanW span2fw(std::move(thetafw2), std::move(rhofw2), - std::move(gradfw2), logpfw2); - - nuts::SpanW span1(std::move(span1bk), std::move(span1fw), - std::move(theta1), std::move(grad1), logp1); - nuts::SpanW span2(std::move(span2bk), std::move(span2fw), - std::move(theta2), std::move(grad2), logp2); - - // following test fails in the original code with buggy uturn condition - EXPECT_FALSE((nuts::uturn>( - span1, span2, inv_mass))); -} +using namespace boost::ut; + +suite<"util"> tests = [] { + "basic_uturn"_test = [] { + Vec thetabk1 = vec(-3, 0); + Vec thetafw1 = vec(-1, 0); + Vec thetabk2 = vec(1, 0); + Vec thetafw2 = vec(3, 0); + + Vec rhobk1 = vec(1, -1); + Vec rhofw1 = vec(0, 1); + Vec rhobk2 = vec(0, 1); + Vec rhofw2 = vec(-1, -1); + + // unused for U-turn, but needed for span + Vec gradbk1 = vec(0, 0); + Vec gradfw1 = vec(0, 0); + Vec gradbk2 = vec(0, 0); + Vec gradfw2 = vec(0, 0); + + S logpbk1 = 0; + S logpfw1 = 0; + S logpbk2 = 0; + S logpfw2 = 0; + + Vec theta1 = vec(0, 0); + Vec theta2 = vec(0, 0); + Vec grad1 = vec(0, 0); + Vec grad2 = vec(0, 0); + + S logp1 = 0; + S logp2 = 0; + + Vec inv_mass = vec(1, 1); + + nuts::SpanW span1bk(std::move(thetabk1), std::move(rhobk1), + std::move(gradbk1), logpbk1); + nuts::SpanW span1fw(std::move(thetafw1), std::move(rhofw1), + std::move(gradfw1), logpfw1); + nuts::SpanW span2bk(std::move(thetabk2), std::move(rhobk2), + std::move(gradbk2), logpbk2); + nuts::SpanW span2fw(std::move(thetafw2), std::move(rhofw2), + std::move(gradfw2), logpfw2); + + nuts::SpanW span1(std::move(span1bk), std::move(span1fw), + std::move(theta1), std::move(grad1), logp1); + nuts::SpanW 
span2(std::move(span2bk), std::move(span2fw), + std::move(theta2), std::move(grad2), logp2); + + expect(nuts::uturn>( + span1, span2, inv_mass)); + expect(!nuts::uturn>( + span2, span1, inv_mass)); + expect(nuts::uturn>( + span2, span1, inv_mass)); + expect(!nuts::uturn>( + span1, span2, inv_mass)); + }; + + "uturn_regression"_test = [] { + Vec thetabk1 = vec(3, 0); + Vec thetafw1 = vec(0, 0); + Vec thetabk2 = vec(1, 0); + Vec thetafw2 = vec(3, 0); + + Vec rhobk1 = vec(-1, 1); + Vec rhofw1 = vec(0, 1); + Vec rhobk2 = vec(0, 1); + Vec rhofw2 = vec(1, -1); + + // unused for U-turn, but needed for span + Vec gradbk1 = vec(0, 0); + Vec gradfw1 = vec(0, 0); + Vec gradbk2 = vec(0, 0); + Vec gradfw2 = vec(0, 0); + + S logpbk1 = 0; + S logpfw1 = 0; + S logpbk2 = 0; + S logpfw2 = 0; + + Vec theta1 = vec(0, 0); + Vec theta2 = vec(0, 0); + Vec grad1 = vec(0, 0); + Vec grad2 = vec(0, 0); + + S logp1 = 0; + S logp2 = 0; + + Vec inv_mass = vec(1, 1); + + nuts::SpanW span1bk(std::move(thetabk1), std::move(rhobk1), + std::move(gradbk1), logpbk1); + nuts::SpanW span1fw(std::move(thetafw1), std::move(rhofw1), + std::move(gradfw1), logpfw1); + nuts::SpanW span2bk(std::move(thetabk2), std::move(rhobk2), + std::move(gradbk2), logpbk2); + nuts::SpanW span2fw(std::move(thetafw2), std::move(rhofw2), + std::move(gradfw2), logpfw2); + + nuts::SpanW span1(std::move(span1bk), std::move(span1fw), + std::move(theta1), std::move(grad1), logp1); + nuts::SpanW span2(std::move(span2bk), std::move(span2fw), + std::move(theta2), std::move(grad2), logp2); + + // following test fails in the original code with buggy uturn condition + + expect(!nuts::uturn>( + span1, span2, inv_mass)); + }; +}; +} // namespace util_test