Commit 1eda073
-fixing scoping issues in fixtures: previously T.class_setup style setup/teardown should've all been @classmethod. py.test doesn't even work without this setting (and it was very very fragile before)
-fixing function scoping issues in general (classmethod/staticmethod where appropriate)
suntzu86 committed Oct 15, 2014
1 parent c38456f commit 1eda073
Showing 22 changed files with 164 additions and 137 deletions.
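
For context: these tests had evidently just been ported from Testify-style T.class_setup hooks (which run once per class) to py.test, and the straight port left the class-level setup as ordinary instance-method fixtures. The pattern this commit standardizes on is sketched below — illustrative names only, written against the pytest 2.x releases current at the time:

import pytest


class ExampleTestCase(object):

    @classmethod
    @pytest.fixture(autouse=True, scope='class')
    def base_setup(cls):
        """Class-scoped replacement for a Testify-style T.class_setup hook."""
        # runs once per test class; state hangs off the class, not an instance
        cls.shared_resource = list(range(100))

    def test_sees_class_state(self):
        # instance attribute lookup falls through to the class attribute
        assert len(self.shared_resource) == 100

With scope='class' the fixture body executes once per test class instead of once per test, and @classmethod makes cls the object the state is stored on, which is what the old Testify hooks assumed.
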
3 changes: 1 addition & 2 deletions Makefile
@@ -8,8 +8,7 @@ production:
 	python setup.py install
 
 test:
-	py.test -v moe/tests
-	py.test -v moe_examples/tests
+	py.test -v moe/tests moe_examples/tests
 
 style-test:
 	pip install flake8 flake8-import-order pep8-naming flake8-docstrings pyflakes
21 changes: 11 additions & 10 deletions moe/tests/bandit/bandit_interface_test.py
@@ -8,19 +8,20 @@
 from moe.tests.bandit.bandit_test_case import BanditTestCase
 
 
-class TestBanditInterface(BanditTestCase):
+@pytest.fixture()
+def disable_logging(request):
+    """Disable logging (for the duration of this test case)."""
+    logging.disable(logging.CRITICAL)
 
-    """Verify that different historical infos return correct results."""
+    def finalize():
+        """Re-enable logging (so other test cases are unaffected)."""
+        logging.disable(logging.NOTSET)
+    request.addfinalizer(finalize)
 
-    @pytest.fixture()
-    def disable_logging(self, request):
-        """Disable logging (for the duration of this test case)."""
-        logging.disable(logging.CRITICAL)
-
-        def finalize():
-            """Re-enable logging (so other test cases are unaffected)."""
-            logging.disable(logging.NOTSET)
-        request.addfinalizer(finalize)
+
+class TestBanditInterface(BanditTestCase):
 
     """Verify that different historical infos return correct results."""
 
     @pytest.mark.usefixtures("disable_logging")
     def test_empty_arm_invalid(self):
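
The request.addfinalizer call above registers the teardown so it runs even when the test fails; hoisting the fixture out of the class also drops the spurious self parameter. On pytest 3.0 and later the same fixture is usually written with yield — an equivalent sketch, not part of this commit:

import logging

import pytest


@pytest.fixture()
def disable_logging():
    """Disable logging for the duration of one test, then restore it."""
    logging.disable(logging.CRITICAL)
    yield  # the test body runs here
    logging.disable(logging.NOTSET)  # restored even if the test raised

The same move repeats in the three bandit test files below.
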
21 changes: 11 additions & 10 deletions moe/tests/bandit/data_containers_test.py
@@ -10,19 +10,20 @@
 from moe.tests.bandit.bandit_test_case import BanditTestCase
 
 
-class TestDataContainers(BanditTestCase):
+@pytest.fixture()
+def disable_logging(request):
+    """Disable logging (for the duration of this test case)."""
+    logging.disable(logging.CRITICAL)
 
-    """Tests functions in :class:`moe.bandit.data_containers.SampleArm` and :class:`moe.bandit.data_containers.HistoricalData`."""
+    def finalize():
+        """Re-enable logging (so other test cases are unaffected)."""
+        logging.disable(logging.NOTSET)
+    request.addfinalizer(finalize)
 
-    @pytest.fixture()
-    def disable_logging(self, request):
-        """Disable logging (for the duration of this test case)."""
-        logging.disable(logging.CRITICAL)
-
-        def finalize():
-            """Re-enable logging (so other test cases are unaffected)."""
-            logging.disable(logging.NOTSET)
-        request.addfinalizer(finalize)
+
+class TestDataContainers(BanditTestCase):
 
     """Tests functions in :class:`moe.bandit.data_containers.SampleArm` and :class:`moe.bandit.data_containers.HistoricalData`."""
 
     def test_sample_arm_str(self):
         """Test SampleArm's __str__ overload operator."""
21 changes: 11 additions & 10 deletions moe/tests/bandit/epsilon/epsilon_test.py
@@ -12,19 +12,20 @@
 from moe.tests.bandit.epsilon.epsilon_test_case import EpsilonTestCase
 
 
-class TestEpsilon(EpsilonTestCase):
+@pytest.fixture()
+def disable_logging(request):
+    """Disable logging (for the duration of this test case)."""
+    logging.disable(logging.CRITICAL)
 
-    """Verify that different sample_arms return correct results."""
+    def finalize():
+        """Re-enable logging (so other test cases are unaffected)."""
+        logging.disable(logging.NOTSET)
+    request.addfinalizer(finalize)
 
-    @pytest.fixture()
-    def disable_logging(self, request):
-        """Disable logging (for the duration of this test case)."""
-        logging.disable(logging.CRITICAL)
-
-        def finalize():
-            """Re-enable logging (so other test cases are unaffected)."""
-            logging.disable(logging.NOTSET)
-        request.addfinalizer(finalize)
+
+class TestEpsilon(EpsilonTestCase):
 
     """Verify that different sample_arms return correct results."""
 
     @pytest.mark.usefixtures("disable_logging")
     def test_empty_arm_invalid(self):
21 changes: 11 additions & 10 deletions moe/tests/bandit/utils_test.py
@@ -8,19 +8,20 @@
 from moe.tests.bandit.bandit_test_case import BanditTestCase
 
 
-class TestUtils(BanditTestCase):
+@pytest.fixture()
+def disable_logging(request):
+    """Disable logging (for the duration of this test case)."""
+    logging.disable(logging.CRITICAL)
 
-    """Tests :func:`moe.bandit.utils.get_winning_arm_names_from_payoff_arm_name_list` and :func:`moe.bandit.utils.get_equal_arm_allocations`."""
+    def finalize():
+        """Re-enable logging (so other test cases are unaffected)."""
+        logging.disable(logging.NOTSET)
+    request.addfinalizer(finalize)
 
-    @pytest.fixture()
-    def disable_logging(self, request):
-        """Disable logging (for the duration of this test case)."""
-        logging.disable(logging.CRITICAL)
-
-        def finalize():
-            """Re-enable logging (so other test cases are unaffected)."""
-            logging.disable(logging.NOTSET)
-        request.addfinalizer(finalize)
+
+class TestUtils(BanditTestCase):
 
     """Tests :func:`moe.bandit.utils.get_winning_arm_names_from_payoff_arm_name_list` and :func:`moe.bandit.utils.get_equal_arm_allocations`."""
 
     @pytest.mark.usefixtures("disable_logging")
     def test_get_winning_arm_names_from_payoff_arm_name_list_empty_list_invalid(self):
@@ -47,16 +47,17 @@ class TestExpectedImprovement(GaussianProcessTestCase):
 
     num_sampled_list = (1, 2, 5, 10, 16, 20, 42, 50)
 
-    @pytest.fixture(autouse=True)
-    def base_setup(self):
+    @classmethod
+    @pytest.fixture(autouse=True, scope='class')
+    def base_setup(cls):
         """Run the standard setup but seed the RNG first (for repeatability).
 
         It is easy to stumble into test cases where EI is very small (e.g., < 1.e-20),
         which makes it difficult to set meaningful tolerances for the checks.
 
         """
         numpy.random.seed(8794)
-        super(TestExpectedImprovement, self).base_setup()
+        super(TestExpectedImprovement, cls).base_setup()
 
     def test_python_and_cpp_return_same_1d_analytic_ei_and_gradient(self):
         """Compare the 1D analytic EI/grad EI results from Python & C++, checking several random points per test case."""
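
The subclass here overrides the base fixture only to seed the RNG before delegating. Two things worth noting: super(TestExpectedImprovement, cls) is the mandatory Python 2 spelling, and calling the parent fixture function directly, as the super() call does, was tolerated by 2014-era pytest but is rejected as an error by modern pytest. A condensed sketch of the override chain, with stand-in data:

import numpy
import pytest


class GaussianProcessTestCase(object):

    @classmethod
    @pytest.fixture(autouse=True, scope='class')
    def base_setup(cls):
        # stand-in for the real precomputed GP test environments
        cls.gp_test_environments = numpy.random.rand(3, 5)


class TestExpectedImprovement(GaussianProcessTestCase):

    @classmethod
    @pytest.fixture(autouse=True, scope='class')
    def base_setup(cls):
        numpy.random.seed(8794)  # fix the RNG so the generated data is repeatable
        super(TestExpectedImprovement, cls).base_setup()

Note the behavioral shift that comes with the new scope: the old function-scoped fixture re-seeded before every test, while this class-scoped version seeds once and lets all tests in the class share the random stream. The next file applies the identical change to TestGaussianProcess.
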
@@ -19,16 +19,17 @@ class TestGaussianProcess(GaussianProcessTestCase):
 
     precompute_gaussian_process_data = True
 
-    @pytest.fixture(autouse=True)
-    def base_setup(self):
+    @classmethod
+    @pytest.fixture(autouse=True, scope='class')
+    def base_setup(cls):
         """Run the standard setup but seed the RNG first (for repeatability).
 
         It is easy to stumble into test cases where mean, var terms are very small (e.g., < 1.e-20),
         which makes it difficult to set meaningful tolerances for the checks.
 
         """
         numpy.random.seed(8794)
-        super(TestGaussianProcess, self).base_setup()
+        super(TestGaussianProcess, cls).base_setup()
 
     def test_sample_point_from_gp(self):
         """Test that sampling points from the GP works."""
30 changes: 16 additions & 14 deletions moe/tests/optimal_learning/python/gaussian_process_test_case.py
@@ -147,29 +147,31 @@ class GaussianProcessTestCase(OptimalLearningTestCase):
     num_sampled_list = (1, 2, 3, 5, 10, 16, 20, 42)
     num_to_sample_list = (1, 2, 3, 8)
 
-    @pytest.fixture(autouse=True)
-    def base_setup(self):
-        """Build a Gaussian Process prior for each problem size in ``self.num_sampled_list`` if precomputation is desired.
+    @classmethod
+    @pytest.fixture(autouse=True, scope='class')
+    def base_setup(cls):
+        """Build a Gaussian Process prior for each problem size in ``cls.num_sampled_list`` if precomputation is desired.
 
         **Requires**
 
-        * self.num_sampled_list: (*list of int*) problem sizes to consider
-        * self.gp_test_environment_input: (*GaussianProcessTestEnvironmentInput*) specification of how to build the
+        * cls.num_sampled_list: (*list of int*) problem sizes to consider
+        * cls.gp_test_environment_input: (*GaussianProcessTestEnvironmentInput*) specification of how to build the
           gaussian process prior
 
         **Outputs**
 
-        * self.gp_test_environments: (*list of GaussianProcessTestEnvironment*) gaussian process data for each of the
-          specified problem sizes (``self.num_sampled_list``)
+        * cls.gp_test_environments: (*list of GaussianProcessTestEnvironment*) gaussian process data for each of the
+          specified problem sizes (``cls.num_sampled_list``)
 
         """
-        if self.precompute_gaussian_process_data:
-            self.gp_test_environments = []
-            for num_sampled in self.num_sampled_list:
-                self.gp_test_environment_input.num_sampled = num_sampled
-                self.gp_test_environments.append(self._build_gaussian_process_test_data(self.gp_test_environment_input))
-
-    def _build_gaussian_process_test_data(self, test_environment):
+        if cls.precompute_gaussian_process_data:
+            cls.gp_test_environments = []
+            for num_sampled in cls.num_sampled_list:
+                cls.gp_test_environment_input.num_sampled = num_sampled
+                cls.gp_test_environments.append(cls._build_gaussian_process_test_data(cls.gp_test_environment_input))
+
+    @staticmethod
+    def _build_gaussian_process_test_data(test_environment):
        """Build up a Gaussian Process randomly by repeatedly drawing from and then adding to the prior.
 
        :param test_environment: parameters describing how to construct a GP prior
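
_build_gaussian_process_test_data touches neither instance nor class state, so @staticmethod is the honest signature — and the classmethod fixture can still reach it through cls, which keeps subclass overrides working. A generic sketch of that split, with hypothetical names:

class Builder(object):

    @staticmethod
    def _build_one(spec):
        # pure function of its arguments: no self or cls needed
        return {'spec': spec, 'data': [spec] * 3}

    @classmethod
    def build_all(cls, specs):
        # cls._build_one(...) dispatches through the class, so a subclass
        # may override _build_one and build_all will pick it up
        return [cls._build_one(spec) for spec in specs]
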
30 changes: 16 additions & 14 deletions moe/tests/optimal_learning/python/geometry_utils_test.py
@@ -31,8 +31,9 @@ class TestLatinHypercubeRandomPointGeneration(OptimalLearningTestCase):
     """
 
-    @pytest.fixture(autouse=True)
-    def base_setup(self):
+    @classmethod
+    @pytest.fixture(autouse=True, scope='class')
+    def base_setup(cls):
         """Set up parameters for test cases."""
         domain_bounds_to_test = [
             ClosedInterval.build_closed_intervals_from_list([[-1.0, 1.0]]),
@@ -44,8 +45,8 @@ def base_setup(self):
             ClosedInterval.build_closed_intervals_from_list([[-7000.0, 10000.0], [-8000.0, -7999.0], [10000.06, 10000.0601]]),
         ]
 
-        self.domains_to_test = [TensorProductDomain(domain_bounds) for domain_bounds in domain_bounds_to_test]
-        self.num_points_to_test = (1, 2, 5, 10, 20)
+        cls.domains_to_test = [TensorProductDomain(domain_bounds) for domain_bounds in domain_bounds_to_test]
+        cls.num_points_to_test = (1, 2, 5, 10, 20)
 
     def test_latin_hypercube_within_domain(self):
         """Test that generate_latin_hypercube_points returns points within the domain."""
@@ -131,23 +132,24 @@ class TestClosedInterval(OptimalLearningTestCase):
 
     """Tests for ClosedInterval's member functions."""
 
-    @pytest.fixture(autouse=True)
-    def base_setup(self):
+    @classmethod
+    @pytest.fixture(autouse=True, scope='class')
+    def base_setup(cls):
         """Set up test cases (described inline)."""
-        self.test_cases = [
+        cls.test_cases = [
             ClosedInterval(9.378, 9.378),  # min == max
             ClosedInterval(-2.71, 3.14),  # min < max
             ClosedInterval(-2.71, -3.14),  # min > max
             ClosedInterval(0.0, numpy.inf),  # infinte range
         ]
 
-        self.points_to_check = numpy.empty((len(self.test_cases), 5))
-        for i, case in enumerate(self.test_cases):
-            self.points_to_check[i, 0] = (case.min + case.max) * 0.5  # midpoint
-            self.points_to_check[i, 1] = case.min  # left boundary
-            self.points_to_check[i, 2] = case.max  # right boundary
-            self.points_to_check[i, 3] = case.min - 0.5  # outside on the left
-            self.points_to_check[i, 4] = case.max + 0.5  # outside on the right
+        cls.points_to_check = numpy.empty((len(cls.test_cases), 5))
+        for i, case in enumerate(cls.test_cases):
+            cls.points_to_check[i, 0] = (case.min + case.max) * 0.5  # midpoint
+            cls.points_to_check[i, 1] = case.min  # left boundary
+            cls.points_to_check[i, 2] = case.max  # right boundary
+            cls.points_to_check[i, 3] = case.min - 0.5  # outside on the left
+            cls.points_to_check[i, 4] = case.max + 0.5  # outside on the right
 
     def test_length(self):
         """Check that length works."""
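
One caveat that comes with scope='class' (a hypothetical illustration, not code from this commit): state built once on cls is shared by every test in the class, so in-place mutation in one test is visible to the tests that run after it.

import pytest


class TestSharedState(object):

    @classmethod
    @pytest.fixture(autouse=True, scope='class')
    def base_setup(cls):
        cls.cases = [1, 2, 3]  # built once for the whole class

    def test_reading_is_safe(self):
        assert len(self.cases) == 3

    def test_in_place_mutation_leaks(self):
        self.cases.append(4)  # mutates the shared class attribute

That is harmless for read-only data like the interval fixtures above, but it is the main thing to check when widening fixture scope.
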
16 changes: 10 additions & 6 deletions moe/tests/optimal_learning/python/optimal_learning_test_case.py
@@ -17,7 +17,8 @@ class OptimalLearningTestCase(object):
 
     """
 
-    def assert_scalar_within_absolute(self, value, truth, tol):
+    @staticmethod
+    def assert_scalar_within_absolute(value, truth, tol):
         """Check whether a scalar ``value`` is equal to ``truth``: ``|value - truth| <= tol``.
 
         :param value: scalar to check
@@ -33,7 +34,8 @@ def assert_scalar_within_absolute(self, value, truth, tol):
         diff = numpy.fabs(value - truth)
         assert diff <= tol, 'value = {0:.18E}, truth = {1:.18E}, diff = {2:.18E}, tol = {3:.18E}'.format(value, truth, diff, tol)
 
-    def assert_scalar_within_relative(self, value, truth, tol):
+    @staticmethod
+    def assert_scalar_within_relative(value, truth, tol):
         """Check whether a scalar ``value`` is relatively equal to ``truth``: ``|value - truth|/|truth| <= tol``.
 
         :param value: scalar to check
@@ -52,7 +54,8 @@ def assert_scalar_within_relative(self, value, truth, tol):
         diff = numpy.fabs((value - truth) / denom)
         assert diff <= tol, 'value = {0:.18E}, truth = {1:.18E}, diff = {2:.18E}, tol = {3:.18E}'.format(value, truth, diff, tol)
 
-    def assert_vector_within_relative(self, value, truth, tol):
+    @staticmethod
+    def assert_vector_within_relative(value, truth, tol):
         """Check whether a vector is element-wise relatively equal to ``truth``: ``|value[i] - truth[i]|/|truth[i]| <= tol``.
 
         :param value: scalar to check
@@ -67,9 +70,10 @@ def assert_vector_within_relative(self, value, truth, tol):
         __tracebackhide__ = True
         assert value.shape == truth.shape, 'value.shape = {0} != truth.shape = {1}'.format(value.shape, truth.shape)
         for index in numpy.ndindex(value.shape):
-            self.assert_scalar_within_relative(value[index], truth[index], tol)
+            OptimalLearningTestCase.assert_scalar_within_relative(value[index], truth[index], tol)
 
-    def assert_points_distinct(self, point_list, tol):
+    @staticmethod
+    def assert_points_distinct(point_list, tol):
         """Check whether the distance between every pair of points is larger than tolerance.
 
         :param point_list: points to check
@@ -84,4 +88,4 @@ def assert_points_distinct(self, point_list, tol):
             for j in xrange(i + 1, point_list.shape[0]):
                 temp = point_list[i, ...] - point_list[j, ...]
                 dist = numpy.linalg.norm(temp)
-                self.assert_scalar_within_relative(dist, 0.0, tol)
+                OptimalLearningTestCase.assert_scalar_within_relative(dist, 0.0, tol)
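
Since a staticmethod has no self or cls in scope, helper-to-helper calls inside these assertions must name the class explicitly, as assert_vector_within_relative and assert_points_distinct now do. Call sites elsewhere can keep writing self.assert_... because staticmethods remain reachable through instances and subclasses. A compact sketch with hypothetical names:

import numpy


class Assertions(object):

    @staticmethod
    def assert_scalar_close(value, truth, tol):
        assert numpy.fabs(value - truth) <= tol

    @staticmethod
    def assert_vector_close(values, truths, tol):
        for value, truth in zip(values, truths):
            # no self/cls here, so qualify the call with the class name
            Assertions.assert_scalar_close(value, truth, tol)


class TestSomething(Assertions):

    def test_example(self):
        self.assert_vector_close([1.0, 2.0], [1.0, 2.0], tol=1e-12)  # still works via self
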
@@ -28,13 +28,14 @@ class TestSquareExponential(OptimalLearningTestCase):
     """
 
-    @pytest.fixture(autouse=True)
-    def base_setup(self):
+    @classmethod
+    @pytest.fixture(autouse=True, scope='class')
+    def base_setup(cls):
         """Set up parameters for test cases."""
-        self.epsilon = 2.0 * numpy.finfo(numpy.float64).eps
-        self.CovarianceClass = SquareExponential
+        cls.epsilon = 2.0 * numpy.finfo(numpy.float64).eps
+        cls.CovarianceClass = SquareExponential
 
-        self.one_dim_test_sets = numpy.array([
+        cls.one_dim_test_sets = numpy.array([
             [1.0, 0.1],
             [2.0, 0.1],
             [1.0, 1.0],
@@ -43,7 +44,7 @@ def base_setup(self):
             [0.1, 10.0],
         ])
 
-        self.three_dim_test_sets = numpy.array([
+        cls.three_dim_test_sets = numpy.array([
             [1.0, 0.1, 0.1, 0.1],
             [1.0, 0.1, 0.2, 0.1],
             [1.0, 0.1, 0.2, 0.3],
@@ -68,16 +68,17 @@ class TestExpectedImprovement(GaussianProcessTestCase):
         epsilon,
     )
 
-    @pytest.fixture(autouse=True)
-    def base_setup(self):
+    @classmethod
+    @pytest.fixture(autouse=True, scope='class')
+    def base_setup(cls):
         """Run the standard setup but seed the RNG first (for repeatability).
 
         It is easy to stumble into test cases where EI is very small (e.g., < 1.e-20),
         which makes it difficult to set meaningful tolerances for the checks.
 
         """
        numpy.random.seed(7859)
-        super(TestExpectedImprovement, self).base_setup()
+        super(TestExpectedImprovement, cls).base_setup()
 
     def test_expected_improvement_and_gradient(self):
         """Test EI by comparing the vectorized and "naive" versions.
@@ -154,7 +155,8 @@ def test_expected_improvement_and_gradient(self):
         # Restore state
         numpy.random.set_state(rng_state)
 
-    def _check_ei_symmetry(self, ei_eval, point_to_sample, shifts):
+    @classmethod
+    def _check_ei_symmetry(cls, ei_eval, point_to_sample, shifts):
         """Compute ei at each ``[point_to_sample +/- shift for shift in shifts]`` and check for equality.
 
         :param ei_eval: properly configured ExpectedImprovementEvaluator object
@@ -176,8 +178,8 @@ def _check_ei_symmetry(self, ei_eval, point_to_sample, shifts):
             right_ei = ei_eval.compute_expected_improvement()
             right_grad_ei = ei_eval.compute_grad_expected_improvement()
 
-            self.assert_scalar_within_relative(left_ei, right_ei, 0.0)
-            self.assert_vector_within_relative(left_grad_ei, -right_grad_ei, 0.0)
+            cls.assert_scalar_within_relative(left_ei, right_ei, 0.0)
+            cls.assert_vector_within_relative(left_grad_ei, -right_grad_ei, 0.0)
 
     def test_1d_analytic_ei_edge_cases(self):
         """Test cases where analytic EI would attempt to compute 0/0 without variance lower bounds."""
(Diffs for the remaining changed files are not shown here.)
