diff --git a/Makefile b/Makefile
index 908e9ce..40bed8e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,9 +1,9 @@
 ## install : Install project package locally and install pre-commit.
 .PHONY : install
 install :
-	pip install pip-tools
+	pip3 install pip-tools
 	pip-compile requirements.in
-	pip install -r requirements.txt
+	pip3 install -r requirements.txt
 
 ## help : Documentation for make targets.
 .PHONY : help
diff --git a/src/pylogit/choice_calcs.py b/src/pylogit/choice_calcs.py
index 5e24be5..1d75ee9 100644
--- a/src/pylogit/choice_calcs.py
+++ b/src/pylogit/choice_calcs.py
@@ -524,9 +524,8 @@ def quadratic_prod_wrt_dp_ds(left,
     if weights is None:
         weights = np.ones(probs.shape[0])
     # Convert matrixlib objects to ndarrays
-    left = left.A if isinstance(left, np.matrixlib.defmatrix.matrix) else left
-    right =\
-        right.A if isinstance(right, np.matrixlib.defmatrix.matrix) else right
+    left = np.asarray(left) if isinstance(left, np.matrixlib.defmatrix.matrix) else left
+    right = np.asarray(right) if isinstance(right, np.matrixlib.defmatrix.matrix) else right
     # Determine properties of left and right
     left_is_ndarray = isinstance(left, np.ndarray)
     left_is_sparse, right_is_sparse = issparse(left), issparse(right)
@@ -957,8 +956,8 @@ def calc_fisher_info_matrix(beta,
         ##########
         if shape_params is not None and intercept_params is not None:
             if isinstance(dh_dc, np.matrixlib.defmatrix.matrix):
-                # Note that the '.A' transforms the matrix into a numpy ndarray
-                gradient_vec = d_ll_dh.T * np.concatenate((dh_dc.A,
+                # Note that the 'np.asarray()' transforms the matrix into a numpy ndarray
+                gradient_vec = d_ll_dh.T * np.concatenate((np.asarray(dh_dc),
                                                            dh_d_alpha.toarray(),
                                                            dh_db), axis=1)
             else:
@@ -967,8 +966,8 @@ def calc_fisher_info_matrix(beta,
                                                            dh_db), axis=1)
         elif shape_params is not None and intercept_params is None:
             if isinstance(dh_dc, np.matrixlib.defmatrix.matrix):
-                # Note that the '.A' transforms the matrix into a numpy ndarray
-                gradient_vec = d_ll_dh.T * np.concatenate((dh_dc.A, dh_db), axis=1)
+                # Note that the 'np.asarray()' transforms the matrix into a numpy ndarray
+                gradient_vec = d_ll_dh.T * np.concatenate((np.asarray(dh_dc), dh_db), axis=1)
             else:
                 gradient_vec = d_ll_dh.T * np.concatenate((dh_dc.toarray(),
                                                            dh_db), axis=1)
diff --git a/src/pylogit/choice_tools.py b/src/pylogit/choice_tools.py
index c9653d7..6845756 100644
--- a/src/pylogit/choice_tools.py
+++ b/src/pylogit/choice_tools.py
@@ -12,7 +12,10 @@
 import warnings
 from collections import OrderedDict
-from collections import Iterable
+try:
+    from collections.abc import Iterable
+except ImportError:
+    from collections import Iterable
 from numbers import Number
 
 import numpy as np
@@ -971,7 +974,7 @@ def create_long_form_mappings(long_form,
     if dense:
         for key in mapping_dict:
             if mapping_dict[key] is not None:
-                mapping_dict[key] = mapping_dict[key].A
+                mapping_dict[key] = mapping_dict[key].toarray()
 
     return mapping_dict
diff --git a/src/pylogit/mixed_logit.py b/src/pylogit/mixed_logit.py
index 41716ac..dc280c2 100755
--- a/src/pylogit/mixed_logit.py
+++ b/src/pylogit/mixed_logit.py
@@ -662,7 +662,7 @@ def __filter_past_mappings(self,
             orig_map = past_mappings[key]
             # Initialize the resultant array that is desired
             new_map = orig_map.multiply(np.tile(mask_array,
-                                                (1, orig_map.shape[1]))).A
+                                                (1, orig_map.shape[1]))).toarray()
             # Perform the desired filtering
             current_filter = (new_map.sum(axis=1) != 0)
             if current_filter.shape[0] > 0:
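The `.A` attribute removed throughout these files is shorthand specific to the deprecated `np.matrix` class, while scipy's sparse matrices are densified with `.toarray()`. A minimal sketch of the distinction the replacements rely on (illustration only, not part of the patch):

```python
import numpy as np
from scipy.sparse import csr_matrix

# For the deprecated np.matrix class, `.A` was shorthand for densifying
# to a base ndarray; np.asarray() is the explicit equivalent and is a
# no-op on plain ndarrays.
m = np.matrix([[1.0, 2.0], [3.0, 4.0]])
assert type(np.asarray(m)) is np.ndarray
assert np.array_equal(np.asarray(m), m.A)

# scipy sparse matrices must instead be densified with .toarray();
# np.asarray() does NOT densify a sparse matrix.
s = csr_matrix(np.eye(2))
assert isinstance(s.toarray(), np.ndarray)
```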
diff --git a/src/pylogit/nested_choice_calcs.py b/src/pylogit/nested_choice_calcs.py
index c949a38..3513073 100644
--- a/src/pylogit/nested_choice_calcs.py
+++ b/src/pylogit/nested_choice_calcs.py
@@ -173,12 +173,14 @@ def calc_nested_probs(nest_coefs,
     long_exp_sums_per_nest = np.asarray(long_exp_sums_per_nest)
 
     # Get the relevant log-sum for each row of the long-format data
-    # Note the .A converts the numpy matrix into a numpy array
+    # Note the branch below converts the summed result into a numpy array
     # This is sum _{j \in C_m} exp(V_{ij} / \lambda_m) for the nest
     # belonging to each row
-    long_exp_sums = (rows_to_nests.multiply(long_exp_sums_per_nest)
-                                  .sum(axis=1)
-                                  .A).ravel()
+    long_exp_sums_temp = rows_to_nests.multiply(long_exp_sums_per_nest).sum(axis=1)
+    if isinstance(long_exp_sums_temp, np.matrixlib.defmatrix.matrix):
+        long_exp_sums = np.asarray(long_exp_sums_temp).ravel()
+    else:
+        long_exp_sums = long_exp_sums_temp.toarray().ravel()
 
     # Get the denominators for each individual
     ind_denom = (np.power(ind_exp_sums_per_nest,
@@ -255,7 +257,7 @@ def calc_nested_probs(nest_coefs,
         zero_idx = (nest_choice_probs == 0)
         nest_choice_probs[zero_idx] = min_comp_value
     # Return dictionary.
-    # Note the ".A" converts the numpy matrix into a numpy array
+    # Note the ".toarray()" converts a sparse matrix into a numpy array
     prob_dict["prob_given_nest"] = prob_given_nest
     prob_dict["nest_choice_probs"] = nest_choice_probs
     prob_dict["ind_sums_per_nest"] = ind_exp_sums_per_nest
@@ -407,10 +409,11 @@ def prep_vectors_for_gradient(nest_coefs,
     # Create the "long_nest_parameters" which is an array with one element per
     # alternative per observation, where each element is the nest parameter for
     # the alternative corresponding to the given row
-    long_nest_params = (rows_to_nests.multiply(nest_coefs[None, :])
-                                     .sum(axis=1)
-                                     .A
-                                     .ravel())
+    long_nest_params_temp = rows_to_nests.multiply(nest_coefs[None, :]).sum(axis=1)
+    if isinstance(long_nest_params_temp, np.matrixlib.defmatrix.matrix):
+        long_nest_params = np.asarray(long_nest_params_temp).ravel()
+    else:
+        long_nest_params = long_nest_params_temp.toarray().ravel()
 
     # Calculate y-tilde
     scaled_y = choice_vec / long_nest_params
@@ -420,14 +423,19 @@
     # Determine which nest was chosen by each row's individual.
     # Resulting matrix has shape (num_rows, num_nests)
-    obs_to_chosen_nests = (rows_to_obs.T *
-                           rows_to_nests.multiply(choice_vec[:, None])).A
+    obs_to_chosen_nests_temp = (rows_to_obs.T *
+                                rows_to_nests.multiply(choice_vec[:, None]))
+    if isinstance(obs_to_chosen_nests_temp, np.matrixlib.defmatrix.matrix):
+        obs_to_chosen_nests = np.asarray(obs_to_chosen_nests_temp)
+    else:
+        obs_to_chosen_nests = obs_to_chosen_nests_temp.toarray()
     row_to_chosen_nest = rows_to_obs * obs_to_chosen_nests
 
     # Determine whether the given row is part of the nest that was chosen
-    long_chosen_nest = (rows_to_nests.multiply(row_to_chosen_nest)
-                                     .sum(axis=1)
-                                     .A
-                                     .ravel())
+    long_chosen_nest_temp = rows_to_nests.multiply(row_to_chosen_nest).sum(axis=1)
+    if isinstance(long_chosen_nest_temp, np.matrixlib.defmatrix.matrix):
+        long_chosen_nest = np.asarray(long_chosen_nest_temp).ravel()
+    else:
+        long_chosen_nest = long_chosen_nest_temp.toarray().ravel()
 
     # Get the various probabilities
     prob_dict = calc_nested_probs(nest_coefs,
@@ -566,7 +574,10 @@ def calc_nested_gradient(orig_nest_coefs,
     # Calculate the weights for the sample
     if weights is None:
         weights = np.ones(design.shape[0])
-    weights_per_obs = np.max(rows_to_obs.toarray() * weights[:, None], axis=0)
+    if isinstance(rows_to_obs, np.matrixlib.defmatrix.matrix):
+        weights_per_obs = np.max(np.asarray(rows_to_obs) * weights[:, None], axis=0)
+    else:
+        weights_per_obs = np.max(rows_to_obs.toarray() * weights[:, None], axis=0)
 
     # Transform the nest coefficients into their "always positive" versions
     nest_coefs = naturalize_nest_coefs(orig_nest_coefs)
@@ -743,7 +754,10 @@ def calc_bhhh_hessian_approximation(orig_nest_coefs,
     # Calculate the weights for the sample
     if weights is None:
         weights = np.ones(design.shape[0])
-    weights_per_obs = np.max(rows_to_obs.toarray() * weights[:, None], axis=0)
+    if isinstance(rows_to_obs, np.matrixlib.defmatrix.matrix):
+        weights_per_obs = np.max(np.asarray(rows_to_obs) * weights[:, None], axis=0)
+    else:
+        weights_per_obs = np.max(rows_to_obs.toarray() * weights[:, None], axis=0)
 
     # Transform the nest coefficients into their "always positive" versions
     nest_coefs = naturalize_nest_coefs(orig_nest_coefs)
@@ -790,7 +804,11 @@ def calc_bhhh_hessian_approximation(orig_nest_coefs,
     spread_half_deriv = rows_to_nests.multiply(half_deriv)
     # Aggregate the spread out half-derivatives to the individual level
     # This object should have shape (num_obs, num_nests)
-    nest_gradient_term_2 = rows_to_obs.transpose().dot(spread_half_deriv).A
+    nest_gradient_term_2_temp = rows_to_obs.transpose().dot(spread_half_deriv)
+    if isinstance(nest_gradient_term_2_temp, np.matrixlib.defmatrix.matrix):
+        nest_gradient_term_2 = np.asarray(nest_gradient_term_2_temp)
+    else:
+        nest_gradient_term_2 = nest_gradient_term_2_temp.toarray()
 
     # Calculate the third term of the derivative of the log-likelihood
     # with respect to the nest parameters
@@ -810,7 +828,11 @@ def calc_bhhh_hessian_approximation(orig_nest_coefs,
     # Get the nest-wide version of this piece of the gradient
     spread_out_term_3b = rows_to_nests.multiply(nest_gradient_term_3b[:, None])
-    nest_gradient_term_3 = rows_to_obs.transpose().dot(spread_out_term_3b).A
+    nest_gradient_term_3_temp = rows_to_obs.transpose().dot(spread_out_term_3b)
+    if isinstance(nest_gradient_term_3_temp, np.matrixlib.defmatrix.matrix):
+        nest_gradient_term_3 = np.asarray(nest_gradient_term_3_temp)
+    else:
+        nest_gradient_term_3 = nest_gradient_term_3_temp.toarray()
 
     # Combine the terms. Note the "nest_coefs * (1 - nest_coefs)" is due to the
     # fact that we're estimating the logit of the nest coefficients instead of
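The matrix-versus-sparse branch above recurs several times in nested_choice_calcs.py; it could equally be factored into a small helper. A hypothetical sketch, assuming nothing beyond numpy and scipy (`to_dense` is not an existing pylogit function):

```python
import numpy as np
from scipy.sparse import issparse

def to_dense(result):
    # Hypothetical helper mirroring the repeated branch above: densify
    # scipy sparse results with .toarray() and downcast np.matrix
    # results (e.g. from a sparse .sum(axis=1)) with np.asarray().
    if issparse(result):
        return result.toarray()
    return np.asarray(result)

# Usage, e.g.:
# long_exp_sums = to_dense(
#     rows_to_nests.multiply(long_exp_sums_per_nest).sum(axis=1)).ravel()
```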
Note the "nest_coefs * (1 - nest_coefs)" is due to the # fact that we're estimating the logit of the nest coefficients instead of diff --git a/tests/test_asym_logit.py b/tests/test_asym_logit.py index ce31985..05b8c6b 100644 --- a/tests/test_asym_logit.py +++ b/tests/test_asym_logit.py @@ -321,7 +321,7 @@ def test_keyword_argument_constructor_in_fit_mle(self): # create the init_vals object since the ridge error check is after # the creation of this argurment. for kwargs in [kwargs_1, kwargs_2]: - self.assertRaisesRegexp(TypeError, + self.assertRaisesRegex(TypeError, "ridge", self.model_obj.fit_mle, *fit_args, @@ -343,7 +343,7 @@ def test_init_vals_length_error_in_fit_mle(self): # Test to ensure that the ValueError when using an # init_intercepts kwarg with an incorrect number of parameters - self.assertRaisesRegexp(ValueError, + self.assertRaisesRegex(ValueError, "dimension", self.model_obj.fit_mle, np.arange(num_coefs), @@ -786,7 +786,7 @@ def test_asym_transform_deriv_v(self): self.assertIsInstance(derivative, type(output)) self.assertEqual(len(derivative.shape), 2) self.assertEqual(derivative.shape, (num_rows, num_rows)) - npt.assert_allclose(np.diag(derivative.A), results) + npt.assert_allclose(np.diag(derivative.toarray()), results) return None @@ -811,7 +811,7 @@ def test_asym_transform_deriv_alpha(self): if test_output is None: self.assertIsNone(derivative_results) else: - npt.assert_allclose(test_output.A, derivative_results.A) + npt.assert_allclose(test_output.toarray(), derivative_results.toarray()) return None @@ -957,12 +957,15 @@ def test_asym_transform_deriv_shape(self): interim_results = self.fake_rows_to_alts.multiply(interim_results) # Calculate the correct results - results = interim_results.A.dot(dc_d_eta) + results = interim_results.toarray().dot(dc_d_eta) # Ensure the results are as expected self.assertIsInstance(derivative, type(output)) self.assertEqual(len(derivative.shape), 2) self.assertEqual(derivative.shape, (num_rows, num_alts - 1)) - npt.assert_allclose(derivative.A, results) + if isinstance(derivative, np.matrixlib.defmatrix.matrix): + npt.assert_allclose(np.asarray(derivative), results) + else: + npt.assert_allclose(derivative.toarray(), results) return None diff --git a/tests/test_base_multinomial_cm.py b/tests/test_base_multinomial_cm.py index a4d4917..f5a0566 100644 --- a/tests/test_base_multinomial_cm.py +++ b/tests/test_base_multinomial_cm.py @@ -209,7 +209,7 @@ def test_numeric_validity_check_for_specification_cols(self): """ # Create a variety of "bad" columns for 'x' bad_exogs = [np.array(['foo', 'bar', 'gerbil', 'sat', 'sun']), - np.array([1, 2, 3, np.NaN, 1]), + np.array([1, 2, 3, np.nan, 1]), np.array([1, 2, np.inf, 0.5, 0.9]), np.array([1, 2, -np.inf, 0.5, 0.9]), np.array([1, 'foo', -np.inf, 0.5, 0.9])] @@ -461,13 +461,13 @@ def test_ensure_all_mixing_vars_are_in_the_name_dict(self): bad_mixing_vars, name_dict, independent_variable_names) - self.assertRaisesRegexp(ValueError, + self.assertRaisesRegex(ValueError, msg_with_name_dict, func, bad_mixing_vars, name_dict, independent_variable_names) - self.assertRaisesRegexp(ValueError, + self.assertRaisesRegex(ValueError, msg_without_name_dict, func, bad_mixing_vars, @@ -496,7 +496,7 @@ def test_ensure_all_alternatives_are_chosen(self): # Perform the requisite tests self.assertIsNone(func("alt_id", "choice", good_df)) - self.assertRaisesRegexp(ValueError, + self.assertRaisesRegex(ValueError, "The following alternative ID's were not" " chosen in any choice situation:", func, @@ -688,7 +688,7 @@ def 
         for arg_set in good_args:
             self.assertIsNone(func(*arg_set))
 
-        self.assertRaisesRegexp(ValueError,
+        self.assertRaisesRegex(ValueError,
                                msg,
                                func,
                                *bad_args)
@@ -937,7 +937,7 @@ def test_check_result_dict_for_needed_keys(self):
             # Delete the needed key from the dictionary
             del base_dict[key]
             # Make sure that we get a value error when testing the function
-            self.assertRaisesRegexp(ValueError,
+            self.assertRaisesRegex(ValueError,
                                    "keys are missing",
                                    func,
                                    base_dict)
@@ -979,7 +979,7 @@ def test_create_results_summary(self):
             for attr in needed_attributes:
                 delattr(self.model_obj, attr)
             # Make sure that we get a value error when testing the function
-            self.assertRaisesRegexp(NotImplementedError,
+            self.assertRaisesRegex(NotImplementedError,
                                    msg,
                                    func)
             # Set the attribute back
@@ -1029,7 +1029,7 @@ def test_record_values_for_fit_summary_and_statsmodels(self):
         msg = "Call this function only after setting/calculating all other"
         msg_2 = " estimation results attributes"
         error_msg = msg + msg_2
-        self.assertRaisesRegexp(NotImplementedError,
+        self.assertRaisesRegex(NotImplementedError,
                                error_msg,
                                func)
         # Put the attribute back.
@@ -1094,7 +1094,7 @@ def test_create_fit_summary(self):
         msg = "Call this function only after setting/calculating all other"
         msg_2 = " estimation results attributes"
         error_msg = msg + msg_2
-        self.assertRaisesRegexp(NotImplementedError,
+        self.assertRaisesRegex(NotImplementedError,
                                error_msg,
                                func)
         # Put the attribute back.
diff --git a/tests/test_bootstrap_abc.py b/tests/test_bootstrap_abc.py
index 21ac35f..a7b11d8 100644
--- a/tests/test_bootstrap_abc.py
+++ b/tests/test_bootstrap_abc.py
@@ -2,7 +2,10 @@
 Tests for the bootstrap_abc.py file.
 """
 import unittest
-from collections import Iterable
+try:
+    from collections.abc import Iterable
+except ImportError:
+    from collections import Iterable
 
 import numpy as np
 import numpy.testing as npt
@@ -33,7 +36,7 @@ def nonsense_func(self):
 
         # Perform the desired tests
         self.assertIsNone(func(good_obj))
-        self.assertRaisesRegexp(ValueError, err_msg, func, bad_obj)
+        self.assertRaisesRegex(ValueError, err_msg, func, bad_obj)
         return None
 
     def test_ensure_rows_to_obs_validity(self):
@@ -49,7 +52,7 @@ def test_ensure_rows_to_obs_validity(self):
         for good_obj in good_objects:
             self.assertIsNone(func(good_obj))
         for bad_obj in bad_objects:
-            self.assertRaisesRegexp(ValueError, err_msg, func, bad_obj)
+            self.assertRaisesRegex(ValueError, err_msg, func, bad_obj)
         return None
 
     def test_ensure_wide_weights_is_1D_or_2D_ndarray(self):
@@ -68,7 +71,7 @@ def test_ensure_wide_weights_is_1D_or_2D_ndarray(self):
             self.assertIsNone(func(good_obj))
         for pos, bad_obj in enumerate(bad_objects):
             err_msg = err_msgs[pos]
-            self.assertRaisesRegexp(ValueError, err_msg, func, bad_obj)
+            self.assertRaisesRegex(ValueError, err_msg, func, bad_obj)
         return None
diff --git a/tests/test_bootstrap_controller.py b/tests/test_bootstrap_controller.py
index 1a4c721..0a3caa8 100644
--- a/tests/test_bootstrap_controller.py
+++ b/tests/test_bootstrap_controller.py
@@ -287,7 +287,7 @@ def test_ensure_replicates_kwarg_validity(self):
         for good_arg in good_args:
             self.assertIsNone(func(good_arg))
         for bad_arg in bad_args:
-            self.assertRaisesRegexp(ValueError,
+            self.assertRaisesRegex(ValueError,
                                    expected_error_msg,
                                    func,
                                    bad_arg)
@@ -715,7 +715,7 @@ def test_interval_type_error_in_calc_conf_intervals(self):
            "interval_type MUST be in `\['pi', 'bca', 'abc', 'all'\]`"
 
         # Ensure that the appropriate errors are raised.
-        self.assertRaisesRegexp(ValueError,
+        self.assertRaisesRegex(ValueError,
                                expected_error_msg,
                                func,
                                self.conf_percentage,
diff --git a/tests/test_bootstrap_mle.py b/tests/test_bootstrap_mle.py
index c26bcb5..17844b8 100644
--- a/tests/test_bootstrap_mle.py
+++ b/tests/test_bootstrap_mle.py
@@ -38,7 +38,7 @@ def __init__(self, fake_model_type):
 
         # Perform a test that should fail. Ensure the correct error is raised.
         current_obj = FakeModel(bad_type)
-        self.assertRaisesRegexp(ValueError,
+        self.assertRaisesRegex(ValueError,
                                err_msg,
                                func,
                                current_obj)
diff --git a/tests/test_bootstrap_sampler.py b/tests/test_bootstrap_sampler.py
index 0e0da46..a4a36ac 100644
--- a/tests/test_bootstrap_sampler.py
+++ b/tests/test_bootstrap_sampler.py
@@ -220,7 +220,7 @@ def test_check_column_existence(self):
                        "in `df.columns`.")
 
             self.assertIsNone(func(col, fake_df, **current_good_kwargs))
-            self.assertRaisesRegexp(ValueError,
+            self.assertRaisesRegex(ValueError,
                                    pattern,
                                    func,
                                    col,
@@ -244,7 +244,7 @@ def test_ensure_resampled_obs_ids_in_df(self):
 
         # Perform the desired tests
         self.assertIsNone(func(good_resampled_obs_ids, fake_orig_obs_ids))
-        self.assertRaisesRegexp(ValueError,
+        self.assertRaisesRegex(ValueError,
                                expected_err_msg,
                                func,
                                bad_resampled_obs_ids,
diff --git a/tests/test_bootstrap_utils.py b/tests/test_bootstrap_utils.py
index b14f103..64412bd 100644
--- a/tests/test_bootstrap_utils.py
+++ b/tests/test_bootstrap_utils.py
@@ -23,7 +23,7 @@ def test_check_conf_percentage_validity(self):
         for arg in good_args:
             self.assertIsNone(func(arg))
         for arg in bad_args:
-            self.assertRaisesRegexp(ValueError,
+            self.assertRaisesRegex(ValueError,
                                    expected_err_msg,
                                    func,
                                    arg)
@@ -46,7 +46,7 @@ def test_ensure_samples_is_ndim_ndarray(self):
         for arg in good_args:
             self.assertIsNone(func(arg, name=fake_name))
         for arg in bad_args:
-            self.assertRaisesRegexp(ValueError,
+            self.assertRaisesRegex(ValueError,
                                    expected_err_msg,
                                    func,
                                    arg,
diff --git a/tests/test_choice_calcs.py b/tests/test_choice_calcs.py
index accfb36..6554544 100644
--- a/tests/test_choice_calcs.py
+++ b/tests/test_choice_calcs.py
@@ -268,7 +268,7 @@ def test_array_size_error_in_calc_probabilities(self):
         msg_2 = " 2D coefficient array."
         msg = msg_1 + msg_2
 
-        self.assertRaisesRegexp(ValueError,
+        self.assertRaisesRegex(ValueError,
                                msg,
                                func,
                                *args)
@@ -297,7 +297,7 @@ def test_return_argument_error_in_calc_probabilities(self):
 
         # Note the error message that should be shown.
msg = "chosen_row_to_obs is None AND return_long_probs is False" - self.assertRaisesRegexp(ValueError, + self.assertRaisesRegex(ValueError, msg, func, *args) @@ -669,7 +669,7 @@ def transform_deriv_shapes(*args): # and finally to (Y- P)'[dh_d_intercept | dh_dv * X] error_vec = (self.choice_array - probs)[None, :] dh_d_beta = dh_dv.dot(self.fake_design) - dh_d_theta = np.concatenate((dh_d_intercept.A, dh_d_beta), axis=1) + dh_d_theta = np.concatenate((dh_d_intercept.toarray(), dh_d_beta), axis=1) expected_gradient = error_vec.dot(dh_d_theta).ravel() # Alias the function being tested @@ -752,7 +752,7 @@ def transform_deriv_shapes(*args): # and finally to (Y- P)'[dh_d_shape | dh_dv * X] error_vec = (self.choice_array - probs)[None, :] dh_d_beta = dh_dv.dot(self.fake_design) - dh_d_theta = np.concatenate((dh_d_shape.A, dh_d_beta), axis=1) + dh_d_theta = np.concatenate((dh_d_shape.toarray(), dh_d_beta), axis=1) expected_gradient = error_vec.dot(dh_d_theta).ravel() # Alias the function being tested @@ -847,8 +847,8 @@ def transform_deriv_shapes(*args): # and finally to (Y- P)'[dh_d_shape | dh_d_intercept | dh_dv * X] error_vec = (self.choice_array - probs)[None, :] dh_d_beta = dh_dv.dot(self.fake_design) - dh_d_theta = np.concatenate((dh_d_shape.A, - dh_d_intercept.A, + dh_d_theta = np.concatenate((dh_d_shape.toarray(), + dh_d_intercept.toarray(), dh_d_beta), axis=1) expected_gradient = error_vec.dot(dh_d_theta).ravel() @@ -1430,16 +1430,16 @@ def transform_deriv_shapes(sys_utilities, # h_33 is -X^T * dP_dH * X. This is the hessian in the standard MNL h_33 = np.asarray(-1 * design.T.dot(dP_dH.dot(design))) # h_32 is -X^T * dH_dV^T * dP_dH * dH_d_intercept - h_32 = np.asarray(-1 * design.T.dot(dP_dH.dot(dh_d_intercept.A))) + h_32 = np.asarray(-1 * design.T.dot(dP_dH.dot(dh_d_intercept.toarray()))) # h_31 is -X^T * dH_dV^T * dP_dH * dH_d_shape - h_31 = np.asarray(-1 * design.T.dot(dP_dH.dot(dh_d_shape.A))) + h_31 = np.asarray(-1 * design.T.dot(dP_dH.dot(dh_d_shape.toarray()))) # h_21 = -dH_d_intercept^T * dP_dH * dH_d_shape - h_21 = np.asarray(-1 * dh_d_intercept.T.dot(dP_dH.dot(dh_d_shape.A))) + h_21 = np.asarray(-1 * dh_d_intercept.T.dot(dP_dH.dot(dh_d_shape.toarray()))) # h_22 = -dH_d_intercept^T * dP_dH * dH_d_intercept h_22 = np.asarray(-1 * - dh_d_intercept.T.dot(dP_dH.dot(dh_d_intercept.A))) + dh_d_intercept.T.dot(dP_dH.dot(dh_d_intercept.toarray()))) # h_11 = -dH_d_shape^T * dP_dH * dH_d_shape - h_11 = np.asarray(-1 * dh_d_shape.T.dot(dP_dH.dot(dh_d_shape.A))) + h_11 = np.asarray(-1 * dh_d_shape.T.dot(dP_dH.dot(dh_d_shape.toarray()))) # Create the final hessian top_row = np.concatenate((h_11, h_21.T, h_31.T), axis=1) @@ -1557,10 +1557,10 @@ def transform_deriv_shapes(sys_utilities, # h_33 is -X^T * dP_dH * X. This is the hessian in the standard MNL h_33 = np.asarray(-1 * design.T.dot(dP_dH.dot(design))) # h_32 is -X^T * dH_dV^T * dP_dH * dH_d_intercept - h_32 = np.asarray(-1 * design.T.dot(dP_dH.dot(dh_d_intercept.A))) + h_32 = np.asarray(-1 * design.T.dot(dP_dH.dot(dh_d_intercept.toarray()))) # h_22 = -dH_d_intercept^T * dP_dH * dH_d_intercept h_22 = np.asarray(-1 * - dh_d_intercept.T.dot(dP_dH.dot(dh_d_intercept.A))) + dh_d_intercept.T.dot(dP_dH.dot(dh_d_intercept.toarray()))) # Create the final hessian middle_row = np.concatenate((h_22, h_32.T), axis=1) @@ -1670,9 +1670,9 @@ def transform_deriv_shapes(sys_utilities, # h_33 is -X^T * dP_dH * X. 
         h_33 = np.asarray(-1 * design.T.dot(dP_dH.dot(design)))
         # h_31 is -X^T * dH_dV^T * dP_dH * dH_d_shape
-        h_31 = np.asarray(-1 * design.T.dot(dP_dH.dot(dh_d_shape.A)))
+        h_31 = np.asarray(-1 * design.T.dot(dP_dH.dot(dh_d_shape.toarray())))
         # h_11 = -dH_d_shape^T * dP_dH * dH_d_shape
-        h_11 = np.asarray(-1 * dh_d_shape.T.dot(dP_dH.dot(dh_d_shape.A)))
+        h_11 = np.asarray(-1 * dh_d_shape.T.dot(dP_dH.dot(dh_d_shape.toarray())))
 
         # Create the final hessian
         top_row = np.concatenate((h_11, h_31.T), axis=1)
diff --git a/tests/test_choice_tools.py b/tests/test_choice_tools.py
index e967ed2..a0524f2 100644
--- a/tests/test_choice_tools.py
+++ b/tests/test_choice_tools.py
@@ -756,11 +756,11 @@ def test_ensure_contiguity_in_observation_rows(self):
 
         # Perform the tests
         self.assertIsNone(func(good_obs_ids))
-        self.assertRaisesRegexp(ValueError,
+        self.assertRaisesRegex(ValueError,
                                "are not contiguous:",
                                func,
                                bad_obs_ids)
-        self.assertRaisesRegexp(ValueError,
+        self.assertRaisesRegex(ValueError,
                                "[2]",
                                func,
                                bad_obs_ids)
@@ -782,7 +782,7 @@ def test_ensure_object_is_string(self):
         # 'bad' objects.
         self.assertIsNone(func(good_obj, "test_object"))
         for bad_obj in bad_objects:
-            self.assertRaisesRegexp(TypeError,
+            self.assertRaisesRegex(TypeError,
                                    "must be a string.",
                                    func,
                                    bad_obj,
@@ -805,7 +805,7 @@ def test_ensure_object_is_ndarray(self):
         # 'bad' objects.
         self.assertIsNone(func(good_obj, "test_object"))
         for bad_obj in bad_objects:
-            self.assertRaisesRegexp(TypeError,
+            self.assertRaisesRegex(TypeError,
                                    "must be a np.ndarray.",
                                    func,
                                    bad_obj,
@@ -866,9 +866,9 @@ def test_create_sparse_mapping(self):
             self.assertTrue(isspmatrix_csr(sorted_results))
             self.assertTrue(isspmatrix_csr(null_results))
 
-            npt.assert_allclose(orig_order_mapping, orig_order_results.A)
-            npt.assert_allclose(sorted_mapping, sorted_results.A)
-            npt.assert_allclose(null_id_mapping, null_results.A)
+            npt.assert_allclose(orig_order_mapping, orig_order_results.toarray())
+            npt.assert_allclose(sorted_mapping, sorted_results.toarray())
+            npt.assert_allclose(null_id_mapping, null_results.toarray())
 
         return None
diff --git a/tests/test_clog_log.py b/tests/test_clog_log.py
index 12aa0da..1ff618d 100644
--- a/tests/test_clog_log.py
+++ b/tests/test_clog_log.py
@@ -249,7 +249,7 @@ def test_cloglog_transform_deriv_v(self):
         # derivative should be 1.
         test_index = np.array([-40, 1, 7])
         # Note we use a compressed sparse-row matrix so that we can easily
-        # convert the output matrix to a numpy array using the '.A' attribute.
+        # convert the output matrix to a numpy array using the '.toarray()' method.
         test_output = diags(np.ones(test_index.shape[0]), 0, format='csr')
@@ -274,7 +274,7 @@ def test_cloglog_transform_deriv_v(self):
 
         self.assertIsInstance(derivative, type(test_output))
         self.assertEqual(len(derivative.shape), 2)
         self.assertEqual(derivative.shape, (3, 3))
-        npt.assert_allclose(correct_derivatives, derivative.A)
+        npt.assert_allclose(correct_derivatives, derivative.toarray())
 
         return None
@@ -314,7 +314,7 @@ def test_cloglog_transform_deriv_alpha(self):
 
         if test_output is None:
             self.assertIsNone(derivative_results)
         else:
-            npt.assert_allclose(test_output.A, derivative_results.A)
+            npt.assert_allclose(test_output.toarray(), derivative_results.toarray())
 
         return None
@@ -495,7 +495,7 @@ def test_keyword_argument_constructor_in_fit_mle(self):
         # Test to ensure that the raised Value Err is printed when using
         # either of these two kwargs
         for kwargs in [kwargs_1, kwargs_2]:
-            self.assertRaisesRegexp(TypeError,
+            self.assertRaisesRegex(TypeError,
                                    "ridge",
                                    self.base_clog.fit_mle,
                                    *fit_args,
@@ -525,7 +525,7 @@ def test_insufficient_initial_values_in_fit_mle(self):
         for bad_kwargs in [kwargs, kwargs_2]:
             # Test to ensure that the ValueError when not passing
             # kwarg with an incorrect number of parameters
-            self.assertRaisesRegexp(ValueError,
+            self.assertRaisesRegex(ValueError,
                                    "must pass init_coefs",
                                    self.base_clog.fit_mle,
                                    *fit_args, **bad_kwargs)
@@ -546,7 +546,7 @@ def test_init_vals_length_error_in_fit_mle(self):
 
         # Test to ensure that the ValueError when using an
         # init_intercepts kwarg with an incorrect number of parameters
-        self.assertRaisesRegexp(ValueError,
+        self.assertRaisesRegex(ValueError,
                                "dimension",
                                self.base_clog.fit_mle,
                                np.arange(num_coefs),
diff --git a/tests/test_estimation.py b/tests/test_estimation.py
index f09b55b..90dce1e 100644
--- a/tests/test_estimation.py
+++ b/tests/test_estimation.py
@@ -202,7 +202,7 @@ def test_constructor(self):
             # Get the mapping matrix as stored on the model object.
             matrix_on_object = getattr(estimation_object, attr)
             if matrix_on_object is not None:
-                npt.assert_allclose(matrix_on_object.A, mapping_dict[attr].A)
+                npt.assert_allclose(matrix_on_object.toarray(), mapping_dict[attr].toarray())
             else:
                 self.assertIsNone(mapping_dict[attr])
@@ -252,7 +252,7 @@ def test_not_implemented_error_in_example_functions(self):
         for method_name in example_methods:
             func = getattr(estimation_object, method_name)
             error_msg = "Method should be defined by descendant classes"
-            self.assertRaisesRegexp(NotImplementedError,
+            self.assertRaisesRegex(NotImplementedError,
                                    error_msg,
                                    func,
                                    None)
@@ -277,7 +277,7 @@ def test_ensure_positivity_and_length_of_weights(self):
         for weights in good_weights:
             self.assertIsNone(func(weights, fake_data))
         for pos, weights in enumerate(bad_weights):
-            self.assertRaisesRegexp(ValueError,
+            self.assertRaisesRegex(ValueError,
                                    expected_error_msgs[pos],
                                    func,
                                    weights,
diff --git a/tests/test_mixed_logit.py b/tests/test_mixed_logit.py
index d170a80..c688cc7 100755
--- a/tests/test_mixed_logit.py
+++ b/tests/test_mixed_logit.py
@@ -313,7 +313,7 @@ def test_create_expanded_design_for_mixing(self):
         # Ensure that a ValueError is raised if we execute
         # mlc.create_expanded_design_for_mixing with the wrong arguments.
         args[2] = [2, 3, 4]
-        self.assertRaisesRegexp(ValueError,
+        self.assertRaisesRegex(ValueError,
                                "mixing_pos",
                                mlc.create_expanded_design_for_mixing,
                                *args)
@@ -331,13 +331,13 @@ def test_check_length_of_initial_values(self):
         for i in [-1, 1]:
             init_vals = np.ones(self.fake_design_3d.shape[2] + i)
 
-            self.assertRaisesRegexp(ValueError,
+            self.assertRaisesRegex(ValueError,
                                    "wrong dimension",
                                    func,
                                    self.fake_design_3d,
                                    init_vals)
 
-            self.assertRaisesRegexp(ValueError,
+            self.assertRaisesRegex(ValueError,
                                    "wrong dimension",
                                    func_2,
                                    init_vals)
@@ -442,7 +442,7 @@ def test_calc_choice_sequence_probs(self):
         # Ensure that the appropriate error is raised if we execute
         # calc_choice_sequence_probs() with incorrect arguments.
         args[-1] = "foo"
-        self.assertRaisesRegexp(ValueError,
+        self.assertRaisesRegex(ValueError,
                                "return_type",
                                mlc.calc_choice_sequence_probs,
                                *args)
@@ -895,7 +895,7 @@ def test_panel_predict(self):
             del new_predictive_df[col]
 
             # Ensure the panel_predict function raises an error
-            self.assertRaisesRegexp(ValueError,
+            self.assertRaisesRegex(ValueError,
                                    "not in data.columns",
                                    self.mixl_obj.panel_predict,
                                    new_predictive_df,
@@ -916,7 +916,7 @@ def test_value_error_in_panel_predict_for_incorrect_args(self):
         args = [None, 20]
         msg = "choice_col is None AND return_long_probs == False"
 
-        self.assertRaisesRegexp(ValueError,
+        self.assertRaisesRegex(ValueError,
                                msg,
                                func,
                                *args,
diff --git a/tests/test_nested_choice_calcs.py b/tests/test_nested_choice_calcs.py
index 4b636db..b381949 100644
--- a/tests/test_nested_choice_calcs.py
+++ b/tests/test_nested_choice_calcs.py
@@ -141,7 +141,7 @@ def test_2d_error_in_calc_nested_probs(self):
             args[pos] = array_2d
 
             # Ensure that the appropriate error is raised.
-            self.assertRaisesRegexp(NotImplementedError,
+            self.assertRaisesRegex(NotImplementedError,
                                    msg,
                                    func,
                                    *args)
@@ -174,7 +174,7 @@ def test_return_type_error_in_calc_nested_probs(self):
         kwargs = {"return_type": "long_probs"}
         for return_string in bad_return_types:
             kwargs["return_type"] = return_string
-            self.assertRaisesRegexp(ValueError,
+            self.assertRaisesRegex(ValueError,
                                    msg,
                                    func,
                                    *args,
@@ -206,7 +206,7 @@ def test_return_type_mismatch_error_in_calc_nested_probs(self):
                   "chosen_row_to_obs": None}
         for return_string in bad_return_types:
             kwargs["return_type"] = return_string
-            self.assertRaisesRegexp(ValueError,
+            self.assertRaisesRegex(ValueError,
                                    msg,
                                    func,
                                    *args,
diff --git a/tests/test_nested_logit.py b/tests/test_nested_logit.py
index 75810ed..6dd926a 100644
--- a/tests/test_nested_logit.py
+++ b/tests/test_nested_logit.py
@@ -276,7 +276,7 @@ def test_invalid_init_vals_length_in_estimate(self):
         for i in [-1, 1]:
             init_values = np.arange(self.fake_all_params.shape[0] + i)
 
-            self.assertRaisesRegexp(ValueError,
+            self.assertRaisesRegex(ValueError,
                                    "values are of the wrong dimension",
                                    func,
                                    init_values)
diff --git a/tests/test_pylogit.py b/tests/test_pylogit.py
index 2bf3f71..0fa47a0 100644
--- a/tests/test_pylogit.py
+++ b/tests/test_pylogit.py
@@ -281,10 +281,10 @@ def test_ensure_valid_model_type(self):
         for good_example in test_types:
             self.assertIsNone(func(good_example, test_types))
         for bad_example in bad_types:
-            self.assertRaisesRegexp(ValueError,
-                                    partial_error_msg,
-                                    func,
-                                    bad_example,
-                                    test_types)
+            self.assertRaisesRegex(ValueError,
+                                   partial_error_msg,
+                                   func,
+                                   bad_example,
+                                   test_types)
         return None
diff --git a/tests/test_scobit.py b/tests/test_scobit.py
index 9f523c7..0d834df 100644
--- a/tests/test_scobit.py
+++ b/tests/test_scobit.py
@@ -314,7 +314,7 @@ def test_keyword_argument_constructor_in_fit_mle(self):
         # Test to ensure that the raised Value Err is printed when using
         # either of these two kwargs
         for kwargs in [kwargs_1, kwargs_2]:
-            self.assertRaisesRegexp(TypeError,
+            self.assertRaisesRegex(TypeError,
                                    "ridge",
                                    self.model_obj.fit_mle,
                                    *fit_args,
@@ -336,7 +336,7 @@ def test_init_vals_length_error_in_fit_mle(self):
 
         # Test to ensure that the ValueError when using an
         # init_intercepts kwarg with an incorrect number of parameters
-        self.assertRaisesRegexp(ValueError,
+        self.assertRaisesRegex(ValueError,
                                "dimension",
                                self.model_obj.fit_mle,
                                np.arange(num_coefs),
@@ -606,7 +606,7 @@ def test_scobit_transform_deriv_v(self):
         # be 1.
         test_index = np.array([-2, 0, 2, -800, 300])
         # Note we use a compressed sparse-row matrix so that we can easily
-        # convert the output matrix to a numpy array using the '.A' attribute.
+        # convert the output matrix to a numpy array using the '.toarray()' method.
         num_rows = test_index.shape[0]
         test_output = diags(np.ones(num_rows), 0, format='csr')
@@ -643,7 +643,7 @@ def test_scobit_transform_deriv_v(self):
 
         self.assertEqual(len(derivative.shape), 2)
         self.assertEqual(derivative.shape, (num_rows, num_rows))
         npt.assert_allclose(correct_derivatives,
-                            np.diag(derivative.A))
+                            np.diag(derivative.toarray()))
 
         return None
@@ -668,7 +668,7 @@ def test_scobit_transform_deriv_alpha(self):
 
         if test_output is None:
             self.assertIsNone(derivative_results)
         else:
-            npt.assert_allclose(test_output.A, derivative_results.A)
+            npt.assert_allclose(test_output.toarray(), derivative_results.toarray())
 
         return None
@@ -688,7 +688,7 @@ def test_scobit_transform_deriv_shape(self):
         # of the natural log of 1 + exp(-index).
         test_index = np.array([-10, 0, 2, -800, 300])
         # Note we use a compressed sparse-row matrix so that we can easily
-        # convert the output matrix to a numpy array using the '.A' attribute.
+        # convert the output matrix to a numpy array using the '.toarray()' method.
         num_rows = test_index.shape[0]
         test_output = diags(np.ones(num_rows), 0, format='csr')
@@ -735,6 +735,6 @@ def test_scobit_transform_deriv_shape(self):
         self.assertEqual(len(derivative.shape), 2)
         self.assertEqual(derivative.shape, (num_rows, num_rows))
         npt.assert_allclose(correct_derivatives,
-                            np.diag(derivative.A))
+                            np.diag(derivative.toarray()))
 
         return None
diff --git a/tests/test_uneven_logit.py b/tests/test_uneven_logit.py
index d184c29..6409e84 100644
--- a/tests/test_uneven_logit.py
+++ b/tests/test_uneven_logit.py
@@ -321,7 +321,7 @@ def test_keyword_argument_constructor_in_fit_mle(self):
         # Test to ensure that the raised Value Err is printed when using
         # either of these two kwargs
         for kwargs in [kwargs_1, kwargs_2]:
-            self.assertRaisesRegexp(TypeError,
+            self.assertRaisesRegex(TypeError,
                                    "ridge",
                                    self.model_obj.fit_mle,
                                    *fit_args,
@@ -343,7 +343,7 @@ def test_init_vals_length_error_in_fit_mle(self):
 
         # Test to ensure that the ValueError when using an
         # init_intercepts kwarg with an incorrect number of parameters
-        self.assertRaisesRegexp(ValueError,
+        self.assertRaisesRegex(ValueError,
                                "dimension",
                                self.model_obj.fit_mle,
                                np.arange(num_coefs),
@@ -552,7 +552,7 @@ def test_uneven_transform_deriv_v(self):
         # overflow when calculating the derivative.
         test_index = np.array([-2, 0, 2, -3000, 800])
         # Note we use a compressed sparse-row matrix so that we can easily
-        # convert the output matrix to a numpy array using the '.A' attribute.
+        # convert the output matrix to a numpy array using the '.toarray()' method.
         num_rows = test_index.shape[0]
         test_output = diags(np.ones(num_rows), 0, format='csr')
@@ -587,7 +587,7 @@ def test_uneven_transform_deriv_v(self):
 
         self.assertEqual(len(derivative.shape), 2)
         self.assertEqual(derivative.shape, (num_rows, num_rows))
         npt.assert_allclose(correct_derivatives,
-                            np.diag(derivative.A))
+                            np.diag(derivative.toarray()))
 
         return None
@@ -612,7 +612,7 @@ def test_uneven_transform_deriv_alpha(self):
 
         if test_output is None:
             self.assertIsNone(derivative_results)
         else:
-            npt.assert_allclose(test_output.A, derivative_results.A)
+            npt.assert_allclose(test_output.toarray(), derivative_results.toarray())
 
         return None
@@ -633,7 +633,7 @@ def test_uneven_transform_deriv_shape(self):
         test_index = np.array([-10, 0, 2, -10000, 1e8])
 
         # Note we use a compressed sparse-row matrix so that we can easily
-        # convert the output matrix to a numpy array using the '.A' attribute.
+        # convert the output matrix to a numpy array using the '.toarray()' method.
         num_rows = test_index.shape[0]
         test_output = diags(np.ones(num_rows), 0, format='csr')
@@ -673,6 +673,6 @@ def test_uneven_transform_deriv_shape(self):
         self.assertEqual(len(derivative.shape), 2)
         self.assertEqual(derivative.shape, (num_rows, num_rows))
         npt.assert_allclose(correct_derivatives,
-                            np.diag(derivative.A))
+                            np.diag(derivative.toarray()))
 
         return None
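For context on the test-suite renames: `assertRaisesRegex` has been available since Python 3.2, and the deprecated `assertRaisesRegexp` alias was removed in Python 3.12, so the straight rename is safe on any modern interpreter. The `Iterable` import, by contrast, needs a try/except guard because the plain `collections` alias was removed in Python 3.10. A minimal sketch of that pattern:

```python
try:
    # Location since Python 3.3; the bare `collections.Iterable`
    # alias was removed entirely in Python 3.10.
    from collections.abc import Iterable
except ImportError:
    # Fallback for Python 2 and very old Python 3 interpreters.
    from collections import Iterable

assert isinstance([1, 2, 3], Iterable)
```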