#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import tempfile
import unittest

import numpy as np

from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession
from pyspark.ml.regression import (
    LinearRegression,
    LinearRegressionModel,
    LinearRegressionSummary,
    LinearRegressionTrainingSummary,
)


class RegressionTestsMixin:
    @property
    def df(self):
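        """Small weighted toy dataset of (label, weight, 2-dim features) rows,
        coalesced into a single sorted partition so results are reproducible."""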
        return (
            self.spark.createDataFrame(
                [
                    (1.0, 1.0, Vectors.dense(0.0, 5.0)),
                    (0.0, 2.0, Vectors.dense(1.0, 2.0)),
                    (1.5, 3.0, Vectors.dense(2.0, 1.0)),
                    (0.7, 4.0, Vectors.dense(1.5, 3.0)),
                ],
                ["label", "weight", "features"],
            )
            .coalesce(1)
            .sortWithinPartitions("weight")
        )

    def test_linear_regression(self):
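        """End-to-end check of LinearRegression: params, estimator/model
        persistence, fitting, prediction, and the training/evaluation summaries."""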
        df = self.df
        lr = LinearRegression(
            regParam=0.0,
            maxIter=2,
            solver="normal",
            weightCol="weight",
        )
        self.assertEqual(lr.getRegParam(), 0)
        self.assertEqual(lr.getMaxIter(), 2)
        self.assertEqual(lr.getSolver(), "normal")
        self.assertEqual(lr.getWeightCol(), "weight")

        # Estimator save & load
        with tempfile.TemporaryDirectory(prefix="linear_regression") as d:
            lr.write().overwrite().save(d)
            lr2 = LinearRegression.load(d)
            self.assertEqual(str(lr), str(lr2))

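        # Fit with the "normal" (weighted least squares) solver and compare the
        # learned intercept and coefficients against precomputed values.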
        model = lr.fit(df)
        self.assertEqual(model.numFeatures, 2)
        self.assertTrue(np.allclose(model.scale, 1.0, atol=1e-4))
        self.assertTrue(np.allclose(model.intercept, -0.35, atol=1e-4))
        self.assertTrue(np.allclose(model.coefficients, [0.65, 0.1125], atol=1e-4))

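        # transform() keeps the input columns and appends a "prediction" column.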
        output = model.transform(df)
        expected_cols = [
            "label",
            "weight",
            "features",
            "prediction",
        ]
        self.assertEqual(output.columns, expected_cols)
        self.assertEqual(output.count(), 4)

        self.assertTrue(
            np.allclose(model.predict(Vectors.dense(0.0, 5.0)), 0.21249999999999963, atol=1e-4)
        )

        # Model summary
        summary = model.summary
        self.assertTrue(isinstance(summary, LinearRegressionSummary))
        self.assertTrue(isinstance(summary, LinearRegressionTrainingSummary))
        self.assertEqual(summary.predictions.columns, expected_cols)
        self.assertEqual(summary.predictions.count(), 4)
        self.assertEqual(summary.residuals.columns, ["residuals"])
        self.assertEqual(summary.residuals.count(), 4)

        self.assertEqual(summary.degreesOfFreedom, 1)
        self.assertEqual(summary.numInstances, 4)
        self.assertEqual(summary.objectiveHistory, [0.0])
        self.assertTrue(
            np.allclose(
                summary.coefficientStandardErrors,
                [1.2859821149611763, 0.6248749874975031, 3.1645497310044184],
                atol=1e-4,
            )
        )
        self.assertTrue(
            np.allclose(
                summary.devianceResiduals, [-0.7424621202458727, 0.7875000000000003], atol=1e-4
            )
        )
        self.assertTrue(
            np.allclose(
                summary.pValues,
                [0.7020630236843428, 0.8866003086182783, 0.9298746994547682],
                atol=1e-4,
            )
        )
        self.assertTrue(
            np.allclose(
                summary.tValues,
                [0.5054502643838291, 0.1800360108036021, -0.11060025272186746],
                atol=1e-4,
            )
        )
        self.assertTrue(np.allclose(summary.explainedVariance, 0.07997500000000031, atol=1e-4))
        self.assertTrue(np.allclose(summary.meanAbsoluteError, 0.4200000000000002, atol=1e-4))
        self.assertTrue(np.allclose(summary.meanSquaredError, 0.20212500000000005, atol=1e-4))
        self.assertTrue(np.allclose(summary.rootMeanSquaredError, 0.44958314025327956, atol=1e-4))
        self.assertTrue(np.allclose(summary.r2, 0.4427212572373862, atol=1e-4))
        self.assertTrue(np.allclose(summary.r2adj, -0.6718362282878414, atol=1e-4))

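        # evaluate() recomputes metrics on a given DataFrame and returns a plain
        # LinearRegressionSummary (not a training summary).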
        summary2 = model.evaluate(df)
        self.assertTrue(isinstance(summary2, LinearRegressionSummary))
        self.assertFalse(isinstance(summary2, LinearRegressionTrainingSummary))
        self.assertEqual(summary2.predictions.columns, expected_cols)
        self.assertEqual(summary2.predictions.count(), 4)
        self.assertEqual(summary2.residuals.columns, ["residuals"])
        self.assertEqual(summary2.residuals.count(), 4)

        self.assertEqual(summary2.degreesOfFreedom, 1)
        self.assertEqual(summary2.numInstances, 4)
        self.assertTrue(
            np.allclose(
                summary2.devianceResiduals, [-0.7424621202458727, 0.7875000000000003], atol=1e-4
            )
        )
        self.assertTrue(np.allclose(summary2.explainedVariance, 0.07997500000000031, atol=1e-4))
        self.assertTrue(np.allclose(summary2.meanAbsoluteError, 0.4200000000000002, atol=1e-4))
        self.assertTrue(np.allclose(summary2.meanSquaredError, 0.20212500000000005, atol=1e-4))
        self.assertTrue(np.allclose(summary2.rootMeanSquaredError, 0.44958314025327956, atol=1e-4))
        self.assertTrue(np.allclose(summary2.r2, 0.4427212572373862, atol=1e-4))
        self.assertTrue(np.allclose(summary2.r2adj, -0.6718362282878414, atol=1e-4))

        # Model save & load
        with tempfile.TemporaryDirectory(prefix="linear_regression_model") as d:
            model.write().overwrite().save(d)
            model2 = LinearRegressionModel.load(d)
            self.assertEqual(str(model), str(model2))


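# Concrete test case wiring the mixin's tests to a local SparkSession.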
class RegressionTests(RegressionTestsMixin, unittest.TestCase):
    def setUp(self) -> None:
        self.spark = SparkSession.builder.master("local[4]").getOrCreate()

    def tearDown(self) -> None:
        self.spark.stop()


if __name__ == "__main__":
    from pyspark.ml.tests.test_regression import *  # noqa: F401,F403

    try:
        import xmlrunner  # type: ignore[import]

        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)