Skip to content

Commit 2c584a1

Browse files
committed
fix
1 parent 0e26c3d commit 2c584a1

File tree

1 file changed

+0
-6
lines changed
  • sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/python

1 file changed

+0
-6
lines changed

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/python/PythonScan.scala

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -16,18 +16,14 @@
  */
 package org.apache.spark.sql.execution.datasources.v2.python
 
-import org.apache.commons.lang3.StringUtils
-
 import org.apache.spark.JobArtifactSet
-import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.connector.metric.CustomMetric
 import org.apache.spark.sql.connector.read._
 import org.apache.spark.sql.connector.read.streaming.MicroBatchStream
 import org.apache.spark.sql.internal.connector.SupportsMetadata
 import org.apache.spark.sql.sources.Filter
 import org.apache.spark.sql.types.StructType
 import org.apache.spark.sql.util.CaseInsensitiveStringMap
-import org.apache.spark.util.Utils
 
 class PythonScan(
     ds: PythonDataSourceV2,
@@ -36,8 +32,6 @@ class PythonScan(
     options: CaseInsensitiveStringMap,
     supportedFilters: Array[Filter]
   ) extends Scan with SupportsMetadata {
-
-  private lazy val sparkSession = SparkSession.active
-
   override def toBatch: Batch = new PythonBatch(ds, shortName, outputSchema, options)
 
   override def toMicroBatchStream(checkpointLocation: String): MicroBatchStream =

0 commit comments

Comments
 (0)