Skip to content

Commit

Permalink
Revert "[SPARK-37779][SQL] Make ColumnarToRowExec plan canonicalizable after (de)serialization"
Browse files Browse the repository at this point in the history

This reverts commit e17ab6e.
  • Loading branch information
dongjoon-hyun committed Jan 5, 2022
1 parent 3bcd036 commit 94a69ff
Show file tree
Hide file tree
Showing 2 changed files with 1 addition and 21 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -64,8 +64,7 @@ trait ColumnarToRowTransition extends UnaryExecNode
* [[MapPartitionsInRWithArrowExec]]. Eventually this should replace those implementations.
*/
case class ColumnarToRowExec(child: SparkPlan) extends ColumnarToRowTransition with CodegenSupport {
// supportsColumnar requires to be only called on driver side, see also SPARK-37779.
assert(TaskContext.get != null || child.supportsColumnar)
assert(child.supportsColumnar)

override def output: Seq[Attribute] = child.output

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -88,23 +88,4 @@ class SparkPlanSuite extends QueryTest with SharedSparkSession {
test("SPARK-30780 empty LocalTableScan should use RDD without partitions") {
assert(LocalTableScanExec(Nil, Nil).execute().getNumPartitions == 0)
}

test("SPARK-37779: ColumnarToRowExec should be canonicalizable after being (de)serialized") {
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "parquet") {
withTempPath { path =>
spark.range(1).write.parquet(path.getAbsolutePath)
val df = spark.read.parquet(path.getAbsolutePath)
val columnarToRowExec =
df.queryExecution.executedPlan.collectFirst { case p: ColumnarToRowExec => p }.get
try {
spark.range(1).foreach { _ =>
columnarToRowExec.canonicalized
()
}
} catch {
case e: Throwable => fail("ColumnarToRowExec was not canonicalizable", e)
}
}
}
}
}

0 comments on commit 94a69ff

Please sign in to comment.