This repository was archived by the owner on Jan 9, 2020. It is now read-only.

Commit af8a34c

ueshin authored and gatorsmile committed
[SPARK-22159][SQL][FOLLOW-UP] Make config names consistently end with "enabled".
## What changes were proposed in this pull request?

This is a follow-up of apache#19384. The previous PR modified only the definitions of the config names; the names that appear as string literals in runtime code and tests also need to be updated.

## How was this patch tested?

Existing tests, with the config names updated.

Author: Takuya UESHIN <[email protected]>

Closes apache#19462 from ueshin/issues/SPARK-22159/fup1.
1 parent bebd2e1 commit af8a34c
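
For reference, a minimal PySpark sketch of the renamed keys in use. The SparkSession setup is illustrative and not part of the commit; it assumes a local Spark build that includes this change.

```python
from pyspark.sql import SparkSession

# Illustrative setup, not part of the commit.
spark = SparkSession.builder.master("local[*]").appName("spark-22159-fup1").getOrCreate()

# The keys renamed by SPARK-22159 now end with "enabled" rather than "enable":
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
spark.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
```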

File tree: 6 files changed (+23 additions, -23 deletions)


python/pyspark/sql/dataframe.py

Lines changed: 2 additions & 2 deletions
@@ -1878,7 +1878,7 @@ def toPandas(self):
         1    5    Bob
         """
         import pandas as pd
-        if self.sql_ctx.getConf("spark.sql.execution.arrow.enable", "false").lower() == "true":
+        if self.sql_ctx.getConf("spark.sql.execution.arrow.enabled", "false").lower() == "true":
             try:
                 import pyarrow
                 tables = self._collectAsArrow()
@@ -1889,7 +1889,7 @@ def toPandas(self):
                     return pd.DataFrame.from_records([], columns=self.columns)
             except ImportError as e:
                 msg = "note: pyarrow must be installed and available on calling Python process " \
-                    "if using spark.sql.execution.arrow.enable=true"
+                    "if using spark.sql.execution.arrow.enabled=true"
                 raise ImportError("%s\n%s" % (e.message, msg))
         else:
             pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
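
The renamed key gates the Arrow-backed `toPandas()` path shown above. A hedged usage sketch, assuming an existing SparkSession named `spark` and an installed `pyarrow`:

```python
# Illustrative only (not part of the diff): toggle the Arrow conversion path
# that the code above guards. With the key set to "false", toPandas() falls
# back to the plain row-collection path.
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], ["age", "name"])
pdf = df.toPandas()  # uses the Arrow path while the config is "true"
```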

python/pyspark/sql/tests.py

Lines changed: 3 additions & 3 deletions
@@ -3088,7 +3088,7 @@ class ArrowTests(ReusedPySparkTestCase):
     def setUpClass(cls):
         ReusedPySparkTestCase.setUpClass()
         cls.spark = SparkSession(cls.sc)
-        cls.spark.conf.set("spark.sql.execution.arrow.enable", "true")
+        cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
         cls.schema = StructType([
             StructField("1_str_t", StringType(), True),
             StructField("2_int_t", IntegerType(), True),
@@ -3120,9 +3120,9 @@ def test_null_conversion(self):

     def test_toPandas_arrow_toggle(self):
         df = self.spark.createDataFrame(self.data, schema=self.schema)
-        self.spark.conf.set("spark.sql.execution.arrow.enable", "false")
+        self.spark.conf.set("spark.sql.execution.arrow.enabled", "false")
         pdf = df.toPandas()
-        self.spark.conf.set("spark.sql.execution.arrow.enable", "true")
+        self.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
         pdf_arrow = df.toPandas()
         self.assertFramesEqual(pdf_arrow, pdf)


sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala

Lines changed: 1 addition & 1 deletion
@@ -539,7 +539,7 @@ case class HashAggregateExec(
   private def enableTwoLevelHashMap(ctx: CodegenContext) = {
     if (!checkIfFastHashMapSupported(ctx)) {
       if (modes.forall(mode => mode == Partial || mode == PartialMerge) && !Utils.isTesting) {
-        logInfo("spark.sql.codegen.aggregate.map.twolevel.enable is set to true, but"
+        logInfo("spark.sql.codegen.aggregate.map.twolevel.enabled is set to true, but"
           + " current version of codegened fast hashmap does not support this aggregate.")
       }
     } else {
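
The INFO message above fires when the two-level fast hash map is requested but the aggregate cannot use it. A minimal sketch of setting the renamed flag around an aggregation, assuming an existing SparkSession `spark`:

```python
# Illustrative only: enable the renamed two-level hash map flag. Whether the
# fast map is actually used depends on the aggregation modes, as the Scala
# code above checks; unsupported cases only log the INFO message.
spark.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
spark.range(0, 1000000).selectExpr("id % 100 AS k").groupBy("k").count().show()
```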

sql/core/src/test/scala/org/apache/spark/sql/AggregateHashMapSuite.scala

Lines changed: 6 additions & 6 deletions
@@ -24,29 +24,29 @@ import org.apache.spark.SparkConf
 class SingleLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
   override protected def sparkConf: SparkConf = super.sparkConf
     .set("spark.sql.codegen.fallback", "false")
-    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")

   // adding some checking after each test is run, assuring that the configs are not changed
   // in test code
   after {
     assert(sparkConf.get("spark.sql.codegen.fallback") == "false",
       "configuration parameter changed in test body")
-    assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enable") == "false",
+    assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enabled") == "false",
       "configuration parameter changed in test body")
   }
 }

 class TwoLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
   override protected def sparkConf: SparkConf = super.sparkConf
     .set("spark.sql.codegen.fallback", "false")
-    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")

   // adding some checking after each test is run, assuring that the configs are not changed
   // in test code
   after {
     assert(sparkConf.get("spark.sql.codegen.fallback") == "false",
       "configuration parameter changed in test body")
-    assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enable") == "true",
+    assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enabled") == "true",
       "configuration parameter changed in test body")
   }
 }
@@ -57,15 +57,15 @@ class TwoLevelAggregateHashMapWithVectorizedMapSuite

   override protected def sparkConf: SparkConf = super.sparkConf
     .set("spark.sql.codegen.fallback", "false")
-    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
     .set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")

   // adding some checking after each test is run, assuring that the configs are not changed
   // in test code
   after {
     assert(sparkConf.get("spark.sql.codegen.fallback") == "false",
       "configuration parameter changed in test body")
-    assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enable") == "true",
+    assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enabled") == "true",
       "configuration parameter changed in test body")
     assert(sparkConf.get("spark.sql.codegen.aggregate.map.vectorized.enable") == "true",
       "configuration parameter changed in test body")

sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala

Lines changed: 10 additions & 10 deletions
@@ -107,14 +107,14 @@ class AggregateBenchmark extends BenchmarkBase {

     benchmark.addCase(s"codegen = T hashmap = F", numIters = 3) { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
       f()
     }

     benchmark.addCase(s"codegen = T hashmap = T", numIters = 5) { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
       f()
     }
@@ -149,14 +149,14 @@ class AggregateBenchmark extends BenchmarkBase {

     benchmark.addCase(s"codegen = T hashmap = F", numIters = 3) { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", value = true)
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
       f()
     }

     benchmark.addCase(s"codegen = T hashmap = T", numIters = 5) { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", value = true)
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
       f()
     }
@@ -189,14 +189,14 @@ class AggregateBenchmark extends BenchmarkBase {

     benchmark.addCase(s"codegen = T hashmap = F", numIters = 3) { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
       f()
     }

     benchmark.addCase(s"codegen = T hashmap = T", numIters = 5) { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
       f()
     }
@@ -228,14 +228,14 @@ class AggregateBenchmark extends BenchmarkBase {

     benchmark.addCase(s"codegen = T hashmap = F") { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
       f()
     }

     benchmark.addCase(s"codegen = T hashmap = T") { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
       f()
     }
@@ -277,14 +277,14 @@ class AggregateBenchmark extends BenchmarkBase {

     benchmark.addCase(s"codegen = T hashmap = F") { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
       f()
     }

     benchmark.addCase(s"codegen = T hashmap = T") { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
       f()
     }
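
Each benchmark case flips the hash-map configs before running the measured function. A rough PySpark analogue of that A/B pattern, assuming an existing SparkSession `spark` (plain wall-clock timing, not the Scala `Benchmark` harness used above):

```python
import time

# Illustrative only: crude A/B timing of an aggregation with the renamed
# two-level hash map flag off and on, mirroring the addCase pattern above.
df = spark.range(0, 5 * 1000 * 1000).selectExpr("id % 1024 AS k")
for twolevel in ("false", "true"):
    spark.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", twolevel)
    start = time.time()
    df.groupBy("k").count().collect()
    print("twolevel.enabled=%s: %.2fs" % (twolevel, time.time() - start))
```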

sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala

Lines changed: 1 addition & 1 deletion
@@ -1015,7 +1015,7 @@ class HashAggregationQueryWithControlledFallbackSuite extends AggregationQuerySu

   override protected def checkAnswer(actual: => DataFrame, expectedAnswer: Seq[Row]): Unit = {
     Seq("true", "false").foreach { enableTwoLevelMaps =>
-      withSQLConf("spark.sql.codegen.aggregate.map.twolevel.enable" ->
+      withSQLConf("spark.sql.codegen.aggregate.map.twolevel.enabled" ->
         enableTwoLevelMaps) {
         (1 to 3).foreach { fallbackStartsAt =>
           withSQLConf("spark.sql.TungstenAggregate.testFallbackStartsAt" ->
