
Commit 8526f2e

[MINOR] Fix typos and misspellings
## What changes were proposed in this pull request?

Fix typos and misspellings, per apache/spark-website#158 (comment)

## How was this patch tested?

Existing tests.

Closes apache#22950 from srowen/Typos.

Authored-by: Sean Owen <[email protected]>
Signed-off-by: Sean Owen <[email protected]>
(cherry picked from commit c0d1bf0)
Signed-off-by: Sean Owen <[email protected]>
1 parent af2ec97 commit 8526f2e

File tree: 8 files changed (+21, -22 lines)


core/src/main/java/org/apache/spark/ExecutorPlugin.java

Lines changed: 3 additions & 3 deletions
@@ -20,18 +20,18 @@
 import org.apache.spark.annotation.DeveloperApi;
 
 /**
- * A plugin which can be automaticaly instantiated within each Spark executor. Users can specify
+ * A plugin which can be automatically instantiated within each Spark executor. Users can specify
  * plugins which should be created with the "spark.executor.plugins" configuration. An instance
  * of each plugin will be created for every executor, including those created by dynamic allocation,
  * before the executor starts running any tasks.
  *
  * The specific api exposed to the end users still considered to be very unstable. We will
- * hopefully be able to keep compatability by providing default implementations for any methods
+ * hopefully be able to keep compatibility by providing default implementations for any methods
  * added, but make no guarantees this will always be possible across all Spark releases.
  *
  * Spark does nothing to verify the plugin is doing legitimate things, or to manage the resources
  * it uses. A plugin acquires the same privileges as the user running the task. A bad plugin
- * could also intefere with task execution and make the executor fail in unexpected ways.
+ * could also interfere with task execution and make the executor fail in unexpected ways.
 */
 @DeveloperApi
 public interface ExecutorPlugin {
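
The javadoc above describes how a plugin gets instantiated: one instance per executor, driven by the "spark.executor.plugins" configuration. As a rough sketch only (not taken from this diff; the init/shutdown hooks and all names below are assumptions), such a plugin might look like:

// Hypothetical Scala implementation of the interface above; init()/shutdown()
// are assumed to be the interface's default lifecycle hooks.
package com.example.plugins

import org.apache.spark.ExecutorPlugin

class LoggingExecutorPlugin extends ExecutorPlugin {
  override def init(): Unit = println("LoggingExecutorPlugin: executor started")
  override def shutdown(): Unit = println("LoggingExecutorPlugin: executor stopping")
}

// Enabled on each executor via, for example:
//   spark-submit --conf spark.executor.plugins=com.example.plugins.LoggingExecutorPlugin ...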

core/src/test/java/org/apache/spark/ExecutorPluginSuite.java

Lines changed: 2 additions & 2 deletions
@@ -63,10 +63,10 @@ private SparkConf initializeSparkConf(String pluginNames) {
 
   @Test
   public void testPluginClassDoesNotExist() {
-    SparkConf conf = initializeSparkConf("nonexistant.plugin");
+    SparkConf conf = initializeSparkConf("nonexistent.plugin");
     try {
       sc = new JavaSparkContext(conf);
-      fail("No exception thrown for nonexistant plugin");
+      fail("No exception thrown for nonexistent plugin");
     } catch (Exception e) {
       // We cannot catch ClassNotFoundException directly because Java doesn't think it'll be thrown
       assertTrue(e.toString().startsWith("java.lang.ClassNotFoundException"));

docs/sql-migration-guide-upgrade.md

Lines changed: 1 addition & 1 deletion
@@ -107,7 +107,7 @@ displayTitle: Spark SQL Upgrading Guide
 
   - Since Spark 2.4, Metadata files (e.g. Parquet summary files) and temporary files are not counted as data files when calculating table size during Statistics computation.
 
-  - Since Spark 2.4, empty strings are saved as quoted empty strings `""`. In version 2.3 and earlier, empty strings are equal to `null` values and do not reflect to any characters in saved CSV files. For example, the row of `"a", null, "", 1` was writted as `a,,,1`. Since Spark 2.4, the same row is saved as `a,,"",1`. To restore the previous behavior, set the CSV option `emptyValue` to empty (not quoted) string.
+  - Since Spark 2.4, empty strings are saved as quoted empty strings `""`. In version 2.3 and earlier, empty strings are equal to `null` values and do not reflect to any characters in saved CSV files. For example, the row of `"a", null, "", 1` was written as `a,,,1`. Since Spark 2.4, the same row is saved as `a,,"",1`. To restore the previous behavior, set the CSV option `emptyValue` to empty (not quoted) string.
 
   - Since Spark 2.4, The LOAD DATA command supports wildcard `?` and `*`, which match any one character, and zero or more characters, respectively. Example: `LOAD DATA INPATH '/tmp/folder*/'` or `LOAD DATA INPATH '/tmp/part-?'`. Special Characters like `space` also now work in paths. Example: `LOAD DATA INPATH '/tmp/folder name/'`.
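
To illustrate the `emptyValue` note above, here is a minimal Scala sketch (not part of this diff; paths and column names are placeholders) that writes the example row with and without the option:

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().appName("csv-empty-value").getOrCreate()
import spark.implicits._

// The row from the example in the guide: "a", null, "", 1
val df = Seq(("a", null.asInstanceOf[String], "", 1)).toDF("c1", "c2", "c3", "c4")

// Spark 2.4 default: the empty string is quoted, so the row is written as a,,"",1
df.write.csv("/tmp/csv-default")

// Pre-2.4 behavior restored: emptyValue set to an empty (not quoted) string gives a,,,1
df.write.option("emptyValue", "").csv("/tmp/csv-legacy")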

mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala

Lines changed: 3 additions & 3 deletions
@@ -62,7 +62,7 @@ private[r] object AFTSurvivalRegressionWrapper extends MLReadable[AFTSurvivalReg
   private val FORMULA_REGEXP = """Surv\(([^,]+), ([^,]+)\) ~ (.+)""".r
 
   private def formulaRewrite(formula: String): (String, String) = {
-    var rewritedFormula: String = null
+    var rewrittenFormula: String = null
     var censorCol: String = null
     try {
       val FORMULA_REGEXP(label, censor, features) = formula
@@ -71,14 +71,14 @@ private[r] object AFTSurvivalRegressionWrapper extends MLReadable[AFTSurvivalReg
         throw new UnsupportedOperationException(
           "Terms of survreg formula can not support dot operator.")
       }
-      rewritedFormula = label.trim + "~" + features.trim
+      rewrittenFormula = label.trim + "~" + features.trim
       censorCol = censor.trim
     } catch {
      case e: MatchError =>
        throw new SparkException(s"Could not parse formula: $formula")
     }
 
-    (rewritedFormula, censorCol)
+    (rewrittenFormula, censorCol)
   }
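For readers skimming the diff, a standalone sketch of what formulaRewrite produces for a typical SparkR survival formula (the input string is an invented example; the regex and rewrite mirror the code above):

// Illustration only, not part of this diff.
val FORMULA_REGEXP = """Surv\(([^,]+), ([^,]+)\) ~ (.+)""".r

val FORMULA_REGEXP(label, censor, features) = "Surv(time, status) ~ x1 + x2"
val rewrittenFormula = label.trim + "~" + features.trim  // "time~x1 + x2"
val censorCol = censor.trim                              // "status"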

mllib/src/main/scala/org/apache/spark/ml/stat/Summarizer.scala

Lines changed: 2 additions & 2 deletions
@@ -96,7 +96,7 @@ object Summarizer extends Logging {
    *  - numNonzeros: a vector with the number of non-zeros for each coefficients
    *  - max: the maximum for each coefficient.
    *  - min: the minimum for each coefficient.
-   *  - normL2: the Euclidian norm for each coefficient.
+   *  - normL2: the Euclidean norm for each coefficient.
    *  - normL1: the L1 norm of each coefficient (sum of the absolute values).
    * @param metrics metrics that can be provided.
    * @return a builder.
@@ -536,7 +536,7 @@ private[ml] object SummaryBuilderImpl extends Logging {
   }
 
   /**
-   * L2 (Euclidian) norm of each dimension.
+   * L2 (Euclidean) norm of each dimension.
    */
   def normL2: Vector = {
     require(requestedMetrics.contains(NormL2))
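
Since the doc comment above enumerates the available metrics, a brief usage sketch of Summarizer.metrics may help (not part of this diff; the data and column names are placeholders):

import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.stat.Summarizer
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.col

val spark = SparkSession.builder().appName("summarizer-example").getOrCreate()
import spark.implicits._

val df = Seq(
  (Vectors.dense(1.0, 2.0), 1.0),
  (Vectors.dense(3.0, 4.0), 1.0)
).toDF("features", "weight")

// Request the mean and the Euclidean (L2) norm of the feature vectors.
df.select(Summarizer.metrics("mean", "normL2").summary(col("features")).as("stats"))
  .select("stats.mean", "stats.normL2")
  .show()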

mllib/src/main/scala/org/apache/spark/mllib/stat/MultivariateOnlineSummarizer.scala

Lines changed: 1 addition & 1 deletion
@@ -273,7 +273,7 @@ class MultivariateOnlineSummarizer extends MultivariateStatisticalSummary with S
   }
 
   /**
-   * L2 (Euclidian) norm of each dimension.
+   * L2 (Euclidean) norm of each dimension.
    *
    */
   @Since("1.2.0")

python/pyspark/ml/stat.py

Lines changed: 1 addition & 1 deletion
@@ -336,7 +336,7 @@ def metrics(*metrics):
      - numNonzeros: a vector with the number of non-zeros for each coefficients
      - max: the maximum for each coefficient.
      - min: the minimum for each coefficient.
-     - normL2: the Euclidian norm for each coefficient.
+     - normL2: the Euclidean norm for each coefficient.
      - normL1: the L1 norm of each coefficient (sum of the absolute values).
 
     :param metrics:

sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala

Lines changed: 8 additions & 9 deletions
@@ -20,7 +20,6 @@ package org.apache.spark.sql.hive
 import java.io.File
 
 import org.apache.spark.sql.{AnalysisException, Dataset, QueryTest, SaveMode}
-import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
 import org.apache.spark.sql.catalyst.parser.ParseException
 import org.apache.spark.sql.execution.columnar.InMemoryTableScanExec
 import org.apache.spark.sql.execution.datasources.{CatalogFileIndex, HadoopFsRelation, LogicalRelation}
@@ -97,24 +96,24 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with TestHiveSingleto
     }
   }
 
-  test("DROP nonexistant table") {
-    sql("DROP TABLE IF EXISTS nonexistantTable")
+  test("DROP nonexistent table") {
+    sql("DROP TABLE IF EXISTS nonexistentTable")
   }
 
-  test("uncache of nonexistant tables") {
-    val expectedErrorMsg = "Table or view not found: nonexistantTable"
+  test("uncache of nonexistent tables") {
+    val expectedErrorMsg = "Table or view not found: nonexistentTable"
     // make sure table doesn't exist
-    var e = intercept[AnalysisException](spark.table("nonexistantTable")).getMessage
+    var e = intercept[AnalysisException](spark.table("nonexistentTable")).getMessage
     assert(e.contains(expectedErrorMsg))
     e = intercept[AnalysisException] {
-      spark.catalog.uncacheTable("nonexistantTable")
+      spark.catalog.uncacheTable("nonexistentTable")
     }.getMessage
     assert(e.contains(expectedErrorMsg))
     e = intercept[AnalysisException] {
-      sql("UNCACHE TABLE nonexistantTable")
+      sql("UNCACHE TABLE nonexistentTable")
     }.getMessage
     assert(e.contains(expectedErrorMsg))
-    sql("UNCACHE TABLE IF EXISTS nonexistantTable")
+    sql("UNCACHE TABLE IF EXISTS nonexistentTable")
   }
 
   test("no error on uncache of non-cached table") {
