
Commit 362dfff

fix(misc): Add space checkers in scalastyle (spark-jobserver#879)
There are several small checks that we can add to scalastyle to improve readability and to keep the code base consistent. One of them is a check for a single space before and after tokens. The per-file diffs below are the whitespace-only formatting fixes needed to satisfy the new checks.
1 parent 92dbd77 commit 362dfff
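
The scalastyle-config.xml entries themselves are not shown in this excerpt; the following is a minimal sketch of what enabling such space checkers typically looks like in scalastyle. The severity levels and token lists here are illustrative assumptions, not the exact values used in this commit:

<!-- Illustrative only: require exactly one space before/after the listed tokens. -->
<check level="warning" class="org.scalastyle.scalariform.EnsureSingleSpaceBeforeTokenChecker" enabled="true">
  <parameters>
    <parameter name="tokens">ARROW, EQUALS, LARROW</parameter>
  </parameters>
</check>
<check level="warning" class="org.scalastyle.scalariform.EnsureSingleSpaceAfterTokenChecker" enabled="true">
  <parameters>
    <parameter name="tokens">ARROW, EQUALS, COMMA, COLON, IF</parameter>
  </parameters>
</check>

With the scalastyle sbt plugin, rules like these are then enforced by the usual sbt scalastyle run.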

28 files changed (+85, −70 lines)

akka-app/src/main/scala/spark/jobserver/common/akka/web/CommonRoutes.scala

Lines changed: 11 additions & 11 deletions
@@ -81,8 +81,8 @@ object MetricsSerializer {
 
 private def process(metric: Metric): Map[String, Any] = {
 metric match {
-case c: Counter => Map("type" -> "counter", "count" -> c.count())
-case m: Meter => Map("type" -> "meter") ++ meterToMap(m)
+case c: Counter => Map("type" -> "counter", "count" -> c.count())
+case m: Meter => Map("type" -> "meter") ++ meterToMap(m)
 case g: Gauge[_] => Map("type" -> "gauge", "value" -> g.value())
 // For Timers, ignore the min/max/mean values, as they are for all time. We're just interested
 // in the recent (biased) histogram values.
@@ -97,17 +97,17 @@ object MetricsSerializer {
 private def meterToMap(m: Metered) =
 Map("units" -> m.rateUnit.toString.toLowerCase,
 "count" -> m.count,
-"mean" -> m.meanRate,
-"m1" -> m.oneMinuteRate,
-"m5" -> m.fiveMinuteRate,
-"m15" -> m.fifteenMinuteRate)
+"mean" -> m.meanRate,
+"m1" -> m.oneMinuteRate,
+"m5" -> m.fiveMinuteRate,
+"m15" -> m.fifteenMinuteRate)
 
 /** Extracts the histogram (Median, 75%, 95%, 98%, 99% 99.9%) values to a map */
 private def histogramToMap(h: Sampling) =
 Map("median" -> h.getSnapshot().getMedian(),
-"p75" -> h.getSnapshot().get75thPercentile(),
-"p95" -> h.getSnapshot().get95thPercentile(),
-"p98" -> h.getSnapshot().get98thPercentile(),
-"p99" -> h.getSnapshot().get99thPercentile(),
-"p999" -> h.getSnapshot().get999thPercentile())
+"p75" -> h.getSnapshot().get75thPercentile(),
+"p95" -> h.getSnapshot().get95thPercentile(),
+"p98" -> h.getSnapshot().get98thPercentile(),
+"p99" -> h.getSnapshot().get99thPercentile(),
+"p999" -> h.getSnapshot().get999thPercentile())
 }

akka-app/src/main/scala/spark/jobserver/common/akka/web/JsonUtils.scala

Lines changed: 5 additions & 5 deletions
@@ -35,13 +35,13 @@ object JsonUtils {
 JsObject(pairs)
 }
 case a: Array[_] => seqFormat[Any].write(a.toSeq)
-case true => JsTrue
-case false => JsFalse
-case p: Product => seqFormat[Any].write(p.productIterator.toSeq)
-case null => JsNull
+case true => JsTrue
+case false => JsFalse
+case p: Product => seqFormat[Any].write(p.productIterator.toSeq)
+case null => JsNull
 case m: java.util.Map[_, _] => AnyJsonFormat.write(m.asScala.toMap)
 case l: java.util.List[_] => seqFormat[Any].write(l.asScala)
-case x => JsString(x.toString)
+case x => JsString(x.toString)
 }
 def read(value: JsValue): Any = value match {
 case JsNumber(n) => n.intValue()

job-server-api/src/main/scala/spark/jobserver/NamedRddSupport.scala

Lines changed: 2 additions & 2 deletions
@@ -22,7 +22,7 @@ class RDDPersister[T] extends NamedObjectPersister[NamedRDD[T]] {
 rdd.setName(name)
 rdd.getStorageLevel match {
 case StorageLevel.NONE => rdd.persist(storageLevel)
-case currentLevel => rdd.persist(currentLevel)
+case currentLevel => rdd.persist(currentLevel)
 }
 // perform some action to force computation of the RDD
 if (forceComputation) rdd.count()
@@ -174,7 +174,7 @@ trait NamedRddSupport extends NamedObjectSupport { self: SparkJob =>
 val namedObj: Option[NamedRDD[T]] = namedObjects.get(name)(timeout)
 namedObj match {
 case Some(NamedRDD(namedRdd: RDD[T], _, _)) => Some(namedRdd)
-case _ => None
+case _ => None
 }
 }
 

job-server-api/src/main/scala/spark/jobserver/SparkJob.scala

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@ trait SparkJobBase extends NewSparkJob {
 def validate(sc: C, runtime: JobEnvironment, config: Config): JobData Or Every[ValidationProblem] = {
 namedObjects = runtime.namedObjects
 validate(sc, config) match {
-case SparkJobValid => Good(config)
+case SparkJobValid => Good(config)
 case i: SparkJobInvalid => Bad(One(i))
 }
 }

job-server-api/src/main/scala/spark/jobserver/context/SparkContextFactory.scala

Lines changed: 4 additions & 4 deletions
@@ -50,7 +50,7 @@ trait SparkContextFactory {
 * @param contextName the name of the context to start
 * @return the newly created context.
 */
-def makeContext(sparkConf: SparkConf, config: Config, contextName: String): C
+def makeContext(sparkConf: SparkConf, config: Config, contextName: String): C
 
 /**
 * Creates a SparkContext or derived context.
@@ -90,7 +90,7 @@ trait ScalaContextFactory extends SparkContextFactory {
 if (isValidJob(job)) Good(ScalaJobContainer(job)) else Bad(JobWrongType)
 } catch {
 case _: ClassNotFoundException => Bad(JobClassNotFound)
-case err: Exception => Bad(JobLoadError(err))
+case err: Exception => Bad(JobLoadError(err))
 }
 }
 
@@ -122,7 +122,7 @@ trait JavaContextFactory extends SparkContextFactory {
 if (isValidJob(job)) Good(ScalaJobContainer(JavaJob(job))) else Bad(JobWrongType)
 } catch {
 case _: ClassNotFoundException => Bad(JobClassNotFound)
-case err: Exception => Bad(JobLoadError(err))
+case err: Exception => Bad(JobLoadError(err))
 }
 }
 
@@ -140,7 +140,7 @@ class DefaultSparkContextFactory extends ScalaContextFactory {
 
 type C = SparkContext with ContextLike
 
-def makeContext(sparkConf: SparkConf, config: Config, contextName: String): C = {
+def makeContext(sparkConf: SparkConf, config: Config, contextName: String): C = {
 val sc = new SparkContext(sparkConf) with ContextLike {
 def sparkContext: SparkContext = this
 }

job-server-api/src/main/scala/spark/jobserver/util/SparkJobUtils.scala

Lines changed: 2 additions & 2 deletions
@@ -72,7 +72,7 @@ object SparkJobUtils {
 // This is useful for setting configurations for hadoop connectors such as
 // elasticsearch, cassandra, etc.
 for (e <- Try(contextConfig.getConfig("passthrough"))) {
-e.entrySet().asScala.map { s=>
+e.entrySet().asScala.map { s =>
 conf.set(s.getKey, s.getValue.unwrapped.toString)
 }
 }
@@ -104,7 +104,7 @@ object SparkJobUtils {
 case "yarn-client" =>
 Try(config.getDuration(yarn,
 TimeUnit.MILLISECONDS).toInt / 1000).getOrElse(40)
-case _ =>
+case _ =>
 Try(config.getDuration(standalone,
 TimeUnit.MILLISECONDS).toInt / 1000).getOrElse(15)
 }

job-server-extras/src/main/scala/spark/jobserver/NamedObjectsTestJob.scala

Lines changed: 2 additions & 2 deletions
@@ -35,8 +35,8 @@ class NamedObjectsTestJob extends SparkJob with NamedObjectSupport {
 namedObjects.update("rdd1", NamedRDD(rows(sc), true, StorageLevel.MEMORY_ONLY))
 }
 
-if (config.hasPath(CREATE_BROADCAST)){
-val broadcast = sc.broadcast(Set(1,2,3,4,5))
+if (config.hasPath(CREATE_BROADCAST)) {
+val broadcast = sc.broadcast(Set(1, 2, 3, 4, 5))
 namedObjects.update("broadcast1", NamedBroadcast(broadcast))
 }
 
job-server-extras/src/main/scala/spark/jobserver/context/HiveContextFactory.scala

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@ class HiveContextFactory extends ScalaContextFactory {
 
 def isValidJob(job: api.SparkJobBase): Boolean = job.isInstanceOf[SparkHiveJob]
 
-def makeContext(sparkConf: SparkConf, config: Config, contextName: String): C = {
+def makeContext(sparkConf: SparkConf, config: Config, contextName: String): C = {
 contextFactory(sparkConf)
 }
 
job-server-extras/src/main/scala/spark/jobserver/context/SQLContextFactory.scala

Lines changed: 2 additions & 2 deletions
@@ -11,9 +11,9 @@ class SQLContextFactory extends ScalaContextFactory {
 
 def isValidJob(job: api.SparkJobBase): Boolean = job.isInstanceOf[SparkSqlJob]
 
-def makeContext(sparkConf: SparkConf, config: Config, contextName: String): C = {
+def makeContext(sparkConf: SparkConf, config: Config, contextName: String): C = {
 new SQLContext(new SparkContext(sparkConf)) with ContextLike {
 def stop() { this.sparkContext.stop() }
 }
 }
-}
+}

job-server-extras/src/main/scala/spark/jobserver/context/StreamingContextFactory.scala

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ class StreamingContextFactory extends ScalaContextFactory {
 
 def isValidJob(job: api.SparkJobBase): Boolean = job.isInstanceOf[SparkStreamingJob]
 
-def makeContext(sparkConf: SparkConf, config: Config, contextName: String): C = {
+def makeContext(sparkConf: SparkConf, config: Config, contextName: String): C = {
 val interval = config.getInt("streaming.batch_interval")
 val stopGracefully = config.getBoolean("streaming.stopGracefully")
 val stopSparkContext = config.getBoolean("streaming.stopSparkContext")
