@@ -3,6 +3,8 @@ import Dependencies._
import JobServerRelease._
import sbtassembly.AssemblyPlugin.autoImport.assemblyMergeStrategy
import sbtassembly.MergeStrategy
+ import scala.xml.{Node => XmlNode, NodeSeq => XmlNodeSeq, _}
+ import scala.xml.transform.{RewriteRule, RuleTransformer}

transitiveClassifiers in Global := Seq()
lazy val dirSettings = Seq()
@@ -11,7 +13,7 @@ lazy val akkaApp = Project(id = "akka-app", base = file("akka-app"))
  .settings(description := "Common Akka application stack: metrics, tracing, logging, and more.")
  .settings(commonSettings)
  .settings(libraryDependencies ++= coreTestDeps ++ akkaDeps)
- .settings(publishSettings)
+ .settings(noPublishSettings)
  .disablePlugins(SbtScalariform)

lazy val jobServer = Project(id = "job-server", base = file("job-server"))
@@ -32,7 +34,7 @@ lazy val jobServer = Project(id = "job-server", base = file("job-server"))
    test in assembly := {},
    fork in Test := true
  )
- .settings(publishSettings)
+ .settings(noPublishSettings)
  .dependsOn(akkaApp, jobServerApi)
  .disablePlugins(SbtScalariform)

@@ -251,9 +253,36 @@ lazy val scoverageSettings = {
  coverageExcludedPackages := ".+Benchmark.*"
}

+ /** Used for publishing `extras`, `api` and `python` jars. Main Spark Job Server assembly is published
+   * as always. */
lazy val publishSettings = Seq(
+   autoScalaLibrary := false,
+   credentials += Credentials(Path.userHome / ".sbt" / ".credentials"),
+   publishMavenStyle := true,
+   publishTo := Some(sys.env("MVN_PUBLISH_REPO") at sys.env("MVN_PUBLISH_URL")),
  licenses += ("Apache-2.0", url("http://choosealicense.com/licenses/apache/")),
-   bintrayOrganization := Some("spark-jobserver")
+   pomIncludeRepository := { _ => false },
+   /** Since users are encouraged to use dse-spark-dependencies, which provides most of the needed
+     * dependencies, we remove most of the Spark Job Server deps here. Provided, test and blacklisted
+     * deps are removed from resulting poms. */
+   pomPostProcess := { (node: XmlNode) =>
+     new RuleTransformer(new RewriteRule {
+       val pomDependencyBlacklist = Seq("job-server_", "joda-convert", "joda-time")
+       val emptyElement = Text("")
+
+       def hasTestOrProvidedScope(e: Elem): Boolean = e.child.exists(child =>
+         child.label == "scope" && (child.text == "provided" || child.text == "test"))
+
+       def isBlacklisted(e: Elem): Boolean = e.child.exists(child =>
+         child.label == "artifactId" && pomDependencyBlacklist.exists(child.text.startsWith))
+
+       override def transform(node: XmlNode): XmlNodeSeq = node match {
+         case e: Elem if e.label == "dependency" && (hasTestOrProvidedScope(e) || isBlacklisted(e)) =>
+           emptyElement
+         case _ => node
+       }
+     }).transform(node).head
+   }
)

// This is here so we can easily switch back to Logback when Spark fixes its log4j dependency.
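
Note: the akka-app and job-server modules above are switched from publishSettings to noPublishSettings, whose definition is not part of this diff. A minimal sketch of what such a definition conventionally looks like in sbt follows; the name noPublishSettings comes from the diff, but the keys shown are an assumption and the build's actual definition may differ.

// Hypothetical sketch only -- not taken from this diff; the real noPublishSettings may differ.
lazy val noPublishSettings = Seq(
  publish := {},              // make the remote publish task a no-op
  publishLocal := {},         // make the local publish task a no-op
  publishArtifact := false    // declare no publishable artifacts for this module
)

With settings like these, `sbt publish` becomes a no-op for the modules that use them, while publishSettings (including its POM post-processing) still applies to the `extras`, `api` and `python` jars named in its doc comment.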