Skip to content

Commit f6ed467

Browse files
committed
Release tweaks.
1 parent bd5dfb8 commit f6ed467

File tree

15 files changed

+49
-26
lines changed

15 files changed

+49
-26
lines changed

.java-version

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
1.8

RELEASE.md

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,10 @@
88
a. `clean`
99
b. `test it:test`
1010
c. `makeSite`
11-
d. `rf-notebook/publishLocal`
12-
e. `publishSigned` (LocationTech credentials required)
13-
f. `sonatypeReleaseAll`. It can take a while, but should eventually show up [here](https://search.maven.org/search?q=g:org.locationtech.rasterframes).
14-
g. `docs/ghpagesPushSite`
15-
h. `rf-notebook/publish`
11+
d. `publishSigned` (LocationTech credentials required)
12+
e. `sonatypeReleaseAll`. It can take a while, but should eventually show up [here](https://search.maven.org/search?q=g:org.locationtech.rasterframes).
13+
f. `docs/ghpagesPushSite`
14+
g. `rf-notebook/publish`
1615
6. `cd pyrasterframes/target/python/dist`
1716
7. `python3 -m twine upload pyrasterframes-x.y.z-py2.py3-none-any.whl`
1817
8. Commit any changes that were necessary.

docs/src/main/paradox/release-notes.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
* Upgraded many of the pyrasterframes dependencies, including:
1414
`descartes`, `fiona`, `folium`, `geopandas`, `matplotlib`, `numpy`, `pandas`, `rasterio`, `shapely`
1515
* Changed `rasterframes.prefer-gdal` configuration parameter to default to `False`, as JVM GeoTIFF performs just as well for COGs as the GDAL one.
16+
* Fixed [#545](https://github.com/locationtech/rasterframes/issues/545).
1617

1718
### 0.9.0
1819

project/PythonBuildPlugin.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,7 @@ object PythonBuildPlugin extends AutoPlugin {
101101
val retcode = pySetup.toTask(" build bdist_wheel").value
102102
if(retcode != 0) throw new MessageOnlyException(s"'python setup.py' returned $retcode")
103103
val whls = (buildDir / "dist" ** "pyrasterframes*.whl").get()
104-
require(whls.length == 1, "Running setup.py should have produced a single .whl file. Try running `clean` first.")
104+
require(whls.length == 1, s"Running setup.py should have produced a single .whl file. Found $whls")
105105
log.info(s"Python .whl file written to '${whls.head}'")
106106
whls.head
107107
}.dependsOn(pyWhlJar)

project/RFProjectPlugin.scala

Lines changed: 18 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ import xerial.sbt.Sonatype.autoImport._
99
*/
1010
object RFProjectPlugin extends AutoPlugin {
1111
override def trigger: PluginTrigger = allRequirements
12-
override def requires = GitPlugin
12+
override def requires = GitPlugin && RFDependenciesPlugin
1313

1414
override def projectSettings = Seq(
1515
organization := "org.locationtech.rasterframes",
@@ -22,6 +22,7 @@ object RFProjectPlugin extends AutoPlugin {
2222
licenses += ("Apache-2.0", url("https://www.apache.org/licenses/LICENSE-2.0.html")),
2323
scalaVersion := "2.11.12",
2424
scalacOptions ++= Seq(
25+
"-target:jvm-1.8",
2526
"-feature",
2627
"-deprecation",
2728
"-Ywarn-dead-code",
@@ -30,34 +31,42 @@ object RFProjectPlugin extends AutoPlugin {
3031
scalacOptions in (Compile, doc) ++= Seq("-no-link-warnings"),
3132
Compile / console / scalacOptions := Seq("-feature"),
3233
javacOptions ++= Seq("-source", "1.8", "-target", "1.8"),
34+
initialize := {
35+
val _ = initialize.value // run the previous initialization
36+
val sparkVer = VersionNumber(RFDependenciesPlugin.autoImport.rfSparkVersion.value)
37+
if (sparkVer.matchesSemVer(SemanticSelector("<3.0"))) {
38+
val curr = VersionNumber(sys.props("java.specification.version"))
39+
val req = SemanticSelector("=1.8")
40+
assert(curr.matchesSemVer(req), s"Java $req required for $sparkVer. Found $curr.")
41+
}
42+
},
3343
cancelable in Global := true,
3444
publishTo in ThisBuild := sonatypePublishTo.value,
3545
publishMavenStyle := true,
3646
publishArtifact in (Compile, packageDoc) := true,
3747
publishArtifact in Test := false,
3848
fork in Test := true,
39-
javaOptions in Test := Seq("-Xmx1500m", "-XX:+HeapDumpOnOutOfMemoryError",
40-
"-XX:HeapDumpPath=/tmp"),
49+
javaOptions in Test := Seq("-Xmx1500m", "-XX:+HeapDumpOnOutOfMemoryError", "-XX:HeapDumpPath=/tmp"),
4150
parallelExecution in Test := false,
4251
testOptions in Test += Tests.Argument("-oDF"),
4352
developers := List(
4453
Developer(
4554
id = "metasim",
4655
name = "Simeon H.K. Fitch",
4756
email = "[email protected]",
48-
url = url("http://www.astraea.earth")
57+
url = url("https://github.com/metasim")
4958
),
5059
Developer(
5160
id = "vpipkt",
5261
name = "Jason Brown",
5362
email = "[email protected]",
54-
url = url("http://www.astraea.earth")
63+
url = url("https://github.com/vpipkt")
5564
),
5665
Developer(
57-
id = "mteldridge",
58-
name = "Matt Eldridge",
59-
email = "[email protected]",
60-
url = url("http://www.astraea.earth")
66+
id = "echeipesh",
67+
name = "Eugene Cheipesh",
68+
email = "[email protected]",
69+
url = url("https://github.com/echeipesh")
6170
),
6271
Developer(
6372
id = "bguseman",

project/build.properties

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
sbt.version=1.3.8
1+
sbt.version=1.4.9

pyrasterframes/src/main/python/docs/aggregation.pymd

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22

33
```python, setup, echo=False
44
from pyrasterframes import rf_ipython
5-
from docs import *
65
from pyrasterframes.utils import create_rf_spark_session
76
from pyrasterframes.rasterfunctions import *
87
from pyspark.sql import *

pyrasterframes/src/main/python/docs/raster-join.pymd

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,10 @@ import pandas as pd
77
from pyrasterframes.utils import create_rf_spark_session
88
from pyrasterframes.rasterfunctions import *
99
from pyspark.sql.functions import *
10-
spark = create_rf_spark_session()
10+
spark = create_rf_spark_session(**{
11+
'spark.driver.memory': '4G',
12+
'spark.ui.enabled': 'false'
13+
})
1114

1215
```
1316

pyrasterframes/src/main/python/docs/raster-read.pymd

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,10 @@ import pandas as pd
77
from pyrasterframes.utils import create_rf_spark_session
88
from pyrasterframes.rasterfunctions import *
99
from pyspark.sql.functions import *
10-
spark = create_rf_spark_session()
10+
spark = create_rf_spark_session(**{
11+
'spark.driver.memory': '4G',
12+
'spark.ui.enabled': 'false'
13+
})
1114
```
1215

1316
RasterFrames registers a DataSource named `raster` that enables reading of GeoTIFFs (and other formats when @ref:[GDAL is installed](getting-started.md#installing-gdal)) from arbitrary URIs. The `raster` DataSource operates on either a single raster file location or another DataFrame, called a _catalog_, containing pointers to many raster file locations.

pyrasterframes/src/main/python/docs/raster-write.pymd

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ If there are many _tile_ or projected raster columns in the DataFrame, the GeoTI
6969

7070
* `path`: the path local to the driver where the file will be written
7171
* `crs`: the PROJ4 string of the CRS the GeoTIFF is to be written in
72-
* `raster_dimensions`: optional, a tuple of two ints giving the size of the resulting file. If specified, RasterFrames will downsample the data in distributed fashion using bilinear resampling. If not specified, the default is to write the dataframe at full resolution, which can result in an `OutOfMemoryError`.
72+
* `raster_dimensions`: optional, a tuple of two ints giving the size of the resulting file. If specified, RasterFrames will downsample the data in distributed fashion using bilinear resampling. If not specified, the default is to write the dataframe at full resolution, which can result in an out of memory error.
7373

7474
### Example
7575

0 commit comments

Comments (0)