Skip to content

Commit c4905a8

Browse files
authored
Make all undocumented unintended public API parts private (#331)
Needed by #330.
1 parent 4b53733 commit c4905a8

File tree

9 files changed

+14
-14
lines changed

9 files changed

+14
-14
lines changed

src/main/scala-spark-3.2/uk/co/gresearch/spark/parquet/BlockMetaDataUtil.scala

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -18,6 +18,6 @@ package uk.co.gresearch.spark.parquet
1818

1919
import org.apache.parquet.hadoop.metadata.BlockMetaData
2020

21-
object BlockMetaDataUtil {
21+
private[spark] object BlockMetaDataUtil {
2222
def getOrdinal(block: BlockMetaData): Option[Int] = Some(block.getOrdinal)
2323
}

src/main/scala-spark-3.2/uk/co/gresearch/spark/parquet/PrimitiveTypeUtil.scala

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -18,7 +18,7 @@ package uk.co.gresearch.spark.parquet
1818

1919
import org.apache.parquet.schema.PrimitiveType
2020

21-
object PrimitiveTypeUtil {
21+
private[spark] object PrimitiveTypeUtil {
2222
def getLogicalTypeAnnotation(primitive: PrimitiveType): Option[String] =
2323
Option(primitive.getLogicalTypeAnnotation).map(_.toString)
2424
}

src/main/scala-spark-3.2/uk/co/gresearch/spark/parquet/SplitFile.scala

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -18,8 +18,8 @@ package uk.co.gresearch.spark.parquet
1818

1919
import org.apache.spark.sql.execution.datasources.PartitionedFile
2020

21-
case class SplitFile(filePath: String, start: Long, length: Long, fileSize: Option[Long])
21+
private[spark] case class SplitFile(filePath: String, start: Long, length: Long, fileSize: Option[Long])
2222

23-
object SplitFile {
23+
private[spark] object SplitFile {
2424
def apply(file: PartitionedFile): SplitFile = SplitFile(file.filePath, file.start, file.length, None)
2525
}

src/main/scala-spark-3.3/uk/co/gresearch/spark/parquet/SplitFile.scala

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -18,8 +18,8 @@ package uk.co.gresearch.spark.parquet
1818

1919
import org.apache.spark.sql.execution.datasources.PartitionedFile
2020

21-
case class SplitFile(filePath: String, start: Long, length: Long, fileSize: Option[Long])
21+
private[spark] case class SplitFile(filePath: String, start: Long, length: Long, fileSize: Option[Long])
2222

23-
object SplitFile {
23+
private[spark] object SplitFile {
2424
def apply(file: PartitionedFile): SplitFile = SplitFile(file.filePath, file.start, file.length, Some(file.fileSize))
2525
}

src/main/scala-spark-3.4/uk/co/gresearch/spark/parquet/FileMetaDataUtil.scala

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -18,6 +18,6 @@ package uk.co.gresearch.spark.parquet
1818

1919
import org.apache.parquet.hadoop.metadata.FileMetaData
2020

21-
object FileMetaDataUtil {
21+
private[spark] object FileMetaDataUtil {
2222
def getEncryptionType(fileMetaData: FileMetaData): Option[String] = None
2323
}

src/main/scala-spark-3.5/uk/co/gresearch/spark/parquet/FileMetaDataUtil.scala

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -18,7 +18,7 @@ package uk.co.gresearch.spark.parquet
1818

1919
import org.apache.parquet.hadoop.metadata.FileMetaData
2020

21-
object FileMetaDataUtil {
21+
private[spark] object FileMetaDataUtil {
2222
def getEncryptionType(fileMetaData: FileMetaData): Option[String] =
2323
Some(fileMetaData.getEncryptionType.name())
2424
}

src/main/scala-spark-4.0/uk/co/gresearch/spark/parquet/SplitFile.scala

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -18,8 +18,8 @@ package uk.co.gresearch.spark.parquet
1818

1919
import org.apache.spark.sql.execution.datasources.PartitionedFile
2020

21-
case class SplitFile(filePath: String, start: Long, length: Long, fileSize: Option[Long])
21+
private[spark] case class SplitFile(filePath: String, start: Long, length: Long, fileSize: Option[Long])
2222

23-
object SplitFile {
23+
private[spark] object SplitFile {
2424
def apply(file: PartitionedFile): SplitFile = SplitFile(file.filePath.toString, file.start, file.length, Some(file.fileSize))
2525
}

src/main/scala/uk/co/gresearch/spark/group/package.scala

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -76,7 +76,7 @@ package object group {
7676
}
7777
}
7878

79-
object SortedGroupByDataset {
79+
private[spark] object SortedGroupByDataset {
8080
def apply[K: Ordering: Encoder, V](
8181
ds: Dataset[V],
8282
groupColumns: Seq[Column],
@@ -149,7 +149,7 @@ package object group {
149149
}
150150
}
151151

152-
class GroupedIterator[K: Ordering, V](iter: Iterator[(K, V)]) extends Iterator[(K, Iterator[V])] {
152+
private[group] class GroupedIterator[K: Ordering, V](iter: Iterator[(K, V)]) extends Iterator[(K, Iterator[V])] {
153153
private val values = iter.buffered
154154
private var currentKey: Option[K] = None
155155
private var currentGroup: Option[Iterator[V]] = None
@@ -180,7 +180,7 @@ package object group {
180180
}
181181
}
182182

183-
class GroupIterator[K: Ordering, V](iter: BufferedIterator[(K, V)]) extends Iterator[V] {
183+
private[group] class GroupIterator[K: Ordering, V](iter: BufferedIterator[(K, V)]) extends Iterator[V] {
184184
private val ordering = implicitly[Ordering[K]]
185185
private val key = iter.head._1
186186

src/main/scala/uk/co/gresearch/spark/parquet/package.scala

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -29,7 +29,7 @@ import scala.collection.JavaConverters.{collectionAsScalaIterableConverter, mapA
2929
import scala.collection.convert.ImplicitConversions.`iterable AsScalaIterable`
3030

3131
package object parquet {
32-
lazy val conf = new Configuration()
32+
private lazy val conf = new Configuration()
3333

3434
/**
3535
* Implicit class to extend a Spark DataFrameReader.

0 commit comments

Comments (0)