Skip to content

Commit f12b239

Browse files
committed
#799 Ignore errors when getting the default block size for the Hadoop filesystem.
1 parent 4b427f6 commit f12b239

File tree

1 file changed

+14
-8
lines changed
  • spark-cobol/src/main/scala/za/co/absa/cobrix/spark/cobol/utils

1 file changed

+14
-8
lines changed

spark-cobol/src/main/scala/za/co/absa/cobrix/spark/cobol/utils/HDFSUtils.scala

Lines changed: 14 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ import org.apache.hadoop.fs.{FileSystem, Path}
2222

2323
import java.nio.charset.StandardCharsets
2424
import scala.collection.JavaConverters._
25+
import scala.util.control.NonFatal
2526

2627
/**
2728
* This object provides utility methods for interacting with HDFS internals.
@@ -71,17 +72,22 @@ object HDFSUtils {
7172
* @return A block size in megabytes and None in case of an error
7273
*/
7374
/**
  * Returns the default block size of a Hadoop filesystem, in megabytes.
  *
  * Per the commit intent ("#799 Ignore errors when getting the default block size
  * for the Hadoop filesystem"), any non-fatal error thrown by the filesystem is
  * swallowed and None is returned — callers treat the block size as best-effort.
  *
  * @param fileSystem The Hadoop filesystem to query.
  * @param path       An optional path used to resolve the block size;
  *                   the filesystem root ("/") is used when not provided.
  * @return The block size in megabytes, or None if it is not positive or an error occurred.
  */
def getHDFSDefaultBlockSizeMB(fileSystem: FileSystem, path: Option[String] = None): Option[Int] = {
  try {
    val hdfsPath = new Path(path.getOrElse("/"))
    val blockSizeInBytes = fileSystem.getDefaultBlockSize(hdfsPath)
    // A non-positive byte count, or one that rounds down to 0 MB, is not useful
    // to callers; both collapse into the single check below (a byte count <= 0
    // always yields an MB value <= 0, so the former nested `if` was redundant).
    val blockSizeInMB = (blockSizeInBytes / bytesInMegabyte).toInt
    if (blockSizeInMB > 0) Some(blockSizeInMB) else None
  } catch {
    // NonFatal only: fatal errors (OutOfMemoryError, InterruptedException, ...)
    // must still propagate. The exception value is unused, so bind it to `_`.
    case NonFatal(_) => None
  }
}
8793

0 commit comments

Comments (0)