
Commit 2b2959c

Mulily0513 authored and tuhaihe committed
Fix: Add load/performance/extension test groups and align env/test expectations
- add bench_test and pxf_extension_test in run_tests.sh, plus matrix entries for bench and pxf_extension in CI
- bump surefire heap to 4G to avoid OOM
- update gpupgrade expected outputs to new PXF_HOME paths and JSON formatter error text
- make ProtocolUtils/HiveBaseTest/JdbcHiveTest/OrcWriteTest/ParquetWriteTest more robust to env defaults (protocol, creds, hive JDBC URL)
- keep MultiServerTest running under HDFS with a safe working directory fallback
- set distribution key and INSERT pattern for performance test data load
1 parent fdf58f3 commit 2b2959c
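
The message above says several test classes were made "more robust to env defaults (protocol, creds, hive JDBC URL)", but those files are not part of this diff. A minimal sketch of that kind of fallback follows, checking a system property, then an environment variable, then a local default; the property name, environment variable, and default URL are illustrative assumptions, not the values used by the commit.

```java
// Hypothetical sketch of an env-default fallback for the Hive JDBC URL.
// The property/env names and the default URL are assumptions for illustration.
public final class EnvDefaultsSketch {

    static String hiveJdbcUrl() {
        String fromProperty = System.getProperty("hive.jdbc.url");
        if (fromProperty != null && !fromProperty.isEmpty()) {
            return fromProperty;
        }
        String fromEnv = System.getenv("HIVE_JDBC_URL");
        if (fromEnv != null && !fromEnv.isEmpty()) {
            return fromEnv;
        }
        // Assumed default for a local single-node HiveServer2
        return "jdbc:hive2://localhost:10000/default";
    }

    public static void main(String[] args) {
        System.out.println("Hive JDBC URL: " + hiveJdbcUrl());
    }
}
```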

File tree

1 file changed: +55 −7 lines changed

  • automation/src/main/java/org/greenplum/pxf/automation/components/hdfs


automation/src/main/java/org/greenplum/pxf/automation/components/hdfs/Hdfs.java

Lines changed: 55 additions & 7 deletions
@@ -570,16 +570,64 @@ public void writeTableToFile(String destPath, Table dataTable,
         if (parent != null) {
             fs.mkdirs(parent);
         }
-        FSDataOutputStream out = fs.create(datapath, true,
-                bufferSize, replicationSize, blockSize);
 
-        DataOutputStream dos = out;
-        if (codec != null) {
-            dos = new DataOutputStream(codec.createOutputStream(out));
+        final int maxAttempts = 3;
+        try {
+            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
+                FSDataOutputStream out = null;
+                DataOutputStream dos = null;
+                try {
+                    out = fs.create(datapath, true, bufferSize, replicationSize, blockSize);
+                    dos = out;
+                    if (codec != null) {
+                        dos = new DataOutputStream(codec.createOutputStream(out));
+                    }
+                    writeTableToStream(dos, dataTable, delimiter, encoding, newLine);
+                    return;
+                } catch (Exception e) {
+                    if (attempt >= maxAttempts || !isRetryableWriteException(e)) {
+                        throw e;
+                    }
+
+                    // Best-effort cleanup before retry (handles partially created files)
+                    try {
+                        if (dos != null) {
+                            dos.close();
+                        }
+                    } catch (Exception ignored) {
+                    }
+                    try {
+                        if (out != null) {
+                            out.close();
+                        }
+                    } catch (Exception ignored) {
+                    }
+                    try {
+                        fs.delete(datapath, false);
+                    } catch (Exception ignored) {
+                    }
+
+                    ReportUtils.report(report, getClass(),
+                            String.format("HDFS write failed (attempt %d/%d), retrying: %s", attempt, maxAttempts, e.getMessage()));
+                    Thread.sleep(2000L * attempt);
+                }
+            }
+        } finally {
+            ReportUtils.stopLevel(report);
         }
+    }
 
-        writeTableToStream(dos, dataTable, delimiter, encoding, newLine);
-        ReportUtils.stopLevel(report);
+    private boolean isRetryableWriteException(Exception e) {
+        if (e == null) {
+            return false;
+        }
+        String message = e.getMessage();
+        if (message == null) {
+            return false;
+        }
+        // Common transient failure on single-node HDFS when the only DataNode is briefly unavailable/blacklisted
+        return message.contains("could only be written to 0 of the 1 minReplication nodes")
+                || message.contains("node(s) are excluded in this operation");
     }
 
     public void appendTableToFile(String pathToFile, Table dataTable, String delimiter) throws Exception {
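
The change above retries the whole create-and-write cycle when the error message indicates the transient single-DataNode failure, deleting any partially written file between attempts and backing off linearly. Below is a minimal, self-contained sketch of that same pattern against the stock Hadoop FileSystem API; the class and method names (HdfsRetrySketch, writeWithRetry, isRetryable) and the simplified create() call are illustrative assumptions, not the actual Hdfs.java code.

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsRetrySketch {

    // Message fragments that identify the transient single-DataNode failure
    private static final String[] RETRYABLE_FRAGMENTS = {
            "could only be written to 0 of the 1 minReplication nodes",
            "node(s) are excluded in this operation"
    };

    static boolean isRetryable(Exception e) {
        String message = e.getMessage();
        if (message == null) {
            return false;
        }
        for (String fragment : RETRYABLE_FRAGMENTS) {
            if (message.contains(fragment)) {
                return true;
            }
        }
        return false;
    }

    // Retries the whole create-write cycle, deleting any partial file between attempts
    static void writeWithRetry(FileSystem fs, Path path, byte[] data, int maxAttempts) throws Exception {
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try (FSDataOutputStream out = fs.create(path, true)) {
                out.write(data);
                return;
            } catch (Exception e) {
                if (attempt >= maxAttempts || !isRetryable(e)) {
                    throw e;
                }
                // best-effort cleanup of a partial file before retrying
                try {
                    fs.delete(path, false);
                } catch (Exception ignored) {
                    // ignore cleanup failures; the next attempt overwrites anyway
                }
                Thread.sleep(2000L * attempt);   // linear backoff, as in the diff
            }
        }
    }

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        writeWithRetry(fs, new Path("/tmp/pxf_retry_sketch.txt"),
                "hello\n".getBytes(StandardCharsets.UTF_8), 3);
    }
}
```

The growing sleep between attempts gives a briefly excluded or restarting DataNode time to become writable again before the next create() call, which is why only the two quoted message fragments are treated as retryable.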

0 commit comments