@@ -142,20 +142,29 @@ private boolean isWindows() {
142142 public void failoverHDFS (String from , String to ) throws IOException {
143143 assert isHA () && haConfiguration != null : "HA Configuration must be set up before performing failover" ;
144144 LOGGER .info ("Swapping active namenodes: [{}] to standby and [{}] to active" , from , to );
145- try {
146- AccessController .doPrivileged ((PrivilegedExceptionAction <Void >) () -> {
147- CloseableHAAdmin haAdmin = new CloseableHAAdmin ();
148- haAdmin .setConf (haConfiguration );
149- try {
150- haAdmin .transitionToStandby (from );
151- haAdmin .transitionToActive (to );
152- } finally {
153- haAdmin .close ();
154- }
155- return null ;
156- });
157- } catch (PrivilegedActionException pae ) {
158- throw new IOException ("Unable to perform namenode failover" , pae );
145+ // Synchronize to prevent race conditions in concurrent test execution
146+ synchronized (STATIC_CONFIG_LOCK ) {
147+ // Save current locale and set to English for consistent HDFS failover behavior
148+ Locale originalLocale = Locale .getDefault ();
149+ try {
150+ Locale .setDefault (Locale .ENGLISH );
151+ AccessController .doPrivileged ((PrivilegedExceptionAction <Void >) () -> {
152+ CloseableHAAdmin haAdmin = new CloseableHAAdmin ();
153+ haAdmin .setConf (haConfiguration );
154+ try {
155+ haAdmin .transitionToStandby (from );
156+ haAdmin .transitionToActive (to );
157+ } finally {
158+ haAdmin .close ();
159+ }
160+ return null ;
161+ });
162+ } catch (PrivilegedActionException pae ) {
163+ throw new IOException ("Unable to perform namenode failover" , pae );
164+ } finally {
165+ // Restore original locale
166+ Locale .setDefault (originalLocale );
167+ }
159168 }
160169 }
161170
@@ -288,6 +297,7 @@ private void tryStartingHdfs(Path hdfsHome) throws ClassNotFoundException, NoSuc
288297 builder .nnTopology (namenodeTopology );
289298 }
290299 dfs = builder .build ();
// NOTE(review): dfs.waitClusterUp() intentionally left uncalled — confirm builder.build()
// already blocks until the cluster is up, or delete this commented-out line.
291301 // Configure contents of the filesystem
292302 org .apache .hadoop .fs .Path esUserPath = new org .apache .hadoop .fs .Path ("/user/elasticsearch" );
293303 FileSystem fs ;
@@ -337,20 +347,31 @@ private boolean isSecure() {
337347
338348 @ Override
339349 protected void after () {
340- if (dfs != null ) {
350+ // Synchronize to prevent race conditions in concurrent test execution
351+ synchronized (STATIC_CONFIG_LOCK ) {
352+ // Save current locale and set to English for consistent HDFS shutdown behavior
353+ Locale originalLocale = Locale .getDefault ();
341354 try {
342- if (isHA ()) {
343- dfs .getFileSystem (0 ).close ();
344- dfs .getFileSystem (1 ).close ();
345- } else {
346- dfs .getFileSystem ().close ();
355+ Locale .setDefault (Locale .ENGLISH );
356+ if (dfs != null ) {
357+ try {
358+ if (isHA ()) {
359+ dfs .getFileSystem (0 ).close ();
360+ dfs .getFileSystem (1 ).close ();
361+ } else {
362+ dfs .getFileSystem ().close ();
363+ }
364+ } catch (IOException e ) {
365+ throw new RuntimeException (e );
366+ }
367+ dfs .close ();
347368 }
348- } catch (IOException e ) {
349- throw new RuntimeException (e );
369+ temporaryFolder .delete ();
370+ } finally {
371+ // Restore original locale
372+ Locale .setDefault (originalLocale );
350373 }
351- dfs .close ();
352374 }
353- temporaryFolder .delete ();
354375 }
355376
356377 private boolean isHA () {
0 commit comments