@@ -255,47 +255,48 @@ public List<DataSegment> getCachedSegments() throws IOException
   private void addFilesToCachedSegments(File file, AtomicInteger ignored, List<DataSegment> cachedSegments) throws IOException
   {
     final DataSegment segment = jsonMapper.readValue(file, DataSegment.class);
-    boolean removeInfo = false;
     if (!segment.getId().toString().equals(file.getName())) {
       log.warn("Ignoring cache file[%s] for segment[%s].", file.getPath(), segment.getId());
       ignored.incrementAndGet();
-    } else {
-      removeInfo = true;
-      final SegmentCacheEntry cacheEntry = new SegmentCacheEntry(segment);
-      for (StorageLocation location : locations) {
-        // check for migrate from old nested local storage path format
-        final File legacyPath = new File(location.getPath(), DataSegmentPusher.getDefaultStorageDir(segment, false));
-        if (legacyPath.exists()) {
-          final File destination = cacheEntry.toPotentialLocation(location.getPath());
-          FileUtils.mkdirp(destination);
-          final File[] oldFiles = legacyPath.listFiles();
-          final File[] newFiles = destination.listFiles();
-          // make sure old files exist and new files do not exist
-          if (oldFiles != null && oldFiles.length > 0 && newFiles != null && newFiles.length == 0) {
-            Files.move(legacyPath.toPath(), destination.toPath(), StandardCopyOption.ATOMIC_MOVE);
-          }
-          cleanupLegacyCacheLocation(location.getPath(), legacyPath);
+      return;
+    }
+
+    boolean removeInfo = true;
+
+    final SegmentCacheEntry cacheEntry = new SegmentCacheEntry(segment);
+    for (StorageLocation location : locations) {
+      // check for migrate from old nested local storage path format
+      final File legacyPath = new File(location.getPath(), DataSegmentPusher.getDefaultStorageDir(segment, false));
+      if (legacyPath.exists()) {
+        final File destination = cacheEntry.toPotentialLocation(location.getPath());
+        FileUtils.mkdirp(destination);
+        final File[] oldFiles = legacyPath.listFiles();
+        final File[] newFiles = destination.listFiles();
+        // make sure old files exist and new files do not exist
+        if (oldFiles != null && oldFiles.length > 0 && newFiles != null && newFiles.length == 0) {
+          Files.move(legacyPath.toPath(), destination.toPath(), StandardCopyOption.ATOMIC_MOVE);
         }
+        cleanupLegacyCacheLocation(location.getPath(), legacyPath);
+      }
 
-        if (cacheEntry.checkExists(location.getPath())) {
-          removeInfo = false;
-          final boolean reserveResult;
-          if (config.isVirtualStorage()) {
-            reserveResult = location.reserveWeak(cacheEntry);
-          } else {
-            reserveResult = location.reserve(cacheEntry);
-          }
-          if (!reserveResult) {
-            log.makeAlert(
-                "storage[%s:%,d] has more segments than it is allowed. Currently loading Segment[%s:%,d]. Please increase druid.segmentCache.locations maxSize param",
-                location.getPath(),
-                location.availableSizeBytes(),
-                segment.getId(),
-                segment.getSize()
-            ).emit();
-          }
-          cachedSegments.add(segment);
+      if (cacheEntry.checkExists(location.getPath())) {
+        removeInfo = false;
+        final boolean reserveResult;
+        if (config.isVirtualStorage()) {
+          reserveResult = location.reserveWeak(cacheEntry);
+        } else {
+          reserveResult = location.reserve(cacheEntry);
+        }
+        if (!reserveResult) {
+          log.makeAlert(
+              "storage[%s:%,d] has more segments than it is allowed. Currently loading Segment[%s:%,d]. Please increase druid.segmentCache.locations maxSize param",
+              location.getPath(),
+              location.availableSizeBytes(),
+              segment.getId(),
+              segment.getSize()
+          ).emit();
         }
+        cachedSegments.add(segment);
       }
     }
 
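For reference, the migration branch above only relocates a legacy-format cache directory when it still contains files and the new-format destination is empty, so existing content at the destination is never overwritten. Below is a minimal standalone sketch of that check; the migrateIfNeeded name and the legacyDir/newDir parameters are hypothetical stand-ins for the paths the real code derives from StorageLocation and SegmentCacheEntry.

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

public class LegacyCachePathMigrationSketch
{
  /**
   * Hypothetical sketch: move a legacy-format segment cache directory to its
   * new-format location, but only when the legacy directory still has files
   * and the destination directory is empty (mirrors the check in the diff).
   */
  static boolean migrateIfNeeded(File legacyDir, File newDir) throws IOException
  {
    if (!legacyDir.exists()) {
      return false;
    }
    // create the destination so its contents can be inspected, as mkdirp does in the diff
    newDir.mkdirs();
    final File[] oldFiles = legacyDir.listFiles();
    final File[] newFiles = newDir.listFiles();
    // move only if old files exist and new files do not exist
    if (oldFiles != null && oldFiles.length > 0 && newFiles != null && newFiles.length == 0) {
      Files.move(legacyDir.toPath(), newDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
      return true;
    }
    return false;
  }
}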