optionsSet = EnumSet.noneOf(StandardOpenOption.class);
+ for (StandardOpenOption option : options)
+ optionsSet.add(option);
+
//Emulate the old FileSystemProvider.newOutputStream behavior for open options.
if (optionsSet.isEmpty())
{
@@ -888,159 +599,116 @@ public static void setFSErrorHandler(FSErrorHandler handler)
fsErrorHandler.getAndSet(Optional.ofNullable(handler));
}
- /**
- * Returns the size of the specified partition.
- * This method handles large file system by returning {@code Long.MAX_VALUE} if the size overflow.
- * See JDK-8179320 for more information.
- *
- * @param file the partition
- * @return the size, in bytes, of the partition or {@code 0L} if the abstract pathname does not name a partition
- */
- public static long getTotalSpace(File file)
+ @Deprecated
+ public static void createDirectory(String directory)
{
- return handleLargeFileSystem(file.getTotalSpace());
+ createDirectory(new File(directory));
}
- /**
- * Returns the number of unallocated bytes on the specified partition.
- * This method handles large file system by returning {@code Long.MAX_VALUE} if the number of unallocated bytes
- * overflow. See JDK-8179320 for more information
- *
- * @param file the partition
- * @return the number of unallocated bytes on the partition or {@code 0L}
- * if the abstract pathname does not name a partition.
- */
- public static long getFreeSpace(File file)
+ @Deprecated
+ public static void createDirectory(File directory)
{
- return handleLargeFileSystem(file.getFreeSpace());
+ PathUtils.createDirectoriesIfNotExists(directory.toPath());
}
- /**
- * Returns the number of available bytes on the specified partition.
- * This method handles large file system by returning {@code Long.MAX_VALUE} if the number of available bytes
- * overflow. See JDK-8179320 for more information
- *
- * @param file the partition
- * @return the number of available bytes on the partition or {@code 0L}
- * if the abstract pathname does not name a partition.
- */
- public static long getUsableSpace(File file)
+ @Deprecated
+ public static boolean delete(String file)
{
- return handleLargeFileSystem(file.getUsableSpace());
+ return new File(file).tryDelete();
}
- /**
- * Returns the {@link FileStore} representing the file store where a file
- * is located. This {@link FileStore} handles large file system by returning {@code Long.MAX_VALUE}
- * from {@code FileStore#getTotalSpace()}, {@code FileStore#getUnallocatedSpace()} and {@code FileStore#getUsableSpace()}
- * it the value is bigger than {@code Long.MAX_VALUE}. See JDK-8162520
- * for more information.
- *
- * @param path the path to the file
- * @return the file store where the file is stored
- */
- public static FileStore getFileStore(Path path) throws IOException
+ @Deprecated
+ public static void delete(File... files)
{
- return new SafeFileStore(Files.getFileStore(path));
+ for (File file : files)
+ file.tryDelete();
}
/**
- * Handle large file system by returning {@code Long.MAX_VALUE} when the size overflows.
- * @param size returned by the Java's FileStore methods
- * @return the size or {@code Long.MAX_VALUE} if the size was bigger than {@code Long.MAX_VALUE}
+ * Deletes all files and subdirectories under "dir".
+ * @param dir Directory to be deleted
+ * @throws FSWriteError if any part of the tree cannot be deleted
*/
- private static long handleLargeFileSystem(long size)
+ @Deprecated
+ public static void deleteRecursiveWithThrottle(File dir, RateLimiter rateLimiter)
{
- return size < 0 ? Long.MAX_VALUE : size;
+ dir.deleteRecursive(rateLimiter);
}
/**
- * Private constructor as the class contains only static methods.
+ * Deletes all files and subdirectories under "dir".
+ * @param dir Directory to be deleted
+ * @throws FSWriteError if any part of the tree cannot be deleted
*/
- private FileUtils()
+ @Deprecated
+ public static void deleteRecursive(File dir)
{
+ dir.deleteRecursive();
}
/**
- * FileStore decorator used to safely handle large file system.
- *
- * Java's FileStore methods (getTotalSpace/getUnallocatedSpace/getUsableSpace) are limited to reporting bytes as
- * signed long (2^63-1), if the filesystem is any bigger, then the size overflows. {@code SafeFileStore} will
- * return {@code Long.MAX_VALUE} if the size overflow.
- *
- * @see JDK-8162520.
+ * Schedules deletion of all files and subdirectories under "dir" on JVM shutdown.
+ * @param dir Directory to be deleted
*/
- private static final class SafeFileStore extends FileStore
+ @Deprecated
+ public static void deleteRecursiveOnExit(File dir)
{
- /**
- * The decorated {@code FileStore}
- */
- private final FileStore fileStore;
-
- public SafeFileStore(FileStore fileStore)
- {
- this.fileStore = fileStore;
- }
-
- @Override
- public String name()
- {
- return fileStore.name();
- }
+ dir.deleteRecursiveOnExit();
+ }
- @Override
- public String type()
- {
- return fileStore.type();
- }
+ @Deprecated
+ public static boolean isSubDirectory(File parent, File child)
+ {
+ return parent.isAncestorOf(child);
+ }
- @Override
- public boolean isReadOnly()
- {
- return fileStore.isReadOnly();
- }
+ @Deprecated
+ public static Throwable deleteWithConfirm(File file, Throwable accumulate)
+ {
+ return file.delete(accumulate, null);
+ }
- @Override
- public long getTotalSpace() throws IOException
- {
- return handleLargeFileSystem(fileStore.getTotalSpace());
- }
+ @Deprecated
+ public static Throwable deleteWithConfirm(File file, Throwable accumulate, RateLimiter rateLimiter)
+ {
+ return file.delete(accumulate, rateLimiter);
+ }
- @Override
- public long getUsableSpace() throws IOException
- {
- return handleLargeFileSystem(fileStore.getUsableSpace());
- }
+ @Deprecated
+ public static void deleteWithConfirm(String file)
+ {
+ deleteWithConfirm(new File(file));
+ }
- @Override
- public long getUnallocatedSpace() throws IOException
- {
- return handleLargeFileSystem(fileStore.getUnallocatedSpace());
- }
+ @Deprecated
+ public static void deleteWithConfirm(File file)
+ {
+ file.delete();
+ }
- @Override
- public boolean supportsFileAttributeView(Class<? extends FileAttributeView> type)
- {
- return fileStore.supportsFileAttributeView(type);
- }
+ @Deprecated
+ public static void renameWithOutConfirm(String from, String to)
+ {
+ new File(from).tryMove(new File(to));
+ }
- @Override
- public boolean supportsFileAttributeView(String name)
- {
- return fileStore.supportsFileAttributeView(name);
- }
+ @Deprecated
+ public static void renameWithConfirm(String from, String to)
+ {
+ renameWithConfirm(new File(from), new File(to));
+ }
- @Override
- public <V extends FileStoreAttributeView> V getFileStoreAttributeView(Class<V> type)
- {
- return fileStore.getFileStoreAttributeView(type);
- }
+ @Deprecated
+ public static void renameWithConfirm(File from, File to)
+ {
+ from.move(to);
+ }
- @Override
- public Object getAttribute(String attribute) throws IOException
- {
- return fileStore.getAttribute(attribute);
- }
+ /**
+ * Private constructor as the class contains only static methods.
+ */
+ private FileUtils()
+ {
}
/**
@@ -1060,9 +728,9 @@ public static void moveRecursively(Path source, Path target) throws IOException
{
Files.createDirectories(target);
- for (File f : source.toFile().listFiles())
+ for (File f : new File(source).tryList())
{
- String fileName = f.getName();
+ String fileName = f.name();
moveRecursively(source.resolve(fileName), target.resolve(fileName));
}
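
Note: the FileUtils helpers above are kept only as deprecated shims over the new org.apache.cassandra.io.util.File wrapper and PathUtils. A minimal migration sketch, using only methods that appear in this patch:

    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.PathUtils;

    class FileUtilsMigrationSketch
    {
        static void cleanUp(String directory)
        {
            File dir = new File(directory);                        // wrapper around a java.nio.file.Path
            PathUtils.createDirectoriesIfNotExists(dir.toPath());  // what the deprecated createDirectory(File) delegates to
            dir.deleteRecursive();                                 // replaces FileUtils.deleteRecursive(dir)
        }
    }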
diff --git a/src/java/org/apache/cassandra/io/util/FileWriter.java b/src/java/org/apache/cassandra/io/util/FileWriter.java
new file mode 100644
index 000000000000..bbfb59543140
--- /dev/null
+++ b/src/java/org/apache/cassandra/io/util/FileWriter.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.io.util;
+
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+
+import org.apache.cassandra.io.util.File.WriteMode;
+
+public class FileWriter extends OutputStreamWriter
+{
+ @SuppressWarnings("resource") // FOSP is closed by OSW::close
+ public FileWriter(File file) throws IOException
+ {
+ super(new FileOutputStreamPlus(file));
+ }
+
+ @SuppressWarnings("resource") // FOSP is closed by OSW::close
+ public FileWriter(File file, WriteMode mode) throws IOException
+ {
+ super(new FileOutputStreamPlus(file, mode));
+ }
+}
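
FileWriter simply layers an OutputStreamWriter over FileOutputStreamPlus. A usage sketch; the APPEND constant of File.WriteMode is assumed here, since the enum itself is not part of this hunk:

    import java.io.IOException;
    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileWriter;

    class FileWriterSketch
    {
        static void appendLine(File file, String line) throws IOException
        {
            // assumes File.WriteMode.APPEND exists; substitute the real constant if it differs
            try (FileWriter writer = new FileWriter(file, File.WriteMode.APPEND))
            {
                writer.write(line);
                writer.write(System.lineSeparator());
            }
        }
    }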
diff --git a/src/java/org/apache/cassandra/io/util/PathUtils.java b/src/java/org/apache/cassandra/io/util/PathUtils.java
new file mode 100644
index 000000000000..9eef8a429435
--- /dev/null
+++ b/src/java/org/apache/cassandra/io/util/PathUtils.java
@@ -0,0 +1,727 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.util;
+
+import java.io.*;
+import java.nio.channels.FileChannel;
+import java.nio.file.*;
+import java.nio.file.attribute.*;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import java.util.function.*;
+import java.util.stream.Stream;
+
+import javax.annotation.Nullable;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.RateLimiter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.io.FSError;
+import org.apache.cassandra.io.FSReadError;
+import org.apache.cassandra.io.FSWriteError;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.NoSpamLogger;
+
+import static java.nio.file.StandardOpenOption.*;
+import static java.nio.file.StandardOpenOption.CREATE;
+import static java.nio.file.StandardOpenOption.WRITE;
+import static java.util.Collections.unmodifiableSet;
+import static org.apache.cassandra.utils.Throwables.merge;
+
+/**
+ * Vernacular: tryX means return false or 0L on any failure; XIfNotY means propagate any exceptions besides those caused by Y
+ *
+ * This class tries to apply uniform IOException handling, and does not propagate IOException except for NoSuchFileException.
+ * Any harmless/application error exceptions are propagated as UncheckedIOException, and anything else as an FSReadError or FSWriteError.
+ * Semantically this is a little incoherent throughout the codebase, as we intercept IOException haphazardly and treat
+ * it inconsistently - we should ideally migrate to using {@link #propagate(IOException, Path, boolean)} et al globally.
+ */
+public final class PathUtils
+{
+ private static final boolean consistentDirectoryListings = CassandraRelevantProperties.CONSISTENT_DIRECTORY_LISTINGS.getBoolean();
+
+ private static final Set<StandardOpenOption> READ_OPTIONS = unmodifiableSet(EnumSet.of(READ));
+ private static final Set<StandardOpenOption> WRITE_OPTIONS = unmodifiableSet(EnumSet.of(WRITE, CREATE));
+ private static final Set<StandardOpenOption> WRITE_APPEND_OPTIONS = unmodifiableSet(EnumSet.of(WRITE, CREATE, APPEND));
+ private static final Set<StandardOpenOption> READ_WRITE_OPTIONS = unmodifiableSet(EnumSet.of(READ, WRITE, CREATE));
+ private static final FileAttribute<?>[] NO_ATTRIBUTES = new FileAttribute[0];
+
+ private static final Logger logger = LoggerFactory.getLogger(PathUtils.class);
+ private static final NoSpamLogger nospam1m = NoSpamLogger.getLogger(logger, 1, TimeUnit.MINUTES);
+
+ private static Consumer<Path> onDeletion = path -> {
+ if (StorageService.instance.isDaemonSetupCompleted())
+ setDeletionListener(ignore -> {});
+ else
+ logger.info("Deleting file during startup: {}", path);
+ };
+
+ public static FileChannel newReadChannel(Path path) throws NoSuchFileException
+ {
+ return newFileChannel(path, READ_OPTIONS);
+ }
+
+ public static FileChannel newReadWriteChannel(Path path) throws NoSuchFileException
+ {
+ return newFileChannel(path, READ_WRITE_OPTIONS);
+ }
+
+ public static FileChannel newWriteOverwriteChannel(Path path) throws NoSuchFileException
+ {
+ return newFileChannel(path, WRITE_OPTIONS);
+ }
+
+ public static FileChannel newWriteAppendChannel(Path path) throws NoSuchFileException
+ {
+ return newFileChannel(path, WRITE_APPEND_OPTIONS);
+ }
+
+ private static FileChannel newFileChannel(Path path, Set<? extends OpenOption> options) throws NoSuchFileException
+ {
+ try
+ {
+ return FileChannel.open(path, options, PathUtils.NO_ATTRIBUTES);
+ }
+ catch (IOException e)
+ {
+ throw propagateUncheckedOrNoSuchFileException(e, path, options.contains(WRITE));
+ }
+ }
+
+ public static void setDeletionListener(Consumer<Path> newOnDeletion)
+ {
+ onDeletion = newOnDeletion;
+ }
+
+ public static String filename(Path path)
+ {
+ return path.getFileName().toString();
+ }
+
+ public static <T> T[] list(Path path, Function<Stream<Path>, Stream<T>> transform, IntFunction<T[]> arrayFactory)
+ {
+ try (Stream<Path> stream = Files.list(path))
+ {
+ return transform.apply(consistentDirectoryListings ? stream.sorted() : stream)
+ .toArray(arrayFactory);
+ }
+ catch (NoSuchFileException e)
+ {
+ return null;
+ }
+ catch (IOException e)
+ {
+ throw propagateUnchecked(e, path, false);
+ }
+ }
+
+ public static <T> T[] tryList(Path path, Function<Stream<Path>, Stream<T>> transform, IntFunction<T[]> arrayFactory)
+ {
+ try (Stream<Path> stream = Files.list(path))
+ {
+ return transform.apply(consistentDirectoryListings ? stream.sorted() : stream)
+ .toArray(arrayFactory);
+ }
+ catch (IOException e)
+ {
+ return null;
+ }
+ }
+
+ public static void forEach(Path path, Consumer<Path> forEach)
+ {
+ try (Stream<Path> stream = Files.list(path))
+ {
+ (consistentDirectoryListings ? stream.sorted() : stream).forEach(forEach);
+ }
+ catch (IOException e)
+ {
+ throw propagateUnchecked(e, path, false);
+ }
+ }
+
+ public static void forEachRecursive(Path path, Consumer<Path> forEach)
+ {
+ Consumer<Path> forEachRecursive = new Consumer<Path>()
+ {
+ @Override
+ public void accept(Path child)
+ {
+ forEach.accept(child);
+ forEach(child, this);
+ }
+ };
+ forEach(path, forEachRecursive);
+ }
+
+ public static long tryGetLength(Path path)
+ {
+ return tryOnPath(path, Files::size);
+ }
+
+ public static long tryGetLastModified(Path path)
+ {
+ return tryOnPath(path, p -> Files.getLastModifiedTime(p).toMillis());
+ }
+
+ public static boolean trySetLastModified(Path path, long lastModified)
+ {
+ try
+ {
+ Files.setLastModifiedTime(path, FileTime.fromMillis(lastModified));
+ return true;
+ }
+ catch (IOException e)
+ {
+ return false;
+ }
+ }
+
+ public static boolean trySetReadable(Path path, boolean readable)
+ {
+ return trySet(path, PosixFilePermission.OWNER_READ, readable);
+ }
+
+ public static boolean trySetWritable(Path path, boolean writeable)
+ {
+ return trySet(path, PosixFilePermission.OWNER_WRITE, writeable);
+ }
+
+ public static boolean trySetExecutable(Path path, boolean executable)
+ {
+ return trySet(path, PosixFilePermission.OWNER_EXECUTE, executable);
+ }
+
+ public static boolean trySet(Path path, PosixFilePermission permission, boolean set)
+ {
+ try
+ {
+ PosixFileAttributeView view = path.getFileSystem().provider().getFileAttributeView(path, PosixFileAttributeView.class);
+ PosixFileAttributes attributes = view.readAttributes();
+ Set<PosixFilePermission> permissions = attributes.permissions();
+ if (set == permissions.contains(permission))
+ return true;
+ if (set) permissions.add(permission);
+ else permissions.remove(permission);
+ view.setPermissions(permissions);
+ return true;
+ }
+ catch (IOException e)
+ {
+ return false;
+ }
+ }
+
+ public static Throwable delete(Path file, Throwable accumulate)
+ {
+ try
+ {
+ delete(file);
+ }
+ catch (FSError t)
+ {
+ accumulate = merge(accumulate, t);
+ }
+ return accumulate;
+ }
+
+ public static void delete(Path file)
+ {
+ try
+ {
+ Files.delete(file);
+ onDeletion.accept(file);
+ }
+ catch (IOException e)
+ {
+ throw propagateUnchecked(e, file, true);
+ }
+ }
+
+ public static boolean tryDelete(Path file)
+ {
+ try
+ {
+ Files.delete(file);
+ onDeletion.accept(file);
+ return true;
+ }
+ catch (IOException e)
+ {
+ return false;
+ }
+ }
+
+ public static void delete(Path file, @Nullable RateLimiter rateLimiter)
+ {
+ if (rateLimiter != null)
+ {
+ double throttled = rateLimiter.acquire();
+ if (throttled > 0.0)
+ nospam1m.warn("Throttling file deletion: waited {} seconds to delete {}", throttled, file);
+ }
+ delete(file);
+ }
+
+ public static Throwable delete(Path file, Throwable accumulate, @Nullable RateLimiter rateLimiter)
+ {
+ try
+ {
+ delete(file, rateLimiter);
+ }
+ catch (Throwable t)
+ {
+ accumulate = merge(accumulate, t);
+ }
+ return accumulate;
+ }
+
+ /**
+ * Deletes all files and subdirectories under "path".
+ * @param path file to be deleted
+ * @throws FSWriteError if any part of the tree cannot be deleted
+ */
+ public static void deleteRecursive(Path path)
+ {
+ if (isDirectory(path))
+ forEach(path, PathUtils::deleteRecursive);
+
+ // The directory is now empty so now it can be smoked
+ delete(path);
+ }
+
+ /**
+ * Deletes all files and subdirectories under "path".
+ * @param path file to be deleted
+ * @throws FSWriteError if any part of the tree cannot be deleted
+ */
+ public static void deleteRecursive(Path path, RateLimiter rateLimiter)
+ {
+ deleteRecursive(path, rateLimiter, p -> deleteRecursive(p, rateLimiter));
+ }
+
+ /**
+ * Deletes all files and subdirectories under "path".
+ * @param path file to be deleted
+ * @throws FSWriteError if any part of the tree cannot be deleted
+ */
+ private static void deleteRecursive(Path path, RateLimiter rateLimiter, Consumer deleteRecursive)
+ {
+ if (isDirectory(path))
+ forEach(path, deleteRecursive);
+
+ // The directory is now empty so now it can be smoked
+ delete(path, rateLimiter);
+ }
+
+ /**
+ * Schedules deletion of all files and subdirectories under "dir" on JVM shutdown.
+ * @param dir Directory to be deleted
+ */
+ public synchronized static void deleteRecursiveOnExit(Path dir)
+ {
+ ON_EXIT.add(dir, true);
+ }
+
+ /**
+ * Schedules deletion of the file only on JVM shutdown.
+ * @param file File to be deleted
+ */
+ public synchronized static void deleteOnExit(Path file)
+ {
+ ON_EXIT.add(file, false);
+ }
+
+ public static boolean tryRename(Path from, Path to)
+ {
+ logger.trace("Renaming {} to {}", from, to);
+ // this is not FSWE because usually when we see it it's because we didn't close the file before renaming it,
+ // and Windows is picky about that.
+ try
+ {
+ atomicMoveWithFallback(from, to);
+ return true;
+ }
+ catch (IOException e)
+ {
+ logger.trace("Could not move file {} to {}", from, to, e);
+ return false;
+ }
+ }
+
+ public static void rename(Path from, Path to)
+ {
+ logger.trace("Renaming {} to {}", from, to);
+ // this is not FSWE because usually when we see it it's because we didn't close the file before renaming it,
+ // and Windows is picky about that.
+ try
+ {
+ atomicMoveWithFallback(from, to);
+ }
+ catch (IOException e)
+ {
+ logger.trace("Could not move file {} to {}", from, to, e);
+
+ // TODO: this should be an FSError (either read or write)?
+ // (but for now this is maintaining legacy semantics)
+ throw new RuntimeException(String.format("Failed to rename %s to %s", from, to), e);
+ }
+ }
+
+ /**
+ * Move a file atomically, if it fails, it falls back to a non-atomic operation
+ */
+ private static void atomicMoveWithFallback(Path from, Path to) throws IOException
+ {
+ try
+ {
+ Files.move(from, to, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
+ }
+ catch (AtomicMoveNotSupportedException e)
+ {
+ logger.trace("Could not do an atomic move", e);
+ Files.move(from, to, StandardCopyOption.REPLACE_EXISTING);
+ }
+ }
+
+ // true if can determine exists, false if any exception occurs
+ public static boolean exists(Path path)
+ {
+ return Files.exists(path);
+ }
+
+ // true if can determine is a directory, false if any exception occurs
+ public static boolean isDirectory(Path path)
+ {
+ return Files.isDirectory(path);
+ }
+
+ // true if can determine is a regular file, false if any exception occurs
+ public static boolean isFile(Path path)
+ {
+ return Files.isRegularFile(path);
+ }
+
+ /**
+ * @param path create file if not exists
+ * @throws IOError if cannot perform the operation
+ * @return true if a new file was created
+ */
+ public static boolean createFileIfNotExists(Path path)
+ {
+ return ifNotExists(path, Files::createFile);
+ }
+
+ /**
+ * @param path create directory if not exists
+ * @throws IOError if cannot perform the operation
+ * @return true if a new directory was created
+ */
+ public static boolean createDirectoryIfNotExists(Path path)
+ {
+ return ifNotExists(path, Files::createDirectory);
+ }
+
+ /**
+ * @param path create directory (and parents) if not exists
+ * @throws IOError if cannot perform the operation
+ * @return true if a new directory was created
+ */
+ public static boolean createDirectoriesIfNotExists(Path path)
+ {
+ return ifNotExists(path, Files::createDirectories);
+ }
+
+ /**
+ * @param path create directory if not exists and action can be performed
+ * @return true if a new directory was created, false otherwise (for any reason)
+ */
+ public static boolean tryCreateDirectory(Path path)
+ {
+ return tryConsume(path, Files::createDirectory);
+ }
+
+ /**
+ * @param path create directory (and parents) if not exists and action can be performed
+ * @return true if the new directory was created, false otherwise (for any reason)
+ */
+ public static boolean tryCreateDirectories(Path path)
+ {
+ if (exists(path))
+ return false;
+
+ tryCreateDirectories(path.toAbsolutePath().getParent());
+ return tryCreateDirectory(path);
+ }
+
+ /**
+ * @return file if exists, otherwise nearest parent that exists; null if nothing in path exists
+ */
+ public static Path findExistingAncestor(Path file)
+ {
+ if (!file.equals(file.normalize()))
+ throw new IllegalArgumentException("Must be invoked on a path without redundant elements");
+
+ Path parent = file;
+ while (parent != null && !Files.exists(parent))
+ parent = parent.getParent();
+ return parent;
+ }
+
+ /**
+ * 1) Convert to an absolute path without redundant path elements;
+ * 2) If the file exists, resolve any links to the underlying fille;
+ * 3) If the file does not exist, find the first ancestor that does and resolve the path from there
+ */
+ public static Path toCanonicalPath(Path file)
+ {
+ Preconditions.checkNotNull(file);
+
+ file = file.toAbsolutePath().normalize();
+ Path parent = findExistingAncestor(file);
+
+ if (parent == null)
+ return file;
+ if (parent == file)
+ return toRealPath(file);
+ return toRealPath(parent).resolve(parent.relativize(file));
+ }
+
+ private static Path toRealPath(Path path)
+ {
+ try
+ {
+ return path.toRealPath();
+ }
+ catch (IOException e)
+ {
+ throw propagateUnchecked(e, path, false);
+ }
+ }
+
+ /**
+ * Return true if file's canonical path is contained in folder's canonical path.
+ *
+ * Propagates any exceptions encountered finding canonical paths.
+ */
+ public static boolean isContained(Path folder, Path file)
+ {
+ Path realFolder = toCanonicalPath(folder), realFile = toCanonicalPath(file);
+ return realFile.startsWith(realFolder);
+ }
+
+ private static final class DeleteOnExit implements Runnable
+ {
+ private boolean isRegistered;
+ private final Set<Path> deleteRecursivelyOnExit = new HashSet<>();
+ private final Set<Path> deleteOnExit = new HashSet<>();
+
+ synchronized void add(Path path, boolean recursive)
+ {
+ if (!isRegistered)
+ {
+ Runtime.getRuntime().addShutdownHook(new Thread(this));
+ isRegistered = true;
+ }
+ logger.trace("Scheduling deferred {}deletion of file: {}", recursive ? "recursive " : "", path);
+ (recursive ? deleteRecursivelyOnExit : deleteOnExit).add(path);
+ }
+
+ public void run()
+ {
+ for (Path path : deleteOnExit)
+ {
+ try
+ {
+ if (exists(path))
+ delete(path);
+ }
+ catch (Throwable t)
+ {
+ logger.warn("Failed to delete {} on exit", path, t);
+ }
+ }
+ for (Path path : deleteRecursivelyOnExit)
+ {
+ try
+ {
+ if (exists(path))
+ deleteRecursive(path);
+ }
+ catch (Throwable t)
+ {
+ logger.warn("Failed to delete {} on exit", path, t);
+ }
+ }
+ }
+ }
+ private static final DeleteOnExit ON_EXIT = new DeleteOnExit();
+
+ public interface IOConsumer { void accept(Path path) throws IOException; }
+ public interface IOToLongFunction<V> { long apply(V path) throws IOException; }
+
+ private static boolean ifNotExists(Path path, IOConsumer consumer)
+ {
+ try
+ {
+ consumer.accept(path);
+ return true;
+ }
+ catch (FileAlreadyExistsException fae)
+ {
+ return false;
+ }
+ catch (IOException e)
+ {
+ throw propagateUnchecked(e, path, true);
+ }
+ }
+
+ private static boolean tryConsume(Path path, IOConsumer function)
+ {
+ try
+ {
+ function.accept(path);
+ return true;
+ }
+ catch (IOException e)
+ {
+ return false;
+ }
+ }
+
+ private static long tryOnPath(Path path, IOToLongFunction<Path> function)
+ {
+ try
+ {
+ return function.apply(path);
+ }
+ catch (IOException e)
+ {
+ return 0L;
+ }
+ }
+
+ private static long tryOnFileStore(Path path, IOToLongFunction<FileStore> function)
+ {
+ return tryOnFileStore(path, function, ignore -> {});
+ }
+
+ private static long tryOnFileStore(Path path, IOToLongFunction<FileStore> function, Consumer<IOException> orElse)
+ {
+ try
+ {
+ Path ancestor = findExistingAncestor(path);
+ if (ancestor == null)
+ {
+ orElse.accept(new NoSuchFileException(path.toString()));
+ return 0L;
+ }
+ return function.apply(Files.getFileStore(ancestor));
+ }
+ catch (IOException e)
+ {
+ orElse.accept(e);
+ return 0L;
+ }
+ }
+
+ /**
+ * Returns the number of bytes (determined by the provided function) on the specified partition.
+ * This method handles large file systems by returning {@code Long.MAX_VALUE} if the number of available bytes
+ * overflows. See JDK-8179320 for more information.
+ *
+ * @param path the partition (or a file within it)
+ */
+ public static long tryGetSpace(Path path, IOToLongFunction<FileStore> getSpace)
+ {
+ return handleLargeFileSystem(tryOnFileStore(path, getSpace));
+ }
+
+ public static long tryGetSpace(Path path, IOToLongFunction<FileStore> getSpace, Consumer<IOException> orElse)
+ {
+ return handleLargeFileSystem(tryOnFileStore(path, getSpace, orElse));
+ }
+
+ /**
+ * Handle large file system by returning {@code Long.MAX_VALUE} when the size overflows.
+ * @param size returned by the Java's FileStore methods
+ * @return the size or {@code Long.MAX_VALUE} if the size was bigger than {@code Long.MAX_VALUE}
+ */
+ private static long handleLargeFileSystem(long size)
+ {
+ return size < 0 ? Long.MAX_VALUE : size;
+ }
+
+ /**
+ * Private constructor as the class contains only static methods.
+ */
+ private PathUtils()
+ {
+ }
+
+ /**
+ * propagate an IOException as an FSWriteError, FSReadError or UncheckedIOException
+ */
+ public static RuntimeException propagateUnchecked(IOException ioe, Path path, boolean write)
+ {
+ if (ioe instanceof FileAlreadyExistsException
+ || ioe instanceof NoSuchFileException
+ || ioe instanceof AtomicMoveNotSupportedException
+ || ioe instanceof java.nio.file.DirectoryNotEmptyException
+ || ioe instanceof java.nio.file.FileSystemLoopException
+ || ioe instanceof java.nio.file.NotDirectoryException
+ || ioe instanceof java.nio.file.NotLinkException)
+ throw new UncheckedIOException(ioe);
+
+ if (write) throw new FSWriteError(ioe, path);
+ else throw new FSReadError(ioe, path);
+ }
+
+ /**
+ * propagate an IOException as an FSWriteError, FSReadError or UncheckedIOException - except for NoSuchFileException
+ */
+ public static NoSuchFileException propagateUncheckedOrNoSuchFileException(IOException ioe, Path path, boolean write) throws NoSuchFileException
+ {
+ if (ioe instanceof NoSuchFileException)
+ throw (NoSuchFileException) ioe;
+
+ throw propagateUnchecked(ioe, path, write);
+ }
+
+ /**
+ * propagate an IOException either as itself or an FSWriteError or FSReadError
+ */
+ public static <E extends IOException> E propagate(E ioe, Path path, boolean write) throws E
+ {
+ if (ioe instanceof FileAlreadyExistsException
+ || ioe instanceof NoSuchFileException
+ || ioe instanceof AtomicMoveNotSupportedException
+ || ioe instanceof java.nio.file.DirectoryNotEmptyException
+ || ioe instanceof java.nio.file.FileSystemLoopException
+ || ioe instanceof java.nio.file.NotDirectoryException
+ || ioe instanceof java.nio.file.NotLinkException)
+ throw ioe;
+
+ if (write) throw new FSWriteError(ioe, path);
+ else throw new FSReadError(ioe, path);
+ }
+}
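
To make the vernacular from the PathUtils class javadoc concrete: "try" methods swallow IOExceptions and signal failure through their return value, while the plain methods surface failures as FSReadError/FSWriteError (or UncheckedIOException for application-level cases such as NoSuchFileException). A short sketch, using only methods defined above:

    import java.nio.file.Path;
    import org.apache.cassandra.io.FSWriteError;
    import org.apache.cassandra.io.util.PathUtils;

    class PathUtilsSketch
    {
        static void remove(Path path)
        {
            if (PathUtils.tryDelete(path))   // never throws; returns false on any failure
                return;

            try
            {
                PathUtils.delete(path);      // throws FSWriteError or UncheckedIOException
            }
            catch (FSWriteError e)
            {
                throw e;                     // handle or rethrow as appropriate for the call site
            }
        }
    }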
diff --git a/src/java/org/apache/cassandra/io/util/RandomAccessReader.java b/src/java/org/apache/cassandra/io/util/RandomAccessReader.java
index 33d01276ce98..4118bb3afd82 100644
--- a/src/java/org/apache/cassandra/io/util/RandomAccessReader.java
+++ b/src/java/org/apache/cassandra/io/util/RandomAccessReader.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.io.util;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteOrder;
@@ -26,6 +25,7 @@
import com.google.common.primitives.Ints;
import org.apache.cassandra.io.compress.BufferType;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.Rebufferer.BufferHolder;
@NotThreadSafe
diff --git a/src/java/org/apache/cassandra/io/util/ReaderFileProxy.java b/src/java/org/apache/cassandra/io/util/ReaderFileProxy.java
index 3ddb1437c786..5d38e80e3fb0 100644
--- a/src/java/org/apache/cassandra/io/util/ReaderFileProxy.java
+++ b/src/java/org/apache/cassandra/io/util/ReaderFileProxy.java
@@ -33,4 +33,4 @@ public interface ReaderFileProxy extends AutoCloseable
* Needed for tests. Returns the table's CRC check chance, which is only set for compressed tables.
*/
double getCrcCheckChance();
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/io/util/Rebufferer.java b/src/java/org/apache/cassandra/io/util/Rebufferer.java
index 2fc7ffa8aec3..9920de90d93b 100644
--- a/src/java/org/apache/cassandra/io/util/Rebufferer.java
+++ b/src/java/org/apache/cassandra/io/util/Rebufferer.java
@@ -81,4 +81,4 @@ public void release()
// nothing to do
}
};
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/io/util/RewindableDataInput.java b/src/java/org/apache/cassandra/io/util/RewindableDataInput.java
index c202f601ac48..0a0eee4caf22 100644
--- a/src/java/org/apache/cassandra/io/util/RewindableDataInput.java
+++ b/src/java/org/apache/cassandra/io/util/RewindableDataInput.java
@@ -27,4 +27,4 @@ public interface RewindableDataInput extends DataInputPlus
void reset(DataPosition mark) throws IOException;
long bytesPastMark(DataPosition mark);
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/io/util/RewindableDataInputStreamPlus.java b/src/java/org/apache/cassandra/io/util/RewindableDataInputStreamPlus.java
deleted file mode 100644
index a1842bcfc9a6..000000000000
--- a/src/java/org/apache/cassandra/io/util/RewindableDataInputStreamPlus.java
+++ /dev/null
@@ -1,571 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.io.util;
-
-import java.io.Closeable;
-import java.io.File;
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.RandomAccessFile;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static org.apache.cassandra.utils.Throwables.maybeFail;
-import static org.apache.cassandra.utils.Throwables.merge;
-
-/**
- * Adds mark/reset functionality to another input stream by caching read bytes to a memory buffer and
- * spilling to disk if necessary.
- *
- * When the stream is marked via {@link #mark()} or {@link #mark(int)}, up to
- * <code>maxMemBufferSize</code> will be cached in memory (heap). If more than
- * <code>maxMemBufferSize</code> bytes are read while the stream is marked, the
- * following bytes are cached on the <code>spillFile</code> for up to <code>maxDiskBufferSize</code>.
- *
- * Please note that successive calls to {@link #mark()} and {@link #reset()} will write
- * sequentially to the same <code>spillFile</code> until <code>maxDiskBufferSize</code> is reached.
- * At this point, if less than <code>maxDiskBufferSize</code> bytes are currently cached on the
- * <code>spillFile</code>, the remaining bytes are written to the beginning of the file,
- * treating the <code>spillFile</code> as a circular buffer.
- *
- * If more than <code>maxMemBufferSize + maxDiskBufferSize</code> are cached while the stream is marked,
- * the following {@link #reset()} invocation will throw a {@link IllegalStateException}.
- *
- */
-public class RewindableDataInputStreamPlus extends FilterInputStream implements RewindableDataInput, Closeable
-{
- private boolean marked = false;
- private boolean exhausted = false;
- private AtomicBoolean closed = new AtomicBoolean(false);
-
- protected int memAvailable = 0;
- protected int diskTailAvailable = 0;
- protected int diskHeadAvailable = 0;
-
- private final File spillFile;
- private final int initialMemBufferSize;
- private final int maxMemBufferSize;
- private final int maxDiskBufferSize;
-
- private volatile byte memBuffer[];
- private int memBufferSize;
- private RandomAccessFile spillBuffer;
-
- private final DataInputPlus dataReader;
-
- public RewindableDataInputStreamPlus(InputStream in, int initialMemBufferSize, int maxMemBufferSize,
- File spillFile, int maxDiskBufferSize)
- {
- super(in);
- dataReader = new DataInputStreamPlus(this);
- this.initialMemBufferSize = initialMemBufferSize;
- this.maxMemBufferSize = maxMemBufferSize;
- this.spillFile = spillFile;
- this.maxDiskBufferSize = maxDiskBufferSize;
- }
-
- /* RewindableDataInput methods */
-
- /**
- * Marks the current position of a stream to return to this position later via the {@link #reset(DataPosition)} method.
- * @return An empty @link{DataPosition} object
- */
- public DataPosition mark()
- {
- mark(0);
- return new RewindableDataInputPlusMark();
- }
-
- /**
- * Rewinds to the previously marked position via the {@link #mark()} method.
- * @param mark it's not possible to return to a custom position, so this parameter is ignored.
- * @throws IOException if an error ocurs while resetting
- */
- public void reset(DataPosition mark) throws IOException
- {
- reset();
- }
-
- public long bytesPastMark(DataPosition mark)
- {
- return maxMemBufferSize - memAvailable + (diskTailAvailable == -1? 0 : maxDiskBufferSize - diskHeadAvailable - diskTailAvailable);
- }
-
-
- protected static class RewindableDataInputPlusMark implements DataPosition
- {
- }
-
- /* InputStream methods */
-
- public boolean markSupported()
- {
- return true;
- }
-
- /**
- * Marks the current position of a stream to return to this position
- * later via the {@link #reset()} method.
- * @param readlimit the maximum amount of bytes to cache
- */
- public synchronized void mark(int readlimit)
- {
- if (marked)
- throw new IllegalStateException("Cannot mark already marked stream.");
-
- if (memAvailable > 0 || diskHeadAvailable > 0 || diskTailAvailable > 0)
- throw new IllegalStateException("Can only mark stream after reading previously marked data.");
-
- marked = true;
- memAvailable = maxMemBufferSize;
- diskHeadAvailable = -1;
- diskTailAvailable = -1;
- }
-
- public synchronized void reset() throws IOException
- {
- if (!marked)
- throw new IOException("Must call mark() before calling reset().");
-
- if (exhausted)
- throw new IOException(String.format("Read more than capacity: %d bytes.", maxMemBufferSize + maxDiskBufferSize));
-
- memAvailable = maxMemBufferSize - memAvailable;
- memBufferSize = memAvailable;
-
- if (diskTailAvailable == -1)
- {
- diskHeadAvailable = 0;
- diskTailAvailable = 0;
- }
- else
- {
- int initialPos = diskTailAvailable > 0 ? 0 : (int)getIfNotClosed(spillBuffer).getFilePointer();
- int diskMarkpos = initialPos + diskHeadAvailable;
- getIfNotClosed(spillBuffer).seek(diskMarkpos);
-
- diskHeadAvailable = diskMarkpos - diskHeadAvailable;
- diskTailAvailable = (maxDiskBufferSize - diskTailAvailable) - diskMarkpos;
- }
-
- marked = false;
- }
-
- public int available() throws IOException
- {
-
- return super.available() + (marked? 0 : memAvailable + diskHeadAvailable + diskTailAvailable);
- }
-
- public int read() throws IOException
- {
- int read = readOne();
- if (read == -1)
- return read;
-
- if (marked)
- {
- //mark exhausted
- if (isExhausted(1))
- {
- exhausted = true;
- return read;
- }
-
- writeOne(read);
- }
-
- return read;
- }
-
- public int read(byte[] b, int off, int len) throws IOException
- {
- int readBytes = readMulti(b, off, len);
- if (readBytes == -1)
- return readBytes;
-
- if (marked)
- {
- //check we have space on buffer
- if (isExhausted(readBytes))
- {
- exhausted = true;
- return readBytes;
- }
-
- writeMulti(b, off, readBytes);
- }
-
- return readBytes;
- }
-
- private void maybeCreateDiskBuffer() throws IOException
- {
- if (spillBuffer == null)
- {
- if (!spillFile.getParentFile().exists())
- spillFile.getParentFile().mkdirs();
- spillFile.createNewFile();
-
- this.spillBuffer = new RandomAccessFile(spillFile, "rw");
- }
- }
-
-
- private int readOne() throws IOException
- {
- if (!marked)
- {
- if (memAvailable > 0)
- {
- int pos = memBufferSize - memAvailable;
- memAvailable--;
- return getIfNotClosed(memBuffer)[pos] & 0xff;
- }
-
- if (diskTailAvailable > 0 || diskHeadAvailable > 0)
- {
- int read = getIfNotClosed(spillBuffer).read();
- if (diskTailAvailable > 0)
- diskTailAvailable--;
- else if (diskHeadAvailable > 0)
- diskHeadAvailable++;
- if (diskTailAvailable == 0)
- spillBuffer.seek(0);
- return read;
- }
- }
-
- return getIfNotClosed(in).read();
- }
-
- private boolean isExhausted(int readBytes)
- {
- return exhausted || readBytes > memAvailable + (long)(diskTailAvailable == -1? maxDiskBufferSize : diskTailAvailable + diskHeadAvailable);
- }
-
- private int readMulti(byte[] b, int off, int len) throws IOException
- {
- int readBytes = 0;
- if (!marked)
- {
- if (memAvailable > 0)
- {
- readBytes += memAvailable < len ? memAvailable : len;
- int pos = memBufferSize - memAvailable;
- System.arraycopy(memBuffer, pos, b, off, readBytes);
- memAvailable -= readBytes;
- off += readBytes;
- len -= readBytes;
- }
- if (len > 0 && diskTailAvailable > 0)
- {
- int readFromTail = diskTailAvailable < len? diskTailAvailable : len;
- readFromTail = getIfNotClosed(spillBuffer).read(b, off, readFromTail);
- readBytes += readFromTail;
- diskTailAvailable -= readFromTail;
- off += readFromTail;
- len -= readFromTail;
- if (diskTailAvailable == 0)
- spillBuffer.seek(0);
- }
- if (len > 0 && diskHeadAvailable > 0)
- {
- int readFromHead = diskHeadAvailable < len? diskHeadAvailable : len;
- readFromHead = getIfNotClosed(spillBuffer).read(b, off, readFromHead);
- readBytes += readFromHead;
- diskHeadAvailable -= readFromHead;
- off += readFromHead;
- len -= readFromHead;
- }
- }
-
- if (len > 0)
- readBytes += getIfNotClosed(in).read(b, off, len);
-
- return readBytes;
- }
-
- private void writeMulti(byte[] b, int off, int len) throws IOException
- {
- if (memAvailable > 0)
- {
- if (memBuffer == null)
- memBuffer = new byte[initialMemBufferSize];
- int pos = maxMemBufferSize - memAvailable;
- int memWritten = memAvailable < len? memAvailable : len;
- if (pos + memWritten >= getIfNotClosed(memBuffer).length)
- growMemBuffer(pos, memWritten);
- System.arraycopy(b, off, memBuffer, pos, memWritten);
- off += memWritten;
- len -= memWritten;
- memAvailable -= memWritten;
- }
-
- if (len > 0)
- {
- if (diskTailAvailable == -1)
- {
- maybeCreateDiskBuffer();
- diskHeadAvailable = (int)spillBuffer.getFilePointer();
- diskTailAvailable = maxDiskBufferSize - diskHeadAvailable;
- }
-
- if (len > 0 && diskTailAvailable > 0)
- {
- int diskTailWritten = diskTailAvailable < len? diskTailAvailable : len;
- getIfNotClosed(spillBuffer).write(b, off, diskTailWritten);
- off += diskTailWritten;
- len -= diskTailWritten;
- diskTailAvailable -= diskTailWritten;
- if (diskTailAvailable == 0)
- spillBuffer.seek(0);
- }
-
- if (len > 0 && diskTailAvailable > 0)
- {
- int diskHeadWritten = diskHeadAvailable < len? diskHeadAvailable : len;
- getIfNotClosed(spillBuffer).write(b, off, diskHeadWritten);
- }
- }
- }
-
- private void writeOne(int value) throws IOException
- {
- if (memAvailable > 0)
- {
- if (memBuffer == null)
- memBuffer = new byte[initialMemBufferSize];
- int pos = maxMemBufferSize - memAvailable;
- if (pos == getIfNotClosed(memBuffer).length)
- growMemBuffer(pos, 1);
- getIfNotClosed(memBuffer)[pos] = (byte)value;
- memAvailable--;
- return;
- }
-
- if (diskTailAvailable == -1)
- {
- maybeCreateDiskBuffer();
- diskHeadAvailable = (int)spillBuffer.getFilePointer();
- diskTailAvailable = maxDiskBufferSize - diskHeadAvailable;
- }
-
- if (diskTailAvailable > 0 || diskHeadAvailable > 0)
- {
- getIfNotClosed(spillBuffer).write(value);
- if (diskTailAvailable > 0)
- diskTailAvailable--;
- else if (diskHeadAvailable > 0)
- diskHeadAvailable--;
- if (diskTailAvailable == 0)
- spillBuffer.seek(0);
- return;
- }
- }
-
- public int read(byte[] b) throws IOException
- {
- return read(b, 0, b.length);
- }
-
- private void growMemBuffer(int pos, int writeSize)
- {
- int newSize = Math.min(2 * (pos + writeSize), maxMemBufferSize);
- byte newBuffer[] = new byte[newSize];
- System.arraycopy(memBuffer, 0, newBuffer, 0, pos);
- memBuffer = newBuffer;
- }
-
- public long skip(long n) throws IOException
- {
- long skipped = 0;
-
- if (marked)
- {
- //if marked, we need to cache skipped bytes
- while (n-- > 0 && read() != -1)
- {
- skipped++;
- }
- return skipped;
- }
-
- if (memAvailable > 0)
- {
- skipped += memAvailable < n ? memAvailable : n;
- memAvailable -= skipped;
- n -= skipped;
- }
- if (n > 0 && diskTailAvailable > 0)
- {
- int skipFromTail = diskTailAvailable < n? diskTailAvailable : (int)n;
- getIfNotClosed(spillBuffer).skipBytes(skipFromTail);
- diskTailAvailable -= skipFromTail;
- skipped += skipFromTail;
- n -= skipFromTail;
- if (diskTailAvailable == 0)
- spillBuffer.seek(0);
- }
- if (n > 0 && diskHeadAvailable > 0)
- {
- int skipFromHead = diskHeadAvailable < n? diskHeadAvailable : (int)n;
- getIfNotClosed(spillBuffer).skipBytes(skipFromHead);
- diskHeadAvailable -= skipFromHead;
- skipped += skipFromHead;
- n -= skipFromHead;
- }
-
- if (n > 0)
- skipped += getIfNotClosed(in).skip(n);
-
- return skipped;
- }
-
- private <T> T getIfNotClosed(T in) throws IOException
- {
- if (closed.get())
- throw new IOException("Stream closed");
- return in;
- }
-
- public void close() throws IOException
- {
- close(true);
- }
-
- public void close(boolean closeUnderlying) throws IOException
- {
- if (closed.compareAndSet(false, true))
- {
- Throwable fail = null;
- if (closeUnderlying)
- {
- try
- {
- super.close();
- }
- catch (IOException e)
- {
- fail = merge(fail, e);
- }
- }
- try
- {
- if (spillBuffer != null)
- {
- this.spillBuffer.close();
- this.spillBuffer = null;
- }
- } catch (IOException e)
- {
- fail = merge(fail, e);
- }
- try
- {
- if (spillFile.exists())
- {
- spillFile.delete();
- }
- }
- catch (Throwable e)
- {
- fail = merge(fail, e);
- }
- maybeFail(fail, IOException.class);
- }
- }
-
- /* DataInputPlus methods */
-
- public void readFully(byte[] b) throws IOException
- {
- dataReader.readFully(b);
- }
-
- public void readFully(byte[] b, int off, int len) throws IOException
- {
- dataReader.readFully(b, off, len);
- }
-
- public int skipBytes(int n) throws IOException
- {
- return dataReader.skipBytes(n);
- }
-
- public boolean readBoolean() throws IOException
- {
- return dataReader.readBoolean();
- }
-
- public byte readByte() throws IOException
- {
- return dataReader.readByte();
- }
-
- public int readUnsignedByte() throws IOException
- {
- return dataReader.readUnsignedByte();
- }
-
- public short readShort() throws IOException
- {
- return dataReader.readShort();
- }
-
- public int readUnsignedShort() throws IOException
- {
- return dataReader.readUnsignedShort();
- }
-
- public char readChar() throws IOException
- {
- return dataReader.readChar();
- }
-
- public int readInt() throws IOException
- {
- return dataReader.readInt();
- }
-
- public long readLong() throws IOException
- {
- return dataReader.readLong();
- }
-
- public float readFloat() throws IOException
- {
- return dataReader.readFloat();
- }
-
- public double readDouble() throws IOException
- {
- return dataReader.readDouble();
- }
-
- public String readLine() throws IOException
- {
- return dataReader.readLine();
- }
-
- public String readUTF() throws IOException
- {
- return dataReader.readUTF();
- }
-}
diff --git a/src/java/org/apache/cassandra/io/util/SequentialWriter.java b/src/java/org/apache/cassandra/io/util/SequentialWriter.java
index 9ad944be3bc0..431ece397614 100644
--- a/src/java/org/apache/cassandra/io/util/SequentialWriter.java
+++ b/src/java/org/apache/cassandra/io/util/SequentialWriter.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.io.util;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
@@ -114,7 +113,7 @@ private static FileChannel openChannel(File file)
FileChannel channel = FileChannel.open(file.toPath(), StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
try
{
- SyncUtil.trySyncDir(file.getParentFile());
+ SyncUtil.trySyncDir(file.parent());
}
catch (Throwable t)
{
@@ -163,7 +162,7 @@ public SequentialWriter(File file, SequentialWriterOption option, boolean strict
this.strictFlushing = strictFlushing;
this.fchannel = (FileChannel)channel;
- this.filePath = file.getAbsolutePath();
+ this.filePath = file.absolutePath();
this.option = option;
}
diff --git a/src/java/org/apache/cassandra/io/util/SimpleChunkReader.java b/src/java/org/apache/cassandra/io/util/SimpleChunkReader.java
index bc1a5297a9bc..05fdb6b0aff7 100644
--- a/src/java/org/apache/cassandra/io/util/SimpleChunkReader.java
+++ b/src/java/org/apache/cassandra/io/util/SimpleChunkReader.java
@@ -69,4 +69,4 @@ public String toString()
bufferSize,
fileLength());
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/locator/CloudstackSnitch.java b/src/java/org/apache/cassandra/locator/CloudstackSnitch.java
index be6d3c432e14..d8579534fb58 100644
--- a/src/java/org/apache/cassandra/locator/CloudstackSnitch.java
+++ b/src/java/org/apache/cassandra/locator/CloudstackSnitch.java
@@ -20,9 +20,7 @@
import java.io.DataInputStream;
import java.io.BufferedInputStream;
import java.io.BufferedReader;
-import java.io.FileReader;
import java.io.IOException;
-import java.io.File;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URI;
@@ -31,6 +29,8 @@
import java.util.regex.Pattern;
import java.util.regex.Matcher;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.db.SystemKeyspace;
diff --git a/src/java/org/apache/cassandra/locator/RangesByEndpoint.java b/src/java/org/apache/cassandra/locator/RangesByEndpoint.java
index cbf5a637cd57..023d7ee2b437 100644
--- a/src/java/org/apache/cassandra/locator/RangesByEndpoint.java
+++ b/src/java/org/apache/cassandra/locator/RangesByEndpoint.java
@@ -53,4 +53,4 @@ public RangesByEndpoint build()
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/net/AsyncChannelOutputPlus.java b/src/java/org/apache/cassandra/net/AsyncChannelOutputPlus.java
index 163981c901d3..7fcdc051e31a 100644
--- a/src/java/org/apache/cassandra/net/AsyncChannelOutputPlus.java
+++ b/src/java/org/apache/cassandra/net/AsyncChannelOutputPlus.java
@@ -265,4 +265,4 @@ protected WritableByteChannel newDefaultChannel()
throw new UnsupportedOperationException();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/net/AsyncMessageOutputPlus.java b/src/java/org/apache/cassandra/net/AsyncMessageOutputPlus.java
index 8ef0a8f0e631..e1bcfed19394 100644
--- a/src/java/org/apache/cassandra/net/AsyncMessageOutputPlus.java
+++ b/src/java/org/apache/cassandra/net/AsyncMessageOutputPlus.java
@@ -128,4 +128,4 @@ public void discard()
buffer = null;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/net/AsyncStreamingOutputPlus.java b/src/java/org/apache/cassandra/net/AsyncStreamingOutputPlus.java
index 3a9c07500cc7..9c0f265cdf46 100644
--- a/src/java/org/apache/cassandra/net/AsyncStreamingOutputPlus.java
+++ b/src/java/org/apache/cassandra/net/AsyncStreamingOutputPlus.java
@@ -267,4 +267,4 @@ public void discard()
buffer = null;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/net/Verb.java b/src/java/org/apache/cassandra/net/Verb.java
index dd7fab39dc0a..9e3f5f60d1cc 100644
--- a/src/java/org/apache/cassandra/net/Verb.java
+++ b/src/java/org/apache/cassandra/net/Verb.java
@@ -448,4 +448,4 @@ class VerbTimeouts
static final ToLongFunction<TimeUnit> pingTimeout = DatabaseDescriptor::getPingTimeout;
static final ToLongFunction<TimeUnit> longTimeout = units -> Math.max(DatabaseDescriptor.getRpcTimeout(units), units.convert(5L, TimeUnit.MINUTES));
static final ToLongFunction<TimeUnit> noTimeout = units -> { throw new IllegalStateException(); };
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/repair/asymmetric/HostDifferences.java b/src/java/org/apache/cassandra/repair/asymmetric/HostDifferences.java
index 8b123a7bbc23..76ff5aac896b 100644
--- a/src/java/org/apache/cassandra/repair/asymmetric/HostDifferences.java
+++ b/src/java/org/apache/cassandra/repair/asymmetric/HostDifferences.java
@@ -99,4 +99,4 @@ public String toString()
"perHostDifferences=" + perHostDifferences +
'}';
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/repair/asymmetric/PreferedNodeFilter.java b/src/java/org/apache/cassandra/repair/asymmetric/PreferedNodeFilter.java
index e8ca85d02e14..b2622ef365ac 100644
--- a/src/java/org/apache/cassandra/repair/asymmetric/PreferedNodeFilter.java
+++ b/src/java/org/apache/cassandra/repair/asymmetric/PreferedNodeFilter.java
@@ -25,4 +25,4 @@
public interface PreferedNodeFilter
{
public Set<InetAddressAndPort> apply(InetAddressAndPort streamingNode, Set<InetAddressAndPort> toStream);
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/schema/SchemaPullVerbHandler.java b/src/java/org/apache/cassandra/schema/SchemaPullVerbHandler.java
index ed30792fd490..863c68c4bc92 100644
--- a/src/java/org/apache/cassandra/schema/SchemaPullVerbHandler.java
+++ b/src/java/org/apache/cassandra/schema/SchemaPullVerbHandler.java
@@ -44,4 +44,4 @@ public void doVerb(Message message)
Message<Collection<Mutation>> response = message.responseWith(SchemaKeyspace.convertSchemaToMutations());
MessagingService.instance().send(response, message.from());
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/schema/TableMetadataRef.java b/src/java/org/apache/cassandra/schema/TableMetadataRef.java
index 3325510b8d6e..dc4ff1da8ae5 100644
--- a/src/java/org/apache/cassandra/schema/TableMetadataRef.java
+++ b/src/java/org/apache/cassandra/schema/TableMetadataRef.java
@@ -96,4 +96,4 @@ public String toString()
{
return get().toString();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/schema/Views.java b/src/java/org/apache/cassandra/schema/Views.java
index f926c07f0497..15d13f35afe8 100644
--- a/src/java/org/apache/cassandra/schema/Views.java
+++ b/src/java/org/apache/cassandra/schema/Views.java
@@ -252,4 +252,4 @@ private static ViewsDiff diff(Views before, Views after)
return new ViewsDiff(created, dropped, altered.build());
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/security/SSLFactory.java b/src/java/org/apache/cassandra/security/SSLFactory.java
index 22f0a9da7239..215a90641489 100644
--- a/src/java/org/apache/cassandra/security/SSLFactory.java
+++ b/src/java/org/apache/cassandra/security/SSLFactory.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.security;
-import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
@@ -45,6 +44,7 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
+import org.apache.cassandra.io.util.File;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/src/java/org/apache/cassandra/service/CassandraDaemon.java b/src/java/org/apache/cassandra/service/CassandraDaemon.java
index 2bf1646d41cb..bce1f3a13cfa 100644
--- a/src/java/org/apache/cassandra/service/CassandraDaemon.java
+++ b/src/java/org/apache/cassandra/service/CassandraDaemon.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.service;
-import java.io.File;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryPoolMXBean;
@@ -37,6 +36,7 @@
import javax.management.remote.JMXConnectorServer;
import com.google.common.annotations.VisibleForTesting;
+import org.apache.cassandra.io.util.File;
import com.google.common.collect.ImmutableList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/src/java/org/apache/cassandra/service/DefaultFSErrorHandler.java b/src/java/org/apache/cassandra/service/DefaultFSErrorHandler.java
index d5e3e531c1c5..04cb11c6ee83 100644
--- a/src/java/org/apache/cassandra/service/DefaultFSErrorHandler.java
+++ b/src/java/org/apache/cassandra/service/DefaultFSErrorHandler.java
@@ -18,8 +18,8 @@
package org.apache.cassandra.service;
-import java.io.File;
+import org.apache.cassandra.io.util.File;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -78,10 +78,10 @@ public void handleFSError(FSError e)
}
// for both read and write errors mark the path as unwritable.
- DisallowedDirectories.maybeMarkUnwritable(e.path);
+ DisallowedDirectories.maybeMarkUnwritable(new File(e.path));
if (e instanceof FSReadError)
{
- File directory = DisallowedDirectories.maybeMarkUnreadable(e.path);
+ File directory = DisallowedDirectories.maybeMarkUnreadable(new File(e.path));
if (directory != null)
Keyspace.removeUnreadableSSTables(directory);
}
diff --git a/src/java/org/apache/cassandra/service/StartupChecks.java b/src/java/org/apache/cassandra/service/StartupChecks.java
index 4f9b82f9d3d0..5cb938b24449 100644
--- a/src/java/org/apache/cassandra/service/StartupChecks.java
+++ b/src/java/org/apache/cassandra/service/StartupChecks.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.service;
import java.io.BufferedReader;
-import java.io.File;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
@@ -32,12 +31,14 @@
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
+import org.apache.cassandra.io.util.File;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.jpountz.lz4.LZ4Factory;
import org.apache.cassandra.cql3.QueryProcessor;
import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.io.util.PathUtils;
import org.apache.cassandra.schema.TableMetadata;
import org.apache.cassandra.config.Config;
import org.apache.cassandra.config.DatabaseDescriptor;
@@ -339,8 +340,7 @@ public void execute()
Iterable<String> dirs = Iterables.concat(Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()),
Arrays.asList(DatabaseDescriptor.getCommitLogLocation(),
DatabaseDescriptor.getSavedCachesLocation(),
- DatabaseDescriptor.getHintsDirectory().getAbsolutePath()));
-
+ DatabaseDescriptor.getHintsDirectory().absolutePath()));
for (String dataDir : dirs)
{
logger.debug("Checking directory {}", dataDir);
@@ -351,7 +351,7 @@ public void execute()
{
logger.warn("Directory {} doesn't exist", dataDir);
// if they don't, failing their creation, stop cassandra.
- if (!dir.mkdirs())
+ if (!dir.tryCreateDirectories())
throw new StartupException(StartupException.ERR_WRONG_DISK_STATE,
"Has no permission to create directory "+ dataDir);
}
@@ -377,7 +377,7 @@ public void execute() throws StartupException
{
public FileVisitResult visitFile(Path path, BasicFileAttributes attrs)
{
- File file = path.toFile();
+ File file = new File(path);
if (!Descriptor.isValidFile(file))
return FileVisitResult.CONTINUE;
@@ -398,7 +398,7 @@ public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException
String name = dir.getFileName().toString();
return (name.equals(Directories.SNAPSHOT_SUBDIR)
|| name.equals(Directories.BACKUPS_SUBDIR)
- || nonSSTablePaths.contains(dir.toFile().getCanonicalPath()))
+ || nonSSTablePaths.contains(PathUtils.toCanonicalPath(dir).toString()))
? FileVisitResult.SKIP_SUBTREE
: FileVisitResult.CONTINUE;
}
@@ -408,7 +408,7 @@ public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException
{
try
{
- Files.walkFileTree(Paths.get(dataDir), sstableVisitor);
+ Files.walkFileTree(new File(dataDir).toPath(), sstableVisitor);
}
catch (IOException e)
{
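The StartupChecks hunks above replace java.io.File conversions with the wrapper type and PathUtils. A minimal, hypothetical sketch of that conversion pattern; new File(Path), new File(String), tryCreateDirectories() and PathUtils.toCanonicalPath() are taken from the hunks, while the class and method names below are invented for illustration:

import java.nio.file.Path;

import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.PathUtils;

// Illustrative sketch only, not project code.
final class PathConversionSketch
{
    // NIO Path -> Cassandra wrapper File, as in the visitFile() hunk above.
    static File wrap(Path path)
    {
        return new File(path);
    }

    // Canonicalisation goes through PathUtils instead of java.io.File#getCanonicalPath.
    static String canonical(Path dir)
    {
        return PathUtils.toCanonicalPath(dir).toString();
    }

    // Directory creation uses tryCreateDirectories() rather than mkdirs().
    static boolean ensureExists(String dataDir)
    {
        return new File(dataDir).tryCreateDirectories();
    }
}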
diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java
index 803c5b49f695..5c0b8f475033 100644
--- a/src/java/org/apache/cassandra/service/StorageService.java
+++ b/src/java/org/apache/cassandra/service/StorageService.java
@@ -17,7 +17,11 @@
*/
package org.apache.cassandra.service;
-import java.io.*;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOError;
+import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
@@ -61,6 +65,7 @@
import org.apache.cassandra.fql.FullQueryLogger;
import org.apache.cassandra.fql.FullQueryLoggerOptions;
import org.apache.cassandra.fql.FullQueryLoggerOptionsCompositeData;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.locator.ReplicaCollection.Builder.Conflict;
import org.apache.cassandra.utils.concurrent.Future;
import org.apache.cassandra.utils.concurrent.ImmediateFuture;
@@ -3986,7 +3991,7 @@ public void clearSnapshot(String tag, String... keyspaceNames) throws IOException
Set<String> keyspaces = new HashSet<>();
for (String dataDir : DatabaseDescriptor.getAllDataFileLocations())
{
- for(String keyspaceDir : new File(dataDir).list())
+ for(String keyspaceDir : new File(dataDir).tryListNames())
{
// Only add a ks if it has been specified as a param, assuming params were actually provided.
if (keyspaceNames.length > 0 && !Arrays.asList(keyspaceNames).contains(keyspaceDir))
diff --git a/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java b/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java
index 7e64e036756f..bc2c79e10083 100644
--- a/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java
+++ b/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java
@@ -232,4 +232,4 @@ public int maxRemaining()
{
return remaining;
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/service/snapshot/SnapshotManager.java b/src/java/org/apache/cassandra/service/snapshot/SnapshotManager.java
index 554bd51e9e15..7f1c4e9a7ed8 100644
--- a/src/java/org/apache/cassandra/service/snapshot/SnapshotManager.java
+++ b/src/java/org/apache/cassandra/service/snapshot/SnapshotManager.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.service.snapshot;
-import java.io.File;
import java.time.Instant;
import java.util.Collection;
import java.util.Comparator;
@@ -42,6 +41,7 @@
import com.google.common.annotations.VisibleForTesting;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.utils.ExecutorUtils;
import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
diff --git a/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java b/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java
index 5d44acbf4f77..d8d900a8a956 100644
--- a/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java
+++ b/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java
@@ -18,8 +18,7 @@
package org.apache.cassandra.service.snapshot;
-import java.io.File;
-import java.io.IOException;
+import java.io.*;
import java.time.Instant;
import java.util.List;
import java.util.Objects;
@@ -30,6 +29,7 @@
import org.apache.cassandra.config.Duration;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import com.fasterxml.jackson.databind.DeserializationFeature;
+import org.apache.cassandra.io.util.File;
// Only serialize fields
@JsonAutoDetect(fieldVisibility = JsonAutoDetect.Visibility.ANY,
@@ -84,12 +84,12 @@ public Instant getExpiresAt()
public void serializeToJsonFile(File outputFile) throws IOException
{
- mapper.writeValue(outputFile, this);
+ mapper.writeValue(outputFile.toJavaIOFile(), this);
}
public static SnapshotManifest deserializeFromJsonFile(File file) throws IOException
{
- return mapper.readValue(file, SnapshotManifest.class);
+ return mapper.readValue(file.toJavaIOFile(), SnapshotManifest.class);
}
@Override
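The SnapshotManifest change keeps Jackson's java.io.File-based API by unwrapping the Cassandra File at the call site via toJavaIOFile(). A short sketch of that interop pattern, assuming only the accessor shown in the hunk; the class and mapper setup below are illustrative, not the project's:

import java.io.IOException;

import com.fasterxml.jackson.databind.ObjectMapper;

import org.apache.cassandra.io.util.File;

// Illustrative sketch only: third-party APIs that accept java.io.File get the unwrapped handle.
final class JsonFileInteropSketch
{
    private static final ObjectMapper mapper = new ObjectMapper();

    static void writeJson(File out, Object value) throws IOException
    {
        mapper.writeValue(out.toJavaIOFile(), value);
    }

    static <T> T readJson(File in, Class<T> type) throws IOException
    {
        return mapper.readValue(in.toJavaIOFile(), type);
    }
}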
diff --git a/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java b/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java
index 7e852ec0a16a..185cd45c4bb9 100644
--- a/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java
+++ b/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.service.snapshot;
-import java.io.File;
import java.time.Instant;
import java.util.Collection;
import java.util.Map;
@@ -26,6 +25,7 @@
import java.util.function.Function;
import java.util.stream.Collectors;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileUtils;
public class TableSnapshot
diff --git a/src/java/org/apache/cassandra/streaming/messages/PrepareSynMessage.java b/src/java/org/apache/cassandra/streaming/messages/PrepareSynMessage.java
index e378af7a17c6..bce47a1dede2 100644
--- a/src/java/org/apache/cassandra/streaming/messages/PrepareSynMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/PrepareSynMessage.java
@@ -23,6 +23,7 @@
import org.apache.cassandra.io.util.DataInputPlus;
import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.streaming.StreamRequest;
import org.apache.cassandra.streaming.StreamSession;
import org.apache.cassandra.streaming.StreamSummary;
diff --git a/src/java/org/apache/cassandra/streaming/messages/ReceivedMessage.java b/src/java/org/apache/cassandra/streaming/messages/ReceivedMessage.java
index ff2cdecc9697..a1dd03c787e4 100644
--- a/src/java/org/apache/cassandra/streaming/messages/ReceivedMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/ReceivedMessage.java
@@ -21,6 +21,7 @@
import org.apache.cassandra.io.util.DataInputPlus;
import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.schema.TableId;
import org.apache.cassandra.streaming.StreamSession;
diff --git a/src/java/org/apache/cassandra/tools/AuditLogViewer.java b/src/java/org/apache/cassandra/tools/AuditLogViewer.java
index dd0e839832e1..f226aa2e706d 100644
--- a/src/java/org/apache/cassandra/tools/AuditLogViewer.java
+++ b/src/java/org/apache/cassandra/tools/AuditLogViewer.java
@@ -17,12 +17,12 @@
*/
package org.apache.cassandra.tools;
-import java.io.File;
import java.util.Arrays;
import java.util.List;
import java.util.function.Consumer;
import java.util.stream.Collectors;
+import org.apache.cassandra.io.util.File;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
@@ -76,7 +76,7 @@ static void dump(List<String> pathList, String rollCycle, boolean follow, boolean
Pauser pauser = Pauser.millis(100);
List<ExcerptTailer> tailers = pathList.stream()
.distinct()
- .map(path -> SingleChronicleQueueBuilder.single(new File(path)).readOnly(true).rollCycle(RollCycles.valueOf(rollCycle)).build())
+ .map(path -> SingleChronicleQueueBuilder.single(new File(path).toJavaIOFile()).readOnly(true).rollCycle(RollCycles.valueOf(rollCycle)).build())
.map(SingleChronicleQueue::createTailer)
.collect(Collectors.toList());
boolean hadWork = true;
diff --git a/src/java/org/apache/cassandra/tools/BulkLoader.java b/src/java/org/apache/cassandra/tools/BulkLoader.java
index bb29bbe1945e..43df49c1de4a 100644
--- a/src/java/org/apache/cassandra/tools/BulkLoader.java
+++ b/src/java/org/apache/cassandra/tools/BulkLoader.java
@@ -58,7 +58,7 @@ public static void load(LoaderOptions options) throws BulkLoadException
DatabaseDescriptor.toolInitialization();
OutputHandler handler = new OutputHandler.SystemOutput(options.verbose, options.debug);
SSTableLoader loader = new SSTableLoader(
- options.directory.getAbsoluteFile(),
+ options.directory.toAbsolute(),
new ExternalClient(
options.hosts,
options.storagePort,
diff --git a/src/java/org/apache/cassandra/tools/JMXTool.java b/src/java/org/apache/cassandra/tools/JMXTool.java
index e9171794df58..d054716ea594 100644
--- a/src/java/org/apache/cassandra/tools/JMXTool.java
+++ b/src/java/org/apache/cassandra/tools/JMXTool.java
@@ -18,8 +18,6 @@
package org.apache.cassandra.tools;
-import java.io.File;
-import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@@ -71,6 +69,8 @@
import io.airlift.airline.Help;
import io.airlift.airline.HelpOption;
import io.airlift.airline.Option;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
import org.yaml.snakeyaml.TypeDescription;
import org.yaml.snakeyaml.Yaml;
import org.yaml.snakeyaml.constructor.Constructor;
@@ -229,8 +229,8 @@ public Void call() throws Exception
Preconditions.checkArgument(files.size() == 2, "files requires 2 arguments but given %s", files);
Map<String, Info> left;
Map<String, Info> right;
- try (FileInputStream leftStream = new FileInputStream(files.get(0));
- FileInputStream rightStream = new FileInputStream(files.get(1)))
+ try (FileInputStreamPlus leftStream = new FileInputStreamPlus(files.get(0));
+ FileInputStreamPlus rightStream = new FileInputStreamPlus(files.get(1)))
{
left = format.load(leftStream);
right = format.load(rightStream);
diff --git a/src/java/org/apache/cassandra/tools/LoaderOptions.java b/src/java/org/apache/cassandra/tools/LoaderOptions.java
index ca1bd40f71da..62f5046e1cc0 100644
--- a/src/java/org/apache/cassandra/tools/LoaderOptions.java
+++ b/src/java/org/apache/cassandra/tools/LoaderOptions.java
@@ -20,7 +20,6 @@
*/
package org.apache.cassandra.tools;
-import java.io.File;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.*;
@@ -32,6 +31,7 @@
import org.apache.cassandra.config.*;
import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.tools.BulkLoader.CmdLineOptions;
@@ -376,7 +376,7 @@ public Builder parseArgs(String cmdArgs[])
{
errorMsg("Config file not found", options);
}
- config = new YamlConfigurationLoader().loadConfig(configFile.toURI().toURL());
+ config = new YamlConfigurationLoader().loadConfig(configFile.toPath().toUri().toURL());
}
else
{
diff --git a/src/java/org/apache/cassandra/tools/NodeTool.java b/src/java/org/apache/cassandra/tools/NodeTool.java
index 91a008e1c730..7d3a3caca8e6 100644
--- a/src/java/org/apache/cassandra/tools/NodeTool.java
+++ b/src/java/org/apache/cassandra/tools/NodeTool.java
@@ -22,18 +22,20 @@
import static com.google.common.collect.Lists.newArrayList;
import static java.lang.Integer.parseInt;
import static java.lang.String.format;
+import static org.apache.cassandra.io.util.File.WriteMode.APPEND;
import static org.apache.commons.lang3.ArrayUtils.EMPTY_STRING_ARRAY;
import static org.apache.commons.lang3.StringUtils.EMPTY;
import static org.apache.commons.lang3.StringUtils.isEmpty;
import static org.apache.commons.lang3.StringUtils.isNotEmpty;
import java.io.Console;
-import java.io.File;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileWriter;
import java.io.FileNotFoundException;
-import java.io.FileWriter;
import java.io.IOError;
import java.io.IOException;
import java.net.UnknownHostException;
+import java.nio.file.NoSuchFileException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
@@ -278,7 +280,7 @@ private static void printHistory(String... args)
String cmdLine = Joiner.on(" ").skipNulls().join(args);
cmdLine = cmdLine.replaceFirst("(?<=(-pw|--password))\\s+\\S+", " ");
- try (FileWriter writer = new FileWriter(new File(FBUtilities.getToolsOutputDirectory(), HISTORYFILE), true))
+ try (FileWriter writer = new File(FBUtilities.getToolsOutputDirectory(), HISTORYFILE).newWriter(APPEND))
{
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
writer.append(sdf.format(new Date())).append(": ").append(cmdLine).append(System.lineSeparator());
@@ -374,7 +376,7 @@ private String readUserPasswordFromFile(String username, String passwordFilePath)
String password = EMPTY;
File passwordFile = new File(passwordFilePath);
- try (Scanner scanner = new Scanner(passwordFile).useDelimiter("\\s+"))
+ try (Scanner scanner = new Scanner(passwordFile.toJavaIOFile()).useDelimiter("\\s+"))
{
while (scanner.hasNextLine())
{
@@ -389,7 +391,8 @@ private String readUserPasswordFromFile(String username, String passwordFilePath)
}
scanner.nextLine();
}
- } catch (FileNotFoundException e)
+ }
+ catch (FileNotFoundException e)
{
throw new RuntimeException(e);
}
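The NodeTool hunk above replaces java.io.FileWriter construction with a writer obtained from the wrapper File in append mode. A minimal sketch of that pattern, assuming only the newWriter(APPEND) factory and the chained append() calls shown in the hunk; the class and method names are illustrative:

import static org.apache.cassandra.io.util.File.WriteMode.APPEND;

import java.io.IOException;

import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileWriter;

// Illustrative sketch only, not project code.
final class HistoryAppendSketch
{
    static void appendLine(File historyFile, String line) throws IOException
    {
        // newWriter(APPEND) replaces "new java.io.FileWriter(file, true)".
        try (FileWriter writer = historyFile.newWriter(APPEND))
        {
            writer.append(line).append(System.lineSeparator());
        }
    }
}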
diff --git a/src/java/org/apache/cassandra/tools/SSTableExport.java b/src/java/org/apache/cassandra/tools/SSTableExport.java
index 5be67d72c74a..b3000d0a0588 100644
--- a/src/java/org/apache/cassandra/tools/SSTableExport.java
+++ b/src/java/org/apache/cassandra/tools/SSTableExport.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.tools;
-import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
@@ -26,6 +25,7 @@
import java.util.stream.Collectors;
import java.util.stream.Stream;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.schema.TableMetadata;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.DecoratedKey;
@@ -135,7 +135,7 @@ public static void main(String[] args) throws ConfigurationException
printUsage();
System.exit(1);
}
- String ssTableFileName = new File(cmd.getArgs()[0]).getAbsolutePath();
+ String ssTableFileName = new File(cmd.getArgs()[0]).absolutePath();
if (!new File(ssTableFileName).exists())
{
diff --git a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
old mode 100755
new mode 100644
index 8c1f5db4bcd9..b7164e8d7e9f
--- a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
+++ b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
@@ -25,7 +25,6 @@
import static org.apache.commons.lang3.time.DurationFormatUtils.formatDurationWords;
import java.io.DataInputStream;
-import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.PrintWriter;
@@ -60,6 +59,7 @@
import org.apache.cassandra.io.sstable.metadata.MetadataType;
import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
import org.apache.cassandra.io.sstable.metadata.ValidationMetadata;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.schema.TableMetadata;
import org.apache.cassandra.schema.TableMetadataRef;
import org.apache.cassandra.tools.Util.TermHistogram;
@@ -544,7 +544,7 @@ public static void main(String[] args) throws IOException
File sstable = new File(fname);
if (sstable.exists())
{
- metawriter.printSStableMetadata(sstable.getAbsolutePath(), fullScan);
+ metawriter.printSStableMetadata(sstable.absolutePath(), fullScan);
}
else
{
diff --git a/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java b/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java
index 79fec81f345b..72c1e99bcc41 100644
--- a/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java
+++ b/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.tools;
-import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
@@ -35,6 +34,7 @@
import com.google.common.collect.SetMultimap;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.schema.Schema;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.DecoratedKey;
diff --git a/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java b/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java
index 31d80facc65d..62dd76ee567c 100644
--- a/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java
+++ b/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java
@@ -17,7 +17,9 @@
*/
package org.apache.cassandra.tools;
-import java.io.*;
+
+import java.io.IOException;
+import java.io.PrintStream;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Paths;
@@ -39,6 +41,8 @@
* sstablerepairset --is-repaired -f <(find /var/lib/cassandra/data/.../ -iname "*Data.db*" -mtime +14)
* }
*/
+import org.apache.cassandra.io.util.File;
+
public class SSTableRepairedAtSetter
{
/**
diff --git a/src/java/org/apache/cassandra/tools/StandaloneSSTableUtil.java b/src/java/org/apache/cassandra/tools/StandaloneSSTableUtil.java
index cca48fc395e9..06618b39b4cb 100644
--- a/src/java/org/apache/cassandra/tools/StandaloneSSTableUtil.java
+++ b/src/java/org/apache/cassandra/tools/StandaloneSSTableUtil.java
@@ -25,10 +25,10 @@
import org.apache.cassandra.utils.OutputHandler;
import org.apache.commons.cli.*;
-import java.io.File;
import java.io.IOException;
import java.util.function.BiPredicate;
+import org.apache.cassandra.io.util.File;
import static org.apache.cassandra.tools.BulkLoader.CmdLineOptions;
public class StandaloneSSTableUtil
@@ -87,7 +87,7 @@ private static void listFiles(Options options, TableMetadata metadata, OutputHandler handler)
for (File dir : directories.getCFDirectories())
{
for (File file : LifecycleTransaction.getFiles(dir.toPath(), getFilter(options), Directories.OnTxnErr.THROW))
- handler.output(file.getCanonicalPath());
+ handler.output(file.canonicalPath());
}
}
diff --git a/src/java/org/apache/cassandra/tools/StandaloneScrubber.java b/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
index 4dfa4abdce91..6ee320e1073a 100644
--- a/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
+++ b/src/java/org/apache/cassandra/tools/StandaloneScrubber.java
@@ -18,7 +18,6 @@
*/
package org.apache.cassandra.tools;
-import java.io.File;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;
@@ -27,6 +26,7 @@
import java.util.Set;
import java.util.concurrent.TimeUnit;
+import org.apache.cassandra.io.util.File;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
@@ -127,7 +127,7 @@ public static void main(String args[])
listResult.add(Pair.create(descriptor, components));
File snapshotDirectory = Directories.getSnapshotDirectory(descriptor, snapshotName);
- SSTableReader.createLinks(descriptor, components, snapshotDirectory.getPath());
+ SSTableReader.createLinks(descriptor, components, snapshotDirectory.path());
}
System.out.println(String.format("Pre-scrub sstables snapshotted into snapshot %s", snapshotName));
diff --git a/src/java/org/apache/cassandra/tools/StandaloneSplitter.java b/src/java/org/apache/cassandra/tools/StandaloneSplitter.java
index e3c80f1243ec..cd3affa755da 100644
--- a/src/java/org/apache/cassandra/tools/StandaloneSplitter.java
+++ b/src/java/org/apache/cassandra/tools/StandaloneSplitter.java
@@ -18,10 +18,10 @@
*/
package org.apache.cassandra.tools;
-import java.io.File;
import java.util.*;
import java.util.concurrent.TimeUnit;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.schema.Schema;
import org.apache.cassandra.io.sstable.format.SSTableReader;
import org.apache.commons.cli.*;
@@ -133,7 +133,7 @@ else if (!cfName.equals(desc.cfname))
if (options.snapshot) {
File snapshotDirectory = Directories.getSnapshotDirectory(sstable.descriptor, snapshotName);
- sstable.createLinks(snapshotDirectory.getPath());
+ sstable.createLinks(snapshotDirectory.path());
}
}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java b/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java
index a075ded04dad..2639ec81b32f 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java
@@ -44,4 +44,4 @@ public void execute(NodeProbe probe)
throw new RuntimeException(e);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java b/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java
index 8fdf803c6745..6a06fd4f2d6d 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java
@@ -26,4 +26,4 @@
@Deprecated
public class CfHistograms extends TableHistograms
{
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/CfStats.java b/src/java/org/apache/cassandra/tools/nodetool/CfStats.java
index 2d27ea0f1dcb..42e2bc3023ad 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/CfStats.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/CfStats.java
@@ -26,4 +26,4 @@
@Deprecated
public class CfStats extends TableStats
{
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Compact.java b/src/java/org/apache/cassandra/tools/nodetool/Compact.java
index 7278eada333e..ca560cd3bba1 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Compact.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Compact.java
@@ -94,4 +94,4 @@ public void execute(NodeProbe probe)
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Decommission.java b/src/java/org/apache/cassandra/tools/nodetool/Decommission.java
index 0e58687ff169..98b6d5846c5f 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Decommission.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Decommission.java
@@ -46,4 +46,4 @@ public void execute(NodeProbe probe)
throw new IllegalStateException("Unsupported operation: " + e.getMessage(), e);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableAuditLog.java b/src/java/org/apache/cassandra/tools/nodetool/DisableAuditLog.java
index 35653aef7f95..6d878a0b1655 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableAuditLog.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableAuditLog.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.disableAuditLog();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java b/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java
index b9fc7d67d95d..39a4c76352d4 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java
@@ -50,4 +50,4 @@ public void execute(NodeProbe probe)
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java b/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java
index 4b0bfbea8440..4ee6340da883 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.setIncrementalBackupsEnabled(false);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java b/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java
index 463f2b0a626a..79b921908240 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.stopNativeTransport();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableFullQueryLog.java b/src/java/org/apache/cassandra/tools/nodetool/DisableFullQueryLog.java
index 8820e5f0cbe2..aa5d0b709241 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableFullQueryLog.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableFullQueryLog.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.stopFullQueryLogger();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java b/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java
index 6f950bbc0593..7b6c348549e4 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.stopGossiping();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java
index d7ec35fae881..62465a395b09 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.disableHintedHandoff();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java b/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java
index d65c70bd43ea..3615a997303c 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java
@@ -39,4 +39,4 @@ public void execute(NodeProbe probe)
probe.disableHintsForDC(args.get(0));
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableOldProtocolVersions.java b/src/java/org/apache/cassandra/tools/nodetool/DisableOldProtocolVersions.java
index 20830629fef5..875647127c4f 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/DisableOldProtocolVersions.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/DisableOldProtocolVersions.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.disableOldProtocolVersions();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Drain.java b/src/java/org/apache/cassandra/tools/nodetool/Drain.java
index eaa537aa411b..a152057798e6 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Drain.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Drain.java
@@ -39,4 +39,4 @@ public void execute(NodeProbe probe)
throw new RuntimeException("Error occurred during flushing", e);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableAuditLog.java b/src/java/org/apache/cassandra/tools/nodetool/EnableAuditLog.java
index 51498769cd1f..ae0bb42d3d6a 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableAuditLog.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableAuditLog.java
@@ -82,4 +82,4 @@ public void execute(NodeProbe probe)
probe.enableAuditLog(logger, Collections.EMPTY_MAP, included_keyspaces, excluded_keyspaces, included_categories, excluded_categories, included_users, excluded_users,
archiveRetries, bblocking, rollCycle, maxLogSize, maxQueueWeight, archiveCommand);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java b/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java
index 795ab133c2bb..f8b98ff2899f 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java
@@ -50,4 +50,4 @@ public void execute(NodeProbe probe)
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java b/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java
index d1773d9c42c0..7ebad8a4d887 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.setIncrementalBackupsEnabled(true);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java b/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java
index 506945fd25d8..2e37e6ff4c12 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.startNativeTransport();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableFullQueryLog.java b/src/java/org/apache/cassandra/tools/nodetool/EnableFullQueryLog.java
index 9873e5a01aed..50848946e0df 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableFullQueryLog.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableFullQueryLog.java
@@ -61,4 +61,4 @@ public void execute(NodeProbe probe)
}
probe.enableFullQueryLogger(path, rollCycle, bblocking, maxQueueWeight, maxLogSize, archiveCommand, archiveRetries);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java b/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java
index 900c427ccea7..3433c3ec610c 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.startGossiping();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java
index bccf7e761867..be64e120ebdc 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.enableHintedHandoff();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableOldProtocolVersions.java b/src/java/org/apache/cassandra/tools/nodetool/EnableOldProtocolVersions.java
index f6d5be5269af..06c9f8d02f91 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/EnableOldProtocolVersions.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableOldProtocolVersions.java
@@ -31,4 +31,4 @@ public void execute(NodeProbe probe)
{
probe.enableOldProtocolVersions();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Flush.java b/src/java/org/apache/cassandra/tools/nodetool/Flush.java
index c83e420cbf7f..fb2446d22f7a 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Flush.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Flush.java
@@ -49,4 +49,4 @@ public void execute(NodeProbe probe)
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetSnapshotThrottle.java b/src/java/org/apache/cassandra/tools/nodetool/GetSnapshotThrottle.java
index 0e9bdc134c05..bd98d343064d 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/GetSnapshotThrottle.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/GetSnapshotThrottle.java
@@ -33,4 +33,4 @@ public void execute(NodeProbe probe)
else
System.out.println("Snapshot throttle is disabled");
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/HostStat.java b/src/java/org/apache/cassandra/tools/nodetool/HostStat.java
index 19c0448e8bf7..56c46ee83d0e 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/HostStat.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/HostStat.java
@@ -38,4 +38,4 @@ public String ipOrDns()
{
return resolveIp ? endpoint.getHostName() : endpoint.getHostAddress();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java
index aef77bdb7370..3cba8e0cd200 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.invalidateCounterCache();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java
index cfe7d2f582ab..4414b42cc996 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.invalidateKeyCache();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java
index 7357e2785787..1a10ed00f29d 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.invalidateRowCache();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Move.java b/src/java/org/apache/cassandra/tools/nodetool/Move.java
index 8654d25d8642..075e00850391 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Move.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Move.java
@@ -43,4 +43,4 @@ public void execute(NodeProbe probe)
throw new RuntimeException("Error during moving node", e);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java
index 4ec70d8201b2..fde9eef271df 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.pauseHintsDelivery();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java b/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java
index a083cde99b76..a16e8f22f141 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java
@@ -57,4 +57,4 @@ public void execute(NodeProbe probe)
probe.rebuild(sourceDataCenterName, keyspace, tokens, specificSources);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java b/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java
index 4a6b071510ee..f7a3b6f1eeea 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java
@@ -40,4 +40,4 @@ public void execute(NodeProbe probe)
checkArgument(args.size() >= 3, "rebuild_index requires ks, cf and idx args");
probe.rebuildIndex(args.get(0), args.get(1), toArray(args.subList(2, args.size()), String.class));
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java b/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java
index 6ca90fbecb38..8727a610838e 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.reloadTriggers();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/ResetFullQueryLog.java b/src/java/org/apache/cassandra/tools/nodetool/ResetFullQueryLog.java
index 786852d96f54..d7ac30107bda 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/ResetFullQueryLog.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/ResetFullQueryLog.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.resetFullQueryLogger();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java b/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java
index 708636f9d072..62775a481d7b 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java
@@ -38,4 +38,4 @@ public void execute(NodeProbe probe)
throw new RuntimeException(e);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java
index a3984f87e4e1..bda98aaec033 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java
@@ -30,4 +30,4 @@ public void execute(NodeProbe probe)
{
probe.resumeHintsDelivery();
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetBatchlogReplayThrottle.java b/src/java/org/apache/cassandra/tools/nodetool/SetBatchlogReplayThrottle.java
index 65bb8f50d20c..3c6370c4e1c7 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetBatchlogReplayThrottle.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetBatchlogReplayThrottle.java
@@ -34,4 +34,4 @@ public void execute(NodeProbe probe)
{
probe.setBatchlogReplayThrottle(batchlogReplayThrottle);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java b/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java
index 461f6aeed257..b07eb9e00892 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java
@@ -42,4 +42,4 @@ public void execute(NodeProbe probe)
checkArgument(args.size() == 3, "setcachecapacity requires key-cache-capacity, row-cache-capacity, and counter-cache-capacity args.");
probe.setCacheCapacities(args.get(0), args.get(1), args.get(2));
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java b/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java
index 18197e6ee3dc..de9bab58c8bf 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java
@@ -42,4 +42,4 @@ public void execute(NodeProbe probe)
checkArgument(args.size() == 3, "setcachekeystosave requires key-cache-keys-to-save, row-cache-keys-to-save, and counter-cache-keys-to-save args.");
probe.setCacheKeysToSave(args.get(0), args.get(1), args.get(2));
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java
index 56e558ff470a..52bb5bc2906b 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java
@@ -47,4 +47,4 @@ public void execute(NodeProbe probe)
probe.setCompactionThreshold(args.get(0), args.get(1), minthreshold, maxthreshold);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java
index 80e722259257..4d01f619db35 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java
@@ -34,4 +34,4 @@ public void execute(NodeProbe probe)
{
probe.setCompactionThroughput(compactionThroughput);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java b/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java
index feb945b9b1d8..96f1bdf588e7 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java
@@ -34,4 +34,4 @@ public void execute(NodeProbe probe)
{
probe.setHintedHandoffThrottleInKB(throttleInKB);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetHostStat.java b/src/java/org/apache/cassandra/tools/nodetool/SetHostStat.java
index c43abe1869d6..116087610356 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetHostStat.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetHostStat.java
@@ -51,4 +51,4 @@ public void add(String token, String host, Map<InetAddress, Float> ownerships) throws UnknownHostException
Float owns = ownerships.get(endpoint);
hostStats.add(new HostStat(token, endpoint, resolveIp, owns));
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java b/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java
index 8d9ad90cde49..66d6283cc7de 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java
@@ -100,4 +100,4 @@ else if (target.equals("ring"))
for (String classQualifier : classQualifiers)
probe.setLoggingLevel(classQualifier, level);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetSnapshotThrottle.java b/src/java/org/apache/cassandra/tools/nodetool/SetSnapshotThrottle.java
index a4c49b812250..045ccc171370 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetSnapshotThrottle.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetSnapshotThrottle.java
@@ -33,4 +33,4 @@ public void execute(NodeProbe probe)
{
probe.setSnapshotLinksPerSecond(snapshotThrottle);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java
index 069a6e98b40f..672d5fe05d74 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java
@@ -34,4 +34,4 @@ public void execute(NodeProbe probe)
{
probe.setStreamThroughput(streamThroughput);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java b/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java
index e08198026111..ef9f4980baed 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java
@@ -36,4 +36,4 @@ public void execute(NodeProbe probe)
checkArgument(traceProbability >= 0 && traceProbability <= 1, "Trace probability must be between 0 and 1");
probe.setTraceProbability(traceProbability);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/Sjk.java b/src/java/org/apache/cassandra/tools/nodetool/Sjk.java
index 3ad2c94c6e42..d7f7a043f606 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/Sjk.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/Sjk.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.tools.nodetool;
-import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.lang.reflect.Field;
@@ -57,6 +56,7 @@
import com.beust.jcommander.Parameterized;
import io.airlift.airline.Arguments;
import io.airlift.airline.Command;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.tools.Output;
import org.gridkit.jvmtool.JmxConnectionInfo;
import org.gridkit.jvmtool.cli.CommandLauncher;
@@ -464,15 +464,15 @@ static void listFiles(List<String> results, URL packageURL, String path) throws IOException
{
// loop through files in classpath
File dir = new File(packageURL.getFile());
- String cp = dir.getCanonicalPath();
+ String cp = dir.canonicalPath();
File root = dir;
while (true)
{
- if (cp.equals(new File(root, path).getCanonicalPath()))
+ if (cp.equals(new File(root, path).canonicalPath()))
{
break;
}
- root = root.getParentFile();
+ root = root.parent();
}
listFiles(results, root, dir);
}
@@ -480,10 +480,10 @@ static void listFiles(List<String> results, URL packageURL, String path) throws IOException
static void listFiles(List<String> names, File root, File dir)
{
- String rootPath = root.getAbsolutePath();
+ String rootPath = root.absolutePath();
if (dir.exists() && dir.isDirectory())
{
- for (File file : dir.listFiles())
+ for (File file : dir.tryList())
{
if (file.isDirectory())
{
@@ -491,7 +491,7 @@ static void listFiles(List<String> names, File root, File dir)
}
else
{
- String name = file.getAbsolutePath().substring(rootPath.length() + 1);
+ String name = file.absolutePath().substring(rootPath.length() + 1);
name = name.replace('\\', '/');
names.add(name);
}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java b/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java
index a3a004963625..2a19d3a62978 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java
@@ -38,4 +38,4 @@ public void execute(NodeProbe probe)
else
probe.truncateHints(endpoint);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsHolder.java b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsHolder.java
index c35e1fed7ed9..a345ce0018d2 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsHolder.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsHolder.java
@@ -26,4 +26,4 @@
public interface StatsHolder
{
public Map<String, Object> convert2Map();
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsKeyspace.java b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsKeyspace.java
index dc153325c84e..89d77059f559 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsKeyspace.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsKeyspace.java
@@ -75,4 +75,4 @@ public double writeLatency()
? totalWriteTime / writeCount / 1000
: Double.NaN;
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsPrinter.java b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsPrinter.java
index 389efba9ace6..037227bfe4aa 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsPrinter.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsPrinter.java
@@ -66,4 +66,4 @@ public void print(T data, PrintStream out)
out.println(yaml.dump(data.convert2Map()));
}
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/triggers/CustomClassLoader.java b/src/java/org/apache/cassandra/triggers/CustomClassLoader.java
index 6948c2dc942b..16b182ecf4b4 100644
--- a/src/java/org/apache/cassandra/triggers/CustomClassLoader.java
+++ b/src/java/org/apache/cassandra/triggers/CustomClassLoader.java
@@ -21,22 +21,22 @@
*/
-import java.io.File;
-import java.io.FilenameFilter;
import java.io.IOException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BiPredicate;
+import org.apache.cassandra.io.util.File;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.io.Files;
-
import org.apache.cassandra.io.FSWriteError;
import org.apache.cassandra.io.util.FileUtils;
+import static java.nio.file.Files.*;
+
/**
* Custom class loader will load the classes from the class path, CCL will load
* the classes from the the URL first, if it cannot find the required class it
@@ -69,28 +69,22 @@ public void addClassPath(File dir)
{
if (dir == null || !dir.exists())
return;
- FilenameFilter filter = new FilenameFilter()
- {
- public boolean accept(File dir, String name)
- {
- return name.endsWith(".jar");
- }
- };
- for (File inputJar : dir.listFiles(filter))
+ BiPredicate<File, String> filter = (ignore, name) -> name.endsWith(".jar");
+ for (File inputJar : dir.tryList(filter))
{
File lib = new File(FileUtils.getTempDir(), "lib");
if (!lib.exists())
{
- lib.mkdir();
+ lib.tryCreateDirectory();
lib.deleteOnExit();
}
File out = FileUtils.createTempFile("cassandra-", ".jar", lib);
out.deleteOnExit();
- logger.info("Loading new jar {}", inputJar.getAbsolutePath());
+ logger.info("Loading new jar {}", inputJar.absolutePath());
try
{
- Files.copy(inputJar, out);
- addURL(out.toURI().toURL());
+ copy(inputJar.toPath(), out.toPath());
+ addURL(out.toPath().toUri().toURL());
}
catch (IOException ex)
{
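The CustomClassLoader hunk above swaps the anonymous FilenameFilter for a BiPredicate lambda and listFiles(filter) for tryList(filter). A small sketch of that migration; the <File, String> parameterisation is assumed from the lambdas in this and the HeapUtils hunk, and the class below is invented for illustration:

import java.util.function.BiPredicate;

import org.apache.cassandra.io.util.File;

// Illustrative sketch only, not project code.
final class JarListingSketch
{
    static File[] jarsIn(File dir)
    {
        // Lambda replaces java.io.FilenameFilter; tryList(filter) replaces listFiles(filter).
        BiPredicate<File, String> isJar = (parent, name) -> name.endsWith(".jar");
        return dir.tryList(isJar);
    }
}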
diff --git a/src/java/org/apache/cassandra/triggers/TriggerExecutor.java b/src/java/org/apache/cassandra/triggers/TriggerExecutor.java
index 295003fff930..298ac5693293 100644
--- a/src/java/org/apache/cassandra/triggers/TriggerExecutor.java
+++ b/src/java/org/apache/cassandra/triggers/TriggerExecutor.java
@@ -18,7 +18,6 @@
*/
package org.apache.cassandra.triggers;
-import java.io.File;
import java.nio.ByteBuffer;
import java.util.*;
@@ -33,6 +32,7 @@
import org.apache.cassandra.db.partitions.PartitionUpdate;
import org.apache.cassandra.exceptions.CassandraException;
import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.schema.TableId;
import org.apache.cassandra.schema.TriggerMetadata;
import org.apache.cassandra.schema.Triggers;
diff --git a/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java b/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java
index d3c08b53cbed..8506ce5b98d3 100644
--- a/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java
+++ b/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java
@@ -17,10 +17,13 @@
*/
package org.apache.cassandra.utils;
+import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;
+import java.io.InputStream;
import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.io.util.DataInputPlus;
import org.apache.cassandra.io.util.DataOutputPlus;
import org.apache.cassandra.utils.obs.IBitSet;
import org.apache.cassandra.utils.obs.OffHeapBitSet;
@@ -38,7 +41,7 @@ public static void serialize(BloomFilter bf, DataOutputPlus out) throws IOException
}
@SuppressWarnings("resource")
- public static BloomFilter deserialize(DataInputStream in, boolean oldBfFormat) throws IOException
+ public static <I extends InputStream & DataInput> BloomFilter deserialize(I in, boolean oldBfFormat) throws IOException
{
int hashes = in.readInt();
IBitSet bs = OffHeapBitSet.deserialize(in, oldBfFormat);
diff --git a/src/java/org/apache/cassandra/utils/ByteArrayUtil.java b/src/java/org/apache/cassandra/utils/ByteArrayUtil.java
index 75734ada9690..3673f6760436 100644
--- a/src/java/org/apache/cassandra/utils/ByteArrayUtil.java
+++ b/src/java/org/apache/cassandra/utils/ByteArrayUtil.java
@@ -253,4 +253,4 @@ public static void copyBytes(byte[] src, int srcPos, ByteBuffer dst, int dstPos, int length)
{
FastByteOperations.copy(src, srcPos, dst, dstPos, length);
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/utils/DirectorySizeCalculator.java b/src/java/org/apache/cassandra/utils/DirectorySizeCalculator.java
index c1fb6e040c8b..97fc22ea83ad 100644
--- a/src/java/org/apache/cassandra/utils/DirectorySizeCalculator.java
+++ b/src/java/org/apache/cassandra/utils/DirectorySizeCalculator.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.utils;
-import java.io.File;
import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Path;
@@ -28,6 +27,8 @@
/**
* Walks directory recursively, summing up total contents of files within.
*/
+import org.apache.cassandra.io.util.File;
+
public class DirectorySizeCalculator extends SimpleFileVisitor<Path>
{
protected volatile long size = 0;
diff --git a/src/java/org/apache/cassandra/utils/ExecutorUtils.java b/src/java/org/apache/cassandra/utils/ExecutorUtils.java
index e26d776e0083..5bb841f32bdd 100644
--- a/src/java/org/apache/cassandra/utils/ExecutorUtils.java
+++ b/src/java/org/apache/cassandra/utils/ExecutorUtils.java
@@ -152,4 +152,4 @@ public static void shutdownNowAndWait(long timeout, TimeUnit unit, Object ... executors)
{
shutdownNowAndWait(timeout, unit, Arrays.asList(executors));
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/utils/FBUtilities.java b/src/java/org/apache/cassandra/utils/FBUtilities.java
index e3161e8e215d..93f951c53fff 100644
--- a/src/java/org/apache/cassandra/utils/FBUtilities.java
+++ b/src/java/org/apache/cassandra/utils/FBUtilities.java
@@ -17,7 +17,14 @@
*/
package org.apache.cassandra.utils;
-import java.io.*;
+
+import java.io.BufferedReader;
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
import java.lang.reflect.Field;
import java.math.BigInteger;
import java.net.*;
@@ -38,6 +45,7 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.utils.concurrent.*;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
@@ -364,7 +372,7 @@ public static String resourceToFile(String filename) throws ConfigurationException
if (scpurl == null)
throw new ConfigurationException("unable to locate " + filename);
- return new File(scpurl.getFile()).getAbsolutePath();
+ return new File(scpurl.getFile()).absolutePath();
}
public static File cassandraTriggerDir()
diff --git a/src/java/org/apache/cassandra/utils/HeapUtils.java b/src/java/org/apache/cassandra/utils/HeapUtils.java
index 4dd0d46b43d2..c0910d87fc86 100644
--- a/src/java/org/apache/cassandra/utils/HeapUtils.java
+++ b/src/java/org/apache/cassandra/utils/HeapUtils.java
@@ -17,9 +17,13 @@
*/
package org.apache.cassandra.utils;
-import java.io.*;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
import java.lang.management.ManagementFactory;
+import org.apache.cassandra.io.util.File;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.text.StrBuilder;
@@ -81,14 +85,8 @@ private static String getJcmdPath()
if (javaHome == null)
return null;
File javaBinDirectory = new File(javaHome, "bin");
- File[] files = javaBinDirectory.listFiles(new FilenameFilter()
- {
- public boolean accept(File dir, String name)
- {
- return name.startsWith("jcmd");
- }
- });
- return ArrayUtils.isEmpty(files) ? null : files[0].getPath();
+ File[] files = javaBinDirectory.tryList((dir, name) -> name.startsWith("jcmd"));
+ return ArrayUtils.isEmpty(files) ? null : files[0].path();
}
/**
diff --git a/src/java/org/apache/cassandra/utils/IndexedSearchIterator.java b/src/java/org/apache/cassandra/utils/IndexedSearchIterator.java
index 597e5bbb2353..bd2f70a7417b 100644
--- a/src/java/org/apache/cassandra/utils/IndexedSearchIterator.java
+++ b/src/java/org/apache/cassandra/utils/IndexedSearchIterator.java
@@ -35,4 +35,4 @@ public interface IndexedSearchIterator<K, V> extends SearchIterator<K, V>
* @throws java.util.NoSuchElementException if next() returned null
*/
public int indexOfCurrent();
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/utils/JVMStabilityInspector.java b/src/java/org/apache/cassandra/utils/JVMStabilityInspector.java
index 158baf2556ad..6470f1ff3d8b 100644
--- a/src/java/org/apache/cassandra/utils/JVMStabilityInspector.java
+++ b/src/java/org/apache/cassandra/utils/JVMStabilityInspector.java
@@ -19,6 +19,7 @@
import java.io.FileNotFoundException;
import java.net.SocketException;
+import java.nio.file.FileSystemException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@@ -141,7 +142,7 @@ else if (t instanceof UnrecoverableIllegalStateException)
fn.accept(t);
// Check for file handle exhaustion
- if (t instanceof FileNotFoundException || t instanceof SocketException)
+ if (t instanceof FileNotFoundException || t instanceof FileSystemException || t instanceof SocketException)
if (t.getMessage() != null && t.getMessage().contains("Too many open files"))
isUnstable = true;
diff --git a/src/java/org/apache/cassandra/utils/NativeLibrary.java b/src/java/org/apache/cassandra/utils/NativeLibrary.java
index e5b5da7308ff..01225aa37e4f 100644
--- a/src/java/org/apache/cassandra/utils/NativeLibrary.java
+++ b/src/java/org/apache/cassandra/utils/NativeLibrary.java
@@ -17,14 +17,14 @@
*/
package org.apache.cassandra.utils;
-import java.io.File;
import java.io.FileDescriptor;
-import java.io.FileInputStream;
import java.io.IOException;
import java.lang.reflect.Field;
import java.nio.channels.FileChannel;
import java.util.concurrent.TimeUnit;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -216,7 +216,7 @@ public static void trySkipCache(String path, long offset, long len)
if (!f.exists())
return;
- try (FileInputStream fis = new FileInputStream(f))
+ try (FileInputStreamPlus fis = new FileInputStreamPlus(f))
{
trySkipCache(getfd(fis.getChannel()), offset, len, path);
}
diff --git a/src/java/org/apache/cassandra/utils/ResourceWatcher.java b/src/java/org/apache/cassandra/utils/ResourceWatcher.java
index 5e7cbdd4f3da..e8dcb8574372 100644
--- a/src/java/org/apache/cassandra/utils/ResourceWatcher.java
+++ b/src/java/org/apache/cassandra/utils/ResourceWatcher.java
@@ -17,9 +17,9 @@
*/
package org.apache.cassandra.utils;
-import java.io.File;
import java.util.concurrent.TimeUnit;
+import org.apache.cassandra.io.util.File;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/src/java/org/apache/cassandra/utils/SyncUtil.java b/src/java/org/apache/cassandra/utils/SyncUtil.java
index b4a4bee2be4a..6055859531d1 100644
--- a/src/java/org/apache/cassandra/utils/SyncUtil.java
+++ b/src/java/org/apache/cassandra/utils/SyncUtil.java
@@ -30,6 +30,7 @@
import org.apache.cassandra.config.Config;
import com.google.common.base.Preconditions;
+import org.apache.cassandra.io.util.File;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -172,12 +173,6 @@ public static void force(FileChannel fc, boolean metaData) throws IOException
}
}
- public static void sync(RandomAccessFile ras) throws IOException
- {
- Preconditions.checkNotNull(ras);
- sync(ras.getFD());
- }
-
public static void sync(FileOutputStream fos) throws IOException
{
Preconditions.checkNotNull(fos);
@@ -197,7 +192,7 @@ public static void trySyncDir(File dir)
if (SKIP_SYNC)
return;
- int directoryFD = NativeLibrary.tryOpenDirectory(dir.getPath());
+ int directoryFD = NativeLibrary.tryOpenDirectory(dir.path());
try
{
trySync(directoryFD);
diff --git a/src/java/org/apache/cassandra/utils/Throwables.java b/src/java/org/apache/cassandra/utils/Throwables.java
index 73e7d24ee5f7..7012132c0362 100644
--- a/src/java/org/apache/cassandra/utils/Throwables.java
+++ b/src/java/org/apache/cassandra/utils/Throwables.java
@@ -18,7 +18,7 @@
*/
package org.apache.cassandra.utils;
-import java.io.File;
+import org.apache.cassandra.io.util.File;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.Arrays;
@@ -152,7 +152,7 @@ public static Throwable perform(Throwable accumulate, Iterator<? extends Discret
@SafeVarargs
public static void perform(File against, FileOpType opType, DiscreteAction<? extends IOException> ... actions)
{
- perform(against.getPath(), opType, actions);
+ perform(against.path(), opType, actions);
}
@SafeVarargs
diff --git a/src/java/org/apache/cassandra/utils/binlog/BinLog.java b/src/java/org/apache/cassandra/utils/binlog/BinLog.java
index 8b8588afcaf4..a9bb55ac8d84 100644
--- a/src/java/org/apache/cassandra/utils/binlog/BinLog.java
+++ b/src/java/org/apache/cassandra/utils/binlog/BinLog.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.utils.binlog;
-import java.io.File;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
@@ -31,6 +30,7 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
+import org.apache.cassandra.io.util.File;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -363,11 +363,11 @@ public static class Builder
public Builder path(Path path)
{
Preconditions.checkNotNull(path, "path was null");
- File pathAsFile = path.toFile();
+ File pathAsFile = new File(path);
//Exists and is a directory or can be created
Preconditions.checkArgument(!pathAsFile.toString().isEmpty(), "you might have forgotten to specify a directory to save logs");
- Preconditions.checkArgument((pathAsFile.exists() && pathAsFile.isDirectory()) || (!pathAsFile.exists() && pathAsFile.mkdirs()), "path exists and is not a directory or couldn't be created");
- Preconditions.checkArgument(pathAsFile.canRead() && pathAsFile.canWrite() && pathAsFile.canExecute(), "path is not readable, writable, and executable");
+ Preconditions.checkArgument((pathAsFile.exists() && pathAsFile.isDirectory()) || (!pathAsFile.exists() && pathAsFile.tryCreateDirectories()), "path exists and is not a directory or couldn't be created");
+ Preconditions.checkArgument(pathAsFile.isReadable() && pathAsFile.isWritable() && pathAsFile.isExecutable(), "path is not readable, writable, and executable");
this.path = path;
return this;
}
@@ -432,7 +432,7 @@ public BinLog build(boolean cleanDirectory)
logger.info("Cleaning directory: {} as requested", path);
if (path.toFile().exists())
{
- Throwable error = cleanDirectory(path.toFile(), null);
+ Throwable error = cleanDirectory(new File(path), null);
if (error != null)
{
throw new RuntimeException(error);
@@ -471,7 +471,7 @@ public static Throwable cleanDirectory(File directory, Throwable accumulate)
{
return Throwables.merge(accumulate, new RuntimeException(String.format("%s is not a directory", directory)));
}
- for (File f : directory.listFiles())
+ for (File f : directory.tryList())
{
accumulate = deleteRecursively(f, accumulate);
}
@@ -486,7 +486,7 @@ private static Throwable deleteRecursively(File fileOrDirectory, Throwable accum
{
if (fileOrDirectory.isDirectory())
{
- for (File f : fileOrDirectory.listFiles())
+ for (File f : fileOrDirectory.tryList())
{
accumulate = FileUtils.deleteWithConfirm(f, accumulate);
}
diff --git a/src/java/org/apache/cassandra/utils/btree/LeafBTreeSearchIterator.java b/src/java/org/apache/cassandra/utils/btree/LeafBTreeSearchIterator.java
index a23f460812b4..3c1991afd123 100644
--- a/src/java/org/apache/cassandra/utils/btree/LeafBTreeSearchIterator.java
+++ b/src/java/org/apache/cassandra/utils/btree/LeafBTreeSearchIterator.java
@@ -133,4 +133,4 @@ public int indexOfCurrent()
int current = forwards ? nextPos - 1 : nextPos + 1;
return forwards ? current - lowerBound : upperBound - current;
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/utils/btree/NodeCursor.java b/src/java/org/apache/cassandra/utils/btree/NodeCursor.java
index e9fa89e9317f..4c7e9935181b 100644
--- a/src/java/org/apache/cassandra/utils/btree/NodeCursor.java
+++ b/src/java/org/apache/cassandra/utils/btree/NodeCursor.java
@@ -195,4 +195,4 @@ K value()
{
return (K) node[position];
}
-}
\ No newline at end of file
+}
diff --git a/src/java/org/apache/cassandra/utils/obs/OffHeapBitSet.java b/src/java/org/apache/cassandra/utils/obs/OffHeapBitSet.java
index 8d118e7f300f..8b0550f278c3 100644
--- a/src/java/org/apache/cassandra/utils/obs/OffHeapBitSet.java
+++ b/src/java/org/apache/cassandra/utils/obs/OffHeapBitSet.java
@@ -17,7 +17,7 @@
*/
package org.apache.cassandra.utils.obs;
-import java.io.DataInputStream;
+import java.io.*;
import java.io.IOException;
import com.google.common.annotations.VisibleForTesting;
@@ -143,7 +143,7 @@ public long serializedSize()
}
@SuppressWarnings("resource")
- public static OffHeapBitSet deserialize(DataInputStream in, boolean oldBfFormat) throws IOException
+ public static <I extends InputStream & DataInput> OffHeapBitSet deserialize(I in, boolean oldBfFormat) throws IOException
{
long byteCount = in.readInt() * 8L;
Memory memory = Memory.allocate(byteCount);
diff --git a/src/java/org/apache/cassandra/utils/streamhist/HistogramDataConsumer.java b/src/java/org/apache/cassandra/utils/streamhist/HistogramDataConsumer.java
old mode 100755
new mode 100644
diff --git a/src/java/org/apache/cassandra/utils/streamhist/StreamingTombstoneHistogramBuilder.java b/src/java/org/apache/cassandra/utils/streamhist/StreamingTombstoneHistogramBuilder.java
old mode 100755
new mode 100644
diff --git a/src/java/org/apache/cassandra/utils/streamhist/TombstoneHistogram.java b/src/java/org/apache/cassandra/utils/streamhist/TombstoneHistogram.java
old mode 100755
new mode 100644
diff --git a/test/burn/org/apache/cassandra/net/GlobalInboundSettings.java b/test/burn/org/apache/cassandra/net/GlobalInboundSettings.java
index 9b23041ae41c..da453fed4e30 100644
--- a/test/burn/org/apache/cassandra/net/GlobalInboundSettings.java
+++ b/test/burn/org/apache/cassandra/net/GlobalInboundSettings.java
@@ -54,4 +54,4 @@ GlobalInboundSettings withTemplate(InboundConnectionSettings template)
{
return new GlobalInboundSettings(queueCapacity, endpointReserveLimit, globalReserveLimit, template);
}
-}
\ No newline at end of file
+}
diff --git a/test/burn/org/apache/cassandra/transport/DriverBurnTest.java b/test/burn/org/apache/cassandra/transport/DriverBurnTest.java
index 56558672b9cf..7856b1b40d35 100644
--- a/test/burn/org/apache/cassandra/transport/DriverBurnTest.java
+++ b/test/burn/org/apache/cassandra/transport/DriverBurnTest.java
@@ -438,4 +438,4 @@ public int encodedSize(QueryMessage queryMessage, ProtocolVersion version)
System.out.println("99p: " + stats.getPercentile(0.99));
}
}
-// TODO: test disconnecting and reconnecting constantly
\ No newline at end of file
+// TODO: test disconnecting and reconnecting constantly
diff --git a/test/burn/org/apache/cassandra/transport/SimpleClientBurnTest.java b/test/burn/org/apache/cassandra/transport/SimpleClientBurnTest.java
index 7e57916eb1a4..2d863cf02049 100644
--- a/test/burn/org/apache/cassandra/transport/SimpleClientBurnTest.java
+++ b/test/burn/org/apache/cassandra/transport/SimpleClientBurnTest.java
@@ -212,4 +212,4 @@ public int encodedSize(QueryMessage queryMessage, ProtocolVersion version)
server.stop();
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java b/test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java
index 201555f70c2e..217dcd5d6c91 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.distributed.impl;
-import java.io.File;
import java.lang.annotation.Annotation;
import java.net.InetSocketAddress;
import java.util.ArrayList;
@@ -45,7 +44,6 @@
import javax.annotation.concurrent.GuardedBy;
import com.google.common.collect.Sets;
-import org.apache.cassandra.utils.concurrent.Condition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -78,7 +76,10 @@
import org.apache.cassandra.distributed.shared.ShutdownException;
import org.apache.cassandra.distributed.shared.Versions;
import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.PathUtils;
import org.apache.cassandra.net.Verb;
+import org.apache.cassandra.utils.concurrent.Condition;
import org.apache.cassandra.utils.FBUtilities;
import org.reflections.Reflections;
import org.reflections.util.ConfigurationBuilder;
@@ -388,7 +389,7 @@ public String toString()
protected AbstractCluster(AbstractBuilder<I, ? extends ICluster<I>, ?> builder)
{
- this.root = builder.getRoot();
+ this.root = new File(builder.getRoot());
this.sharedClassLoader = builder.getSharedClassLoader();
this.subnet = builder.getSubnet();
this.tokenSupplier = builder.getTokenSupplier();
@@ -857,6 +858,7 @@ public void close()
instances.clear();
instanceMap.clear();
+ PathUtils.setDeletionListener(ignore -> {});
// Make sure to only delete directory when threads are stopped
if (root.exists())
FileUtils.deleteRecursive(root);
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java b/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java
index 0f48a234fa94..14db56183da3 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.distributed.impl;
-import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
@@ -28,6 +27,7 @@
import com.google.common.io.Closeables;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.utils.AbstractIterator;
import org.apache.cassandra.distributed.api.LogAction;
import org.apache.cassandra.distributed.api.LineIterator;
@@ -53,7 +53,7 @@ public LineIterator match(long startPosition, Predicate<String> fn)
RandomAccessFile reader;
try
{
- reader = new RandomAccessFile(file, "r");
+ reader = new RandomAccessFile(file.toJavaIOFile(), "r");
}
catch (FileNotFoundException e)
{
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/Instance.java b/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
index c980f3f2040f..cd5bb4af8dce 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
@@ -20,7 +20,6 @@
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
-import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
@@ -97,6 +96,7 @@
import org.apache.cassandra.io.util.DataInputBuffer;
import org.apache.cassandra.io.util.DataOutputBuffer;
import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.metrics.CassandraMetricsRegistry;
@@ -200,7 +200,7 @@ public LogAction logs()
if (!f.exists())
f = new File(String.format("build/test/logs/%s/%s/%s/system.log", tag, clusterId, instanceId));
if (!f.exists())
- throw new AssertionError("Unable to locate system.log under " + new File("build/test/logs").getAbsolutePath() + "; make sure ICluster.setup() is called or extend TestBaseImpl and do not define a static beforeClass function with @BeforeClass");
+ throw new AssertionError("Unable to locate system.log under " + new File("build/test/logs").absolutePath() + "; make sure ICluster.setup() is called or extend TestBaseImpl and do not define a static beforeClass function with @BeforeClass");
return new FileLogAction(f);
}
@@ -606,11 +606,11 @@ public void startup(ICluster cluster)
private void mkdirs()
{
- new File(config.getString("saved_caches_directory")).mkdirs();
- new File(config.getString("hints_directory")).mkdirs();
- new File(config.getString("commitlog_directory")).mkdirs();
+ new File(config.getString("saved_caches_directory")).tryCreateDirectories();
+ new File(config.getString("hints_directory")).tryCreateDirectories();
+ new File(config.getString("commitlog_directory")).tryCreateDirectories();
for (String dir : (String[]) config.get("data_file_directories"))
- new File(dir).mkdirs();
+ new File(dir).tryCreateDirectories();
}
private Config loadConfig(IInstanceConfig overrides)
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java b/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java
index 1bbdd0be1ee3..81e254d3e5b2 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.distributed.impl;
-import java.io.File;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.Collections;
@@ -37,6 +36,7 @@
import org.apache.cassandra.distributed.shared.NetworkTopology;
import org.apache.cassandra.distributed.shared.Shared;
import org.apache.cassandra.distributed.upgrade.UpgradeTestBase;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.locator.SimpleSeedProvider;
@@ -275,7 +275,7 @@ public static InstanceConfig generate(int nodeNum,
private static String[] datadirs(int datadirCount, File root, int nodeNum)
{
- String datadirFormat = String.format("%s/node%d/data%%d", root.getPath(), nodeNum);
+ String datadirFormat = String.format("%s/node%d/data%%d", root.path(), nodeNum);
String [] datadirs = new String[datadirCount];
for (int i = 0; i < datadirs.length; i++)
datadirs[i] = String.format(datadirFormat, i);
diff --git a/test/distributed/org/apache/cassandra/distributed/shared/Byteman.java b/test/distributed/org/apache/cassandra/distributed/shared/Byteman.java
index bc27ec765e21..b4dd10c3f46f 100644
--- a/test/distributed/org/apache/cassandra/distributed/shared/Byteman.java
+++ b/test/distributed/org/apache/cassandra/distributed/shared/Byteman.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.distributed.shared;
-import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.lang.reflect.Method;
@@ -42,6 +41,7 @@
import com.google.common.base.StandardSystemProperty;
import com.google.common.io.ByteStreams;
import com.google.common.io.Files;
+import org.apache.cassandra.io.util.File;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -96,7 +96,7 @@ public static Byteman createFromScripts(String... scripts)
List<String> texts = Stream.of(scripts).map(p -> {
try
{
- return Files.toString(new File(p), StandardCharsets.UTF_8);
+ return Files.toString(new File(p).toJavaIOFile(), StandardCharsets.UTF_8);
}
catch (IOException e)
{
@@ -155,11 +155,11 @@ public void install(ClassLoader cl)
if (DEBUG_TRANSFORMATIONS)
{
File f = new File(StandardSystemProperty.JAVA_IO_TMPDIR.value(), "byteman/" + details.klassPath + ".class");
- f.getParentFile().mkdirs();
- File original = new File(f.getParentFile(), "original-" + f.getName());
- logger.info("Writing class file for {} to {}", details.klassPath, f.getAbsolutePath());
- Files.asByteSink(f).write(newBytes);
- Files.asByteSink(original).write(details.bytes);
+ f.parent().tryCreateDirectories();
+ File original = new File(f.parent(), "original-" + f.name());
+ logger.info("Writing class file for {} to {}", details.klassPath, f.absolutePath());
+ Files.asByteSink(f.toJavaIOFile()).write(newBytes);
+ Files.asByteSink(original.toJavaIOFile()).write(details.bytes);
}
}
}
diff --git a/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java b/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java
index 382f5a7f968b..1821f9c87807 100644
--- a/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java
+++ b/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.distributed.shared;
-import java.io.File;
import java.lang.reflect.Field;
import java.net.InetSocketAddress;
import java.util.ArrayList;
@@ -40,6 +39,7 @@
import java.util.stream.Collectors;
import com.google.common.util.concurrent.Futures;
+import org.apache.cassandra.io.util.File;
import org.junit.Assert;
import org.apache.cassandra.dht.Token;
diff --git a/test/distributed/org/apache/cassandra/distributed/shared/ShutdownException.java b/test/distributed/org/apache/cassandra/distributed/shared/ShutdownException.java
index d2b5bf7d35cf..973835704c47 100644
--- a/test/distributed/org/apache/cassandra/distributed/shared/ShutdownException.java
+++ b/test/distributed/org/apache/cassandra/distributed/shared/ShutdownException.java
@@ -27,4 +27,4 @@ public ShutdownException(List<Throwable> uncaughtExceptions)
super("Uncaught exceptions were thrown during test");
uncaughtExceptions.forEach(super::addSuppressed);
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/FrozenUDTTest.java b/test/distributed/org/apache/cassandra/distributed/test/FrozenUDTTest.java
index 2a45b86dc9e8..3b54398406d6 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/FrozenUDTTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/FrozenUDTTest.java
@@ -150,4 +150,4 @@ private String json(int i, int j)
{
return String.format("system.fromjson('{\"foo\":\"%d\", \"bar\":\"%d\"}')", i, j);
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java b/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java
index 0a81359dcff9..e1733ce00eff 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java
@@ -94,4 +94,4 @@ public void test() throws Throwable
{
testLargeColumns(2, 16 << 20, 5);
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/MessageFiltersTest.java b/test/distributed/org/apache/cassandra/distributed/test/MessageFiltersTest.java
index 6ea186c6eec5..9cae1bdc4ff1 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/MessageFiltersTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/MessageFiltersTest.java
@@ -335,4 +335,4 @@ private static void assertTimeOut(Runnable r)
// ignore
}
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/MessageForwardingTest.java b/test/distributed/org/apache/cassandra/distributed/test/MessageForwardingTest.java
index 153d7de70647..f92a3157e8e3 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/MessageForwardingTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/MessageForwardingTest.java
@@ -100,4 +100,4 @@ else if (traceEntry.activity.contains("Enqueuing forwarded write to "))
TracingUtil.setWaitForTracingEventTimeoutSecs(originalTraceTimeout);
}
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java b/test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java
index 0905b92f538f..d2febdb6b297 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java
@@ -78,4 +78,4 @@ public void withCounters() throws Throwable
cluster.close();
}
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/NetstatsRepairStreamingTest.java b/test/distributed/org/apache/cassandra/distributed/test/NetstatsRepairStreamingTest.java
index 5f74c7773b9a..36bde630a795 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/NetstatsRepairStreamingTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/NetstatsRepairStreamingTest.java
@@ -85,4 +85,4 @@ private void executeTest(boolean compressionEnabled) throws Exception
NetstatsOutputParser.validate(NetstatsOutputParser.parse(results));
}
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairEmptyRangeTombstonesTest.java b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairEmptyRangeTombstonesTest.java
index c07b128e89cb..44ee07ebdc62 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairEmptyRangeTombstonesTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairEmptyRangeTombstonesTest.java
@@ -286,4 +286,4 @@ else if (reverse)
return this;
}
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTester.java b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTester.java
index c3a36cbb58d2..a73d9687d36c 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTester.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTester.java
@@ -159,4 +159,4 @@ static long readRepairRequestsCount(IInvokableInstance node, String table)
return cfs.metric.readRepairRequests.getCount();
});
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ReplicaFilteringProtectionTest.java b/test/distributed/org/apache/cassandra/distributed/test/ReplicaFilteringProtectionTest.java
index f891dfee5c60..2e39f18ad0bb 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ReplicaFilteringProtectionTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ReplicaFilteringProtectionTest.java
@@ -241,4 +241,4 @@ private long rowsCachedPerQueryCount(IInvokableInstance instance, String tableNa
.getColumnFamilyStore(tableName)
.metric.rfpRowsCachedPerQuery.getCount());
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ResourceLeakTest.java b/test/distributed/org/apache/cassandra/distributed/test/ResourceLeakTest.java
index 5430800c02aa..8358c2e27aed 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ResourceLeakTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ResourceLeakTest.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.distributed.test;
-import java.io.File;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.nio.file.FileSystems;
@@ -29,6 +28,7 @@
import java.util.function.Consumer;
import javax.management.MBeanServer;
+import org.apache.cassandra.io.util.File;
import org.junit.Ignore;
import org.junit.Test;
@@ -123,7 +123,7 @@ static void dumpOpenFiles(String description) throws IOException, InterruptedExc
long pid = getProcessId();
ProcessBuilder map = new ProcessBuilder("/usr/sbin/lsof", "-p", Long.toString(pid));
File output = new File(outputFilename("lsof", description, ".txt"));
- map.redirectOutput(output);
+ map.redirectOutput(output.toJavaIOFile());
map.redirectErrorStream(true);
map.start().waitFor();
}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ShortReadProtectionTest.java b/test/distributed/org/apache/cassandra/distributed/test/ShortReadProtectionTest.java
index 69b074f5a1b4..2e26659243a6 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ShortReadProtectionTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ShortReadProtectionTest.java
@@ -544,4 +544,4 @@ private void dropTable()
cluster.schemaChange(format("DROP TABLE IF EXISTS %s"));
}
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ring/BootstrapTest.java b/test/distributed/org/apache/cassandra/distributed/test/ring/BootstrapTest.java
index 81861abc80c1..fd62d30e3ba5 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ring/BootstrapTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ring/BootstrapTest.java
@@ -120,4 +120,4 @@ public static Map<Integer, Long> count(ICluster cluster)
.collect(Collectors.toMap(nodeId -> nodeId,
nodeId -> (Long) cluster.get(nodeId).executeInternal("SELECT count(*) FROM " + KEYSPACE + ".tbl")[0][0]));
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ring/CommunicationDuringDecommissionTest.java b/test/distributed/org/apache/cassandra/distributed/test/ring/CommunicationDuringDecommissionTest.java
index d6825d9adac5..848318c95dfa 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ring/CommunicationDuringDecommissionTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ring/CommunicationDuringDecommissionTest.java
@@ -73,4 +73,4 @@ public void internodeConnectionsDuringDecom() throws Throwable
}
}
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/ring/PendingWritesTest.java b/test/distributed/org/apache/cassandra/distributed/test/ring/PendingWritesTest.java
index 8daa58adedf1..2e702b219b05 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/ring/PendingWritesTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/ring/PendingWritesTest.java
@@ -106,4 +106,4 @@ public void testPendingWrites() throws Throwable
Assert.assertEquals("Node " + e.getKey() + " has incorrect row state", e.getValue().longValue(), 150L);
}
}
-}
\ No newline at end of file
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java
index 4e3074d67100..fcb04824c2fd 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java
@@ -100,4 +100,4 @@ public void mixedModeReadRepairUpdate() throws Throwable
})
.run();
}
-}
\ No newline at end of file
+}
diff --git a/test/long/org/apache/cassandra/cql3/CorruptionTest.java b/test/long/org/apache/cassandra/cql3/CorruptionTest.java
index f2ed36a6f937..8068a2df8b14 100644
--- a/test/long/org/apache/cassandra/cql3/CorruptionTest.java
+++ b/test/long/org/apache/cassandra/cql3/CorruptionTest.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.cql3;
-import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -28,6 +27,7 @@
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
+import org.apache.cassandra.io.util.File;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -145,10 +145,10 @@ private void dumpKeys(byte[] putdata, byte[] getdata) throws IOException {
String basename = "bad-data-tid" + Thread.currentThread().getId();
File put = new File(basename+"-put");
File get = new File(basename+"-get");
- try(FileWriter pw = new FileWriter(put)) {
+ try(FileWriter pw = new FileWriter(put.toJavaIOFile())) {
pw.write(new String(putdata));
}
- try(FileWriter pw = new FileWriter(get)) {
+ try(FileWriter pw = new FileWriter(get.toJavaIOFile())) {
pw.write(new String(getdata));
}
}
diff --git a/test/long/org/apache/cassandra/db/commitlog/CommitLogStressTest.java b/test/long/org/apache/cassandra/db/commitlog/CommitLogStressTest.java
index a4f98e9945d2..93faf252e184 100644
--- a/test/long/org/apache/cassandra/db/commitlog/CommitLogStressTest.java
+++ b/test/long/org/apache/cassandra/db/commitlog/CommitLogStressTest.java
@@ -30,6 +30,8 @@
import com.google.common.util.concurrent.RateLimiter;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -101,7 +103,7 @@ public CommitLogStressTest(ParameterizedClass commitLogCompression, EncryptionCo
@BeforeClass
static public void initialize() throws IOException
{
- try (FileInputStream fis = new FileInputStream("CHANGES.txt"))
+ try (FileInputStreamPlus fis = new FileInputStreamPlus("CHANGES.txt"))
{
dataSource = ByteBuffer.allocateDirect((int) fis.getChannel().size());
while (dataSource.hasRemaining())
@@ -123,15 +125,15 @@ public void cleanDir() throws IOException
File dir = new File(location);
if (dir.isDirectory())
{
- File[] files = dir.listFiles();
+ File[] files = dir.tryList();
for (File f : files)
- if (!f.delete())
+ if (!f.tryDelete())
Assert.fail("Failed to delete " + f);
}
else
{
- dir.mkdir();
+ dir.tryCreateDirectory();
}
}
@@ -245,13 +247,13 @@ private void testLog(CommitLog commitLog) throws IOException, InterruptedExcepti
System.out.println("Stopped. Replaying... ");
System.out.flush();
Reader reader = new Reader();
- File[] files = new File(location).listFiles();
+ File[] files = new File(location).tryList();
DummyHandler handler = new DummyHandler();
reader.readAllFiles(handler, files);
for (File f : files)
- if (!f.delete())
+ if (!f.tryDelete())
Assert.fail("Failed to delete " + f);
if (hash == reader.hash && cells == reader.cells)
@@ -278,7 +280,7 @@ private void verifySizes(CommitLog commitLog)
commitLog.segmentManager.awaitManagementTasksCompletion();
long combinedSize = 0;
- for (File f : new File(commitLog.segmentManager.storageDirectory).listFiles())
+ for (File f : new File(commitLog.segmentManager.storageDirectory).tryList())
combinedSize += f.length();
Assert.assertEquals(combinedSize, commitLog.getActiveOnDiskSize());
diff --git a/test/long/org/apache/cassandra/dht/tokenallocator/AbstractReplicationAwareTokenAllocatorTest.java b/test/long/org/apache/cassandra/dht/tokenallocator/AbstractReplicationAwareTokenAllocatorTest.java
index 5f9aa31467af..420981d19d99 100644
--- a/test/long/org/apache/cassandra/dht/tokenallocator/AbstractReplicationAwareTokenAllocatorTest.java
+++ b/test/long/org/apache/cassandra/dht/tokenallocator/AbstractReplicationAwareTokenAllocatorTest.java
@@ -540,4 +540,4 @@ private void updateSummary(ReplicationAwareTokenAllocator<Unit> t, Summary su, S
System.out.format("Worst intermediate unit\t%s token %s\n", su, st);
}
}
-}
\ No newline at end of file
+}
diff --git a/test/long/org/apache/cassandra/hints/HintsWriteThenReadTest.java b/test/long/org/apache/cassandra/hints/HintsWriteThenReadTest.java
index 24a9a78a7abf..8b43c8e076a8 100644
--- a/test/long/org/apache/cassandra/hints/HintsWriteThenReadTest.java
+++ b/test/long/org/apache/cassandra/hints/HintsWriteThenReadTest.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.hints;
-import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
@@ -29,6 +28,7 @@
import com.google.common.collect.Iterables;
+import org.apache.cassandra.io.util.File;
import org.junit.Test;
import org.apache.cassandra.SchemaLoader;
@@ -63,7 +63,7 @@ public void testWriteReadCycle() throws IOException
HintsDescriptor descriptor = new HintsDescriptor(UUID.randomUUID(), System.currentTimeMillis());
- File directory = Files.createTempDirectory(null).toFile();
+ File directory = new File(Files.createTempDirectory(null));
try
{
testWriteReadCycle(directory, descriptor);
diff --git a/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java b/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java
index 19a8ec36f2da..9499401026e2 100644
--- a/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java
+++ b/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java
@@ -20,6 +20,8 @@
*/
package org.apache.cassandra.io.compress;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
+
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -116,7 +118,7 @@ private static void testPerformance(ICompressor compressor, BufferType in, Buffe
public static void main(String[] args) throws IOException
{
- try (FileInputStream fis = new FileInputStream("CHANGES.txt"))
+ try (FileInputStreamPlus fis = new FileInputStreamPlus("CHANGES.txt"))
{
int len = (int)fis.getChannel().size();
dataSource = ByteBuffer.allocateDirect(len);
diff --git a/test/long/org/apache/cassandra/io/sstable/CQLSSTableWriterLongTest.java b/test/long/org/apache/cassandra/io/sstable/CQLSSTableWriterLongTest.java
index a6f428a22396..f2bbfa6a6023 100644
--- a/test/long/org/apache/cassandra/io/sstable/CQLSSTableWriterLongTest.java
+++ b/test/long/org/apache/cassandra/io/sstable/CQLSSTableWriterLongTest.java
@@ -18,13 +18,13 @@
package org.apache.cassandra.io.sstable;
-import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import com.google.common.io.Files;
+import org.apache.cassandra.io.util.File;
import org.junit.BeforeClass;
import org.junit.Test;
import org.apache.cassandra.SchemaLoader;
@@ -46,9 +46,9 @@ public void testWideRow() throws Exception
String TABLE = "table1";
int size = 30000;
- File tempdir = Files.createTempDir();
- File dataDir = new File(tempdir.getAbsolutePath() + File.separator + KS + File.separator + TABLE);
- assert dataDir.mkdirs();
+ File tempdir = new File(Files.createTempDir());
+ File dataDir = new File(tempdir.absolutePath() + File.pathSeparator() + KS + File.pathSeparator() + TABLE);
+ assert dataDir.tryCreateDirectories();
StringBuilder schemaColumns = new StringBuilder();
StringBuilder queryColumns = new StringBuilder();
diff --git a/test/long/org/apache/cassandra/streaming/LongStreamingTest.java b/test/long/org/apache/cassandra/streaming/LongStreamingTest.java
index 186476078cf4..c4259fbbc070 100644
--- a/test/long/org/apache/cassandra/streaming/LongStreamingTest.java
+++ b/test/long/org/apache/cassandra/streaming/LongStreamingTest.java
@@ -18,12 +18,12 @@
package org.apache.cassandra.streaming;
-import java.io.File;
import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import com.google.common.io.Files;
+import org.apache.cassandra.io.util.File;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -80,9 +80,9 @@ private void testStream(boolean useSstableCompression) throws InvalidRequestExce
String KS = useSstableCompression ? "sstable_compression_ks" : "stream_compression_ks";
String TABLE = "table1";
- File tempdir = Files.createTempDir();
- File dataDir = new File(tempdir.getAbsolutePath() + File.separator + KS + File.separator + TABLE);
- assert dataDir.mkdirs();
+ File tempdir = new File(Files.createTempDir());
+ File dataDir = new File(tempdir.absolutePath() + File.pathSeparator() + KS + File.pathSeparator() + TABLE);
+ assert dataDir.tryCreateDirectories();
String schema = "CREATE TABLE " + KS + '.' + TABLE + " ("
+ " k int PRIMARY KEY,"
@@ -108,11 +108,11 @@ private void testStream(boolean useSstableCompression) throws InvalidRequestExce
writer.close();
System.err.println(String.format("Writer finished after %d seconds....", TimeUnit.NANOSECONDS.toSeconds(nanoTime() - start)));
- File[] dataFiles = dataDir.listFiles((dir, name) -> name.endsWith("-Data.db"));
+ File[] dataFiles = dataDir.tryList((dir, name) -> name.endsWith("-Data.db"));
long dataSize = 0l;
for (File file : dataFiles)
{
- System.err.println("File : "+file.getAbsolutePath());
+ System.err.println("File : "+file.absolutePath());
dataSize += file.length();
}
diff --git a/test/microbench/org/apache/cassandra/test/microbench/BloomFilterSerializerBench.java b/test/microbench/org/apache/cassandra/test/microbench/BloomFilterSerializerBench.java
index 922281145f52..32e048dca7b2 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/BloomFilterSerializerBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/BloomFilterSerializerBench.java
@@ -18,18 +18,16 @@
package org.apache.cassandra.test.microbench;
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
+import org.apache.cassandra.io.util.DataOutputStreamPlus;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.concurrent.TimeUnit;
import org.apache.cassandra.db.BufferDecoratedKey;
import org.apache.cassandra.dht.Murmur3Partitioner;
-import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.BloomFilter;
import org.apache.cassandra.utils.BloomFilterSerializer;
@@ -77,7 +75,7 @@ public void serializationTest() throws IOException
{
BloomFilter filter = (BloomFilter) FilterFactory.getFilter(numElemsInK * 1024, 0.01d);
filter.add(wrap(testVal));
- DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(new FileOutputStream(file));
+ DataOutputStreamPlus out = new FileOutputStreamPlus(file);
if (oldBfFormat)
SerializationsTest.serializeOldBfFormat(filter, out);
else
@@ -85,14 +83,14 @@ public void serializationTest() throws IOException
out.close();
filter.close();
- DataInputStream in = new DataInputStream(new FileInputStream(file));
+ FileInputStreamPlus in = new FileInputStreamPlus(file);
BloomFilter filter2 = BloomFilterSerializer.deserialize(in, oldBfFormat);
FileUtils.closeQuietly(in);
filter2.close();
}
finally
{
- file.delete();
+ file.tryDelete();
}
}
diff --git a/test/microbench/org/apache/cassandra/test/microbench/CompactionBench.java b/test/microbench/org/apache/cassandra/test/microbench/CompactionBench.java
index 41220a2a655c..a745054b4209 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/CompactionBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/CompactionBench.java
@@ -19,7 +19,6 @@
package org.apache.cassandra.test.microbench;
-import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.*;
@@ -29,6 +28,7 @@
import org.apache.cassandra.db.Directories;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileUtils;
import org.openjdk.jmh.annotations.*;
@@ -108,7 +108,7 @@ public void resetSnapshot()
for (File file : directories)
{
- for (File f : file.listFiles())
+ for (File f : file.tryList())
{
if (f.isDirectory())
continue;
@@ -119,7 +119,7 @@ public void resetSnapshot()
for (File file : snapshotFiles)
- FileUtils.createHardLink(file, new File(file.toPath().getParent().getParent().getParent().toFile(), file.getName()));
+ FileUtils.createHardLink(file, new File(new File(file.toPath().getParent().getParent().getParent()), file.name()));
cfs.loadNewSSTables();
}
diff --git a/test/microbench/org/apache/cassandra/test/microbench/DirectorySizerBench.java b/test/microbench/org/apache/cassandra/test/microbench/DirectorySizerBench.java
index 7272c1f39ec8..a5b5fdd89b95 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/DirectorySizerBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/DirectorySizerBench.java
@@ -18,13 +18,13 @@
package org.apache.cassandra.test.microbench;
-import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.nio.file.Files;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.DirectorySizeCalculator;
import org.openjdk.jmh.annotations.*;
@@ -45,7 +45,7 @@ public class DirectorySizerBench
@Setup(Level.Trial)
public void setUp() throws IOException
{
- tempDir = Files.createTempDirectory(randString()).toFile();
+ tempDir = new File(Files.createTempDirectory(randString()));
// Since #'s on laptops and commodity desktops are so useful in considering enterprise virtualized server environments...
@@ -84,7 +84,7 @@ private void populateRandomFiles(File dir, int count) throws IOException
{
for (int i = 0; i < count; i++)
{
- PrintWriter pw = new PrintWriter(dir + File.separator + randString(), "UTF-8");
+ PrintWriter pw = new PrintWriter(dir + File.pathSeparator() + randString(), "UTF-8");
pw.write(randString());
pw.close();
}
diff --git a/test/microbench/org/apache/cassandra/test/microbench/OutputStreamBench.java b/test/microbench/org/apache/cassandra/test/microbench/OutputStreamBench.java
index cd15646471b1..890f74c43a52 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/OutputStreamBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/OutputStreamBench.java
@@ -288,4 +288,4 @@ public void testRLargeStringBDOSP() throws IOException {
public void testRLargeLegacyWriteUTF() throws IOException {
BufferedDataOutputStreamTest.writeUTFLegacy(large, hole);
}
-}
\ No newline at end of file
+}
diff --git a/test/microbench/org/apache/cassandra/test/microbench/StreamingTombstoneHistogramBuilderBench.java b/test/microbench/org/apache/cassandra/test/microbench/StreamingTombstoneHistogramBuilderBench.java
old mode 100755
new mode 100644
diff --git a/test/unit/org/apache/cassandra/AbstractSerializationsTester.java b/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
index 3611f0e639b2..f5cdd5a0a4e8 100644
--- a/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
+++ b/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
@@ -20,20 +20,15 @@
package org.apache.cassandra;
import org.apache.cassandra.io.IVersionedSerializer;
-import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
-import org.apache.cassandra.io.util.DataOutputBuffer;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
-import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
+import org.apache.cassandra.io.util.*;
import org.apache.cassandra.net.MessagingService;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
+import org.apache.cassandra.io.util.File;
+
public class AbstractSerializationsTester
{
protected static final String CUR_VER = System.getProperty("cassandra.version", "4.0");
@@ -57,16 +52,16 @@ protected <T> void testSerializedSize(T obj, IVersionedSerializer<T> serializer)
assert out.getLength() == serializer.serializedSize(obj, getVersion());
}
- protected static DataInputStreamPlus getInput(String name) throws IOException
+ protected static FileInputStreamPlus getInput(String name) throws IOException
{
return getInput(CUR_VER, name);
}
- protected static DataInputStreamPlus getInput(String version, String name) throws IOException
+ protected static FileInputStreamPlus getInput(String version, String name) throws IOException
{
File f = new File("test/data/serialization/" + version + '/' + name);
- assert f.exists() : f.getPath();
- return new DataInputPlus.DataInputStreamPlus(new FileInputStream(f));
+ assert f.exists() : f.path();
+ return new FileInputStreamPlus(f);
}
@SuppressWarnings("resource")
@@ -79,7 +74,7 @@ protected static DataOutputStreamPlus getOutput(String name) throws IOException
protected static DataOutputStreamPlus getOutput(String version, String name) throws IOException
{
File f = new File("test/data/serialization/" + version + '/' + name);
- f.getParentFile().mkdirs();
- return new BufferedDataOutputStreamPlus(new FileOutputStream(f).getChannel());
+ f.parent().tryCreateDirectories();
+ return new FileOutputStreamPlus(f);
}
}
diff --git a/test/unit/org/apache/cassandra/CassandraBriefJUnitResultFormatter.java b/test/unit/org/apache/cassandra/CassandraBriefJUnitResultFormatter.java
index a6c5997fae45..88dbc52941f5 100644
--- a/test/unit/org/apache/cassandra/CassandraBriefJUnitResultFormatter.java
+++ b/test/unit/org/apache/cassandra/CassandraBriefJUnitResultFormatter.java
@@ -316,4 +316,4 @@ public void formatSkip(Test test, String message) {
public void testAssumptionFailure(Test test, Throwable cause) {
formatSkip(test, cause.getMessage());
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/CassandraIsolatedJunit4ClassRunner.java b/test/unit/org/apache/cassandra/CassandraIsolatedJunit4ClassRunner.java
index 8c37be7688fd..5f65227cba86 100644
--- a/test/unit/org/apache/cassandra/CassandraIsolatedJunit4ClassRunner.java
+++ b/test/unit/org/apache/cassandra/CassandraIsolatedJunit4ClassRunner.java
@@ -104,4 +104,4 @@ protected void finalize()
}
}
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/CassandraXMLJUnitResultFormatter.java b/test/unit/org/apache/cassandra/CassandraXMLJUnitResultFormatter.java
index 5015be926775..da8e4077f350 100644
--- a/test/unit/org/apache/cassandra/CassandraXMLJUnitResultFormatter.java
+++ b/test/unit/org/apache/cassandra/CassandraXMLJUnitResultFormatter.java
@@ -388,4 +388,4 @@ public void testAssumptionFailure(final Test test, final Throwable failure) {
skippedTests.put(createDescription(test), test);
}
-} // XMLJUnitResultFormatter
\ No newline at end of file
+} // XMLJUnitResultFormatter
diff --git a/test/unit/org/apache/cassandra/SchemaLoader.java b/test/unit/org/apache/cassandra/SchemaLoader.java
index 9021c6eda55b..62fcebf5f155 100644
--- a/test/unit/org/apache/cassandra/SchemaLoader.java
+++ b/test/unit/org/apache/cassandra/SchemaLoader.java
@@ -18,7 +18,12 @@
package org.apache.cassandra;
import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
import org.apache.cassandra.auth.AuthKeyspace;
import org.apache.cassandra.auth.AuthSchemaChangeListener;
diff --git a/test/unit/org/apache/cassandra/ServerTestUtils.java b/test/unit/org/apache/cassandra/ServerTestUtils.java
index 221a23a526eb..fff968de72c1 100644
--- a/test/unit/org/apache/cassandra/ServerTestUtils.java
+++ b/test/unit/org/apache/cassandra/ServerTestUtils.java
@@ -17,9 +17,9 @@
*/
package org.apache.cassandra;
-import java.io.File;
import java.io.IOException;
import java.net.UnknownHostException;
+import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
@@ -31,6 +31,7 @@
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.db.SystemKeyspace;
import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.locator.AbstractEndpointSnitch;
import org.apache.cassandra.locator.InetAddressAndPort;
@@ -167,7 +168,7 @@ private static void cleanupDirectory(File directory)
{
if (directory.exists())
{
- FileUtils.deleteChildrenRecursive(directory);
+ Arrays.stream(directory.tryList()).forEach(File::deleteRecursive);
}
}
diff --git a/test/unit/org/apache/cassandra/Util.java b/test/unit/org/apache/cassandra/Util.java
index 4b7b6eaec908..2f696ba93f05 100644
--- a/test/unit/org/apache/cassandra/Util.java
+++ b/test/unit/org/apache/cassandra/Util.java
@@ -21,7 +21,6 @@
import java.io.Closeable;
import java.io.EOFException;
-import java.io.File;
import java.io.IOError;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
@@ -38,6 +37,7 @@
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
+import org.apache.cassandra.io.util.File;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
@@ -792,11 +792,11 @@ public static void assertOnDiskState(ColumnFamilyStore cfs, int expectedSSTableC
int fileCount = 0;
for (File f : cfs.getDirectories().getCFDirectories())
{
- for (File sst : f.listFiles())
+ for (File sst : f.tryList())
{
- if (sst.getName().contains("Data"))
+ if (sst.name().contains("Data"))
{
- Descriptor d = Descriptor.fromFilename(sst.getAbsolutePath());
+ Descriptor d = Descriptor.fromFilename(sst.absolutePath());
assertTrue(liveGenerations.contains(d.generation));
fileCount++;
}
diff --git a/test/unit/org/apache/cassandra/audit/AuditLogFilterTest.java b/test/unit/org/apache/cassandra/audit/AuditLogFilterTest.java
index b593c2e30884..62bc767d6614 100644
--- a/test/unit/org/apache/cassandra/audit/AuditLogFilterTest.java
+++ b/test/unit/org/apache/cassandra/audit/AuditLogFilterTest.java
@@ -202,4 +202,4 @@ public void isFiltered_NullInputs()
excludeSet.add("b");
Assert.assertFalse(isFiltered(null, includeSet, excludeSet));
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java b/test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java
index c64323f4ce41..ee21340438b0 100644
--- a/test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java
+++ b/test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java
@@ -310,4 +310,4 @@ private static void createTestRole()
assertLogEntry(logEntry, AuditLogEntryType.CREATE_ROLE, getCreateRoleCql(TEST_ROLE, true, false, true), CASS_USER);
assertEquals(0, getInMemAuditLogger().size());
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/audit/BinAuditLoggerTest.java b/test/unit/org/apache/cassandra/audit/BinAuditLoggerTest.java
index ea1bbbd41a15..678c11cf2b82 100644
--- a/test/unit/org/apache/cassandra/audit/BinAuditLoggerTest.java
+++ b/test/unit/org/apache/cassandra/audit/BinAuditLoggerTest.java
@@ -19,6 +19,7 @@
import java.nio.file.Path;
+import org.apache.cassandra.io.util.File;
import org.junit.BeforeClass;
import org.junit.Test;
diff --git a/test/unit/org/apache/cassandra/auth/AuthTestUtils.java b/test/unit/org/apache/cassandra/auth/AuthTestUtils.java
index a012b6286c59..9e0b737dba8f 100644
--- a/test/unit/org/apache/cassandra/auth/AuthTestUtils.java
+++ b/test/unit/org/apache/cassandra/auth/AuthTestUtils.java
@@ -139,7 +139,7 @@ public static long getRolesReadCount()
return rolesTable.metric.readLatency.latency.getCount();
}
- public static RoleOptions getLoginRoleOprions()
+ public static RoleOptions getLoginRoleOptions()
{
RoleOptions roleOptions = new RoleOptions();
roleOptions.setOption(IRoleManager.Option.SUPERUSER, false);
@@ -147,4 +147,4 @@ public static RoleOptions getLoginRoleOprions()
roleOptions.setOption(IRoleManager.Option.PASSWORD, "ignored");
return roleOptions;
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/auth/PasswordAuthenticatorTest.java b/test/unit/org/apache/cassandra/auth/PasswordAuthenticatorTest.java
index 5dd4ab5559b5..985e58d1d1e5 100644
--- a/test/unit/org/apache/cassandra/auth/PasswordAuthenticatorTest.java
+++ b/test/unit/org/apache/cassandra/auth/PasswordAuthenticatorTest.java
@@ -142,4 +142,4 @@ public static void tearDown()
{
schemaChange("DROP KEYSPACE " + SchemaConstants.AUTH_KEYSPACE_NAME);
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/config/CassandraRelevantPropertiesTest.java b/test/unit/org/apache/cassandra/config/CassandraRelevantPropertiesTest.java
index 1086521bbfc8..de6e0d21f20a 100644
--- a/test/unit/org/apache/cassandra/config/CassandraRelevantPropertiesTest.java
+++ b/test/unit/org/apache/cassandra/config/CassandraRelevantPropertiesTest.java
@@ -142,4 +142,4 @@ public void testInteger_null()
System.clearProperty(TEST_PROP.getKey());
}
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java b/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java
index 3512f32d56f6..c0fd6b27ccfe 100644
--- a/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java
+++ b/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java
@@ -136,12 +136,16 @@ public class DatabaseDescriptorRefTest
"org.apache.cassandra.io.compress.LZ4Compressor",
"org.apache.cassandra.io.sstable.metadata.MetadataType",
"org.apache.cassandra.io.util.BufferedDataOutputStreamPlus",
+ "org.apache.cassandra.io.util.FileInputStreamPlus",
+ "org.apache.cassandra.io.util.FileOutputStreamPlus",
+ "org.apache.cassandra.io.util.File",
"org.apache.cassandra.io.util.DataOutputBuffer",
"org.apache.cassandra.io.util.DataOutputBufferFixed",
"org.apache.cassandra.io.util.DataOutputStreamPlus",
"org.apache.cassandra.io.util.DataOutputPlus",
"org.apache.cassandra.io.util.DiskOptimizationStrategy",
"org.apache.cassandra.io.util.SpinningDiskOptimizationStrategy",
+ "org.apache.cassandra.io.util.PathUtils$IOToLongFunction",
"org.apache.cassandra.locator.Replica",
"org.apache.cassandra.locator.SimpleSeedProvider",
"org.apache.cassandra.locator.SeedProvider",
diff --git a/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java b/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java
index 23a6c005a314..35c1c0488936 100644
--- a/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java
+++ b/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java
@@ -18,11 +18,11 @@
package org.apache.cassandra.config;
-import java.io.File;
import java.util.Collections;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
+import org.apache.cassandra.io.util.File;
import org.junit.Assert;
import org.junit.Test;
diff --git a/test/unit/org/apache/cassandra/cql3/CQLTester.java b/test/unit/org/apache/cassandra/cql3/CQLTester.java
index 56be6f64b794..af95352764d0 100644
--- a/test/unit/org/apache/cassandra/cql3/CQLTester.java
+++ b/test/unit/org/apache/cassandra/cql3/CQLTester.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.cql3;
-import java.io.File;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.BigInteger;
@@ -46,6 +45,7 @@
import com.google.common.base.Objects;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableSet;
+import org.apache.cassandra.io.util.File;
import org.junit.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -612,7 +612,7 @@ private static void removeAllSSTables(String ks, List<String> tables)
// clean up data directory which are stored as data directory/keyspace/data files
for (File d : Directories.getKSChildDirectories(ks))
{
- if (d.exists() && containsAny(d.getName(), tables))
+ if (d.exists() && containsAny(d.name(), tables))
FileUtils.deleteRecursive(d);
}
}
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexOnStaticColumnTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexOnStaticColumnTest.java
index f69d8d54e9e8..1d10eabf7192 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexOnStaticColumnTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexOnStaticColumnTest.java
@@ -214,4 +214,4 @@ public void testIndexOnUDT() throws Throwable
assertEmpty(execute("SELECT id, company FROM %s WHERE home = " + addressString));
assertRows(execute("SELECT id, company FROM %s WHERE home = " + newAddressString), row(1, companyName));
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java
index 71d632dee0af..9fc50531aae2 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java
@@ -293,4 +293,4 @@ public void testBlobAsFunction() throws Throwable
execute("INSERT INTO %s (k, v) VALUES (0, blobAsInt(0x00000001))");
assertRows(execute("select v from %s where k=0"), row(1));
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageTest.java
index 70d3fc324ee3..39b76c39aa43 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageTest.java
@@ -5148,4 +5148,4 @@ private void testUpdateWithCompactFormat(String tableQuery) throws Throwable
assertInvalidMessage("Undefined column name column1", "UPDATE %s SET column1 = 6 WHERE a = 0");
assertInvalidMessage("Undefined column name value", "UPDATE %s SET value = 6 WHERE a = 0");
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/DropRecreateAndRestoreTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/DropRecreateAndRestoreTest.java
index 19aba6470aa2..d385639be865 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/DropRecreateAndRestoreTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/DropRecreateAndRestoreTest.java
@@ -17,9 +17,9 @@
*/
package org.apache.cassandra.cql3.validation.operations;
-import java.io.File;
import java.util.List;
+import org.apache.cassandra.io.util.File;
import org.junit.Test;
import org.apache.cassandra.config.DatabaseDescriptor;
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java
index 1c1f1161ac0c..97d95a058e81 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java
@@ -18,11 +18,10 @@
package org.apache.cassandra.cql3.validation.operations;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
+
import java.io.IOException;
+import org.apache.cassandra.io.util.File;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -39,6 +38,8 @@
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.db.rows.AbstractCell;
import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
import org.apache.cassandra.tools.StandaloneScrubber;
import org.apache.cassandra.tools.ToolRunner;
import org.apache.cassandra.tools.ToolRunner.ToolResult;
@@ -451,7 +452,7 @@ private void copySSTablesToTableDir(String table, boolean simple, boolean cluste
{
File destDir = Keyspace.open(keyspace()).getColumnFamilyStore(table).getDirectories().getCFDirectories().iterator().next();
File sourceDir = getTableDir(table, simple, clustering);
- for (File file : sourceDir.listFiles())
+ for (File file : sourceDir.tryList())
{
copyFile(file, destDir);
}
@@ -467,12 +468,13 @@ private static void copyFile(File src, File dest) throws IOException
byte[] buf = new byte[65536];
if (src.isFile())
{
- File target = new File(dest, src.getName());
+ File target = new File(dest, src.name());
int rd;
- FileInputStream is = new FileInputStream(src);
- FileOutputStream os = new FileOutputStream(target);
+ FileInputStreamPlus is = new FileInputStreamPlus(src);
+ FileOutputStreamPlus os = new FileOutputStreamPlus(target);
while ((rd = is.read(buf)) >= 0)
os.write(buf, 0, rd);
+ os.close();
}
}
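
For readers following the stream swap in the hunk above: a minimal sketch of the same copy helper, restructured with try-with-resources so both streams are released. It relies only on the FileInputStreamPlus/FileOutputStreamPlus constructors, File#name() and File#isFile() already used in this hunk; the class name is illustrative.

    import java.io.IOException;
    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileInputStreamPlus;
    import org.apache.cassandra.io.util.FileOutputStreamPlus;

    // Illustrative only; mirrors the hunk above but closes both streams via try-with-resources.
    final class CopyHelperSketch
    {
        static void copyFile(File src, File dest) throws IOException
        {
            if (!src.isFile())
                return;
            byte[] buf = new byte[65536];
            File target = new File(dest, src.name());     // name() replaces java.io.File#getName()
            try (FileInputStreamPlus in = new FileInputStreamPlus(src);
                 FileOutputStreamPlus out = new FileOutputStreamPlus(target))
            {
                int rd;
                while ((rd = in.read(buf)) >= 0)          // same copy loop as the test helper
                    out.write(buf, 0, rd);
            }
        }
    }
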
diff --git a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
index 583c1eb5f346..d26f3dd7fdf1 100644
--- a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
@@ -18,7 +18,6 @@
*/
package org.apache.cassandra.db;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Path;
@@ -49,6 +48,7 @@
import org.apache.cassandra.db.rows.*;
import org.apache.cassandra.db.partitions.*;
import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.sstable.Component;
import org.apache.cassandra.io.sstable.Descriptor;
import org.apache.cassandra.io.sstable.format.SSTableFormat;
@@ -458,7 +458,7 @@ public void testSnapshotWithoutFlushWithSecondaryIndexes() throws Exception
String baseTableFile = manifest.getFiles().get(0);
String indexTableFile = manifest.getFiles().get(1);
assertThat(baseTableFile).isNotEqualTo(indexTableFile);
- assertThat(Directories.isSecondaryIndexFolder(new File(indexTableFile).getParentFile())).isTrue();
+ assertThat(Directories.isSecondaryIndexFolder(new File(indexTableFile).parent())).isTrue();
assertThat(indexTableFile).endsWith(baseTableFile);
}
@@ -547,7 +547,7 @@ public void testScrubDataDirectories() throws Throwable
String dataFileName = ssTable.descriptor.filenameFor(Component.DATA);
String tmpDataFileName = ssTable.descriptor.tmpFilenameFor(Component.DATA);
- new File(dataFileName).renameTo(new File(tmpDataFileName));
+ new File(dataFileName).tryMove(new File(tmpDataFileName));
ssTable.selfRef().release();
diff --git a/test/unit/org/apache/cassandra/db/DirectoriesTest.java b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
index ba499cee947a..9c1857be0033 100644
--- a/test/unit/org/apache/cassandra/db/DirectoriesTest.java
+++ b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
@@ -17,8 +17,6 @@
*/
package org.apache.cassandra.db;
-import java.io.File;
-import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
@@ -29,6 +27,8 @@
import java.util.concurrent.Future;
import com.google.common.collect.Sets;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
import org.apache.commons.lang3.StringUtils;
import org.junit.AfterClass;
@@ -102,8 +102,8 @@ public void beforeTest() throws IOException
}
tempDataDir = FileUtils.createTempFile("cassandra", "unittest");
- tempDataDir.delete(); // hack to create a temp dir
- tempDataDir.mkdir();
+ tempDataDir.tryDelete(); // hack to create a temp dir
+ tempDataDir.tryCreateDirectory();
// Create two fake data dir for tests, one using CF directories, one that do not.
createTestFiles();
@@ -127,17 +127,17 @@ private static void createTestFiles() throws IOException
            List<File> allSStables = new ArrayList<>();
sstablesByTableName.put(cfm.name, allSStables);
File tableDir = cfDir(cfm);
- tableDir.mkdirs();
+ tableDir.tryCreateDirectories();
allSStables.addAll(createFakeSSTable(tableDir, cfm.name, 1));
allSStables.addAll(createFakeSSTable(tableDir, cfm.name, 2));
File backupDir = new File(tableDir, Directories.BACKUPS_SUBDIR);
- backupDir.mkdir();
+ backupDir.tryCreateDirectory();
allSStables.addAll(createFakeSSTable(backupDir, cfm.name, 1));
- File snapshotDir = new File(tableDir, Directories.SNAPSHOT_SUBDIR + File.separator + LEGACY_SNAPSHOT_NAME);
- snapshotDir.mkdirs();
+ File snapshotDir = new File(tableDir, Directories.SNAPSHOT_SUBDIR + File.pathSeparator() + LEGACY_SNAPSHOT_NAME);
+ snapshotDir.tryCreateDirectories();
allSStables.addAll(createFakeSSTable(snapshotDir, cfm.name, 1));
}
}
@@ -175,9 +175,9 @@ private TableMetadata createFakeTable(String table)
public FakeSnapshot createFakeSnapshot(TableMetadata table, String tag, boolean createManifest) throws IOException
{
File tableDir = cfDir(table);
- tableDir.mkdirs();
- File snapshotDir = new File(tableDir, Directories.SNAPSHOT_SUBDIR + File.separator + tag);
- snapshotDir.mkdirs();
+ tableDir.tryCreateDirectories();
+ File snapshotDir = new File(tableDir, Directories.SNAPSHOT_SUBDIR + File.pathSeparator() + tag);
+ snapshotDir.tryCreateDirectories();
Descriptor sstableDesc = new Descriptor(snapshotDir, KS, table.name, 1, SSTableFormat.Type.BIG);
createFakeSSTable(sstableDesc);
@@ -205,7 +205,7 @@ private static List<File> createFakeSSTable(Descriptor desc) throws IOException
for (Component c : new Component[]{ Component.DATA, Component.PRIMARY_INDEX, Component.FILTER })
{
File f = new File(desc.filenameFor(c));
- f.createNewFile();
+ f.createFileIfNotExists();
components.add(f);
}
return components;
@@ -219,13 +219,13 @@ private static File cfDir(TableMetadata metadata)
{
// secondary index
return new File(tempDataDir,
- metadata.keyspace + File.separator +
- metadata.name.substring(0, idx) + '-' + tableId + File.separator +
+ metadata.keyspace + File.pathSeparator() +
+ metadata.name.substring(0, idx) + '-' + tableId + File.pathSeparator() +
metadata.name.substring(idx));
}
else
{
- return new File(tempDataDir, metadata.keyspace + File.separator + metadata.name + '-' + tableId);
+ return new File(tempDataDir, metadata.keyspace + File.pathSeparator() + metadata.name + '-' + tableId);
}
}
@@ -238,11 +238,11 @@ public void testStandardDirs() throws IOException
assertEquals(cfDir(cfm), directories.getDirectoryForNewSSTables());
Descriptor desc = new Descriptor(cfDir(cfm), KS, cfm.name, 1, SSTableFormat.Type.BIG);
- File snapshotDir = new File(cfDir(cfm), File.separator + Directories.SNAPSHOT_SUBDIR + File.separator + LEGACY_SNAPSHOT_NAME);
- assertEquals(snapshotDir.getCanonicalFile(), Directories.getSnapshotDirectory(desc, LEGACY_SNAPSHOT_NAME));
+ File snapshotDir = new File(cfDir(cfm), File.pathSeparator() + Directories.SNAPSHOT_SUBDIR + File.pathSeparator() + LEGACY_SNAPSHOT_NAME);
+ assertEquals(snapshotDir.toCanonical(), Directories.getSnapshotDirectory(desc, LEGACY_SNAPSHOT_NAME));
- File backupsDir = new File(cfDir(cfm), File.separator + Directories.BACKUPS_SUBDIR);
- assertEquals(backupsDir.getCanonicalFile(), Directories.getBackupsDirectory(desc));
+ File backupsDir = new File(cfDir(cfm), File.pathSeparator() + Directories.BACKUPS_SUBDIR);
+ assertEquals(backupsDir.toCanonical(), Directories.getBackupsDirectory(desc));
}
}
@@ -307,7 +307,7 @@ public void testMaybeManifestLoading() throws Exception {
File parentSnapshotDirectory = Directories.getSnapshotDirectory(parentDesc, tag);
        List<String> files = new LinkedList<>();
- files.add(parentSnapshotDirectory.getAbsolutePath());
+ files.add(parentSnapshotDirectory.toAbsolute().absolutePath());
File manifestFile = directories.getSnapshotManifestFile(tag);
@@ -316,7 +316,7 @@ public void testMaybeManifestLoading() throws Exception {
        Set<File> dirs = new HashSet<>();
- dirs.add(manifestFile.getParentFile());
+ dirs.add(manifestFile.parent());
dirs.add(new File("buzz"));
SnapshotManifest loadedManifest = Directories.maybeLoadManifest(KS, cfm.name, tag, dirs);
@@ -356,9 +356,10 @@ public void testSecondaryIndexDirectories()
// snapshot dir should be created under its parent's
File parentSnapshotDirectory = Directories.getSnapshotDirectory(parentDesc, "test");
File indexSnapshotDirectory = Directories.getSnapshotDirectory(indexDesc, "test");
- assertEquals(parentSnapshotDirectory, indexSnapshotDirectory.getParentFile());
+ assertEquals(parentSnapshotDirectory, indexSnapshotDirectory.parent());
// check if snapshot directory exists
+ parentSnapshotDirectory.tryCreateDirectories();
assertTrue(parentDirectories.snapshotExists("test"));
assertTrue(indexDirectories.snapshotExists("test"));
@@ -383,13 +384,13 @@ public void testSecondaryIndexDirectories()
// check backup directory
File parentBackupDirectory = Directories.getBackupsDirectory(parentDesc);
File indexBackupDirectory = Directories.getBackupsDirectory(indexDesc);
- assertEquals(parentBackupDirectory, indexBackupDirectory.getParentFile());
+ assertEquals(parentBackupDirectory, indexBackupDirectory.parent());
}
private File createFile(String fileName, int size)
{
File newFile = new File(fileName);
- try (FileOutputStream writer = new FileOutputStream(newFile))
+        try (FileOutputStreamPlus writer = new FileOutputStreamPlus(newFile))
{
writer.write(new byte[size]);
writer.flush();
@@ -416,7 +417,7 @@ private void checkFiles(TableMetadata cfm, Directories directories)
listed = new HashSet<>(lister.listFiles());
for (File f : sstablesByTableName.get(cfm.name))
{
- if (f.getPath().contains(Directories.SNAPSHOT_SUBDIR) || f.getPath().contains(Directories.BACKUPS_SUBDIR))
+ if (f.path().contains(Directories.SNAPSHOT_SUBDIR) || f.path().contains(Directories.BACKUPS_SUBDIR))
assertFalse(f + " should not be listed", listed.contains(f));
else
assertTrue(f + " is missing", listed.contains(f));
@@ -427,7 +428,7 @@ private void checkFiles(TableMetadata cfm, Directories directories)
listed = new HashSet<>(lister.listFiles());
for (File f : sstablesByTableName.get(cfm.name))
{
- if (f.getPath().contains(Directories.SNAPSHOT_SUBDIR))
+ if (f.path().contains(Directories.SNAPSHOT_SUBDIR))
assertFalse(f + " should not be listed", listed.contains(f));
else
assertTrue(f + " is missing", listed.contains(f));
@@ -438,9 +439,9 @@ private void checkFiles(TableMetadata cfm, Directories directories)
listed = new HashSet<>(lister.listFiles());
for (File f : sstablesByTableName.get(cfm.name))
{
- if (f.getPath().contains(Directories.SNAPSHOT_SUBDIR) || f.getPath().contains(Directories.BACKUPS_SUBDIR))
+ if (f.path().contains(Directories.SNAPSHOT_SUBDIR) || f.path().contains(Directories.BACKUPS_SUBDIR))
assertFalse(f + " should not be listed", listed.contains(f));
- else if (f.getName().contains("tmp-"))
+ else if (f.name().contains("tmp-"))
assertFalse(f + " should not be listed", listed.contains(f));
else
assertTrue(f + " is missing", listed.contains(f));
@@ -455,9 +456,9 @@ public void testTemporaryFile() throws IOException
Directories directories = new Directories(cfm, toDataDirectories(tempDataDir));
File tempDir = directories.getTemporaryWriteableDirectoryAsFile(10);
- tempDir.mkdir();
+ tempDir.tryCreateDirectory();
File tempFile = new File(tempDir, "tempFile");
- tempFile.createNewFile();
+ tempFile.createFileIfNotExists();
assertTrue(tempDir.exists());
assertTrue(tempFile.exists());
@@ -491,14 +492,13 @@ public void testDiskFailurePolicy_best_effort()
if (!directories.isEmpty())
{
String[] path = new String[] {KS, "bad"};
- File dir = new File(first.location, StringUtils.join(path, File.separator));
+ File dir = new File(first.location, StringUtils.join(path, File.pathSeparator()));
JVMStabilityInspector.inspectThrowable(new FSWriteError(new IOException("Unable to create directory " + dir), dir));
}
- File file = new File(first.location, new File(KS, "bad").getPath());
+ File file = new File(first.location, new File(KS, "bad").path());
assertTrue(DisallowedDirectories.isUnwritable(file));
-
- }
+ }
finally
{
DatabaseDescriptor.setDiskFailurePolicy(origPolicy);
@@ -641,8 +641,8 @@ public void testGetLocationForDisk()
Directories dirs = new Directories(cfm, paths);
for (DataDirectory dir : paths)
{
- String p = dirs.getLocationForDisk(dir).getAbsolutePath() + File.separator;
- assertTrue(p.startsWith(dir.location.getAbsolutePath() + File.separator));
+ String p = dirs.getLocationForDisk(dir).absolutePath() + File.pathSeparator();
+ assertTrue(p.startsWith(dir.location.absolutePath() + File.pathSeparator()));
}
}
}
@@ -658,11 +658,11 @@ public void testGetLocationWithSymlinks() throws IOException
Path p2 = Files.createDirectories(ddir.resolve("p2"));
Path l1 = Files.createSymbolicLink(p2.resolve("ks"), symlinktarget);
- DataDirectory path1 = new DataDirectory(p1.toFile());
- DataDirectory path2 = new DataDirectory(p2.toFile());
+ DataDirectory path1 = new DataDirectory(new File(p1));
+ DataDirectory path2 = new DataDirectory(new File(p2));
Directories dirs = new Directories(CFM.iterator().next(), new DataDirectory[] {path1, path2});
- dirs.getLocationForDisk(new DataDirectory(p1.toFile()));
- dirs.getLocationForDisk(new DataDirectory(p2.toFile()));
+ dirs.getLocationForDisk(new DataDirectory(new File(p1)));
+ dirs.getLocationForDisk(new DataDirectory(new File(p2)));
assertTrue(dirs.getLocationForDisk(path2).toPath().startsWith(l1));
assertTrue(dirs.getLocationForDisk(path1).toPath().startsWith(p1));
@@ -682,8 +682,8 @@ public void getDataDirectoryForFile()
for (DataDirectory dir : paths)
{
Descriptor d = Descriptor.fromFilename(new File(dir.location, getNewFilename(cfm, false)).toString());
- String p = dirs.getDataDirectoryForFile(d).location.getAbsolutePath() + File.separator;
- assertTrue(p.startsWith(dir.location.getAbsolutePath() + File.separator));
+ String p = dirs.getDataDirectoryForFile(d).location.absolutePath() + File.pathSeparator();
+ assertTrue(p.startsWith(dir.location.absolutePath() + File.pathSeparator()));
}
}
}
@@ -704,16 +704,16 @@ public void testDirectoriesSymlinks() throws IOException
Path symlinktarget = Files.createDirectories(p.resolve("symlinktarget"));
Path ddir1 = Files.createDirectories(p.resolve("datadir1"));
Path ddir2 = Files.createSymbolicLink(p.resolve("datadir11"), symlinktarget);
- DataDirectory dd1 = new DataDirectory(ddir1.toFile());
- DataDirectory dd2 = new DataDirectory(ddir2.toFile());
+ DataDirectory dd1 = new DataDirectory(new File(ddir1));
+ DataDirectory dd2 = new DataDirectory(new File(ddir2));
for (TableMetadata tm : CFM)
{
Directories dirs = new Directories(tm, Sets.newHashSet(dd1, dd2));
- Descriptor desc = Descriptor.fromFilename(ddir1.resolve(getNewFilename(tm, false)).toFile());
- assertEquals(ddir1.toFile(), dirs.getDataDirectoryForFile(desc).location);
- desc = Descriptor.fromFilename(ddir2.resolve(getNewFilename(tm, false)).toFile());
- assertEquals(ddir2.toFile(), dirs.getDataDirectoryForFile(desc).location);
+ Descriptor desc = Descriptor.fromFilename(new File(ddir1.resolve(getNewFilename(tm, false))));
+ assertEquals(new File(ddir1), dirs.getDataDirectoryForFile(desc).location);
+ desc = Descriptor.fromFilename(new File(ddir2.resolve(getNewFilename(tm, false))));
+ assertEquals(new File(ddir2), dirs.getDataDirectoryForFile(desc).location);
}
}
@@ -755,15 +755,15 @@ private void testDirectoriesSymlinksHelper(boolean oldStyle) throws IOException
Files.createSymbolicLink(keyspacedir.resolve(tabledir), symlinktarget);
}
- DataDirectory dd1 = new DataDirectory(ddir1.toFile());
- DataDirectory dd2 = new DataDirectory(ddir2.toFile());
+ DataDirectory dd1 = new DataDirectory(new File(ddir1));
+ DataDirectory dd2 = new DataDirectory(new File(ddir2));
for (TableMetadata tm : CFM)
{
Directories dirs = new Directories(tm, Sets.newHashSet(dd1, dd2));
- Descriptor desc = Descriptor.fromFilename(ddir1.resolve(getNewFilename(tm, oldStyle)).toFile());
- assertEquals(ddir1.toFile(), dirs.getDataDirectoryForFile(desc).location);
- desc = Descriptor.fromFilename(ddir2.resolve(getNewFilename(tm, oldStyle)).toFile());
- assertEquals(ddir2.toFile(), dirs.getDataDirectoryForFile(desc).location);
+ Descriptor desc = Descriptor.fromFilename(new File(ddir1.resolve(getNewFilename(tm, oldStyle))));
+ assertEquals(new File(ddir1), dirs.getDataDirectoryForFile(desc).location);
+ desc = Descriptor.fromFilename(new File(ddir2.resolve(getNewFilename(tm, oldStyle))));
+ assertEquals(new File(ddir2), dirs.getDataDirectoryForFile(desc).location);
}
}
@@ -795,11 +795,11 @@ public void testDataDirectoriesIterator() throws IOException
        Iterator<DataDirectory> iter = directories.iterator();
assertTrue(iter.hasNext());
- assertEquals(new DataDirectory(subDir_1.toFile()), iter.next());
+ assertEquals(new DataDirectory(new File(subDir_1)), iter.next());
assertTrue(iter.hasNext());
- assertEquals(new DataDirectory(subDir_2.toFile()), iter.next());
+ assertEquals(new DataDirectory(new File(subDir_2)), iter.next());
assertTrue(iter.hasNext());
- assertEquals(new DataDirectory(subDir_3.toFile()), iter.next());
+ assertEquals(new DataDirectory(new File(subDir_3)), iter.next());
assertFalse(iter.hasNext());
directories = new DataDirectories(new String[]{subDir_1.toString(), subDir_2.toString()},
@@ -807,15 +807,15 @@ public void testDataDirectoriesIterator() throws IOException
iter = directories.iterator();
assertTrue(iter.hasNext());
- assertEquals(new DataDirectory(subDir_1.toFile()), iter.next());
+ assertEquals(new DataDirectory(new File(subDir_1)), iter.next());
assertTrue(iter.hasNext());
- assertEquals(new DataDirectory(subDir_2.toFile()), iter.next());
+ assertEquals(new DataDirectory(new File(subDir_2)), iter.next());
assertFalse(iter.hasNext());
}
private String getNewFilename(TableMetadata tm, boolean oldStyle)
{
- return tm.keyspace + File.separator + tm.name + (oldStyle ? "" : Component.separator + tm.id.toHexString()) + "/na-1-big-Data.db";
+ return tm.keyspace + File.pathSeparator() + tm.name + (oldStyle ? "" : Component.separator + tm.id.toHexString()) + "/na-1-big-Data.db";
}
private List getWriteableDirectories(DataDirectory[] dataDirectories, long writeSize)
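
The DirectoriesTest hunks above cover most of the java.io.File to org.apache.cassandra.io.util.File renames applied throughout this patch. Below is a small usage sketch limited to substitutions that actually appear in the diff; the helper class, directory and file names in it are made up for illustration.

    import java.io.IOException;
    import org.apache.cassandra.io.util.File;

    // Hypothetical helper; each call substitutes for the java.io.File method named in the comment.
    final class FileApiSketch
    {
        static void sketch(File tableDir, String tag) throws IOException
        {
            // string paths are built with the static File.pathSeparator() instead of java.io.File.separator
            File snapshotDir = new File(tableDir, "snapshots" + File.pathSeparator() + tag);
            snapshotDir.tryCreateDirectories();                    // was mkdirs(); tryCreateDirectory() replaces mkdir()
            File marker = new File(snapshotDir, "manifest.json");  // file name is illustrative
            marker.createFileIfNotExists();                        // was createNewFile()
            for (File f : snapshotDir.tryList())                   // was listFiles()
                System.out.println(f.name() + " under " + f.parent().absolutePath()); // getName()/getParentFile()/getAbsolutePath()
            marker.tryDelete();                                    // was delete(); renameTo() becomes tryMove()
        }
    }
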
diff --git a/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java b/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java
index a9d69019217b..2138f529a0c2 100644
--- a/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java
+++ b/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java
@@ -18,11 +18,11 @@
package org.apache.cassandra.db;
-import java.io.File;
import java.net.UnknownHostException;
import java.util.List;
import com.google.common.collect.Lists;
+import org.apache.cassandra.io.util.File;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
diff --git a/test/unit/org/apache/cassandra/db/ImportTest.java b/test/unit/org/apache/cassandra/db/ImportTest.java
index c0c3799b858b..85d32516e2ea 100644
--- a/test/unit/org/apache/cassandra/db/ImportTest.java
+++ b/test/unit/org/apache/cassandra/db/ImportTest.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.db;
-import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.file.Files;
@@ -42,6 +41,7 @@
import org.apache.cassandra.dht.BootStrapper;
import org.apache.cassandra.io.sstable.Component;
import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.locator.TokenMetadata;
import org.apache.cassandra.service.CacheService;
@@ -227,36 +227,36 @@ private File moveToBackupDir(Set<SSTableReader> sstables) throws IOException
{
Path temp = Files.createTempDirectory("importtest");
SSTableReader sst = sstables.iterator().next();
- String tabledir = sst.descriptor.directory.getName();
- String ksdir = sst.descriptor.directory.getParentFile().getName();
+ String tabledir = sst.descriptor.directory.name();
+ String ksdir = sst.descriptor.directory.parent().name();
Path backupdir = createDirectories(temp.toString(), ksdir, tabledir);
for (SSTableReader sstable : sstables)
{
sstable.selfRef().release();
- for (File f : sstable.descriptor.directory.listFiles())
+ for (File f : sstable.descriptor.directory.tryList())
{
if (f.toString().contains(sstable.descriptor.baseFilename()))
{
System.out.println("move " + f.toPath() + " to " + backupdir);
- File moveFileTo = new File(backupdir.toFile(), f.getName());
+ File moveFileTo = new File(backupdir, f.name());
moveFileTo.deleteOnExit();
Files.move(f.toPath(), moveFileTo.toPath());
}
}
}
- return backupdir.toFile();
+ return new File(backupdir);
}
private Path createDirectories(String base, String ... subdirs)
{
File b = new File(base);
- b.mkdir();
+ b.tryCreateDirectory();
System.out.println("mkdir "+b);
b.deleteOnExit();
for (String subdir : subdirs)
{
b = new File(b, subdir);
- b.mkdir();
+ b.tryCreateDirectory();
System.out.println("mkdir "+b);
b.deleteOnExit();
}
@@ -291,8 +291,8 @@ public void testGetCorrectDirectory() throws Throwable
importer.importNewSSTables(SSTableImporter.Options.options(dir.toString()).build());
for (SSTableReader sstable : mock.getLiveSSTables())
{
- File movedDir = sstable.descriptor.directory.getCanonicalFile();
- File correctDir = mock.getDiskBoundaries().getCorrectDiskForSSTable(sstable).location.getCanonicalFile();
+ File movedDir = sstable.descriptor.directory.toCanonical();
+ File correctDir = mock.getDiskBoundaries().getCorrectDiskForSSTable(sstable).location.toCanonical();
assertTrue(movedDir.toString().startsWith(correctDir.toString()));
}
for (SSTableReader sstable : mock.getLiveSSTables())
@@ -331,7 +331,7 @@ private void testCorruptHelper(boolean verify, boolean copy) throws Throwable
getCurrentColumnFamilyStore().clearUnsafe();
File backupdirCorrect = moveToBackupDir(correctSSTables);
-        Set<File> beforeImport = Sets.newHashSet(backupdir.listFiles());
+        Set<File> beforeImport = Sets.newHashSet(backupdir.tryList());
// first we moved out 2 sstables, one correct and one corrupt in to a single directory (backupdir)
// then we moved out 1 sstable, a correct one (in backupdirCorrect).
// now import should fail import on backupdir, but import the one in backupdirCorrect.
@@ -346,7 +346,7 @@ private void testCorruptHelper(boolean verify, boolean copy) throws Throwable
assertTrue("pk = "+pk, pk >= 100 && pk < 130);
}
assertEquals("Data dir should contain one file", 1, countFiles(getCurrentColumnFamilyStore().getDirectories().getDirectoryForNewSSTables()));
- assertEquals("backupdir contained 2 files before import, should still contain 2 after failing to import it", beforeImport, Sets.newHashSet(backupdir.listFiles()));
+ assertEquals("backupdir contained 2 files before import, should still contain 2 after failing to import it", beforeImport, Sets.newHashSet(backupdir.tryList()));
if (copy)
{
assertEquals("backupdirCorrect contained 1 file before import, should contain 1 after import too", 1, countFiles(backupdirCorrect));
@@ -355,14 +355,13 @@ private void testCorruptHelper(boolean verify, boolean copy) throws Throwable
{
assertEquals("backupdirCorrect contained 1 file before import, should be empty after import", 0, countFiles(backupdirCorrect));
}
-
}
private int countFiles(File dir)
{
int fileCount = 0;
- for (File f : dir.listFiles())
+ for (File f : dir.tryList())
{
if (f.isFile() && f.toString().contains("-Data.db"))
{
@@ -618,8 +617,8 @@ public void testRefreshCorrupt() throws Throwable
assertTrue(new File(sstable.descriptor.filenameFor(Component.DATA)).exists());
getCurrentColumnFamilyStore().truncateBlocking();
LifecycleTransaction.waitForDeletions();
- for (File f : sstableToCorrupt.descriptor.directory.listFiles()) // clean up the corrupt files which truncate does not handle
- f.delete();
+ for (File f : sstableToCorrupt.descriptor.directory.tryList()) // clean up the corrupt files which truncate does not handle
+ f.tryDelete();
}
diff --git a/test/unit/org/apache/cassandra/db/MmapFileTest.java b/test/unit/org/apache/cassandra/db/MmapFileTest.java
index 71a218eff893..c8338cb20776 100644
--- a/test/unit/org/apache/cassandra/db/MmapFileTest.java
+++ b/test/unit/org/apache/cassandra/db/MmapFileTest.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.db;
-import java.io.File;
import java.io.RandomAccessFile;
import java.lang.management.ManagementFactory;
import java.nio.MappedByteBuffer;
@@ -26,6 +25,7 @@
import javax.management.MBeanServer;
import javax.management.ObjectName;
+import org.apache.cassandra.io.util.File;
import org.junit.Assert;
import org.junit.Test;
@@ -56,17 +56,17 @@ public void testMmapFile() throws Exception
{
int size = 1024 * 1024;
- try (RandomAccessFile raf = new RandomAccessFile(f1, "rw"))
+ try (RandomAccessFile raf = new RandomAccessFile(f1.toJavaIOFile(), "rw"))
{
raf.setLength(size);
}
- try (RandomAccessFile raf = new RandomAccessFile(f2, "rw"))
+ try (RandomAccessFile raf = new RandomAccessFile(f2.toJavaIOFile(), "rw"))
{
raf.setLength(size);
}
- try (RandomAccessFile raf = new RandomAccessFile(f3, "rw"))
+ try (RandomAccessFile raf = new RandomAccessFile(f3.toJavaIOFile(), "rw"))
{
raf.setLength(size);
}
@@ -148,16 +148,16 @@ public void testMmapFile() throws Exception
Assert.assertEquals("# of mapped buffers should be 0", Long.valueOf(0L), mmapCount);
Assert.assertEquals("amount of mapped memory should be 0", Long.valueOf(0L), mmapMemoryUsed);
- Assert.assertTrue(f1.delete());
- Assert.assertTrue(f2.delete());
- Assert.assertTrue(f3.delete());
+ Assert.assertTrue(f1.tryDelete());
+ Assert.assertTrue(f2.tryDelete());
+ Assert.assertTrue(f3.tryDelete());
}
finally
{
Runtime.getRuntime().gc();
- f1.delete();
- f2.delete();
- f3.delete();
+ f1.tryDelete();
+ f2.tryDelete();
+ f3.tryDelete();
}
}
}
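
MmapFileTest above shows the interop path for JDK APIs that still require a java.io.File: the wrapper is unwrapped via toJavaIOFile() before being handed to RandomAccessFile. A minimal sketch of that bridge, assuming toJavaIOFile() simply exposes the underlying java.io.File as the hunk implies; the class and method names are illustrative.

    import java.io.IOException;
    import java.io.RandomAccessFile;
    import org.apache.cassandra.io.util.File;

    // Pre-size a file, bridging to java.io for an API that has no wrapper-aware overload.
    final class JavaIoBridgeSketch
    {
        static void preallocate(File f, long size) throws IOException
        {
            try (RandomAccessFile raf = new RandomAccessFile(f.toJavaIOFile(), "rw"))
            {
                raf.setLength(size);
            }
        }
    }
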
diff --git a/test/unit/org/apache/cassandra/db/MutationExceededMaxSizeExceptionTest.java b/test/unit/org/apache/cassandra/db/MutationExceededMaxSizeExceptionTest.java
index 81d97359b29e..09ab8a1e21ec 100644
--- a/test/unit/org/apache/cassandra/db/MutationExceededMaxSizeExceptionTest.java
+++ b/test/unit/org/apache/cassandra/db/MutationExceededMaxSizeExceptionTest.java
@@ -43,4 +43,4 @@ public void testMakePKString()
assertEquals("aaa, bbb and 1 more.", makeTopKeysString(new ArrayList<>(keys), 8));
assertEquals("aaa, bbb and 1 more.", makeTopKeysString(new ArrayList<>(keys), 10));
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/db/ReadMessageTest.java b/test/unit/org/apache/cassandra/db/ReadMessageTest.java
index 5b052536fb27..cd003c63a353 100644
--- a/test/unit/org/apache/cassandra/db/ReadMessageTest.java
+++ b/test/unit/org/apache/cassandra/db/ReadMessageTest.java
@@ -21,6 +21,7 @@
import static org.junit.Assert.*;
import java.io.*;
+import org.apache.cassandra.io.util.File;
import com.google.common.base.Predicate;
diff --git a/test/unit/org/apache/cassandra/db/RecoveryManagerMissingHeaderTest.java b/test/unit/org/apache/cassandra/db/RecoveryManagerMissingHeaderTest.java
index 4044fff612fc..21058671b8af 100644
--- a/test/unit/org/apache/cassandra/db/RecoveryManagerMissingHeaderTest.java
+++ b/test/unit/org/apache/cassandra/db/RecoveryManagerMissingHeaderTest.java
@@ -18,12 +18,12 @@
*/
package org.apache.cassandra.db;
-import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
+import org.apache.cassandra.io.util.File;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -112,9 +112,9 @@ public void testMissingHeader() throws IOException
keyspace2.getColumnFamilyStore("Standard3").clearUnsafe();
// nuke the header
- for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles())
+ for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).tryList())
{
- if (file.getName().endsWith(".header"))
+ if (file.name().endsWith(".header"))
FileUtils.deleteWithConfirm(file);
}
diff --git a/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java b/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java
index 7b774eb03793..cac8038e6a33 100644
--- a/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java
+++ b/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.db;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@@ -28,6 +27,7 @@
import java.util.List;
import com.google.common.primitives.Ints;
+import org.apache.cassandra.io.util.File;
import org.junit.Assert;
import org.junit.Test;
diff --git a/test/unit/org/apache/cassandra/db/SchemaCQLHelperTest.java b/test/unit/org/apache/cassandra/db/SchemaCQLHelperTest.java
index b928ebfe6d07..5a6b69ef0fec 100644
--- a/test/unit/org/apache/cassandra/db/SchemaCQLHelperTest.java
+++ b/test/unit/org/apache/cassandra/db/SchemaCQLHelperTest.java
@@ -22,6 +22,7 @@
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
+import org.apache.cassandra.io.util.FileReader;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -40,7 +41,6 @@
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
-import java.io.FileReader;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.Arrays;
@@ -402,7 +402,7 @@ public void testSnapshot() throws Throwable
ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName);
cfs.snapshot(SNAPSHOT);
- String schema = Files.toString(cfs.getDirectories().getSnapshotSchemaFile(SNAPSHOT), Charset.defaultCharset());
+ String schema = Files.toString(cfs.getDirectories().getSnapshotSchemaFile(SNAPSHOT).toJavaIOFile(), Charset.defaultCharset());
assertThat(schema,
allOf(containsString(String.format("CREATE TYPE IF NOT EXISTS %s.%s (\n" +
" a1 varint,\n" +
diff --git a/test/unit/org/apache/cassandra/db/ScrubTest.java b/test/unit/org/apache/cassandra/db/ScrubTest.java
index 7c24d7ced186..1e2c8e052f45 100644
--- a/test/unit/org/apache/cassandra/db/ScrubTest.java
+++ b/test/unit/org/apache/cassandra/db/ScrubTest.java
@@ -18,7 +18,6 @@
*/
package org.apache.cassandra.db;
-import java.io.File;
import java.io.IOError;
import java.io.IOException;
import java.io.RandomAccessFile;
@@ -36,6 +35,7 @@
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.cassandra.io.util.File;
import org.apache.commons.lang3.StringUtils;
import org.junit.AfterClass;
@@ -336,7 +336,7 @@ public void testScrubNoIndex() throws ExecutionException, InterruptedException,
assertOrderedAll(cfs, 10);
for (SSTableReader sstable : cfs.getLiveSSTables())
- assertTrue(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)).delete());
+ assertTrue(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)).tryDelete());
CompactionManager.instance.performScrub(cfs, false, true, 2);
@@ -352,10 +352,10 @@ public void testScrubOutOfOrder()
DatabaseDescriptor.setPartitionerUnsafe(new ByteOrderedPartitioner());
// Create out-of-order SSTable
- File tempDir = FileUtils.createTempFile("ScrubTest.testScrubOutOfOrder", "").getParentFile();
+ File tempDir = FileUtils.createTempFile("ScrubTest.testScrubOutOfOrder", "").parent();
// create ks/cf directory
- File tempDataDir = new File(tempDir, String.join(File.separator, ksName, CF));
- assertTrue(tempDataDir.mkdirs());
+ File tempDataDir = new File(tempDir, String.join(File.pathSeparator(), ksName, CF));
+ assertTrue(tempDataDir.tryCreateDirectories());
try
{
CompactionManager.instance.disableAutoCompaction();
diff --git a/test/unit/org/apache/cassandra/db/SerializationHeaderTest.java b/test/unit/org/apache/cassandra/db/SerializationHeaderTest.java
index 32a9a6d57d18..34c15bb7391e 100644
--- a/test/unit/org/apache/cassandra/db/SerializationHeaderTest.java
+++ b/test/unit/org/apache/cassandra/db/SerializationHeaderTest.java
@@ -44,7 +44,6 @@
import org.junit.Assert;
import org.junit.Test;
-import java.io.File;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.concurrent.Callable;
@@ -52,6 +51,8 @@
import java.util.function.BiFunction;
import java.util.function.Function;
+import org.apache.cassandra.io.util.File;
+
public class SerializationHeaderTest
{
private static String KEYSPACE = "SerializationHeaderTest";
@@ -84,7 +85,7 @@ public void testWrittenAsDifferentKind() throws Exception
schemaWithRegular = schemaWithRegular.unbuild().recordColumnDrop(columnStatic, 0L).build();
final AtomicInteger generation = new AtomicInteger();
- File dir = Files.createTempDir();
+ File dir = new File(Files.createTempDir());
try
{
BiFunction>, Callable> writer = (schema, clusteringFunction) -> () -> {
diff --git a/test/unit/org/apache/cassandra/db/VerifyTest.java b/test/unit/org/apache/cassandra/db/VerifyTest.java
index 65bff51d2fc9..9593b7bd82f1 100644
--- a/test/unit/org/apache/cassandra/db/VerifyTest.java
+++ b/test/unit/org/apache/cassandra/db/VerifyTest.java
@@ -39,6 +39,7 @@
import org.apache.cassandra.io.sstable.Component;
import org.apache.cassandra.io.sstable.CorruptSSTableException;
import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.locator.TokenMetadata;
@@ -63,6 +64,7 @@
import java.util.zip.CRC32;
import java.util.zip.CheckedInputStream;
+import org.apache.cassandra.io.util.File;
import static org.apache.cassandra.SchemaLoader.counterCFMD;
import static org.apache.cassandra.SchemaLoader.createKeyspace;
import static org.apache.cassandra.SchemaLoader.loadSchema;
@@ -769,7 +771,7 @@ protected void fillCounterCF(ColumnFamilyStore cfs, int partitionsPerSSTable) th
protected long simpleFullChecksum(String filename) throws IOException
{
- try (FileInputStream inputStream = new FileInputStream(filename))
+ try (FileInputStreamPlus inputStream = new FileInputStreamPlus(filename))
{
CRC32 checksum = new CRC32();
CheckedInputStream cinStream = new CheckedInputStream(inputStream, checksum);
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CDCTestReplayer.java b/test/unit/org/apache/cassandra/db/commitlog/CDCTestReplayer.java
index fa3295a484cc..762459097cc8 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CDCTestReplayer.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CDCTestReplayer.java
@@ -17,9 +17,9 @@
*/
package org.apache.cassandra.db.commitlog;
-import java.io.File;
import java.io.IOException;
+import org.apache.cassandra.io.util.File;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -45,7 +45,7 @@ public CDCTestReplayer() throws IOException
public void examineCommitLog() throws IOException
{
- replayFiles(new File(DatabaseDescriptor.getCommitLogLocation()).listFiles());
+ replayFiles(new File(DatabaseDescriptor.getCommitLogLocation()).tryList());
}
private class CommitLogTestReader extends CommitLogReader
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogChainedMarkersTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogChainedMarkersTest.java
index fb90d59462be..319e75b34668 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogChainedMarkersTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogChainedMarkersTest.java
@@ -18,12 +18,12 @@
package org.apache.cassandra.db.commitlog;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Random;
+import org.apache.cassandra.io.util.File;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java
index 794f99f47b28..83723c5c5bf4 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java
@@ -17,11 +17,11 @@
*/
package org.apache.cassandra.db.commitlog;
-import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import org.apache.cassandra.io.util.File;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -191,7 +191,7 @@ private void confirmReadOrder(TestCLRHandler handler, int offset)
    static ArrayList<File> getCommitLogs()
{
File dir = new File(DatabaseDescriptor.getCommitLogLocation());
- File[] files = dir.listFiles();
+ File[] files = dir.tryList();
        ArrayList<File> results = new ArrayList<>();
for (File f : files)
{
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java
index 4128b7122ee6..cbfdadbb5fca 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java
@@ -24,6 +24,8 @@
import java.nio.file.Path;
import java.util.*;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileReader;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -101,7 +103,7 @@ public void testCDCWriteFailure() throws Throwable
Assert.assertTrue("Expected files to be moved to overflow.", getCDCRawCount() > 0);
// Simulate a CDC consumer reading files then deleting them
- for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles())
+ for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList())
FileUtils.deleteWithConfirm(f);
// Update size tracker to reflect deleted files. Should flip flag on current allocatingFrom to allow.
@@ -143,14 +145,14 @@ public void testSegmentFlaggingOnCreation() throws Throwable
cdcMgr.awaitManagementTasksCompletion();
// Delete all files in cdc_raw
- for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles())
- f.delete();
+ for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList())
+ f.tryDelete();
cdcMgr.updateCDCTotalSize();
// Confirm cdc update process changes flag on active segment
expectCurrentCDCState(CDCState.PERMITTED);
// Clear out archived CDC files
- for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles()) {
+ for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList()) {
FileUtils.deleteWithConfirm(f);
}
}
@@ -234,7 +236,7 @@ public void testDeleteLinkOnDiscardNoCDC() throws Throwable
CommitLogSegment currentSegment = CommitLog.instance.segmentManager.allocatingFrom();
// Confirm that, with no CDC data present, we've hard-linked but have no index file
- Path linked = new File(DatabaseDescriptor.getCDCLogLocation(), currentSegment.logFile.getName()).toPath();
+ Path linked = new File(DatabaseDescriptor.getCDCLogLocation(), currentSegment.logFile.name()).toPath();
File cdcIndexFile = currentSegment.getCDCIndexFile();
Assert.assertTrue("File does not exist: " + linked, Files.exists(linked));
Assert.assertFalse("Expected index file to not be created but found: " + cdcIndexFile, cdcIndexFile.exists());
@@ -262,7 +264,7 @@ public void testRetainLinkOnDiscardCDC() throws Throwable
.add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 3))
.build().apply();
- Path linked = new File(DatabaseDescriptor.getCDCLogLocation(), currentSegment.logFile.getName()).toPath();
+ Path linked = new File(DatabaseDescriptor.getCDCLogLocation(), currentSegment.logFile.name()).toPath();
// Confirm that, with CDC data present but not yet flushed, we've hard-linked but have no index file
Assert.assertTrue("File does not exist: " + linked, Files.exists(linked));
@@ -310,13 +312,13 @@ public void testReplayLogic() throws IOException
// Build up a list of expected index files after replay and then clear out cdc_raw
        List<CDCIndexData> oldData = parseCDCIndexData();
- for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles())
- FileUtils.deleteWithConfirm(f.getAbsolutePath());
+ for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList())
+ FileUtils.deleteWithConfirm(f.absolutePath());
try
{
Assert.assertEquals("Expected 0 files in CDC folder after deletion. ",
- 0, new File(DatabaseDescriptor.getCDCLogLocation()).listFiles().length);
+ 0, new File(DatabaseDescriptor.getCDCLogLocation()).tryList().length);
}
finally
{
@@ -331,7 +333,7 @@ public void testReplayLogic() throws IOException
// Rough sanity check -> should be files there now.
Assert.assertTrue("Expected non-zero number of files in CDC folder after restart.",
- new File(DatabaseDescriptor.getCDCLogLocation()).listFiles().length > 0);
+ new File(DatabaseDescriptor.getCDCLogLocation()).tryList().length > 0);
// Confirm all the old indexes in old are present and >= the original offset, as we flag the entire segment
// as cdc written on a replay.
@@ -377,9 +379,9 @@ private List<CDCIndexData> parseCDCIndexData()
        List<CDCIndexData> results = new ArrayList<>();
try
{
- for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles())
+ for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList())
{
- if (f.getName().contains("_cdc.idx"))
+ if (f.name().contains("_cdc.idx"))
results.add(new CDCIndexData(f));
}
}
@@ -398,7 +400,7 @@ private static class CDCIndexData
CDCIndexData(File f) throws IOException
{
String line = "";
- try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(f))))
+ try (BufferedReader br = new BufferedReader(new FileReader(f)))
{
line = br.readLine();
}
@@ -406,7 +408,7 @@ private static class CDCIndexData
{
throw e;
}
- fileName = f.getName();
+ fileName = f.name();
offset = Integer.parseInt(line);
}
@@ -433,7 +435,7 @@ private ByteBuffer randomizeBuffer(int size)
private int getCDCRawCount()
{
- return new File(DatabaseDescriptor.getCDCLogLocation()).listFiles().length;
+ return new File(DatabaseDescriptor.getCDCLogLocation()).tryList().length;
}
private void expectCurrentCDCState(CDCState expectedState)
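
The CDCIndexData change above reads the flushed offset through the wrapper FileReader instead of FileInputStream/InputStreamReader. A compact sketch of that parsing pattern, assuming only the FileReader(File) constructor and File#name() seen in the hunk; the class and field names below are illustrative.

    import java.io.BufferedReader;
    import java.io.IOException;
    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileReader;

    // Illustrative parser for a *_cdc.idx file: the first line holds the flushed offset.
    final class CdcIndexSketch
    {
        final String fileName;
        final int offset;

        CdcIndexSketch(File f) throws IOException
        {
            try (BufferedReader br = new BufferedReader(new FileReader(f)))
            {
                fileName = f.name();
                offset = Integer.parseInt(br.readLine());
            }
        }
    }
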
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
index c6a11aff0b8a..c0a70c66f374 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
@@ -19,6 +19,7 @@
package org.apache.cassandra.db.commitlog;
import java.io.*;
+import org.apache.cassandra.io.util.File;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.*;
@@ -34,6 +35,7 @@
import com.google.common.collect.Iterables;
import com.google.common.io.Files;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
import org.junit.*;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@@ -210,7 +212,7 @@ public void testHeaderOnlyFileFiltering() throws Exception
{
Assume.assumeTrue(!DatabaseDescriptor.getEncryptionContext().isEnabled());
- File directory = Files.createTempDir();
+ File directory = new File(Files.createTempDir());
CommitLogDescriptor desc1 = new CommitLogDescriptor(CommitLogDescriptor.current_version, 1, null, DatabaseDescriptor.getEncryptionContext());
CommitLogDescriptor desc2 = new CommitLogDescriptor(CommitLogDescriptor.current_version, 2, null, DatabaseDescriptor.getEncryptionContext());
@@ -227,7 +229,7 @@ public void testHeaderOnlyFileFiltering() throws Exception
buffer.putInt(5);
buffer.putInt(6);
- try (OutputStream lout = new FileOutputStream(file1))
+ try (OutputStream lout = new FileOutputStreamPlus(file1))
{
lout.write(buffer.array());
}
@@ -236,7 +238,7 @@ public void testHeaderOnlyFileFiltering() throws Exception
File file2 = new File(directory, desc2.fileName());
buffer = ByteBuffer.allocate(1024);
CommitLogDescriptor.writeHeader(buffer, desc2);
- try (OutputStream lout = new FileOutputStream(file2))
+ try (OutputStream lout = new FileOutputStreamPlus(file2))
{
lout.write(buffer.array());
}
@@ -571,7 +573,7 @@ protected Pair tmpFile() throws IOException
File logFile = new File(DatabaseDescriptor.getCommitLogLocation(), desc.fileName());
- try (OutputStream lout = new FileOutputStream(logFile))
+ try (OutputStream lout = new FileOutputStreamPlus(logFile))
{
lout.write(buf.array(), 0, buf.limit());
}
@@ -600,11 +602,11 @@ protected File tmpFile(int version)
protected Void testRecovery(byte[] logData, int version) throws Exception
{
File logFile = tmpFile(version);
- try (OutputStream lout = new FileOutputStream(logFile))
+ try (OutputStream lout = new FileOutputStreamPlus(logFile))
{
lout.write(logData);
//statics make it annoying to test things correctly
- CommitLog.instance.recover(logFile.getPath()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/
+ CommitLog.instance.recover(logFile.path()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/
}
return null;
}
@@ -612,17 +614,17 @@ protected Void testRecovery(byte[] logData, int version) throws Exception
protected Void testRecovery(CommitLogDescriptor desc, byte[] logData) throws Exception
{
File logFile = tmpFile(desc.version);
- CommitLogDescriptor fromFile = CommitLogDescriptor.fromFileName(logFile.getName());
+ CommitLogDescriptor fromFile = CommitLogDescriptor.fromFileName(logFile.name());
// Change id to match file.
desc = new CommitLogDescriptor(desc.version, fromFile.id, desc.compression, desc.getEncryptionContext());
ByteBuffer buf = ByteBuffer.allocate(1024);
CommitLogDescriptor.writeHeader(buf, desc, getAdditionalHeaders(desc.getEncryptionContext()));
- try (OutputStream lout = new FileOutputStream(logFile))
+ try (OutputStream lout = new FileOutputStreamPlus(logFile))
{
lout.write(buf.array(), 0, buf.position());
lout.write(logData);
//statics make it annoying to test things correctly
- CommitLog.instance.recover(logFile.getPath()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/
+ CommitLog.instance.recover(logFile.path()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/
}
return null;
}
@@ -634,12 +636,12 @@ public void testRecoveryWithIdMismatch() throws Exception
File logFile = tmpFile(desc.version);
ByteBuffer buf = ByteBuffer.allocate(1024);
CommitLogDescriptor.writeHeader(buf, desc);
- try (OutputStream lout = new FileOutputStream(logFile))
+ try (OutputStream lout = new FileOutputStreamPlus(logFile))
{
lout.write(buf.array(), 0, buf.position());
runExpecting(() -> {
- CommitLog.instance.recover(logFile.getPath()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/
+ CommitLog.instance.recover(logFile.path()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/
return null;
}, CommitLogReplayException.class);
}
@@ -770,7 +772,7 @@ public void replaySimple() throws IOException
        List<String> activeSegments = CommitLog.instance.getActiveSegmentNames();
assertFalse(activeSegments.isEmpty());
- File[] files = new File(CommitLog.instance.segmentManager.storageDirectory).listFiles((file, name) -> activeSegments.contains(name));
+ File[] files = new File(CommitLog.instance.segmentManager.storageDirectory).tryList((file, name) -> activeSegments.contains(name));
replayer.replayFiles(files);
assertEquals(cellCount, replayer.cells);
@@ -791,7 +793,7 @@ public void replayWithBadSyncMarkerCRC() throws IOException
assertFalse(activeSegments.isEmpty());
File directory = new File(CommitLog.instance.segmentManager.storageDirectory);
- File firstActiveFile = Objects.requireNonNull(directory.listFiles((file, name) -> activeSegments.contains(name)))[0];
+ File firstActiveFile = Objects.requireNonNull(directory.tryList((file, name) -> activeSegments.contains(name)))[0];
zeroFirstSyncMarkerCRC(firstActiveFile);
CommitLogSegmentReader.setAllowSkipSyncMarkerCrc(true);
@@ -838,7 +840,7 @@ private void zeroFirstSyncMarkerCRC(File file) throws IOException
buffer.putInt(0);
// ...and write the file back out.
- try (OutputStream out = new FileOutputStream(file))
+ try (OutputStream out = new FileOutputStreamPlus(file))
{
out.write(buffer.array());
}
@@ -875,7 +877,7 @@ public void replayWithDiscard() throws IOException
        List<String> activeSegments = CommitLog.instance.getActiveSegmentNames();
assertFalse(activeSegments.isEmpty());
- File[] files = new File(CommitLog.instance.segmentManager.storageDirectory).listFiles((file, name) -> activeSegments.contains(name));
+ File[] files = new File(CommitLog.instance.segmentManager.storageDirectory).tryList((file, name) -> activeSegments.contains(name));
replayer.replayFiles(files);
assertEquals(cellCount, replayer.cells);
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java
index 5b87d687813e..0519af925c80 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java
@@ -18,7 +18,7 @@
*/
package org.apache.cassandra.db.commitlog;
-import java.io.File;
+import org.apache.cassandra.io.util.File;
import java.io.IOException;
import com.google.common.base.Predicate;
@@ -48,7 +48,7 @@ public CommitLogTestReplayer(Predicate processor) throws IOException
public void examineCommitLog() throws IOException
{
- replayFiles(new File(DatabaseDescriptor.getCommitLogLocation()).listFiles());
+ replayFiles(new File(DatabaseDescriptor.getCommitLogLocation()).tryList());
}
private class CommitLogTestReader extends CommitLogReader
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
index 47059193bdd1..5747a381b3ec 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
@@ -25,6 +25,8 @@
import java.nio.ByteBuffer;
import java.util.Properties;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
import org.junit.Assert;
import com.google.common.base.Predicate;
@@ -116,7 +118,7 @@ public static void initialize()
public void testRestore(String location) throws IOException, InterruptedException
{
Properties prop = new Properties();
- prop.load(new FileInputStream(new File(location + File.separatorChar + PROPERTIES_FILE)));
+ prop.load(new FileInputStreamPlus(new File(location + File.pathSeparator() + PROPERTIES_FILE)));
int hash = Integer.parseInt(prop.getProperty(HASH_PROPERTY));
int cells = Integer.parseInt(prop.getProperty(CELLS_PROPERTY));
@@ -130,7 +132,7 @@ public void testRestore(String location) throws IOException, InterruptedException
Hasher hasher = new Hasher();
CommitLogTestReplayer replayer = new CommitLogTestReplayer(hasher);
- File[] files = new File(location).listFiles((file, name) -> name.endsWith(".log"));
+ File[] files = new File(location).tryList((file, name) -> name.endsWith(".log"));
replayer.replayFiles(files);
Assert.assertEquals(cells, hasher.cells);
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java
index 680a0e7f6c4b..b067faa13c37 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java
@@ -33,6 +33,9 @@
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.util.concurrent.RateLimiter;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
import org.junit.Assert;
import org.apache.cassandra.SchemaLoader;
@@ -82,7 +85,7 @@ public static void main(String[] args) throws Exception
static public void initialize() throws IOException, ConfigurationException
{
- try (FileInputStream fis = new FileInputStream("CHANGES.txt"))
+ try (FileInputStreamPlus fis = new FileInputStreamPlus("CHANGES.txt"))
{
dataSource = ByteBuffer.allocateDirect((int) fis.getChannel().size());
while (dataSource.hasRemaining())
@@ -128,15 +131,15 @@ public void makeLog() throws IOException, InterruptedException
if (dataDir.exists())
FileUtils.deleteRecursive(dataDir);
- dataDir.mkdirs();
- for (File f : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles())
- FileUtils.createHardLink(f, new File(dataDir, f.getName()));
+ dataDir.tryCreateDirectories();
+ for (File f : new File(DatabaseDescriptor.getCommitLogLocation()).tryList())
+ FileUtils.createHardLink(f, new File(dataDir, f.name()));
Properties prop = new Properties();
prop.setProperty(CFID_PROPERTY, Schema.instance.getTableMetadata(KEYSPACE, TABLE).id.toString());
prop.setProperty(CELLS_PROPERTY, Integer.toString(cells));
prop.setProperty(HASH_PROPERTY, Integer.toString(hash));
- prop.store(new FileOutputStream(new File(dataDir, PROPERTIES_FILE)),
+ prop.store(new FileOutputStreamPlus(new File(dataDir, PROPERTIES_FILE)),
"CommitLog upgrade test, version " + FBUtilities.getReleaseVersionString());
System.out.println("Done");
}
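A minimal sketch (not part of the patch) of the Properties round trip that CommitLogUpgradeTest and CommitLogUpgradeTestMaker now perform through the new stream wrappers; the helper name and comment string are illustrative only.

    import java.io.IOException;
    import java.util.Properties;

    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileInputStreamPlus;
    import org.apache.cassandra.io.util.FileOutputStreamPlus;

    // Store and reload a properties file via the wrappers introduced by this patch.
    static Properties roundTrip(File dir, String name, Properties props) throws IOException
    {
        File f = new File(dir, name); // File(File, String), as used with PROPERTIES_FILE above
        try (FileOutputStreamPlus out = new FileOutputStreamPlus(f))
        {
            props.store(out, "written by the migration sketch");
        }
        Properties loaded = new Properties();
        try (FileInputStreamPlus in = new FileInputStreamPlus(f))
        {
            loaded.load(in);
        }
        return loaded;
    }
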
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitlogShutdownTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitlogShutdownTest.java
index 390a6e97df16..e962450a80b1 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitlogShutdownTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitlogShutdownTest.java
@@ -18,11 +18,11 @@
package org.apache.cassandra.db.commitlog;
-import java.io.File;
import java.nio.ByteBuffer;
import java.util.Random;
import com.google.common.collect.ImmutableMap;
+import org.apache.cassandra.io.util.File;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
@@ -93,6 +93,6 @@ public void testShutdownWithPendingTasks() throws Exception
CommitLog.instance.shutdownBlocking();
// the shutdown should block until all logs except the currently active one and perhaps a new, empty one are gone
- Assert.assertTrue(new File(DatabaseDescriptor.getCommitLogLocation()).listFiles().length <= 2);
+ Assert.assertTrue(new File(DatabaseDescriptor.getCommitLogLocation()).tryList().length <= 2);
}
}
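The commit log tests above all swap java.io.File.listFiles() for tryList(). A sketch of the pattern, using the filtered overload shown in CommitLogUpgradeTest; the null guard is an assumption carried over from listFiles() and mirrors the explicit null check that appears later in this patch.

    import org.apache.cassandra.io.util.File;

    // Count commit log segments under a directory, tolerating an unreadable/missing directory.
    static int countLogSegments(String commitLogLocation)
    {
        File dir = new File(commitLogLocation);
        File[] segments = dir.tryList((d, name) -> name.endsWith(".log"));
        return segments == null ? 0 : segments.length;
    }
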
diff --git a/test/unit/org/apache/cassandra/db/commitlog/SegmentReaderTest.java b/test/unit/org/apache/cassandra/db/commitlog/SegmentReaderTest.java
index ce209351fa62..416675907028 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/SegmentReaderTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/SegmentReaderTest.java
@@ -17,10 +17,8 @@
*/
package org.apache.cassandra.db.commitlog;
-import java.io.File;
-import java.io.FileOutputStream;
+import org.apache.cassandra.io.util.*;
import java.io.IOException;
-import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Collections;
@@ -42,9 +40,6 @@
import org.apache.cassandra.io.compress.LZ4Compressor;
import org.apache.cassandra.io.compress.SnappyCompressor;
import org.apache.cassandra.io.compress.ZstdCompressor;
-import org.apache.cassandra.io.util.FileDataInput;
-import org.apache.cassandra.io.util.FileUtils;
-import org.apache.cassandra.io.util.RandomAccessReader;
import org.apache.cassandra.security.CipherFactory;
import org.apache.cassandra.security.EncryptionUtils;
import org.apache.cassandra.security.EncryptionContext;
@@ -103,7 +98,7 @@ private void compressedSegmenter(ICompressor compressor) throws IOException
File compressedFile = FileUtils.createTempFile("compressed-segment-", ".log");
compressedFile.deleteOnExit();
- FileOutputStream fos = new FileOutputStream(compressedFile);
+ FileOutputStreamPlus fos = new FileOutputStreamPlus(compressedFile);
fos.getChannel().write(compBuffer);
fos.close();
@@ -190,7 +185,7 @@ public void underlyingEncryptedSegmenterTest(BiFunction
- assertFiles(dataFolder.getPath(), new HashSet<>(sstableOld.getAllFilePaths()));
+ assertFiles(dataFolder.path(), new HashSet<>(sstableOld.getAllFilePaths()));
}
void assertCommitted() throws Exception
{
- assertFiles(dataFolder.getPath(), new HashSet<>(sstableNew.getAllFilePaths()));
+ assertFiles(dataFolder.path(), new HashSet<>(sstableNew.getAllFilePaths()));
}
}
@@ -218,7 +218,7 @@ public void testUntrack() throws Throwable
Thread.sleep(1);
LogTransaction.waitForDeletions();
- assertFiles(dataFolder.getPath(), Collections.emptySet());
+ assertFiles(dataFolder.path(), Collections.emptySet());
}
@Test
@@ -247,7 +247,7 @@ public void testCommitSameDesc() throws Throwable
sstableOld1.selfRef().release();
sstableOld2.selfRef().release();
- assertFiles(dataFolder.getPath(), new HashSet<>(sstableNew.getAllFilePaths()));
+ assertFiles(dataFolder.path(), new HashSet<>(sstableNew.getAllFilePaths()));
sstableNew.selfRef().release();
}
@@ -265,7 +265,7 @@ public void testCommitOnlyNew() throws Throwable
log.trackNew(sstable);
log.finish();
- assertFiles(dataFolder.getPath(), new HashSet<>(sstable.getAllFilePaths()));
+ assertFiles(dataFolder.path(), new HashSet<>(sstable.getAllFilePaths()));
sstable.selfRef().release();
}
@@ -287,7 +287,7 @@ public void testCommitOnlyOld() throws Throwable
sstable.markObsolete(tidier);
sstable.selfRef().release();
- assertFiles(dataFolder.getPath(), new HashSet<>());
+ assertFiles(dataFolder.path(), new HashSet<>());
}
@Test
@@ -323,8 +323,8 @@ public void testCommitMultipleFolders() throws Throwable
Arrays.stream(sstables).forEach(s -> s.selfRef().release());
LogTransaction.waitForDeletions();
- assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[1].getAllFilePaths()));
- assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[3].getAllFilePaths()));
+ assertFiles(dataFolder1.path(), new HashSet<>(sstables[1].getAllFilePaths()));
+ assertFiles(dataFolder2.path(), new HashSet<>(sstables[3].getAllFilePaths()));
}
@Test
@@ -342,7 +342,7 @@ public void testAbortOnlyNew() throws Throwable
sstable.selfRef().release();
- assertFiles(dataFolder.getPath(), new HashSet<>());
+ assertFiles(dataFolder.path(), new HashSet<>());
}
@Test
@@ -363,7 +363,7 @@ public void testAbortOnlyOld() throws Throwable
sstable.selfRef().release();
- assertFiles(dataFolder.getPath(), new HashSet<>(sstable.getAllFilePaths()));
+ assertFiles(dataFolder.path(), new HashSet<>(sstable.getAllFilePaths()));
}
@Test
@@ -397,8 +397,8 @@ public void testAbortMultipleFolders() throws Throwable
Arrays.stream(sstables).forEach(s -> s.selfRef().release());
LogTransaction.waitForDeletions();
- assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[0].getAllFilePaths()));
- assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[2].getAllFilePaths()));
+ assertFiles(dataFolder1.path(), new HashSet<>(sstables[0].getAllFilePaths()));
+ assertFiles(dataFolder2.path(), new HashSet<>(sstables[2].getAllFilePaths()));
}
@@ -432,7 +432,7 @@ public void testRemoveUnfinishedLeftovers_abort() throws Throwable
Map<Descriptor, Set<Component>> sstables = directories.sstableLister(Directories.OnTxnErr.THROW).list();
assertEquals(1, sstables.size());
- assertFiles(dataFolder.getPath(), new HashSet<>(sstableOld.getAllFilePaths()));
+ assertFiles(dataFolder.path(), new HashSet<>(sstableOld.getAllFilePaths()));
// complete the transaction before releasing files
tidier.run();
@@ -472,7 +472,7 @@ public void testRemoveUnfinishedLeftovers_commit() throws Throwable
Map<Descriptor, Set<Component>> sstables = directories.sstableLister(Directories.OnTxnErr.THROW).list();
assertEquals(1, sstables.size());
- assertFiles(dataFolder.getPath(), new HashSet<>(sstableNew.getAllFilePaths()));
+ assertFiles(dataFolder.path(), new HashSet<>(sstableNew.getAllFilePaths()));
// complete the transaction to avoid LEAK errors
tidier.run();
@@ -522,8 +522,8 @@ public void testRemoveUnfinishedLeftovers_commit_multipleFolders() throws Throwable
assertTrue(LogTransaction.removeUnfinishedLeftovers(Arrays.asList(dataFolder1, dataFolder2)));
// new tables should be only table left
- assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[1].getAllFilePaths()));
- assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[3].getAllFilePaths()));
+ assertFiles(dataFolder1.path(), new HashSet<>(sstables[1].getAllFilePaths()));
+ assertFiles(dataFolder2.path(), new HashSet<>(sstables[3].getAllFilePaths()));
// complete the transaction to avoid LEAK errors
Arrays.stream(tidiers).forEach(LogTransaction.SSTableTidier::run);
@@ -573,8 +573,8 @@ public void testRemoveUnfinishedLeftovers_abort_multipleFolders() throws Throwable
assertTrue(LogTransaction.removeUnfinishedLeftovers(Arrays.asList(dataFolder1, dataFolder2)));
// old tables should be only table left
- assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[0].getAllFilePaths()));
- assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[2].getAllFilePaths()));
+ assertFiles(dataFolder1.path(), new HashSet<>(sstables[0].getAllFilePaths()));
+ assertFiles(dataFolder2.path(), new HashSet<>(sstables[2].getAllFilePaths()));
// complete the transaction to avoid LEAK errors
Arrays.stream(tidiers).forEach(LogTransaction.SSTableTidier::run);
@@ -742,18 +742,18 @@ private static void testRemoveUnfinishedLeftovers_multipleFolders_errorCondition
if (shouldCommit)
{
// only new sstables should still be there
- assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[1].getAllFilePaths()));
- assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[3].getAllFilePaths()));
+ assertFiles(dataFolder1.path(), new HashSet<>(sstables[1].getAllFilePaths()));
+ assertFiles(dataFolder2.path(), new HashSet<>(sstables[3].getAllFilePaths()));
}
else
{
// all files should still be there
- assertFiles(dataFolder1.getPath(), Sets.newHashSet(Iterables.concat(sstables[0].getAllFilePaths(),
- sstables[1].getAllFilePaths(),
- Collections.singleton(log.logFilePaths().get(0)))));
- assertFiles(dataFolder2.getPath(), Sets.newHashSet(Iterables.concat(sstables[2].getAllFilePaths(),
- sstables[3].getAllFilePaths(),
- Collections.singleton(log.logFilePaths().get(1)))));
+ assertFiles(dataFolder1.path(), Sets.newHashSet(Iterables.concat(sstables[0].getAllFilePaths(),
+ sstables[1].getAllFilePaths(),
+ Collections.singleton(log.logFilePaths().get(0)))));
+ assertFiles(dataFolder2.path(), Sets.newHashSet(Iterables.concat(sstables[2].getAllFilePaths(),
+ sstables[3].getAllFilePaths(),
+ Collections.singleton(log.logFilePaths().get(1)))));
}
@@ -778,7 +778,7 @@ public void testGetTemporaryFiles() throws IOException
{
Directories directories = new Directories(cfs.metadata());
- File[] beforeSecondSSTable = dataFolder.listFiles(pathname -> !pathname.isDirectory());
+ File[] beforeSecondSSTable = dataFolder.tryList(pathname -> !pathname.isDirectory());
SSTableReader sstable2 = sstable(dataFolder, cfs, 1, 128);
log.trackNew(sstable2);
@@ -787,7 +787,7 @@ public void testGetTemporaryFiles() throws IOException
assertEquals(2, sstables.size());
// this should contain sstable1, sstable2 and the transaction log file
- File[] afterSecondSSTable = dataFolder.listFiles(pathname -> !pathname.isDirectory());
+ File[] afterSecondSSTable = dataFolder.tryList(pathname -> !pathname.isDirectory());
int numNewFiles = afterSecondSSTable.length - beforeSecondSSTable.length;
assertEquals(numNewFiles - 1, sstable2.getAllFilePaths().size()); // new files except for transaction log file
@@ -1032,7 +1032,7 @@ private static void testCorruptRecord(BiConsumer
LogTransaction.removeUnfinishedLeftovers(cfs.metadata());
// make sure to exclude the old files that were deleted by the modifier
- assertFiles(dataFolder.getPath(), oldFiles);
+ assertFiles(dataFolder.path(), oldFiles);
}
else
{ // if an intermediate line was also modified, it should ignore the tx log file
@@ -1040,9 +1040,9 @@ private static void testCorruptRecord(BiConsumer
//This should not remove any files
LogTransaction.removeUnfinishedLeftovers(cfs.metadata());
- assertFiles(dataFolder.getPath(), Sets.newHashSet(Iterables.concat(newFiles,
- oldFiles,
- log.logFilePaths())));
+ assertFiles(dataFolder.path(), Sets.newHashSet(Iterables.concat(newFiles,
+ oldFiles,
+ log.logFilePaths())));
}
// make sure to run the tidier to avoid any leaks in the logs
@@ -1058,7 +1058,7 @@ public void testObsoletedDataFileUpdateTimeChanged() throws IOException
for (String filePath : sstable.getAllFilePaths())
{
if (filePath.endsWith("Data.db"))
- assertTrue(new File(filePath).setLastModified(System.currentTimeMillis() + 60000)); //one minute later
+ assertTrue(new File(filePath).trySetLastModified(System.currentTimeMillis() + 60000)); //one minute later
}
});
}
@@ -1086,7 +1086,7 @@ private static void testObsoletedFilesChanged(Consumer modifier)
//This should not remove the old files
LogTransaction.removeUnfinishedLeftovers(cfs.metadata());
- assertFiles(dataFolder.getPath(), Sets.newHashSet(Iterables.concat(
+ assertFiles(dataFolder.path(), Sets.newHashSet(Iterables.concat(
sstableNew.getAllFilePaths(),
sstableOld.getAllFilePaths(),
log.logFilePaths())));
@@ -1097,9 +1097,9 @@ private static void testObsoletedFilesChanged(Consumer modifier)
// complete the transaction to avoid LEAK errors
assertNull(log.complete(null));
- assertFiles(dataFolder.getPath(), Sets.newHashSet(Iterables.concat(sstableNew.getAllFilePaths(),
- sstableOld.getAllFilePaths(),
- log.logFilePaths())));
+ assertFiles(dataFolder.path(), Sets.newHashSet(Iterables.concat(sstableNew.getAllFilePaths(),
+ sstableOld.getAllFilePaths(),
+ log.logFilePaths())));
// make sure to run the tidier to avoid any leaks in the logs
tidier.run();
@@ -1118,7 +1118,7 @@ public void testTruncateFileUpdateTime() throws IOException
{
File f = new File(filePath);
long lastModified = f.lastModified();
- f.setLastModified(lastModified - (lastModified % 1000));
+ f.trySetLastModified(lastModified - (lastModified % 1000));
}
});
}
@@ -1146,13 +1146,13 @@ private static void testTruncatedModificationTimesHelper(Consumer
LogTransaction.removeUnfinishedLeftovers(cfs.metadata());
// only the new files should be there
- assertFiles(dataFolder.getPath(), Sets.newHashSet(sstableNew.getAllFilePaths()));
+ assertFiles(dataFolder.path(), Sets.newHashSet(sstableNew.getAllFilePaths()));
sstableNew.selfRef().release();
// complete the transaction to avoid LEAK errors
assertNull(log.complete(null));
- assertFiles(dataFolder.getPath(), Sets.newHashSet(sstableNew.getAllFilePaths()));
+ assertFiles(dataFolder.path(), Sets.newHashSet(sstableNew.getAllFilePaths()));
// make sure to run the tidier to avoid any leaks in the logs
tidier.run();
@@ -1224,8 +1224,9 @@ private static SSTableReader sstable(File dataFolder, ColumnFamilyStore cfs, int
{
File file = new File(descriptor.filenameFor(component));
if (!file.exists())
- assertTrue(file.createNewFile());
- try (RandomAccessFile raf = new RandomAccessFile(file, "rw"))
+ assertTrue(file.createFileIfNotExists());
+
+ try (RandomAccessFile raf = new RandomAccessFile(file.toJavaIOFile(), "rw"))
{
raf.setLength(size);
}
@@ -1262,8 +1263,8 @@ private static void assertFiles(String dirPath, Set<String> expectedFiles, boolean
{
LogTransaction.waitForDeletions();
- File dir = new File(dirPath).getCanonicalFile();
- File[] files = dir.listFiles();
+ File dir = new File(dirPath).toCanonical();
+ File[] files = dir.tryList();
if (files != null)
{
for (File file : files)
@@ -1271,7 +1272,7 @@ private static void assertFiles(String dirPath, Set expectedFiles, boole
if (file.isDirectory())
continue;
- String filePath = file.getPath();
+ String filePath = file.path();
assertTrue(String.format("%s not in [%s]", filePath, expectedFiles), expectedFiles.contains(filePath));
expectedFiles.remove(filePath);
}
@@ -1328,16 +1329,7 @@ static Set<File> listFiles(File folder, Directories.FileType... types)
(file, type) -> match.contains(type),
Directories.OnTxnErr.IGNORE).list()
.stream()
- .map(f -> {
- try
- {
- return f.getCanonicalFile();
- }
- catch (IOException e)
- {
- throw new IOError(e);
- }
- })
+ .map(File::toCanonical)
.collect(Collectors.toSet());
}
}
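A sketch of the directory-assertion idiom after this LogTransactionTest conversion: getCanonicalFile() becomes toCanonical() (no checked IOException, per the simplified mapping above), listFiles() becomes tryList(), and getPath() becomes path(). The helper name is illustrative.

    import java.util.HashSet;
    import java.util.Set;

    import org.apache.cassandra.io.util.File;

    // Collect the paths of regular files directly under a data folder.
    static Set<String> liveFilePaths(String dirPath)
    {
        Set<String> paths = new HashSet<>();
        File dir = new File(dirPath).toCanonical();
        File[] files = dir.tryList();
        if (files == null) // directory missing or unreadable
            return paths;
        for (File file : files)
        {
            if (file.isDirectory())
                continue;
            paths.add(file.path()); // path() replaces java.io.File.getPath()
        }
        return paths;
    }
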
diff --git a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
index 01bfaaee7e44..0420957b9dbd 100644
--- a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
@@ -18,13 +18,13 @@
package org.apache.cassandra.db.lifecycle;
-import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
+import org.apache.cassandra.io.util.File;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -85,8 +85,8 @@ public void testRewriteFinished() throws IOException
LogTransaction.waitForDeletions();
// both sstables are in the same folder
- assertFiles(oldSSTable.descriptor.directory.getPath(), new HashSet<>(newSSTable.getAllFilePaths()));
- assertFiles(newSSTable.descriptor.directory.getPath(), new HashSet<>(newSSTable.getAllFilePaths()));
+ assertFiles(oldSSTable.descriptor.directory.path(), new HashSet<>(newSSTable.getAllFilePaths()));
+ assertFiles(newSSTable.descriptor.directory.path(), new HashSet<>(newSSTable.getAllFilePaths()));
}
@Test
@@ -101,7 +101,7 @@ public void testRewriteAborted() throws IOException
replaceSSTable(cfs, txn, true);
LogTransaction.waitForDeletions();
- assertFiles(oldSSTable.descriptor.directory.getPath(), new HashSet<>(oldSSTable.getAllFilePaths()));
+ assertFiles(oldSSTable.descriptor.directory.path(), new HashSet<>(oldSSTable.getAllFilePaths()));
}
@Test
@@ -112,7 +112,7 @@ public void testFlush() throws IOException
SSTableReader ssTableReader = getSSTable(cfs, 100);
- String dataFolder = cfs.getLiveSSTables().iterator().next().descriptor.directory.getPath();
+ String dataFolder = cfs.getLiveSSTables().iterator().next().descriptor.directory.path();
assertFiles(dataFolder, new HashSet<>(ssTableReader.getAllFilePaths()));
}
@@ -202,12 +202,12 @@ private SSTableReader replaceSSTable(ColumnFamilyStore cfs, LifecycleTransaction
private void assertFiles(String dirPath, Set<String> expectedFiles)
{
File dir = new File(dirPath);
- for (File file : dir.listFiles())
+ for (File file : dir.tryList())
{
if (file.isDirectory())
continue;
- String filePath = file.getPath();
+ String filePath = file.path();
assertTrue(filePath, expectedFiles.contains(filePath));
expectedFiles.remove(filePath);
}
diff --git a/test/unit/org/apache/cassandra/db/marshal/TimestampTypeTest.java b/test/unit/org/apache/cassandra/db/marshal/TimestampTypeTest.java
index b34207f203b8..3ec63c04c842 100644
--- a/test/unit/org/apache/cassandra/db/marshal/TimestampTypeTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/TimestampTypeTest.java
@@ -42,4 +42,4 @@ public void stringProperty()
.isEqualTo(buffer);
});
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/db/rows/UnfilteredRowsGenerator.java b/test/unit/org/apache/cassandra/db/rows/UnfilteredRowsGenerator.java
index 2804733e3f54..c6deb2440bce 100644
--- a/test/unit/org/apache/cassandra/db/rows/UnfilteredRowsGenerator.java
+++ b/test/unit/org/apache/cassandra/db/rows/UnfilteredRowsGenerator.java
@@ -337,4 +337,4 @@ public void dumpList(List list)
{
System.out.println(str(list));
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/dht/LengthPartitioner.java b/test/unit/org/apache/cassandra/dht/LengthPartitioner.java
index bd6f3d4e0eae..c4e5db82849f 100644
--- a/test/unit/org/apache/cassandra/dht/LengthPartitioner.java
+++ b/test/unit/org/apache/cassandra/dht/LengthPartitioner.java
@@ -172,4 +172,4 @@ public AbstractType<?> partitionOrdering()
{
return new PartitionerDefinedOrder(this);
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/fql/FullQueryLoggerTest.java b/test/unit/org/apache/cassandra/fql/FullQueryLoggerTest.java
index e5a5d86586a2..0b9230bfd128 100644
--- a/test/unit/org/apache/cassandra/fql/FullQueryLoggerTest.java
+++ b/test/unit/org/apache/cassandra/fql/FullQueryLoggerTest.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.fql;
-import java.io.File;
import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.util.ArrayList;
@@ -30,6 +29,7 @@
import javax.annotation.Nullable;
+import org.apache.cassandra.io.util.File;
import org.apache.commons.lang3.StringUtils;
import org.junit.After;
import org.junit.BeforeClass;
@@ -132,42 +132,42 @@ public void testConfigureOverExistingFile()
@Test(expected = IllegalArgumentException.class)
public void testCanRead() throws Exception
{
- tempDir.toFile().setReadable(false);
+ new File(tempDir).trySetReadable(false);
try
{
configureFQL();
}
finally
{
- tempDir.toFile().setReadable(true);
+ new File(tempDir).trySetReadable(true);
}
}
@Test(expected = IllegalArgumentException.class)
public void testCanWrite() throws Exception
{
- tempDir.toFile().setWritable(false);
+ new File(tempDir).trySetWritable(false);
try
{
configureFQL();
}
finally
{
- tempDir.toFile().setWritable(true);
+ new File(tempDir).trySetWritable(true);
}
}
@Test(expected = IllegalArgumentException.class)
public void testCanExecute() throws Exception
{
- tempDir.toFile().setExecutable(false);
+ new File(tempDir).trySetExecutable(false);
try
{
configureFQL();
}
finally
{
- tempDir.toFile().setExecutable(true);
+ new File(tempDir).trySetExecutable(true);
}
}
@@ -192,10 +192,10 @@ public void stopWithoutConfigure() throws Exception
public void testResetCleansPaths() throws Exception
{
configureFQL();
- File tempA = File.createTempFile("foo", "bar", tempDir.toFile());
+ File tempA = FileUtils.createTempFile("foo", "bar", new File(tempDir));
assertTrue(tempA.exists());
- File tempB = File.createTempFile("foo", "bar", BinLogTest.tempDir().toFile());
- FullQueryLogger.instance.reset(tempB.getParent());
+ File tempB = FileUtils.createTempFile("foo", "bar", new File(BinLogTest.tempDir()));
+ FullQueryLogger.instance.reset(tempB.parentPath());
assertFalse(tempA.exists());
assertFalse(tempB.exists());
}
@@ -207,9 +207,9 @@ public void testResetCleansPaths() throws Exception
public void testResetSamePath() throws Exception
{
configureFQL();
- File tempA = File.createTempFile("foo", "bar", tempDir.toFile());
+ File tempA = FileUtils.createTempFile("foo", "bar", new File(tempDir));
assertTrue(tempA.exists());
- FullQueryLogger.instance.reset(tempA.getParent());
+ FullQueryLogger.instance.reset(tempA.parentPath());
assertFalse(tempA.exists());
}
@@ -223,10 +223,10 @@ public void testDoubleConfigure() throws Exception
@Test
public void testCleansDirectory() throws Exception
{
- assertTrue(new File(tempDir.toFile(), "foobar").createNewFile());
+ assertTrue(new File(tempDir, "foobar").createFileIfNotExists());
configureFQL();
- assertEquals(tempDir.toFile().listFiles().length, 1);
- assertEquals("metadata.cq4t", tempDir.toFile().listFiles()[0].getName());
+ assertEquals(new File(tempDir).tryList().length, 1);
+ assertEquals("metadata.cq4t", new File(tempDir).tryList()[0].name());
}
@Test
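A sketch of the permission-flipping pattern used by the FQL tests above, with the temp Path wrapped directly in the new File rather than via Path.toFile(); the helper name is illustrative, and the trySet* calls are used as statements since the tests above ignore their results.

    import java.nio.file.Path;

    import org.apache.cassandra.io.util.File;

    // Run a check while a directory is unreadable, restoring the permission afterwards.
    static void withReadDenied(Path dir, Runnable check)
    {
        File f = new File(dir); // File(Path) constructor, as used with tempDir above
        f.trySetReadable(false);
        try
        {
            check.run();
        }
        finally
        {
            f.trySetReadable(true); // always restore, mirroring the try/finally in testCanRead()
        }
    }
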
diff --git a/test/unit/org/apache/cassandra/gms/SerializationsTest.java b/test/unit/org/apache/cassandra/gms/SerializationsTest.java
index 90ce10ba0dff..d8511fe2a3d8 100644
--- a/test/unit/org/apache/cassandra/gms/SerializationsTest.java
+++ b/test/unit/org/apache/cassandra/gms/SerializationsTest.java
@@ -24,6 +24,7 @@
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
import org.apache.cassandra.io.util.DataOutputStreamPlus;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.FBUtilities;
@@ -68,7 +69,7 @@ public void testEndpointStateRead() throws IOException
if (EXECUTE_WRITES)
testEndpointStateWrite();
- DataInputStreamPlus in = getInput("gms.EndpointState.bin");
+ FileInputStreamPlus in = getInput("gms.EndpointState.bin");
assert HeartBeatState.serializer.deserialize(in, getVersion()) != null;
assert EndpointState.serializer.deserialize(in, getVersion()) != null;
assert VersionedValue.serializer.deserialize(in, getVersion()) != null;
@@ -110,7 +111,7 @@ public void testGossipDigestRead() throws IOException
testGossipDigestWrite();
int count = 0;
- DataInputStreamPlus in = getInput("gms.Gossip.bin");
+ FileInputStreamPlus in = getInput("gms.Gossip.bin");
while (count < Statics.Digests.size())
assert GossipDigestAck2.serializer.deserialize(in, getVersion()) != null;
assert GossipDigestAck.serializer.deserialize(in, getVersion()) != null;
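The gossip serialization tests now read their fixtures through FileInputStreamPlus, which the serializers accept directly in place of DataInputStreamPlus. A sketch of opening a fixture that way; the directory shown is illustrative, not the path the test helper actually resolves.

    import java.io.IOException;

    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileInputStreamPlus;

    // Open a serialized fixture for deserialization; caller closes the stream.
    static FileInputStreamPlus openFixture(String name) throws IOException
    {
        return new FileInputStreamPlus(new File("test/data/serialization/" + name));
    }
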
diff --git a/test/unit/org/apache/cassandra/hints/AlteredHints.java b/test/unit/org/apache/cassandra/hints/AlteredHints.java
index 9b8e32f2fd21..0379c41b0c19 100644
--- a/test/unit/org/apache/cassandra/hints/AlteredHints.java
+++ b/test/unit/org/apache/cassandra/hints/AlteredHints.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.hints;
-import java.io.File;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Iterator;
@@ -28,6 +27,7 @@
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
+import org.apache.cassandra.io.util.File;
import org.junit.Assert;
import org.junit.BeforeClass;
@@ -85,7 +85,7 @@ public void multiFlushAndDeserializeTest() throws Exception
long ts = System.currentTimeMillis();
HintsDescriptor descriptor = new HintsDescriptor(hostId, ts, params());
- File dir = Files.createTempDir();
+ File dir = new File(Files.createTempDir());
try (HintsWriter writer = HintsWriter.create(dir, descriptor))
{
Assert.assertTrue(looksLegit(writer));
diff --git a/test/unit/org/apache/cassandra/hints/ChecksummedDataInputTest.java b/test/unit/org/apache/cassandra/hints/ChecksummedDataInputTest.java
index 9f4cdfb92f5b..4642584005f3 100644
--- a/test/unit/org/apache/cassandra/hints/ChecksummedDataInputTest.java
+++ b/test/unit/org/apache/cassandra/hints/ChecksummedDataInputTest.java
@@ -17,13 +17,13 @@
*/
package org.apache.cassandra.hints;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.zip.CRC32;
+import org.apache.cassandra.io.util.File;
import org.junit.BeforeClass;
import org.junit.Test;
diff --git a/test/unit/org/apache/cassandra/hints/HintWriteTTLTest.java b/test/unit/org/apache/cassandra/hints/HintWriteTTLTest.java
index 21dbd7e8c377..22f29cb46e3c 100644
--- a/test/unit/org/apache/cassandra/hints/HintWriteTTLTest.java
+++ b/test/unit/org/apache/cassandra/hints/HintWriteTTLTest.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.hints;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;
@@ -27,6 +26,7 @@
import java.util.List;
import java.util.concurrent.TimeUnit;
+import org.apache.cassandra.io.util.File;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -94,7 +94,7 @@ public static void setupClass() throws Exception
ttldHint = makeHint(tbm, 2, nowInSeconds - (TTL + 1), GC_GRACE);
- File directory = Files.createTempDirectory(null).toFile();
+ File directory = new File(Files.createTempDirectory(null));
HintsDescriptor descriptor = new HintsDescriptor(UUIDGen.getTimeUUID(), s2m(nowInSeconds));
try (HintsWriter writer = HintsWriter.create(directory, descriptor);
diff --git a/test/unit/org/apache/cassandra/hints/HintsCatalogTest.java b/test/unit/org/apache/cassandra/hints/HintsCatalogTest.java
index 92cfc7153bd4..1f8c95d6c788 100644
--- a/test/unit/org/apache/cassandra/hints/HintsCatalogTest.java
+++ b/test/unit/org/apache/cassandra/hints/HintsCatalogTest.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.hints;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;
@@ -26,6 +25,7 @@
import com.google.common.collect.ImmutableMap;
import org.apache.cassandra.SchemaLoader;
import org.apache.cassandra.db.Mutation;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.schema.KeyspaceParams;
import org.apache.cassandra.schema.Schema;
import org.apache.cassandra.utils.FBUtilities;
@@ -57,7 +57,7 @@ public static void defineSchema()
@Test
public void loadCompletenessAndOrderTest() throws IOException
{
- File directory = Files.createTempDirectory(null).toFile();
+ File directory = new File(Files.createTempDirectory(null));
try
{
loadCompletenessAndOrderTest(directory);
@@ -107,7 +107,7 @@ private void loadCompletenessAndOrderTest(File directory) throws IOException
@Test
public void deleteHintsTest() throws IOException
{
- File directory = Files.createTempDirectory(null).toFile();
+ File directory = new File(Files.createTempDirectory(null));
UUID hostId1 = UUID.randomUUID();
UUID hostId2 = UUID.randomUUID();
long now = System.currentTimeMillis();
@@ -138,7 +138,7 @@ public void deleteHintsTest() throws IOException
@Test
public void exciseHintFiles() throws IOException
{
- File directory = Files.createTempDirectory(null).toFile();
+ File directory = new File(Files.createTempDirectory(null));
try
{
exciseHintFiles(directory);
diff --git a/test/unit/org/apache/cassandra/hints/HintsDescriptorTest.java b/test/unit/org/apache/cassandra/hints/HintsDescriptorTest.java
index 2fad7335fd0f..ee79f891c293 100644
--- a/test/unit/org/apache/cassandra/hints/HintsDescriptorTest.java
+++ b/test/unit/org/apache/cassandra/hints/HintsDescriptorTest.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.hints;
import java.io.DataInput;
-import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
@@ -27,6 +26,7 @@
import com.google.common.collect.ImmutableMap;
import com.google.common.io.ByteStreams;
+import org.apache.cassandra.io.util.File;
import org.junit.Test;
import org.apache.cassandra.io.compress.LZ4Compressor;
@@ -104,18 +104,18 @@ public void testReadFromFile() throws IOException
ImmutableMap<String, Object> parameters = ImmutableMap.of();
HintsDescriptor expected = new HintsDescriptor(hostId, version, timestamp, parameters);
- Path directory = Files.createTempDirectory("hints");
+ File directory = new File(Files.createTempDirectory("hints"));
try
{
- try (HintsWriter ignored = HintsWriter.create(directory.toFile(), expected))
+ try (HintsWriter ignored = HintsWriter.create(directory, expected))
{
}
- HintsDescriptor actual = HintsDescriptor.readFromFile(directory.resolve(expected.fileName()));
+ HintsDescriptor actual = HintsDescriptor.readFromFile(new File(directory, expected.fileName()));
assertEquals(expected, actual);
}
finally
{
- directory.toFile().deleteOnExit();
+ directory.deleteOnExit();
}
}
@@ -146,7 +146,7 @@ public void testHandleIOE() throws IOException
HintsDescriptor.handleDescriptorIOE(new IOException("test"), p);
File newFile = new File(p.getParent().toFile(), p.getFileName().toString().replace(".hints", ".corrupt.hints"));
assertThat(p).doesNotExist();
- assertThat(newFile).exists();
+ assertThat(newFile.exists());
newFile.deleteOnExit();
}
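A sketch of the temp-directory handling the hints tests now use: the java.nio temp directory is wrapped in the new File directly instead of calling Path.toFile(), and children are resolved with the File(File, String) constructor. The helper name is illustrative.

    import java.io.IOException;
    import java.nio.file.Files;

    import org.apache.cassandra.io.util.File;

    // Create a temp directory (cleaned up on exit) and resolve a child file inside it.
    static File tempChild(String prefix, String childName) throws IOException
    {
        File directory = new File(Files.createTempDirectory(prefix)); // File(Path) constructor
        directory.deleteOnExit();
        return new File(directory, childName); // as with expected.fileName() above
    }
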
diff --git a/test/unit/org/apache/cassandra/hints/HintsReaderTest.java b/test/unit/org/apache/cassandra/hints/HintsReaderTest.java
index f05d4cec44b5..af1c89b62937 100644
--- a/test/unit/org/apache/cassandra/hints/HintsReaderTest.java
+++ b/test/unit/org/apache/cassandra/hints/HintsReaderTest.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.hints;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;
@@ -29,6 +28,7 @@
import java.util.function.Function;
import com.google.common.collect.Iterables;
+import org.apache.cassandra.io.util.File;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -196,7 +196,7 @@ private void corruptFileHelper(byte[] toAppend, String ks) throws IOException
SchemaLoader.standardCFMD(ks, CF_STANDARD1),
SchemaLoader.standardCFMD(ks, CF_STANDARD2));
int numTable = 2;
- directory = Files.createTempDirectory(null).toFile();
+ directory = new File(Files.createTempDirectory(null));
try
{
generateHints(3, ks);
@@ -206,7 +206,7 @@ private void corruptFileHelper(byte[] toAppend, String ks) throws IOException
}
finally
{
- directory.delete();
+ directory.deleteRecursive();
}
}
@@ -219,7 +219,7 @@ public void testNormalRead() throws IOException
SchemaLoader.standardCFMD(ks, CF_STANDARD1),
SchemaLoader.standardCFMD(ks, CF_STANDARD2));
int numTable = 2;
- directory = Files.createTempDirectory(null).toFile();
+ directory = new File(Files.createTempDirectory(null));
try
{
generateHints(3, ks);
@@ -227,7 +227,7 @@ public void testNormalRead() throws IOException
}
finally
{
- directory.delete();
+ directory.tryDelete();
}
}
@@ -240,7 +240,7 @@ public void testDroppedTableRead() throws IOException
SchemaLoader.standardCFMD(ks, CF_STANDARD1),
SchemaLoader.standardCFMD(ks, CF_STANDARD2));
- directory = Files.createTempDirectory(null).toFile();
+ directory = new File(Files.createTempDirectory(null));
try
{
generateHints(3, ks);
@@ -249,7 +249,7 @@ public void testDroppedTableRead() throws IOException
}
finally
{
- directory.delete();
+ directory.tryDelete();
}
}
}
diff --git a/test/unit/org/apache/cassandra/hints/HintsStoreTest.java b/test/unit/org/apache/cassandra/hints/HintsStoreTest.java
index 0bf9ef43239f..c9a0d57be751 100644
--- a/test/unit/org/apache/cassandra/hints/HintsStoreTest.java
+++ b/test/unit/org/apache/cassandra/hints/HintsStoreTest.java
@@ -19,7 +19,6 @@
package org.apache.cassandra.hints;
import java.io.Closeable;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;
@@ -36,6 +35,7 @@
import org.junit.Test;
import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.schema.TableMetadata;
import org.apache.cassandra.schema.Schema;
import org.apache.cassandra.db.Mutation;
@@ -56,7 +56,7 @@ public class HintsStoreTest
@Before
public void testSetup() throws IOException
{
- directory = Files.createTempDirectory(null).toFile();
+ directory = new File(Files.createTempDirectory(null));
directory.deleteOnExit();
hostId = UUID.randomUUID();
}
diff --git a/test/unit/org/apache/cassandra/index/CustomIndexTest.java b/test/unit/org/apache/cassandra/index/CustomIndexTest.java
index 84a36dfc8eb0..9cf3b105683e 100644
--- a/test/unit/org/apache/cassandra/index/CustomIndexTest.java
+++ b/test/unit/org/apache/cassandra/index/CustomIndexTest.java
@@ -648,7 +648,7 @@ public void testFailing2iFlush() throws Throwable
}
// SSTables remain uncommitted.
- assertEquals(1, getCurrentColumnFamilyStore().getDirectories().getDirectoryForNewSSTables().listFiles().length);
+ assertEquals(1, getCurrentColumnFamilyStore().getDirectories().getDirectoryForNewSSTables().tryList().length);
}
@Test
diff --git a/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java b/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java
index 70948fe70c76..e9fb34a5b6bf 100644
--- a/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java
@@ -17,8 +17,6 @@
*/
package org.apache.cassandra.index.sasi;
-import java.io.File;
-import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
@@ -43,6 +41,7 @@
import org.apache.cassandra.cql3.QueryProcessor;
import org.apache.cassandra.cql3.UntypedResultSet;
import org.apache.cassandra.index.Index;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.schema.ColumnMetadata;
import org.apache.cassandra.schema.Schema;
import org.apache.cassandra.schema.TableMetadata;
diff --git a/test/unit/org/apache/cassandra/index/sasi/disk/OnDiskIndexTest.java b/test/unit/org/apache/cassandra/index/sasi/disk/OnDiskIndexTest.java
index 1afb7b4acdf6..0abddd9193b8 100644
--- a/test/unit/org/apache/cassandra/index/sasi/disk/OnDiskIndexTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/disk/OnDiskIndexTest.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.index.sasi.disk;
-import java.io.File;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.ThreadLocalRandom;
@@ -38,6 +37,7 @@
import org.apache.cassandra.db.marshal.LongType;
import org.apache.cassandra.db.marshal.UTF8Type;
import org.apache.cassandra.io.util.DataOutputBuffer;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.MurmurHash;
import org.apache.cassandra.utils.Pair;
diff --git a/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java b/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java
index 97b3433a8fba..da0dbde7506b 100644
--- a/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.index.sasi.disk;
-import java.io.File;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.Callable;
@@ -40,6 +39,7 @@
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.io.FSError;
import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.schema.ColumnMetadata;
import org.apache.cassandra.schema.KeyspaceMetadata;
diff --git a/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java b/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java
index 4339a6243826..6d067a1d1459 100644
--- a/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.index.sasi.disk;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
@@ -36,6 +35,7 @@
import org.apache.cassandra.index.sasi.utils.RangeIterator;
import org.apache.cassandra.db.marshal.LongType;
import org.apache.cassandra.index.sasi.utils.RangeUnionIterator;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.io.util.SequentialWriterOption;
import org.apache.cassandra.utils.MurmurHash;
diff --git a/test/unit/org/apache/cassandra/index/sasi/utils/LongIteratorTest.java b/test/unit/org/apache/cassandra/index/sasi/utils/LongIteratorTest.java
index 6358355994c2..f61252ac1856 100644
--- a/test/unit/org/apache/cassandra/index/sasi/utils/LongIteratorTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/utils/LongIteratorTest.java
@@ -51,4 +51,4 @@ public void testBasicITerator() throws IOException {
it.close();
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/index/sasi/utils/MappedBufferTest.java b/test/unit/org/apache/cassandra/index/sasi/utils/MappedBufferTest.java
index e55f6bac9112..dcd79b91a465 100644
--- a/test/unit/org/apache/cassandra/index/sasi/utils/MappedBufferTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/utils/MappedBufferTest.java
@@ -25,6 +25,7 @@
import org.apache.cassandra.db.marshal.LongType;
import org.apache.cassandra.io.util.ChannelProxy;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileUtils;
import org.junit.Assert;
@@ -452,7 +453,7 @@ public void testOpenWithoutPageBits() throws IOException
File tmp = FileUtils.createTempFile("mapped-buffer", "tmp");
tmp.deleteOnExit();
- RandomAccessFile file = new RandomAccessFile(tmp, "rw");
+ RandomAccessFile file = new RandomAccessFile(tmp.toJavaIOFile(), "rw");
long numValues = 1000;
for (long i = 0; i < numValues; i++)
@@ -460,7 +461,7 @@ public void testOpenWithoutPageBits() throws IOException
file.getFD().sync();
- try (MappedBuffer buffer = new MappedBuffer(new ChannelProxy(tmp.getAbsolutePath(), file.getChannel())))
+ try (MappedBuffer buffer = new MappedBuffer(new ChannelProxy(tmp.absolutePath(), file.getChannel())))
{
Assert.assertEquals(numValues * 8, buffer.limit());
Assert.assertEquals(numValues * 8, buffer.capacity());
@@ -493,7 +494,7 @@ private MappedBuffer createTestFile(long numCount, int typeSize, int numPageBits
final File testFile = FileUtils.createTempFile("mapped-buffer-test", "db");
testFile.deleteOnExit();
- RandomAccessFile file = new RandomAccessFile(testFile, "rw");
+ RandomAccessFile file = new RandomAccessFile(testFile.toJavaIOFile(), "rw");
for (long i = 0; i < numCount; i++)
{
@@ -529,7 +530,7 @@ private MappedBuffer createTestFile(long numCount, int typeSize, int numPageBits
try
{
- return new MappedBuffer(new ChannelProxy(testFile.getAbsolutePath(), file.getChannel()), numPageBits);
+ return new MappedBuffer(new ChannelProxy(testFile.absolutePath(), file.getChannel()), numPageBits);
}
finally
{
diff --git a/test/unit/org/apache/cassandra/index/sasi/utils/RangeUnionIteratorTest.java b/test/unit/org/apache/cassandra/index/sasi/utils/RangeUnionIteratorTest.java
index 162b1c6f8eff..581f4e5bd4a1 100644
--- a/test/unit/org/apache/cassandra/index/sasi/utils/RangeUnionIteratorTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/utils/RangeUnionIteratorTest.java
@@ -373,4 +373,4 @@ public void emptyRangeTest() {
Assert.assertTrue(range.hasNext());
Assert.assertEquals(10, range.getCount());
}
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
index d3d81f006e4f..8a2beaea110c 100644
--- a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
@@ -19,12 +19,12 @@
package org.apache.cassandra.io.compress;
import java.io.EOFException;
-import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Arrays;
import java.util.Random;
+import org.apache.cassandra.io.util.File;
import org.assertj.core.api.Assertions;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -97,7 +97,7 @@ public void testResetAndTruncateCompressedUncompressedChunksMmap() throws IOExce
public void test6791() throws IOException, ConfigurationException
{
File f = FileUtils.createTempFile("compressed6791_", "3");
- String filename = f.getAbsolutePath();
+ String filename = f.absolutePath();
MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
try(CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata",
null, SequentialWriterOption.DEFAULT,
@@ -132,10 +132,10 @@ public void test6791() throws IOException, ConfigurationException
finally
{
if (f.exists())
- assertTrue(f.delete());
+ assertTrue(f.tryDelete());
File metadata = new File(filename+ ".metadata");
if (metadata.exists())
- metadata.delete();
+ metadata.tryDelete();
}
}
@@ -145,8 +145,8 @@ public void test6791() throws IOException, ConfigurationException
@Test
public void testChunkIndexOverflow() throws IOException
{
- File file = File.createTempFile("chunk_idx_overflow", "1");
- String filename = file.getAbsolutePath();
+ File file = FileUtils.createTempFile("chunk_idx_overflow", "1");
+ String filename = file.absolutePath();
int chunkLength = 4096; // 4k
try
@@ -166,16 +166,16 @@ public void testChunkIndexOverflow() throws IOException
finally
{
if (file.exists())
- assertTrue(file.delete());
+ assertTrue(file.tryDelete());
File metadata = new File(filename + ".metadata");
if (metadata.exists())
- metadata.delete();
+ metadata.tryDelete();
}
}
private static void testResetAndTruncate(File f, boolean compressed, boolean usemmap, int junkSize, double minCompressRatio) throws IOException
{
- final String filename = f.getAbsolutePath();
+ final String filename = f.absolutePath();
writeSSTable(f, compressed ? CompressionParams.snappy() : null, junkSize);
CompressionMetadata compressionMetadata = compressed ? new CompressionMetadata(filename + ".metadata", f.length(), true) : null;
@@ -192,16 +192,16 @@ private static void testResetAndTruncate(File f, boolean compressed, boolean use
finally
{
if (f.exists())
- assertTrue(f.delete());
+ assertTrue(f.tryDelete());
File metadata = new File(filename + ".metadata");
if (compressed && metadata.exists())
- metadata.delete();
+ metadata.tryDelete();
}
}
private static void writeSSTable(File f, CompressionParams params, int junkSize) throws IOException
{
- final String filename = f.getAbsolutePath();
+ final String filename = f.absolutePath();
MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
try(SequentialWriter writer = params != null
? new CompressedSequentialWriter(f, filename + ".metadata",
@@ -237,14 +237,14 @@ public void testDataCorruptionDetection() throws IOException
File file = new File("testDataCorruptionDetection");
file.deleteOnExit();
- File metadata = new File(file.getPath() + ".meta");
+ File metadata = new File(file.path() + ".meta");
metadata.deleteOnExit();
- assertTrue(file.createNewFile());
- assertTrue(metadata.createNewFile());
+ assertTrue(file.createFileIfNotExists());
+ assertTrue(metadata.createFileIfNotExists());
MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
- try (SequentialWriter writer = new CompressedSequentialWriter(file, metadata.getPath(),
+ try (SequentialWriter writer = new CompressedSequentialWriter(file, metadata.path(),
null, SequentialWriterOption.DEFAULT,
CompressionParams.snappy(), sstableMetadataCollector))
{
@@ -253,16 +253,16 @@ public void testDataCorruptionDetection() throws IOException
}
// open compression metadata and get chunk information
- CompressionMetadata meta = new CompressionMetadata(metadata.getPath(), file.length(), true);
+ CompressionMetadata meta = new CompressionMetadata(metadata.path(), file.length(), true);
CompressionMetadata.Chunk chunk = meta.chunkFor(0);
- try (FileHandle.Builder builder = new FileHandle.Builder(file.getPath()).withCompressionMetadata(meta);
+ try (FileHandle.Builder builder = new FileHandle.Builder(file.path()).withCompressionMetadata(meta);
FileHandle fh = builder.complete();
RandomAccessReader reader = fh.createReader())
{// read and verify compressed data
assertEquals(CONTENT, reader.readLine());
Random random = new Random();
- try(RandomAccessFile checksumModifier = new RandomAccessFile(file, "rw"))
+ try(RandomAccessFile checksumModifier = new RandomAccessFile(file.toJavaIOFile(), "rw"))
{
byte[] checksum = new byte[4];
@@ -311,6 +311,6 @@ private static void updateChecksum(RandomAccessFile file, long checksumOffset, b
{
file.seek(checksumOffset);
file.write(checksum);
- SyncUtil.sync(file);
+ SyncUtil.sync(file.getFD());
}
}
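Where a JDK API still wants a java.io.File, these tests unwrap with toJavaIOFile(), as in the RandomAccessFile usages above. A minimal sketch; the helper is illustrative.

    import java.io.IOException;
    import java.io.RandomAccessFile;

    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileUtils;

    // Preallocate a scratch file by bridging back to java.io for RandomAccessFile.
    static void preallocate(long size) throws IOException
    {
        File f = FileUtils.createTempFile("prealloc-", ".bin");
        f.deleteOnExit();
        try (RandomAccessFile raf = new RandomAccessFile(f.toJavaIOFile(), "rw"))
        {
            raf.setLength(size); // plain JDK call against the unwrapped java.io.File
        }
    }
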
diff --git a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
index 57802cbf1525..1e671ddb2d2e 100644
--- a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
@@ -19,11 +19,11 @@
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
+import org.apache.cassandra.io.util.File;
import static org.apache.cassandra.schema.CompressionParams.DEFAULT_CHUNK_LENGTH;
import static org.apache.commons.io.FileUtils.readFileToByteArray;
import static org.junit.Assert.assertEquals;
@@ -113,7 +113,7 @@ public void testNoopWriter() throws IOException
private void testWrite(File f, int bytesToTest, boolean useMemmap) throws IOException
{
- final String filename = f.getAbsolutePath();
+ final String filename = f.absolutePath();
MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(Collections.singletonList(BytesType.instance)));
byte[] dataPre = new byte[bytesToTest];
@@ -171,10 +171,10 @@ private void testWrite(File f, int bytesToTest, boolean useMemmap) throws IOException
finally
{
if (f.exists())
- f.delete();
+ f.tryDelete();
File metadata = new File(f + ".metadata");
if (metadata.exists())
- metadata.delete();
+ metadata.tryDelete();
}
}
@@ -213,12 +213,12 @@ private void testUncompressedChunks(int size, double ratio, int extra) throws IOException
b.flip();
File f = FileUtils.createTempFile("testUncompressedChunks", "1");
- String filename = f.getPath();
+ String filename = f.path();
MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(Collections.singletonList(BytesType.instance)));
compressionParameters = new CompressionParams(MockCompressor.class.getTypeName(),
MockCompressor.paramsFor(ratio, extra),
DEFAULT_CHUNK_LENGTH, ratio);
- try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, f.getPath() + ".metadata",
+ try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, f.path() + ".metadata",
null, SequentialWriterOption.DEFAULT,
compressionParameters,
sstableMetadataCollector))
@@ -244,10 +244,10 @@ private void testUncompressedChunks(int size, double ratio, int extra) throws IOException
finally
{
if (f.exists())
- f.delete();
+ f.tryDelete();
File metadata = new File(f + ".metadata");
if (metadata.exists())
- metadata.delete();
+ metadata.tryDelete();
}
}
@@ -272,12 +272,12 @@ public void cleanup()
@Override
public void resetAndTruncateTest()
{
- File tempFile = new File(Files.createTempDir(), "reset.txt");
+ File tempFile = new File(Files.createTempDir().toPath(), "reset.txt");
File offsetsFile = FileUtils.createDeletableTempFile("compressedsequentialwriter.offset", "test");
final int bufferSize = 48;
final int writeSize = 64;
byte[] toWrite = new byte[writeSize];
- try (SequentialWriter writer = new CompressedSequentialWriter(tempFile, offsetsFile.getPath(),
+ try (SequentialWriter writer = new CompressedSequentialWriter(tempFile, offsetsFile.path(),
null, SequentialWriterOption.DEFAULT,
CompressionParams.lz4(bufferSize),
new MetadataCollector(new ClusteringComparator(UTF8Type.instance))))
@@ -331,7 +331,7 @@ private TestableCSW() throws IOException
private TestableCSW(File file, File offsetsFile) throws IOException
{
- this(file, offsetsFile, new CompressedSequentialWriter(file, offsetsFile.getPath(),
+ this(file, offsetsFile, new CompressedSequentialWriter(file, offsetsFile.path(),
null, SequentialWriterOption.DEFAULT,
CompressionParams.lz4(BUFFER_SIZE, MAX_COMPRESSED),
new MetadataCollector(new ClusteringComparator(UTF8Type.instance))));
@@ -348,7 +348,7 @@ protected void assertInProgress() throws Exception
{
Assert.assertTrue(file.exists());
Assert.assertFalse(offsetsFile.exists());
- byte[] compressed = readFileToByteArray(file);
+ byte[] compressed = readFileToByteArray(file.toJavaIOFile());
byte[] uncompressed = new byte[partialContents.length];
LZ4Compressor.create(Collections.emptyMap()).uncompress(compressed, 0, compressed.length - 4, uncompressed, 0);
Assert.assertTrue(Arrays.equals(partialContents, uncompressed));
@@ -358,7 +358,7 @@ protected void assertPrepared() throws Exception
{
Assert.assertTrue(file.exists());
Assert.assertTrue(offsetsFile.exists());
- DataInputStream offsets = new DataInputStream(new ByteArrayInputStream(readFileToByteArray(offsetsFile)));
+ DataInputStream offsets = new DataInputStream(new ByteArrayInputStream(readFileToByteArray(offsetsFile.toJavaIOFile())));
Assert.assertTrue(offsets.readUTF().endsWith("LZ4Compressor"));
Assert.assertEquals(0, offsets.readInt());
Assert.assertEquals(BUFFER_SIZE, offsets.readInt());
@@ -367,7 +367,7 @@ protected void assertPrepared() throws Exception
Assert.assertEquals(2, offsets.readInt());
Assert.assertEquals(0, offsets.readLong());
int offset = (int) offsets.readLong();
- byte[] compressed = readFileToByteArray(file);
+ byte[] compressed = readFileToByteArray(file.toJavaIOFile());
byte[] uncompressed = new byte[fullContents.length];
LZ4Compressor.create(Collections.emptyMap()).uncompress(compressed, 0, offset - 4, uncompressed, 0);
LZ4Compressor.create(Collections.emptyMap()).uncompress(compressed, offset, compressed.length - (4 + offset), uncompressed, partialContents.length);
@@ -381,8 +381,8 @@ protected void assertAborted() throws Exception
void cleanup()
{
- file.delete();
- offsetsFile.delete();
+ file.tryDelete();
+ offsetsFile.tryDelete();
}
}
diff --git a/test/unit/org/apache/cassandra/io/compress/CompressorTest.java b/test/unit/org/apache/cassandra/io/compress/CompressorTest.java
index 29e8453c5105..dad3ae44aece 100644
--- a/test/unit/org/apache/cassandra/io/compress/CompressorTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressorTest.java
@@ -27,6 +27,7 @@
import java.util.Random;
import com.google.common.io.Files;
+import org.apache.cassandra.io.util.File;
import static org.junit.Assert.*;
import org.junit.Assert;
import org.junit.Test;
@@ -143,7 +144,7 @@ public void testMappedFile() throws IOException
dest.clear();
channel.write(dest);
- MappedByteBuffer mappedData = Files.map(temp);
+ MappedByteBuffer mappedData = Files.map(temp.toJavaIOFile());
ByteBuffer result = makeBB(data.length + 100);
mappedData.position(outOffset).limit(outOffset + compressedLength);
diff --git a/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
index 9e3594bc0fa5..14d3c5ef6f61 100644
--- a/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
@@ -18,7 +18,7 @@
*/
package org.apache.cassandra.io.sstable;
-import java.io.File;
+import org.apache.cassandra.io.util.File;
import java.io.IOException;
import org.junit.BeforeClass;
diff --git a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java
index 61ac017430e2..778e8a771084 100644
--- a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java
@@ -17,11 +17,12 @@
*/
package org.apache.cassandra.io.sstable;
-import java.io.File;
-import java.io.FilenameFilter;
+
import java.io.IOException;
+import java.util.function.BiPredicate;
import com.google.common.io.Files;
+import org.apache.cassandra.io.util.File;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -39,7 +40,7 @@ public class CQLSSTableWriterClientTest
@Before
public void setUp()
{
- this.testDirectory = Files.createTempDir();
+ this.testDirectory = new File(Files.createTempDir());
DatabaseDescriptor.daemonInitialization();
}
@@ -73,16 +74,9 @@ public void testWriterInClientMode() throws IOException, InvalidRequestException
writer.close();
writer2.close();
- FilenameFilter filter = new FilenameFilter()
- {
- @Override
- public boolean accept(File dir, String name)
- {
- return name.endsWith("-Data.db");
- }
- };
+ BiPredicate<File, String> filter = (dir, name) -> name.endsWith("-Data.db");
- File[] dataFiles = this.testDirectory.listFiles(filter);
+ File[] dataFiles = this.testDirectory.tryList(filter);
assertEquals(2, dataFiles.length);
}
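In both writer tests the anonymous FilenameFilter becomes a BiPredicate passed to tryList(); the <File, String> type arguments in this sketch are an assumption matching the (dir, name) lambda, and the null guard mirrors listFiles() semantics.

    import java.util.function.BiPredicate;

    import org.apache.cassandra.io.util.File;

    // List the -Data.db components written into a directory.
    static File[] dataFiles(File dir)
    {
        BiPredicate<File, String> filter = (d, name) -> name.endsWith("-Data.db");
        File[] files = dir.tryList(filter);
        return files == null ? new File[0] : files;
    }
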
diff --git a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java
index 79c6e7785255..2d04bb953e52 100644
--- a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java
@@ -17,17 +17,18 @@
*/
package org.apache.cassandra.io.sstable;
-import java.io.File;
-import java.io.FilenameFilter;
+
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.BiPredicate;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
+import org.apache.cassandra.io.util.File;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
@@ -85,8 +86,8 @@ public void perTestSetup() throws IOException
keyspace = "cql_keyspace" + idGen.incrementAndGet();
table = "table" + idGen.incrementAndGet();
qualifiedTable = keyspace + '.' + table;
- dataDir = new File(tempFolder.newFolder().getAbsolutePath() + File.separator + keyspace + File.separator + table);
- assert dataDir.mkdirs();
+ dataDir = new File(tempFolder.newFolder().getAbsolutePath() + File.pathSeparator() + keyspace + File.pathSeparator() + table);
+ assert dataDir.tryCreateDirectories();
}
@Test
@@ -190,14 +191,8 @@ public void testSyncWithinPartition() throws Exception
writer.addRow(1, val);
writer.close();
- FilenameFilter filterDataFiles = new FilenameFilter()
- {
- public boolean accept(File dir, String name)
- {
- return name.endsWith("-Data.db");
- }
- };
- assert dataDir.list(filterDataFiles).length > 1 : Arrays.toString(dataDir.list(filterDataFiles));
+ BiPredicate<File, String> filterDataFiles = (dir, name) -> name.endsWith("-Data.db");
+ assert dataDir.tryListNames(filterDataFiles).length > 1 : Arrays.toString(dataDir.tryListNames(filterDataFiles));
}
diff --git a/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java b/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java
index 5f79f570b725..988774e1c475 100644
--- a/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java
@@ -17,10 +17,10 @@
*/
package org.apache.cassandra.io.sstable;
-import java.io.File;
import java.io.IOException;
import java.util.UUID;
+import org.apache.cassandra.io.util.File;
import org.apache.commons.lang3.StringUtils;
import org.junit.Assert;
import org.junit.BeforeClass;
@@ -45,7 +45,7 @@ public class DescriptorTest
public DescriptorTest() throws IOException
{
// create CF directories, one without CFID and one with it
- tempDataDir = FileUtils.createTempFile("DescriptorTest", null).getParentFile();
+ tempDataDir = FileUtils.createTempFile("DescriptorTest", null).parent();
}
@BeforeClass
@@ -57,28 +57,28 @@ public static void setup()
@Test
public void testFromFilename() throws Exception
{
- File cfIdDir = new File(tempDataDir.getAbsolutePath() + File.separator + ksname + File.separator + cfname + '-' + cfId);
+ File cfIdDir = new File(tempDataDir.absolutePath() + File.pathSeparator() + ksname + File.pathSeparator() + cfname + '-' + cfId);
testFromFilenameFor(cfIdDir);
}
@Test
public void testFromFilenameInBackup() throws Exception
{
- File backupDir = new File(StringUtils.join(new String[]{tempDataDir.getAbsolutePath(), ksname, cfname + '-' + cfId, Directories.BACKUPS_SUBDIR}, File.separator));
+ File backupDir = new File(StringUtils.join(new String[]{ tempDataDir.absolutePath(), ksname, cfname + '-' + cfId, Directories.BACKUPS_SUBDIR}, File.pathSeparator()));
testFromFilenameFor(backupDir);
}
@Test
public void testFromFilenameInSnapshot() throws Exception
{
- File snapshotDir = new File(StringUtils.join(new String[]{tempDataDir.getAbsolutePath(), ksname, cfname + '-' + cfId, Directories.SNAPSHOT_SUBDIR, "snapshot_name"}, File.separator));
+ File snapshotDir = new File(StringUtils.join(new String[]{ tempDataDir.absolutePath(), ksname, cfname + '-' + cfId, Directories.SNAPSHOT_SUBDIR, "snapshot_name"}, File.pathSeparator()));
testFromFilenameFor(snapshotDir);
}
@Test
public void testFromFilenameInLegacyDirectory() throws Exception
{
- File cfDir = new File(tempDataDir.getAbsolutePath() + File.separator + ksname + File.separator + cfname);
+ File cfDir = new File(tempDataDir.absolutePath() + File.pathSeparator() + ksname + File.pathSeparator() + cfname);
testFromFilenameFor(cfDir);
}
@@ -88,7 +88,7 @@ private void testFromFilenameFor(File dir)
// secondary index
String idxName = "myidx";
- File idxDir = new File(dir.getAbsolutePath() + File.separator + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName);
+ File idxDir = new File(dir.absolutePath() + File.pathSeparator() + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName);
checkFromFilename(new Descriptor(idxDir, ksname, cfname + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName, 4, SSTableFormat.Type.BIG));
}
@@ -113,7 +113,7 @@ public void testEquality()
// Descriptor should be equal when parent directory points to the same directory
File dir = new File(".");
Descriptor desc1 = new Descriptor(dir, "ks", "cf", 1, SSTableFormat.Type.BIG);
- Descriptor desc2 = new Descriptor(dir.getAbsoluteFile(), "ks", "cf", 1, SSTableFormat.Type.BIG);
+ Descriptor desc2 = new Descriptor(dir.toAbsolute(), "ks", "cf", 1, SSTableFormat.Type.BIG);
assertEquals(desc1, desc2);
assertEquals(desc1.hashCode(), desc2.hashCode());
}
@@ -124,7 +124,7 @@ public void validateNames()
String[] names = {
"ma-1-big-Data.db",
// 2ndary index
- ".idx1" + File.separator + "ma-1-big-Data.db",
+ ".idx1" + File.pathSeparator() + "ma-1-big-Data.db",
};
for (String name : names)
diff --git a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
index b96e1f7e3573..2fcc54302ef1 100644
--- a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
@@ -17,9 +17,7 @@
*/
package org.apache.cassandra.io.sstable;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
@@ -29,6 +27,9 @@
import com.google.common.collect.Lists;
import com.google.common.collect.Iterables;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
+import org.apache.cassandra.io.util.FileOutputStreamPlus;
import org.junit.After;
import org.junit.Assert;
import org.junit.BeforeClass;
@@ -113,7 +114,7 @@ public static void defineSchema() throws ConfigurationException
String scp = System.getProperty(LEGACY_SSTABLE_PROP);
Assert.assertNotNull("System property " + LEGACY_SSTABLE_PROP + " not set", scp);
- LEGACY_SSTABLE_ROOT = new File(scp).getAbsoluteFile();
+ LEGACY_SSTABLE_ROOT = new File(scp).toAbsolute();
Assert.assertTrue("System property " + LEGACY_SSTABLE_ROOT + " does not specify a directory", LEGACY_SSTABLE_ROOT.isDirectory());
SchemaLoader.prepareServer();
@@ -637,7 +638,7 @@ public void testGenerateSstables() throws Throwable
StorageService.instance.forceKeyspaceFlush("legacy_tables");
File ksDir = new File(LEGACY_SSTABLE_ROOT, String.format("%s/legacy_tables", BigFormat.latestVersion));
- ksDir.mkdirs();
+ ksDir.tryCreateDirectories();
copySstablesFromTestData(String.format("legacy_%s_simple", BigFormat.latestVersion), ksDir);
copySstablesFromTestData(String.format("legacy_%s_simple_counter", BigFormat.latestVersion), ksDir);
copySstablesFromTestData(String.format("legacy_%s_clust", BigFormat.latestVersion), ksDir);
@@ -647,11 +648,11 @@ public void testGenerateSstables() throws Throwable
public static void copySstablesFromTestData(String table, File ksDir) throws IOException
{
File cfDir = new File(ksDir, table);
- cfDir.mkdir();
+ cfDir.tryCreateDirectory();
for (File srcDir : Keyspace.open("legacy_tables").getColumnFamilyStore(table).getDirectories().getCFDirectories())
{
- for (File file : srcDir.listFiles())
+ for (File file : srcDir.tryList())
{
copyFile(cfDir, file);
}
@@ -662,7 +663,7 @@ private static void copySstablesToTestData(String legacyVersion, String table, F
{
File tableDir = getTableDir(legacyVersion, table);
Assert.assertTrue("The table directory " + tableDir + " was not found", tableDir.isDirectory());
- for (File file : tableDir.listFiles())
+ for (File file : tableDir.tryList())
{
copyFile(cfDir, file);
}
@@ -678,10 +679,10 @@ private static void copyFile(File cfDir, File file) throws IOException
byte[] buf = new byte[65536];
if (file.isFile())
{
- File target = new File(cfDir, file.getName());
+ File target = new File(cfDir, file.name());
int rd;
- try (FileInputStream is = new FileInputStream(file);
- FileOutputStream os = new FileOutputStream(target);) {
+ try (FileInputStreamPlus is = new FileInputStreamPlus(file);
+ FileOutputStreamPlus os = new FileOutputStreamPlus(target);) {
while ((rd = is.read(buf)) >= 0)
os.write(buf, 0, rd);
}
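
The copyFile change above replaces FileInputStream/FileOutputStream with the Plus variants from org.apache.cassandra.io.util. A stand-alone sketch of the same copy loop, assuming only the constructors and read/write calls that appear in the hunk; the helper class name is hypothetical:

    import java.io.IOException;
    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileInputStreamPlus;
    import org.apache.cassandra.io.util.FileOutputStreamPlus;

    public class CopyFileSketch
    {
        // Byte-for-byte copy, mirroring LegacySSTableTest.copyFile after this patch.
        public static void copy(File src, File dst) throws IOException
        {
            byte[] buf = new byte[65536];
            int rd;
            try (FileInputStreamPlus is = new FileInputStreamPlus(src);
                 FileOutputStreamPlus os = new FileOutputStreamPlus(dst))
            {
                while ((rd = is.read(buf)) >= 0)
                    os.write(buf, 0, rd);
            }
        }
    }
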
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java
index c4e5207accd8..6e4ed52f6c66 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java
@@ -18,13 +18,13 @@
package org.apache.cassandra.io.sstable;
-import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.function.*;
+import org.apache.cassandra.io.util.File;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java
index d07187b22bfc..f578f770877f 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.io.sstable;
-import java.io.File;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
@@ -32,6 +31,7 @@
import java.util.stream.IntStream;
import com.google.common.collect.Sets;
+import org.apache.cassandra.io.util.File;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -89,8 +89,8 @@ public class SSTableHeaderFixTest
public void setup()
{
File f = FileUtils.createTempFile("SSTableUDTFixTest", "");
- f.delete();
- f.mkdirs();
+ f.tryDelete();
+ f.tryCreateDirectories();
temporaryFolder = f;
}
@@ -794,7 +794,7 @@ private File buildFakeSSTable(File dir, int generation, TableMetadata.Builder co
// Just create the component files - we don't really need those.
for (Component component : requiredComponents)
- assertTrue(new File(desc.filenameFor(component)).createNewFile());
+ assertTrue(new File(desc.filenameFor(component)).createFileIfNotExists());
AbstractType<?> partitionKey = headerMetadata.partitionKeyType;
List<AbstractType<?>> clusteringKey = headerMetadata.clusteringColumns()
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
index ac0cda1cde80..51489a89af55 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.io.sstable;
-import java.io.File;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
@@ -25,6 +24,7 @@
import com.google.common.io.Files;
+import org.apache.cassandra.io.util.File;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -87,7 +87,7 @@ public static void defineSchema()
@Before
public void setup() throws Exception
{
- tmpdir = Files.createTempDir();
+ tmpdir = new File(Files.createTempDir());
}
@After
@@ -184,7 +184,7 @@ public void testLoadingIncompleteSSTable() throws Exception
cfs.forceBlockingFlush(); // wait for sstables to be on disk else we won't be able to stream them
//make sure we have some tables...
- assertTrue(Objects.requireNonNull(dataDir.listFiles()).length > 0);
+ assertTrue(Objects.requireNonNull(dataDir.tryList()).length > 0);
final CountDownLatch latch = new CountDownLatch(2);
//writer is still open so loader should not load anything
@@ -212,8 +212,8 @@ public void testLoadingIncompleteSSTable() throws Exception
@Test
public void testLoadingSSTableToDifferentKeyspace() throws Exception
{
- File dataDir = new File(tmpdir.getAbsolutePath() + File.separator + KEYSPACE1 + File.separator + CF_STANDARD1);
- assert dataDir.mkdirs();
+ File dataDir = new File(tmpdir.absolutePath() + File.pathSeparator() + KEYSPACE1 + File.pathSeparator() + CF_STANDARD1);
+ assert dataDir.tryCreateDirectories();
TableMetadata metadata = Schema.instance.getTableMetadata(KEYSPACE1, CF_STANDARD1);
String schema = "CREATE TABLE %s.%s (key ascii, name ascii, val ascii, val1 ascii, PRIMARY KEY (key, name))";
@@ -290,10 +290,10 @@ public void testLoadingBackupsTable() throws Exception
private File dataDir(String cf)
{
- File dataDir = new File(tmpdir.getAbsolutePath() + File.separator + SSTableLoaderTest.KEYSPACE1 + File.separator + cf);
- assert dataDir.mkdirs();
+ File dataDir = new File(tmpdir.absolutePath() + File.pathSeparator() + SSTableLoaderTest.KEYSPACE1 + File.pathSeparator() + cf);
+ assert dataDir.tryCreateDirectories();
//make sure we have no tables...
- assertEquals(Objects.requireNonNull(dataDir.listFiles()).length, 0);
+ assertEquals(Objects.requireNonNull(dataDir.tryList()).length, 0);
return dataDir;
}
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
index f1fc4cb4b9b8..f64ed4eed314 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.io.sstable;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;
@@ -26,6 +25,7 @@
import java.util.concurrent.*;
import com.google.common.collect.Sets;
+import org.apache.cassandra.io.util.File;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
@@ -496,7 +496,7 @@ public void testOpeningSSTable() throws Exception
// check that only the summary is regenerated when it is deleted
components.add(Component.FILTER);
summaryModified = Files.getLastModifiedTime(summaryPath).toMillis();
- summaryFile.delete();
+ summaryFile.tryDelete();
TimeUnit.MILLISECONDS.sleep(1000); // sleep to ensure modified time will be different
bloomModified = Files.getLastModifiedTime(bloomPath).toMillis();
@@ -794,7 +794,7 @@ public void testMoveAndOpenSSTable() throws IOException
SSTableReader sstable = getNewSSTable(cfs);
cfs.clearUnsafe();
sstable.selfRef().release();
- File tmpdir = Files.createTempDirectory("testMoveAndOpen").toFile();
+ File tmpdir = new File(Files.createTempDirectory("testMoveAndOpen"));
tmpdir.deleteOnExit();
Descriptor notLiveDesc = new Descriptor(tmpdir, sstable.descriptor.ksname, sstable.descriptor.cfname, 100);
// make sure the new directory is empty and that the old files exist:
@@ -861,7 +861,7 @@ public void testVerifyCompressionInfoExistenceThrows()
// delete the compression info, so it is corrupted.
File compressionInfoFile = new File(desc.filenameFor(Component.COMPRESSION_INFO));
- compressionInfoFile.delete();
+ compressionInfoFile.tryDelete();
assertFalse("CompressionInfo file should not exist", compressionInfoFile.exists());
// discovert the components on disk after deletion
@@ -881,7 +881,7 @@ public void testVerifyCompressionInfoExistenceWhenTOCUnableToOpen()
// mark the toc file not readable in order to trigger the FSReadError
File tocFile = new File(desc.filenameFor(Component.TOC));
- tocFile.setReadable(false);
+ tocFile.trySetReadable(false);
expectedException.expect(FSReadError.class);
expectedException.expectMessage("TOC.txt");
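
The delete and permission calls above move to the try-prefixed variants, which signal failure through their return value rather than being fire-and-forget. A short sketch of checking those results, assuming (as the FileTest additions later in this patch suggest) that tryDelete() and trySetReadable(boolean) return a success boolean; the class name and path are hypothetical:

    import org.apache.cassandra.io.util.File;

    public class TryCallsSketch
    {
        public static void main(String[] args)
        {
            File f = new File(args.length > 0 ? args[0] : "/tmp/example-Data.db"); // hypothetical file
            if (!f.trySetReadable(false))
                System.out.println("could not change permissions on " + f.absolutePath());
            if (!f.tryDelete())
                System.out.println("could not delete " + f.absolutePath());
        }
    }
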
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
index 1895653ccd0b..72b4587a5e2e 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
@@ -18,8 +18,8 @@
package org.apache.cassandra.io.sstable;
-import java.io.File;
import java.nio.ByteBuffer;
+import java.io.IOException;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -52,6 +52,7 @@
import org.apache.cassandra.db.lifecycle.SSTableSet;
import org.apache.cassandra.io.sstable.format.SSTableReader;
import org.apache.cassandra.io.sstable.format.SSTableWriter;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
import org.apache.cassandra.metrics.StorageMetrics;
@@ -99,7 +100,7 @@ public void basicTest()
writer.finish();
}
LifecycleTransaction.waitForDeletions();
- assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list()));
+ assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.tryListNames()));
validateCFS(cfs);
truncate(cfs);
@@ -131,7 +132,7 @@ public void basicTest2()
writer.finish();
}
LifecycleTransaction.waitForDeletions();
- assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list()));
+ assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.tryListNames()));
validateCFS(cfs);
}
@@ -186,7 +187,7 @@ public void getPositionsTest()
writer.finish();
}
LifecycleTransaction.waitForDeletions();
- assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list()));
+ assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.tryListNames()));
validateCFS(cfs);
truncate(cfs);
@@ -244,7 +245,7 @@ public void testNumberOfFilesAndSizes()
// tmplink and tmp files should be gone:
assertEquals(sum, cfs.metric.totalDiskSpaceUsed.getCount());
- assertFileCounts(s.descriptor.directory.list());
+ assertFileCounts(s.descriptor.directory.tryListNames());
validateCFS(cfs);
}
@@ -287,7 +288,7 @@ public void testNumberOfFiles_dont_clean_readers()
assertEquals(files, cfs.getLiveSSTables().size());
LifecycleTransaction.waitForDeletions();
- assertFileCounts(s.descriptor.directory.list());
+ assertFileCounts(s.descriptor.directory.tryListNames());
validateCFS(cfs);
}
@@ -426,7 +427,7 @@ private void testNumberOfFiles_abort(RewriterTest test)
assertEquals(startSize, cfs.metric.liveDiskSpaceUsed.getCount());
assertEquals(1, cfs.getLiveSSTables().size());
- assertFileCounts(s.descriptor.directory.list());
+ assertFileCounts(s.descriptor.directory.tryListNames());
assertEquals(cfs.getLiveSSTables().iterator().next().first, origFirst);
assertEquals(cfs.getLiveSSTables().iterator().next().last, origLast);
validateCFS(cfs);
@@ -473,7 +474,7 @@ public void testNumberOfFiles_finish_empty_new_writer()
LifecycleTransaction.waitForDeletions();
assertEquals(files - 1, cfs.getLiveSSTables().size()); // we never wrote anything to the last file
- assertFileCounts(s.descriptor.directory.list());
+ assertFileCounts(s.descriptor.directory.tryListNames());
validateCFS(cfs);
}
@@ -513,7 +514,7 @@ public void testNumberOfFiles_truncate()
}
LifecycleTransaction.waitForDeletions();
- assertFileCounts(s.descriptor.directory.list());
+ assertFileCounts(s.descriptor.directory.tryListNames());
validateCFS(cfs);
}
@@ -554,7 +555,7 @@ public void testSmallFiles()
assertEquals(files, sstables.size());
assertEquals(files, cfs.getLiveSSTables().size());
LifecycleTransaction.waitForDeletions();
- assertFileCounts(s.descriptor.directory.list());
+ assertFileCounts(s.descriptor.directory.tryListNames());
validateCFS(cfs);
}
@@ -572,10 +573,10 @@ public void testSSTableSplit()
SSTableSplitter splitter = new SSTableSplitter(cfs, txn, 10);
splitter.split();
- assertFileCounts(s.descriptor.directory.list());
+ assertFileCounts(s.descriptor.directory.tryListNames());
LifecycleTransaction.waitForDeletions();
- for (File f : s.descriptor.directory.listFiles())
+ for (File f : s.descriptor.directory.tryList())
{
// we need to clear out the data dir, otherwise tests running after this breaks
FileUtils.deleteRecursive(f);
@@ -651,7 +652,7 @@ private void testAbortHelper(boolean earlyException, boolean offline)
LifecycleTransaction.waitForDeletions();
- int filecount = assertFileCounts(s.descriptor.directory.list());
+ int filecount = assertFileCounts(s.descriptor.directory.tryListNames());
assertEquals(filecount, 1);
if (!offline)
{
@@ -664,16 +665,16 @@ private void testAbortHelper(boolean earlyException, boolean offline)
assertEquals(0, cfs.getLiveSSTables().size());
cfs.truncateBlocking();
}
- filecount = assertFileCounts(s.descriptor.directory.list());
+ filecount = assertFileCounts(s.descriptor.directory.tryListNames());
if (offline)
{
// the file is not added to the CFS, therefore not truncated away above
assertEquals(1, filecount);
- for (File f : s.descriptor.directory.listFiles())
+ for (File f : s.descriptor.directory.tryList())
{
FileUtils.deleteRecursive(f);
}
- filecount = assertFileCounts(s.descriptor.directory.list());
+ filecount = assertFileCounts(s.descriptor.directory.tryListNames());
}
assertEquals(0, filecount);
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java b/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
index 731cee2e53fe..d6e4a9eab9d9 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
@@ -19,7 +19,7 @@
package org.apache.cassandra.io.sstable;
-import java.io.File;
+import org.apache.cassandra.io.util.File;
import java.io.IOException;
import java.util.*;
@@ -73,14 +73,14 @@ public static File tempSSTableFile(String keyspaceName, String cfname) throws IO
public static File tempSSTableFile(String keyspaceName, String cfname, int generation) throws IOException
{
File tempdir = FileUtils.createTempFile(keyspaceName, cfname);
- if(!tempdir.delete() || !tempdir.mkdir())
+ if(!tempdir.tryDelete() || !tempdir.tryCreateDirectory())
throw new IOException("Temporary directory creation failed.");
tempdir.deleteOnExit();
- File cfDir = new File(tempdir, keyspaceName + File.separator + cfname);
- cfDir.mkdirs();
+ File cfDir = new File(tempdir, keyspaceName + File.pathSeparator() + cfname);
+ cfDir.tryCreateDirectories();
cfDir.deleteOnExit();
File datafile = new File(new Descriptor(cfDir, keyspaceName, cfname, generation, SSTableFormat.Type.BIG).filenameFor(Component.DATA));
- if (!datafile.createNewFile())
+ if (!datafile.createFileIfNotExists())
throw new IOException("unable to create file " + datafile);
datafile.deleteOnExit();
return datafile;
@@ -219,7 +219,7 @@ public Collection write(int expectedSize, Appender appender) thro
TableMetadata metadata = Schema.instance.getTableMetadata(ksname, cfname);
ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(metadata.id);
SerializationHeader header = appender.header();
- SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, Descriptor.fromFilename(datafile.getAbsolutePath()), expectedSize, UNREPAIRED_SSTABLE, NO_PENDING_REPAIR, false, 0, header);
+ SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, Descriptor.fromFilename(datafile.absolutePath()), expectedSize, UNREPAIRED_SSTABLE, NO_PENDING_REPAIR, false, 0, header);
while (appender.append(writer)) { /* pass */ }
Collection<SSTableReader> readers = writer.finish(true);
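
tempSSTableFile above leans on the boolean-returning creation helpers instead of java.io.File.mkdir/createNewFile. A condensed sketch of that temp-directory dance, assuming FileUtils.createTempFile, tryDelete(), tryCreateDirectory() and createFileIfNotExists() behave as used in the hunk; the class name and prefix are hypothetical:

    import java.io.IOException;
    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileUtils;

    public class TempDirSketch
    {
        public static File tempDirectory(String prefix) throws IOException
        {
            File tmp = FileUtils.createTempFile(prefix, null);
            // Reuse the unique temp name as a directory, as tempSSTableFile does above.
            if (!tmp.tryDelete() || !tmp.tryCreateDirectory())
                throw new IOException("Temporary directory creation failed: " + tmp);
            tmp.deleteOnExit();
            return tmp;
        }

        public static File emptyFileIn(File dir, String name) throws IOException
        {
            File f = new File(dir, name);
            if (!f.createFileIfNotExists())
                throw new IOException("unable to create file " + f);
            return f;
        }
    }
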
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java
index e407abc0e7b2..84f80a8ce083 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java
@@ -18,10 +18,10 @@
package org.apache.cassandra.io.sstable;
-import java.io.File;
import java.nio.ByteBuffer;
import java.util.UUID;
+import org.apache.cassandra.io.util.File;
import org.junit.Test;
import org.apache.cassandra.*;
@@ -64,7 +64,7 @@ public void testAbortTxnWithOpenEarlyShouldRemoveSSTable() throws InterruptedExc
SSTableReader s = writer.setMaxDataAge(1000).openEarly();
assert s != null;
- assertFileCounts(dir.list());
+ assertFileCounts(dir.tryListNames());
for (int i = 10000; i < 20000; i++)
{
UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
@@ -74,11 +74,11 @@ public void testAbortTxnWithOpenEarlyShouldRemoveSSTable() throws InterruptedExc
}
SSTableReader s2 = writer.setMaxDataAge(1000).openEarly();
assertTrue(s.last.compareTo(s2.last) < 0);
- assertFileCounts(dir.list());
+ assertFileCounts(dir.tryListNames());
s.selfRef().release();
s2.selfRef().release();
- int datafiles = assertFileCounts(dir.list());
+ int datafiles = assertFileCounts(dir.tryListNames());
assertEquals(datafiles, 1);
// These checks don't work on Windows because the writer has the channel still
@@ -86,12 +86,12 @@ public void testAbortTxnWithOpenEarlyShouldRemoveSSTable() throws InterruptedExc
if (!FBUtilities.isWindows)
{
LifecycleTransaction.waitForDeletions();
- assertFileCounts(dir.list());
+ assertFileCounts(dir.tryListNames());
}
writer.abort();
txn.abort();
LifecycleTransaction.waitForDeletions();
- datafiles = assertFileCounts(dir.list());
+ datafiles = assertFileCounts(dir.tryListNames());
assertEquals(datafiles, 0);
validateCFS(cfs);
}
@@ -117,7 +117,7 @@ public void testAbortTxnWithClosedWriterShouldRemoveSSTable() throws Interrupted
writer.append(builder.build().unfilteredIterator());
}
- assertFileCounts(dir.list());
+ assertFileCounts(dir.tryListNames());
for (int i = 10000; i < 20000; i++)
{
UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
@@ -126,7 +126,7 @@ public void testAbortTxnWithClosedWriterShouldRemoveSSTable() throws Interrupted
writer.append(builder.build().unfilteredIterator());
}
SSTableReader sstable = writer.finish(true);
- int datafiles = assertFileCounts(dir.list());
+ int datafiles = assertFileCounts(dir.tryListNames());
assertEquals(datafiles, 1);
sstable.selfRef().release();
@@ -135,12 +135,12 @@ public void testAbortTxnWithClosedWriterShouldRemoveSSTable() throws Interrupted
if (!FBUtilities.isWindows)
{
LifecycleTransaction.waitForDeletions();
- assertFileCounts(dir.list());
+ assertFileCounts(dir.tryListNames());
}
txn.abort();
LifecycleTransaction.waitForDeletions();
- datafiles = assertFileCounts(dir.list());
+ datafiles = assertFileCounts(dir.tryListNames());
assertEquals(datafiles, 0);
validateCFS(cfs);
}
@@ -168,7 +168,7 @@ public void testAbortTxnWithClosedAndOpenWriterShouldRemoveAllSSTables() throws
writer1.append(builder.build().unfilteredIterator());
}
- assertFileCounts(dir.list());
+ assertFileCounts(dir.tryListNames());
for (int i = 10000; i < 20000; i++)
{
UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
@@ -179,9 +179,9 @@ public void testAbortTxnWithClosedAndOpenWriterShouldRemoveAllSSTables() throws
SSTableReader sstable = writer1.finish(true);
txn.update(sstable, false);
- assertFileCounts(dir.list());
+ assertFileCounts(dir.tryListNames());
- int datafiles = assertFileCounts(dir.list());
+ int datafiles = assertFileCounts(dir.tryListNames());
assertEquals(datafiles, 2);
// These checks don't work on Windows because the writer has the channel still
@@ -189,11 +189,11 @@ public void testAbortTxnWithClosedAndOpenWriterShouldRemoveAllSSTables() throws
if (!FBUtilities.isWindows)
{
LifecycleTransaction.waitForDeletions();
- assertFileCounts(dir.list());
+ assertFileCounts(dir.tryListNames());
}
txn.abort();
LifecycleTransaction.waitForDeletions();
- datafiles = assertFileCounts(dir.list());
+ datafiles = assertFileCounts(dir.tryListNames());
assertEquals(datafiles, 0);
validateCFS(cfs);
}
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java
index 9e903654d598..41d026f93fed 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.io.sstable;
-import java.io.File;
import java.nio.ByteBuffer;
import java.util.HashSet;
import java.util.Set;
@@ -27,6 +26,7 @@
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.Uninterruptibles;
+import org.apache.cassandra.io.util.File;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -145,11 +145,11 @@ public static void validateCFS(ColumnFamilyStore cfs)
}
for (File dir : cfs.getDirectories().getCFDirectories())
{
- for (File f : dir.listFiles())
+ for (File f : dir.tryList())
{
- if (f.getName().contains("Data"))
+ if (f.name().contains("Data"))
{
- Descriptor d = Descriptor.fromFilename(f.getAbsolutePath());
+ Descriptor d = Descriptor.fromFilename(f.absolutePath());
assertTrue(d.toString(), liveDescriptors.contains(d.generation));
}
}
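
Across the test changes above, the java.io.File accessors map onto the new File API in a consistent way. A compact reference sketch collecting the correspondences exercised in this patch; the class name is hypothetical, and the comments name the java.io.File call each line replaces:

    import org.apache.cassandra.io.util.File;

    public class AccessorRenamesSketch
    {
        @SuppressWarnings("unused")
        public static void show(File f)
        {
            String name     = f.name();          // was getName()
            String path     = f.path();          // was getPath()
            String absolute = f.absolutePath();  // was getAbsolutePath()
            File parent     = f.parent();        // was getParentFile()
            File absoluteF  = f.toAbsolute();    // was getAbsoluteFile()
            String[] names  = f.tryListNames();  // was list()
            File[] children = f.tryList();       // was listFiles()
        }
    }
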
diff --git a/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java b/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java
index 5f1920639b14..838f3a69fa64 100644
--- a/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java
@@ -17,7 +17,6 @@
*/
package org.apache.cassandra.io.sstable.format;
-import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
@@ -26,6 +25,7 @@
import java.util.Iterator;
import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.io.util.File;
import org.apache.cassandra.schema.TableMetadata;
import org.apache.cassandra.schema.ColumnMetadata;
import org.apache.cassandra.config.DatabaseDescriptor;
@@ -85,11 +85,11 @@ public void testFlushObserver()
FlushObserver observer = new FlushObserver();
String sstableDirectory = DatabaseDescriptor.getAllDataFileLocations()[0];
- File directory = new File(sstableDirectory + File.pathSeparator + KS_NAME + File.pathSeparator + CF_NAME);
+ File directory = new File(sstableDirectory + File.pathSeparator() + KS_NAME + File.pathSeparator() + CF_NAME);
directory.deleteOnExit();
- if (!directory.exists() && !directory.mkdirs())
- throw new FSWriteError(new IOException("failed to create tmp directory"), directory.getAbsolutePath());
+ if (!directory.exists() && !directory.tryCreateDirectories())
+ throw new FSWriteError(new IOException("failed to create tmp directory"), directory.absolutePath());
SSTableFormat.Type sstableFormat = SSTableFormat.Type.current();
diff --git a/test/unit/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriterTest.java
index 3cf96f2698f8..d3aed25b511d 100644
--- a/test/unit/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriterTest.java
@@ -19,7 +19,6 @@
package org.apache.cassandra.io.sstable.format.big;
import java.io.ByteArrayInputStream;
-import java.io.File;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.nio.file.Paths;
@@ -28,6 +27,7 @@
import java.util.function.Function;
import com.google.common.collect.ImmutableSet;
+import org.apache.cassandra.io.util.File;
import org.junit.BeforeClass;
import org.junit.Test;
diff --git a/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java b/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
index 79cf83162877..6e1bd408ddee 100644
--- a/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
@@ -17,8 +17,7 @@
*/
package org.apache.cassandra.io.sstable.metadata;
-import java.io.File;
-import java.io.FileOutputStream;
+import org.apache.cassandra.io.util.*;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
@@ -29,6 +28,7 @@
import org.junit.Test;
import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.schema.TableMetadata;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.SerializationHeader;
import org.apache.cassandra.db.commitlog.CommitLogPosition;
@@ -39,11 +39,9 @@
import org.apache.cassandra.io.sstable.format.SSTableFormat;
import org.apache.cassandra.io.sstable.format.Version;
import org.apache.cassandra.io.sstable.format.big.BigFormat;
-import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
import org.apache.cassandra.io.util.DataOutputStreamPlus;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.io.util.RandomAccessReader;
-import org.apache.cassandra.schema.TableMetadata;
import org.apache.cassandra.utils.Throwables;
import static org.junit.Assert.assertEquals;
@@ -66,7 +64,7 @@ public void testSerialization() throws IOException
MetadataSerializer serializer = new MetadataSerializer();
File statsFile = serialize(originalMetadata, serializer, BigFormat.latestVersion);
- Descriptor desc = new Descriptor(statsFile.getParentFile(), "", "", 0, SSTableFormat.Type.BIG);
+ Descriptor desc = new Descriptor(statsFile.parent(), "", "", 0, SSTableFormat.Type.BIG);
try (RandomAccessReader in = RandomAccessReader.open(statsFile))
{
Map<MetadataType, MetadataComponent> deserialized = serializer.deserialize(desc, in, EnumSet.allOf(MetadataType.class));
@@ -93,7 +91,7 @@ public void testHistogramSterilization() throws IOException
// Serialize w/ overflowed histograms:
MetadataSerializer serializer = new MetadataSerializer();
File statsFile = serialize(originalMetadata, serializer, BigFormat.latestVersion);
- Descriptor desc = new Descriptor(statsFile.getParentFile(), "", "", 0, SSTableFormat.Type.BIG);
+ Descriptor desc = new Descriptor(statsFile.parent(), "", "", 0, SSTableFormat.Type.BIG);
try (RandomAccessReader in = RandomAccessReader.open(statsFile))
{
@@ -110,7 +108,7 @@ public File serialize(Map metadata, MetadataSer
{
// Serialize to tmp file
File statsFile = FileUtils.createTempFile(Component.STATS.name, null);
- try (DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(new FileOutputStream(statsFile)))
+ try (DataOutputStreamPlus out = new FileOutputStreamPlus(statsFile))
{
serializer.serialize(metadata, out, version);
}
@@ -176,7 +174,7 @@ public void testOldReadsNew(String oldV, String newV) throws IOException
File statsFileLa = serialize(originalMetadata, serializer, BigFormat.instance.getVersion(oldV));
// Reading both as earlier version should yield identical results.
SSTableFormat.Type stype = SSTableFormat.Type.current();
- Descriptor desc = new Descriptor(stype.info.getVersion(oldV), statsFileLb.getParentFile(), "", "", 0, stype);
+ Descriptor desc = new Descriptor(stype.info.getVersion(oldV), statsFileLb.parent(), "", "", 0, stype);
try (RandomAccessReader inLb = RandomAccessReader.open(statsFileLb);
RandomAccessReader inLa = RandomAccessReader.open(statsFileLa))
{
diff --git a/test/unit/org/apache/cassandra/io/util/BufferedRandomAccessFileTest.java b/test/unit/org/apache/cassandra/io/util/BufferedRandomAccessFileTest.java
index 764190c0f112..bb54f25d5dc4 100644
--- a/test/unit/org/apache/cassandra/io/util/BufferedRandomAccessFileTest.java
+++ b/test/unit/org/apache/cassandra/io/util/BufferedRandomAccessFileTest.java
@@ -21,10 +21,7 @@
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.SyncUtil;
-import java.io.File;
-import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
@@ -193,7 +190,7 @@ public void testLength() throws IOException
w.finish();
// will use cachedlength
- try (FileHandle.Builder builder = new FileHandle.Builder(tmpFile.getPath());
+ try (FileHandle.Builder builder = new FileHandle.Builder(tmpFile.path());
FileHandle fh = builder.complete();
RandomAccessReader r = fh.createReader())
{
@@ -354,7 +351,7 @@ public void testIsEOF() throws IOException
for (final int offset : Arrays.asList(0, 8))
{
File file1 = writeTemporaryFile(new byte[16]);
- try (FileHandle.Builder builder = new FileHandle.Builder(file1.getPath()).bufferSize(bufferSize);
+ try (FileHandle.Builder builder = new FileHandle.Builder(file1.path()).bufferSize(bufferSize);
FileHandle fh = builder.complete();
RandomAccessReader file = fh.createReader())
{
@@ -366,7 +363,7 @@ public void testIsEOF() throws IOException
for (final int n : Arrays.asList(1, 2, 4, 8))
{
File file1 = writeTemporaryFile(new byte[16]);
- try (FileHandle.Builder builder = new FileHandle.Builder(file1.getPath()).bufferSize(bufferSize);
+ try (FileHandle.Builder builder = new FileHandle.Builder(file1.path()).bufferSize(bufferSize);
FileHandle fh = builder.complete();
RandomAccessReader file = fh.createReader())
{
@@ -427,11 +424,11 @@ public void testBytesPastMark() throws IOException
tmpFile.deleteOnExit();
// Create the BRAF by filename instead of by file.
- try (FileHandle.Builder builder = new FileHandle.Builder(tmpFile.getPath());
+ try (FileHandle.Builder builder = new FileHandle.Builder(tmpFile.path());
FileHandle fh = builder.complete();
RandomAccessReader r = fh.createReader())
{
- assert tmpFile.getPath().equals(r.getPath());
+ assert tmpFile.path().equals(r.getPath());
// Create a mark and move the rw there.
final DataPosition mark = r.mark();
@@ -607,9 +604,9 @@ private File writeTemporaryFile(byte[] data) throws IOException
{
File f = FileUtils.createTempFile("BRAFTestFile", null);
f.deleteOnExit();
- FileOutputStream fout = new FileOutputStream(f);
+ FileOutputStreamPlus fout = new FileOutputStreamPlus(f);
fout.write(data);
- SyncUtil.sync(fout);
+ fout.sync();
fout.close();
return f;
}
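
writeTemporaryFile above replaces SyncUtil.sync(fout) with the stream's own sync(). A short sketch of that durable-write pattern, assuming FileOutputStreamPlus.sync() as used in the hunk; the class name and prefix are hypothetical, and try-with-resources stands in for the explicit close():

    import java.io.IOException;
    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileOutputStreamPlus;
    import org.apache.cassandra.io.util.FileUtils;

    public class DurableWriteSketch
    {
        public static File writeDurably(byte[] data) throws IOException
        {
            File f = FileUtils.createTempFile("durable", null); // hypothetical prefix
            try (FileOutputStreamPlus out = new FileOutputStreamPlus(f))
            {
                out.write(data);
                out.sync(); // force the bytes to the device before close
            }
            return f;
        }
    }
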
diff --git a/test/unit/org/apache/cassandra/io/util/ChecksummedRandomAccessReaderTest.java b/test/unit/org/apache/cassandra/io/util/ChecksummedRandomAccessReaderTest.java
index 4963712fc008..91584efe436b 100644
--- a/test/unit/org/apache/cassandra/io/util/ChecksummedRandomAccessReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/util/ChecksummedRandomAccessReaderTest.java
@@ -18,7 +18,6 @@
package org.apache.cassandra.io.util;
-import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Arrays;
@@ -119,7 +118,7 @@ public void corruptionDetection() throws IOException
assert data.exists();
// simulate corruption of file
- try (RandomAccessFile dataFile = new RandomAccessFile(data, "rw"))
+ try (RandomAccessFile dataFile = new RandomAccessFile(data.toJavaIOFile(), "rw"))
{
dataFile.seek(1024);
dataFile.write((byte) 5);
diff --git a/test/unit/org/apache/cassandra/io/util/ChecksummedSequentialWriterTest.java b/test/unit/org/apache/cassandra/io/util/ChecksummedSequentialWriterTest.java
index 6837d1df27e5..5d92d4584fa8 100644
--- a/test/unit/org/apache/cassandra/io/util/ChecksummedSequentialWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/util/ChecksummedSequentialWriterTest.java
@@ -18,7 +18,6 @@
*/
package org.apache.cassandra.io.util;
-import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@@ -44,7 +43,7 @@ public static void setupDD()
public void cleanup()
{
for (TestableSW sw : writers)
- sw.file.delete();
+ sw.file.tryDelete();
writers.clear();
}
diff --git a/test/unit/org/apache/cassandra/io/util/DataOutputTest.java b/test/unit/org/apache/cassandra/io/util/DataOutputTest.java
index b6291c0fc435..4c3c6bb6e662 100644
--- a/test/unit/org/apache/cassandra/io/util/DataOutputTest.java
+++ b/test/unit/org/apache/cassandra/io/util/DataOutputTest.java
@@ -24,9 +24,6 @@
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.EOFException;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.BufferOverflowException;
@@ -327,16 +324,16 @@ public void testWrappedFileOutputStream() throws IOException
File file = FileUtils.createTempFile("dataoutput", "test");
try
{
- DataOutputStreamPlus write = new WrappedDataOutputStreamPlus(new FileOutputStream(file));
+ DataOutputStreamPlus write = new WrappedDataOutputStreamPlus(new FileOutputStreamPlus(file));
DataInput canon = testWrite(write);
write.close();
- DataInputStream test = new DataInputStream(new FileInputStream(file));
+ DataInputStream test = new DataInputStream(new FileInputStreamPlus(file));
testRead(test, canon);
test.close();
}
finally
{
- Assert.assertTrue(file.delete());
+ Assert.assertTrue(file.tryDelete());
}
}
@@ -346,16 +343,16 @@ public void testFileOutputStream() throws IOException
File file = FileUtils.createTempFile("dataoutput", "test");
try
{
- DataOutputStreamPlus write = new BufferedDataOutputStreamPlus(new FileOutputStream(file));
+ DataOutputStreamPlus write = new FileOutputStreamPlus(file);
DataInput canon = testWrite(write);
write.close();
- DataInputStream test = new DataInputStream(new FileInputStream(file));
+ DataInputStream test = new DataInputStream(new FileInputStreamPlus(file));
testRead(test, canon);
test.close();
}
finally
{
- Assert.assertTrue(file.delete());
+ Assert.assertTrue(file.tryDelete());
}
}
@@ -366,17 +363,17 @@ public void testRandomAccessFile() throws IOException
try
{
@SuppressWarnings("resource")
- final RandomAccessFile raf = new RandomAccessFile(file, "rw");
+ final RandomAccessFile raf = new RandomAccessFile(file.toJavaIOFile(), "rw");
DataOutputStreamPlus write = new BufferedDataOutputStreamPlus(raf.getChannel());
DataInput canon = testWrite(write);
write.close();
- DataInputStream test = new DataInputStream(new FileInputStream(file));
+ DataInputStream test = new DataInputStream(new FileInputStreamPlus(file));
testRead(test, canon);
test.close();
}
finally
{
- Assert.assertTrue(file.delete());
+ Assert.assertTrue(file.tryDelete());
}
}
@@ -390,10 +387,10 @@ public void testSequentialWriter() throws IOException
DataInput canon = testWrite(write);
write.flush();
write.close();
- DataInputStream test = new DataInputStream(new FileInputStream(file));
+ DataInputStream test = new DataInputStream(new FileInputStreamPlus(file));
testRead(test, canon);
test.close();
- Assert.assertTrue(file.delete());
+ Assert.assertTrue(file.tryDelete());
}
private DataInput testWrite(DataOutputPlus test) throws IOException
diff --git a/test/unit/org/apache/cassandra/io/util/FileTest.java b/test/unit/org/apache/cassandra/io/util/FileTest.java
new file mode 100644
index 000000000000..d12565c4df09
--- /dev/null
+++ b/test/unit/org/apache/cassandra/io/util/FileTest.java
@@ -0,0 +1,326 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cassandra.io.util;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.nio.file.Files;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.function.Predicate;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.util.concurrent.RateLimiter;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.psjava.util.Triple;
+
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.config.CassandraRelevantProperties.JAVA_IO_TMPDIR;
+
+public class FileTest
+{
+ private static final java.io.File dir;
+ static
+ {
+ java.io.File parent = new java.io.File(JAVA_IO_TMPDIR.getString());
+ String dirName = Long.toHexString(ThreadLocalRandom.current().nextLong());
+ while (new java.io.File(parent, dirName).exists())
+ dirName = Long.toHexString(ThreadLocalRandom.current().nextLong());
+ dir = new java.io.File(parent, dirName);
+ dir.mkdirs();
+ new File(dir).deleteRecursiveOnExit();
+ }
+
+ @Test
+ public void testEquivalence() throws IOException
+ {
+ java.io.File notExists = new java.io.File(dir, "notExists");
+ java.io.File regular = new java.io.File(dir, "regular");
+ regular.createNewFile();
+ java.io.File regularLink = new java.io.File(dir, "regularLink");
+ Files.createSymbolicLink(regularLink.toPath(), regular.toPath());
+ java.io.File emptySubdir = new java.io.File(dir, "empty");
+ java.io.File emptySubdirLink = new java.io.File(dir, "emptyLink");
+ emptySubdir.mkdir();
+ Files.createSymbolicLink(emptySubdirLink.toPath(), emptySubdir.toPath());
+ java.io.File nonEmptySubdir = new java.io.File(dir, "nonEmpty");
+ java.io.File nonEmptySubdirLink = new java.io.File(dir, "nonEmptyLink");
+ nonEmptySubdir.mkdir();
+ Files.createSymbolicLink(nonEmptySubdirLink.toPath(), nonEmptySubdir.toPath());
+ new java.io.File(nonEmptySubdir, "something").createNewFile();
+
+ testEquivalence("");
+
+ List<Runnable> setup = ImmutableList.of(
+ () -> {},
+ () -> dir.setWritable(false),
+ () -> dir.setReadable(false),
+ () -> dir.setWritable(true)
+ );
+
+ for (Runnable run : setup)
+ {
+ run.run();
+ testEquivalence(notExists.getPath());
+ testEquivalence(nonAbsolute(notExists));
+ testEquivalence(regular.getPath());
+ testEquivalence(nonAbsolute(regular));
+ testEquivalence(regularLink.getPath());
+ testEquivalence(nonAbsolute(regularLink));
+ testEquivalence(emptySubdir.getPath());
+ testEquivalence(nonAbsolute(emptySubdir));
+ testEquivalence(emptySubdirLink.getPath());
+ testEquivalence(nonAbsolute(emptySubdirLink));
+ testEquivalence(nonEmptySubdir.getPath());
+ testEquivalence(nonAbsolute(nonEmptySubdir));
+ testEquivalence(nonEmptySubdirLink.getPath());
+ testEquivalence(nonAbsolute(nonEmptySubdirLink));
+ }
+
+ emptySubdirLink.delete();
+ regularLink.delete();
+ regular.delete();
+ emptySubdir.delete();
+ }
+
+ private static String nonAbsolute(java.io.File file)
+ {
+ return file.getParent() + File.pathSeparator() + ".." + File.pathSeparator() + file.getParentFile().getName() + File.pathSeparator() + file.getName();
+ }
+
+ private void testEquivalence(String path) throws IOException
+ {
+ java.io.File file = new java.io.File(path);
+ if (file.exists()) testExists(path);
+ else testNotExists(path);
+ }
+
+ private void testBasic(String path) throws IOException
+ {
+ // TODO: confirm - it seems that accuracy of lastModified may differ between APIs on Linux??
+ testEquivalence(path, f -> f.lastModified() / 1000, f -> f.lastModified() / 1000);
+ testEquivalence(path, java.io.File::length, File::length);
+ testEquivalence(path, java.io.File::canExecute, File::isExecutable);
+ testEquivalence(path, java.io.File::canRead, File::isReadable);
+ testEquivalence(path, java.io.File::canWrite, File::isWritable);
+ testEquivalence(path, java.io.File::exists, File::exists);
+ testEquivalence(path, java.io.File::isAbsolute, File::isAbsolute);
+ testEquivalence(path, java.io.File::isDirectory, File::isDirectory);
+ testEquivalence(path, java.io.File::isFile, File::isFile);
+ testEquivalence(path, java.io.File::getPath, File::path);
+ testEquivalence(path, java.io.File::getAbsolutePath, File::absolutePath);
+ testEquivalence(path, java.io.File::getCanonicalPath, File::canonicalPath);
+ testEquivalence(path, java.io.File::getParent, File::parentPath);
+ testEquivalence(path, java.io.File::toPath, File::toPath);
+ testEquivalence(path, java.io.File::list, File::tryListNames);
+ testEquivalence(path, java.io.File::listFiles, File::tryList);
+ java.io.File file = new java.io.File(path);
+ if (file.getParentFile() != null) testBasic(file.getParent());
+ if (!file.equals(file.getAbsoluteFile())) testBasic(file.getAbsolutePath());
+ if (!file.equals(file.getCanonicalFile())) testBasic(file.getCanonicalPath());
+ }
+
+ private void testPermissionsEquivalence(String path)
+ {
+ ImmutableList<Triple<BiFunction<java.io.File, Boolean, Boolean>, BiFunction<File, Boolean, Boolean>, Function<java.io.File, Boolean>>> tests = ImmutableList.of(
+ Triple.create(java.io.File::setReadable, File::trySetReadable, java.io.File::canRead),
+ Triple.create(java.io.File::setWritable, File::trySetWritable, java.io.File::canWrite),
+ Triple.create(java.io.File::setExecutable, File::trySetExecutable, java.io.File::canExecute)
+ );
+ for (Triple<BiFunction<java.io.File, Boolean, Boolean>, BiFunction<File, Boolean, Boolean>, Function<java.io.File, Boolean>> test : tests)
+ {
+ java.io.File file = new java.io.File(path);
+ boolean cur = test.v3.apply(file);
+ boolean canRead = file.canRead();
+ boolean canWrite = file.canWrite();
+ boolean canExecute = file.canExecute();
+ testEquivalence(path, f -> test.v1.apply(f, !cur), f -> test.v2.apply(f, !cur), (f, success) -> {
+ testEquivalence(path, java.io.File::canExecute, File::isExecutable);
+ testEquivalence(path, java.io.File::canRead, File::isReadable);
+ testEquivalence(path, java.io.File::canWrite, File::isWritable);
+ Assert.assertEquals(success != cur, test.v3.apply(file));
+ test.v1.apply(f, cur);
+ });
+ Assert.assertEquals(canRead, file.canRead());
+ Assert.assertEquals(canWrite, file.canWrite());
+ Assert.assertEquals(canExecute, file.canExecute());
+ }
+ }
+
+ private void testCreation(String path, IOConsumer<java.io.File> afterEach)
+ {
+ testEquivalence(path, java.io.File::createNewFile, File::createFileIfNotExists, afterEach);
+ testEquivalence(path, java.io.File::mkdir, File::tryCreateDirectory, afterEach);
+ testEquivalence(path, java.io.File::mkdirs, File::tryCreateDirectories, afterEach);
+ }
+
+ private void testExists(String path) throws IOException
+ {
+ testBasic(path);
+ testPermissionsEquivalence(path);
+ testCreation(path, ignore -> {});
+ testEquivalence(path, java.io.File::delete, File::tryDelete, (f, s) -> {if (s) f.createNewFile(); });
+ testTryVsConfirm(path, java.io.File::delete, File::delete, (f, s) -> {if (s) f.createNewFile(); });
+ }
+
+ private void testNotExists(String path) throws IOException
+ {
+ testBasic(path);
+ testPermissionsEquivalence(path);
+ testCreation(path, java.io.File::delete);
+ testEquivalence(path, java.io.File::delete, File::tryDelete);
+ testTryVsConfirm(path, java.io.File::delete, File::delete);
+ }
+
+ interface IOFn<I, O> { O apply(I in) throws IOException; }
+ interface IOConsumer<I1> { void accept(I1 i1) throws IOException; }
+ interface IOBiConsumer<I1, I2> { void accept(I1 i1, I2 i2) throws IOException; }
+
+ private <T> void testEquivalence(String path, IOFn<java.io.File, T> canonical, IOFn<File, T> test)
+ {
+ testEquivalence(path, canonical, test, ignore -> {});
+ }
+
+ private void testEquivalence(String path, IOFn canonical, IOFn