/*
 * Copyright 2017 The Hyve
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.radarcns.data;

import org.apache.avro.generic.GenericRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.annotation.Nonnull;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.Closeable;
import java.io.Flushable;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.Writer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import java.util.zip.ZipException;

import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;

/**
 * Keeps an open writer for a single file path, optionally gzipped. When a temporary
 * directory is given, data is written through a temporary file that replaces the
 * target path once the cache is closed.
 *
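 * <p>A minimal usage sketch; {@code factory} is assumed to be some
 * {@link RecordConverterFactory} implementation and {@code record} a
 * {@link GenericRecord} matching the file contents:
 * <pre>{@code
 * try (FileCache cache = new FileCache(factory, Paths.get("output/topic.csv"),
 *         record, false, Paths.get("tmp"))) {
 *     cache.writeRecord(record);
 * }
 * }</pre>
 */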
public class FileCache implements Closeable, Flushable, Comparable<FileCache> {
    private static final Logger logger = LoggerFactory.getLogger(FileCache.class);
    private static final int BUFFER_SIZE = 8192;

    private final Writer writer;
    private final RecordConverter recordConverter;
    private final Path path;
    private final Path tmpPath;
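    /** Time of last write, in {@link System#nanoTime()} ticks; see {@link #compareTo(FileCache)}. */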
    private long lastUse;

    /**
     * File cache of a given path, using the given converter factory.
     * @param converterFactory converter factory to create a converter to write files with.
     * @param path path to cache.
     * @param record example record to create a converter from; it is not written to the file.
     * @param gzip whether to gzip-compress the output.
     * @param tmpDir temporary directory to write to; the temporary file replaces {@code path}
     *               when the cache is closed. If {@code null}, records are appended to
     *               {@code path} directly.
     * @throws IOException if the file or temporary file cannot be correctly read or written.
     */
    public FileCache(RecordConverterFactory converterFactory, Path path,
            GenericRecord record, boolean gzip, Path tmpDir) throws IOException {
        this.path = path;
        boolean fileIsNew = !Files.exists(path) || Files.size(path) == 0;
        OutputStream outFile;
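        // With a temporary directory, write to a fresh temp file that replaces the target
        // on close; without one, append directly to the target file.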
        if (tmpDir == null) {
            this.tmpPath = null;
            outFile = Files.newOutputStream(path, StandardOpenOption.APPEND,
                    StandardOpenOption.CREATE);
        } else {
            this.tmpPath = Files.createTempFile(tmpDir, path.getFileName().toString(),
                    gzip ? ".tmp.gz" : ".tmp");
            outFile = Files.newOutputStream(tmpPath);
        }

        OutputStream bufOut = new BufferedOutputStream(outFile);
        if (gzip) {
            bufOut = new GZIPOutputStream(bufOut);
        }

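        // Open the existing contents so the converter can read them (e.g. to pick up a
        // previously written header); the temp file additionally needs the old contents
        // copied into it, since it starts out empty.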
        InputStream inputStream;
        if (fileIsNew) {
            inputStream = new ByteArrayInputStream(new byte[0]);
        } else {
            inputStream = inputStream(new BufferedInputStream(Files.newInputStream(path)), gzip);

            if (tmpPath != null) {
                try {
                    copy(path, bufOut, gzip);
                } catch (ZipException ex) {
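                    // A ZipException can only occur when gzip is true: the existing gzipped
                    // file was corrupt, so discard the partially written output and start
                    // over with a fresh gzip stream.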
                    // restart output buffer
                    bufOut.close();
                    // clear output file
                    outFile = Files.newOutputStream(tmpPath);
                    bufOut = new GZIPOutputStream(new BufferedOutputStream(outFile));
                }
            }
        }

        this.writer = new OutputStreamWriter(bufOut);

        try (Reader reader = new InputStreamReader(inputStream)) {
            this.recordConverter = converterFactory.converterFor(writer, record, fileIsNew, reader);
        } catch (IOException ex) {
            try {
                writer.close();
            } catch (IOException exClose) {
                logger.error("Failed to close writer for {}", path, exClose);
            }
            throw ex;
        }
    }

    /**
     * Write a record to the cache.
     * @param record Avro record to write.
     * @return {@code true} if the record was written, {@code false} otherwise, as determined
     *         by the underlying {@link RecordConverter}.
     * @throws IOException if the record cannot be written.
     */
    public boolean writeRecord(GenericRecord record) throws IOException {
        boolean result = this.recordConverter.writeRecord(record);
        lastUse = System.nanoTime();
        return result;
    }

    @Override
    public void close() throws IOException {
        recordConverter.close();
        writer.close();
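        // If data was written through a temporary file, move it over the target path.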
        if (tmpPath != null) {
            Files.move(tmpPath, path, REPLACE_EXISTING);
        }
    }

    @Override
    public void flush() throws IOException {
        recordConverter.flush();
    }

    /**
     * Compares the times at which this file cache and the given one were last written to.
     * If those are equal, the file paths are compared lexicographically.
     * @param other FileCache to compare with.
     */
    @Override
    public int compareTo(@Nonnull FileCache other) {
        int result = Long.compare(lastUse, other.lastUse);
        if (result != 0) {
            return result;
        }
        return path.compareTo(other.path);
    }

    /** File that the cache is maintaining. */
    public Path getPath() {
        return path;
    }

    private static void copy(Path source, OutputStream sink, boolean gzip) throws IOException {
        try (InputStream copyStream = inputStream(Files.newInputStream(source), gzip)) {
            copy(copyStream, sink);
        } catch (ZipException ex) {
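            // The source is not valid gzip. Look for an unused backup name: .corrupted
            // first, then .corrupted-0, .corrupted-1, and so on, up to 100 attempts.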
            Path corruptPath = null;
            String suffix = "";
            for (int i = 0; corruptPath == null && i < 100; i++) {
                Path path = source.resolveSibling(source.getFileName() + ".corrupted" + suffix);
                if (!Files.exists(path)) {
                    corruptPath = path;
                }
                suffix = "-" + i;
            }
            if (corruptPath != null) {
                logger.error("Original file {} was corrupted: {}."
                        + " Moved to {}.", source, ex, corruptPath);
                Files.move(source, corruptPath);
            } else {
                logger.error("Original file {} was corrupted: {}."
                        + " Too many corrupt backups already exist; the file is left in place"
                        + " to be overwritten.", source, ex);
            }
            throw ex;
        }
    }

    private static InputStream inputStream(InputStream in, boolean gzip) throws IOException {
        return gzip ? new GZIPInputStream(in) : in;
    }

    /**
     * Reads all bytes from an input stream and writes them to an output stream.
     * Neither stream is closed.
     */
    private static void copy(InputStream source, OutputStream sink) throws IOException {
        byte[] buf = new byte[BUFFER_SIZE];
        int n;
        while ((n = source.read(buf)) != -1) {
            sink.write(buf, 0, n);
        }
    }
}