package software.amazon.encryption.s3.internal;

import software.amazon.awssdk.core.io.SdkFilterInputStream;

import javax.crypto.BadPaddingException;
import javax.crypto.Cipher;
import javax.crypto.IllegalBlockSizeException;
import java.io.IOException;
import java.io.InputStream;

/**
 * A cipher stream for decrypting CBC-encrypted data. Nothing in this implementation
 * is particularly specific to CBC, but other algorithms may require additional
 * considerations.
 */
public class CbcCipherInputStream extends SdkFilterInputStream {
    private static final int MAX_RETRY_COUNT = 1000;
    private static final int DEFAULT_IN_BUFFER_SIZE = 512;
    private final Cipher cipher;

    private boolean eofReached;
    private byte[] inputBuffer;
    private byte[] outputBuffer;
    private int currentPosition;
    private int maxPosition;

    public CbcCipherInputStream(InputStream inputStream, Cipher cipher) {
        super(inputStream);
        this.cipher = cipher;
        this.inputBuffer = new byte[DEFAULT_IN_BUFFER_SIZE];
    }

    @Override
    public int read() throws IOException {
        if (!readNextChunk()) {
            return -1;
        }
        // Mask with 0xFF to return the byte as an unsigned value in the range 0-255,
        // as required by the InputStream contract. This is equivalent to
        // Byte.toUnsignedInt(outputBuffer[currentPosition++]).
        return ((int) outputBuffer[currentPosition++] & 0xFF);
    }

    @Override
    public int read(byte[] buffer) throws IOException {
        return read(buffer, 0, buffer.length);
    }

    @Override
    public int read(byte[] buffer, int off, int targetLength) throws IOException {
        if (!readNextChunk()) {
            return -1;
        }
        if (targetLength <= 0) {
            return 0;
        }
        int length = maxPosition - currentPosition;
        if (targetLength < length) {
            length = targetLength;
        }
        System.arraycopy(outputBuffer, currentPosition, buffer, off, length);
        currentPosition += length;
        return length;
    }

    /**
     * Ensures that decrypted data is buffered and available to read, pulling and
     * processing more ciphertext from the underlying stream when necessary.
     *
     * @return true if buffered data is available, or false at end of stream.
     */
    private boolean readNextChunk() throws IOException {
        if (currentPosition >= maxPosition) {
            // All buffered data has been read, let's get some more
            if (eofReached) {
                return false;
            }
            int retryCount = 0;
            int length;
            do {
                if (retryCount > MAX_RETRY_COUNT) {
                    throw new IOException("Exceeded maximum number of attempts to read next chunk of data");
                }
                length = nextChunk();
                // Only count a retry when the cipher produced no output buffer;
                // a non-null outputBuffer means data is being read off of the InputStream.
                if (outputBuffer == null) {
                    retryCount++;
                }
            } while (length == 0);

            if (length == -1) {
                return false;
            }
        }
        return true;
    }

    /**
     * {@inheritDoc}
     * <p>
     * Note: This implementation will only skip up to the end of the buffered
     * data, potentially skipping 0 bytes.
     */
    @Override
    public long skip(long n) {
        abortIfNeeded();
        int available = maxPosition - currentPosition;
        if (n > available) {
            n = available;
        }
        if (n < 0) {
            return 0;
        }
        currentPosition += n;
        return n;
    }

    @Override
    public int available() {
        abortIfNeeded();
        return maxPosition - currentPosition;
    }

    @Override
    public void close() throws IOException {
        in.close();
        try {
            // Throw away the unprocessed data
            cipher.doFinal();
        } catch (BadPaddingException | IllegalBlockSizeException ex) {
            // Swallow the exception
        }
    }

    @Override
    public boolean markSupported() {
        return false;
    }

    @Override
    public void mark(int readlimit) {
        // mark/reset not supported
    }

    @Override
    public void reset() throws IOException {
        throw new IOException("mark/reset not supported");
    }

    /**
     * Reads and processes the next chunk of data into memory.
     *
     * @return the length of the data chunk read and processed, or -1 if end of
     *         stream.
     * @throws IOException
     *         if there is an IO exception from the underlying input stream
     */
    private int nextChunk() throws IOException {
        abortIfNeeded();
        if (eofReached) {
            return -1;
        }
        outputBuffer = null;
        int length = in.read(inputBuffer);
        if (length == -1) {
            eofReached = true;
            try {
                outputBuffer = cipher.doFinal();
                if (outputBuffer == null) {
                    return -1;
                }
                currentPosition = 0;
                return maxPosition = outputBuffer.length;
            } catch (IllegalBlockSizeException | BadPaddingException ignore) {
                // Swallow exceptions
            }
            return -1;
        }
        outputBuffer = cipher.update(inputBuffer, 0, length);
        currentPosition = 0;
        return maxPosition = (outputBuffer == null ? 0 : outputBuffer.length);
    }
}
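
For illustration only (not part of this file), a minimal sketch of how a caller might use this stream, assuming an AES/CBC/PKCS5Padding Cipher initialized for decryption. The key, IV, payload, and example class name below are placeholders; real callers would obtain the key and IV from the object's envelope metadata.

import software.amazon.encryption.s3.internal.CbcCipherInputStream;

import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class CbcCipherInputStreamExample {
    public static void main(String[] args) throws Exception {
        // Placeholder key and IV for demonstration purposes only.
        byte[] key = new byte[16];
        byte[] iv = new byte[16];
        SecretKeySpec keySpec = new SecretKeySpec(key, "AES");
        IvParameterSpec ivSpec = new IvParameterSpec(iv);

        // Encrypt a small payload so the example is self-contained.
        Cipher encryptCipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
        encryptCipher.init(Cipher.ENCRYPT_MODE, keySpec, ivSpec);
        byte[] ciphertext = encryptCipher.doFinal("hello, cbc".getBytes(StandardCharsets.UTF_8));

        // Wrap the ciphertext stream; reads return decrypted plaintext.
        Cipher decryptCipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
        decryptCipher.init(Cipher.DECRYPT_MODE, keySpec, ivSpec);
        try (InputStream plaintextStream = new CbcCipherInputStream(
                new ByteArrayInputStream(ciphertext), decryptCipher)) {
            ByteArrayOutputStream plaintext = new ByteArrayOutputStream();
            byte[] buffer = new byte[4096];
            int read;
            while ((read = plaintextStream.read(buffer)) != -1) {
                plaintext.write(buffer, 0, read);
            }
            System.out.println(new String(plaintext.toByteArray(), StandardCharsets.UTF_8));
        }
    }
}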