
Commit a462282

Merge pull request #305 from maxmind/greg/eng-3200

Minor code cleanup and modernization

2 parents 6f6a03a + f44f6ca · commit a462282

24 files changed: +540 -765 lines changed

CHANGELOG.md

Lines changed: 17 additions & 0 deletions

@@ -5,6 +5,23 @@ CHANGELOG
 ------------------

 * Java 17 or greater is now required.
+* Added support for MaxMind DB files larger than 2GB. The library now uses
+  an internal Buffer abstraction that can handle databases exceeding the
+  2GB ByteBuffer limit. Files under 2GB continue to use a single ByteBuffer
+  for optimal performance. Requested by nonetallt. GitHub #154. Fixed by
+  Silvano Cerza. GitHub #289.
+* `Metadata.getBuildDate()` has been replaced with `buildTime()`, which returns
+  `java.time.Instant` instead of `java.util.Date`. The instant represents the
+  database build time in UTC.
+* `DatabaseRecord`, `Metadata`, `Network`, and internal `DecodedValue` classes
+  have been converted to records. The following API changes were made:
+  * `DatabaseRecord.getData()` and `DatabaseRecord.getNetwork()` have been
+    replaced with record accessor methods `data()` and `network()`.
+  * Simple getter methods on `Metadata` (e.g., `getBinaryFormatMajorVersion()`,
+    `getDatabaseType()`, etc.) have been replaced with their corresponding record
+    accessor methods (e.g., `binaryFormatMajorVersion()`, `databaseType()`, etc.).
+  * `Network.getNetworkAddress()` and `Network.getPrefixLength()` have been
+    replaced with record accessor methods `networkAddress()` and `prefixLength()`.

 3.2.0 (2025-05-28)
 ------------------
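
For callers upgrading from 3.x, the accessor renames above map one-to-one onto the old getters. A minimal migration sketch, assuming a com.maxmind.db.Reader has already been opened elsewhere (the IP address and the Map target type are illustrative only):

    import com.maxmind.db.Reader;
    import java.io.IOException;
    import java.net.InetAddress;
    import java.time.Instant;
    import java.util.Map;

    // Shows the 4.x record accessors next to the 3.x getters they replace.
    class MigrationSketch {
        static void printRecord(Reader reader) throws IOException {
            var ip = InetAddress.getByName("128.101.101.101"); // example address
            var record = reader.getRecord(ip, Map.class);

            var data = record.data();                // was record.getData()
            var network = record.network();          // was record.getNetwork()
            var address = network.networkAddress();  // was network.getNetworkAddress()
            var prefix = network.prefixLength();     // was network.getPrefixLength()

            // Metadata getters follow the same pattern; the build time is now an Instant in UTC.
            var type = reader.getMetadata().databaseType();   // was getDatabaseType()
            Instant built = reader.getMetadata().buildTime(); // was getBuildDate() (java.util.Date)

            System.out.println(address.getHostAddress() + "/" + prefix
                + " -> " + data + " (" + type + ", built " + built + ")");
        }
    }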

src/main/java/com/maxmind/db/Buffer.java

Lines changed: 4 additions & 13 deletions

@@ -1,7 +1,5 @@
 package com.maxmind.db;

-import java.io.IOException;
-import java.nio.channels.FileChannel;
 import java.nio.charset.CharacterCodingException;
 import java.nio.charset.CharsetDecoder;

@@ -12,8 +10,11 @@
  *
  * <p>This interface is designed to provide a long-based API while
  * remaining compatible with the limitations of underlying storage.
+ *
+ * <p>All underlying {@link java.nio.ByteBuffer}s are read-only to prevent
+ * accidental modification of shared data.
  */
-interface Buffer {
+sealed interface Buffer permits SingleBuffer, MultiBuffer {
     /**
      * Returns the total capacity of this buffer in bytes.
      *
@@ -96,16 +97,6 @@ interface Buffer {
      */
     Buffer duplicate();

-    /**
-     * Reads data from the given channel into this buffer starting at the
-     * current position.
-     *
-     * @param channel the file channel
-     * @return the number of bytes read
-     * @throws IOException if an I/O error occurs
-     */
-    long readFrom(FileChannel channel) throws IOException;
-
     /**
      * Decodes the buffer's content into a string using the given decoder.
      *
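
Sealing the interface means only the two permitted implementations can exist, which documents the complete set of buffer strategies at the type level. A standalone sketch of the pattern on Java 17+, using hypothetical names rather than the library's classes:

    // Illustration only: a sealed interface with two record implementations.
    sealed interface Storage permits HeapStorage, ChunkedStorage {
        long capacity();
    }

    // Records give compact, immutable implementations of the permitted types.
    record HeapStorage(byte[] bytes) implements Storage {
        @Override
        public long capacity() {
            return bytes.length;
        }
    }

    record ChunkedStorage(byte[][] chunks) implements Storage {
        @Override
        public long capacity() {
            long total = 0;
            for (var chunk : chunks) {
                total += chunk.length;
            }
            return total;
        }
    }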

src/main/java/com/maxmind/db/BufferHolder.java

Lines changed: 46 additions & 28 deletions

@@ -8,7 +8,6 @@
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.ArrayList;
-import java.util.List;

 final class BufferHolder {
     // DO NOT PASS OUTSIDE THIS CLASS. Doing so will remove thread safety.
@@ -23,18 +22,48 @@ final class BufferHolder {
            FileChannel channel = file.getChannel()) {
            long size = channel.size();
            if (mode == FileMode.MEMORY) {
-                Buffer buf;
                if (size <= chunkSize) {
-                    buf = new SingleBuffer(size);
+                    // Allocate, read, and make read-only
+                    ByteBuffer buffer = ByteBuffer.allocate((int) size);
+                    if (channel.read(buffer) != size) {
+                        throw new IOException("Unable to read "
+                            + database.getName()
+                            + " into memory. Unexpected end of stream.");
+                    }
+                    buffer.flip();
+                    this.buffer = new SingleBuffer(buffer);
                } else {
-                    buf = new MultiBuffer(size);
-                }
-                if (buf.readFrom(channel) != buf.capacity()) {
-                    throw new IOException("Unable to read "
-                        + database.getName()
-                        + " into memory. Unexpected end of stream.");
+                    // Allocate chunks, read, and make read-only
+                    var fullChunks = (int) (size / chunkSize);
+                    var remainder = (int) (size % chunkSize);
+                    var totalChunks = fullChunks + (remainder > 0 ? 1 : 0);
+                    var buffers = new ByteBuffer[totalChunks];
+
+                    for (int i = 0; i < fullChunks; i++) {
+                        buffers[i] = ByteBuffer.allocate(chunkSize);
+                    }
+                    if (remainder > 0) {
+                        buffers[totalChunks - 1] = ByteBuffer.allocate(remainder);
+                    }
+
+                    var totalRead = 0L;
+                    for (var buffer : buffers) {
+                        var read = channel.read(buffer);
+                        if (read == -1) {
+                            break;
+                        }
+                        totalRead += read;
+                        buffer.flip();
+                    }
+
+                    if (totalRead != size) {
+                        throw new IOException("Unable to read "
+                            + database.getName()
+                            + " into memory. Unexpected end of stream.");
+                    }
+
+                    this.buffer = new MultiBuffer(buffers, chunkSize);
                }
-                this.buffer = buf;
            } else {
                if (size <= chunkSize) {
                    this.buffer = SingleBuffer.mapFromChannel(channel);
@@ -45,38 +74,27 @@ final class BufferHolder {
        }
    }

-    /**
-     * Construct a ThreadBuffer from the provided URL.
-     *
-     * @param stream the source of my bytes.
-     * @throws IOException if unable to read from your source.
-     * @throws NullPointerException if you provide a NULL InputStream
-     */
-    BufferHolder(InputStream stream) throws IOException {
-        this(stream, MultiBuffer.DEFAULT_CHUNK_SIZE);
-    }
-
    BufferHolder(InputStream stream, int chunkSize) throws IOException {
        if (null == stream) {
            throw new NullPointerException("Unable to use a NULL InputStream");
        }
-        List<ByteBuffer> chunks = new ArrayList<>();
-        long total = 0;
-        byte[] tmp = new byte[chunkSize];
+        var chunks = new ArrayList<ByteBuffer>();
+        var total = 0L;
+        var tmp = new byte[chunkSize];
        int read;

        while (-1 != (read = stream.read(tmp))) {
-            ByteBuffer chunk = ByteBuffer.allocate(read);
+            var chunk = ByteBuffer.allocate(read);
            chunk.put(tmp, 0, read);
            chunk.flip();
            chunks.add(chunk);
            total += read;
        }

        if (total <= chunkSize) {
-            byte[] data = new byte[(int) total];
-            int pos = 0;
-            for (ByteBuffer chunk : chunks) {
+            var data = new byte[(int) total];
+            var pos = 0;
+            for (var chunk : chunks) {
                System.arraycopy(chunk.array(), 0, data, pos, chunk.capacity());
                pos += chunk.capacity();
            }
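
The chunked branch above splits a large file into fixed-size chunks plus an optional smaller tail. A standalone sketch of that arithmetic with a worked example (the class, method, and 1 GiB chunk size are illustrative, not the library's actual values):

    import java.util.Arrays;

    // Hypothetical helper showing the full-chunks-plus-remainder layout.
    class ChunkLayout {
        static int[] chunkSizes(long fileSize, int chunkSize) {
            var fullChunks = (int) (fileSize / chunkSize);
            var remainder = (int) (fileSize % chunkSize);
            var totalChunks = fullChunks + (remainder > 0 ? 1 : 0);

            var sizes = new int[totalChunks];
            for (int i = 0; i < fullChunks; i++) {
                sizes[i] = chunkSize;
            }
            if (remainder > 0) {
                sizes[totalChunks - 1] = remainder;
            }
            return sizes;
        }

        public static void main(String[] args) {
            // A 5,000,000,000-byte database with 1 GiB chunks becomes four full
            // chunks and a 705,032,704-byte tail.
            System.out.println(Arrays.toString(chunkSizes(5_000_000_000L, 1 << 30)));
        }
    }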

src/main/java/com/maxmind/db/CHMCache.java

Lines changed: 1 addition & 1 deletion

@@ -37,7 +37,7 @@ public CHMCache(int capacity) {

     @Override
     public DecodedValue get(CacheKey<?> key, Loader loader) throws IOException {
-        DecodedValue value = cache.get(key);
+        var value = cache.get(key);
         if (value == null) {
             value = loader.load(key);
             if (!cacheFull) {

src/main/java/com/maxmind/db/ClosedDatabaseException.java

Lines changed: 0 additions & 3 deletions

@@ -6,9 +6,6 @@
  * Signals that the underlying database has been closed.
  */
 public class ClosedDatabaseException extends IOException {
-
-    private static final long serialVersionUID = 1L;
-
     ClosedDatabaseException() {
         super("The MaxMind DB has been closed.");
     }

src/main/java/com/maxmind/db/ConstructorNotFoundException.java

Lines changed: 0 additions & 2 deletions

@@ -5,8 +5,6 @@
  * constructor in the class with the MaxMindDbConstructor annotation.
  */
 public class ConstructorNotFoundException extends RuntimeException {
-    private static final long serialVersionUID = 1L;
-
     ConstructorNotFoundException(String message) {
         super(message);
     }

src/main/java/com/maxmind/db/DatabaseRecord.java

Lines changed: 8 additions & 24 deletions

@@ -7,11 +7,14 @@
  * lookup.
  *
  * @param <T> the type to deserialize the returned value to
+ * @param data the data for the record in the database. The record will be
+ *             {@code null} if there was no data for the address in the
+ *             database.
+ * @param network the network associated with the record in the database. This is
+ *                the largest network where all of the IPs in the network have the same
+ *                data.
  */
-public final class DatabaseRecord<T> {
-    private final T data;
-    private final Network network;
-
+public record DatabaseRecord<T>(T data, Network network) {
     /**
      * Create a new record.
      *
@@ -20,25 +23,6 @@ public final class DatabaseRecord<T> {
      * @param prefixLength the network prefix length associated with the record in the database.
      */
     public DatabaseRecord(T data, InetAddress ipAddress, int prefixLength) {
-        this.data = data;
-        this.network = new Network(ipAddress, prefixLength);
-    }
-
-    /**
-     * @return the data for the record in the database. The record will be
-     *     <code>null</code> if there was no data for the address in the
-     *     database.
-     */
-    public T getData() {
-        return data;
-    }
-
-    /**
-     * @return the network associated with the record in the database. This is
-     *     the largest network where all of the IPs in the network have the same
-     *     data.
-     */
-    public Network getNetwork() {
-        return network;
+        this(data, new Network(ipAddress, prefixLength));
     }
 }
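
The conversion keeps the old (data, ipAddress, prefixLength) constructor by delegating to the record's canonical constructor. A standalone sketch of that pattern with hypothetical stand-in types:

    // Illustration only: Subnet and Entry are hypothetical, not library classes.
    record Subnet(String address, int prefixLength) {}

    record Entry<T>(T data, Subnet subnet) {
        // Convenience constructor: builds the component, then delegates to the
        // canonical constructor that backs the subnet() accessor.
        Entry(T data, String address, int prefixLength) {
            this(data, new Subnet(address, prefixLength));
        }
    }

    class EntryDemo {
        public static void main(String[] args) {
            var entry = new Entry<>("payload", "192.0.2.0", 24);
            // Record accessors replace getData()/getNetwork()-style getters.
            System.out.println(entry.data() + " in "
                + entry.subnet().address() + "/" + entry.subnet().prefixLength());
        }
    }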

src/main/java/com/maxmind/db/DecodedValue.java

Lines changed: 3 additions & 11 deletions

@@ -3,15 +3,7 @@
 /**
  * {@code DecodedValue} is a wrapper for the decoded value and the number of bytes used
  * to decode it.
+ *
+ * @param value the decoded value
  */
-public final class DecodedValue {
-    final Object value;
-
-    DecodedValue(Object value) {
-        this.value = value;
-    }
-
-    Object getValue() {
-        return value;
-    }
-}
+record DecodedValue(Object value) {}
