Skip to content

Commit d2dbb4b

Browse files
committed
Fix loading Nukkit/Vanilla worlds
1 parent 70ea907 commit d2dbb4b

File tree

7 files changed

+85
-33
lines changed

7 files changed

+85
-33
lines changed

server/src/main/java/org/cloudburstmc/server/level/chunk/ChunkBuilder.java

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ public class ChunkBuilder {
1919
private int[] heightMap;
2020
private boolean dirty;
2121
private int state = Chunk.STATE_NEW;
22+
private int chunkVersion = -1;
2223

2324
public ChunkBuilder(int x, int z, CloudLevel level) {
2425
this.x = x;
@@ -65,6 +66,15 @@ public ChunkBuilder state(int state) {
6566
return this;
6667
}
6768

69+
public int getChunkVersion() {
70+
return chunkVersion;
71+
}
72+
73+
public ChunkBuilder chunkVersion(int chunkVersion) {
74+
this.chunkVersion = chunkVersion;
75+
return this;
76+
}
77+
6878
public ChunkBuilder dirty() {
6979
this.dirty = true;
7080
return this;

server/src/main/java/org/cloudburstmc/server/level/provider/leveldb/LevelDBKey.java

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
package org.cloudburstmc.server.level.provider.leveldb;
22

33
public enum LevelDBKey {
4+
DATA_3D('+'),
5+
VERSION(','),
46
DATA_2D('-'),
57
DATA_2D_LEGACY('.'),
68
SUBCHUNK_PREFIX('/'),
@@ -11,13 +13,19 @@ public enum LevelDBKey {
1113
BLOCK_EXTRA_DATA('4'),
1214
BIOME_STATE('5'),
1315
STATE_FINALIZATION('6'),
14-
16+
CONVERTER_TAG('7'),
1517
BORDER_BLOCKS('8'),
1618
HARDCODED_SPAWNERS('9'),
17-
18-
FLAGS('f'),
19-
20-
VERSION('v');
19+
PENDING_RANDOM_TICKS(':'),
20+
XXHASH_CHECKSUMS(';'),
21+
GENERATION_SEED('<'),
22+
GENERATED_BEFORE_CNC_BLENDING('='),
23+
BLENDING_BIOME_HEIGHT('>'),
24+
META_DATA_HASH('?'),
25+
BLENDING_DATA('@'),
26+
ACTOR_DIGEST_VERSION('A'),
27+
VERSION_OLD('v'),
28+
AABB_VOLUMES('w');
2129

2230
private final byte encoded;
2331

server/src/main/java/org/cloudburstmc/server/level/provider/leveldb/LevelDBProvider.java

Lines changed: 15 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,8 @@
3030
@Log4j2
3131
@ParametersAreNonnullByDefault
3232
class LevelDBProvider implements LevelProvider {
33+
private static final int CURRENT_CHUNK_VERSION = 42;
34+
3335
private final String levelId;
3436
private final Path path;
3537
private final Executor executor;
@@ -63,6 +65,10 @@ public CompletableFuture<CloudChunk> readChunk(ChunkBuilder chunkBuilder) {
6365

6466
return CompletableFuture.supplyAsync(() -> {
6567
byte[] versionValue = this.db.get(LevelDBKey.VERSION.getKey(x, z));
68+
if (versionValue == null || versionValue.length != 1) {
69+
versionValue = this.db.get(LevelDBKey.VERSION_OLD.getKey(x, z));
70+
}
71+
6672
if (versionValue == null || versionValue.length != 1) {
6773
return null;
6874
}
@@ -80,7 +86,8 @@ public CompletableFuture<CloudChunk> readChunk(ChunkBuilder chunkBuilder) {
8086
chunkBuilder.dirty();
8187
}
8288

83-
ChunkSerializers.deserializeChunk(this.db, chunkBuilder, chunkVersion);
89+
chunkBuilder.chunkVersion(chunkVersion & 0xFF);
90+
ChunkSerializers.deserializeChunk(this.db, chunkBuilder, chunkVersion & 0xFF);
8491
Data2dSerializer.deserialize(this.db, chunkBuilder);
8592

8693
BlockEntitySerializer.loadBlockEntities(this.db, chunkBuilder);
@@ -96,19 +103,21 @@ public CompletableFuture<Void> saveChunk(Chunk chunk) {
96103
final int z = chunk.getZ();
97104

98105
return CompletableFuture.supplyAsync(() -> {
99-
//we clear the dirty flag here instead of in LevelChunkManager in case there are modifications to the chunk between now and the time it was enqueued
106+
// Clear the dirty flag here rather than in LevelChunkManager, in case the chunk
107+
// is modified between when it was enqueued and when it is actually written.
100108
if (!chunk.isGenerated() || !chunk.clearDirty()) {
101-
//the chunk was not dirty, do nothing
109+
// the chunk was not dirty
102110
return null;
103111
}
112+
104113
try (DirectWriteBatch batch = this.db.createWriteBatch()) {
105114
LockableChunk lockableChunk = chunk.readLockable();
106115
lockableChunk.lock();
107116
try {
108-
ChunkSerializers.serializeChunk(batch, chunk, 19);
117+
ChunkSerializers.serializeChunk(batch, chunk, CURRENT_CHUNK_VERSION);
109118
Data2dSerializer.serialize(batch, (CloudChunk) chunk);
110119

111-
batch.put(LevelDBKey.VERSION.getKey(x, z), new byte[]{19});
120+
batch.put(LevelDBKey.VERSION.getKey(x, z), new byte[]{(byte) CURRENT_CHUNK_VERSION});
112121
batch.put(LevelDBKey.STATE_FINALIZATION.getKey(x, z), Unpooled.buffer(4).writeIntLE(lockableChunk.getState() - 1).array());
113122

114123
BlockEntitySerializer.saveBlockEntities(batch, (CloudChunk) chunk);
@@ -120,7 +129,7 @@ public CompletableFuture<Void> saveChunk(Chunk chunk) {
120129
this.db.write(batch);
121130
return null;
122131
} catch (IOException e) {
123-
//can't happen
132+
// can't happen
124133
throw new RuntimeException(e);
125134
}
126135
}, this.executor);

server/src/main/java/org/cloudburstmc/server/level/provider/leveldb/serializer/ChunkSerializerV3.java

Lines changed: 12 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,6 @@ public void serialize(DirectWriteBatch db, Chunk chunk) {
2626
int sectionCount = chunk.getLevel().getSectionsCount();
2727
int minSectionY = chunk.getLevel().getMinSectionY();
2828

29-
// Write block sub-chunks and biome sections together.
30-
// BIOME_STATE uses the same copy-last optimization as the network path.
3129
BiomeStorage previousBiome = null;
3230
ByteBuf biomeBuffer = ByteBufAllocator.DEFAULT.ioBuffer();
3331
ByteBuf biomeKeyBuffer = ByteBufAllocator.DEFAULT.ioBuffer();
@@ -37,7 +35,6 @@ public void serialize(DirectWriteBatch db, Chunk chunk) {
3735
CloudChunkSection section = (CloudChunkSection) chunk.getSection(arrayIndex);
3836
int absoluteSectionY = arrayIndex + minSectionY;
3937

40-
// Block storage
4138
if (section != null) {
4239
ByteBuf buffer = ByteBufAllocator.DEFAULT.ioBuffer();
4340
ByteBuf keyBuffer = ByteBufAllocator.DEFAULT.ioBuffer();
@@ -51,13 +48,11 @@ public void serialize(DirectWriteBatch db, Chunk chunk) {
5148
}
5249
}
5350

54-
// Biome storage: write one entry per section into the BIOME_STATE buffer
5551
BiomeStorage bs = (section != null) ? section.getBiomeStorage() : DEFAULT_BIOME_STORAGE;
5652
bs.writeToDisk(biomeBuffer, previousBiome);
5753
previousBiome = bs;
5854
}
5955

60-
// Write the full biome buffer as a single BIOME_STATE key
6156
biomeKeyBuffer.clear().writeBytes(LevelDBKey.BIOME_STATE.getKey(chunk.getX(), chunk.getZ()));
6257
db.put(biomeKeyBuffer, biomeBuffer);
6358
} finally {
@@ -90,12 +85,14 @@ public void deserialize(DirectDB db, ChunkBuilder chunkBuilder) {
9085
int minSectionY = chunkBuilder.getLevel().getMinSectionY();
9186
CloudChunkSection[] sections = new CloudChunkSection[sectionCount];
9287

93-
// Key byte is absolute section Y. Pre-1.18 worlds used keys 0..15, which map
94-
// correctly to array indices 4..19 (world Y 0..240) under this scheme.
88+
// Chunk versions 24-26 stored subchunk keys with a +4 Y offset.
89+
int chunkVersion = chunkBuilder.getChunkVersion();
90+
int subChunkKeyOffset = (chunkVersion >= 24 && chunkVersion <= 26) ? 4 : 0;
91+
9592
int maxSectionY = minSectionY + sectionCount - 1;
9693

9794
for (int absoluteSectionY = minSectionY; absoluteSectionY <= maxSectionY; absoluteSectionY++) {
98-
ByteBuf buf = db.getZeroCopy(Unpooled.wrappedBuffer(LevelDBKey.SUBCHUNK_PREFIX.getKey(chunkX, chunkZ, absoluteSectionY)));
95+
ByteBuf buf = db.getZeroCopy(Unpooled.wrappedBuffer(LevelDBKey.SUBCHUNK_PREFIX.getKey(chunkX, chunkZ, (absoluteSectionY + subChunkKeyOffset) & 0xFF)));
9996
if (buf == null) {
10097
continue;
10198
}
@@ -108,7 +105,6 @@ public void deserialize(DirectDB db, ChunkBuilder chunkBuilder) {
108105
}
109106

110107
int subChunkVersion = buf.readUnsignedByte();
111-
// On-disk format uses version 8; mark dirty only if older than that
112108
if (subChunkVersion < 8) {
113109
chunkBuilder.dirty();
114110
}
@@ -142,22 +138,27 @@ public void deserialize(DirectDB db, ChunkBuilder chunkBuilder) {
142138
chunkBuilder.sections(sections);
143139

144140
byte[] biomeData = db.get(LevelDBKey.BIOME_STATE.getKey(chunkX, chunkZ));
141+
int biomeDataOffset = 0;
142+
if (biomeData == null) {
143+
biomeData = db.get(LevelDBKey.DATA_3D.getKey(chunkX, chunkZ));
144+
biomeDataOffset = 512; // skip heightmap
145+
}
146+
145147
if (biomeData != null) {
146148
ByteBuf biomeBuf = Unpooled.wrappedBuffer(biomeData);
149+
biomeBuf.skipBytes(Math.min(biomeDataOffset, biomeBuf.readableBytes()));
147150
BiomeStorage previous = null;
148151
for (int arrayIndex = 0; arrayIndex < sectionCount; arrayIndex++) {
149152
if (!biomeBuf.isReadable()) {
150153
break;
151154
}
152155
BiomeStorage bs = BiomeStorage.readFromDisk(biomeBuf, previous);
153156
if (sections[arrayIndex] == null) {
154-
// Section had no block data but has biome data, so create a section to hold it
155157
sections[arrayIndex] = new CloudChunkSection(new BlockStorage[]{new BlockStorage(), new BlockStorage()});
156158
}
157159
sections[arrayIndex].setBiomeStorage(bs);
158160
previous = bs;
159161
}
160162
}
161-
// If no BIOME_STATE key exists, sections keep their default BiomeStorage (Ocean = 0).
162163
}
163164
}

server/src/main/java/org/cloudburstmc/server/level/provider/leveldb/serializer/ChunkSerializers.java

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,10 @@ public class ChunkSerializers {
1717
SERIALIZERS.put(2, ChunkSerializerV1.INSTANCE);
1818
SERIALIZERS.put(3, ChunkSerializerV3.INSTANCE);
1919
SERIALIZERS.put(4, ChunkSerializerV3.INSTANCE);
20+
SERIALIZERS.put(5, ChunkSerializerV3.INSTANCE);
2021
SERIALIZERS.put(6, ChunkSerializerV3.INSTANCE);
2122
SERIALIZERS.put(7, ChunkSerializerV3.INSTANCE);
23+
SERIALIZERS.put(8, ChunkSerializerV3.INSTANCE);
2224
SERIALIZERS.put(9, ChunkSerializerV3.INSTANCE);
2325
SERIALIZERS.put(10, ChunkSerializerV3.INSTANCE);
2426
SERIALIZERS.put(11, ChunkSerializerV3.INSTANCE);
@@ -30,6 +32,29 @@ public class ChunkSerializers {
3032
SERIALIZERS.put(17, ChunkSerializerV3.INSTANCE);
3133
SERIALIZERS.put(18, ChunkSerializerV3.INSTANCE);
3234
SERIALIZERS.put(19, ChunkSerializerV3.INSTANCE);
35+
SERIALIZERS.put(20, ChunkSerializerV3.INSTANCE);
36+
SERIALIZERS.put(21, ChunkSerializerV3.INSTANCE);
37+
SERIALIZERS.put(22, ChunkSerializerV3.INSTANCE);
38+
SERIALIZERS.put(23, ChunkSerializerV3.INSTANCE);
39+
SERIALIZERS.put(24, ChunkSerializerV3.INSTANCE);
40+
SERIALIZERS.put(25, ChunkSerializerV3.INSTANCE);
41+
SERIALIZERS.put(26, ChunkSerializerV3.INSTANCE);
42+
SERIALIZERS.put(27, ChunkSerializerV3.INSTANCE);
43+
SERIALIZERS.put(28, ChunkSerializerV3.INSTANCE);
44+
SERIALIZERS.put(29, ChunkSerializerV3.INSTANCE);
45+
SERIALIZERS.put(30, ChunkSerializerV3.INSTANCE);
46+
SERIALIZERS.put(31, ChunkSerializerV3.INSTANCE);
47+
SERIALIZERS.put(32, ChunkSerializerV3.INSTANCE);
48+
SERIALIZERS.put(33, ChunkSerializerV3.INSTANCE);
49+
SERIALIZERS.put(34, ChunkSerializerV3.INSTANCE);
50+
SERIALIZERS.put(35, ChunkSerializerV3.INSTANCE);
51+
SERIALIZERS.put(36, ChunkSerializerV3.INSTANCE);
52+
SERIALIZERS.put(37, ChunkSerializerV3.INSTANCE);
53+
SERIALIZERS.put(38, ChunkSerializerV3.INSTANCE);
54+
SERIALIZERS.put(39, ChunkSerializerV3.INSTANCE);
55+
SERIALIZERS.put(40, ChunkSerializerV3.INSTANCE);
56+
SERIALIZERS.put(41, ChunkSerializerV3.INSTANCE);
57+
SERIALIZERS.put(42, ChunkSerializerV3.INSTANCE);
3358
}
3459

3560
private static ChunkSerializer getChunkSerializer(int version) {

server/src/main/java/org/cloudburstmc/server/level/provider/leveldb/serializer/Data2dSerializer.java

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -49,11 +49,9 @@ public static void deserialize(DB db, ChunkBuilder builder) {
4949
heightMap[i] = buffer.readUnsignedShortLE();
5050
}
5151

52-
// Legacy DATA_2D format (pre-3D biomes): 512-byte heightmap followed by
53-
// a 256-byte 2D biome column (one byte per XZ column). When present,
54-
// expand it into 3D biome storage via a deferred ChunkDataLoader so that
55-
// the sections are guaranteed to exist. The chunk is also marked dirty
56-
// so the upgraded format is persisted on the next save.
52+
// Legacy DATA_2D (pre-3D biomes): 512-byte heightmap + 256-byte 2D biome column.
53+
// Expand into per-section 3D biome storage via a deferred loader; return true
54+
// to mark the chunk dirty so the upgraded format is written on next save.
5755
if (data2d.length >= LEGACY_DATA2D_SIZE) {
5856
final byte[] biomesRaw = new byte[256];
5957
buffer.readerIndex(512);
@@ -72,7 +70,7 @@ public static void deserialize(DB db, ChunkBuilder builder) {
7270
}
7371
}
7472
}
75-
return true; // mark dirty, format has been upgraded
73+
return true; // upgraded from 2D biomes; mark dirty
7674
});
7775
}
7876
} finally {

server/src/main/java/org/cloudburstmc/server/level/provider/leveldb/serializer/LevelDBDataSerializer.java

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,8 @@ public class LevelDBDataSerializer implements LevelDataSerializer {
2727

2828
private static final TypeReference<Map<String, Object>> OPTIONS_TYPE = new TypeReference<Map<String, Object>>() {
2929
};
30-
private static final int VERSION = 8;
30+
private static final int STORAGE_VERSION = 10;
31+
private static final int STORAGE_VERSION_MIN = 8;
3132

3233
@Override
3334
public LoadState load(LevelData data, Path levelPath, String levelId) throws IOException {
@@ -71,7 +72,7 @@ private void saveData(LevelData data, Path levelDatPath) throws IOException {
7172
.putInt("lightningTime", data.getLightningTime())
7273
.putInt("Difficulty", data.getDifficulty())
7374
.putInt("GameType", data.getGameType())
74-
.putInt("StorageVersion", VERSION)
75+
.putInt("StorageVersion", STORAGE_VERSION)
7576
.putInt("serverChunkTickRange", data.getServerChunkTickRange())
7677
.putInt("NetherScale", data.getNetherScale())
7778
.putLong("currentTick", data.getCurrentTick())
@@ -109,7 +110,7 @@ private void saveData(LevelData data, Path levelDatPath) throws IOException {
109110

110111
// Write
111112
try (LittleEndianDataOutputStream stream = new LittleEndianDataOutputStream(Files.newOutputStream(levelDatPath))) {
112-
stream.writeInt(VERSION);
113+
stream.writeInt(STORAGE_VERSION);
113114
stream.writeInt(tagBytes.length);
114115
stream.write(tagBytes);
115116
}
@@ -121,8 +122,8 @@ private void loadData(LevelData data, Path levelDatPath) throws IOException {
121122
NBTInputStream nbtInputStream = new NBTInputStream(stream)) {
122123

123124
int version = stream.readInt();
124-
if (version != VERSION) {
125-
throw new IOException("Incompatible level.dat version");
125+
if (version < STORAGE_VERSION_MIN || version > STORAGE_VERSION + 2) {
126+
throw new IOException("Incompatible level.dat version: " + version);
126127
}
127128
stream.readInt(); // Size
128129
tag = (NbtMap) nbtInputStream.readTag();

0 commit comments

Comments (0)