feat: more work in WorldConverter
CoolLoong committed Aug 9, 2023
1 parent a8b54d4 commit 2d38695
Showing 18 changed files with 399 additions and 224 deletions.
8 changes: 7 additions & 1 deletion Allay-API/build.gradle.kts
@@ -23,5 +23,11 @@ dependencies {
api(libs.commonsio)
api(libs.joml)
api(libs.joml.primitives)
compileOnly(libs.libdeflate)
implementation(libs.libdeflate)
}

tasks.processResources {
// input directory
from("${rootProject.projectDir}/Data")
include("mappings/")
}
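
Note on the dependency change above: compileOnly puts libdeflate on the compile classpath only, while implementation also carries it at runtime, which the libdeflate-backed zlib provider used elsewhere in this commit needs. The processResources block copies the repository's Data/mappings directory into the jar's resources.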
37 changes: 37 additions & 0 deletions Allay-API/src/main/java/cn/allay/api/utils/VanillaBiomeIdUtils.java
@@ -0,0 +1,37 @@
package cn.allay.api.utils;

import cn.allay.api.data.VanillaBiomeId;
import cn.allay.api.identifier.Identifier;
import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap;
import lombok.experimental.UtilityClass;
import org.jetbrains.annotations.Nullable;

import java.util.HashMap;

/**
* Allay Project 8/8/2023
*
* @author Cool_Loong
*/
@UtilityClass
public class VanillaBiomeIdUtils {
private static final Int2ObjectOpenHashMap<VanillaBiomeId> MAP1 = new Int2ObjectOpenHashMap<>();
private static final HashMap<Identifier, VanillaBiomeId> MAP2 = new HashMap<>();

static {
for (var v : VanillaBiomeId.values()) {
MAP1.put(v.getId(), v);
MAP2.put(v.getIdentifier(), v);
}
}

@Nullable
public VanillaBiomeId fromId(int id) {
return MAP1.get(id);
}

@Nullable
public VanillaBiomeId fromIdentifier(Identifier identifier) {
return MAP2.get(identifier);
}
}
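
A minimal usage sketch for the new lookup utility (the numeric id 1, the "minecraft:plains" string, and the Identifier(String) constructor are illustrative assumptions):

// Both lookups return null when nothing matches, hence the @Nullable contract.
VanillaBiomeId byNumericId = VanillaBiomeIdUtils.fromId(1);
VanillaBiomeId byIdentifier = VanillaBiomeIdUtils.fromIdentifier(new Identifier("minecraft:plains"));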
Palette.java
@@ -6,12 +6,12 @@
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.ByteBufOutputStream;
import it.unimi.dsi.fastutil.objects.ReferenceArrayList;
import lombok.EqualsAndHashCode;
import org.cloudburstmc.nbt.*;
import org.cloudburstmc.protocol.common.util.VarInts;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

@@ -32,7 +32,7 @@ public Palette(V first) {

public Palette(V first, BitArrayVersion version) {
this.bitArray = version.createArray(Chunk.SECTION_SIZE);
this.palette = new ReferenceArrayList<>(16);
this.palette = new ArrayList<>(16);
this.palette.add(first);
}

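A plausible reading of the ReferenceArrayList-to-ArrayList swap above: fastutil's ReferenceArrayList compares elements by identity (==) in indexOf/contains, whereas ArrayList uses equals(), which is what a palette lookup needs when entries are value objects such as NbtMap block states. The motivation is an assumption; the commit message does not explain it.
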
AnvilRegionFile.java (moved from package cn.allay.server.world.storage.anvil to cn.allay.api.world.storage)
@@ -1,16 +1,19 @@
package cn.allay.server.world.storage.anvil;
package cn.allay.api.world.storage;

import cn.allay.api.zlib.CompressionType;
import cn.allay.api.zlib.ZlibProviderType;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import it.unimi.dsi.fastutil.ints.IntArrayList;
import org.cloudburstmc.nbt.NBTInputStream;
import org.cloudburstmc.nbt.NBTOutputStream;
import org.cloudburstmc.nbt.NbtMap;
import org.cloudburstmc.nbt.NbtUtils;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Range;

import java.io.*;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.IntBuffer;
import java.nio.channels.FileChannel;
@@ -49,7 +52,8 @@ public AnvilRegionFile(Path region, int regionX, int regionZ) throws IOException
//Set the pointer in the file header
channel.position(0);

if (this.channel.size() < HEADER_LENGTH) { // new file, fill in data
// new file, fill in data
if (this.channel.size() < HEADER_LENGTH) {
// If the file is empty, initialize the 8 KiB of header data
if (channel.size() == 0) {
channel.write(new ByteBuffer[]{EMPTY_SECTOR.duplicate(), EMPTY_SECTOR.duplicate()});
@@ -65,11 +69,10 @@ public AnvilRegionFile(Path region, int regionX, int regionZ) throws IOException
usedSectors.set(0, 2);

// read chunk locations
channel.position(0);
ByteBuffer locations = ByteBuffer.allocate(SECTOR_SIZE * 2);//8K Bytes
while (locations.hasRemaining()) {
if (channel.read(locations) == -1) {
throw new EOFException();
}
if (channel.read(locations) == -1) {
throw new EOFException();
}
// flip the `locations` buffer to prepare it for reading
locations.flip();
@@ -78,67 +81,74 @@
for (int i = 0; i < MAX_ENTRY_COUNT; i++) {
int loc = ints.get();
this.locations.add(loc);

// mark already allocated sectors as taken.
// loc 0 means the chunk is *not* stored in the file
int index = sectorIndex(loc);
int count = sectorCount(loc);
if (loc != 0 && index >= 0 && index + count <= availableSectors) {
usedSectors.set(index, index + count + 1);
if (loc != 0 && index + count <= availableSectors) {
usedSectors.set(index, index + count);
}
}
for (int i = 0; i < MAX_ENTRY_COUNT; i++) {
this.timestamps.add(ints.get());
}
}

public synchronized NbtMap readChunkData(int chunkX, int chunkZ) throws IOException {
@NotNull
public synchronized NbtMap readChunkData(@Range(from = 0, to = 31) int chunkX, @Range(from = 0, to = 31) int chunkZ) throws IOException {
int loc = this.locations.getInt(index(chunkX, chunkZ));
if (loc == 0) {
return NbtMap.EMPTY;
}
int fileOffset = sectorIndex(loc) * SECTOR_SIZE;
long fileOffset = (long) sectorIndex(loc) * SECTOR_SIZE;
int sectorCount = sectorCount(loc);
// Seek to the sector position in file channel.
this.channel.position(fileOffset);
// Allocate enough bytes to cover all of the chunk's sectors
int bytes = sectorCount * SECTOR_SIZE;
ByteBuf buffer = ByteBufAllocator.DEFAULT.ioBuffer(bytes);
ByteBuffer buffer = ByteBuffer.allocate(bytes);
// Read data from the file channel to the buffer
while (buffer.writerIndex() < bytes) {
int written = this.channel.read(buffer.internalNioBuffer(buffer.writerIndex(), buffer.writableBytes()));
if (written == -1) {
throw new EOFException();
}
buffer.writerIndex(buffer.writerIndex() + written);
// FileChannel.read may return fewer bytes than requested, so loop until the buffer is full
while (buffer.hasRemaining()) {
if (this.channel.read(buffer) == -1) {
throw new EOFException();
}
}
// Read the chunk data length bits
int length = buffer.readInt();
ByteBuf chunk = buffer.readSlice(length);
buffer.flip();
// The first byte represents the compression type; the rest is the raw chunk data
byte compressionType = chunk.readByte();
byte[] input = new byte[chunk.readableBytes()];
// Read the chunk data length (this count includes the compression-type byte)
int length = buffer.getInt();
byte compressionType = buffer.get();
byte[] input = new byte[length - 1];
buffer.get(input);
byte[] output;
chunk.readBytes(input);
output = switch (compressionType) {
case GZIP_COMPRESSION ->
ZlibProviderType.LibDeflateThreadLocal.of(CompressionType.GZIP, 6).inflate(input, CHUNK_SIZE_LIMIT);
case ZLIB_COMPRESSION ->
ZlibProviderType.LibDeflateThreadLocal.of(CompressionType.ZLIB, 6).inflate(input, CHUNK_SIZE_LIMIT);
default -> throw new IllegalArgumentException("Unknown compression type: " + compressionType);
};
NBTInputStream reader = NbtUtils.createReader(new BufferedInputStream(new ByteArrayInputStream(output)));
NBTInputStream reader = NbtUtils.createReader(new ByteArrayInputStream(output));
return (NbtMap) reader.readTag();
}

public synchronized void writeChunk(int chunkX, int chunkZ, NbtMap chunkData) throws IOException {
/**
* Write a chunk to the region file.
*
* @param chunkX    the chunk x coordinate within this region (0-31)
* @param chunkZ    the chunk z coordinate within this region (0-31)
* @param chunkData the chunk NBT data
* @throws IOException if the region file cannot be written
*/
public synchronized void writeChunk(@Range(from = 0, to = 31) int chunkX, @Range(from = 0, to = 31) int chunkZ, NbtMap chunkData) throws IOException {
// Convert chunk data to byte stream and compress
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
NBTOutputStream writer = NbtUtils.createWriter(new BufferedOutputStream(byteArrayOutputStream));
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(CHUNK_SIZE_LIMIT);
NBTOutputStream writer = NbtUtils.createWriter(byteArrayOutputStream);
writer.writeTag(chunkData);
// close the writer before snapshotting the stream so all NBT bytes are flushed
writer.close();
byte[] deflateData = ZlibProviderType.LibDeflateThreadLocal.of(CompressionType.ZLIB, 6).deflate(byteArrayOutputStream.toByteArray());
// Calculate the number of sectors needed for the chunk
int sectorCount = (deflateData.length + CHUNK_HEADER_LENGTH) / SECTOR_SIZE;
int sectorCount = (int) Math.ceil((double) (deflateData.length + CHUNK_HEADER_LENGTH) / SECTOR_SIZE);
// A chunk may occupy at most 255 sectors, i.e. its maximum size is just under 256 * 4096 bytes = 1 MiB
if (sectorCount >= SECTOR_COUNT_PER1M) {
throw new IllegalArgumentException("Writing this chunk would take too many sectors (limit is 255, but " + sectorCount + " is needed)");
@@ -159,11 +169,12 @@ public synchronized void writeChunk(int chunkX, int chunkZ, NbtMap chunkData) th
if (sectorStartCount == -1) {
var eof = channel.size();
position = eof;
sectorStartCount = (int) eof / SECTOR_SIZE;
sectorStartCount = (int) (eof / SECTOR_SIZE);
// fill up sectors
ByteBuffer byteBuffer = ByteBuffer.allocateDirect(SECTOR_SIZE);
for (int i = 0; i < sectorCount; i++) {
channel.position(eof + i * SECTOR_SIZE);
channel.write(ByteBuffer.allocate(SECTOR_SIZE));
byteBuffer.rewind(); // rewind so each iteration writes a full empty sector
channel.write(byteBuffer);
}
appendToEnd = true;
} else {
Expand All @@ -187,19 +198,20 @@ public synchronized void writeChunk(int chunkX, int chunkZ, NbtMap chunkData) th
}

// Update and write locations and timestamps
locations.set(index, buildLocation(sectorStartCount, sectorCount));
timestamps.set(index, Long.valueOf(System.currentTimeMillis()).intValue());
ByteBuffer location = ByteBuffer.allocateDirect(4);//int
header.putInt(locations.getInt(index));
header.flip();
ByteBuffer timestamp = ByteBuffer.allocateDirect(4);//int
header.putInt(timestamps.getInt(index));
header.flip();
channel.write(location, index * 4L);
channel.write(timestamp, index * 4L + 4096);

int loc = buildLocation(sectorStartCount, sectorCount);
int time = Long.valueOf(System.currentTimeMillis()).intValue();
locations.set(index, loc);
timestamps.set(index, time);
ByteBuffer location = ByteBuffer.allocate(4);//int
location.putInt(loc);
location.flip();
ByteBuffer timestamp = ByteBuffer.allocate(4);//int
timestamp.putInt(time);
timestamp.flip();
channel.write(location, index * 4L);
channel.write(timestamp, index * 4L + 4096);
// the data has been written, now free previous storage
usedSectors.set(previousSectorStart, previousSectorStart + previousSectorCount + 1, false);
usedSectors.set(previousSectorStart, previousSectorStart + previousSectorCount, false);
}

/**
@@ -208,9 +220,12 @@ public synchronized void writeChunk(int chunkX, int chunkZ, NbtMap chunkData) th
*/
private int findAvailableSectors(int sectorCount) {
for (int start = 0; start < usedSectors.size() - sectorCount; start++) {
boolean found = false;
boolean found = true;
for (int i = 0; i < sectorCount; i++) {
found = !usedSectors.get(i + start);
if (usedSectors.get(i + start)) {
found = false;
break;
}
}
if (found) {
return start;
@@ -221,9 +236,9 @@ private int findAvailableSectors(int sectorCount) {

private void alignment4K() throws IOException {
// file is not a multiple of 4kib, add padding
long missingPadding = channel.size() % SECTOR_SIZE;
int missingPadding = (int) (channel.size() % SECTOR_SIZE);
if (missingPadding > 0) {
channel.write(ByteBuffer.allocate((int) (SECTOR_SIZE - missingPadding)));
channel.write(ByteBuffer.allocate(SECTOR_SIZE - missingPadding));
}
}

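For orientation: each int in the 4 KiB locations table packs a sector offset and a sector count (offset in the upper 24 bits, count in the low 8) in the standard Anvil region layout, which is what sectorIndex, sectorCount, and buildLocation appear to encode; those helpers are not shown in this hunk. A minimal round-trip sketch (file name and coordinates are illustrative):

// Open region (0, 0) and round-trip one chunk; coordinates are region-local, 0-31.
AnvilRegionFile region = new AnvilRegionFile(Path.of("r.0.0.mca"), 0, 0);
NbtMap chunk = region.readChunkData(5, 7); // NbtMap.EMPTY if the chunk is absent
region.writeChunk(5, 7, chunk);
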
101 changes: 101 additions & 0 deletions Allay-API/src/main/java/cn/allay/api/zlib/JavaZibThreadLocal.java
@@ -0,0 +1,101 @@
package cn.allay.api.zlib;

import it.unimi.dsi.fastutil.io.FastByteArrayOutputStream;

import java.io.IOException;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.Inflater;

/**
* Allay Project 2023/6/6
*
* @author Cool_Loong
*/
public final class JavaZibThreadLocal implements ZlibProvider {
private static final ThreadLocal<FastByteArrayOutputStream> FBAO = ThreadLocal.withInitial(() -> new FastByteArrayOutputStream(1024));
private static final ThreadLocal<byte[]> BUFFER = ThreadLocal.withInitial(() -> new byte[8192]);
private int level;
private CompressionType type;
private final ThreadLocal<Inflater> INFLATER = ThreadLocal.withInitial(Inflater::new);
private final ThreadLocal<Deflater> DEFLATER = ThreadLocal.withInitial(() -> new Deflater(level));

JavaZibThreadLocal(CompressionType type, int level) {
this.type = type;
this.level = level;
}

@Override
public void setCompressionType(CompressionType type) {
this.type = type;
}

@Override
public void setCompressionLevel(int level) {
this.level = level;
}

@Override
public byte[] deflate(byte[] data) throws IOException {
try (var bos = FBAO.get()) {
if (type == CompressionType.GZIP) {
throw new UnsupportedOperationException(this.getClass().getSimpleName() + " doesn't support GZIP");
} else {
Deflater deflater = DEFLATER.get();
try {
deflater.reset();
deflater.setInput(data);
deflater.finish();
bos.reset();
byte[] buffer = BUFFER.get();
int length = 0;
while (!deflater.finished()) {
int i = deflater.deflate(buffer);
bos.write(buffer, 0, i);
length += i;
}
byte[] output = new byte[length];
System.arraycopy(bos.array, 0, output, 0, length);
return output;
} finally {
deflater.reset();
}
}
}
}

@Override
public byte[] inflate(byte[] data, int maxSize) throws IOException {
try (var bos = FBAO.get()) {
if (type == CompressionType.GZIP) {
throw new UnsupportedOperationException(this.getClass().getSimpleName() + " doesn't support GZIP");
} else {
Inflater inflater = INFLATER.get();
try {
inflater.reset();
inflater.setInput(data);
bos.reset();
byte[] buffer = BUFFER.get();
try {
int length = 0;
while (!inflater.finished()) {
int i = inflater.inflate(buffer);
length += i;
if (maxSize > 0 && length > maxSize) {
throw new IOException("Inflated data exceeds maximum size");
}
bos.write(buffer, 0, i);
}
byte[] output = new byte[length];
System.arraycopy(bos.array, 0, output, 0, length);
return output;
} catch (DataFormatException e) {
throw new IOException("Unable to inflate zlib stream", e);
}
} finally {
// reset (not end) so the thread-local Inflater remains usable for later calls
inflater.reset();
}
}
}
}
}
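
A minimal round-trip sketch for the new provider; the direct constructor call is an assumption (it is package-private, so real callers presumably go through a factory like the ZlibProviderType.LibDeflateThreadLocal.of(...) seen in AnvilRegionFile):

// Deflate and re-inflate a payload with ZLIB at level 6.
ZlibProvider zlib = new JavaZibThreadLocal(CompressionType.ZLIB, 6); // assumes the caller is in cn.allay.api.zlib
byte[] compressed = zlib.deflate("hello world".getBytes(java.nio.charset.StandardCharsets.UTF_8));
byte[] restored = zlib.inflate(compressed, 1 << 20); // refuse to inflate past 1 MiB

One caveat worth noting: each per-thread Deflater captures the level at creation, so setCompressionLevel only affects threads that have not yet initialized their Deflater.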