1.17 update and a lot of tinkering... It's been so long I don't really remember what I was doing
parent 011283dc09
commit 4c8c9b879b
build.gradle
@@ -1,5 +1,5 @@
 plugins {
-    id 'fabric-loom' version '0.5-SNAPSHOT'
+    id 'fabric-loom' version '0.8-SNAPSHOT'
     id 'maven-publish'
 }

@@ -14,7 +14,7 @@ minecraft {
 }

 repositories{
-    maven { url 'http://server.bbkr.space:8081/artifactory/libs-release' }
+    maven { url 'https://server.bbkr.space/artifactory/libs-release' }
     maven { url 'https://jitpack.io' }
 }

@@ -22,24 +22,24 @@ dependencies {
     //to change the versions see the gradle.properties file
     minecraft "com.mojang:minecraft:${project.minecraft_version}"
     mappings "net.fabricmc:yarn:${project.yarn_mappings}:v2"
-    modCompile "net.fabricmc:fabric-loader:${project.loader_version}"
+    modImplementation "net.fabricmc:fabric-loader:${project.loader_version}"

     // Fabric API. This is technically optional, but you probably want it anyway.
-    modCompile "net.fabricmc.fabric-api:fabric-api:${project.fabric_version}"
+    modImplementation "net.fabricmc.fabric-api:fabric-api:${project.fabric_version}"

-    modCompile "io.github.cottonmc.cotton:cotton-config:1.0.0-rc.7"
+    modImplementation "io.github.cottonmc.cotton:cotton-config:1.0.0-rc.7"

     include "io.github.cottonmc:Jankson-Fabric:3.0.0+j1.2.0"
     include "io.github.cottonmc.cotton:cotton-logging:1.0.0-rc.4"
     include "io.github.cottonmc.cotton:cotton-config:1.0.0-rc.7"

-    modCompile "org.apache.commons:commons-compress:1.19"
+    modImplementation "org.apache.commons:commons-compress:1.19"
     include "org.apache.commons:commons-compress:1.19"

-    modCompile "org.tukaani:xz:1.8"
+    modImplementation "org.tukaani:xz:1.8"
     include "org.tukaani:xz:1.8"

-    modCompile 'com.github.shevek:parallelgzip:master-SNAPSHOT'
+    modImplementation 'com.github.shevek:parallelgzip:master-SNAPSHOT'
     include 'com.github.shevek:parallelgzip:master-SNAPSHOT'
 }

gradle.properties
@@ -1,14 +1,14 @@
 # Done to increase the memory available to gradle.
 org.gradle.jvmargs=-Xmx1G

-minecraft_version=1.16.4
-yarn_mappings=1.16.4+build.7
-loader_version=0.10.8
+minecraft_version=1.17
+yarn_mappings=1.17+build.10
+loader_version=0.11.5

 #Fabric api
-fabric_version=0.26.3+1.16
+fabric_version=0.35.1+1.17

 # Mod Properties
-mod_version = 2.1.0-prev
+mod_version = 2.1.0-prev5
 maven_group = net.szum123321
 archives_base_name = textile_backup

gradle/wrapper/gradle-wrapper.jar
Binary file not shown.

gradle/wrapper/gradle-wrapper.properties
@@ -1,5 +1,5 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-6.5-bin.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-7.0.2-bin.zip
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists

TextileBackup.java
@@ -45,6 +45,7 @@ import java.util.concurrent.Executors;
 public class TextileBackup implements ModInitializer {
     @Override
     public void onInitialize() {
+        //Statics.LOGGER.info("Tmpfs: {}", GetPropertyAction.privilegedGetProperty("java.io.tmpdir"));
         Statics.LOGGER.info("Starting Textile Backup by Szum123321.");

         Statics.CONFIG = ConfigManager.loadConfig(ConfigHandler.class);

@@ -65,8 +66,7 @@ public class TextileBackup implements ModInitializer {
             }
         }

-        if(Statics.CONFIG.backupInterval > 0)
-            ServerTickEvents.END_SERVER_TICK.register(Statics.scheduler::tick);
+        if(Statics.CONFIG.backupInterval > 0) ServerTickEvents.END_SERVER_TICK.register(Statics.scheduler::tick);

         //Restart Executor Service in singleplayer
         ServerLifecycleEvents.SERVER_STARTING.register(ignored -> {

FileSuggestionProvider.java
@@ -28,8 +28,9 @@ import net.minecraft.entity.player.PlayerEntity;
 import net.minecraft.server.command.ServerCommandSource;
 import net.szum123321.textile_backup.Statics;
 import net.szum123321.textile_backup.core.restore.RestoreHelper;
+import org.lwjgl.system.CallbackI;

+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;

 public final class FileSuggestionProvider implements SuggestionProvider<ServerCommandSource> {

@@ -43,25 +44,25 @@ public final class FileSuggestionProvider implements SuggestionProvider<ServerCommandSource> {
     public CompletableFuture<Suggestions> getSuggestions(CommandContext<ServerCommandSource> ctx, SuggestionsBuilder builder) throws CommandSyntaxException {
         String remaining = builder.getRemaining();

-        for (RestoreHelper.RestoreableFile file : RestoreHelper.getAvailableBackups(ctx.getSource().getMinecraftServer())) {
+        List<RestoreHelper.RestoreableFile> list = RestoreHelper.getAvailableBackups(ctx.getSource().getMinecraftServer());
+
+        Collections.sort(list);
+        Collections.reverse(list);
+
+        for (RestoreHelper.RestoreableFile file : list) {
             String formattedCreationTime = file.getCreationTime().format(Statics.defaultDateTimeFormatter);

             if (formattedCreationTime.startsWith(remaining)) {
-                if (ctx.getSource().getEntity() instanceof PlayerEntity) { //was typed by player
-                    if (file.getComment() != null) {
-                        builder.suggest(formattedCreationTime, new LiteralMessage("Comment: " + file.getComment()));
-                    } else {
-                        builder.suggest(formattedCreationTime);
-                    }
-                } else { //was typed from server console
-                    if (file.getComment() != null) {
-                        builder.suggest(file.getCreationTime() + "#" + file.getComment());
-                    } else {
-                        builder.suggest(formattedCreationTime);
-                    }
-                }
+                if (file.getComment() != null) {
+                    if(ctx.getSource().getEntity() instanceof PlayerEntity)
+                        builder.suggest(formattedCreationTime, new LiteralMessage("Comment: " + file.getComment()));
+                    else
+                        builder.suggest(file.getCreationTime() + "#" + file.getComment());
+                } else {
+                    builder.suggest(formattedCreationTime);
+                }
             }
         }
         return builder.buildFuture();
     }
 }

RestoreBackupCommand.java
@@ -66,7 +66,7 @@ public class RestoreBackupCommand {
     }

     private static int execute(String file, @Nullable String comment, ServerCommandSource source) throws CommandSyntaxException {
-        if(Statics.restoreAwaitThread == null || (Statics.restoreAwaitThread != null && !Statics.restoreAwaitThread.isAlive())) {
+        if(Statics.restoreAwaitThread == null || !Statics.restoreAwaitThread.isAlive()) {
             LocalDateTime dateTime;

             try {

Utilities.java
@@ -45,7 +45,7 @@ public class Utilities {
     public static File getWorldFolder(MinecraftServer server) {
         return ((MinecraftServerSessionAccessor)server)
                 .getSession()
-                .getWorldDirectory(RegistryKey.of(Registry.DIMENSION, DimensionType.OVERWORLD_REGISTRY_KEY.getValue()));
+                .getWorldDirectory(RegistryKey.of(Registry.WORLD_KEY, DimensionType.OVERWORLD_REGISTRY_KEY.getValue()));
     }

     public static File getBackupRootPath(String worldName) {

@@ -26,8 +26,6 @@ import net.szum123321.textile_backup.core.create.compressors.tar.AbstractTarArch
 import net.szum123321.textile_backup.core.create.compressors.tar.LZMACompressor;
 import net.szum123321.textile_backup.core.create.compressors.tar.ParallelBZip2Compressor;
 import net.szum123321.textile_backup.core.create.compressors.tar.ParallelGzipCompressor;
 import net.szum123321.textile_backup.core.create.compressors.ParallelZipCompressor;

 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;

ParallelZipCompressor.java
@@ -20,13 +20,19 @@ package net.szum123321.textile_backup.core.create.compressors;

 import net.szum123321.textile_backup.Statics;
 import net.szum123321.textile_backup.core.create.BackupContext;
 import net.szum123321.textile_backup.core.create.compressors.ParallelZipCompressor.FileInputStreamSupplier;
+import net.szum123321.textile_backup.core.create.compressors.parallel_zip_fix.FailsafeScatterGatherBackingStore;
 import org.apache.commons.compress.archivers.zip.*;
 import org.apache.commons.compress.parallel.InputStreamSupplier;
+import org.apache.commons.compress.parallel.ScatterGatherBackingStore;
+import org.apache.commons.compress.parallel.ScatterGatherBackingStoreSupplier;
+import sun.security.action.GetPropertyAction;

 import java.io.*;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.util.concurrent.*;
 import java.util.zip.CRC32;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.zip.ZipEntry;

 /*

@@ -37,14 +43,17 @@
 */
 public class ParallelZipCompressor extends ZipCompressor {
     private ParallelScatterZipCreator scatterZipCreator;
+    private ScatterZipOutputStream dirs;

     public static ParallelZipCompressor getInstance() {
         return new ParallelZipCompressor();
     }

     @Override
-    protected OutputStream createArchiveOutputStream(OutputStream stream, BackupContext ctx, int coreLimit) {
-        scatterZipCreator = new ParallelScatterZipCreator(Executors.newFixedThreadPool(coreLimit));
+    protected OutputStream createArchiveOutputStream(OutputStream stream, BackupContext ctx, int coreLimit) throws IOException {
+        dirs = ScatterZipOutputStream.fileBased(File.createTempFile("scatter-dirs", "tmp"));
+        scatterZipCreator = new ParallelScatterZipCreator(Executors.newFixedThreadPool(coreLimit), new CatchingBackingStoreSupplier());

         return super.createArchiveOutputStream(stream, ctx, coreLimit);
     }

@@ -52,20 +61,25 @@ public class ParallelZipCompressor extends ZipCompressor {
     protected void addEntry(File file, String entryName, OutputStream arc) throws IOException {
         ZipArchiveEntry entry = (ZipArchiveEntry)((ZipArchiveOutputStream)arc).createArchiveEntry(file, entryName);

-        if(ZipCompressor.isDotDat(file.getName())) {
+        if(entry.isDirectory() && !entry.isUnixSymlink()) {
+            dirs.addArchiveEntry(
+                    ZipArchiveEntryRequest.createZipArchiveEntryRequest(entry, new FileInputStreamSupplier(file))
+            );
+        } else {
+            if (ZipCompressor.isDotDat(file.getName())) {
                 entry.setMethod(ZipArchiveOutputStream.STORED);
                 entry.setSize(file.length());
-                entry.setCompressedSize(file.length());
+                entry.setCompressedSize(entry.getSize());
                 entry.setCrc(getCRC(file));
             } else entry.setMethod(ZipEntry.DEFLATED);

             entry.setTime(System.currentTimeMillis());

             scatterZipCreator.addArchiveEntry(entry, new FileInputStreamSupplier(file));
+        }
     }

     @Override
     protected void finish(OutputStream arc) throws InterruptedException, ExecutionException, IOException {
+        dirs.writeTo((ZipArchiveOutputStream) arc);
+        dirs.close();
         scatterZipCreator.writeTo((ZipArchiveOutputStream) arc);
     }

@@ -86,4 +100,14 @@ public class ParallelZipCompressor extends ZipCompressor {
         return null;
     }

+    private static class CatchingBackingStoreSupplier implements ScatterGatherBackingStoreSupplier {
+        final AtomicInteger storeNum = new AtomicInteger(0);
+
+        @Override
+        public ScatterGatherBackingStore get() throws IOException {
+            //final File tempFile = File.createTempFile("catchngparallelscatter", "n" + storeNum.incrementAndGet());
+            return new FailsafeScatterGatherBackingStore(storeNum.incrementAndGet(), Paths.get(GetPropertyAction.privilegedGetProperty("java.io.tmpdir")));
+        }
+    }
 }

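For context, ParallelScatterZipCreator requests one backing store per worker from the supplier, so CatchingBackingStoreSupplier above only has to hand out objects honoring the small commons-compress ScatterGatherBackingStore contract. A minimal heap-only implementation of that same contract (an illustrative sketch, not code from this commit) shows the shape of what FailsafeScatterGatherBackingStore provides:

import org.apache.commons.compress.parallel.ScatterGatherBackingStore;
import org.apache.commons.compress.parallel.ScatterGatherBackingStoreSupplier;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

// Keeps every scatter stream on the heap; fine for small archives,
// and it shows exactly what a backing store must provide.
class HeapBackingStore implements ScatterGatherBackingStore {
    private final ByteArrayOutputStream buffer = new ByteArrayOutputStream();

    @Override
    public InputStream getInputStream() {
        return new ByteArrayInputStream(buffer.toByteArray());
    }

    @Override
    public void writeOut(byte[] data, int offset, int length) {
        buffer.write(data, offset, length); // heap writes cannot hit ENOSPC
    }

    @Override
    public void closeForWriting() { /* nothing buffered externally */ }

    @Override
    public void close() { /* no temp files to delete */ }
}

class HeapStoreSupplier implements ScatterGatherBackingStoreSupplier {
    @Override
    public ScatterGatherBackingStore get() throws IOException {
        return new HeapBackingStore(); // called once per parallel worker
    }
}
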
CompositeScatterGatherBackingStore.java (new file)
@@ -0,0 +1,86 @@
+package net.szum123321.textile_backup.core.create.compressors.parallel_zip_fix;
+
+import org.apache.commons.compress.parallel.ScatterGatherBackingStore;
+import sun.security.action.GetPropertyAction;
+
+import java.io.*;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class CompositeScatterGatherBackingStore implements ScatterGatherBackingStore {
+    private final static String NO_SPACE_LEFT_ON_DEVICE_EXCEPTION_MESSAGE = "No space left on device";
+    private final static Path mainTmpPath = Paths.get(GetPropertyAction.privilegedGetProperty("java.io.tmpdir"));
+
+    private final static AtomicInteger TMP_FILE_COUNTER = new AtomicInteger(0);
+    private static Path localTmpPath;
+
+    private final File mainTarget;
+    private long mainBytesWritten = 0;
+    private File localTarget = null;
+    private OutputStream os;
+
+    public CompositeScatterGatherBackingStore(Path localTmpPath) throws IOException {
+        this.localTmpPath = localTmpPath;
+
+        mainTarget = mainTmpPath.resolve("scatter_storage_" + TMP_FILE_COUNTER.getAndIncrement()).toFile();
+        mainTarget.createNewFile();
+        //mainTmpFile.deleteOnExit();
+
+        os = Files.newOutputStream(mainTarget.toPath());
+    }
+
+    @Override
+    public InputStream getInputStream() throws IOException {
+        if(localTarget != null)
+            return new SequenceInputStream(
+                    new SizeLimitedInputStream((int) mainBytesWritten, Files.newInputStream(mainTarget.toPath())),
+                    Files.newInputStream(localTarget.toPath())
+            );
+
+        return Files.newInputStream(mainTarget.toPath());
+    }
+
+    @Override
+    public void writeOut(byte[] data, int offset, int length) throws IOException {
+        try {
+            os.write(data, offset, length);
+            if(localTarget == null) mainBytesWritten += length;
+        } catch (IOException e) {
+            if(e.getMessage().equals(NO_SPACE_LEFT_ON_DEVICE_EXCEPTION_MESSAGE)) {
+                if(localTarget == null) {
+                    os.close();
+
+                    localTarget = localTmpPath.resolve(mainTarget.getName()).toFile();
+                    localTarget.createNewFile();
+                    //localTmpFile.deleteOnExit();
+                    os = Files.newOutputStream(localTarget.toPath());
+                    os.write(data, offset, length);
+                }
+            } else {
+                throw e;
+            }
+        }
+    }
+
+    @Override
+    public void closeForWriting() throws IOException {
+        os.close();
+    }
+
+    @Override
+    public void close() throws IOException {
+        if(mainTarget.exists() && !mainTarget.delete()) mainTarget.deleteOnExit();
+        if(localTarget != null && localTarget.exists() && !localTarget.delete()) localTarget.deleteOnExit();
+    }
+    /*
+    public static void setMemoryStorageSize(long size) {
+        MAX_MEMORY_STORAGE_SIZE = size;
+    }
+
+    public static void resetMemoryStorage() {
+        MEMORY_SPACE_USED.set(MAX_MEMORY_STORAGE_SIZE);
+    }
+    */
+}

FailsafeScatterGatherBackingStore.java (new file)
@@ -0,0 +1,205 @@
+package net.szum123321.textile_backup.core.create.compressors.parallel_zip_fix;
+
+import org.apache.commons.compress.parallel.ScatterGatherBackingStore;
+
+import java.io.*;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.*;
+
+/**
+ * The main issue with the {@link org.apache.commons.compress.parallel.FileBasedScatterGatherBackingStore} is that it
+ * stores its results as files in tmpfs. In most cases that is a good thing, as it allows for low system memory usage.
+ * Sadly, some Minecraft server providers limit the size of that folder, which causes the software to fail.
+ *
+ * This {@link ScatterGatherBackingStore} implementation should overcome the issue by storing data in tmp files and, if that fails,
+ * switching to {@link MemoryBlockOutputStream}. This creates another issue, as the system might run out of memory if too much data is stored.
+ */
+public class FailsafeScatterGatherBackingStore implements ScatterGatherBackingStore {
+    private final static String NO_SPACE_LEFT_ON_DEVICE_EXCEPTION_MESSAGE = "No space left on device";
+
+    private final Path tmpdir;
+    private final int id;
+    private final Deque<DataChunk<?>> queue;
+    private int fileCounter;
+    private OutputStream os;
+    private boolean closed;
+
+    public FailsafeScatterGatherBackingStore(int id, Path tmpdir) throws IOException {
+        this.tmpdir = tmpdir;
+        this.id = id;
+        queue = new ArrayDeque<>();
+        //this.target = File.createTempFile("parallelscaterstore", String.valueOf(id), tmpdir.toFile());
+
+        if(!tryAddingNewFileToQueue()) {
+            queue.add(new MemoryBasedDataChunk());
+            os = (OutputStream) queue.peek().getSource();
+        } else {
+            os = Files.newOutputStream(((File)queue.peek().getSource()).toPath());
+        }
+
+        /*try {
+            os = Files.newOutputStream(target.toPath());
+        } catch (IOException ex) {
+            if(ex.getMessage().equals(NO_SPACE_LEFT_ON_DEVICE_EXCEPTION_MESSAGE)) {
+                //Caught it!
+                state = State.Memory;
+                os = new MemoryBlockOutputStream();
+                target.delete();
+            } else {
+                //No need to stay backwards-compatible with Compress 1.13
+                throw ex;
+            }
+        }*/
+    }
+
+    private boolean tryAddingNewFileToQueue() throws IOException {
+        try {
+            queue.add(new FileBasedDataChunk(File.createTempFile("parallescatterstore-" + id, String.valueOf(fileCounter++))));
+        } catch (IOException e) {
+            if(e.getMessage().equals(NO_SPACE_LEFT_ON_DEVICE_EXCEPTION_MESSAGE)) {
+                return false;
+            } else {
+                throw e;
+            }
+        }
+
+        return true;
+    }
+
+    @Override
+    public InputStream getInputStream() throws IOException {
+        ArrayList<InputStream> list = new ArrayList<>(queue.size());
+        for(DataChunk<?> dataChunk: queue) list.add(dataChunk.getInputStream());
+        return new SequenceInputStream(Collections.enumeration(list));
+        /*if(state == State.MemoryBackup) {
+            return new SequenceInputStream(
+                    new SizeLimitedInputStream(safelyWritten, Files.newInputStream(target.toPath())),
+                    ((MemoryBlockOutputStream)os).getInputStream()
+            );
+        } else if(state == State.Memory) {
+            return ((MemoryBlockOutputStream)os).getInputStream();
+        } else {
+            return Files.newInputStream(target.toPath());
+        }*/
+    }
+
+    @Override
+    public void writeOut(byte[] data, int offset, int length) throws IOException {
+        try {
+            os.write(data, offset, length);
+            queue.peekLast().size += length;
+        } catch (IOException e) {
+            if(e.getMessage().equals(NO_SPACE_LEFT_ON_DEVICE_EXCEPTION_MESSAGE)) {
+                //Caught it!
+                queue.add(new MemoryBasedDataChunk());
+                os = (OutputStream) queue.peekLast().getSource();
+            } else {
+                throw e;
+            }
+        }
+        /*try {
+            os.write(data, offset, length);
+            safelyWritten += length;
+        } catch (IOException e) {
+            if(e.getMessage().equals(NO_SPACE_LEFT_ON_DEVICE_EXCEPTION_MESSAGE)) {
+                //Caught it!
+                state = State.MemoryBackup;
+                os.close();
+                os = new MemoryBlockOutputStream();
+                os.write(data, offset, length);
+            } else {
+                throw e;
+            }
+        }*/
+    }
+
+    @Override
+    public void closeForWriting() throws IOException {
+        if (!closed) {
+            os.close();
+            closed = true;
+        }
+    }
+
+    @Override
+    public void close() throws IOException {
+        try {
+            closeForWriting();
+        } finally {
+            queue.stream()
+                    .filter(dataChunk -> dataChunk instanceof FileBasedDataChunk)
+                    .map(dataChunk -> (File)dataChunk.getSource())
+                    .filter(file -> file.exists() && !file.delete())
+                    .forEach(File::deleteOnExit);
+        }
+    }
+
+    private static abstract class DataChunk<T> {
+        private long size;
+
+        public DataChunk() {
+            this.size = 0;
+        }
+
+        public long getSize() {
+            return size;
+        }
+
+        public void setSize(long size) {
+            this.size = size;
+        }
+
+        public abstract T getSource();
+
+        public abstract InputStream getInputStream() throws IOException;
+        public abstract OutputStream getOutputStream() throws IOException;
+    }
+
+    private static class FileBasedDataChunk extends DataChunk<File> {
+        private final File file;
+
+        public FileBasedDataChunk(File file) {
+            this.file = file;
+        }
+
+        @Override
+        public File getSource() {
+            return file;
+        }
+
+        @Override
+        public InputStream getInputStream() throws IOException {
+            return Files.newInputStream(file.toPath());
+        }
+
+        @Override
+        public OutputStream getOutputStream() throws IOException {
+            return Files.newOutputStream(file.toPath());
+        }
+    }
+
+    private static class MemoryBasedDataChunk extends DataChunk<MemoryBlockOutputStream> {
+        private final MemoryBlockOutputStream memoryBlockOutputStream;
+
+        public MemoryBasedDataChunk() {
+            memoryBlockOutputStream = new MemoryBlockOutputStream();
+        }
+
+        @Override
+        public MemoryBlockOutputStream getSource() {
+            return memoryBlockOutputStream;
+        }
+
+        @Override
+        public InputStream getInputStream() {
+            return memoryBlockOutputStream.getInputStream();
+        }
+
+        @Override
+        public OutputStream getOutputStream() {
+            return memoryBlockOutputStream;
+        }
+    }
+}

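A rough usage sketch of the store above, assuming the class is on the classpath (payload and tmp path are made up): callers write, flip to reading, and close, and the file-to-memory failover never surfaces in the API:

import net.szum123321.textile_backup.core.create.compressors.parallel_zip_fix.FailsafeScatterGatherBackingStore;

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Paths;

public class FailsafeStoreDemo {
    public static void main(String[] args) throws IOException {
        FailsafeScatterGatherBackingStore store =
                new FailsafeScatterGatherBackingStore(0, Paths.get(System.getProperty("java.io.tmpdir")));

        byte[] payload = "hello scatter store".getBytes();
        store.writeOut(payload, 0, payload.length); // silently spills to memory on ENOSPC
        store.closeForWriting();

        try (InputStream in = store.getInputStream()) { // stitches all chunks back together
            System.out.println(new String(in.readAllBytes()));
        } finally {
            store.close(); // deletes whatever temp files were actually created
        }
    }
}
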
MemoryBlockOutputStream.java (new file)
@@ -0,0 +1,123 @@
+package net.szum123321.textile_backup.core.create.compressors.parallel_zip_fix;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayDeque;
+import java.util.Deque;
+
+/**
+ * Somewhat similar to ByteArrayOutputStream, except the data is stored in variable-size blocks.
+ * Blocks are created to be at least {@link MemoryBlockOutputStream#MIN_BLOCK_SIZE} in size.
+ * This is done to limit object overhead.
+ */
+public class MemoryBlockOutputStream extends OutputStream {
+    private static final int MIN_BLOCK_SIZE = 65536; //64K
+    private final Deque<DataBlock> blockQueue;
+
+    public MemoryBlockOutputStream() {
+        this.blockQueue = new ArrayDeque<>();
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+        this.write(new byte[] {(byte)(b & 0xFF)}, 0, 1);
+    }
+
+    @Override
+    public void write(byte[] b, int off, int len) throws IOException {
+        while(len > 0) {
+            if(blockQueue.isEmpty() || blockQueue.peekLast().full()) blockQueue.add(new DataBlock(len));
+
+            //assert blockQueue.peekLast() != null;
+            int written = blockQueue.peekLast().write(b, off, len);
+            off += written;
+            len -= written;
+        }
+    }
+
+    /**
+     * Warning! The returned InputStream will DESTROY the data stored in the queue!
+     * @return {@link InputStream} to read data stored in the queue buffer
+     */
+    public InputStream getInputStream() {
+        return new InMemoryInputStream(blockQueue);
+    }
+
+    private static class DataBlock {
+        private final byte[] block;
+        private final int size;
+        private int written = 0;
+        private int read = 0;
+
+        public DataBlock(int size) {
+            this.size = Math.max(size, MIN_BLOCK_SIZE);
+            this.block = new byte[this.size];
+        }
+
+        public boolean full() {
+            return written == size;
+        }
+
+        public boolean dataLeft() {
+            return read < written;
+        }
+
+        public int write(byte[] b, int off, int len) {
+            int tbw = Math.min(len, size - written);
+
+            System.arraycopy(b, off, block, written, tbw);
+            written += tbw;
+            return tbw;
+        }
+
+        public int read(byte[] b, int off, int len) {
+            //if(!dataLeft()) return -1;
+            int tbr = Math.min(len, written - read);
+
+            System.arraycopy(block, read, b, off, tbr);
+            read += tbr;
+            return tbr;
+        }
+
+        public byte[] getBlock() {
+            return block;
+        }
+    }
+
+    private static class InMemoryInputStream extends InputStream {
+        private final Deque<DataBlock> blockQueue;
+
+        public InMemoryInputStream(Deque<DataBlock> blockQueue) {
+            this.blockQueue = blockQueue;
+        }
+
+        @Override
+        public int read() {
+            byte[] buff = new byte[1];
+            return (this.read(buff, 0, 1) == -1) ? -1 : (buff[0] & 0xFF);
+        }
+
+        @Override
+        public int read(byte[] b, int off, int len) {
+            if(len == 0) return 0;
+            if(blockQueue.isEmpty()) return -1;
+
+            int totalRead = 0;
+
+            while(len > 0 && !blockQueue.isEmpty()) {
+                if(!blockQueue.peek().dataLeft()) {
+                    blockQueue.poll();
+                    continue;
+                }
+
+                int read = blockQueue.peek().read(b, off, len);
+
+                off += read;
+                len -= read;
+                totalRead += read;
+            }
+
+            return totalRead == 0 ? -1 : totalRead;
+        }
+    }
+}

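A small sketch of the intended write-then-drain cycle (sizes are illustrative, and it assumes the class above is importable). The data is written in slices so it ends up spread over several 64 KiB blocks, then read back once through the destructive input stream:

import java.io.IOException;
import java.io.InputStream;

public class MemoryBlockDemo {
    public static void main(String[] args) throws IOException {
        MemoryBlockOutputStream out = new MemoryBlockOutputStream();

        byte[] slice = new byte[8192];
        for (int i = 0; i < 25; i++) out.write(slice, 0, slice.length); // 200 KiB across four 64 KiB blocks

        long total = 0;
        byte[] buf = new byte[4096];
        try (InputStream in = out.getInputStream()) { // consumes the blocks as it reads
            int n;
            while ((n = in.read(buf)) != -1) total += n;
        }
        System.out.println(total); // 204800
    }
}
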
SizeLimitedInputStream.java (new file)
@@ -0,0 +1,41 @@
+package net.szum123321.textile_backup.core.create.compressors.parallel_zip_fix;
+
+import org.jetbrains.annotations.NotNull;
+
+import java.io.*;
+
+public class SizeLimitedInputStream extends FilterInputStream {
+    //private final int maxSize;
+    private int dataLeft;
+
+    public SizeLimitedInputStream(int maxSize, InputStream inputStream) {
+        super(inputStream);
+        //this.maxSize = maxSize;
+        this.dataLeft = maxSize;
+    }
+
+    @Override
+    public int read() throws IOException {
+        if(dataLeft == 0) return -1;
+        int read = super.read();
+
+        if(read != -1) dataLeft--;
+        return read;
+    }
+
+    @Override
+    public int available() throws IOException {
+        return Math.min(dataLeft, super.available());
+    }
+
+    @Override
+    public int read(@NotNull byte[] b, int off, int len) throws IOException {
+        if(dataLeft == 0) return -1;
+
+        int read = super.read(b, off, Math.min(dataLeft, len));
+
+        if(read != -1) dataLeft -= read;
+
+        return read;
+    }
+}

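SizeLimitedInputStream exists so CompositeScatterGatherBackingStore can splice "the first N safely written bytes of the file" onto an in-memory tail via SequenceInputStream. A self-contained sketch of that pattern (the strings are illustrative):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.SequenceInputStream;

public class SizeLimitDemo {
    public static void main(String[] args) throws IOException {
        // Pretend only the first 5 bytes of the file were written safely before ENOSPC.
        InputStream head = new SizeLimitedInputStream(5, new ByteArrayInputStream("hello-GARBAGE".getBytes()));
        InputStream tail = new ByteArrayInputStream(" world".getBytes()); // the in-memory continuation

        try (InputStream spliced = new SequenceInputStream(head, tail)) {
            System.out.println(new String(spliced.readAllBytes())); // hello world
        }
    }
}
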
RestoreBackupRunnable.java
@@ -67,7 +67,11 @@ public class RestoreBackupRunnable implements Runnable {

         Set<Path> undeleted = deleteDirectory(worldFile);
         if(!undeleted.isEmpty()) {
-            Statics.LOGGER.error("Failed to delete {} files:\n {}",undeleted.size(), Arrays.toString(undeleted.toArray()));
+            Statics.LOGGER.error("Failed to delete {} file{}:\n {}",
+                    undeleted.size(),
+                    undeleted.size() > 1 ? "s" : "",
+                    Arrays.toString(undeleted.toArray())
+            );
         }

         worldFile.mkdirs();

GenericTarDecompressor.java
@@ -63,12 +63,6 @@ public class GenericTarDecompressor {
             } catch (IOException e) {
                 Statics.LOGGER.error("An exception occurred when trying to create {}", file, e);
             }
-            /*
-            if (!parent.isDirectory() && !parent.mkdirs()) {
-                Statics.LOGGER.error("Failed to create {}", parent);
-                Statics.LOGGER.error("Skipping: {}", file);
-                continue;
-            }*/

             try (OutputStream outputStream = Files.newOutputStream(file.toPath());
                  BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(outputStream)) {

@@ -85,6 +79,12 @@ public class GenericTarDecompressor {
         Statics.LOGGER.info("Decompression took {} seconds.", Utilities.formatDuration(Duration.between(start, Instant.now())));
     }

+    /**
+     * This function handles uncompressed (.tar) streams
+     * @param inputStream File input stream
+     * @return Either a {@link CompressorInputStream} that decompresses the file, or the inputStream itself if it's a plain tar
+     * @throws CompressorException when the file is neither a tar nor another supported archive
+     */
     private static InputStream getCompressorInputStream(InputStream inputStream) throws CompressorException {
         try {
             return new CompressorStreamFactory().createCompressorInputStream(inputStream);

ZipDecompressor.java
@@ -21,25 +21,23 @@ package net.szum123321.textile_backup.core.restore.decompressors;
 import net.szum123321.textile_backup.Statics;
 import net.szum123321.textile_backup.core.Utilities;
 import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;
+import org.apache.commons.compress.archivers.zip.ZipFile;
 import org.apache.commons.compress.utils.IOUtils;

 import java.io.*;
 import java.nio.file.Files;
 import java.time.Duration;
 import java.time.Instant;
+import java.util.Enumeration;

 public class ZipDecompressor {
     public static void decompress(File inputFile, File target) {
         Instant start = Instant.now();

-        try (FileInputStream fileInputStream = new FileInputStream(inputFile);
-             BufferedInputStream bufferedInputStream = new BufferedInputStream(fileInputStream);
-             ZipArchiveInputStream zipInputStream = new ZipArchiveInputStream((bufferedInputStream))) {
-            ZipArchiveEntry entry;
-
-            while ((entry = zipInputStream.getNextZipEntry()) != null) {
-                if(!zipInputStream.canReadEntryData(entry)){
+        try (ZipFile zipFile = new ZipFile(inputFile)) {
+            for(Enumeration<ZipArchiveEntry> enumeration = zipFile.getEntries(); enumeration.hasMoreElements();) {
+                ZipArchiveEntry entry = enumeration.nextElement();
+                if(!zipFile.canReadEntryData(entry)) {
                     Statics.LOGGER.error("Something went wrong while trying to decompress {}", entry.getName());
                     continue;
                 }

@@ -52,8 +50,6 @@ public class ZipDecompressor {
                 } catch (IOException e) {
                     Statics.LOGGER.error("An exception occurred when trying to create {}", file, e);
                 }
-                //if(!file.isDirectory() && !file.mkdirs())
-                //    Statics.LOGGER.error("Failed to create: {}", file);
             } else {
                 File parent = file.getParentFile();

@@ -65,7 +61,7 @@ public class ZipDecompressor {

                 try (OutputStream outputStream = Files.newOutputStream(file.toPath());
                      BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(outputStream)) {
-                    IOUtils.copy(zipInputStream, bufferedOutputStream);
+                    IOUtils.copy(zipFile.getInputStream(entry), bufferedOutputStream);
                 } catch (IOException e) {
                     Statics.LOGGER.error("An exception occurred while trying to decompress file: {}", file.getName(), e);
                 }

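The switch from ZipArchiveInputStream to ZipFile is likely motivated by a documented commons-compress limitation: the streaming reader never sees the central directory, so canReadEntryData() can reject entries (notably STORED entries written with data descriptors, which the parallel scatter creator produces) that ZipFile reads without trouble. A minimal sketch of the random-access pattern (backup.zip is a placeholder name):

import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
import org.apache.commons.compress.archivers.zip.ZipFile;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.Enumeration;

public class ZipFileReadSketch {
    public static void main(String[] args) throws IOException {
        try (ZipFile zip = new ZipFile(new File("backup.zip"))) { // random access via the central directory
            for (Enumeration<ZipArchiveEntry> en = zip.getEntries(); en.hasMoreElements();) {
                ZipArchiveEntry entry = en.nextElement();
                if (!zip.canReadEntryData(entry)) continue; // e.g. an unsupported compression method
                try (InputStream in = zip.getInputStream(entry)) {
                    System.out.println(entry.getName() + ": " + in.readAllBytes().length + " bytes");
                }
            }
        }
    }
}
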
MinecraftServerSessionAccessor.java (deleted)
@@ -1,12 +0,0 @@
-package net.szum123321.textile_backup.mixin;
-
-import net.minecraft.server.MinecraftServer;
-import net.minecraft.world.level.storage.LevelStorage;
-import org.spongepowered.asm.mixin.Mixin;
-import org.spongepowered.asm.mixin.gen.Accessor;
-
-@Mixin(MinecraftServer.class)
-public interface MinecraftServerSessionAccessor {
-    @Accessor
-    LevelStorage.Session getSession();
-}