Add S3 support to backups, fixes #7.

Logan Gorence 2021-12-24 00:43:44 +00:00
parent da820b8a0d
commit fca1db8802
5 changed files with 126 additions and 16 deletions

View File

@@ -24,13 +24,19 @@ abstract class FoundationPlugin : JavaPlugin() {
features = createFeatures()
module = createModule()
// TODO: If we have another plugin using this class, we may need to use context isolation.
// TODO: If we have another plugin using Koin, we may need to use context isolation and ensure
// it uses the same context so they can fetch stuff from us.
// https://insert-koin.io/docs/reference/koin-core/context-isolation
pluginApplication = startKoin {
modules(pluginModule)
modules(module)
}
// This is probably a bit of a hack.
pluginApplication.modules(module {
single { pluginApplication }
})
features.forEach {
pluginApplication.modules(it.module())
}
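The context-isolation TODO above points at Koin's isolated-context mode (the linked docs). As a rough sketch of what that could look like, not part of this commit and with a hypothetical FoundationKoinContext holder: koinApplication { } builds a container without touching the GlobalContext that startKoin { } registers into, so another plugin calling startKoin cannot collide with this one.

import org.koin.core.KoinApplication
import org.koin.core.module.Module
import org.koin.dsl.koinApplication

// Hypothetical sketch of the isolated-context approach from the Koin docs linked above.
// Nothing is registered in GlobalContext, so other plugins' startKoin calls are unaffected.
object FoundationKoinContext {
  lateinit var app: KoinApplication
    private set

  fun start(modules: List<Module>) {
    app = koinApplication {
      modules(modules)
    }
  }
}

The trade-off is that dependent code then resolves through FoundationKoinContext.app.koin rather than the GlobalContext-backed KoinComponent, which is presumably what the TODO's "same context" concern is about.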

View File

@@ -8,6 +8,8 @@ import org.bukkit.Server
import org.bukkit.command.Command
import org.bukkit.command.CommandExecutor
import org.bukkit.command.CommandSender
import software.amazon.awssdk.services.s3.S3Client
import software.amazon.awssdk.services.s3.model.PutObjectRequest
import java.io.BufferedOutputStream
import java.io.FileInputStream
import java.io.FileOutputStream
@@ -19,9 +21,12 @@ import java.util.concurrent.atomic.AtomicBoolean
import java.util.zip.ZipEntry
import java.util.zip.ZipOutputStream
// TODO: Clean up dependency injection.
class BackupCommand(
private val plugin: FoundationCorePlugin,
private val backupPath: Path
private val backupsPath: Path,
private val config: BackupConfig,
private val s3Client: S3Client,
) : CommandExecutor {
override fun onCommand(
sender: CommandSender, command: Command, label: String, args: Array<String>
@@ -47,26 +52,38 @@ class BackupCommand(
return true
}
private fun runBackup(server: Server) {
// TODO: Pull backup creation code into a separate service.
private fun runBackup(server: Server) = try {
RUNNING.set(true)
server.sendMessage(Util.formatSystemMessage("Backup started."))
val backupFile =
backupPath.resolve(String.format("backup-%s.zip", Instant.now().toString())).toFile()
val backupFileName = String.format("backup-%s.zip", Instant.now().toString())
val backupPath = backupsPath.resolve(backupFileName)
val backupFile = backupPath.toFile()
try {
FileOutputStream(backupFile).use { zipFileStream ->
ZipOutputStream(BufferedOutputStream(zipFileStream)).use { zipStream ->
backupPlugins(server, zipStream)
backupWorlds(server, zipStream)
}
}
// TODO: Pull upload code out into a separate service.
if (config.s3.accessKeyId.isNotEmpty()) {
s3Client.putObject(
PutObjectRequest.builder().apply {
bucket(config.s3.bucket)
key("${config.s3.baseDirectory}/$backupFileName")
}.build(),
backupPath
)
}
Unit
} finally {
RUNNING.set(false)
server.sendMessage(Util.formatSystemMessage("Backup finished."))
}
}
private fun backupPlugins(server: Server, zipStream: ZipOutputStream) {
try {
@@ -106,7 +123,7 @@ class BackupCommand(
.filter { path: Path? -> Files.isRegularFile(path) }
.toList()
val buffer = ByteArray(1024)
val backupsPath = backupPath.toRealPath()
val backupsPath = backupsPath.toRealPath()
for (path in paths) {
val realPath = path.toRealPath()
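The TODOs above (clean up dependency injection, pull the upload code into a separate service) sketch the intended follow-up. A rough cut of what the extracted uploader could look like, reusing the same PutObjectRequest builder and putObject(request, path) overload as the command; the BackupUploadService name and shape are assumptions, not part of this commit.

import software.amazon.awssdk.services.s3.S3Client
import software.amazon.awssdk.services.s3.model.PutObjectRequest
import java.nio.file.Path

// Hypothetical service extracted from BackupCommand: uploads a finished backup
// archive when S3 credentials are configured, otherwise does nothing.
class BackupUploadService(
  private val config: BackupConfig,
  private val s3Client: S3Client,
) {
  fun upload(backupPath: Path, backupFileName: String) {
    // Skip the upload entirely when no access key is configured,
    // mirroring the accessKeyId check in BackupCommand.
    if (config.s3.accessKeyId.isEmpty()) return
    s3Client.putObject(
      PutObjectRequest.builder()
        .bucket(config.s3.bucket)
        .key("${config.s3.baseDirectory}/$backupFileName")
        .build(),
      backupPath,
    )
  }
}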

View File

@@ -0,0 +1,18 @@
package cloud.kubelet.foundation.core.features.backup

import kotlinx.serialization.Serializable

@Serializable
data class BackupConfig(
  val s3: S3Config = S3Config(),
)

@Serializable
data class S3Config(
  val accessKeyId: String = "",
  val secretAccessKey: String = "",
  val region: String = "",
  val endpointOverride: String = "",
  val bucket: String = "",
  val baseDirectory: String = "",
)
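Because every property above has a default, kaml can decode a partial backup.yaml and fall back to the empty-string defaults for anything omitted. A minimal sketch with purely illustrative values:

import com.charleskorn.kaml.Yaml

fun main() {
  // Only some keys are present; omitted keys fall back to the data class defaults above.
  val yamlText = """
    s3:
      accessKeyId: "EXAMPLE-KEY-ID"
      secretAccessKey: "example-secret"
      bucket: "my-backups"
      baseDirectory: "my-server"
  """.trimIndent()

  val config = Yaml.default.decodeFromString(BackupConfig.serializer(), yamlText)
  println(config.s3.bucket)           // my-backups
  println(config.s3.region.isEmpty()) // true, the default empty string was used
}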

View File

@@ -1,18 +1,64 @@
package cloud.kubelet.foundation.core.features.backup
import cloud.kubelet.foundation.core.FoundationCorePlugin
import cloud.kubelet.foundation.core.Util
import cloud.kubelet.foundation.core.abstraction.Feature
import com.charleskorn.kaml.Yaml
import org.koin.core.KoinApplication
import org.koin.core.component.inject
import org.koin.dsl.module
import software.amazon.awssdk.auth.credentials.AwsSessionCredentials
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider
import software.amazon.awssdk.regions.Region
import software.amazon.awssdk.services.s3.S3Client
import java.net.URI
import kotlin.io.path.inputStream
class BackupFeature : Feature() {
private val plugin by inject<FoundationCorePlugin>()
private val s3Client by inject<S3Client>()
private val config by inject<BackupConfig>()
override fun enable() {
// Create backup directory.
val backupPath = plugin.pluginDataPath.resolve(BACKUPS_DIRECTORY)
backupPath.toFile().mkdir()
registerCommandExecutor("fbackup", BackupCommand(plugin, backupPath))
registerCommandExecutor("fbackup", BackupCommand(plugin, backupPath, config, s3Client))
}
override fun module() = module {
single {
val configPath = Util.copyDefaultConfig<FoundationCorePlugin>(
plugin.slF4JLogger,
plugin.pluginDataPath,
"backup.yaml",
)
return@single Yaml.default.decodeFromStream(
BackupConfig.serializer(),
configPath.inputStream()
)
}
single {
val config = get<BackupConfig>()
val creds = StaticCredentialsProvider.create(
AwsSessionCredentials.create(config.s3.accessKeyId, config.s3.secretAccessKey, "")
)
val builder = S3Client.builder().credentialsProvider(creds)
if (config.s3.endpointOverride.isNotEmpty()) {
builder.endpointOverride(URI.create(config.s3.endpointOverride))
}
if (config.s3.region.isNotEmpty()) {
builder.region(Region.of(config.s3.region))
} else {
builder.region(Region.US_WEST_1)
}
builder.build()
}
}
companion object {

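One detail the module above leaves at the SDK defaults: some S3-compatible services reached via endpointOverride (MinIO, for example) also expect path-style addressing. A hedged sketch of a client builder with that toggle, using AwsBasicCredentials since no session token is involved (the module above passes an empty token to AwsSessionCredentials); the endpoint URL is a placeholder and the path-style setting is not something this commit configures.

import software.amazon.awssdk.auth.credentials.AwsBasicCredentials
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider
import software.amazon.awssdk.regions.Region
import software.amazon.awssdk.services.s3.S3Client
import software.amazon.awssdk.services.s3.S3Configuration
import java.net.URI

fun buildCompatibleClient(accessKeyId: String, secretAccessKey: String): S3Client =
  S3Client.builder()
    .credentialsProvider(
      StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey))
    )
    .region(Region.US_WEST_1)
    // Placeholder endpoint; a real deployment would use the provider's S3 endpoint here.
    .endpointOverride(URI.create("https://s3.example-provider.invalid"))
    // Some S3-compatible services (e.g. MinIO) require path-style addressing;
    // the plugin itself does not set this.
    .serviceConfiguration(S3Configuration.builder().pathStyleAccessEnabled(true).build())
    .build()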
View File

@@ -0,0 +1,23 @@
# Configuration of the S3 service to upload backups to.
s3:
  # The access key ID from your S3-compatible storage provider.
  # If empty, backups will not be uploaded to S3.
  accessKeyId: ""

  # The secret access key from your S3-compatible storage provider.
  secretAccessKey: ""

  # The region the bucket is located in. If using something other than AWS, this can be set to
  # any valid region (us-west-1, etc.) or left blank, which defaults to us-west-1.
  region: ""

  # An endpoint override, typically used for S3-compatible services like Backblaze B2.
  # If not specified, the standard AWS endpoint for the configured region is used.
  endpointOverride: ""

  # Name of the bucket to upload to.
  bucket: ""

  # Base directory to store backups in. If set to "my-server", backups are stored under a path
  # like bucket-name/my-server/backup-2021-12-21T00:06:41.760568Z.zip
  baseDirectory: ""
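To connect the baseDirectory comment back to the upload call in BackupCommand, a tiny sketch of how the configured values end up in the S3 object key; the values are illustrative only.

fun main() {
  // Illustrative values matching the comment above.
  val s3 = S3Config(
    bucket = "bucket-name",
    baseDirectory = "my-server",
  )
  val backupFileName = "backup-2021-12-21T00:06:41.760568Z.zip"

  // Same key expression BackupCommand uses when building the PutObjectRequest.
  val key = "${s3.baseDirectory}/$backupFileName"
  println("s3://${s3.bucket}/$key")
  // s3://bucket-name/my-server/backup-2021-12-21T00:06:41.760568Z.zip
}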