diff --git a/.dockerignore b/.dockerignore index a86cc57..a691416 100644 --- a/.dockerignore +++ b/.dockerignore @@ -24,3 +24,8 @@ LICENSE README.md **/appsettings.Development.json +**/*.db +*.DotSettings* +*.editorconfig +testData/ + diff --git a/.editorconfig b/.editorconfig index accc999..dba7ad9 100644 --- a/.editorconfig +++ b/.editorconfig @@ -8,5 +8,5 @@ indent_style = space insert_final_newline = true trim_trailing_whitespace = true -[*.{csproj,xml,yml,dll.config,msbuildproj,targets,json}] +[*.{csproj,xml,yml,dll.config,msbuildproj,targets,props,json}] indent_size = 2 diff --git a/.gitignore b/.gitignore index 290f82f..03b6e2d 100644 --- a/.gitignore +++ b/.gitignore @@ -5,5 +5,6 @@ riderModule.iml /_ReSharper.Caches/ Robust.Cdn/content.db* +Robust.Cdn/manifest.db* *.user testData/ diff --git a/Directory.Packages.props b/Directory.Packages.props new file mode 100644 index 0000000..575330e --- /dev/null +++ b/Directory.Packages.props @@ -0,0 +1,18 @@ + + + true + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index ceadbc7..c10c679 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,9 +6,8 @@ EXPOSE 8080 FROM mcr.microsoft.com/dotnet/sdk:8.0 AS build ARG BUILD_CONFIGURATION=Release WORKDIR /src -COPY ["Robust.Cdn/Robust.Cdn.csproj", "Robust.Cdn/"] -RUN dotnet restore "Robust.Cdn/Robust.Cdn.csproj" COPY . . +RUN dotnet restore "Robust.Cdn/Robust.Cdn.csproj" WORKDIR "/src/Robust.Cdn" RUN dotnet build "Robust.Cdn.csproj" -c $BUILD_CONFIGURATION -o /app/build @@ -22,5 +21,7 @@ COPY --from=publish /app/publish . 
ENTRYPOINT ["dotnet", "Robust.Cdn.dll"] VOLUME /database ENV CDN__DatabaseFileName=/database/content.db +VOLUME /manifest +ENV Manifest__DatabaseFileName=/manifest/manifest.db VOLUME /builds -ENV CDN__VersionDiskPath=/builds +ENV Manifest__FileDiskPath=/builds diff --git a/Robust.Cdn.Downloader/Program.cs b/Robust.Cdn.Downloader/Program.cs new file mode 100644 index 0000000..0a43934 --- /dev/null +++ b/Robust.Cdn.Downloader/Program.cs @@ -0,0 +1,28 @@ +using System.CommandLine; +using Robust.Cdn.Lib; + +var rootCommand = new RootCommand(); + +{ + var downloadDestinationArgument = new Argument("destination"); + var downloadUrlArgument = new Argument("url"); + var downloadIndexArgument = new Argument("index"); + var downloadIndexFromUrlCommand = new Command("index-from-url"); + downloadIndexFromUrlCommand.AddArgument(downloadUrlArgument); + downloadIndexFromUrlCommand.AddArgument(downloadIndexArgument); + downloadIndexFromUrlCommand.AddArgument(downloadDestinationArgument); + downloadIndexFromUrlCommand.SetHandler(async (url, index, destination) => + { + using var httpClient = new HttpClient(); + using var downloader = await Downloader.DownloadFilesAsync(httpClient, url, [index]); + + using var file = destination.Create(); + + await downloader.ReadFileHeaderAsync(); + await downloader.ReadFileContentsAsync(file); + + }, downloadUrlArgument, downloadIndexArgument, downloadDestinationArgument); + rootCommand.AddCommand(downloadIndexFromUrlCommand); +} + +await rootCommand.InvokeAsync(args); diff --git a/Robust.Cdn.Downloader/Robust.Cdn.Downloader.csproj b/Robust.Cdn.Downloader/Robust.Cdn.Downloader.csproj new file mode 100644 index 0000000..939df7f --- /dev/null +++ b/Robust.Cdn.Downloader/Robust.Cdn.Downloader.csproj @@ -0,0 +1,15 @@ + + + + Exe + net8.0 + enable + enable + + + + + + + + diff --git a/Robust.Cdn.Lib/Downloader.cs b/Robust.Cdn.Lib/Downloader.cs new file mode 100644 index 0000000..305ee5c --- /dev/null +++ b/Robust.Cdn.Lib/Downloader.cs @@ -0,0 +1,219 @@ 
+using System.Buffers; +using System.Buffers.Binary; +using System.Globalization; +using System.Net.Http.Headers; +using System.Net.Mime; +using SharpZstd; + +namespace Robust.Cdn.Lib; + +public static class Downloader +{ + // ReSharper disable once ConvertToConstant.Global + public static readonly int ManifestDownloadProtocolVersion = 1; + + public static async Task DownloadFilesAsync( + HttpClient client, + string downloadUrl, + IEnumerable downloadIndices, + CancellationToken cancel = default) + { + var request = new HttpRequestMessage(HttpMethod.Post, downloadUrl); + request.Content = new ByteArrayContent(BuildRequestBody(downloadIndices, out var totalFiles)); + request.Content.Headers.ContentType = new MediaTypeHeaderValue(MediaTypeNames.Application.Octet); + request.Headers.AcceptEncoding.Add(new StringWithQualityHeaderValue("zstd")); + request.Headers.Add( + "X-Robust-Download-Protocol", + ManifestDownloadProtocolVersion.ToString(CultureInfo.InvariantCulture)); + + var response = await client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, cancel); + try + { + response.EnsureSuccessStatusCode(); + + var stream = await response.Content.ReadAsStreamAsync(cancel); + if (response.Content.Headers.ContentEncoding.Contains("zstd")) + stream = new ZstdDecodeStream(stream, leaveOpen: false); + + try + { + var header = await ReadStreamHeaderAsync(stream, cancel); + + return new DownloadReader(response, stream, header, totalFiles); + } + catch + { + await stream.DisposeAsync(); + throw; + } + } + catch + { + response.Dispose(); + throw; + } + } + + private static byte[] BuildRequestBody(IEnumerable indices, out int totalFiles) + { + var toDownload = indices.ToArray(); + var requestBody = new byte[toDownload.Length * 4]; + var reqI = 0; + foreach (var idx in toDownload) + { + BinaryPrimitives.WriteInt32LittleEndian(requestBody.AsSpan(reqI, 4), idx); + reqI += 4; + } + + totalFiles = toDownload.Length; + return requestBody; + } + + private static async Task 
ReadStreamHeaderAsync(Stream stream, CancellationToken cancel) + { + var streamHeader = await stream.ReadExactAsync(4, cancel); + var streamFlags = (DownloadStreamHeaderFlags)BinaryPrimitives.ReadInt32LittleEndian(streamHeader); + + return new DownloadStreamHeaderData + { + Flags = streamFlags + }; + } +} + +[Flags] +public enum DownloadStreamHeaderFlags +{ + None = 0, + + /// + /// If this flag is set on the download stream, individual files have been pre-compressed by the server. + /// This means each file has a compression header, and the launcher should not attempt to compress files itself. + /// + PreCompressed = 1 << 0 +} + +public sealed class DownloadStreamHeaderData +{ + public DownloadStreamHeaderFlags Flags { get; init; } + + public bool PreCompressed => (Flags & DownloadStreamHeaderFlags.PreCompressed) != 0; +} + +public sealed class DownloadReader : IDisposable +{ + private readonly Stream _stream; + private readonly HttpResponseMessage _httpResponse; + private readonly int _totalFileCount; + private readonly byte[] _headerReadBuffer; + public DownloadStreamHeaderData Data { get; } + + private int _filesRead; + private State _state = State.ReadFileHeader; + private FileHeaderData _currentHeader; + + internal DownloadReader( + HttpResponseMessage httpResponse, + Stream stream, + DownloadStreamHeaderData data, + int totalFileCount) + { + _stream = stream; + Data = data; + _totalFileCount = totalFileCount; + _httpResponse = httpResponse; + _headerReadBuffer = new byte[data.PreCompressed ? 
8 : 4]; + } + + public async ValueTask ReadFileHeaderAsync(CancellationToken cancel = default) + { + CheckState(State.ReadFileHeader); + + if (_filesRead >= _totalFileCount) + return null; + + await _stream.ReadExactlyAsync(_headerReadBuffer, cancel); + + var length = BinaryPrimitives.ReadInt32LittleEndian(_headerReadBuffer.AsSpan(0, 4)); + var compressedLength = 0; + + if (Data.PreCompressed) + compressedLength = BinaryPrimitives.ReadInt32LittleEndian(_headerReadBuffer.AsSpan(4, 4)); + + _currentHeader = new FileHeaderData + { + DataLength = length, + CompressedLength = compressedLength + }; + + _state = State.ReadFileContents; + _filesRead += 1; + + return _currentHeader; + } + + public async ValueTask ReadRawFileContentsAsync(Memory buffer, CancellationToken cancel = default) + { + CheckState(State.ReadFileContents); + + var size = _currentHeader.IsPreCompressed ? _currentHeader.CompressedLength : _currentHeader.DataLength; + if (size > buffer.Length) + throw new ArgumentException("Provided buffer is not large enough to fit entire data size"); + + await _stream.ReadExactlyAsync(buffer, cancel); + + _state = State.ReadFileHeader; + } + + public async ValueTask ReadFileContentsAsync(Stream destination, CancellationToken cancel = default) + { + CheckState(State.ReadFileContents); + + if (_currentHeader.IsPreCompressed) + { + // TODO: Buffering can be avoided here. 
+ var compressedBuffer = ArrayPool.Shared.Rent(_currentHeader.CompressedLength); + + await _stream.ReadExactlyAsync(compressedBuffer, cancel); + + var ms = new MemoryStream(compressedBuffer, writable: false); + await using var decompress = new ZstdDecodeStream(ms, false); + + await decompress.CopyToAsync(destination, cancel); + + ArrayPool.Shared.Return(compressedBuffer); + } + else + { + await _stream.CopyAmountToAsync(destination, _currentHeader.DataLength, 4096, cancel); + } + + _state = State.ReadFileHeader; + } + + private void CheckState(State expectedState) + { + if (expectedState != _state) + throw new InvalidOperationException($"Invalid state! Expected {expectedState}, but was {_state}"); + } + + public enum State : byte + { + ReadFileHeader, + ReadFileContents + } + + public struct FileHeaderData + { + public int DataLength; + public int CompressedLength; + + public bool IsPreCompressed => CompressedLength > 0; + } + + public void Dispose() + { + _stream.Dispose(); + _httpResponse.Dispose(); + } +} diff --git a/Robust.Cdn.Lib/Robust.Cdn.Lib.csproj b/Robust.Cdn.Lib/Robust.Cdn.Lib.csproj new file mode 100644 index 0000000..e03f26a --- /dev/null +++ b/Robust.Cdn.Lib/Robust.Cdn.Lib.csproj @@ -0,0 +1,13 @@ + + + net8.0 + enable + enable + true + + + + + + + diff --git a/Robust.Cdn.Lib/StreamHelper.cs b/Robust.Cdn.Lib/StreamHelper.cs new file mode 100644 index 0000000..bf50e6e --- /dev/null +++ b/Robust.Cdn.Lib/StreamHelper.cs @@ -0,0 +1,42 @@ +using System.Buffers; + +namespace Robust.Cdn.Lib; + +internal static class StreamHelper +{ + public static async ValueTask ReadExactAsync(this Stream stream, int amount, CancellationToken cancel) + { + var data = new byte[amount]; + await stream.ReadExactlyAsync(data, cancel); + return data; + } + + public static async Task CopyAmountToAsync( + this Stream stream, + Stream to, + int amount, + int bufferSize, + CancellationToken cancel) + { + var buffer = ArrayPool.Shared.Rent(bufferSize); + + while (amount > 0) + { + 
Memory readInto = buffer; + if (amount < readInto.Length) + readInto = readInto[..amount]; + + var read = await stream.ReadAsync(readInto, cancel); + if (read == 0) + throw new EndOfStreamException(); + + amount -= read; + + readInto = readInto[..read]; + + await to.WriteAsync(readInto, cancel); + } + + ArrayPool.Shared.Return(buffer); + } +} diff --git a/Robust.Cdn.Lib/ZStd.cs b/Robust.Cdn.Lib/ZStd.cs new file mode 100644 index 0000000..30fb6c0 --- /dev/null +++ b/Robust.Cdn.Lib/ZStd.cs @@ -0,0 +1,262 @@ +using System.Buffers; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using SharpZstd.Interop; +using static SharpZstd.Interop.Zstd; + +[assembly: InternalsVisibleTo("Robust.Cdn")] + +namespace Robust.Cdn.Lib; + +internal static class ZStd +{ + public static int CompressBound(int length) + { + return (int)ZSTD_COMPRESSBOUND((nuint)length); + } +} + +[Serializable] +internal sealed class ZStdException : Exception +{ + public ZStdException() + { + } + + public ZStdException(string message) : base(message) + { + } + + public ZStdException(string message, Exception inner) : base(message, inner) + { + } + + public static unsafe ZStdException FromCode(nuint code) + { + return new ZStdException(Marshal.PtrToStringUTF8((IntPtr)ZSTD_getErrorName(code))!); + } + + public static void ThrowIfError(nuint code) + { + if (ZSTD_isError(code) != 0) + throw FromCode(code); + } +} + +public sealed unsafe class ZStdCompressionContext : IDisposable +{ + public ZSTD_CCtx* Context { get; private set; } + + private bool Disposed => Context == null; + + public ZStdCompressionContext() + { + Context = ZSTD_createCCtx(); + } + + public void SetParameter(ZSTD_cParameter parameter, int value) + { + CheckDisposed(); + + ZSTD_CCtx_setParameter(Context, parameter, value); + } + + public int Compress(Span destination, Span source, int compressionLevel = ZSTD_CLEVEL_DEFAULT) + { + CheckDisposed(); + + fixed (byte* dst = destination) + fixed (byte* src = source) + { 
+ var ret = ZSTD_compressCCtx( + Context, + dst, (nuint)destination.Length, + src, (nuint)source.Length, + compressionLevel); + + ZStdException.ThrowIfError(ret); + return (int)ret; + } + } + + ~ZStdCompressionContext() + { + Dispose(); + } + + public void Dispose() + { + if (Disposed) + return; + + ZSTD_freeCCtx(Context); + Context = null; + GC.SuppressFinalize(this); + } + + private void CheckDisposed() + { + if (Disposed) + throw new ObjectDisposedException(nameof(ZStdCompressionContext)); + } +} + +internal sealed class ZStdDecompressStream : Stream +{ + private readonly Stream _baseStream; + private readonly bool _ownStream; + private readonly unsafe ZSTD_DCtx* _ctx; + private readonly byte[] _buffer; + private int _bufferPos; + private int _bufferSize; + private bool _disposed; + + public unsafe ZStdDecompressStream(Stream baseStream, bool ownStream = true) + { + _baseStream = baseStream; + _ownStream = ownStream; + _ctx = ZSTD_createDCtx(); + _buffer = ArrayPool.Shared.Rent((int)ZSTD_DStreamInSize()); + } + + protected override unsafe void Dispose(bool disposing) + { + if (_disposed) + return; + + _disposed = true; + ZSTD_freeDCtx(_ctx); + + if (disposing) + { + if (_ownStream) + _baseStream.Dispose(); + + ArrayPool.Shared.Return(_buffer); + } + } + + public override void Flush() + { + ThrowIfDisposed(); + _baseStream.Flush(); + } + + public override int Read(byte[] buffer, int offset, int count) + { + return Read(buffer.AsSpan(offset, count)); + } + + public override int ReadByte() + { + Span buf = stackalloc byte[1]; + return Read(buf) == 0 ? 
-1 : buf[0]; + } + + public override unsafe int Read(Span buffer) + { + ThrowIfDisposed(); + do + { + if (_bufferSize == 0 || _bufferPos == _bufferSize) + { + _bufferPos = 0; + _bufferSize = _baseStream.Read(_buffer); + + if (_bufferSize == 0) + return 0; + } + + fixed (byte* inputPtr = _buffer) + fixed (byte* outputPtr = buffer) + { + var outputBuf = new ZSTD_outBuffer { dst = outputPtr, pos = 0, size = (nuint)buffer.Length }; + var inputBuf = new ZSTD_inBuffer { src = inputPtr, pos = (nuint)_bufferPos, size = (nuint)_bufferSize }; + var ret = ZSTD_decompressStream(_ctx, &outputBuf, &inputBuf); + + _bufferPos = (int)inputBuf.pos; + ZStdException.ThrowIfError(ret); + + if (outputBuf.pos > 0) + return (int)outputBuf.pos; + } + } while (true); + } + + public override async ValueTask ReadAsync( + Memory buffer, + CancellationToken cancellationToken = default) + { + ThrowIfDisposed(); + do + { + if (_bufferSize == 0 || _bufferPos == _bufferSize) + { + _bufferPos = 0; + _bufferSize = await _baseStream.ReadAsync(_buffer, cancellationToken); + + if (_bufferSize == 0) + return 0; + } + + var outputPos = DecompressChunk(); + if (outputPos > 0) + return outputPos; + } while (true); + + unsafe int DecompressChunk() + { + fixed (byte* inputPtr = _buffer) + fixed (byte* outputPtr = buffer.Span) + { + ZSTD_outBuffer outputBuf = default; + outputBuf.dst = outputPtr; + outputBuf.pos = 0; + outputBuf.size = (nuint)buffer.Length; + ZSTD_inBuffer inputBuf = default; + inputBuf.src = inputPtr; + inputBuf.pos = (nuint)_bufferPos; + inputBuf.size = (nuint)_bufferSize; + + var ret = ZSTD_decompressStream(_ctx, &outputBuf, &inputBuf); + + _bufferPos = (int)inputBuf.pos; + ZStdException.ThrowIfError(ret); + + return (int)outputBuf.pos; + } + } + } + + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, 
int offset, int count) + { + throw new NotSupportedException(); + } + + public override bool CanRead => true; + public override bool CanSeek => false; + public override bool CanWrite => false; + public override long Length => throw new NotSupportedException(); + + public override long Position + { + get => throw new NotSupportedException(); + set => throw new NotSupportedException(); + } + + private void ThrowIfDisposed() + { + if (_disposed) + throw new ObjectDisposedException(nameof(ZStdDecompressStream)); + } +} diff --git a/Robust.Cdn.sln b/Robust.Cdn.sln index b564687..6ea46ec 100644 --- a/Robust.Cdn.sln +++ b/Robust.Cdn.sln @@ -10,8 +10,13 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Files", "Solution LICENSE.txt = LICENSE.txt Dockerfile = Dockerfile .dockerignore = .dockerignore + Directory.Packages.props = Directory.Packages.props EndProjectSection EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Robust.Cdn.Lib", "Robust.Cdn.Lib\Robust.Cdn.Lib.csproj", "{9B66C804-46C1-4D91-A028-8D12E25827CA}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Robust.Cdn.Downloader", "Robust.Cdn.Downloader\Robust.Cdn.Downloader.csproj", "{937F7101-5979-4A47-ABF4-36405B839D4A}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -22,5 +27,13 @@ Global {4B470D95-7DE8-4ED7-BB1B-0F17C53C7CE2}.Debug|Any CPU.Build.0 = Debug|Any CPU {4B470D95-7DE8-4ED7-BB1B-0F17C53C7CE2}.Release|Any CPU.ActiveCfg = Release|Any CPU {4B470D95-7DE8-4ED7-BB1B-0F17C53C7CE2}.Release|Any CPU.Build.0 = Release|Any CPU + {9B66C804-46C1-4D91-A028-8D12E25827CA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {9B66C804-46C1-4D91-A028-8D12E25827CA}.Debug|Any CPU.Build.0 = Debug|Any CPU + {9B66C804-46C1-4D91-A028-8D12E25827CA}.Release|Any CPU.ActiveCfg = Release|Any CPU + {9B66C804-46C1-4D91-A028-8D12E25827CA}.Release|Any CPU.Build.0 = Release|Any CPU + {937F7101-5979-4A47-ABF4-36405B839D4A}.Debug|Any 
CPU.ActiveCfg = Debug|Any CPU + {937F7101-5979-4A47-ABF4-36405B839D4A}.Debug|Any CPU.Build.0 = Debug|Any CPU + {937F7101-5979-4A47-ABF4-36405B839D4A}.Release|Any CPU.ActiveCfg = Release|Any CPU + {937F7101-5979-4A47-ABF4-36405B839D4A}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection EndGlobal diff --git a/Robust.Cdn.sln.DotSettings b/Robust.Cdn.sln.DotSettings new file mode 100644 index 0000000..add4b95 --- /dev/null +++ b/Robust.Cdn.sln.DotSettings @@ -0,0 +1,6 @@ + + INDENT + True + True + True + True \ No newline at end of file diff --git a/Robust.Cdn/BuildDirectoryManager.cs b/Robust.Cdn/BuildDirectoryManager.cs new file mode 100644 index 0000000..7557c76 --- /dev/null +++ b/Robust.Cdn/BuildDirectoryManager.cs @@ -0,0 +1,29 @@ +using Microsoft.Extensions.Options; +using Robust.Cdn.Config; + +namespace Robust.Cdn; + +/// +/// Manages storage for manifest server builds. +/// +/// +/// For now this takes care of all the "Path.Combine" calls in the project. +/// In the future this should be expanded to other file access methods like cloud storage, if we want those. 
+/// +public sealed class BuildDirectoryManager(IOptions options) +{ + public string GetForkPath(string fork) + { + return Path.Combine(Path.GetFullPath(options.Value.FileDiskPath), fork); + } + + public string GetBuildVersionPath(string fork, string version) + { + return Path.Combine(GetForkPath(fork), version); + } + + public string GetBuildVersionFilePath(string fork, string version, string file) + { + return Path.Combine(GetBuildVersionPath(fork, version), file); + } +} diff --git a/Robust.Cdn/CdnOptions.cs b/Robust.Cdn/Config/CdnOptions.cs similarity index 82% rename from Robust.Cdn/CdnOptions.cs rename to Robust.Cdn/Config/CdnOptions.cs index 09ab83c..53c407e 100644 --- a/Robust.Cdn/CdnOptions.cs +++ b/Robust.Cdn/Config/CdnOptions.cs @@ -1,26 +1,25 @@ using Robust.Cdn.Services; -namespace Robust.Cdn; +namespace Robust.Cdn.Config; public sealed class CdnOptions { public const string Position = "Cdn"; /// - /// Directory path where new version zips are read from stored. See docs site for details. + /// Backwards-compatibility "default" fork value for content stored by an older version of Robust.Cdn. /// - public string VersionDiskPath { get; set; } = ""; + /// + /// When migrating a content database from an older version, this fork name will be assigned to existing content. + /// Furthermore, the old /version endpoint will use this fork as the one to look up. + /// + public string? DefaultFork { get; set; } /// /// File path for the database to store files, versions and logs into. /// public string DatabaseFileName { get; set; } = "content.db"; - /// - /// The name of client zip files in the directory structure. - /// - public string ClientZipName { get; set; } = "SS14.Client.zip"; - /// /// Whether to do stream compression over whole download requests. /// Ignored if AutoStreamCompressRatio is used. 
@@ -79,9 +78,4 @@ public sealed class CdnOptions /// Log download requests to database or console /// public RequestLogStorage LogRequestStorage { get; set; } = RequestLogStorage.Database; - - /// - /// Authentication token to initiate version updates via the POST /control/update endpoint. - /// - public string UpdateToken { get; set; } = "CHANGE ME"; } diff --git a/Robust.Cdn/Config/ManifestOptions.cs b/Robust.Cdn/Config/ManifestOptions.cs new file mode 100644 index 0000000..ca16d88 --- /dev/null +++ b/Robust.Cdn/Config/ManifestOptions.cs @@ -0,0 +1,53 @@ +namespace Robust.Cdn.Config; + +public sealed class ManifestOptions +{ + public const string Position = "Manifest"; + + /// + /// File path for the database to store data in for the manifest system. + /// + public string DatabaseFileName { get; set; } = "manifest.db"; + + public string FileDiskPath { get; set; } = ""; + + public Dictionary Forks { get; set; } = new(); +} + +public sealed class ManifestForkOptions +{ + public string? UpdateToken { get; set; } + + /// + /// The name of client zip files in the directory structure, excluding the ".zip" extension. + /// + public string ClientZipName { get; set; } = "SS14.Client"; + + public string ServerZipName { get; set; } = "SS14.Server_"; + + public ManifestForkNotifyWatchdog[] NotifyWatchdogs { get; set; } = []; + + public bool Private { get; set; } = false; + + public Dictionary PrivateUsers { get; set; } = new(); + + /// + /// If set to a value other than 0, old manifest versions will be automatically deleted after this many days. + /// + /// + /// This does not delete these old versions from the client CDN, only the server manifest. + /// This is seen as acceptable as those generally don't take too much space. + /// + public int PruneBuildsDays { get; set; } = 90; + + public string? DisplayName { get; set; } + public string? BuildsPageLink { get; set; } + public string? 
BuildsPageLinkText { get; set; } +} + +public sealed class ManifestForkNotifyWatchdog +{ + public required string WatchdogUrl { get; set; } + public required string Instance { get; set; } + public required string ApiToken { get; set; } +} diff --git a/Robust.Cdn/ConfigurationKeys.cs b/Robust.Cdn/ConfigurationKeys.cs new file mode 100644 index 0000000..8cb4e8d --- /dev/null +++ b/Robust.Cdn/ConfigurationKeys.cs @@ -0,0 +1,6 @@ +namespace Robust.Cdn; + +internal static class ConfigurationKeys +{ + public const string KeyBaseUrl = "BaseUrl"; +} diff --git a/Robust.Cdn/Controllers/DownloadCompatibilityController.cs b/Robust.Cdn/Controllers/DownloadCompatibilityController.cs new file mode 100644 index 0000000..3739d68 --- /dev/null +++ b/Robust.Cdn/Controllers/DownloadCompatibilityController.cs @@ -0,0 +1,39 @@ +using Microsoft.AspNetCore.Mvc; +using Microsoft.Extensions.Options; +using Robust.Cdn.Config; + +namespace Robust.Cdn.Controllers; + +[ApiController] +[Route("/version/{version}")] +public sealed class DownloadCompatibilityController( + DownloadController downloadController, + IOptions cdnOptions) : ControllerBase +{ + [HttpGet("manifest")] + public IActionResult GetManifest(string version) + { + if (cdnOptions.Value.DefaultFork is not { } defaultFork) + return NotFound(); + + return downloadController.GetManifest(defaultFork, version); + } + + [HttpOptions("download")] + public IActionResult DownloadOptions(string version) + { + if (cdnOptions.Value.DefaultFork is not { } defaultFork) + return NotFound(); + + return downloadController.DownloadOptions(defaultFork, version); + } + + [HttpPost("download")] + public async Task Download(string version) + { + if (cdnOptions.Value.DefaultFork is not { } defaultFork) + return NotFound(); + + return await downloadController.Download(defaultFork, version); + } +} diff --git a/Robust.Cdn/Controllers/ForkBuildPageController.cs b/Robust.Cdn/Controllers/ForkBuildPageController.cs new file mode 100644 index 0000000..9cbb65d 
--- /dev/null +++ b/Robust.Cdn/Controllers/ForkBuildPageController.cs @@ -0,0 +1,99 @@ +using System.Diagnostics.CodeAnalysis; +using Dapper; +using Microsoft.AspNetCore.Mvc; +using Microsoft.Extensions.Options; +using Robust.Cdn.Config; + +namespace Robust.Cdn.Controllers; + +[Controller] +[Route("/fork/{fork}")] +public sealed class ForkBuildPageController( + ManifestDatabase database, + IOptions manifestOptions) + : Controller +{ + [HttpGet] + public IActionResult Index(string fork) + { + if (!TryCheckBasicAuth(fork, out var errorResult)) + return errorResult; + + var versions = new List(); + + using var tx = database.Connection.BeginTransaction(); + + var dbVersions = database.Connection.Query( + """ + SELECT FV.Id, FV.Name, PublishedTime, EngineVersion + FROM ForkVersion FV + INNER JOIN main.Fork F ON FV.ForkId = F.Id + WHERE F.Name = @Fork + AND FV.Available + ORDER BY PublishedTime DESC + LIMIT 50 + """, new { Fork = fork }); + + foreach (var dbVersion in dbVersions) + { + var servers = database.Connection.Query(""" + SELECT Platform, FileName, FileSize + FROM ForkVersionServerBuild + WHERE ForkVersionId = @ForkVersionId + ORDER BY Platform + """, new { ForkVersionId = dbVersion.Id }); + + versions.Add(new Version + { + Name = dbVersion.Name, + EngineVersion = dbVersion.EngineVersion, + PublishedTime = DateTime.SpecifyKind(dbVersion.PublishedTime, DateTimeKind.Utc), + Servers = servers.ToArray() + }); + } + + return View(new Model + { + Fork = fork, + Options = manifestOptions.Value.Forks[fork], + Versions = versions + }); + } + + private bool TryCheckBasicAuth( + string fork, + [NotNullWhen(false)] out IActionResult? 
errorResult) + { + return ForkManifestController.TryCheckBasicAuth(HttpContext, manifestOptions.Value, fork, out errorResult); + } + + public sealed class Model + { + public required string Fork; + public required ManifestForkOptions Options; + public required List Versions; + } + + public sealed class Version + { + public required string Name; + public required DateTime PublishedTime; + public required string EngineVersion; + public required VersionServer[] Servers; + } + + public sealed class VersionServer + { + public required string Platform { get; set; } + public required string FileName { get; set; } + public required long? FileSize { get; set; } + } + + private sealed class DbVersion + { + public required int Id { get; set; } + public required string Name { get; set; } + public required DateTime PublishedTime { get; set; } + public required string EngineVersion { get; set; } + } +} diff --git a/Robust.Cdn/Controllers/DownloadController.cs b/Robust.Cdn/Controllers/ForkDownloadController.cs similarity index 85% rename from Robust.Cdn/Controllers/DownloadController.cs rename to Robust.Cdn/Controllers/ForkDownloadController.cs index d094cff..20bb53b 100644 --- a/Robust.Cdn/Controllers/DownloadController.cs +++ b/Robust.Cdn/Controllers/ForkDownloadController.cs @@ -5,48 +5,47 @@ using Microsoft.AspNetCore.Http.Features; using Microsoft.AspNetCore.Mvc; using Microsoft.Extensions.Options; +using Robust.Cdn.Config; using Robust.Cdn.Helpers; +using Robust.Cdn.Lib; using Robust.Cdn.Services; +using SharpZstd; using SharpZstd.Interop; using SQLitePCL; namespace Robust.Cdn.Controllers; [ApiController] -[Route("version/{version}")] -public sealed class DownloadController : ControllerBase +[Route("/fork/{fork}/version/{version}")] +public sealed class DownloadController( + Database db, + ILogger logger, + IOptionsSnapshot options, + DownloadRequestLogger requestLogger) + : ControllerBase { private const int MinDownloadProtocol = 1; private const int MaxDownloadProtocol = 
1; private const int MaxDownloadRequestSize = 4 * 100_000; - private readonly Database _db; - private readonly ILogger _logger; - private readonly DownloadRequestLogger _requestLogger; - private readonly CdnOptions _options; - - public DownloadController( - Database db, - ILogger logger, - IOptionsSnapshot options, - DownloadRequestLogger requestLogger) - { - _db = db; - _logger = logger; - _requestLogger = requestLogger; - _options = options.Value; - } + private readonly CdnOptions _options = options.Value; [HttpGet("manifest")] - public IActionResult GetManifest(string version) + public IActionResult GetManifest(string fork, string version) { - var con = _db.Connection; + var con = db.Connection; con.BeginTransaction(deferred: true); var (row, hash) = con.QuerySingleOrDefault<(long, byte[])>( - "SELECT Id, ManifestHash FROM ContentVersion WHERE Version = @Version", + """ + SELECT CV.Id, CV.ManifestHash + FROM ContentVersion CV + INNER JOIN main.Fork F on F.Id = CV.ForkId + WHERE F.Name = @Fork AND Version = @Version + """, new { + Fork = fork, Version = version }); @@ -66,14 +65,15 @@ public IActionResult GetManifest(string version) return File(blob, "text/plain"); } - var decompress = new ZStdDecompressStream(blob); + var decompress = new ZstdDecodeStream(blob, leaveOpen: false); return File(decompress, "text/plain"); } [HttpOptions("download")] - public IActionResult DownloadOptions(string version) + public IActionResult DownloadOptions(string fork, string version) { + _ = fork; _ = version; Response.Headers["X-Robust-Download-Min-Protocol"] = MinDownloadProtocol.ToString(); @@ -83,7 +83,7 @@ public IActionResult DownloadOptions(string version) } [HttpPost("download")] - public async Task Download(string version) + public async Task Download(string fork, string version) { if (Request.ContentType != "application/octet-stream") return BadRequest("Must specify application/octet-stream Content-Type"); @@ -96,13 +96,19 @@ public async Task Download(string version) 
// TODO: this request limiting logic is pretty bad. HttpContext.Features.Get()!.MaxRequestBodySize = MaxDownloadRequestSize; - var con = _db.Connection; + var con = db.Connection; con.BeginTransaction(deferred: true); var (versionId, countDistinctBlobs) = con.QuerySingleOrDefault<(long, int)>( - "SELECT Id, CountDistinctBlobs FROM ContentVersion WHERE Version = @Version", + """ + SELECT CV.Id, CV.CountDistinctBlobs + FROM ContentVersion CV + INNER JOIN main.Fork F on F.Id = CV.ForkId + WHERE F.Name = @Fork AND Version = @Version + """, new { + Fork = fork, Version = version }); @@ -152,7 +158,7 @@ public async Task Download(string version) if (optAutoStreamCompressRatio > 0) { var requestRatio = countFilesRequested / (float) countDistinctBlobs; - _logger.LogTrace("Auto stream compression ratio: {RequestRatio}", requestRatio); + logger.LogTrace("Auto stream compression ratio: {RequestRatio}", requestRatio); if (requestRatio > optAutoStreamCompressRatio) { optStreamCompression = true; @@ -166,12 +172,12 @@ public async Task Download(string version) } var doStreamCompression = optStreamCompression && AcceptsZStd; - _logger.LogTrace("Transfer is using stream-compression: {PreCompressed}", doStreamCompression); + logger.LogTrace("Transfer is using stream-compression: {PreCompressed}", doStreamCompression); if (doStreamCompression) { - var zStdCompressStream = new ZStdCompressStream(outStream); - zStdCompressStream.Context.SetParameter( + var zStdCompressStream = new ZstdEncodeStream(outStream, leaveOpen: false); + zStdCompressStream.Encoder.SetParameter( ZSTD_cParameter.ZSTD_c_compressionLevel, _options.StreamCompressLevel); @@ -191,7 +197,7 @@ public async Task Download(string version) var preCompressed = optPreCompression; - _logger.LogTrace("Transfer is using pre-compression: {PreCompressed}", preCompressed); + logger.LogTrace("Transfer is using pre-compression: {PreCompressed}", preCompressed); var fileHeaderSize = 4; if (preCompressed) @@ -281,7 +287,7 @@ public 
async Task Download(string version) count += 1; } - _logger.LogTrace( + logger.LogTrace( "Total SQLite: {SqliteElapsed} ms, ns / iter: {NanosPerIter}", swSqlite.ElapsedMilliseconds, swSqlite.Elapsed.TotalMilliseconds * 1_000_000 / count); @@ -294,7 +300,7 @@ public async Task Download(string version) } var bytesSent = countStream.Written; - _logger.LogTrace("Total data sent: {BytesSent} B", bytesSent); + logger.LogTrace("Total data sent: {BytesSent} B", bytesSent); if (_options.LogRequests) { @@ -307,7 +313,7 @@ public async Task Download(string version) var log = new DownloadRequestLogger.RequestLog( buf, logCompression, protocol, DateTime.UtcNow, versionId, bytesSent); - await _requestLogger.QueueLog(log); + await requestLogger.QueueLog(log); } return new NoOpActionResult(); diff --git a/Robust.Cdn/Controllers/ForkManifestController.cs b/Robust.Cdn/Controllers/ForkManifestController.cs new file mode 100644 index 0000000..355555d --- /dev/null +++ b/Robust.Cdn/Controllers/ForkManifestController.cs @@ -0,0 +1,108 @@ +using System.Diagnostics.CodeAnalysis; +using System.Net.Mime; +using Dapper; +using Microsoft.AspNetCore.Mvc; +using Microsoft.Extensions.Options; +using Robust.Cdn.Config; +using Robust.Cdn.Helpers; + +namespace Robust.Cdn.Controllers; + +/// +/// Functionality for server manifests managed by the CDN. +/// This covers the server build manifest as well as the download endpoint. 
+/// +[ApiController] +[Route("/fork/{fork}")] +public sealed class ForkManifestController( + ManifestDatabase database, + BuildDirectoryManager buildDirectoryManager, + IOptions manifestOptions) + : ControllerBase +{ + [HttpGet("manifest")] + public IActionResult GetManifest(string fork) + { + if (!TryCheckBasicAuth(fork, out var errorResult)) + return errorResult; + + var rowId = database.Connection.QuerySingleOrDefault( + "SELECT ROWID FROM Fork WHERE Name == @Fork AND ServerManifestCache IS NOT NULL", + new { Fork = fork }); + + if (rowId == 0) + return NotFound(); + + var stream = SqliteBlobStream.Open( + database.Connection.Handle!, + "main", + "Fork", + "ServerManifestCache", + rowId, + false); + + return File(stream, MediaTypeNames.Application.Json); + } + + [HttpGet("version/{version}/file/{file}")] + public IActionResult GetFile( + string fork, + string version, + string file) + { + // Just safety shit here. + if (file.Contains('/') || file == ".." || file == ".") + return BadRequest(); + + if (!TryCheckBasicAuth(fork, out var errorResult)) + return errorResult; + + var versionExists = database.Connection.QuerySingleOrDefault(""" + SELECT 1 + FROM ForkVersion, Fork + WHERE ForkVersion.Name = @Version + AND Fork.Name = @Fork + AND Fork.Id = ForkVersion.ForkId + """, new { Fork = fork, Version = version }); + + if (!versionExists) + return NotFound(); + + var disk = buildDirectoryManager.GetBuildVersionFilePath(fork, version, file); + + return PhysicalFile(disk, MediaTypeNames.Application.Zip); + } + + private bool TryCheckBasicAuth( + string fork, + [NotNullWhen(false)] out IActionResult? errorResult) + { + return TryCheckBasicAuth(HttpContext, manifestOptions.Value, fork, out errorResult); + } + + internal static bool TryCheckBasicAuth( + HttpContext httpContext, + ManifestOptions manifestOptions, + string fork, + [NotNullWhen(false)] out IActionResult? 
errorResult) + { + if (!manifestOptions.Forks.TryGetValue(fork, out var forkConfig)) + { + errorResult = new NotFoundObjectResult("Fork does not exist"); + return false; + } + + if (!forkConfig.Private) + { + errorResult = null; + return true; + } + + return AuthorizationUtility.CheckBasicAuth( + httpContext, + $"fork_{fork}", + a => forkConfig.PrivateUsers.GetValueOrDefault(a), + out _, + out errorResult); + } +} diff --git a/Robust.Cdn/Controllers/ForkPublishController.cs b/Robust.Cdn/Controllers/ForkPublishController.cs new file mode 100644 index 0000000..4cb757d --- /dev/null +++ b/Robust.Cdn/Controllers/ForkPublishController.cs @@ -0,0 +1,403 @@ +using System.IO.Compression; +using System.Security.Cryptography; +using System.Text; +using System.Text.Json; +using System.Text.RegularExpressions; +using Dapper; +using Microsoft.AspNetCore.Mvc; +using Microsoft.Extensions.Options; +using Quartz; +using Robust.Cdn.Config; +using Robust.Cdn.Helpers; +using Robust.Cdn.Jobs; +using SpaceWizards.Sodium; + +namespace Robust.Cdn.Controllers; + +/// +/// Implements the publish endpoint used to receive new builds from CI. +/// +/// +/// +/// The actual build content is provided as a single zip artifact containing server and client files. +/// This file is pulled from a URL by the CDN, not pushed. +/// This should be compatible with systems such as GitHub Actions artifacts. +/// +/// +/// The artifacts are pulled and written to disk. The client manifest information is built and injected into the stored +/// server builds, filling out the build.json file. +/// +/// +/// After publish, the game client CDN is notified to ingest the client files. +/// Builds are only marked available for servers after the CDN has finished ingesting them. 
+/// +/// +[ApiController] +[Route("/fork/{fork}")] +public sealed partial class ForkPublishController( + ForkAuthHelper authHelper, + IHttpClientFactory httpFactory, + ManifestDatabase manifestDatabase, + ISchedulerFactory schedulerFactory, + BaseUrlManager baseUrlManager, + BuildDirectoryManager buildDirectoryManager, + ILogger logger) + : ControllerBase +{ + private static readonly Regex ValidVersionRegex = MyRegex(); + + public const string PublishFetchHttpClient = "PublishFetch"; + + [HttpPost("publish")] + public async Task PostPublish( + string fork, + [FromBody] PublishRequest request, + CancellationToken cancel) + { + if (!authHelper.IsAuthValid(fork, out var forkConfig, out var failureResult)) + return failureResult; + + baseUrlManager.ValidateBaseUrl(); + + if (string.IsNullOrWhiteSpace(request.Archive)) + return BadRequest("Archive is empty"); + + if (!ValidVersionRegex.IsMatch(request.Version)) + return BadRequest("Invalid version name"); + + if (VersionAlreadyExists(fork, request.Version)) + return Conflict("Version already exists"); + + logger.LogInformation("Starting publish for fork {Fork} version {Version}", fork, request.Version); + + var httpClient = httpFactory.CreateClient(); + + await using var tmpFile = CreateTempFile(); + + logger.LogDebug("Downloading publish archive {Archive} to temp file", request.Archive); + + await using var response = await httpClient.GetStreamAsync(request.Archive, cancel); + await response.CopyToAsync(tmpFile, cancel); + tmpFile.Seek(0, SeekOrigin.Begin); + + using var archive = new ZipArchive(tmpFile, ZipArchiveMode.Read); + + logger.LogDebug("Classifying archive entries..."); + + var artifacts = ClassifyEntries(forkConfig, archive); + var clientArtifact = artifacts.SingleOrDefault(art => art.Type == ArtifactType.Client); + if (clientArtifact == null) + return BadRequest("Client zip is missing!"); + + var versionDir = buildDirectoryManager.GetBuildVersionPath(fork, request.Version); + + try + { + 
Directory.CreateDirectory(versionDir); + + var diskFiles = ExtractZipToVersionDir(artifacts, versionDir); + var buildJson = GenerateBuildJson(diskFiles, clientArtifact, request, fork); + InjectBuildJsonIntoServers(diskFiles, buildJson); + + AddVersionToDatabase(clientArtifact, diskFiles, fork, request); + + await QueueIngestJobAsync(fork); + + logger.LogInformation("Publish succeeded!"); + + return NoContent(); + } + catch + { + // Clean up after ourselves if something goes wrong. + Directory.Delete(versionDir, true); + + throw; + } + } + + private bool VersionAlreadyExists(string fork, string version) + { + return manifestDatabase.Connection.QuerySingleOrDefault( + """ + SELECT 1 + FROM Fork, ForkVersion + WHERE Fork.Id = ForkVersion.ForkId + AND Fork.Name = @ForkName + AND ForkVersion.Name = @ForkVersion + """, new + { + ForkName = fork, + ForkVersion = version + }); + } + + private List ClassifyEntries(ManifestForkOptions forkConfig, ZipArchive archive) + { + var list = new List(); + + foreach (var entry in archive.Entries) + { + var artifact = ClassifyEntry(forkConfig, entry); + + if (artifact == null) + continue; + + logger.LogDebug( + "Artifact entry {Name}: Type {Type} Platform {Platform}", + entry.FullName, + artifact.Type, + artifact.Platform); + + list.Add(artifact); + } + + return list; + } + + private static ZipArtifact? 
ClassifyEntry(ManifestForkOptions forkConfig, ZipArchiveEntry entry) + { + if (entry.FullName == $"{forkConfig.ClientZipName}.zip") + return new ZipArtifact { Entry = entry, Type = ArtifactType.Client }; + + if (entry.FullName.StartsWith(forkConfig.ServerZipName) && entry.FullName.EndsWith(".zip")) + { + var rid = entry.FullName[forkConfig.ServerZipName.Length..^".zip".Length]; + return new ZipArtifact + { + Entry = entry, + Platform = rid, + Type = ArtifactType.Server + }; + } + + return null; + } + + private Dictionary ExtractZipToVersionDir(List artifacts, string versionDir) + { + logger.LogDebug("Extracting artifacts to directory {Directory}", versionDir); + + var dict = new Dictionary(); + + foreach (var artifact in artifacts) + { + var filePath = Path.Combine(versionDir, artifact.Entry.Name); + logger.LogTrace("Extracting artifact {Name}", artifact.Entry.FullName); + + using var entry = artifact.Entry.Open(); + using var file = System.IO.File.Create(filePath); + + entry.CopyTo(file); + dict.Add(artifact, filePath); + } + + return dict; + } + + private MemoryStream GenerateBuildJson( + Dictionary diskFiles, + ZipArtifact clientArtifact, + PublishRequest request, + string forkName) + { + logger.LogDebug("Generating build.json contents"); + + var diskPath = diskFiles[clientArtifact]; + + var diskFileName = Path.GetFileName(diskPath); + using var file = System.IO.File.OpenRead(diskPath); + + // Hash zip file + var hash = Convert.ToHexString(SHA256.HashData(file)); + + // Hash manifest + var manifestHash = Convert.ToHexString(GenerateManifestHash(file)); + + logger.LogDebug("Client zip hash is {ZipHash}, manifest hash is {ManifestHash}", hash, manifestHash); + + var data = new Dictionary + { + { "download", baseUrlManager.MakeBuildInfoUrl($"fork/{{FORK_ID}}/version/{{FORK_VERSION}}/file/{diskFileName}") }, + { "version", request.Version }, + { "hash", hash }, + { "fork_id", forkName }, + { "engine_version", request.EngineVersion }, + { "manifest_url", 
baseUrlManager.MakeBuildInfoUrl("fork/{FORK_ID}/version/{FORK_VERSION}/manifest") }, + { "manifest_download_url", baseUrlManager.MakeBuildInfoUrl("fork/{FORK_ID}/version/{FORK_VERSION}/download") }, + { "manifest_hash", manifestHash } + }; + + var stream = new MemoryStream(); + JsonSerializer.Serialize(stream, data); + + stream.Position = 0; + return stream; + } + + private byte[] GenerateManifestHash(Stream zipFile) + { + using var zip = new ZipArchive(zipFile, ZipArchiveMode.Read); + + var manifest = new MemoryStream(); + var writer = new StreamWriter(manifest, new UTF8Encoding(false), leaveOpen: true); + + writer.Write("Robust Content Manifest 1\n"); + + foreach (var entry in zip.Entries.OrderBy(e => e.FullName, StringComparer.Ordinal)) + { + // Ignore directory entries. + if (entry.Name == "") + continue; + + var hash = GetZipEntryBlake2B(entry); + writer.Write($"{Convert.ToHexString(hash)} {entry.FullName}\n"); + } + + writer.Dispose(); + + return CryptoGenericHashBlake2B.Hash( + CryptoGenericHashBlake2B.Bytes, + manifest.AsSpan(), + ReadOnlySpan.Empty); + } + + private static byte[] GetZipEntryBlake2B(ZipArchiveEntry entry) + { + using var stream = entry.Open(); + + return HashHelper.HashBlake2B(stream); + } + + private void InjectBuildJsonIntoServers(Dictionary diskFiles, MemoryStream buildJson) + { + logger.LogDebug("Adding build.json to server builds"); + + foreach (var (artifact, diskPath) in diskFiles) + { + if (artifact.Type != ArtifactType.Server) + continue; + + logger.LogTrace("Adding build.json to build {ServerBuildFileName}", diskPath); + + using var zipFile = System.IO.File.Open(diskPath, FileMode.Open); + using var zip = new ZipArchive(zipFile, ZipArchiveMode.Update); + + if (zip.GetEntry("build.json") is { } existing) + { + logger.LogDebug("Zip {ServerBuildFileName} had existing build.json, deleting", diskPath); + existing.Delete(); + } + + var buildJsonEntry = zip.CreateEntry("build.json"); + using var entryStream = buildJsonEntry.Open(); + + 
buildJson.CopyTo(entryStream); + buildJson.Position = 0; + } + } + + private void AddVersionToDatabase( + ZipArtifact clientArtifact, + Dictionary diskFiles, + string fork, + PublishRequest request) + { + logger.LogDebug("Adding new version to database"); + + var dbCon = manifestDatabase.Connection; + using var tx = dbCon.BeginTransaction(); + + var forkId = dbCon.QuerySingle("SELECT Id FROM Fork WHERE Name = @Name", new { Name = fork }); + + var (clientName, clientSha256, _) = GetFileNameSha256Pair(diskFiles[clientArtifact]); + + var versionId = dbCon.QuerySingle(""" + INSERT INTO ForkVersion (Name, ForkId, PublishedTime, ClientFileName, ClientSha256, EngineVersion) + VALUES (@Name, @ForkId, @PublishTime, @ClientName, @ClientSha256, @EngineVersion) + RETURNING Id + """, + new + { + Name = request.Version, + ForkId = forkId, + ClientName = clientName, + ClientSha256 = clientSha256, + request.EngineVersion, + PublishTime = DateTime.UtcNow + }); + + foreach (var (artifact, diskPath) in diskFiles) + { + if (artifact.Type != ArtifactType.Server) + continue; + + var (serverName, serverSha256, fileSize) = GetFileNameSha256Pair(diskPath); + + dbCon.Execute(""" + INSERT INTO ForkVersionServerBuild (ForkVersionId, Platform, FileName, Sha256, FileSize) + VALUES (@ForkVersion, @Platform, @ServerName, @ServerSha256, @FileSize) + """, + new + { + ForkVersion = versionId, + artifact.Platform, + ServerName = serverName, + ServerSha256 = serverSha256, + FileSize = fileSize + }); + } + + tx.Commit(); + } + + private static (string name, byte[] hash, long size) GetFileNameSha256Pair(string diskPath) + { + using var file = System.IO.File.OpenRead(diskPath); + + return (Path.GetFileName(diskPath), SHA256.HashData(file), file.Length); + } + + private async Task QueueIngestJobAsync(string fork) + { + logger.LogDebug("Notifying client CDN for ingest of new files"); + + var scheduler = await schedulerFactory.GetScheduler(); + await scheduler.TriggerJob(IngestNewCdnContentJob.Key, 
IngestNewCdnContentJob.Data(fork)); + } + + private static FileStream CreateTempFile() + { + return new FileStream( + Path.GetTempFileName(), + FileMode.Open, + FileAccess.ReadWrite, + FileShare.None, + 4096, + FileOptions.DeleteOnClose); + } + + public sealed class PublishRequest + { + public required string Version { get; set; } + public required string EngineVersion { get; set; } + public required string Archive { get; set; } + } + + // File cannot start with a dot but otherwise most shit is fair game. + [GeneratedRegex(@"[a-zA-Z0-9\-_][a-zA-Z0-9\-_.]*")] + private static partial Regex MyRegex(); + + private sealed class ZipArtifact + { + public required ZipArchiveEntry Entry { get; set; } + public ArtifactType Type { get; set; } + public string? Platform { get; set; } + } + + private enum ArtifactType + { + Server, + Client + } +} diff --git a/Robust.Cdn/Controllers/ForkUpdateController.cs b/Robust.Cdn/Controllers/ForkUpdateController.cs new file mode 100644 index 0000000..7ede489 --- /dev/null +++ b/Robust.Cdn/Controllers/ForkUpdateController.cs @@ -0,0 +1,25 @@ +using Microsoft.AspNetCore.Mvc; +using Quartz; +using Robust.Cdn.Helpers; +using Robust.Cdn.Jobs; + +namespace Robust.Cdn.Controllers; + +[ApiController] +[Route("/fork/{fork}")] +public sealed class UpdateController( + ForkAuthHelper authHelper, + ISchedulerFactory schedulerFactory) : ControllerBase +{ + [HttpPost("control/update")] + public async Task PostControlUpdate(string fork) + { + if (!authHelper.IsAuthValid(fork, out _, out var failureResult)) + return failureResult; + + var scheduler = await schedulerFactory.GetScheduler(); + await scheduler.TriggerJob(IngestNewCdnContentJob.Key, IngestNewCdnContentJob.Data(fork)); + + return Accepted(); + } +} diff --git a/Robust.Cdn/Controllers/UpdateController.cs b/Robust.Cdn/Controllers/UpdateController.cs deleted file mode 100644 index 55fa272..0000000 --- a/Robust.Cdn/Controllers/UpdateController.cs +++ /dev/null @@ -1,51 +0,0 @@ -using 
System.Runtime.InteropServices; -using System.Security.Cryptography; -using Microsoft.AspNetCore.Mvc; -using Microsoft.Extensions.Options; -using Robust.Cdn.Services; - -namespace Robust.Cdn.Controllers; - -[ApiController] -public sealed class UpdateController : ControllerBase -{ - private readonly DataLoader _loader; - private readonly CdnOptions _options; - - public UpdateController(IOptionsSnapshot options, DataLoader loader) - { - _loader = loader; - _options = options.Value; - } - - [HttpPost("control/update")] - public async Task PostControlUpdate() - { - var authHeader = Request.Headers.Authorization; - - if (authHeader.Count == 0) - return Unauthorized(); - - var auth = authHeader[0]; - - // Idk does using Bearer: make sense here? - if (auth == null || !auth.StartsWith("Bearer ")) - return Unauthorized("Need Bearer: auth type"); - - var token = auth["Bearer ".Length..]; - - var matches = StringsEqual(token, _options.UpdateToken); - if (!matches) - return Unauthorized("Incorrect token"); - - await _loader.QueueUpdateVersions(); - return Accepted(); - } - - private static bool StringsEqual(ReadOnlySpan a, ReadOnlySpan b) - { - return CryptographicOperations.FixedTimeEquals( - MemoryMarshal.AsBytes(a), - MemoryMarshal.AsBytes(b)); - } -} diff --git a/Robust.Cdn/Database.cs b/Robust.Cdn/Database.cs index bd9e202..f7fcba2 100644 --- a/Robust.Cdn/Database.cs +++ b/Robust.Cdn/Database.cs @@ -1,33 +1,69 @@ using Dapper; using Microsoft.Data.Sqlite; using Microsoft.Extensions.Options; +using Robust.Cdn.Config; namespace Robust.Cdn; -public sealed class Database : IDisposable +public abstract class BaseScopedDatabase : IDisposable { - private readonly IOptions _options; private SqliteConnection? 
_connection; public SqliteConnection Connection => _connection ??= OpenConnection(); - public Database(IOptions options) + private SqliteConnection OpenConnection() { - _options = options; + var con = new SqliteConnection(GetConnectionString()); + con.Open(); + con.Execute("PRAGMA journal_mode=WAL"); + return con; } +#pragma warning disable CA1816 public void Dispose() { _connection?.Dispose(); } +#pragma warning restore CA1816 - private SqliteConnection OpenConnection() + protected abstract string GetConnectionString(); + + protected string GetConnectionStringForFile(string fileName) { - var options = _options.Value; - var conString = $"Data Source={options.DatabaseFileName};Mode=ReadWriteCreate;Pooling=True;Foreign Keys=True"; + return $"Data Source={fileName};Mode=ReadWriteCreate;Pooling=True;Foreign Keys=True"; + } +} - var con = new SqliteConnection(conString); - con.Open(); - con.Execute("PRAGMA journal_mode=WAL"); - return con; +/// +/// Database service for CDN functionality. +/// +public sealed class Database(IOptions options) : BaseScopedDatabase +{ + protected override string GetConnectionString() + { + return GetConnectionStringForFile(options.Value.DatabaseFileName); + } +} + +/// +/// Database service for server manifest functionality. 
+/// +public sealed class ManifestDatabase(IOptions options) : BaseScopedDatabase +{ + protected override string GetConnectionString() + { + return GetConnectionStringForFile(options.Value.DatabaseFileName); + } + + public void EnsureForksCreated() + { + var con = Connection; + using var tx = con.BeginTransaction(); + + foreach (var forkName in options.Value.Forks.Keys) + { + con.Execute("INSERT INTO Fork (Name) VALUES (@Name) ON CONFLICT DO NOTHING", new { Name = forkName }); + } + + tx.Commit(); } } diff --git a/Robust.Cdn/Helpers/AuthorizationUtility.cs b/Robust.Cdn/Helpers/AuthorizationUtility.cs new file mode 100644 index 0000000..806e22e --- /dev/null +++ b/Robust.Cdn/Helpers/AuthorizationUtility.cs @@ -0,0 +1,95 @@ +using System.Diagnostics.CodeAnalysis; +using System.Security.Cryptography; +using System.Text; +using Microsoft.AspNetCore.Mvc; + +namespace Robust.Cdn.Helpers; + +public static class AuthorizationUtility +{ + public static bool TryParseBasicAuthentication(string authorization, + [NotNullWhen(false)] out IActionResult? failure, + [NotNullWhen(true)] out string? username, + [NotNullWhen(true)] out string? password) + { + username = null; + password = null; + + if (!authorization.StartsWith("Basic ")) + { + failure = new UnauthorizedResult(); + return false; + } + + var value = Encoding.UTF8.GetString(Convert.FromBase64String(authorization[6..])); + var split = value.Split(':'); + + if (split.Length != 2) + { + failure = new BadRequestResult(); + return false; + } + + username = split[0]; + password = split[1]; + failure = null; + return true; + } + + public static bool BasicAuthMatches(string provided, string expected) + { + return CryptographicOperations.FixedTimeEquals( + Encoding.UTF8.GetBytes(provided), + Encoding.UTF8.GetBytes(expected)); + } + + public static bool CheckBasicAuth( + HttpContext httpContext, + string realm, + Func getPassword, + [NotNullWhen(true)] out string? user, + [NotNullWhen(false)] out IActionResult? 
failure) + { + user = null; + + if (!httpContext.Request.Headers.TryGetValue("Authorization", out var authValues)) + { + SetWwwAuthenticate(httpContext, realm); + failure = new UnauthorizedResult(); + return false; + } + + var authValue = authValues[0]!; + if (!TryParseBasicAuthentication( + authValue, + out failure, + out user, + out var password)) + { + SetWwwAuthenticate(httpContext, realm); + return false; + } + + var expectedPassword = getPassword(user); + if (expectedPassword == null) + { + SetWwwAuthenticate(httpContext, realm); + failure = new UnauthorizedResult(); + return false; + } + + if (!BasicAuthMatches(password, expectedPassword)) + { + SetWwwAuthenticate(httpContext, realm); + failure = new UnauthorizedResult(); + return false; + } + + return true; + } + + private static void SetWwwAuthenticate(HttpContext context, string realm) + { + context.Response.Headers.WWWAuthenticate = $"Basic realm={realm}"; + } +} diff --git a/Robust.Cdn/Helpers/BaseUrlManager.cs b/Robust.Cdn/Helpers/BaseUrlManager.cs new file mode 100644 index 0000000..1c70e23 --- /dev/null +++ b/Robust.Cdn/Helpers/BaseUrlManager.cs @@ -0,0 +1,22 @@ +namespace Robust.Cdn.Helpers; + +public sealed class BaseUrlManager(IConfiguration configuration) +{ + public string MakeBuildInfoUrl(string path) + { + var baseUrl = configuration[ConfigurationKeys.KeyBaseUrl]; + + return baseUrl + path; + } + + public void ValidateBaseUrl() + { + var baseUrl = configuration[ConfigurationKeys.KeyBaseUrl]; + + if (baseUrl is null) + throw new InvalidOperationException("BaseUrl is not set!"); + + if (!baseUrl.EndsWith('/')) + throw new InvalidOperationException("BaseUrl must end with '/'!"); + } +} diff --git a/Robust.Cdn/Helpers/BufferHelpers.cs b/Robust.Cdn/Helpers/BufferHelpers.cs index 83b31de..9825aa4 100644 --- a/Robust.Cdn/Helpers/BufferHelpers.cs +++ b/Robust.Cdn/Helpers/BufferHelpers.cs @@ -20,4 +20,9 @@ public static void EnsurePooledBuffer(ref T[] buf, ArrayPool pool, int min pool.Return(buf); buf 
= pool.Rent(minimumLength); } + + public static Span AsSpan(this MemoryStream stream) + { + return stream.GetBuffer().AsSpan(0, (int)stream.Length); + } } diff --git a/Robust.Cdn/Helpers/ByteHelpers.cs b/Robust.Cdn/Helpers/ByteHelpers.cs new file mode 100644 index 0000000..705bc87 --- /dev/null +++ b/Robust.Cdn/Helpers/ByteHelpers.cs @@ -0,0 +1,29 @@ +namespace Robust.Cdn.Helpers; + +public static class ByteHelpers +{ + public static string FormatBytes(long bytes) + { + double d = bytes; + var i = 0; + for (; i < ByteSuffixes.Length && d >= 1024; i++) + { + d /= 1024; + } + + return $"{Math.Round(d, 2)} {ByteSuffixes[i]}"; + } + + private static readonly string[] ByteSuffixes = + [ + "B", + "KiB", + "MiB", + "GiB", + "TiB", + "PiB", + "EiB", + "ZiB", + "YiB" + ]; +} diff --git a/Robust.Cdn/Helpers/ForkAuthHelper.cs b/Robust.Cdn/Helpers/ForkAuthHelper.cs new file mode 100644 index 0000000..44b09d3 --- /dev/null +++ b/Robust.Cdn/Helpers/ForkAuthHelper.cs @@ -0,0 +1,60 @@ +using System.Diagnostics.CodeAnalysis; +using System.Runtime.InteropServices; +using System.Security.Cryptography; +using Microsoft.AspNetCore.Mvc; +using Microsoft.Extensions.Options; +using Robust.Cdn.Config; + +namespace Robust.Cdn.Helpers; + +public sealed class ForkAuthHelper(IHttpContextAccessor accessor, IOptions options) +{ + public bool IsAuthValid( + string fork, + [NotNullWhen(true)] out ManifestForkOptions? forkConfig, + [NotNullWhen(false)] out IActionResult? failureResult) + { + if (!options.Value.Forks.TryGetValue(fork, out forkConfig)) + { + failureResult = new NotFoundResult(); + return false; + } + + var context = accessor.HttpContext ?? throw new InvalidOperationException("Unable to get HttpContext"); + + var authHeader = context.Request.Headers.Authorization; + if (authHeader.Count == 0) + { + failureResult = new UnauthorizedResult(); + return false; + } + + var auth = authHeader[0]; + + // Idk does using Bearer: make sense here? 
+ if (auth == null || !auth.StartsWith("Bearer ")) + { + failureResult = new UnauthorizedObjectResult("Need Bearer: auth type"); + return false; + } + + var token = auth["Bearer ".Length..]; + + var matches = StringsEqual(token, forkConfig.UpdateToken); + if (!matches) + { + failureResult = new UnauthorizedObjectResult("Incorrect token"); + return false; + } + + failureResult = null; + return true; + } + + private static bool StringsEqual(ReadOnlySpan a, ReadOnlySpan b) + { + return CryptographicOperations.FixedTimeEquals( + MemoryMarshal.AsBytes(a), + MemoryMarshal.AsBytes(b)); + } +} diff --git a/Robust.Cdn/Helpers/HashHelper.cs b/Robust.Cdn/Helpers/HashHelper.cs new file mode 100644 index 0000000..8404f4d --- /dev/null +++ b/Robust.Cdn/Helpers/HashHelper.cs @@ -0,0 +1,41 @@ +using System.Buffers; +using System.Runtime.InteropServices; +using SpaceWizards.Sodium; + +namespace Robust.Cdn.Helpers; + +internal static class HashHelper +{ + public static unsafe byte[] HashBlake2B(Stream stream) + { + var statePointer = NativeMemory.AlignedAlloc((UIntPtr)sizeof(CryptoGenericHashBlake2B.State), 64); + ref var state = ref *(CryptoGenericHashBlake2B.State*)statePointer; + + var buffer = ArrayPool.Shared.Rent(4096); + + var result = new byte[CryptoGenericHashBlake2B.Bytes]; + + try + { + CryptoGenericHashBlake2B.Init(ref state, ReadOnlySpan.Empty, result.Length); + + while (true) + { + var readAmount = stream.Read(buffer); + if (readAmount == 0) + break; + + var readData = buffer.AsSpan(0, readAmount); + CryptoGenericHashBlake2B.Update(ref state, readData); + } + + CryptoGenericHashBlake2B.Final(ref state, result); + return result; + } + finally + { + NativeMemory.AlignedFree(statePointer); + ArrayPool.Shared.Return(buffer); + } + } +} diff --git a/Robust.Cdn/Helpers/ZStd.cs b/Robust.Cdn/Helpers/ZStd.cs deleted file mode 100644 index 6037059..0000000 --- a/Robust.Cdn/Helpers/ZStd.cs +++ /dev/null @@ -1,543 +0,0 @@ -using System.Buffers; -using 
System.Runtime.InteropServices; -using System.Runtime.Serialization; -using SharpZstd.Interop; -using static SharpZstd.Interop.Zstd; - -namespace Robust.Cdn.Helpers; - -public static class ZStd -{ - public static int CompressBound(int length) - { - return (int)ZSTD_COMPRESSBOUND((nuint)length); - } - - public static unsafe int Compress( - Span into, - ReadOnlySpan data, - int compressionLevel = ZSTD_CLEVEL_DEFAULT) - { - fixed (byte* dst = into) - fixed (byte* src = data) - { - var result = ZSTD_compress(dst, (nuint)into.Length, src, (nuint)data.Length, compressionLevel); - ZStdException.ThrowIfError(result); - return (int)result; - } - } -} - -[Serializable] -internal sealed class ZStdException : Exception -{ - public ZStdException() - { - } - - public ZStdException(string message) : base(message) - { - } - - public ZStdException(string message, Exception inner) : base(message, inner) - { - } - - public static unsafe ZStdException FromCode(nuint code) - { - return new ZStdException(Marshal.PtrToStringUTF8((IntPtr)ZSTD_getErrorName(code))!); - } - - public static void ThrowIfError(nuint code) - { - if (ZSTD_isError(code) != 0) - throw FromCode(code); - } -} - -public sealed unsafe class ZStdCompressionContext : IDisposable -{ - public ZSTD_CCtx* Context { get; private set; } - - private bool Disposed => Context == null; - - public ZStdCompressionContext() - { - Context = ZSTD_createCCtx(); - } - - public void SetParameter(ZSTD_cParameter parameter, int value) - { - CheckDisposed(); - - ZSTD_CCtx_setParameter(Context, parameter, value); - } - - public int Compress(Span destination, Span source, int compressionLevel = ZSTD_CLEVEL_DEFAULT) - { - CheckDisposed(); - - fixed (byte* dst = destination) - fixed (byte* src = source) - { - var ret = ZSTD_compressCCtx( - Context, - dst, (nuint)destination.Length, - src, (nuint)source.Length, - compressionLevel); - - ZStdException.ThrowIfError(ret); - return (int)ret; - } - } - - ~ZStdCompressionContext() - { - Dispose(); - } - 
- public void Dispose() - { - if (Disposed) - return; - - ZSTD_freeCCtx(Context); - Context = null; - GC.SuppressFinalize(this); - } - - private void CheckDisposed() - { - if (Disposed) - throw new ObjectDisposedException(nameof(ZStdCompressionContext)); - } -} - -internal sealed class ZStdDecompressStream : Stream -{ - private readonly Stream _baseStream; - private readonly bool _ownStream; - private readonly unsafe ZSTD_DCtx* _ctx; - private readonly byte[] _buffer; - private int _bufferPos; - private int _bufferSize; - private bool _disposed; - - public unsafe ZStdDecompressStream(Stream baseStream, bool ownStream = true) - { - _baseStream = baseStream; - _ownStream = ownStream; - _ctx = ZSTD_createDCtx(); - _buffer = ArrayPool.Shared.Rent((int)ZSTD_DStreamInSize()); - } - - protected override unsafe void Dispose(bool disposing) - { - if (_disposed) - return; - - _disposed = true; - ZSTD_freeDCtx(_ctx); - - if (disposing) - { - if (_ownStream) - _baseStream.Dispose(); - - ArrayPool.Shared.Return(_buffer); - } - } - - public override void Flush() - { - ThrowIfDisposed(); - _baseStream.Flush(); - } - - public override int Read(byte[] buffer, int offset, int count) - { - return Read(buffer.AsSpan(offset, count)); - } - - public override int ReadByte() - { - Span buf = stackalloc byte[1]; - return Read(buf) == 0 ? 
-1 : buf[0]; - } - - public override unsafe int Read(Span buffer) - { - ThrowIfDisposed(); - do - { - if (_bufferSize == 0 || _bufferPos == _bufferSize) - { - _bufferPos = 0; - _bufferSize = _baseStream.Read(_buffer); - - if (_bufferSize == 0) - return 0; - } - - fixed (byte* inputPtr = _buffer) - fixed (byte* outputPtr = buffer) - { - var outputBuf = new ZSTD_outBuffer { dst = outputPtr, pos = 0, size = (nuint)buffer.Length }; - var inputBuf = new ZSTD_inBuffer { src = inputPtr, pos = (nuint)_bufferPos, size = (nuint)_bufferSize }; - var ret = ZSTD_decompressStream(_ctx, &outputBuf, &inputBuf); - - _bufferPos = (int)inputBuf.pos; - ZStdException.ThrowIfError(ret); - - if (outputBuf.pos > 0) - return (int)outputBuf.pos; - } - } while (true); - } - - public override async ValueTask ReadAsync( - Memory buffer, - CancellationToken cancellationToken = default) - { - ThrowIfDisposed(); - do - { - if (_bufferSize == 0 || _bufferPos == _bufferSize) - { - _bufferPos = 0; - _bufferSize = await _baseStream.ReadAsync(_buffer, cancellationToken); - - if (_bufferSize == 0) - return 0; - } - - var outputPos = DecompressChunk(); - if (outputPos > 0) - return outputPos; - } while (true); - - unsafe int DecompressChunk() - { - fixed (byte* inputPtr = _buffer) - fixed (byte* outputPtr = buffer.Span) - { - ZSTD_outBuffer outputBuf = default; - outputBuf.dst = outputPtr; - outputBuf.pos = 0; - outputBuf.size = (nuint)buffer.Length; - ZSTD_inBuffer inputBuf = default; - inputBuf.src = inputPtr; - inputBuf.pos = (nuint)_bufferPos; - inputBuf.size = (nuint)_bufferSize; - - var ret = ZSTD_decompressStream(_ctx, &outputBuf, &inputBuf); - - _bufferPos = (int)inputBuf.pos; - ZStdException.ThrowIfError(ret); - - return (int)outputBuf.pos; - } - } - } - - public override long Seek(long offset, SeekOrigin origin) - { - throw new NotSupportedException(); - } - - public override void SetLength(long value) - { - throw new NotSupportedException(); - } - - public override void Write(byte[] buffer, 
int offset, int count) - { - throw new NotSupportedException(); - } - - public override bool CanRead => true; - public override bool CanSeek => false; - public override bool CanWrite => false; - public override long Length => throw new NotSupportedException(); - - public override long Position - { - get => throw new NotSupportedException(); - set => throw new NotSupportedException(); - } - - private void ThrowIfDisposed() - { - if (_disposed) - throw new ObjectDisposedException(nameof(ZStdDecompressStream)); - } -} - -internal sealed class ZStdCompressStream : Stream -{ - private readonly Stream _baseStream; - private readonly bool _ownStream; - public ZStdCompressionContext Context { get; } - private readonly byte[] _buffer; - private int _bufferPos; - private bool _disposed; - private bool _hasSession; - - public ZStdCompressStream(Stream baseStream, bool ownStream = true) - { - Context = new ZStdCompressionContext(); - _baseStream = baseStream; - _ownStream = ownStream; - _buffer = ArrayPool.Shared.Rent((int)ZSTD_CStreamOutSize()); - } - - public override void Flush() - { - FlushInternal(ZSTD_EndDirective.ZSTD_e_flush); - } - - public override async Task FlushAsync(CancellationToken cancellationToken) - { - await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_flush, cancellationToken); - } - - public void FlushEnd() - { - _hasSession = false; - FlushInternal(ZSTD_EndDirective.ZSTD_e_end); - } - - public async ValueTask FlushEndAsync() - { - _hasSession = false; - await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_end); - } - - private unsafe void FlushInternal(ZSTD_EndDirective directive) - { - fixed (byte* outPtr = _buffer) - { - ZSTD_outBuffer outBuf = default; - outBuf.size = (nuint)_buffer.Length; - outBuf.pos = (nuint)_bufferPos; - outBuf.dst = outPtr; - - ZSTD_inBuffer inBuf; - - while (true) - { - var err = ZSTD_compressStream2(Context.Context, &outBuf, &inBuf, directive); - ZStdException.ThrowIfError(err); - _bufferPos = (int)outBuf.pos; - - 
_baseStream.Write(_buffer.AsSpan(0, (int)outBuf.pos)); - _bufferPos = 0; - outBuf.pos = 0; - - if (err == 0) - break; - } - } - - _baseStream.Flush(); - } - - private async ValueTask FlushInternalAsync(ZSTD_EndDirective directive, CancellationToken cancel = default) - { - var outBufPos = (nuint)_bufferPos; - - while (true) - { - var err = FlushChunk(); - - await _baseStream.WriteAsync(_buffer.AsMemory(0, (int)outBufPos), cancel); - _bufferPos = 0; - outBufPos = 0; - - if (err == 0) - break; - } - - await _baseStream.FlushAsync(cancel); - - unsafe nuint FlushChunk() - { - fixed (byte* outPtr = _buffer) - { - ZSTD_outBuffer outBuf = default; - outBuf.size = (nuint)_buffer.Length; - outBuf.pos = outBufPos; - outBuf.dst = outPtr; - - ZSTD_inBuffer inBuf; - - var err = ZSTD_compressStream2(Context.Context, &outBuf, &inBuf, directive); - ZStdException.ThrowIfError(err); - outBufPos = outBuf.pos; - _bufferPos = (int)outBuf.pos; - - return err; - } - } - } - - public override int Read(byte[] buffer, int offset, int count) - { - throw new NotSupportedException(); - } - - public override long Seek(long offset, SeekOrigin origin) - { - throw new NotSupportedException(); - } - - public override void SetLength(long value) - { - throw new NotSupportedException(); - } - - public override void Write(byte[] buffer, int offset, int count) - { - Write(buffer.AsSpan(offset, count)); - } - - public override unsafe void Write(ReadOnlySpan buffer) - { - ThrowIfDisposed(); - - _hasSession = true; - - fixed (byte* outPtr = _buffer) - fixed (byte* inPtr = buffer) - { - ZSTD_outBuffer outBuf = default; - outBuf.size = (nuint)_buffer.Length; - outBuf.pos = (nuint)_bufferPos; - outBuf.dst = outPtr; - - ZSTD_inBuffer inBuf = default; - inBuf.pos = 0; - inBuf.size = (nuint)buffer.Length; - inBuf.src = inPtr; - - while (true) - { - var err = ZSTD_compressStream2(Context.Context, &outBuf, &inBuf, ZSTD_EndDirective.ZSTD_e_continue); - ZStdException.ThrowIfError(err); - _bufferPos = (int)outBuf.pos; 
- - if (inBuf.pos >= inBuf.size) - break; - - // Not all input data consumed. Flush output buffer and continue. - _baseStream.Write(_buffer.AsSpan(0, (int)outBuf.pos)); - _bufferPos = 0; - outBuf.pos = 0; - } - } - } - - public override async ValueTask WriteAsync( - ReadOnlyMemory buffer, - CancellationToken cancellationToken = default) - { - ThrowIfDisposed(); - - _hasSession = true; - - var inBufPos = (nuint)0; - var inBufSize = (nuint)buffer.Length; - var outBufPos = (nuint)_bufferPos; - - while (true) - { - CompressChunk(); - - if (inBufPos >= inBufSize) - break; - - // Not all input data consumed. Flush output buffer and continue. - await _baseStream.WriteAsync(_buffer.AsMemory(0, (int)outBufPos), cancellationToken); - _bufferPos = 0; - outBufPos = 0; - } - - unsafe void CompressChunk() - { - fixed (byte* outPtr = _buffer) - fixed (byte* inPtr = buffer.Span) - { - ZSTD_outBuffer outBuf = default; - outBuf.size = (nuint)_buffer.Length; - outBuf.pos = outBufPos; - - ZSTD_inBuffer inBuf = default; - inBuf.pos = inBufPos; - inBuf.size = inBufSize; - - outBuf.dst = outPtr; - inBuf.src = inPtr; - - var err = ZSTD_compressStream2(Context.Context, &outBuf, &inBuf, ZSTD_EndDirective.ZSTD_e_continue); - ZStdException.ThrowIfError(err); - _bufferPos = (int)outBuf.pos; - outBufPos = outBuf.pos; - inBufPos = inBuf.pos; - } - } - } - - public override bool CanRead => false; - public override bool CanSeek => false; - public override bool CanWrite => true; - public override long Length => throw new NotSupportedException(); - - public override long Position - { - get => throw new NotSupportedException(); - set => throw new NotSupportedException(); - } - - protected override void Dispose(bool disposing) - { - if (_disposed) - return; - - if (disposing) - { - if (_hasSession) - FlushEnd(); - - if (_ownStream) - _baseStream.Dispose(); - - ArrayPool.Shared.Return(_buffer); - Context.Dispose(); - } - - _disposed = true; - } - - public override async ValueTask DisposeAsync() - { - 
if (_disposed) - return; - - _disposed = true; - - if (_hasSession) - await FlushEndAsync(); - - if (_ownStream) - await _baseStream.DisposeAsync(); - - ArrayPool.Shared.Return(_buffer); - Context.Dispose(); - } - - private void ThrowIfDisposed() - { - if (_disposed) - throw new ObjectDisposedException(nameof(ZStdCompressStream)); - } -} diff --git a/Robust.Cdn/Services/DataLoader.cs b/Robust.Cdn/Jobs/IngestNewCdnContentJob.cs similarity index 68% rename from Robust.Cdn/Services/DataLoader.cs rename to Robust.Cdn/Jobs/IngestNewCdnContentJob.cs index d669cbb..9b38cbb 100644 --- a/Robust.Cdn/Services/DataLoader.cs +++ b/Robust.Cdn/Jobs/IngestNewCdnContentJob.cs @@ -1,79 +1,94 @@ using System.Buffers; using System.IO.Compression; using System.Text; -using System.Threading.Channels; using Dapper; using Microsoft.Data.Sqlite; using Microsoft.Extensions.Options; +using Quartz; +using Robust.Cdn.Config; using Robust.Cdn.Helpers; +using Robust.Cdn.Lib; using SpaceWizards.Sodium; using SQLitePCL; -namespace Robust.Cdn.Services; +namespace Robust.Cdn.Jobs; -public sealed class DataLoader : BackgroundService +[DisallowConcurrentExecution] +public sealed class IngestNewCdnContentJob( + Database cdnDatabase, + IOptions cdnOptions, + IOptions manifestOptions, + ISchedulerFactory schedulerFactory, + BuildDirectoryManager buildDirectoryManager, + ILogger logger) : IJob { - private readonly IServiceScopeFactory _scopeFactory; - private readonly IOptions _options; - private readonly ILogger _logger; + public static readonly JobKey Key = new(nameof(IngestNewCdnContentJob)); + public const string KeyForkName = "ForkName"; - private readonly ChannelReader _channelReader; - private readonly ChannelWriter _channelWriter; - - public DataLoader(IServiceScopeFactory scopeFactory, IOptions options, ILogger logger) + public static JobDataMap Data(string fork) => new() { - _scopeFactory = scopeFactory; - _options = options; - _logger = logger; - - var channel = Channel.CreateBounded(new 
BoundedChannelOptions(1) { SingleReader = true }); - _channelReader = channel.Reader; - _channelWriter = channel.Writer; - } + { KeyForkName, fork } + }; - public async ValueTask QueueUpdateVersions() + public async Task Execute(IJobExecutionContext context) { - await _channelWriter.WriteAsync(null); - } + var fork = context.MergedJobDataMap.GetString(KeyForkName) ?? throw new InvalidDataException(); - protected override async Task ExecuteAsync(CancellationToken stoppingToken) - { - _channelWriter.TryWrite(null); + logger.LogInformation("Ingesting new versions for fork: {Fork}", fork); + + var forkConfig = manifestOptions.Value.Forks[fork]; - // Idk if there's a better way but make sure we don't hold up startup. - await Task.Delay(1000, stoppingToken); + var connection = cdnDatabase.Connection; + var transaction = connection.BeginTransaction(); - while (true) + List newVersions; + try { - await _channelReader.ReadAsync(stoppingToken); + newVersions = FindNewVersions(fork, connection); - _logger.LogInformation("Updating versions"); + if (newVersions.Count == 0) + return; - try - { - Update(stoppingToken); - } - catch (Exception e) - { - _logger.LogError(e, "Error while loading new content versions"); - } + IngestNewVersions( + fork, + connection, + newVersions, + ref transaction, + forkConfig, + context.CancellationToken); + + logger.LogDebug("Committing database"); + + transaction.Commit(); + } + finally + { + transaction.Dispose(); } - // ReSharper disable once FunctionNeverReturns + await QueueManifestAvailable(fork, newVersions); } - private void Update(CancellationToken cancel) + private async Task QueueManifestAvailable(string fork, IEnumerable newVersions) { - using var scope = _scopeFactory.CreateScope(); - - var options = _options.Value; - var connection = scope.ServiceProvider.GetRequiredService().Connection; - var transaction = connection.BeginTransaction(); + var scheduler = await schedulerFactory.GetScheduler(); + await scheduler.TriggerJob( + 
MakeNewManifestVersionsAvailableJob.Key, + MakeNewManifestVersionsAvailableJob.Data(fork, newVersions)); + } - var newVersions = FindNewVersions(connection); + private void IngestNewVersions( + string fork, + SqliteConnection connection, + List newVersions, + ref SqliteTransaction transaction, + ManifestForkOptions forkConfig, + CancellationToken cancel) + { + var cdnOpts = cdnOptions.Value; + var manifestOpts = manifestOptions.Value; - if (newVersions.Count == 0) - return; + var forkId = EnsureForkCreated(fork, connection); using var stmtLookupContent = connection.Handle!.Prepare("SELECT Id FROM Content WHERE Hash = ?"); using var stmtInsertContent = connection.Handle!.Prepare( @@ -100,7 +115,7 @@ private void Update(CancellationToken cancel) { if (versionIdx % 5 == 0) { - _logger.LogDebug("Doing interim commit"); + logger.LogDebug("Doing interim commit"); blob?.Dispose(); blob = null; @@ -111,17 +126,20 @@ private void Update(CancellationToken cancel) cancel.ThrowIfCancellationRequested(); - _logger.LogInformation("Ingesting new version: {Version}", version); + logger.LogInformation("Ingesting new version: {Version}", version); var versionId = connection.ExecuteScalar( - "INSERT INTO ContentVersion (Version, TimeAdded, ManifestHash, ManifestData, CountDistinctBlobs) " + - "VALUES (@Version, datetime('now'), zeroblob(0), zeroblob(0), 0) " + + "INSERT INTO ContentVersion (ForkId, Version, TimeAdded, ManifestHash, ManifestData, CountDistinctBlobs) " + + "VALUES (@ForkId, @Version, datetime('now'), zeroblob(0), zeroblob(0), 0) " + "RETURNING Id", - new { Version = version }); + new { Version = version, ForkId = forkId }); stmtInsertContentManifestEntry.BindInt64(1, versionId); - var zipFilePath = Path.Combine(options.VersionDiskPath, version, options.ClientZipName); + var zipFilePath = buildDirectoryManager.GetBuildVersionFilePath( + fork, + version, + forkConfig.ClientZipName + ".zip"); using var zipFile = ZipFile.OpenRead(zipFilePath); @@ -169,7 +187,7 @@ private 
void Update(CancellationToken cancel) var compression = ContentCompression.None; // Try compression maybe. - if (options.BlobCompress) + if (cdnOpts.BlobCompress) { BufferHelpers.EnsurePooledBuffer( ref compressBuffer, @@ -179,9 +197,9 @@ private void Update(CancellationToken cancel) var compressedLength = compressor.Compress( compressBuffer, readData, - options.BlobCompressLevel); + cdnOpts.BlobCompressLevel); - if (compressedLength + options.BlobCompressSavingsThreshold < dataLength) + if (compressedLength + cdnOpts.BlobCompressSavingsThreshold < dataLength) { compression = ContentCompression.ZStd; writeData = compressBuffer.AsSpan(0, compressedLength); @@ -246,7 +264,7 @@ private void Update(CancellationToken cancel) idx += 1; } - _logger.LogDebug("Ingested {NewBlobCount} new blobs", newBlobCount); + logger.LogDebug("Ingested {NewBlobCount} new blobs", newBlobCount); // Handle manifest hashing and compression. { @@ -257,7 +275,7 @@ private void Update(CancellationToken cancel) var manifestHash = CryptoGenericHashBlake2B.Hash(32, manifestData, ReadOnlySpan.Empty); - _logger.LogDebug("New manifest hash: {ManifestHash}", Convert.ToHexString(manifestHash)); + logger.LogDebug("New manifest hash: {ManifestHash}", Convert.ToHexString(manifestHash)); BufferHelpers.EnsurePooledBuffer( ref compressBuffer, @@ -267,7 +285,7 @@ private void Update(CancellationToken cancel) var compressedLength = compressor.Compress( compressBuffer, manifestData, - options.ManifestCompressLevel); + cdnOpts.ManifestCompressLevel); var compressedData = compressBuffer.AsSpan(0, compressedLength); @@ -313,26 +331,25 @@ private void Update(CancellationToken cancel) ArrayPool.Shared.Return(readBuffer); ArrayPool.Shared.Return(compressBuffer); } - - _logger.LogDebug("Committing database"); - - transaction.Commit(); - - GC.Collect(); } - private List FindNewVersions(SqliteConnection con) + private List FindNewVersions(string fork, SqliteConnection con) { using var stmtCheckVersion = 
con.Handle!.Prepare("SELECT 1 FROM ContentVersion WHERE Version = ?"); var newVersions = new List<(string, DateTime)>(); - foreach (var versionDirectory in Directory.EnumerateDirectories(_options.Value.VersionDiskPath)) + var dir = buildDirectoryManager.GetForkPath(fork); + if (!Directory.Exists(dir)) + return []; + + foreach (var versionDirectory in Directory.EnumerateDirectories(dir)) { var createdTime = Directory.GetLastWriteTime(versionDirectory); var version = Path.GetFileName(versionDirectory); - _logger.LogTrace("Found version directory: {VersionDir}, write time: {WriteTime}", versionDirectory, createdTime); + logger.LogTrace("Found version directory: {VersionDir}, write time: {WriteTime}", versionDirectory, + createdTime); stmtCheckVersion.Reset(); stmtCheckVersion.BindString(1, version); @@ -340,20 +357,35 @@ private List FindNewVersions(SqliteConnection con) if (stmtCheckVersion.Step() == raw.SQLITE_ROW) { // Already have version, skip. - _logger.LogTrace("Already have version: {Version}", version); + logger.LogTrace("Already have version: {Version}", version); continue; } - if (!File.Exists(Path.Combine(versionDirectory, _options.Value.ClientZipName))) + var clientZipName = manifestOptions.Value.Forks[fork].ClientZipName + ".zip"; + + if (!File.Exists(Path.Combine(versionDirectory, clientZipName))) { - _logger.LogWarning("On-disk version is missing client zip: {Version}", version); + logger.LogWarning("On-disk version is missing client zip: {Version}", version); continue; } newVersions.Add((version, createdTime)); - _logger.LogTrace("Found new version: {Version}", version); + logger.LogTrace("Found new version: {Version}", version); } return newVersions.OrderByDescending(x => x.Item2).Select(x => x.Item1).ToList(); } + + private static int EnsureForkCreated(string fork, SqliteConnection connection) + { + var id = connection.QuerySingleOrDefault( + "SELECT Id FROM Fork WHERE Name = @Name", + new { Name = fork }); + + id ??= connection.QuerySingle( + 
"INSERT INTO Fork (Name) VALUES (@Name) RETURNING Id", + new { Name = fork }); + + return id.Value; + } } diff --git a/Robust.Cdn/Jobs/MakeNewManifestVersionsAvailableJob.cs b/Robust.Cdn/Jobs/MakeNewManifestVersionsAvailableJob.cs new file mode 100644 index 0000000..3463d82 --- /dev/null +++ b/Robust.Cdn/Jobs/MakeNewManifestVersionsAvailableJob.cs @@ -0,0 +1,74 @@ +using System.Text.Json; +using Dapper; +using Quartz; +using Robust.Cdn.Helpers; + +namespace Robust.Cdn.Jobs; + +public sealed class MakeNewManifestVersionsAvailableJob( + ManifestDatabase database, + ISchedulerFactory factory, + ILogger logger) : IJob +{ + private static readonly JsonSerializerOptions ManifestCacheContext = new() + { + PropertyNamingPolicy = JsonNamingPolicy.CamelCase + }; + + public static readonly JobKey Key = new(nameof(MakeNewManifestVersionsAvailableJob)); + + public const string KeyForkName = "ForkName"; + public const string KeyVersions = "Versions"; + + public static JobDataMap Data(string fork, IEnumerable versions) => new() + { + { KeyForkName, fork }, + { KeyVersions, versions.ToArray() }, + }; + + public async Task Execute(IJobExecutionContext context) + { + var fork = context.MergedJobDataMap.GetString(KeyForkName) ?? throw new InvalidDataException(); + var versions = (string[])context.MergedJobDataMap.Get(KeyVersions) ?? 
throw new InvalidDataException(); + + logger.LogInformation( + "Updating version availability for manifest fork {Fork}, {VersionCount} new versions", + fork, + versions.Length); + + using var tx = database.Connection.BeginTransaction(); + + var forkId = database.Connection.QuerySingle( + "SELECT Id FROM Fork WHERE Name = @ForkName", + new { ForkName = fork }); + + MakeVersionsAvailable(forkId, versions); + + tx.Commit(); + + var scheduler = await factory.GetScheduler(); + await scheduler.TriggerJob( + UpdateForkManifestJob.Key, + UpdateForkManifestJob.Data(fork, notifyUpdate: true)); + } + + private void MakeVersionsAvailable(int forkId, IEnumerable versions) + { + foreach (var version in versions) + { + logger.LogInformation("New available version: {Version}", version); + + database.Connection.Execute(""" + UPDATE ForkVersion + SET Available = TRUE + WHERE Name = @Name + AND ForkId = @ForkId + """, + new + { + Name = version, + ForkId = forkId + }); + } + } +} diff --git a/Robust.Cdn/Jobs/NotifyWatchdogUpdateJob.cs b/Robust.Cdn/Jobs/NotifyWatchdogUpdateJob.cs new file mode 100644 index 0000000..70227ab --- /dev/null +++ b/Robust.Cdn/Jobs/NotifyWatchdogUpdateJob.cs @@ -0,0 +1,94 @@ +using System.Net.Http.Headers; +using System.Text; +using Microsoft.Extensions.Options; +using Quartz; +using Robust.Cdn.Config; + +namespace Robust.Cdn.Jobs; + +/// +/// Job responsible for notifying SS14.Watchdog instances that a new update is available. +/// +/// +/// This job is triggered by . 
+/// +public sealed class NotifyWatchdogUpdateJob( + IHttpClientFactory httpClientFactory, + ILogger logger, + IOptions manifestOptions) : IJob +{ + public static readonly JobKey Key = new(nameof(NotifyWatchdogUpdateJob)); + + public const string KeyForkName = "ForkName"; + + public const string HttpClientName = "NotifyWatchdogUpdateJob"; + + public static JobDataMap Data(string fork) => new() + { + { KeyForkName, fork }, + }; + + public async Task Execute(IJobExecutionContext context) + { + var fork = context.MergedJobDataMap.GetString(KeyForkName) ?? throw new InvalidDataException(); + var config = manifestOptions.Value.Forks[fork]; + + if (config.NotifyWatchdogs.Length == 0) + return; + + logger.LogInformation("Notifying watchdogs of update for fork {Fork}", fork); + + var httpClient = httpClientFactory.CreateClient(HttpClientName); + + await Task.WhenAll( + config.NotifyWatchdogs.Select(notify => SendNotify(notify, httpClient, context.CancellationToken))); + } + + private async Task SendNotify( + ManifestForkNotifyWatchdog watchdog, + HttpClient client, + CancellationToken cancel) + { + logger.LogDebug( + "Sending watchdog update notify to {WatchdogUrl} instance {Instance}", + watchdog.WatchdogUrl, + watchdog.Instance); + + var url = NormalizeTrailingSlash(watchdog.WatchdogUrl) + $"instances/{watchdog.Instance}/update"; + var request = new HttpRequestMessage(HttpMethod.Post, url); + request.Headers.Authorization = new AuthenticationHeaderValue( + "Basic", + FormatBasicAuth(watchdog.Instance, watchdog.ApiToken)); + + try + { + using var response = await client.SendAsync(request, cancel); + + if (!response.IsSuccessStatusCode) + { + var responseContent = await response.Content.ReadAsStringAsync(cancel); + logger.LogWarning( + "Update notify to {WatchdogUrl} instance {Instance} did not indicate success ({Status}): {ResponseContent}", + watchdog.WatchdogUrl, watchdog.Instance, response.StatusCode, responseContent); + } + } + catch (Exception e) + { + 
logger.LogWarning( + e, + "Error while notifying watchdog {WatchdogUrl} instance {Instance} of update", + watchdog.WatchdogUrl, + watchdog.Instance); + } + } + + private static string NormalizeTrailingSlash(string url) + { + return url.EndsWith('/') ? url : url + '/'; + } + + private static string FormatBasicAuth(string user, string password) + { + return Convert.ToBase64String(Encoding.UTF8.GetBytes($"{user}:{password}")); + } +} diff --git a/Robust.Cdn/Jobs/PruneOldManifestBuilds.cs b/Robust.Cdn/Jobs/PruneOldManifestBuilds.cs new file mode 100644 index 0000000..0ba4e06 --- /dev/null +++ b/Robust.Cdn/Jobs/PruneOldManifestBuilds.cs @@ -0,0 +1,99 @@ +using Dapper; +using Microsoft.Extensions.Options; +using Quartz; +using Robust.Cdn.Config; + +namespace Robust.Cdn.Jobs; + +/// +/// Job that periodically goes through and deletes old manifest builds. +/// +/// +/// This job gets ran every 24 hours automatically. +/// +/// +public sealed class PruneOldManifestBuilds( + ManifestDatabase manifestDatabase, + IOptions options, + BuildDirectoryManager buildDirectoryManager, + ISchedulerFactory schedulerFactory, + ILogger logger) : IJob +{ + public async Task Execute(IJobExecutionContext context) + { + var opts = options.Value; + + logger.LogInformation("Pruning old manifest builds"); + + var totalPruned = 0; + var scheduler = await schedulerFactory.GetScheduler(); + + foreach (var (forkName, forkConfig) in opts.Forks) + { + context.CancellationToken.ThrowIfCancellationRequested(); + + var forkPruned = PruneFork(forkName, forkConfig, context.CancellationToken); + totalPruned += forkPruned; + + if (forkPruned > 0) + { + await scheduler.TriggerJob( + UpdateForkManifestJob.Key, + UpdateForkManifestJob.Data(forkName)); + } + } + + logger.LogInformation("Pruned {Pruned} old manifest builds", totalPruned); + } + + private int PruneFork(string forkName, ManifestForkOptions forkConfig, CancellationToken cancel) + { + if (forkConfig.PruneBuildsDays == 0) + { + logger.LogDebug("Not 
pruning fork {Fork}: pruning is disabled", forkConfig.PruneBuildsDays); + return 0; + } + + logger.LogDebug("Pruning old manifest builds for fork {Fork}", forkName); + + var pruneFrom = DateTime.UtcNow - TimeSpan.FromDays(forkConfig.PruneBuildsDays); + + var builds = manifestDatabase.Connection.Query(""" + SELECT FV.Id, FV.Name + FROM ForkVersion FV, Fork + WHERE FV.ForkId = Fork.Id + AND Fork.Name = @ForkName + AND FV.PublishedTime < @PruneFrom + """, new { ForkName = forkName, PruneFrom = pruneFrom }); + + var total = 0; + foreach (var versionData in builds) + { + cancel.ThrowIfCancellationRequested(); + logger.LogDebug("Pruning fork version {Version}", versionData.Name); + + var directory = buildDirectoryManager.GetBuildVersionPath(forkName, versionData.Name); + + if (Directory.Exists(directory)) + { + Directory.Delete(directory, recursive: true); + logger.LogTrace("Version directory deleted: {Directory}", directory); + } + else + { + logger.LogTrace("Version directory didn't exist when cleaning it up ({Directory})", directory); + } + + manifestDatabase.Connection.Execute("DELETE FROM ForkVersion WHERE Id = @Id", versionData); + total += 1; + } + + return total; + } + + private sealed class VersionData + { + public required int Id { get; set; } + public required string Name { get; set; } + } +} diff --git a/Robust.Cdn/Jobs/UpdateForkManifestJob.cs b/Robust.Cdn/Jobs/UpdateForkManifestJob.cs new file mode 100644 index 0000000..0d90bed --- /dev/null +++ b/Robust.Cdn/Jobs/UpdateForkManifestJob.cs @@ -0,0 +1,140 @@ +using System.Text.Json; +using System.Text.Json.Serialization; +using Dapper; +using Quartz; +using Robust.Cdn.Helpers; + +namespace Robust.Cdn.Jobs; + +/// +/// Updates the cached server manifest for a fork. 
+/// +public sealed class UpdateForkManifestJob( + ManifestDatabase database, + BaseUrlManager baseUrlManager, + ISchedulerFactory schedulerFactory, + ILogger logger) : IJob +{ + private static readonly JsonSerializerOptions ManifestCacheContext = new() + { + PropertyNamingPolicy = JsonNamingPolicy.CamelCase + }; + + public static readonly JobKey Key = new(nameof(UpdateForkManifestJob)); + + public const string KeyForkName = "ForkName"; + public const string KeyNotifyUpdate = "NotifyUpdate"; + + public static JobDataMap Data(string fork, bool notifyUpdate = false) => new() + { + { KeyForkName, fork }, + { KeyNotifyUpdate, notifyUpdate } + }; + + public async Task Execute(IJobExecutionContext context) + { + var fork = context.MergedJobDataMap.GetString(KeyForkName) ?? throw new InvalidDataException(); + var notifyUpdate = context.MergedJobDataMap.GetBooleanValue(KeyNotifyUpdate); + + var forkId = database.Connection.QuerySingle( + "SELECT Id FROM Fork WHERE Name = @ForkName", + new { ForkName = fork }); + + logger.LogInformation("Updating manifest cache for fork {Fork}", fork); + + UpdateServerManifestCache(fork, forkId); + + if (notifyUpdate) + await QueueNotifyWatchdogUpdate(fork); + } + + private void UpdateServerManifestCache(string fork, int forkId) + { + var data = CollectManifestData(fork, forkId); + var bytes = JsonSerializer.SerializeToUtf8Bytes(data, ManifestCacheContext); + + database.Connection.Execute("UPDATE Fork SET ServerManifestCache = @Data WHERE Id = @ForkId", + new + { + Data = bytes, + ForkId = forkId + }); + } + + private ManifestData CollectManifestData(string fork, int forkId) + { + var data = new ManifestData { Builds = new Dictionary() }; + + var versions = database.Connection + .Query<(int id, string name, DateTime time, string clientFileName, byte[] clientSha256)>( + """ + SELECT Id, Name, PublishedTime, ClientFileName, ClientSha256 + FROM ForkVersion + WHERE Available AND ForkId = @ForkId + """, + new { ForkId = forkId }); + + foreach 
(var version in versions) + { + var buildData = new ManifestBuildData + { + Time = DateTime.SpecifyKind(version.time, DateTimeKind.Utc), + Client = new ManifestArtifact + { + Url = baseUrlManager.MakeBuildInfoUrl( + $"fork/{fork}/version/{version.name}/file/{version.clientFileName}"), + Sha256 = Convert.ToHexString(version.clientSha256) + }, + Server = new Dictionary() + }; + + var servers = database.Connection.Query<(string platform, string fileName, byte[] sha256, long? size)>(""" + SELECT Platform, FileName, Sha256, FileSize + FROM ForkVersionServerBuild + WHERE ForkVersionId = @ForkVersionId + """, new { ForkVersionId = version.id }); + + foreach (var (platform, fileName, sha256, size) in servers) + { + buildData.Server.Add(platform, new ManifestArtifact + { + Url = baseUrlManager.MakeBuildInfoUrl($"fork/{fork}/version/{version.name}/file/{fileName}"), + Sha256 = Convert.ToHexString(sha256), + Size = size + }); + } + + data.Builds.Add(version.name, buildData); + } + + return data; + } + + private async Task QueueNotifyWatchdogUpdate(string fork) + { + var scheduler = await schedulerFactory.GetScheduler(); + await scheduler.TriggerJob( + NotifyWatchdogUpdateJob.Key, + NotifyWatchdogUpdateJob.Data(fork)); + } + + private sealed class ManifestData + { + public required Dictionary Builds { get; set; } + } + + private sealed class ManifestBuildData + { + public DateTime Time { get; set; } + public required ManifestArtifact Client { get; set; } + public required Dictionary Server { get; set; } + } + + private sealed class ManifestArtifact + { + public required string Url { get; set; } + public required string Sha256 { get; set; } + [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)] + public long? 
Size { get; set; } + } +} diff --git a/Robust.Cdn/ManifestMigrations/Script0001_Init.sql b/Robust.Cdn/ManifestMigrations/Script0001_Init.sql new file mode 100644 index 0000000..3c0da88 --- /dev/null +++ b/Robust.Cdn/ManifestMigrations/Script0001_Init.sql @@ -0,0 +1,61 @@ +-- Stores a single fork managed by the CDN. +CREATE TABLE Fork( + Id INTEGER PRIMARY KEY, + -- Name as used in build and configuration files. + Name TEXT NOT NULL UNIQUE, + + -- A cache of the manifest.json content used by the watchdog. + -- Just contains raw JSON encoded data. + ServerManifestCache BLOB NULL +); + +-- A single stored version of a fork. +CREATE TABLE ForkVersion( + Id INTEGER PRIMARY KEY, + + -- The name of the version itself. + Name TEXT NOT NULL, + + -- The ID of the fork this version is on. + ForkId INTEGER NOT NULL REFERENCES Fork(Id) ON DELETE CASCADE, + + -- The time when this version was published. + PublishedTime DATETIME NOT NULL, + + -- The file name of the fork's client zip file in the version files. + ClientFileName TEXT NOT NULL, + -- SHA256 hash of the above file. + ClientSha256 BLOB NOT NULL, + + -- Not strictly necessary, but I'll save it here anyways. + EngineVersion TEXT NOT NULL, + + -- Whether this version is available for servers to download. + -- This is updated after CDN content ingestion finishes. + Available BOOLEAN DEFAULT(FALSE), + + -- Make sure version names are unique. + UNIQUE (ForkId, Name) +); + +-- A single stored server build for a fork version. +CREATE TABLE ForkVersionServerBuild( + Id INTEGER PRIMARY KEY, + + -- Version that this build is for. + ForkVersionId INTEGER NOT NULL REFERENCES ForkVersion(Id) ON DELETE CASCADE, + + -- The platform (.NET RID) for this server build. + Platform TEXT NOT NULL, + + -- The file name of the server build. + FileName TEXT NOT NULL, + -- SHA256 hash of the above file. + Sha256 BLOB NOT NULL, + + -- Can't have multiple builds on the same platform per version. 
+ UNIQUE (ForkVersionId, Platform), + + -- Can't have multiple builds with the same file name per version. + UNIQUE (ForkVersionId, FileName) +); diff --git a/Robust.Cdn/ManifestMigrations/Script0002_AddBuildSize.sql b/Robust.Cdn/ManifestMigrations/Script0002_AddBuildSize.sql new file mode 100644 index 0000000..aace996 --- /dev/null +++ b/Robust.Cdn/ManifestMigrations/Script0002_AddBuildSize.sql @@ -0,0 +1 @@ +ALTER TABLE ForkVersionServerBuild ADD COLUMN FileSize INTEGER NULL; diff --git a/Robust.Cdn/Migrations/Script0002_DropRequestLog.sql b/Robust.Cdn/Migrations/Script0002_DropRequestLog.sql new file mode 100644 index 0000000..c7f16f8 --- /dev/null +++ b/Robust.Cdn/Migrations/Script0002_DropRequestLog.sql @@ -0,0 +1,3 @@ +DROP TABLE RequestLog; +DROP TABLE RequestLogBlob; + diff --git a/Robust.Cdn/Migrations/Script0003_AddFork.cs b/Robust.Cdn/Migrations/Script0003_AddFork.cs new file mode 100644 index 0000000..d88ffee --- /dev/null +++ b/Robust.Cdn/Migrations/Script0003_AddFork.cs @@ -0,0 +1,84 @@ +using Dapper; +using Microsoft.Data.Sqlite; +using Microsoft.Extensions.Options; +using Robust.Cdn.Config; + +namespace Robust.Cdn.Migrations; + +public sealed class Script0003_AddFork : Migrator.IMigrationScript +{ + public string Up(IServiceProvider services, SqliteConnection connection) + { + var options = services.GetRequiredService>().Value; + + connection.Execute(""" + CREATE TABLE Fork( + Id INTEGER PRIMARY KEY, + Name TEXT NOT NULL UNIQUE + ); + """); + + var defaultFork = options.DefaultFork; + // This default value of "0" is not used unless we insert it down below. 
+ var defaultForkId = 0; + + if (connection.QuerySingle("SELECT COUNT(*) FROM ContentVersion") > 0) + { + if (defaultFork == null) + { + throw new InvalidOperationException( + "Database has existing versions stored, need to set DefaultFork in CdnOptions to enable migration."); + } + + defaultForkId = connection.QuerySingle(""" + INSERT INTO Fork (Name) + VALUES (@Name) + RETURNING Id + """, new { Name = defaultFork }); + } + + // Re-create tables to be able to add a "Fork" + connection.Execute(""" + DROP INDEX ContentManifestEntryContentId; + ALTER TABLE ContentManifestEntry RENAME TO _ContentManifestEntry; + ALTER TABLE ContentVersion RENAME TO _ContentVersion; + + CREATE TABLE ContentManifestEntry( + VersionId INTEGER REFERENCES ContentVersion(Id) ON DELETE CASCADE, + ManifestIdx INTEGER, + + ContentId REFERENCES Content(Id) ON DELETE RESTRICT, + + PRIMARY KEY (VersionId, ManifestIdx) + ) WITHOUT ROWID; + + CREATE INDEX ContentManifestEntryContentId ON ContentManifestEntry(ContentId); + + CREATE TABLE ContentVersion( + Id INTEGER PRIMARY KEY, + ForkId INTEGER NOT NULL REFERENCES Fork(Id) ON DELETE CASCADE, + Version TEXT NOT NULL, + TimeAdded DATETIME NOT NULL, + ManifestHash BLOB NOT NULL, + ManifestData BLOB NOT NULL, + CountDistinctBlobs INTEGER NOT NULL, + + UNIQUE (ForkId, Version) + ); + + -- Transfer data from old tables. 
+ INSERT INTO ContentVersion + SELECT Id, @DefaultFork, Version, TimeAdded, ManifestHash, ManifestData, CountDistinctBlobs + FROM _ContentVersion; + + INSERT INTO ContentManifestEntry + SELECT VersionId, ManifestIdx, ContentId + FROM _ContentManifestEntry; + + DROP TABLE _ContentVersion; + DROP TABLE _ContentManifestEntry; + """, new {DefaultFork = defaultForkId}); + + return ""; + } +} diff --git a/Robust.Cdn/Migrator.cs b/Robust.Cdn/Migrator.cs index aac4855..133bbe8 100644 --- a/Robust.Cdn/Migrator.cs +++ b/Robust.Cdn/Migrator.cs @@ -9,7 +9,7 @@ namespace Robust.Cdn; /// public sealed class Migrator { - internal static bool Migrate(ILogger logger, SqliteConnection connection, string prefix) + internal static bool Migrate(IServiceProvider services, ILogger logger, SqliteConnection connection, string prefix) { logger.LogDebug("Migrating with prefix {Prefix}", prefix); @@ -38,9 +38,10 @@ Applied DATETIME NOT NULL try { - var code = script.Up(connection); + var code = script.Up(services, connection); - connection.Execute(code); + if (!string.IsNullOrWhiteSpace(code)) + connection.Execute(code); connection.Execute( "INSERT INTO SchemaVersions(ScriptName, Applied) VALUES (@Script, datetime('now'))", @@ -98,7 +99,7 @@ Applied DATETIME NOT NULL public interface IMigrationScript { - string Up(SqliteConnection connection); + string Up(IServiceProvider services, SqliteConnection connection); } private sealed class FileMigrationScript : IMigrationScript @@ -107,6 +108,6 @@ private sealed class FileMigrationScript : IMigrationScript public FileMigrationScript(string code) => _code = code; - public string Up(SqliteConnection connection) => _code; + public string Up(IServiceProvider services, SqliteConnection connection) => _code; } } diff --git a/Robust.Cdn/Program.cs b/Robust.Cdn/Program.cs index 810d53f..c31f20e 100644 --- a/Robust.Cdn/Program.cs +++ b/Robust.Cdn/Program.cs @@ -1,7 +1,12 @@ -using Dapper; +using System.Net.Http.Headers; using Microsoft.Data.Sqlite; using 
Microsoft.Extensions.Options; +using Quartz; using Robust.Cdn; +using Robust.Cdn.Config; +using Robust.Cdn.Controllers; +using Robust.Cdn.Helpers; +using Robust.Cdn.Jobs; using Robust.Cdn.Services; var builder = WebApplication.CreateBuilder(args); @@ -10,13 +15,48 @@ // Add services to the container. builder.Services.Configure(builder.Configuration.GetSection(CdnOptions.Position)); +builder.Services.Configure(builder.Configuration.GetSection(ManifestOptions.Position)); -builder.Services.AddControllers(); -builder.Services.AddSingleton(); -builder.Services.AddHostedService(services => services.GetRequiredService()); +builder.Services.AddControllersWithViews(); +builder.Services.AddScoped(); builder.Services.AddSingleton(); builder.Services.AddHostedService(services => services.GetRequiredService()); builder.Services.AddTransient(); +builder.Services.AddTransient(); +builder.Services.AddQuartz(q => +{ + q.AddJob(j => j.WithIdentity(IngestNewCdnContentJob.Key).StoreDurably()); + q.AddJob(j => + { + j.WithIdentity(MakeNewManifestVersionsAvailableJob.Key).StoreDurably(); + }); + q.AddJob(j => j.WithIdentity(NotifyWatchdogUpdateJob.Key).StoreDurably()); + q.AddJob(j => j.WithIdentity(UpdateForkManifestJob.Key).StoreDurably()); + q.ScheduleJob(trigger => trigger.WithSimpleSchedule(schedule => + { + schedule.RepeatForever().WithIntervalInHours(24); + })); +}); + +builder.Services.AddQuartzHostedService(q => +{ + q.WaitForJobsToComplete = true; +}); + +const string userAgent = "Robust.Cdn"; + +builder.Services.AddHttpClient(ForkPublishController.PublishFetchHttpClient, c => +{ + c.DefaultRequestHeaders.UserAgent.Add(new ProductInfoHeaderValue(userAgent, null)); +}); +builder.Services.AddHttpClient(NotifyWatchdogUpdateJob.HttpClientName, c => +{ + c.DefaultRequestHeaders.UserAgent.Add(new ProductInfoHeaderValue(userAgent, null)); +}); + +builder.Services.AddScoped(); +builder.Services.AddScoped(); +builder.Services.AddHttpContextAccessor(); /* // Learn more about configuring 
Swagger/OpenAPI at https://aka.ms/aspnetcore/swashbuckle @@ -34,28 +74,41 @@ var services = initScope.ServiceProvider; var logFactory = services.GetRequiredService<ILoggerFactory>(); var loggerStartup = logFactory.CreateLogger("Robust.Cdn.Program"); - var options = services.GetRequiredService<IOptions<CdnOptions>>().Value; - var db = services.GetRequiredService<Database>().Connection; + var manifestOptions = services.GetRequiredService<IOptions<ManifestOptions>>().Value; + var db = services.GetRequiredService<Database>(); + var manifestDb = services.GetRequiredService<ManifestDatabase>(); - if (string.IsNullOrEmpty(options.VersionDiskPath)) + if (string.IsNullOrEmpty(manifestOptions.FileDiskPath)) { - loggerStartup.LogCritical("version disk path not set in configuration!"); - return; + loggerStartup.LogCritical("Manifest.FileDiskPath not set in configuration!"); + return 1; } - db.Open(); - - db.Execute("PRAGMA journal_mode=WAL"); + if (manifestOptions.Forks.Count == 0) + { + loggerStartup.LogCritical("No forks defined in Manifest configuration!"); + return 1; + } loggerStartup.LogDebug("Running migrations!"); var loggerMigrator = logFactory.CreateLogger<Migrator>(); - Migrator.Migrate(loggerMigrator, db, "Robust.Cdn.Migrations"); - loggerStartup.LogDebug("Done running migrations!"); + var success = Migrator.Migrate(services, loggerMigrator, db.Connection, "Robust.Cdn.Migrations"); + success &= Migrator.Migrate(services, loggerMigrator, manifestDb.Connection, "Robust.Cdn.ManifestMigrations"); + if (!success) + return 1; - Migrator.Migrate(loggerMigrator, db, "Robust.Cdn.Migrations"); loggerStartup.LogDebug("Done running migrations!"); + loggerStartup.LogDebug("Ensuring forks created in manifest DB"); + manifestDb.EnsureForksCreated(); + loggerStartup.LogDebug("Done creating forks in manifest DB!"); + + var scheduler = await initScope.ServiceProvider.GetRequiredService<ISchedulerFactory>().GetScheduler(); + foreach (var fork in manifestOptions.Forks.Keys) + { + await scheduler.TriggerJob(IngestNewCdnContentJob.Key, IngestNewCdnContentJob.Data(fork)); + } } /* // Configure the HTTP request
pipeline. @@ -72,4 +125,6 @@ app.MapControllers(); -app.Run(); +await app.RunAsync(); + +return 0; diff --git a/Robust.Cdn/Robust.Cdn.csproj b/Robust.Cdn/Robust.Cdn.csproj index f4cccca..436330f 100644 --- a/Robust.Cdn/Robust.Cdn.csproj +++ b/Robust.Cdn/Robust.Cdn.csproj @@ -9,17 +9,25 @@ - - - - - - + + + + + + + + + + + + + + diff --git a/Robust.Cdn/Services/DownloadRequestLogger.cs b/Robust.Cdn/Services/DownloadRequestLogger.cs index 93c6cf0..aedc73a 100644 --- a/Robust.Cdn/Services/DownloadRequestLogger.cs +++ b/Robust.Cdn/Services/DownloadRequestLogger.cs @@ -1,6 +1,7 @@ using System.Threading.Channels; using Dapper; using Microsoft.Extensions.Options; +using Robust.Cdn.Config; using Robust.Cdn.Helpers; using SpaceWizards.Sodium; @@ -8,7 +9,6 @@ namespace Robust.Cdn.Services; public sealed class DownloadRequestLogger : BackgroundService { - private readonly IServiceScopeFactory _scopeFactory; private readonly IOptions _options; private readonly ILogger _logger; private readonly ChannelReader _channelReader; @@ -19,7 +19,6 @@ public DownloadRequestLogger( IOptions options, ILogger logger) { - _scopeFactory = scopeFactory; _options = options; _logger = logger; @@ -43,7 +42,10 @@ protected override async Task ExecuteAsync(CancellationToken stoppingToken) { var storage = _options.Value.LogRequestStorage; if (storage == RequestLogStorage.Database) - WriteLogsDatabase(); + { + _logger.LogWarning("Database request logging has been removed"); + break; + } else if (storage == RequestLogStorage.Console) WriteLogsConsole(); else @@ -58,62 +60,6 @@ protected override async Task ExecuteAsync(CancellationToken stoppingToken) // ReSharper disable once FunctionNeverReturns } - private void WriteLogsDatabase() - { - using var scope = _scopeFactory.CreateScope(); - - var connection = scope.ServiceProvider.GetRequiredService().Connection; - using var transaction = connection.BeginTransaction(); - - var countWritten = 0; - while (_channelReader.TryRead(out var entry)) - { - var 
hash = CryptoGenericHashBlake2B.Hash(32, entry.RequestData.Span, ReadOnlySpan.Empty); - var blobRowId = connection.QuerySingleOrDefault("SELECT Id FROM RequestLogBlob WHERE Hash = @Hash", - new - { - Hash = hash - }); - - if (blobRowId == 0) - { - blobRowId = connection.ExecuteScalar( - "INSERT INTO RequestLogBlob (Hash, Data) VALUES (@Hash, zeroblob(@DataSize)) RETURNING Id", - new - { - Hash = hash, - DataSize = entry.RequestData.Length - }); - - using var blob = SqliteBlobStream.Open( - connection.Handle!, - "main", "RequestLogBlob", "Data", - blobRowId, - true); - - blob.Write(entry.RequestData.Span); - } - - connection.Execute( - "INSERT INTO RequestLog (Time, Compression, Protocol, BytesSent, VersionId, BlobId) " + - "VALUES (@Time, @Compression, @Protocol, @BytesSent, @VersionId, @BlobId)", - new - { - entry.Time, - entry.Compression, - entry.Protocol, - entry.VersionId, - entry.BytesSent, - BlobId = blobRowId - }); - - countWritten += 1; - } - - transaction.Commit(); - _logger.LogDebug("Wrote {CountWritten} log entries to disk", countWritten); - } - private void WriteLogsConsole() { var countWritten = 0; diff --git a/Robust.Cdn/Views/ForkBuildPage/Index.cshtml b/Robust.Cdn/Views/ForkBuildPage/Index.cshtml new file mode 100644 index 0000000..d3f59b3 --- /dev/null +++ b/Robust.Cdn/Views/ForkBuildPage/Index.cshtml @@ -0,0 +1,185 @@ +@using Robust.Cdn.Controllers +@using Robust.Cdn.Helpers +@model Robust.Cdn.Controllers.ForkBuildPageController.Model +@addTagHelper *, Microsoft.AspNetCore.Mvc.TagHelpers + +@{ + Layout = null; + + var displayName = Model.Options.DisplayName ?? Model.Fork; +} + + +@{ + static string ShowRid(string rid) + { + return rid switch + { + "win-x64" => "Windows x64", + "win-arm64" => "Windows ARM64", + "linux-x64" => "Linux x64", + "linux-arm64" => "Linux ARM64", + "osx-x64" => "macOS x64 (Intel)", + "osx-arm64" => "macOS ARM64 (Apple Silicon)", + _ => rid + }; + } + + async Task ShowBuild(ForkBuildPageController.Version version) + { +
+
+
Version:
+
@version.Name
+
Date:
+
+
Engine Version:
+
@version.EngineVersion
+
+

Download

+
    + @foreach (var versionServer in version.Servers) + { +
  • + @ShowRid(versionServer.Platform) + + @if (versionServer.FileSize is { } fileSize) + { + (@ByteHelpers.FormatBytes(fileSize)) + } +
  • + } +
+
+ } +} + + + + + + + @(Model.Options.DisplayName ?? Model.Fork) builds - Robust.Cdn + + + +
+
+

@displayName builds

+ @if (Model.Options.BuildsPageLink is { } link) + { +

Here you can find the latest server builds available for @Model.Options.BuildsPageLinkText.

+ } + else + { +

Here you can find the latest server builds available for @displayName.

+ } + +

+ If you somehow ended up here without reading the Server Hosting Tutorial, go read that first. It tells you how to actually host a server. +

+ + @if (Model.Versions.Count == 0) + { +

Congratulations. There are no server builds yet.
Seriously, none at all. Go push some commits.

+ } + else + { + +

Latest build

+ + @{ await ShowBuild(Model.Versions[0]); } + +

Old builds

+ + @foreach (var version in Model.Versions[1..]) + { + await ShowBuild(version); + } +
+ } + +
+ + +
+ + + diff --git a/Robust.Cdn/appsettings.Development.json b/Robust.Cdn/appsettings.Development.json index 9d92cf2..3d22b3d 100644 --- a/Robust.Cdn/appsettings.Development.json +++ b/Robust.Cdn/appsettings.Development.json @@ -7,10 +7,35 @@ } }, + "Manifest": { + "FileDiskPath": "../testData", + "Forks": { + "test": { + "UpdateToken": "foobar", + "NotifyWatchdogs": [ + { + "WatchdogUrl": "http://localhost:5000/", + "Instance": "syndicate_mothership", + "ApiToken": "Honk" + } + ], + "Private": true, + "PrivateUsers": { + "foobar": "baz" + }, + "DisplayName": "Test Fork", + "BuildsPageLink": "https://spacestation14.com", + "BuildsPageLinkText": "Test Fork LINK" + } + } + }, + "Cdn": { - "VersionDiskPath": "../testData", "DatabaseFileName": "content.db", "StreamCompressLevel": 3, - "LogRequests": true - } + "LogRequests": false, + "DefaultFork": "test" + }, + + "BaseUrl": "http://localhost:5123/" } diff --git a/Robust.Cdn/appsettings.json b/Robust.Cdn/appsettings.json index d7d36b7..e9f69c6 100644 --- a/Robust.Cdn/appsettings.json +++ b/Robust.Cdn/appsettings.json @@ -2,7 +2,8 @@ "Logging": { "LogLevel": { "Default": "Information", - "Microsoft.AspNetCore": "Warning" + "Microsoft.AspNetCore": "Warning", + "Quartz": "Warning" } }, "AllowedHosts": "*",