From c4f2062776ee82c6b924b99439df30f9a9ffc47c Mon Sep 17 00:00:00 2001
From: NathanFlurry
Date: Thu, 21 Nov 2024 05:18:23 +0000
Subject: [PATCH] fix: use correct endpoint for upload-prepare presigned requests & presigning docs (#1396)

Fixes RVT-4120
---
 docs-internal/infrastructure/s3/PRESIGNING.md | 22 ++++++++++
 .../services/ds/src/workflows/server/mod.rs   |  2 +-
 .../mm/worker/src/workers/lobby_create/mod.rs |  2 +-
 .../services/upload/ops/prepare/src/lib.rs    | 40 ++++++++++++-------
 4 files changed, 50 insertions(+), 16 deletions(-)
 create mode 100644 docs-internal/infrastructure/s3/PRESIGNING.md

diff --git a/docs-internal/infrastructure/s3/PRESIGNING.md b/docs-internal/infrastructure/s3/PRESIGNING.md
new file mode 100644
index 0000000000..557f18a780
--- /dev/null
+++ b/docs-internal/infrastructure/s3/PRESIGNING.md
@@ -0,0 +1,22 @@
+# Presigning
+
+## Endpoint types
+
+See documentation on the endpoint types at `s3_util::EndpointKind`.
+
+## Presigning requests
+
+Make sure you understand [presigning S3 requests](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-presigned-url.html) before reading this.
+
+Presigned requests need to be built with `EndpointKind::External` in order to ensure the origin is a public endpoint.
+
+Sometimes you need to create two separate clients: one with `EndpointKind::Internal` for making requests to S3 and one with `EndpointKind::External` for presigning requests.
+
+## Edge caching
+
+Rivet edge clusters can optionally run a pull-through S3 cache to accelerate requests. See `rivet_config::server::rivet::BuildDeliveryMethod` for how this is configured with Dynamic Servers.
+
+Requests via the edge cache are automatically authenticated to the S3 origin. Do not use presigned requests with the edge cache.
+
+
+
diff --git a/packages/services/ds/src/workflows/server/mod.rs b/packages/services/ds/src/workflows/server/mod.rs
index e66bfadae4..8eab05cd52 100644
--- a/packages/services/ds/src/workflows/server/mod.rs
+++ b/packages/services/ds/src/workflows/server/mod.rs
@@ -395,7 +395,7 @@ pub(crate) async fn resolve_image_artifact_url(
 			let s3_client = s3_util::Client::with_bucket_and_endpoint(
 				ctx.config(),
 				"bucket-build",
-				s3_util::EndpointKind::EdgeInternal,
+				s3_util::EndpointKind::External,
 			)
 			.await?;
diff --git a/packages/services/mm/worker/src/workers/lobby_create/mod.rs b/packages/services/mm/worker/src/workers/lobby_create/mod.rs
index a3b0e255e1..5ee3bb6011 100644
--- a/packages/services/mm/worker/src/workers/lobby_create/mod.rs
+++ b/packages/services/mm/worker/src/workers/lobby_create/mod.rs
@@ -799,7 +799,7 @@ async fn resolve_image_artifact_url(
 	let s3_client = s3_util::Client::with_bucket_and_endpoint(
 		ctx.config(),
 		bucket,
-		s3_util::EndpointKind::EdgeInternal,
+		s3_util::EndpointKind::External,
 	)
 	.await?;
diff --git a/packages/services/upload/ops/prepare/src/lib.rs b/packages/services/upload/ops/prepare/src/lib.rs
index 284fb5fea5..7ad260a0db 100644
--- a/packages/services/upload/ops/prepare/src/lib.rs
+++ b/packages/services/upload/ops/prepare/src/lib.rs
@@ -28,13 +28,22 @@ struct MultipartUpdate {
 async fn handle(
 	ctx: OperationContext,
 ) -> GlobalResult {
-	let s3_client = s3_util::Client::with_bucket_and_endpoint(
+	// This client is used for making requests directly to S3
+	let s3_client_internal = s3_util::Client::with_bucket_and_endpoint(
 		ctx.config(),
 		&ctx.bucket,
 		s3_util::EndpointKind::Internal,
 	)
 	.await?;
 
+	// This client is used for presigning requests using the public endpoint
+	let s3_client_external = s3_util::Client::with_bucket_and_endpoint(
+		ctx.config(),
+		&ctx.bucket,
+		s3_util::EndpointKind::External,
+	)
+	.await?;
+
 	// Validate upload sizes
 	let total_content_length = ctx
 		.files
@@ -122,12 +131,14 @@ async fn handle(
 	let (presigned_requests_init, multipart_updates) =
 		futures_util::stream::iter(ctx.files.iter().cloned())
 			.map(move |file| {
-				let s3_client = s3_client.clone();
+				let s3_client_internal = s3_client_internal.clone();
+				let s3_client_external = s3_client_external.clone();
 
 				if file.multipart {
-					handle_multipart_upload(s3_client, upload_id, file).boxed()
+					handle_multipart_upload(s3_client_internal, s3_client_external, upload_id, file)
+						.boxed()
 				} else {
-					handle_upload(s3_client, upload_id, file).boxed()
+					handle_upload(s3_client_external, upload_id, file).boxed()
 				}
 			})
 			.buffer_unordered(16)
@@ -200,15 +211,15 @@ async fn handle(
 }
 
 async fn handle_upload(
-	s3_client: s3_util::Client,
+	s3_client_external: s3_util::Client,
 	upload_id: Uuid,
 	file: backend::upload::PrepareFile,
 ) -> GlobalResult> {
 	let fut = async move {
 		// Sign an upload request
-		let mut put_obj_builder = s3_client
+		let mut put_obj_builder = s3_client_external
 			.put_object()
-			.bucket(s3_client.bucket())
+			.bucket(s3_client_external.bucket())
 			.key(format!("{}/{}", upload_id, file.path))
 			.content_length(file.content_length as i64);
 		if let Some(mime) = &file.mime {
@@ -246,19 +257,20 @@ async fn handle_upload(
 const MIN_MULTIPART_FILE_SIZE: u64 = util::file_size::mebibytes(5);
 
 async fn handle_multipart_upload(
-	s3_client: s3_util::Client,
+	s3_client_internal: s3_util::Client,
+	s3_client_external: s3_util::Client,
 	upload_id: Uuid,
 	file: backend::upload::PrepareFile,
 ) -> GlobalResult> {
 	// If the file is too small for multipart uploads, fallback to normal file uploads
 	if file.content_length < MIN_MULTIPART_FILE_SIZE {
-		return Ok(handle_upload(s3_client, upload_id, file).await?);
+		return Ok(handle_upload(s3_client_external, upload_id, file).await?);
 	}
 
 	// Create multipart upload
-	let mut multipart_builder = s3_client
+	let mut multipart_builder = s3_client_internal
 		.create_multipart_upload()
-		.bucket(s3_client.bucket())
+		.bucket(s3_client_internal.bucket())
 		.key(format!("{}/{}", upload_id, file.path));
 	if let Some(mime) = &file.mime {
 		multipart_builder = multipart_builder.content_type(mime.clone());
@@ -273,7 +285,7 @@ async fn handle_multipart_upload(
 	// S3's part number is 1-based
 	Ok((1..=part_count)
 		.map(|part_number| {
-			let s3_client = s3_client.clone();
+			let s3_client_external = s3_client_external.clone();
 			let file = file.clone();
 			let multipart_upload_id2 = multipart_upload_id.clone();
 			let path = file.path.clone();
@@ -283,9 +295,9 @@ async fn handle_multipart_upload(
 			let offset = (part_number - 1) * CHUNK_SIZE;
 			let content_length = (file.content_length - offset).min(CHUNK_SIZE);
 
-			let upload_part_builder = s3_client
+			let upload_part_builder = s3_client_external
 				.upload_part()
-				.bucket(s3_client.bucket())
+				.bucket(s3_client_external.bucket())
 				.key(format!("{}/{}", upload_id, file.path))
 				.content_length(content_length as i64)
 				.upload_id(multipart_upload_id2)
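
For readers who want the PRESIGNING.md guidance and the upload-prepare change in one self-contained place, here is a minimal sketch of the internal/external two-client pattern. It is an illustration only: it uses `aws-sdk-s3` directly rather than the repo's `s3_util::Client` wrapper, the bucket, key, and 15-minute expiry are placeholder values, and the `PresigningConfig` import path varies between `aws-sdk-s3` versions.

```rust
// Hypothetical sketch, not the patched code: shows why upload-prepare builds
// one client per endpoint kind.
use std::time::Duration;

use aws_sdk_s3::presigning::PresigningConfig; // path differs in older SDK versions

async fn prepare_multipart_part(
    // Built against the internal endpoint: used for calls this service makes
    // to S3 itself (e.g. CreateMultipartUpload).
    internal: &aws_sdk_s3::Client,
    // Built against the public endpoint: used only to presign URLs handed to
    // the uploader, so the signed host is publicly reachable.
    external: &aws_sdk_s3::Client,
    bucket: &str,
    key: &str,
) -> Result<String, Box<dyn std::error::Error>> {
    // Service-side request goes over the internal network.
    let multipart = internal
        .create_multipart_upload()
        .bucket(bucket)
        .key(key)
        .send()
        .await?;
    let upload_id = multipart.upload_id().ok_or("missing multipart upload id")?;

    // The URL returned to the uploader is signed by the external client. A URL
    // presigned against the internal endpoint would embed an origin the end
    // user cannot reach, and the signature would not match the public host.
    let presigned = external
        .upload_part()
        .bucket(bucket)
        .key(key)
        .upload_id(upload_id)
        .part_number(1)
        .presigned(PresigningConfig::expires_in(Duration::from_secs(15 * 60))?)
        .await?;

    Ok(presigned.uri().to_string())
}
```

SigV4 signatures cover the request host, so a presigned URL is only valid for the endpoint it was signed against; that host-binding property is what the `EndpointKind::External` switches in this patch rely on.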