feat: Create uploads module
Blckbrry-Pi committed Apr 17, 2024
1 parent b5c0823 commit 9366389
Showing 19 changed files with 1,357 additions and 0 deletions.
16 changes: 16 additions & 0 deletions modules/uploads/config.ts
@@ -0,0 +1,16 @@
export interface Config {
  maxUploadSize: UploadSize;
  maxMultipartUploadSize: UploadSize;
  maxFilesPerUpload?: number;
  defaultMultipartChunkSize?: UploadSize;
}

export const DEFAULT_MAX_FILES_PER_UPLOAD = 10;
// export const DEFAULT_MULTIPART_CHUNK_SIZE: UploadSize = { mib: 100 };
export const DEFAULT_MULTIPART_CHUNK_SIZE: UploadSize = { mib: 10 };

type Units = "b" | "kb" | "mb" | "gb" | "tb" | "kib" | "mib" | "gib" | "tib";

// A union of single-key records — exactly one unit key per value, e.g. `{ mib: 10 }`.
export type UploadSize = {
  [unit in Units]: Record<unit, number>;
}[Units];
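
To make the shape concrete, here is a sketch of a config value and a byte-count conversion. The `toBytes` helper is hypothetical, not part of this commit:

import { Config, UploadSize } from "./config.ts";

// Example config: 1 GiB cap per upload, 10 GiB cap for multipart uploads.
const config: Config = {
  maxUploadSize: { gib: 1 },
  maxMultipartUploadSize: { gib: 10 },
  maxFilesPerUpload: 10,
  defaultMultipartChunkSize: { mib: 10 },
};

// Hypothetical helper: normalize an UploadSize to a byte count.
const UNIT_BYTES: Record<string, number> = {
  b: 1, kb: 1e3, mb: 1e6, gb: 1e9, tb: 1e12,
  kib: 2 ** 10, mib: 2 ** 20, gib: 2 ** 30, tib: 2 ** 40,
};

function toBytes(size: UploadSize): number {
  // The union type guarantees exactly one unit key per value.
  const [unit, value] = Object.entries(size)[0];
  return value * UNIT_BYTES[unit];
}

console.log(toBytes(config.maxUploadSize)); // 1073741824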
27 changes: 27 additions & 0 deletions modules/uploads/db/migrations/…/migration.sql
@@ -0,0 +1,27 @@
-- CreateTable
CREATE TABLE "Upload" (
    "id" UUID NOT NULL,
    "userId" UUID,
    "bucket" TEXT NOT NULL,
    "contentLength" BIGINT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL,
    "completedAt" TIMESTAMP(3),
    "deletedAt" TIMESTAMP(3),

    CONSTRAINT "Upload_pkey" PRIMARY KEY ("id")
);

-- CreateTable
CREATE TABLE "Files" (
    "path" TEXT NOT NULL,
    "mime" TEXT,
    "contentLength" BIGINT NOT NULL,
    "nsfwScoreThreshold" DOUBLE PRECISION,
    "uploadId" UUID NOT NULL,

    CONSTRAINT "Files_pkey" PRIMARY KEY ("uploadId","path")
);

-- AddForeignKey
ALTER TABLE "Files" ADD CONSTRAINT "Files_uploadId_fkey" FOREIGN KEY ("uploadId") REFERENCES "Upload"("id") ON DELETE RESTRICT ON UPDATE CASCADE;
8 changes: 8 additions & 0 deletions modules/uploads/db/migrations/…/migration.sql
@@ -0,0 +1,8 @@
/*
  Warnings:

  - You are about to drop the column `nsfwScoreThreshold` on the `Files` table. All the data in the column will be lost.

*/
-- AlterTable
ALTER TABLE "Files" DROP COLUMN "nsfwScoreThreshold";
2 changes: 2 additions & 0 deletions modules/uploads/db/migrations/…/migration.sql
@@ -0,0 +1,2 @@
-- AlterTable
ALTER TABLE "Files" ADD COLUMN "multipartUploadId" TEXT;
3 changes: 3 additions & 0 deletions modules/uploads/db/migrations/migration_lock.toml
@@ -0,0 +1,3 @@
# Please do not edit this file manually
# It should be added in your version-control system (i.e. Git)
provider = "postgresql"
33 changes: 33 additions & 0 deletions modules/uploads/db/schema.prisma
@@ -0,0 +1,33 @@
// Do not modify this `datasource` block
datasource db {
  provider = "postgresql"
  url      = env("DATABASE_URL")
}

model Upload {
  id            String    @id @default(uuid()) @db.Uuid
  userId        String?   @db.Uuid
  bucket        String
  contentLength BigInt
  createdAt     DateTime  @default(now())
  updatedAt     DateTime  @updatedAt
  completedAt   DateTime?
  deletedAt     DateTime?
  files         Files[]   @relation("Files")
}

model Files {
  uploadId          String  @db.Uuid
  upload            Upload  @relation("Files", fields: [uploadId], references: [id])
  multipartUploadId String?
  path              String
  mime              String?
  contentLength     BigInt

  @@id([uploadId, path])
}
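
The `prepare` script isn't included in this diff. As a sketch of how rows in this schema relate, a standard Prisma nested write against the module's client (`ctx.db`) could create an upload together with its files — the values here are illustrative:

// Illustrative nested write against the schema above (standard Prisma).
const upload = await ctx.db.upload.create({
  data: {
    userId: "123e4567-e89b-12d3-a456-426614174000", // optional
    bucket: "uploads-bucket", // hypothetical bucket name
    contentLength: 2048n, // BigInt: total bytes across all files
    files: {
      create: [
        { path: "avatar.png", mime: "image/png", contentLength: 2048n },
      ],
    },
  },
  include: { files: true },
});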
46 changes: 46 additions & 0 deletions modules/uploads/module.yaml
@@ -0,0 +1,46 @@
scripts:
  prepare:
    name: Prepare Upload
    description: Prepare an upload batch for data transfer
  complete:
    name: Complete Upload
    description: Alert the module that the upload has been completed
  get:
    name: Get Upload Metadata
    description: Get the metadata (including contained files) for specified upload IDs
  get_public_file_urls:
    name: Get File Link
    description: Get presigned download links for each of the specified files
  list_for_user:
    name: List Uploads for Users
    description: Get a list of upload IDs associated with the specified user IDs
  delete:
    name: Delete Upload
    description: Removes the upload and deletes the files from the bucket
errors:
  no_files:
    name: No Files Provided
    description: An upload must have at least 1 file
  too_many_files:
    name: Too Many Files Provided
    description: There is a limit to how many files can be put into a single upload (see config)
  duplicate_paths:
    name: Duplicate Paths Provided
    description: An upload cannot contain 2 files with the same paths (see `cause` for offending paths)
  size_limit_exceeded:
    name: Combined Size Limit Exceeded
    description: There is a maximum total size per upload (see config)
  upload_not_found:
    name: Upload Not Found
    description: The provided upload ID didn't match any known existing uploads
  upload_already_completed:
    name: Upload Already Completed
    description: "`complete` was already called on this upload"
  files_not_uploaded:
    name: Files Not Uploaded
    description: Not all files for the upload were found in the bucket
  failed_to_delete:
    name: Failed to Delete Files
    description: One or more files in the upload couldn't be deleted from the bucket
  s3_not_configured:
    name: S3 Not Configured
    description: The S3 bucket is not configured (missing env variables)
  multipart_upload_completion_fail:
    name: Multipart Upload Completion Failure
    description: The multipart upload failed to complete (see `cause` for more information)
dependencies:
  users: {}
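
For orientation, a sketch of how a dependent module might drive this flow end to end. The `ctx.modules.uploads.*` accessor style is an assumption for illustration; the request/response shapes match `complete.ts` and `delete.ts` below:

// Sketch: completing and later deleting an upload from another script.
// deno-lint-ignore no-explicit-any — `ctx` typing is assumed here.
export async function finalizeAndPurge(ctx: any, uploadId: string) {
  // Mark the upload complete once the client reports all PUTs finished.
  const { upload } = await ctx.modules.uploads.complete({ uploadId });
  console.log(`Upload ${upload.id} completed at ${upload.completedAt}`);

  // Later: remove the upload and its files from the bucket.
  const { bytesDeleted } = await ctx.modules.uploads.delete({ uploadId });
  console.log(`Freed ${bytesDeleted} bytes`);
}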
139 changes: 139 additions & 0 deletions modules/uploads/scripts/complete.ts
@@ -0,0 +1,139 @@
import { RuntimeError, ScriptContext } from "../_gen/scripts/complete.ts";
import {
  completeMultipartUpload,
  getMultipartUploadParts,
  keyExists,
} from "../utils/bucket.ts";
import { getS3EnvConfig } from "../utils/env.ts";
import { getKey, prismaToOutput, Upload } from "../utils/types.ts";

export interface Request {
  uploadId: string;
}

export interface Response {
  upload: Upload;
}

export async function run(
  ctx: ScriptContext,
  req: Request,
): Promise<Response> {
  const s3 = getS3EnvConfig();
  if (!s3) throw new RuntimeError("s3_not_configured");

  const newUpload = await ctx.db.$transaction(async (db) => {
    // Find the upload by ID
    const upload = await db.upload.findFirst({
      where: {
        id: req.uploadId,
      },
      select: {
        id: true,
        userId: true,
        bucket: true,
        contentLength: true,
        files: true,
        createdAt: true,
        updatedAt: true,
        completedAt: true,
      },
    });

    // Error if the upload wasn't prepared
    if (!upload) {
      throw new RuntimeError(
        "upload_not_found",
        {
          meta: {
            reason: `Upload with ID ${req.uploadId} not found`,
          },
        },
      );
    }

    // Check with S3 to see if the files were uploaded
    const fileExistencePromises = upload.files.map(
      async (file) => {
        // If the file was uploaded in parts, complete the multipart upload
        if (file.multipartUploadId) {
          try {
            const parts = await getMultipartUploadParts(
              s3,
              getKey(upload.id, file.path),
              file.multipartUploadId,
            );
            if (parts.length === 0) return false;

            await completeMultipartUpload(
              s3,
              getKey(upload.id, file.path),
              file.multipartUploadId,
              parts,
            );
          } catch (e) {
            throw new RuntimeError(
              "multipart_upload_completion_fail",
              { cause: e },
            );
          }
        }

        // Check if the file exists
        return await keyExists(s3, getKey(upload.id, file.path));
      },
    );
    const fileExistence = await Promise.all(fileExistencePromises);
    const filesAllExist = fileExistence.every(Boolean);
    if (!filesAllExist) {
      const missingFiles = upload.files.filter((_, i) => !fileExistence[i]);
      throw new RuntimeError(
        "files_not_uploaded",
        {
          meta: {
            reason: `Not all files for upload with ID ${req.uploadId} have been uploaded`,
            missingFiles: missingFiles.map((file) => file.path),
          },
        },
      );
    }

    // Error if `complete` was already called with this ID
    if (upload.completedAt !== null) {
      throw new RuntimeError(
        "upload_already_completed",
        {
          meta: {
            reason: `Upload with ID ${req.uploadId} has already been completed`,
          },
        },
      );
    }

    // Update the upload to mark it as completed
    const completedUpload = await db.upload.update({
      where: {
        id: req.uploadId,
      },
      data: {
        completedAt: new Date().toISOString(),
      },
      select: {
        id: true,
        userId: true,
        bucket: true,
        contentLength: true,
        files: true,
        createdAt: true,
        updatedAt: true,
        completedAt: true,
      },
    });

    return completedUpload;
  });

  return {
    upload: prismaToOutput(newUpload, true),
  };
}
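
The helpers in `utils/bucket.ts` aren't part of this diff. For context, a sketch of what `getMultipartUploadParts` and `completeMultipartUpload` could look like on top of the AWS SDK v3 — the `S3Config` shape here is an assumption, not the module's actual type:

import {
  CompletedPart,
  CompleteMultipartUploadCommand,
  ListPartsCommand,
  S3Client,
} from "npm:@aws-sdk/client-s3";

// Assumed config shape; the real one comes from utils/env.ts (not shown).
interface S3Config {
  client: S3Client;
  bucket: string;
}

export async function getMultipartUploadParts(
  s3: S3Config,
  key: string,
  uploadId: string,
): Promise<CompletedPart[]> {
  // List the parts the client actually uploaded for this multipart upload.
  const res = await s3.client.send(
    new ListPartsCommand({ Bucket: s3.bucket, Key: key, UploadId: uploadId }),
  );
  return (res.Parts ?? []).map((p) => ({
    PartNumber: p.PartNumber,
    ETag: p.ETag,
  }));
}

export async function completeMultipartUpload(
  s3: S3Config,
  key: string,
  uploadId: string,
  parts: CompletedPart[],
): Promise<void> {
  // Stitch the uploaded parts into the final object.
  await s3.client.send(
    new CompleteMultipartUploadCommand({
      Bucket: s3.bucket,
      Key: key,
      UploadId: uploadId,
      MultipartUpload: { Parts: parts },
    }),
  );
}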
86 changes: 86 additions & 0 deletions modules/uploads/scripts/delete.ts
@@ -0,0 +1,86 @@
import { RuntimeError, ScriptContext } from "../_gen/scripts/delete.ts";
import { getKey } from "../utils/types.ts";
import { deleteKeys } from "../utils/bucket.ts";
import { getS3EnvConfig } from "../utils/env.ts";

export interface Request {
  uploadId: string;
}

export interface Response {
  bytesDeleted: string;
}

export async function run(
  ctx: ScriptContext,
  req: Request,
): Promise<Response> {
  const s3 = getS3EnvConfig();
  if (!s3) throw new RuntimeError("s3_not_configured");

  const bytesDeleted = await ctx.db.$transaction(async (db) => {
    const upload = await db.upload.findFirst({
      where: {
        id: req.uploadId,
        completedAt: { not: null },
        deletedAt: null,
      },
      select: {
        id: true,
        userId: true,
        bucket: true,
        contentLength: true,
        files: true,
        createdAt: true,
        updatedAt: true,
        completedAt: true,
      },
    });
    if (!upload) {
      throw new RuntimeError(
        "upload_not_found",
        {
          meta: {
            modified: false,
            reason: `Upload with ID ${req.uploadId} not found`,
          },
        },
      );
    }

    const filesToDelete = upload.files.map((file) =>
      getKey(file.uploadId, file.path)
    );
    const deleteResults = await deleteKeys(s3, filesToDelete);

    const failures = upload.files
      .map((file, i) => [file, deleteResults[i]] as const)
      .filter(([, successfullyDeleted]) => !successfullyDeleted)
      .map(([file]) => file);

    if (failures.length) {
      const failedPaths = JSON.stringify(failures.map((file) => file.path));
      throw new RuntimeError(
        "failed_to_delete",
        {
          meta: {
            modified: failures.length !== filesToDelete.length,
            reason: `Failed to delete files with paths ${failedPaths}`,
          },
        },
      );
    }

    await db.upload.update({
      where: {
        id: req.uploadId,
      },
      data: {
        deletedAt: new Date().toISOString(),
      },
    });

    return upload.contentLength.toString();
  });
  return { bytesDeleted };
}