Skip to content

Commit

Permalink
feat: Go based WebDAV with low memory usage and fast uploads
Browse files Browse the repository at this point in the history
  • Loading branch information
bouassaba authored Jul 9, 2024
1 parent 6cd9ff9 commit a4572fd
Show file tree
Hide file tree
Showing 61 changed files with 2,746 additions and 1,409 deletions.
8 changes: 8 additions & 0 deletions api/model/snapshot_model.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,3 +67,11 @@ type ImageProps struct {
Width int `json:"width"`
Height int `json:"height"`
}

// S3Reference identifies an object that already exists in S3, letting a file
// be created or patched without streaming its content through the API
// (see FileRouter.CreateFromS3 / FileRouter.PatchFromS3).
type S3Reference struct {
	Bucket      string `json:"bucket"`      // S3 bucket holding the object
	Key         string `json:"key"`         // object key within the bucket
	Size        int64  `json:"size"`        // object size in bytes
	SnapshotID  string `json:"snapshotId"`  // ID of the snapshot the object is stored under
	ContentType string `json:"contentType"` // MIME type of the object, as reported by the uploader
}
204 changes: 200 additions & 4 deletions api/router/file_router.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,8 @@ func (r *FileRouter) AppendNonJWTRoutes(g fiber.Router) {
g.Get("/:id/original:ext", r.DownloadOriginal)
g.Get("/:id/preview:ext", r.DownloadPreview)
g.Get("/:id/thumbnail:ext", r.DownloadThumbnail)
g.Post("/create_from_s3", r.CreateFromS3)
g.Patch("/:id/patch_from_s3", r.PatchFromS3)
}

// Create godoc
Expand Down Expand Up @@ -159,7 +161,7 @@ func (r *FileRouter) Create(c *fiber.Ctx) error {
}
}
}(tmpPath)
file, err = r.fileSvc.Store(file.ID, tmpPath, userID)
file, err = r.fileSvc.Store(file.ID, service.StoreOptions{Path: &tmpPath}, userID)
if err != nil {
return err
}
Expand Down Expand Up @@ -226,7 +228,7 @@ func (r *FileRouter) Patch(c *fiber.Ctx) error {
}
}
}(tmpPath)
file, err = r.fileSvc.Store(file.ID, tmpPath, userID)
file, err = r.fileSvc.Store(file.ID, service.StoreOptions{Path: &tmpPath}, userID)
if err != nil {
return err
}
Expand Down Expand Up @@ -377,11 +379,11 @@ func (r *FileRouter) List(c *fiber.Ctx) error {
SortOrder: sortOrder,
}
if query != "" {
bytes, err := base64.StdEncoding.DecodeString(query + strings.Repeat("=", (4-len(query)%4)%4))
b, err := base64.StdEncoding.DecodeString(query + strings.Repeat("=", (4-len(query)%4)%4))
if err != nil {
return errorpkg.NewInvalidQueryParamError("query")
}
if err := json.Unmarshal(bytes, &opts.Query); err != nil {
if err := json.Unmarshal(b, &opts.Query); err != nil {
return errorpkg.NewInvalidQueryParamError("query")
}
res, err = r.fileSvc.Search(id, opts, userID)
Expand Down Expand Up @@ -917,6 +919,200 @@ func (r *FileRouter) DownloadThumbnail(c *fiber.Ctx) error {
return c.Send(b)
}

// CreateFromS3 godoc
//
// @Summary Create from S3
// @Description Create a file from an object already uploaded to S3.
// @Tags Files
// @Id files_create_from_s3
// @Accept x-www-form-urlencoded
// @Produce json
// @Param api_key query string true "API Key"
// @Param access_token query string true "Access Token"
// @Param workspace_id query string true "Workspace ID"
// @Param parent_id query string false "Parent ID"
// @Param name query string true "Name"
// @Param s3_key query string true "S3 Key"
// @Param s3_bucket query string true "S3 Bucket"
// @Param snapshot_id query string true "Snapshot ID"
// @Param content_type query string true "Content Type"
// @Param size query string true "Size"
// @Success 201 {object} service.File
// @Failure 404 {object} errorpkg.ErrorResponse
// @Failure 400 {object} errorpkg.ErrorResponse
// @Failure 500 {object} errorpkg.ErrorResponse
// @Router /files/create_from_s3 [post]
//
// CreateFromS3 creates a new file whose content is an object that has already
// been uploaded to S3 (e.g. by the WebDAV server), so no bytes flow through
// this API. It authenticates via api_key + access_token query parameters,
// checks the workspace storage quota against the declared size, creates the
// file node, and links the S3 object to it via StoreOptions.S3Reference.
func (r *FileRouter) CreateFromS3(c *fiber.Ctx) error {
	apiKey := c.Query("api_key")
	if apiKey == "" {
		return errorpkg.NewMissingQueryParamError("api_key")
	}
	if apiKey != r.config.Security.APIKey {
		return errorpkg.NewInvalidAPIKeyError()
	}
	accessToken := c.Query("access_token")
	if accessToken == "" {
		return errorpkg.NewMissingQueryParamError("access_token")
	}
	userID, err := r.getUserIDFromAccessToken(accessToken)
	if err != nil {
		return c.SendStatus(http.StatusNotFound)
	}
	workspaceID := c.Query("workspace_id")
	if workspaceID == "" {
		return errorpkg.NewMissingQueryParamError("workspace_id")
	}
	parentID := c.Query("parent_id")
	if parentID == "" {
		// Default to the workspace root when no parent folder is given.
		workspace, err := r.workspaceSvc.Find(workspaceID, userID)
		if err != nil {
			return err
		}
		parentID = workspace.RootID
	}
	name := c.Query("name")
	if name == "" {
		return errorpkg.NewMissingQueryParamError("name")
	}
	s3Key := c.Query("s3_key")
	if s3Key == "" {
		return errorpkg.NewMissingQueryParamError("s3_key")
	}
	s3Bucket := c.Query("s3_bucket")
	if s3Bucket == "" {
		return errorpkg.NewMissingQueryParamError("s3_bucket")
	}
	snapshotID := c.Query("snapshot_id")
	if snapshotID == "" {
		return errorpkg.NewMissingQueryParamError("snapshot_id")
	}
	contentType := c.Query("content_type")
	if contentType == "" {
		return errorpkg.NewMissingQueryParamError("content_type")
	}
	if c.Query("size") == "" {
		return errorpkg.NewMissingQueryParamError("size")
	}
	size, err := strconv.ParseInt(c.Query("size"), 10, 64)
	if err != nil {
		// Report a bad "size" value as a 400-style query param error
		// instead of leaking the raw strconv error to the client.
		return errorpkg.NewInvalidQueryParamError("size")
	}
	ok, err := r.workspaceSvc.HasEnoughSpaceForByteSize(workspaceID, size)
	if err != nil {
		return err
	}
	if !*ok {
		return errorpkg.NewStorageLimitExceededError()
	}
	file, err := r.fileSvc.Create(service.FileCreateOptions{
		Name:        name,
		Type:        model.FileTypeFile,
		ParentID:    &parentID,
		WorkspaceID: workspaceID,
	}, userID)
	if err != nil {
		return err
	}
	// Link the pre-uploaded S3 object to the new file; no upload happens here.
	file, err = r.fileSvc.Store(file.ID, service.StoreOptions{
		S3Reference: &model.S3Reference{
			Key:         s3Key,
			Bucket:      s3Bucket,
			SnapshotID:  snapshotID,
			Size:        size,
			ContentType: contentType,
		},
	}, userID)
	if err != nil {
		return err
	}
	return c.Status(http.StatusCreated).JSON(file)
}

// PatchFromS3 godoc
//
// @Summary Patch from S3
// @Description Patch a file's content with an object already uploaded to S3.
// @Tags Files
// @Id files_patch_from_s3
// @Accept x-www-form-urlencoded
// @Produce json
// @Param api_key query string true "API Key"
// @Param access_token query string true "Access Token"
// @Param s3_key query string true "S3 Key"
// @Param s3_bucket query string true "S3 Bucket"
// @Param snapshot_id query string true "Snapshot ID"
// @Param content_type query string true "Content Type"
// @Param size query string true "Size"
// @Param id path string true "ID"
// @Success 200 {object} service.File
// @Failure 404 {object} errorpkg.ErrorResponse
// @Failure 400 {object} errorpkg.ErrorResponse
// @Failure 500 {object} errorpkg.ErrorResponse
// @Router /files/{id}/patch_from_s3 [patch]
//
// PatchFromS3 replaces the content of an existing file with an object that has
// already been uploaded to S3, creating a new snapshot without streaming bytes
// through this API. It authenticates via api_key + access_token query
// parameters and checks the workspace quota against the declared size.
func (r *FileRouter) PatchFromS3(c *fiber.Ctx) error {
	apiKey := c.Query("api_key")
	if apiKey == "" {
		return errorpkg.NewMissingQueryParamError("api_key")
	}
	if apiKey != r.config.Security.APIKey {
		return errorpkg.NewInvalidAPIKeyError()
	}
	accessToken := c.Query("access_token")
	if accessToken == "" {
		return errorpkg.NewMissingQueryParamError("access_token")
	}
	userID, err := r.getUserIDFromAccessToken(accessToken)
	if err != nil {
		return c.SendStatus(http.StatusNotFound)
	}
	files, err := r.fileSvc.Find([]string{c.Params("id")}, userID)
	if err != nil {
		return err
	}
	// Guard against an empty result: indexing files[0] unconditionally would
	// panic if Find returns no match without an error.
	if len(files) == 0 {
		return c.SendStatus(http.StatusNotFound)
	}
	file := files[0]
	s3Key := c.Query("s3_key")
	if s3Key == "" {
		return errorpkg.NewMissingQueryParamError("s3_key")
	}
	s3Bucket := c.Query("s3_bucket")
	if s3Bucket == "" {
		return errorpkg.NewMissingQueryParamError("s3_bucket")
	}
	if c.Query("size") == "" {
		return errorpkg.NewMissingQueryParamError("size")
	}
	size, err := strconv.ParseInt(c.Query("size"), 10, 64)
	if err != nil {
		// Report a bad "size" value as a 400-style query param error
		// instead of leaking the raw strconv error to the client.
		return errorpkg.NewInvalidQueryParamError("size")
	}
	snapshotID := c.Query("snapshot_id")
	if snapshotID == "" {
		return errorpkg.NewMissingQueryParamError("snapshot_id")
	}
	contentType := c.Query("content_type")
	if contentType == "" {
		return errorpkg.NewMissingQueryParamError("content_type")
	}
	ok, err := r.workspaceSvc.HasEnoughSpaceForByteSize(file.WorkspaceID, size)
	if err != nil {
		return err
	}
	if !*ok {
		return errorpkg.NewStorageLimitExceededError()
	}
	// Link the pre-uploaded S3 object as the file's new content.
	file, err = r.fileSvc.Store(file.ID, service.StoreOptions{
		S3Reference: &model.S3Reference{
			Key:         s3Key,
			Bucket:      s3Bucket,
			SnapshotID:  snapshotID,
			Size:        size,
			ContentType: contentType,
		},
	}, userID)
	if err != nil {
		return err
	}
	return c.JSON(file)
}

func (r *FileRouter) getUserIDFromAccessToken(accessToken string) (string, error) {
token, err := jwt.Parse(accessToken, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
Expand Down
58 changes: 44 additions & 14 deletions api/service/file_service.go
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,12 @@ func (svc *FileService) validateParent(id string, userID string) error {
return nil
}

func (svc *FileService) Store(id string, path string, userID string) (*File, error) {
// StoreOptions selects the source of the content passed to FileService.Store:
// either a reference to an object already uploaded to S3, or a local file
// path to be uploaded. When S3Reference is non-nil it takes precedence and
// Path is ignored; otherwise Path must be set.
type StoreOptions struct {
	S3Reference *model.S3Reference // pre-uploaded S3 object to link (skips the upload step)
	Path        *string            // local filesystem path whose content will be uploaded
}

func (svc *FileService) Store(id string, opts StoreOptions, userID string) (*File, error) {
file, err := svc.fileRepo.Find(id)
if err != nil {
return nil, err
Expand All @@ -214,7 +219,39 @@ func (svc *FileService) Store(id string, path string, userID string) (*File, err
if err != nil {
return nil, err
}
snapshotID := helper.NewID()
var snapshotID string
var size int64
var path string
var original model.S3Object
var bucket string
var contentType string
if opts.S3Reference == nil {
snapshotID = helper.NewID()
path = *opts.Path
stat, err := os.Stat(*opts.Path)
if err != nil {
return nil, err
}
size = stat.Size()
original = model.S3Object{
Bucket: workspace.GetBucket(),
Key: snapshotID + "/original" + strings.ToLower(filepath.Ext(path)),
Size: helper.ToPtr(size),
}
bucket = workspace.GetBucket()
contentType = infra.DetectMimeFromPath(path)
} else {
snapshotID = opts.S3Reference.SnapshotID
path = opts.S3Reference.Key
size = opts.S3Reference.Size
original = model.S3Object{
Bucket: opts.S3Reference.Bucket,
Key: opts.S3Reference.Key,
Size: helper.ToPtr(size),
}
bucket = opts.S3Reference.Bucket
contentType = opts.S3Reference.ContentType
}
snapshot := repo.NewSnapshot()
snapshot.SetID(snapshotID)
snapshot.SetVersion(latestVersion + 1)
Expand All @@ -228,18 +265,11 @@ func (svc *FileService) Store(id string, path string, userID string) (*File, err
if err = svc.snapshotRepo.MapWithFile(snapshotID, id); err != nil {
return nil, err
}
stat, err := os.Stat(path)
if err != nil {
return nil, err
}
exceedsProcessingLimit := stat.Size() > helper.MegabyteToByte(svc.fileIdent.GetProcessingLimitMB(path))
original := model.S3Object{
Bucket: workspace.GetBucket(),
Key: snapshotID + "/original" + strings.ToLower(filepath.Ext(path)),
Size: helper.ToPtr(stat.Size()),
}
if err = svc.s3.PutFile(original.Key, path, infra.DetectMimeFromPath(path), workspace.GetBucket(), minio.PutObjectOptions{}); err != nil {
return nil, err
exceedsProcessingLimit := size > helper.MegabyteToByte(svc.fileIdent.GetProcessingLimitMB(path))
if opts.S3Reference == nil {
if err = svc.s3.PutFile(original.Key, path, contentType, bucket, minio.PutObjectOptions{}); err != nil {
return nil, err
}
}
snapshot.SetOriginal(&original)
if exceedsProcessingLimit {
Expand Down
2 changes: 2 additions & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,8 @@ services:
- PORT=8082
- IDP_URL=http://idp:8081
- API_URL=http://api:8080
- REDIS_ADDRESS=redis:6379
- S3_URL=minio:9000
healthcheck:
test: wget --quiet --spider http://127.0.0.1:8082/v2/health || exit 1
depends_on:
Expand Down
1 change: 0 additions & 1 deletion webdav/.dockerignore

This file was deleted.

10 changes: 0 additions & 10 deletions webdav/.editorconfig

This file was deleted.

16 changes: 16 additions & 0 deletions webdav/.env
Original file line number Diff line number Diff line change
@@ -1,3 +1,19 @@
PORT=8082

# Security
SECURITY_API_KEY="7znl9Zd8F!4lRZA43lEQb757mCy"

# URLs
IDP_URL="http://127.0.0.1:8081"
API_URL="http://127.0.0.1:8080"

# S3
S3_URL="127.0.0.1:9000"
S3_ACCESS_KEY="voltaserve"
S3_SECRET_KEY="voltaserve"
S3_REGION="us-east-1"
S3_SECURE=false

# Redis
REDIS_ADDRESS="127.0.0.1:6379"
REDIS_DB=0
5 changes: 1 addition & 4 deletions webdav/.gitattributes
Original file line number Diff line number Diff line change
@@ -1,4 +1 @@
/.yarn/** linguist-vendored
/.yarn/releases/* binary
/.yarn/plugins/**/* binary
/.pnp.* binary linguist-generated
docs/** linguist-detectable=false
7 changes: 5 additions & 2 deletions webdav/.gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,5 @@
node_modules
.env.local
/.env.local
/voltaserve
__debug_bin*
/*.tsv
/.air
Loading

0 comments on commit a4572fd

Please sign in to comment.