From 7124daf785b2f8b2b716c147634576a1ab11074d Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Wed, 15 May 2024 16:49:54 -0400 Subject: [PATCH 01/23] comment out deprecated folder presigned download --- blobstore/presigned_url.go | 370 ++++++++++++++++++------------------- main.go | 11 +- 2 files changed, 192 insertions(+), 189 deletions(-) diff --git a/blobstore/presigned_url.go b/blobstore/presigned_url.go index 9c7bddd..abc7dba 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -1,16 +1,12 @@ package blobstore import ( - "archive/tar" "bytes" - "compress/gzip" "fmt" - "io" "net/http" "net/url" "path/filepath" "strings" - "sync" "time" "github.com/aws/aws-sdk-go/aws" @@ -29,89 +25,89 @@ func (s3Ctrl *S3Controller) GetDownloadPresignedURL(bucket, key string, expDays return req.Presign(duration) } -func (s3Ctrl *S3Controller) tarS3Files(r *s3.ListObjectsV2Output, bucket string, outputFile string, prefix string) (err error) { - uploader := s3manager.NewUploader(s3Ctrl.Sess) - pr, pw := io.Pipe() - - gzipWriter := gzip.NewWriter(pw) - tarWriter := tar.NewWriter(gzipWriter) - - var wg sync.WaitGroup - wg.Add(1) - - go func() { - defer wg.Done() - log.Debug("start writing files to:", outputFile) - _, err := uploader.Upload(&s3manager.UploadInput{ - Bucket: aws.String(bucket), - Key: aws.String(outputFile), - Body: pr, - }) - if err != nil { - log.Errorf("failed to upload tar.gz file to S3: %s", err) - return - } - log.Debug("completed writing files to:", outputFile) - }() - - for _, item := range r.Contents { - filePath := filepath.Join(strings.TrimPrefix(aws.StringValue(item.Key), prefix)) - copyObj := aws.StringValue(item.Key) - log.Debugf("copying %s to %s", copyObj, outputFile) - - getResp, err := s3Ctrl.S3Svc.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(copyObj), - }) - if err != nil { - log.Errorf("failed to download file: %s, error: %s", copyObj, err) - return err - } - defer getResp.Body.Close() - - header := &tar.Header{ - Name: filePath, - Size: *getResp.ContentLength, - Mode: int64(0644), - } - - err = tarWriter.WriteHeader(header) - if err != nil { - log.Errorf("failed to write tar header for file: %s, error: %s", copyObj, err) - return err - } - - _, err = io.Copy(tarWriter, getResp.Body) - if err != nil { - log.Errorf("failed to write file content to tar for file: %s, error: %s", copyObj, err) - return err - } - log.Debugf("completed copying: %s", copyObj) - } - - err = tarWriter.Close() - if err != nil { - log.Error("tar close failure:", err) - return err - } - - err = gzipWriter.Close() - if err != nil { - log.Error("gzip close failure:", err) - return err - } - - err = pw.Close() - if err != nil { - log.Error("pipe writer close failure:", err) - return err - } - - wg.Wait() - - log.Debug("completed tar of file successfully") - return nil -} +// func (s3Ctrl *S3Controller) tarS3Files(r *s3.ListObjectsV2Output, bucket string, outputFile string, prefix string) (err error) { +// uploader := s3manager.NewUploader(s3Ctrl.Sess) +// pr, pw := io.Pipe() + +// gzipWriter := gzip.NewWriter(pw) +// tarWriter := tar.NewWriter(gzipWriter) + +// var wg sync.WaitGroup +// wg.Add(1) + +// go func() { +// defer wg.Done() +// log.Debug("start writing files to:", outputFile) +// _, err := uploader.Upload(&s3manager.UploadInput{ +// Bucket: aws.String(bucket), +// Key: aws.String(outputFile), +// Body: pr, +// }) +// if err != nil { +// log.Errorf("failed to upload tar.gz file to S3: %s", err) +// return +// } +// log.Debug("completed 
writing files to:", outputFile) +// }() + +// for _, item := range r.Contents { +// filePath := filepath.Join(strings.TrimPrefix(aws.StringValue(item.Key), prefix)) +// copyObj := aws.StringValue(item.Key) +// log.Debugf("copying %s to %s", copyObj, outputFile) + +// getResp, err := s3Ctrl.S3Svc.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String(bucket), +// Key: aws.String(copyObj), +// }) +// if err != nil { +// log.Errorf("failed to download file: %s, error: %s", copyObj, err) +// return err +// } +// defer getResp.Body.Close() + +// header := &tar.Header{ +// Name: filePath, +// Size: *getResp.ContentLength, +// Mode: int64(0644), +// } + +// err = tarWriter.WriteHeader(header) +// if err != nil { +// log.Errorf("failed to write tar header for file: %s, error: %s", copyObj, err) +// return err +// } + +// _, err = io.Copy(tarWriter, getResp.Body) +// if err != nil { +// log.Errorf("failed to write file content to tar for file: %s, error: %s", copyObj, err) +// return err +// } +// log.Debugf("completed copying: %s", copyObj) +// } + +// err = tarWriter.Close() +// if err != nil { +// log.Error("tar close failure:", err) +// return err +// } + +// err = gzipWriter.Close() +// if err != nil { +// log.Error("gzip close failure:", err) +// return err +// } + +// err = pw.Close() +// if err != nil { +// log.Error("pipe writer close failure:", err) +// return err +// } + +// wg.Wait() + +// log.Debug("completed tar of file successfully") +// return nil +// } func (bh *BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { bucket := c.QueryParam("bucket") @@ -153,106 +149,106 @@ func (bh *BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { return c.JSON(http.StatusOK, url) } -func (bh *BlobHandler) HandleGetPresignedURLMultiObj(c echo.Context) error { - prefix := c.QueryParam("prefix") - if prefix == "" { - errMsg := fmt.Errorf("request must include a `prefix` parameter") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } - - bucket := c.QueryParam("bucket") - s3Ctrl, err := bh.GetController(bucket) - if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } - - if !strings.HasSuffix(prefix, "/") { - prefix = prefix + "/" - } - - response, err := s3Ctrl.GetList(bucket, prefix, false) - if err != nil { - errMsg := fmt.Errorf("error getting list: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - if *response.KeyCount == 0 { - errMsg := fmt.Errorf("the specified prefix %s does not exist in S3", prefix) - log.Error(errMsg.Error()) - return c.JSON(http.StatusNotFound, errMsg.Error()) - } - //check if size is below 5GB - var size, fileCount uint64 - err = bh.GetSize(response, &size, &fileCount) - if err != nil { - errMsg := fmt.Errorf("error getting size: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - - limit := uint64(1024 * 1024 * 1024 * bh.Config.DefaultZipDownloadSizeLimit) - if size >= limit { - errMsg := fmt.Errorf("request entity is larger than %v GB, current prefix size is: %v GB", bh.Config.DefaultZipDownloadSizeLimit, float64(size)/(1024*1024*1024)) - log.Error(errMsg.Error()) - return c.JSON(http.StatusRequestEntityTooLarge, errMsg.Error()) - } - - filename := fmt.Sprintf("%s.%s", strings.TrimSuffix(prefix, "/"), "tar.gz") - outputFile := 
filepath.Join(bh.Config.DefaultTempPrefix, filename) - - // Check if the tar.gz file already exists in S3 - tarFileResponse, err := s3Ctrl.GetList(bucket, outputFile, false) - if err != nil { - errMsg := fmt.Errorf("error checking if tar.gz file exists in S3: %s", err) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - - if len(tarFileResponse.Contents) > 0 { - log.Debug("the prefix was once downloaded, checking if it is outdated") - // Tar.gz file exists, now compare modification dates - mostRecentModTime, err := s3Ctrl.getMostRecentModTime(bucket, prefix) - if err != nil { - errMsg := fmt.Errorf("error getting most recent modification time: %s", err) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - - if tarFileResponse.Contents[0].LastModified.After(mostRecentModTime) { - log.Debug("folder already downloaded and is current") - - // Existing tar.gz file is up-to-date, return pre-signed URL - href, err := s3Ctrl.GetDownloadPresignedURL(bucket, outputFile, bh.Config.DefaultDownloadPresignedUrlExpiration) - if err != nil { - errMsg := fmt.Errorf("error getting presigned: %s", err) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - return c.JSON(http.StatusOK, string(href)) - } - log.Debug("folder already downloaded but is outdated starting the zip process") - } - - err = s3Ctrl.tarS3Files(response, bucket, outputFile, prefix) - if err != nil { - errMsg := fmt.Errorf("error tarring S3 files: %s", err) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - - href, err := s3Ctrl.GetDownloadPresignedURL(bucket, outputFile, bh.Config.DefaultDownloadPresignedUrlExpiration) - if err != nil { - errMsg := fmt.Errorf("error getting presigned URL: %s", err) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - - log.Info("successfully generated presigned URL for prefix:", prefix) - return c.JSON(http.StatusOK, string(href)) -} +// func (bh *BlobHandler) HandleGetPresignedURLMultiObj(c echo.Context) error { +// prefix := c.QueryParam("prefix") +// if prefix == "" { +// errMsg := fmt.Errorf("request must include a `prefix` parameter") +// log.Error(errMsg.Error()) +// return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) +// } + +// bucket := c.QueryParam("bucket") +// s3Ctrl, err := bh.GetController(bucket) +// if err != nil { +// errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) +// log.Error(errMsg.Error()) +// return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) +// } + +// if !strings.HasSuffix(prefix, "/") { +// prefix = prefix + "/" +// } + +// response, err := s3Ctrl.GetList(bucket, prefix, false) +// if err != nil { +// errMsg := fmt.Errorf("error getting list: %s", err.Error()) +// log.Error(errMsg.Error()) +// return c.JSON(http.StatusInternalServerError, errMsg.Error()) +// } +// if *response.KeyCount == 0 { +// errMsg := fmt.Errorf("the specified prefix %s does not exist in S3", prefix) +// log.Error(errMsg.Error()) +// return c.JSON(http.StatusNotFound, errMsg.Error()) +// } +// //check if size is below 5GB +// var size, fileCount uint64 +// err = bh.GetSize(response, &size, &fileCount) +// if err != nil { +// errMsg := fmt.Errorf("error getting size: %s", err.Error()) +// log.Error(errMsg.Error()) +// return c.JSON(http.StatusInternalServerError, errMsg.Error()) +// } + +// limit := uint64(1024 * 1024 * 1024 * 
bh.Config.DefaultZipDownloadSizeLimit) +// if size >= limit { +// errMsg := fmt.Errorf("request entity is larger than %v GB, current prefix size is: %v GB", bh.Config.DefaultZipDownloadSizeLimit, float64(size)/(1024*1024*1024)) +// log.Error(errMsg.Error()) +// return c.JSON(http.StatusRequestEntityTooLarge, errMsg.Error()) +// } + +// filename := fmt.Sprintf("%s.%s", strings.TrimSuffix(prefix, "/"), "tar.gz") +// outputFile := filepath.Join(bh.Config.DefaultTempPrefix, filename) + +// // Check if the tar.gz file already exists in S3 +// tarFileResponse, err := s3Ctrl.GetList(bucket, outputFile, false) +// if err != nil { +// errMsg := fmt.Errorf("error checking if tar.gz file exists in S3: %s", err) +// log.Error(errMsg.Error()) +// return c.JSON(http.StatusInternalServerError, errMsg.Error()) +// } + +// if len(tarFileResponse.Contents) > 0 { +// log.Debug("the prefix was once downloaded, checking if it is outdated") +// // Tar.gz file exists, now compare modification dates +// mostRecentModTime, err := s3Ctrl.getMostRecentModTime(bucket, prefix) +// if err != nil { +// errMsg := fmt.Errorf("error getting most recent modification time: %s", err) +// log.Error(errMsg.Error()) +// return c.JSON(http.StatusInternalServerError, errMsg.Error()) +// } + +// if tarFileResponse.Contents[0].LastModified.After(mostRecentModTime) { +// log.Debug("folder already downloaded and is current") + +// // Existing tar.gz file is up-to-date, return pre-signed URL +// href, err := s3Ctrl.GetDownloadPresignedURL(bucket, outputFile, bh.Config.DefaultDownloadPresignedUrlExpiration) +// if err != nil { +// errMsg := fmt.Errorf("error getting presigned: %s", err) +// log.Error(errMsg.Error()) +// return c.JSON(http.StatusInternalServerError, errMsg.Error()) +// } +// return c.JSON(http.StatusOK, string(href)) +// } +// log.Debug("folder already downloaded but is outdated starting the zip process") +// } + +// err = s3Ctrl.tarS3Files(response, bucket, outputFile, prefix) +// if err != nil { +// errMsg := fmt.Errorf("error tarring S3 files: %s", err) +// log.Error(errMsg.Error()) +// return c.JSON(http.StatusInternalServerError, errMsg.Error()) +// } + +// href, err := s3Ctrl.GetDownloadPresignedURL(bucket, outputFile, bh.Config.DefaultDownloadPresignedUrlExpiration) +// if err != nil { +// errMsg := fmt.Errorf("error getting presigned URL: %s", err) +// log.Error(errMsg.Error()) +// return c.JSON(http.StatusInternalServerError, errMsg.Error()) +// } + +// log.Info("successfully generated presigned URL for prefix:", prefix) +// return c.JSON(http.StatusOK, string(href)) +// } func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { prefix := c.QueryParam("prefix") diff --git a/main.go b/main.go index 3eacbc7..b2b3c94 100644 --- a/main.go +++ b/main.go @@ -54,7 +54,11 @@ func main() { if !ok { log.Fatal("AUTH_S3_LIMITED_WRITER env variable not set") } - allUsers = append(allUsers, s3LimitWriterRoleName) + s3LimitedReaderRoleName, ok := os.LookupEnv("AUTH_LIMITED_READER_ROLE") + if !ok { + log.Fatal("AUTH_LIMITED_READER_ROLE env variable not set") + } + allUsers = append(allUsers, s3LimitWriterRoleName, s3LimitedReaderRoleName) writers = append(writers, s3LimitWriterRoleName) } @@ -93,7 +97,7 @@ func main() { // prefix e.GET("/prefix/list", auth.Authorize(bh.HandleListByPrefix, allUsers...)) e.GET("/prefix/list_with_details", auth.Authorize(bh.HandleListByPrefixWithDetail, allUsers...)) - e.GET("/prefix/download", auth.Authorize(bh.HandleGetPresignedURLMultiObj, allUsers...)) + // 
e.GET("/prefix/download", auth.Authorize(bh.HandleGetPresignedURLMultiObj, allUsers...)) e.GET("/prefix/download/script", auth.Authorize(bh.HandleGenerateDownloadScript, allUsers...)) e.PUT("/prefix/move", auth.Authorize(bh.HandleMovePrefix, admin...)) e.DELETE("/prefix/delete", auth.Authorize(bh.HandleDeletePrefix, admin...)) @@ -108,6 +112,9 @@ func main() { // e.PUT("/object/cross-bucket/copy", auth.Authorize(bh., writers...)) // e.PUT("/prefix/cross-bucket/copy", auth.Authorize(bh., writers...)) + //auth + e.GET("/check_user_permission", auth.Authorize(bh.HandleCheckS3UserPermission, writers...)) + // Start server go func() { log.Info("server starting on port: ", os.Getenv("S3API_SERVICE_PORT")) From dd01954a70d261aca96dd6be8c269e3a0b9af39c Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Wed, 15 May 2024 17:08:08 -0400 Subject: [PATCH 02/23] add new checks that will retrieve accesible prefixes --- auth/database.go | 45 +++++++++++++++++++++--- blobstore/blobhandler.go | 28 ++++++++++++++- blobstore/blobstore.go | 75 ++++++++++++++++++++++++++++++---------- blobstore/upload.go | 10 +++--- go.mod | 2 +- go.sum | 14 -------- 6 files changed, 130 insertions(+), 44 deletions(-) diff --git a/auth/database.go b/auth/database.go index cae1e79..7f45ea1 100644 --- a/auth/database.go +++ b/auth/database.go @@ -6,13 +6,15 @@ import ( "os" "github.com/labstack/gommon/log" + "github.com/lib/pq" _ "github.com/lib/pq" ) // Database interface abstracts database operations type Database interface { - CheckUserPermission(userEmail, operation, s3_prefix string) bool + CheckUserPermission(userEmail, bucket, prefix string, operations []string) bool Close() error + GetUserAccessiblePrefixes(userEmail, bucket string, operations []string) ([]string, error) } type PostgresDB struct { @@ -63,21 +65,56 @@ func (db *PostgresDB) createTables() error { return nil } +func (db *PostgresDB) GetUserAccessiblePrefixes(userEmail, bucket string, operations []string) ([]string, error) { + query := ` + WITH unnested_permissions AS ( + SELECT DISTINCT unnest(allowed_s3_prefixes) AS allowed_prefix + FROM permissions + WHERE user_email = $1 AND operation = ANY($3) + ) + SELECT allowed_prefix + FROM unnested_permissions + WHERE allowed_prefix LIKE $2 || '/%' + ORDER BY allowed_prefix; + ` + + rows, err := db.Handle.Query(query, userEmail, "/"+bucket, pq.Array(operations)) + if err != nil { + return nil, fmt.Errorf("database error: %s", err) + } + defer rows.Close() + + var prefixes []string + var prefix string + for rows.Next() { + if err := rows.Scan(&prefix); err != nil { + return nil, fmt.Errorf("scan error: %s", err) + } + prefixes = append(prefixes, prefix) + } + if err = rows.Err(); err != nil { + return nil, fmt.Errorf("row error: %s", err) + } + + return prefixes, nil +} + // CheckUserPermission checks if a user has permission for a specific request. 
-func (db *PostgresDB) CheckUserPermission(userEmail, operation, s3_prefix string) bool { +func (db *PostgresDB) CheckUserPermission(userEmail, bucket, prefix string, operations []string) bool { + s3Prefix := fmt.Sprintf("/%s/%s", bucket, prefix) query := ` SELECT EXISTS ( SELECT 1 FROM permissions, UNNEST(allowed_s3_prefixes) AS allowed_prefix WHERE user_email = $1 - AND operation = $2 + AND operation = ANY($2) AND $3 LIKE allowed_prefix || '%' ); ` var hasPermission bool - if err := db.Handle.QueryRow(query, userEmail, operation, s3_prefix).Scan(&hasPermission); err != nil { + if err := db.Handle.QueryRow(query, userEmail, pq.Array(operations), s3Prefix).Scan(&hasPermission); err != nil { log.Errorf("error querying user permissions: %v", err) return false } diff --git a/blobstore/blobhandler.go b/blobstore/blobhandler.go index 051a8d4..c40ea21 100644 --- a/blobstore/blobhandler.go +++ b/blobstore/blobhandler.go @@ -30,6 +30,7 @@ type Config struct { // external sources like configuration files, environment variables should go here. AuthLevel int LimitedWriterRoleName string + LimitedReaderRoleName string DefaultTempPrefix string DefaultDownloadPresignedUrlExpiration int DefaultUploadPresignedUrlExpiration int @@ -153,7 +154,7 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { } if len(bucketNames) > 0 { - config.S3Controllers = append(config.S3Controllers, S3Controller{Sess: sess, S3Svc: s3SVC, Buckets: bucketNames, S3Mock: false}) + config.S3Controllers = append(config.S3Controllers, S3Controller{Sess: sess, S3Svc: s3SVC, Buckets: bucketNames}) } } @@ -305,3 +306,28 @@ func (bh *BlobHandler) PingWithAuth(c echo.Context) error { return c.JSON(http.StatusOK, bucketHealth) } + +func (bh *BlobHandler) HandleCheckS3UserPermission(c echo.Context) error { + if bh.Config.AuthLevel == 0 { + log.Info("Checked user permissions successfully") + return c.JSON(http.StatusOK, true) + } + prefix := c.QueryParam("prefix") + bucket := c.QueryParam("bucket") + operation := c.QueryParam("operation") + claims, ok := c.Get("claims").(*auth.Claims) + if !ok { + errMsg := fmt.Errorf("could not get claims from request context") + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + userEmail := claims.Email + if operation == "" || prefix == "" || bucket == "" { + errMsg := fmt.Errorf("`prefix`, `operation` and 'bucket are required params") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + } + isAllowed := bh.DB.CheckUserPermission(userEmail, bucket, prefix, []string{operation}) + log.Info("Checked user permissions successfully") + return c.JSON(http.StatusOK, isAllowed) +} diff --git a/blobstore/blobstore.go b/blobstore/blobstore.go index 7e54b41..11782c0 100644 --- a/blobstore/blobstore.go +++ b/blobstore/blobstore.go @@ -3,7 +3,7 @@ package blobstore import ( "fmt" "net/http" - "time" + "strings" "github.com/Dewberry/s3api/auth" "github.com/Dewberry/s3api/utils" @@ -14,6 +14,7 @@ import ( ) func (s3Ctrl *S3Controller) KeyExists(bucket string, key string) (bool, error) { + _, err := s3Ctrl.S3Svc.HeadObject(&s3.HeadObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), @@ -33,23 +34,23 @@ func (s3Ctrl *S3Controller) KeyExists(bucket string, key string) (bool, error) { } // function that will get the most recently uploaded file in a prefix -func (s3Ctrl *S3Controller) getMostRecentModTime(bucket, prefix string) (time.Time, error) { - // Initialize a time variable to store the most recent 
modification time - var mostRecent time.Time +// func (s3Ctrl *S3Controller) getMostRecentModTime(bucket, prefix string, permissions []string, fullAccess bool) (time.Time, error) { +// // Initialize a time variable to store the most recent modification time +// var mostRecent time.Time - // Call GetList to retrieve the list of objects with the specified prefix - response, err := s3Ctrl.GetList(bucket, prefix, false) - if err != nil { - return time.Time{}, err - } - // Iterate over the returned objects to find the most recent modification time - for _, item := range response.Contents { - if item.LastModified != nil && item.LastModified.After(mostRecent) { - mostRecent = *item.LastModified - } - } - return mostRecent, nil -} +// // Call GetList to retrieve the list of objects with the specified prefix +// response, err := s3Ctrl.GetList(bucket, prefix, false) +// if err != nil { +// return time.Time{}, err +// } +// // Iterate over the returned objects to find the most recent modification time +// for _, item := range response.Contents { +// if item.LastModified != nil && item.LastModified.After(mostRecent) { +// mostRecent = *item.LastModified +// } +// } +// return mostRecent, nil +// } func arrayContains(a string, arr []string) bool { for _, b := range arr { @@ -80,7 +81,7 @@ func isIdenticalArray(array1, array2 []string) bool { return true } -func (bh *BlobHandler) CheckUserS3WritePermission(c echo.Context, bucket, key string) (int, error) { +func (bh *BlobHandler) CheckUserS3Permission(c echo.Context, bucket, prefix string, permissions []string) (int, error) { if bh.Config.AuthLevel > 0 { claims, ok := c.Get("claims").(*auth.Claims) if !ok { @@ -91,13 +92,49 @@ func (bh *BlobHandler) CheckUserS3WritePermission(c echo.Context, bucket, key st // Check for required roles isLimitedWriter := utils.StringInSlice(bh.Config.LimitedWriterRoleName, roles) + // Ensure the prefix ends with a slash + if !strings.HasSuffix(prefix, "/") { + prefix += "/" + } // We assume if someone is limited_writer, they should never be admin or super_writer if isLimitedWriter { - if !bh.DB.CheckUserPermission(ue, "write", fmt.Sprintf("/%s/%s", bucket, key)) { + if !bh.DB.CheckUserPermission(ue, bucket, prefix, permissions) { return http.StatusForbidden, fmt.Errorf("forbidden") } } } return 0, nil } + +func (bh *BlobHandler) GetUserS3ReadListPermission(c echo.Context, bucket string) ([]string, bool, error) { + permissions := make([]string, 0) + + if bh.Config.AuthLevel > 0 { + fullAccess := false + claims, ok := c.Get("claims").(*auth.Claims) + if !ok { + return permissions, fullAccess, fmt.Errorf("could not get claims from request context") + } + roles := claims.RealmAccess["roles"] + + // Check if user has the limited reader role + isLimitedReader := utils.StringInSlice(bh.Config.LimitedReaderRoleName, roles) + + // If user is not a limited reader, assume they have full read access + if !isLimitedReader { + fullAccess = true // Indicating full access + return permissions, fullAccess, nil + } + + // If user is a limited reader, fetch specific permissions + ue := claims.Email + permissions, err := bh.DB.GetUserAccessiblePrefixes(ue, bucket, []string{"read", "write"}) + if err != nil { + return permissions, fullAccess, err + } + return permissions, fullAccess, nil + } + + return permissions, true, nil +} diff --git a/blobstore/upload.go b/blobstore/upload.go index 60091b5..b0af696 100644 --- a/blobstore/upload.go +++ b/blobstore/upload.go @@ -130,7 +130,7 @@ func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) 
error { return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - httpCode, err := bh.CheckUserS3WritePermission(c, bucket, key) + httpCode, err := bh.CheckUserS3Permission(c, bucket, key, []string{"write"}) if err != nil { errMsg := fmt.Errorf("error while checking for user permission: %s", err) log.Error(errMsg.Error()) @@ -271,7 +271,7 @@ func (bh *BlobHandler) HandleGetPresignedUploadURL(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - httpCode, err := bh.CheckUserS3WritePermission(c, bucket, key) + httpCode, err := bh.CheckUserS3Permission(c, bucket, key, []string{"write"}) if err != nil { errMsg := fmt.Errorf("error while checking for user permission: %s", err) log.Error(errMsg.Error()) @@ -341,7 +341,7 @@ func (bh *BlobHandler) HandleGetMultipartUploadID(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - httpCode, err := bh.CheckUserS3WritePermission(c, bucket, key) + httpCode, err := bh.CheckUserS3Permission(c, bucket, key, []string{"write"}) if err != nil { errMsg := fmt.Errorf("error while checking for user permission: %s", err) log.Error(errMsg.Error()) @@ -390,7 +390,7 @@ func (bh *BlobHandler) HandleCompleteMultipartUpload(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - httpCode, err := bh.CheckUserS3WritePermission(c, bucket, key) + httpCode, err := bh.CheckUserS3Permission(c, bucket, key, []string{"write"}) if err != nil { errMsg := fmt.Errorf("error while checking for user permission: %s", err.Error()) log.Error(errMsg.Error()) @@ -458,7 +458,7 @@ func (bh *BlobHandler) HandleAbortMultipartUpload(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - httpCode, err := bh.CheckUserS3WritePermission(c, bucket, key) + httpCode, err := bh.CheckUserS3Permission(c, bucket, key, []string{"write"}) if err != nil { errMsg := fmt.Errorf("error while checking for user permission: %s", err) log.Error(errMsg.Error()) diff --git a/go.mod b/go.mod index 33e9d6b..736fa77 100644 --- a/go.mod +++ b/go.mod @@ -7,13 +7,13 @@ require ( github.com/golang-jwt/jwt/v5 v5.0.0 github.com/labstack/echo/v4 v4.11.1 github.com/labstack/gommon v0.4.0 + github.com/lib/pq v1.10.9 github.com/sirupsen/logrus v1.9.3 ) require ( github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/lib/pq v1.10.9 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect diff --git a/go.sum b/go.sum index 66160ec..4f2a961 100644 --- a/go.sum +++ b/go.sum @@ -40,10 +40,6 @@ github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.17.0 
h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -51,8 +47,6 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -69,10 +63,6 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -82,10 +72,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= From 28456a99fb6f6a48aa6b76fcc84c58712fbc1bef Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Wed, 15 May 2024 17:29:01 -0400 Subject: [PATCH 03/23] add restriction on read list --- blobstore/list.go | 88 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 68 insertions(+), 20 deletions(-) diff --git a/blobstore/list.go b/blobstore/list.go index 285178f..818e437 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -146,36 +146,50 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { var results []ListResult var count int - + permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + if err != nil { + errMsg := 
fmt.Errorf("error fetching user permissions: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + if !fullAccess && len(permissions) == 0 { + errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } processPage := func(page *s3.ListObjectsV2Output) error { for _, cp := range page.CommonPrefixes { // Handle directories (common prefixes) - dir := ListResult{ - ID: count, - Name: filepath.Base(*cp.Prefix), - Size: "", - Path: *cp.Prefix, - Type: "", - IsDir: true, - ModifiedBy: "", + if fullAccess || isPermittedPrefix(bucket, *cp.Prefix, permissions) { + dir := ListResult{ + ID: count, + Name: filepath.Base(*cp.Prefix), + Size: "", + Path: *cp.Prefix, + Type: "", + IsDir: true, + ModifiedBy: "", + } + results = append(results, dir) } - results = append(results, dir) count++ } for _, object := range page.Contents { // Handle files - file := ListResult{ - ID: count, - Name: filepath.Base(*object.Key), - Size: strconv.FormatInt(*object.Size, 10), - Path: filepath.Dir(*object.Key), - Type: filepath.Ext(*object.Key), - IsDir: false, - Modified: *object.LastModified, - ModifiedBy: "", + if fullAccess || isPermittedPrefix(bucket, *object.Key, permissions) { + file := ListResult{ + ID: count, + Name: filepath.Base(*object.Key), + Size: strconv.FormatInt(*object.Size, 10), + Path: filepath.Dir(*object.Key), + Type: filepath.Ext(*object.Key), + IsDir: false, + Modified: *object.LastModified, + ModifiedBy: "", + } + results = append(results, file) } - results = append(results, file) count++ } return nil @@ -257,3 +271,37 @@ func (s3Ctrl *S3Controller) GetListWithCallBack(bucket, prefix string, delimiter } return err // Return any errors encountered in the pagination process } + +func isPermittedPrefix(bucket, prefix string, permissions []string) bool { + prefixForChecking := fmt.Sprintf("/%s/%s", bucket, prefix) + + // Check if any of the permissions indicate the prefixForChecking is a parent directory + for _, perm := range permissions { + // Add a trailing slash to permission if it represents a directory + if !strings.HasSuffix(perm, "/") { + perm += "/" + } + // Split the paths into components + prefixComponents := strings.Split(prefixForChecking, "/") + permComponents := strings.Split(perm, "/") + + // Compare each component + match := true + for i := 1; i < len(prefixComponents) && i < len(permComponents); i++ { + if permComponents[i] == "" || prefixComponents[i] == "" { + break + } + if prefixComponents[i] != permComponents[i] { + match = false + break + } + } + + // If all components match up to the length of the permission path, + // and the permission path has no additional components, return true + if match { + return true + } + } + return false +} From 5b4598fbc84c17b1a8960fd185337521da39ebad Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Thu, 16 May 2024 09:15:37 -0400 Subject: [PATCH 04/23] added bucket limitation --- blobstore/buckets.go | 54 ++++++++++++++++++++++++++++++++------------ blobstore/config.go | 1 + 2 files changed, 41 insertions(+), 14 deletions(-) diff --git a/blobstore/buckets.go b/blobstore/buckets.go index c4df444..66b8f4c 100644 --- a/blobstore/buckets.go +++ b/blobstore/buckets.go @@ -5,6 +5,7 @@ package blobstore import ( "fmt" "net/http" + "sort" "github.com/aws/aws-sdk-go/service/s3" "github.com/labstack/echo/v4" @@ -75,17 +76,22 @@ func (s3Ctrl *S3Controller) ListBuckets() 
(*s3.ListBucketsOutput, error) { // } type BucketInfo struct { - ID int `json:"id"` - Name string `json:"name"` + ID int `json:"id"` + Name string `json:"name"` + CanRead bool `json:"can_read"` } func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { var allBuckets []BucketInfo - currentID := 1 // Initialize ID counter + bh.Mu.Lock() - for i := 0; i < len(bh.S3Controllers); i++ { + defer bh.Mu.Unlock() + + fullAccess := false + + for _, controller := range bh.S3Controllers { if bh.AllowAllBuckets { - result, err := bh.S3Controllers[i].ListBuckets() + result, err := controller.ListBuckets() if err != nil { errMsg := fmt.Errorf("error returning list of buckets, error: %s", err) log.Error(errMsg) @@ -95,24 +101,44 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { for _, b := range result.Buckets { mostRecentBucketList = append(mostRecentBucketList, *b.Name) } - if !isIdenticalArray(bh.S3Controllers[i].Buckets, mostRecentBucketList) { - - bh.S3Controllers[i].Buckets = mostRecentBucketList - + if !isIdenticalArray(controller.Buckets, mostRecentBucketList) { + controller.Buckets = mostRecentBucketList } } + // Extract the bucket names from the response and append to allBuckets - for _, bucket := range bh.S3Controllers[i].Buckets { + for i, bucket := range controller.Buckets { + permissions, fullAccessTmp, err := bh.GetUserS3ReadListPermission(c, bucket) + if err != nil { + return c.JSON(http.StatusInternalServerError, fmt.Errorf("error fetching user permissions: %s", err.Error())) + } + fullAccess = fullAccess || fullAccessTmp // Update full access based on any bucket returning full access + + canRead := len(permissions) > 0 || fullAccessTmp // Set canRead based on permissions or full access allBuckets = append(allBuckets, BucketInfo{ - ID: currentID, - Name: bucket, + ID: i, + Name: bucket, + CanRead: canRead, }) - currentID++ // Increment the ID for the next bucket + } + } + if fullAccess { // If full access is true, set CanRead to true for all buckets + for i := range allBuckets { + allBuckets[i].CanRead = true } } - bh.Mu.Unlock() + + // Sorting allBuckets slice by CanRead true first and then by Name field alphabetically + sort.Slice(allBuckets, func(i, j int) bool { + if allBuckets[i].CanRead == allBuckets[j].CanRead { + return allBuckets[i].Name < allBuckets[j].Name + } + return allBuckets[i].CanRead && !allBuckets[j].CanRead + }) + log.Info("Successfully retrieved list of buckets") + return c.JSON(http.StatusOK, allBuckets) } diff --git a/blobstore/config.go b/blobstore/config.go index 8a79bc6..a97cb51 100644 --- a/blobstore/config.go +++ b/blobstore/config.go @@ -20,6 +20,7 @@ func newConfig(authLvl int) *Config { c := &Config{ AuthLevel: authLvl, LimitedWriterRoleName: os.Getenv("AUTH_LIMITED_WRITER_ROLE"), + LimitedReaderRoleName: os.Getenv("AUTH_LIMITED_READER_ROLE"), DefaultTempPrefix: getEnvOrDefault("TEMP_PREFIX", defaultTempPrefix), DefaultDownloadPresignedUrlExpiration: getIntEnvOrDefault("DOWNLOAD_URL_EXP_DAYS", defaultDownloadPresignedUrlExpiration), DefaultUploadPresignedUrlExpiration: getIntEnvOrDefault("UPLOAD_URL_EXP_MIN", defaultUploadPresignedUrlExpiration), From 42a5e14e78fe7cce71a30d4b6428ab5bc8354b9c Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Thu, 16 May 2024 10:13:34 -0400 Subject: [PATCH 05/23] add limitation for list with no detail --- blobstore/list.go | 68 ++++++++++++++++++++++++++++++----------------- 1 file changed, 43 insertions(+), 25 deletions(-) diff --git a/blobstore/list.go b/blobstore/list.go index 818e437..324f23c 
100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -30,11 +30,6 @@ type ListResult struct { // HandleListByPrefix handles the API endpoint for listing objects by prefix in S3 bucket. func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { prefix := c.QueryParam("prefix") - if prefix == "" { - errMsg := fmt.Errorf("request must include a `prefix` parameter") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) @@ -67,44 +62,67 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { } } - isObject, err := s3Ctrl.KeyExists(bucket, prefix) - if err != nil { - errMsg := fmt.Errorf("can't find bucket or object %s" + err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - - if isObject { - objMeta, err := s3Ctrl.GetMetaData(bucket, prefix) + if prefix != "" && prefix != "./" && prefix != "/" { + isObject, err := s3Ctrl.KeyExists(bucket, prefix) if err != nil { - errMsg := fmt.Errorf("error getting metadata: %s" + err.Error()) + errMsg := fmt.Errorf("error checking if key exists: %s", err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusInternalServerError, errMsg.Error()) } - if *objMeta.ContentLength == 0 { - log.Infof("Detected a zero byte directory marker within prefix: %s", prefix) - } else { - errMsg := fmt.Errorf("`%s` is an object, not a prefix. please see options for keys or pass a prefix", prefix) - log.Error(errMsg.Error()) - return c.JSON(http.StatusTeapot, errMsg.Error()) + if isObject { + objMeta, err := s3Ctrl.GetMetaData(bucket, prefix) + if err != nil { + errMsg := fmt.Errorf("error checking for object's metadata: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + if *objMeta.ContentLength == 0 { + log.Infof("detected a zero byte directory marker within prefix: %s", prefix) + } else { + errMsg := fmt.Errorf("`%s` is an object, not a prefix. 
please see options for keys or pass a prefix", prefix) + log.Error(errMsg.Error()) + return c.JSON(http.StatusTeapot, errMsg.Error()) + } } + prefix = strings.Trim(prefix, "/") + "/" } - var objectKeys []string + var result []string + permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + if err != nil { + errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + if !fullAccess && len(permissions) == 0 { + errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } processPage := func(page *s3.ListObjectsV2Output) error { + for _, cp := range page.CommonPrefixes { + // Handle directories (common prefixes) + if fullAccess || isPermittedPrefix(bucket, *cp.Prefix, permissions) { + result = append(result, aws.StringValue(cp.Prefix)) + + } + } for _, object := range page.Contents { - objectKeys = append(objectKeys, aws.StringValue(object.Key)) + // Handle files + if fullAccess || isPermittedPrefix(bucket, *object.Key, permissions) { + result = append(result, aws.StringValue(object.Key)) + } + } return nil } - err = s3Ctrl.GetListWithCallBack(bucket, prefix, delimiter, processPage) if err != nil { return c.JSON(http.StatusInternalServerError, fmt.Sprintf("Error processing objects: %v", err)) } log.Info("Successfully retrieved list by prefix:", prefix) - return c.JSON(http.StatusOK, objectKeys) + return c.JSON(http.StatusOK, result) } // HandleListByPrefixWithDetail retrieves a detailed list of objects in the specified S3 bucket with the given prefix. From 392c55c36fa8dd95daa5527811c3848f7bb35ba5 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Thu, 16 May 2024 10:37:12 -0400 Subject: [PATCH 06/23] limit read access to endpoints with allUsers --- blobstore/metadata.go | 51 +++++++++++++++++++++++- blobstore/object_content.go | 16 ++++++++ blobstore/presigned_url.go | 79 +++++++++++++++++++++++++------------ 3 files changed, 120 insertions(+), 26 deletions(-) diff --git a/blobstore/metadata.go b/blobstore/metadata.go index 9ecb2ad..cb7ca9a 100644 --- a/blobstore/metadata.go +++ b/blobstore/metadata.go @@ -44,7 +44,22 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - + permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + if err != nil { + errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + if !fullAccess && len(permissions) == 0 { + errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } + if !fullAccess && !isPermittedPrefix(bucket, prefix, permissions) { + errMsg := fmt.Errorf("user does not have read permission to read this prefix %s", prefix) + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } // Check if the prefix points directly to an object isObject, err := s3Ctrl.KeyExists(bucket, prefix) if err != nil { @@ -104,7 +119,23 @@ func (bh *BlobHandler) HandleGetMetaData(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } + permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) 
+ if err != nil { + errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + if !fullAccess && len(permissions) == 0 { + errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } + if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { + errMsg := fmt.Errorf("user does not have read permission to read this key %s", key) + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } result, err := s3Ctrl.GetMetaData(bucket, key) if err != nil { if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { @@ -137,6 +168,24 @@ func (bh *BlobHandler) HandleGetObjExist(c echo.Context) error { return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } + permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + if err != nil { + errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + if !fullAccess && len(permissions) == 0 { + errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } + + if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { + errMsg := fmt.Errorf("user does not have read permission to read this key %s", key) + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } + result, err := s3Ctrl.KeyExists(bucket, key) if err != nil { errMsg := fmt.Errorf("error checking if key exists: %s", err.Error()) diff --git a/blobstore/object_content.go b/blobstore/object_content.go index 401e001..dc9cf73 100644 --- a/blobstore/object_content.go +++ b/blobstore/object_content.go @@ -52,7 +52,23 @@ func (bh *BlobHandler) HandleObjectContents(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } + permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + if err != nil { + errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + if !fullAccess && len(permissions) == 0 { + errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } + if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { + errMsg := fmt.Errorf("user does not have read permission to read this key %s", key) + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } body, err := s3Ctrl.FetchObjectContent(bucket, key) if err != nil { errMsg := fmt.Errorf("error fetching object's content: %s", err.Error()) diff --git a/blobstore/presigned_url.go b/blobstore/presigned_url.go index abc7dba..8f5404d 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -124,7 +124,23 @@ func (bh *BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } + permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + if err != nil { + errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) + log.Error(errMsg.Error()) + 
return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + if !fullAccess && len(permissions) == 0 { + errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } + if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { + errMsg := fmt.Errorf("user does not have read permission to read this key %s", key) + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } keyExist, err := s3Ctrl.KeyExists(bucket, key) if err != nil { errMsg := fmt.Errorf("checking if key exists: %s", err.Error()) @@ -278,38 +294,51 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { scriptBuilder.WriteString("REM 4. Initiate the Download: Double-click the renamed \".bat\" file to initiate the download process. Windows might display a warning message to protect your PC.\n") scriptBuilder.WriteString("REM 5. Windows Defender SmartScreen (Optional): If you see a message like \"Windows Defender SmartScreen prevented an unrecognized app from starting,\" click \"More info\" and then click \"Run anyway\" to proceed with the download.\n\n") scriptBuilder.WriteString(fmt.Sprintf("mkdir \"%s\"\n", basePrefix)) - + permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + if err != nil { + errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + if !fullAccess && len(permissions) == 0 { + errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } // Define the processPage function processPage := func(page *s3.ListObjectsV2Output) error { for _, item := range page.Contents { - // Size checking - if item.Size != nil { - totalSize += uint64(*item.Size) - if totalSize > uint64(bh.Config.DefaultScriptDownloadSizeLimit*1024*1024*1024) { - return fmt.Errorf("size limit of %d GB exceeded", bh.Config.DefaultScriptDownloadSizeLimit) - } + if fullAccess || isPermittedPrefix(bucket, *item.Key, permissions) { - } + // Size checking + if item.Size != nil { + totalSize += uint64(*item.Size) + if totalSize > uint64(bh.Config.DefaultScriptDownloadSizeLimit*1024*1024*1024) { + return fmt.Errorf("size limit of %d GB exceeded", bh.Config.DefaultScriptDownloadSizeLimit) + } - // Script generation logic (replicating your directory creation and URL logic) - relativePath := strings.TrimPrefix(*item.Key, filepath.Dir(prefix)+"/") - dirPath := filepath.Join(basePrefix, filepath.Dir(relativePath)) - if _, exists := createdDirs[dirPath]; !exists && dirPath != basePrefix { - scriptBuilder.WriteString(fmt.Sprintf("mkdir \"%s\"\n", dirPath)) - createdDirs[dirPath] = true - } + } - fullPath := filepath.Join(basePrefix, relativePath) - presignedURL, err := s3Ctrl.GetDownloadPresignedURL(bucket, *item.Key, bh.Config.DefaultDownloadPresignedUrlExpiration) - if err != nil { - return fmt.Errorf("error generating presigned URL for object %s: %v", *item.Key, err) - } - url, err := url.QueryUnescape(presignedURL) - if err != nil { - return fmt.Errorf("error unescaping URL encoding: %v", err) + // Script generation logic (replicating your directory creation and URL logic) + relativePath := strings.TrimPrefix(*item.Key, filepath.Dir(prefix)+"/") + dirPath := filepath.Join(basePrefix, filepath.Dir(relativePath)) + if _, exists := 
createdDirs[dirPath]; !exists && dirPath != basePrefix { + scriptBuilder.WriteString(fmt.Sprintf("mkdir \"%s\"\n", dirPath)) + createdDirs[dirPath] = true + } + + fullPath := filepath.Join(basePrefix, relativePath) + presignedURL, err := s3Ctrl.GetDownloadPresignedURL(bucket, *item.Key, bh.Config.DefaultDownloadPresignedUrlExpiration) + if err != nil { + return fmt.Errorf("error generating presigned URL for object %s: %v", *item.Key, err) + } + url, err := url.QueryUnescape(presignedURL) + if err != nil { + return fmt.Errorf("error unescaping URL encoding: %v", err) + } + encodedURL := strings.ReplaceAll(url, " ", "%20") + scriptBuilder.WriteString(fmt.Sprintf("if exist \"%s\" (echo skipping existing file) else (curl -v -o \"%s\" \"%s\")\n", fullPath, fullPath, encodedURL)) } - encodedURL := strings.ReplaceAll(url, " ", "%20") - scriptBuilder.WriteString(fmt.Sprintf("if exist \"%s\" (echo skipping existing file) else (curl -v -o \"%s\" \"%s\")\n", fullPath, fullPath, encodedURL)) } return nil } From 3626fbfad146575b0b8f08be6e060e8275ca63e0 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Thu, 16 May 2024 10:44:33 -0400 Subject: [PATCH 07/23] remove deprecated prefix test --- e2e-test/e2eCollection.json | 148 ------------------------------------ 1 file changed, 148 deletions(-) diff --git a/e2e-test/e2eCollection.json b/e2e-test/e2eCollection.json index 6291595..dbf9f20 100644 --- a/e2e-test/e2eCollection.json +++ b/e2e-test/e2eCollection.json @@ -727,55 +727,6 @@ }, "response": [] }, - { - "name": "10/prefix/download", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/prefix/download?prefix={{e2ePathToObj}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "prefix", - "download" - ], - "query": [ - { - "key": "prefix", - "value": "{{e2ePathToObj}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, { "name": "10.2/prefix/download_script", "request": { @@ -2528,56 +2479,6 @@ }, "response": [] }, - { - "name": "10/prefix/download", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/prefix/download?bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "prefix", - "download" - ], - "query": [ - { - "key": "prefix", - "value": "{{e2ePathToObj}}", - "disabled": true - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, { "name": "10.2/prefix/download/script", "event": [ @@ -2911,55 +2812,6 @@ }, "response": [] }, - { - "name": "10/prefix/download", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/prefix/download?prefix=invalid&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "prefix", - "download" - ], - 
"query": [ - { - "key": "prefix", - "value": "invalid" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, { "name": "5/object/download", "event": [ From 9b21d30e3ea95e3198675e43b200103a1c1e458c Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Thu, 13 Jun 2024 10:10:51 -0400 Subject: [PATCH 08/23] clean up and refactor permissions logic --- auth/database.go | 5 +++-- blobstore/blobhandler.go | 13 ++++++++++- blobstore/list.go | 44 ++++++++++++++----------------------- blobstore/metadata.go | 36 ++++++++---------------------- blobstore/object_content.go | 12 +++------- blobstore/presigned_url.go | 25 ++++++--------------- 6 files changed, 50 insertions(+), 85 deletions(-) diff --git a/auth/database.go b/auth/database.go index 7f45ea1..bd1d929 100644 --- a/auth/database.go +++ b/auth/database.go @@ -21,7 +21,7 @@ type PostgresDB struct { Handle *sql.DB } -// Initialize the database and create tables if they do not exist. +// NewPostgresDB initializes the database and creates tables if they do not exist. func NewPostgresDB() (*PostgresDB, error) { connString, exist := os.LookupEnv("POSTGRES_CONN_STRING") if !exist { @@ -43,7 +43,7 @@ func NewPostgresDB() (*PostgresDB, error) { return pgDB, nil } -// Creates the necessary tables in the database. +// createTables creates the necessary tables in the database. func (db *PostgresDB) createTables() error { createPermissionsTable := ` CREATE TABLE IF NOT EXISTS permissions ( @@ -65,6 +65,7 @@ func (db *PostgresDB) createTables() error { return nil } +// GetUserAccessiblePrefixes retrieves the accessible prefixes for a user. func (db *PostgresDB) GetUserAccessiblePrefixes(userEmail, bucket string, operations []string) ([]string, error) { query := ` WITH unnested_permissions AS ( diff --git a/blobstore/blobhandler.go b/blobstore/blobhandler.go index 5e519a4..c7db7b6 100644 --- a/blobstore/blobhandler.go +++ b/blobstore/blobhandler.go @@ -154,7 +154,7 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { } if len(bucketNames) > 0 { - config.S3Controllers = append(config.S3Controllers, S3Controller{Sess: sess, S3Svc: s3SVC, Buckets: bucketNames}) + config.S3Controllers = append(config.S3Controllers, S3Controller{Sess: sess, S3Svc: s3SVC, Buckets: bucketNames, S3Mock: false}) } } @@ -307,6 +307,17 @@ func (bh *BlobHandler) PingWithAuth(c echo.Context) error { return c.JSON(http.StatusOK, bucketHealth) } +func (bh *BlobHandler) GetS3ReadPermissions(c echo.Context, bucket string) ([]string, bool, int, error) { + permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + if err != nil { + return nil, false, http.StatusInternalServerError, fmt.Errorf("error fetching user permissions: %s", err.Error()) + } + if !fullAccess && len(permissions) == 0 { + return nil, false, http.StatusForbidden, fmt.Errorf("user does not have read permission to read the %s bucket", bucket) + } + return permissions, fullAccess, http.StatusOK, nil +} + func (bh *BlobHandler) HandleCheckS3UserPermission(c echo.Context) error { if bh.Config.AuthLevel == 0 { log.Info("Checked user permissions successfully") diff --git a/blobstore/list.go b/blobstore/list.go index cf7cff5..11b1fe6 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -30,7 +30,7 @@ type ListResult struct { // CheckAndAdjustPrefix checks if the prefix is an object and adjusts the prefix accordingly. // Returns the adjusted prefix, an error message (if any), and the HTTP status code. 
func CheckAndAdjustPrefix(s3Ctrl *S3Controller, bucket, prefix string) (string, string, int) { - //As of 6/12/24, unsure why ./ is included here, may be needed for an edge case, but could also cause problems + // As of 6/12/24, unsure why ./ is included here, may be needed for an edge case, but could also cause problems if prefix != "" && prefix != "./" && prefix != "/" { isObject, err := s3Ctrl.KeyExists(bucket, prefix) if err != nil { @@ -41,11 +41,11 @@ func CheckAndAdjustPrefix(s3Ctrl *S3Controller, bucket, prefix string) (string, if err != nil { return "", fmt.Sprintf("error checking for object's metadata: %s", err.Error()), http.StatusInternalServerError } - //this is because AWS considers empty prefixes with a .keep as an object, so we ignore and log + // This is because AWS considers empty prefixes with a .keep as an object, so we ignore and log if *objMeta.ContentLength == 0 { log.Infof("detected a zero byte directory marker within prefix: %s", prefix) } else { - return "", fmt.Sprintf("`%s` is an object, not a prefix. please see options for keys or pass a prefix", prefix), http.StatusTeapot + return "", fmt.Sprintf("`%s` is an object, not a prefix. Please see options for keys or pass a prefix", prefix), http.StatusTeapot } } prefix = strings.Trim(prefix, "/") + "/" @@ -53,7 +53,7 @@ func CheckAndAdjustPrefix(s3Ctrl *S3Controller, bucket, prefix string) (string, return prefix, "", http.StatusOK } -// HandleListByPrefix handles the API endpoint for listing objects by prefix in S3 bucket. +// HandleListByPrefix handles the API endpoint for listing objects by prefix in an S3 bucket. func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { prefix := c.QueryParam("prefix") @@ -93,16 +93,10 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { prefix = adjustedPrefix var result []string - permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) if err != nil { - errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - if !fullAccess && len(permissions) == 0 { - errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + log.Error(err.Error()) + return c.JSON(statusCode, err.Error()) } processPage := func(page *s3.ListObjectsV2Output) error { for _, cp := range page.CommonPrefixes { @@ -153,16 +147,10 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { var results []ListResult var count int - permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) if err != nil { - errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - if !fullAccess && len(permissions) == 0 { - errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + log.Error(err.Error()) + return c.JSON(statusCode, err.Error()) } processPage := func(page *s3.ListObjectsV2Output) error { for _, cp := range page.CommonPrefixes { @@ -197,7 +185,6 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { } results = 
append(results, file) } - count++ } return nil } @@ -214,9 +201,9 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { } // GetList retrieves a list of objects in the specified S3 bucket with the given prefix. -// if delimiter is set to true then it is going to search for any objects within the prefix provided, if no object sare found it will -// return null even if there was prefixes within the user provided prefix. If delimiter is set to false then it will look for all prefixes -// that start with the user provided prefix. +// If delimiter is set to true, it will search for any objects within the prefix provided. +// If no objects are found, it will return null even if there were prefixes within the user-provided prefix. +// If delimiter is set to false, it will look for all prefixes that start with the user-provided prefix. func (s3Ctrl *S3Controller) GetList(bucket, prefix string, delimiter bool) (*s3.ListObjectsV2Output, error) { // Set up input parameters for the ListObjectsV2 API input := &s3.ListObjectsV2Input{ @@ -252,8 +239,8 @@ func (s3Ctrl *S3Controller) GetList(bucket, prefix string, delimiter bool) (*s3. return response, nil } -// GetListWithCallBack is the same as GetList, except instead of returning the entire list at once, it gives you the option of processing page by page -// this method is safer than GetList as it avoid memory overload for large datasets since it does not store the entire list in memory but rather processes it on the go. +// GetListWithCallBack is the same as GetList, except instead of returning the entire list at once, it allows processing page by page. +// This method is safer than GetList as it avoids memory overload for large datasets by processing data on the go. func (s3Ctrl *S3Controller) GetListWithCallBack(bucket, prefix string, delimiter bool, processPage func(*s3.ListObjectsV2Output) error) error { input := &s3.ListObjectsV2Input{ Bucket: aws.String(bucket), @@ -279,6 +266,7 @@ func (s3Ctrl *S3Controller) GetListWithCallBack(bucket, prefix string, delimiter return err // Return any errors encountered in the pagination process } +// isPermittedPrefix checks if the prefix is within the user's permissions. 
func isPermittedPrefix(bucket, prefix string, permissions []string) bool { prefixForChecking := fmt.Sprintf("/%s/%s", bucket, prefix) diff --git a/blobstore/metadata.go b/blobstore/metadata.go index 96d805a..88c9895 100644 --- a/blobstore/metadata.go +++ b/blobstore/metadata.go @@ -43,16 +43,10 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) if err != nil { - errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - if !fullAccess && len(permissions) == 0 { - errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + log.Error(err.Error()) + return c.JSON(statusCode, err.Error()) } if !fullAccess && !isPermittedPrefix(bucket, prefix, permissions) { errMsg := fmt.Errorf("user does not have read permission to read this prefix %s", prefix) @@ -118,16 +112,10 @@ func (bh *BlobHandler) HandleGetMetaData(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) if err != nil { - errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - if !fullAccess && len(permissions) == 0 { - errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + log.Error(err.Error()) + return c.JSON(statusCode, err.Error()) } if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { @@ -167,16 +155,10 @@ func (bh *BlobHandler) HandleGetObjExist(c echo.Context) error { return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) if err != nil { - errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - if !fullAccess && len(permissions) == 0 { - errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + log.Error(err.Error()) + return c.JSON(statusCode, err.Error()) } if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { diff --git a/blobstore/object_content.go b/blobstore/object_content.go index c174ac8..1a1be68 100644 --- a/blobstore/object_content.go +++ b/blobstore/object_content.go @@ -47,16 +47,10 @@ func (bh *BlobHandler) HandleObjectContents(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) if err != nil { - errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) - 
log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - if !fullAccess && len(permissions) == 0 { - errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + log.Error(err.Error()) + return c.JSON(statusCode, err.Error()) } if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { diff --git a/blobstore/presigned_url.go b/blobstore/presigned_url.go index 4b26a68..4e311e6 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -124,16 +124,10 @@ func (bh *BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) if err != nil { - errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - if !fullAccess && len(permissions) == 0 { - errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + log.Error(err.Error()) + return c.JSON(statusCode, err.Error()) } if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { @@ -293,16 +287,11 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { scriptBuilder.WriteString("REM 4. Initiate the Download: Double-click the renamed \".bat\" file to initiate the download process. Windows might display a warning message to protect your PC.\n") scriptBuilder.WriteString("REM 5. 
Windows Defender SmartScreen (Optional): If you see a message like \"Windows Defender SmartScreen prevented an unrecognized app from starting,\" click \"More info\" and then click \"Run anyway\" to proceed with the download.\n\n") scriptBuilder.WriteString(fmt.Sprintf("mkdir \"%s\"\n", basePrefix)) - permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + + permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) if err != nil { - errMsg := fmt.Errorf("error fetching user permissions: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - if !fullAccess && len(permissions) == 0 { - errMsg := fmt.Errorf("user does not have read permission to read the %s bucket", bucket) - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + log.Error(err.Error()) + return c.JSON(statusCode, err.Error()) } // Define the processPage function processPage := func(page *s3.ListObjectsV2Output) error { From 07156c2f8b5460884e605ae9ff0fe9b067956976 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Thu, 13 Jun 2024 10:25:36 -0400 Subject: [PATCH 09/23] change isPermittedPrefix to a public function --- blobstore/list.go | 12 ++++++------ blobstore/metadata.go | 6 +++--- blobstore/object_content.go | 2 +- blobstore/presigned_url.go | 4 ++-- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/blobstore/list.go b/blobstore/list.go index 11b1fe6..311b477 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -101,14 +101,14 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { processPage := func(page *s3.ListObjectsV2Output) error { for _, cp := range page.CommonPrefixes { // Handle directories (common prefixes) - if fullAccess || isPermittedPrefix(bucket, *cp.Prefix, permissions) { + if fullAccess || IsPermittedPrefix(bucket, *cp.Prefix, permissions) { result = append(result, aws.StringValue(cp.Prefix)) } } for _, object := range page.Contents { // Handle files - if fullAccess || isPermittedPrefix(bucket, *object.Key, permissions) { + if fullAccess || IsPermittedPrefix(bucket, *object.Key, permissions) { result = append(result, aws.StringValue(object.Key)) } @@ -155,7 +155,7 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { processPage := func(page *s3.ListObjectsV2Output) error { for _, cp := range page.CommonPrefixes { // Handle directories (common prefixes) - if fullAccess || isPermittedPrefix(bucket, *cp.Prefix, permissions) { + if fullAccess || IsPermittedPrefix(bucket, *cp.Prefix, permissions) { dir := ListResult{ ID: count, Name: filepath.Base(*cp.Prefix), @@ -172,7 +172,7 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { for _, object := range page.Contents { // Handle files - if fullAccess || isPermittedPrefix(bucket, *object.Key, permissions) { + if fullAccess || IsPermittedPrefix(bucket, *object.Key, permissions) { file := ListResult{ ID: count, Name: filepath.Base(*object.Key), @@ -266,8 +266,8 @@ func (s3Ctrl *S3Controller) GetListWithCallBack(bucket, prefix string, delimiter return err // Return any errors encountered in the pagination process } -// isPermittedPrefix checks if the prefix is within the user's permissions. -func isPermittedPrefix(bucket, prefix string, permissions []string) bool { +// IsPermittedPrefix checks if the prefix is within the user's permissions. 
+func IsPermittedPrefix(bucket, prefix string, permissions []string) bool { prefixForChecking := fmt.Sprintf("/%s/%s", bucket, prefix) // Check if any of the permissions indicate the prefixForChecking is a parent directory diff --git a/blobstore/metadata.go b/blobstore/metadata.go index 88c9895..f3ad7b3 100644 --- a/blobstore/metadata.go +++ b/blobstore/metadata.go @@ -48,7 +48,7 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { log.Error(err.Error()) return c.JSON(statusCode, err.Error()) } - if !fullAccess && !isPermittedPrefix(bucket, prefix, permissions) { + if !fullAccess && !IsPermittedPrefix(bucket, prefix, permissions) { errMsg := fmt.Errorf("user does not have read permission to read this prefix %s", prefix) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) @@ -118,7 +118,7 @@ func (bh *BlobHandler) HandleGetMetaData(c echo.Context) error { return c.JSON(statusCode, err.Error()) } - if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { + if !fullAccess && !IsPermittedPrefix(bucket, key, permissions) { errMsg := fmt.Errorf("user does not have read permission to read this key %s", key) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) @@ -161,7 +161,7 @@ func (bh *BlobHandler) HandleGetObjExist(c echo.Context) error { return c.JSON(statusCode, err.Error()) } - if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { + if !fullAccess && !IsPermittedPrefix(bucket, key, permissions) { errMsg := fmt.Errorf("user does not have read permission to read this key %s", key) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) diff --git a/blobstore/object_content.go b/blobstore/object_content.go index 1a1be68..a43b04e 100644 --- a/blobstore/object_content.go +++ b/blobstore/object_content.go @@ -53,7 +53,7 @@ func (bh *BlobHandler) HandleObjectContents(c echo.Context) error { return c.JSON(statusCode, err.Error()) } - if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { + if !fullAccess && !IsPermittedPrefix(bucket, key, permissions) { errMsg := fmt.Errorf("user does not have read permission to read this key %s", key) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) diff --git a/blobstore/presigned_url.go b/blobstore/presigned_url.go index 4e311e6..56c7096 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -130,7 +130,7 @@ func (bh *BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { return c.JSON(statusCode, err.Error()) } - if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { + if !fullAccess && !IsPermittedPrefix(bucket, key, permissions) { errMsg := fmt.Errorf("user does not have read permission to read this key %s", key) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) @@ -296,7 +296,7 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { // Define the processPage function processPage := func(page *s3.ListObjectsV2Output) error { for _, item := range page.Contents { - if fullAccess || isPermittedPrefix(bucket, *item.Key, permissions) { + if fullAccess || IsPermittedPrefix(bucket, *item.Key, permissions) { // Size checking if item.Size != nil { From 9ef1910a14d84df8af3678c78d1fa7d5166dd82a Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Thu, 13 Jun 2024 12:27:26 -0400 Subject: [PATCH 10/23] readd counter --- blobstore/list.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/blobstore/list.go b/blobstore/list.go 
index 311b477..8035a8a 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -166,8 +166,9 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { ModifiedBy: "", } results = append(results, dir) + count++ } - count++ + } for _, object := range page.Contents { @@ -184,7 +185,9 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { ModifiedBy: "", } results = append(results, file) + count++ } + } return nil } From e93254e7e8e83e6ebc47bfd969d8c2dea45ca5b6 Mon Sep 17 00:00:00 2001 From: ShaneMPutnam Date: Fri, 14 Jun 2024 14:40:05 +0000 Subject: [PATCH 11/23] Improve wording of error message --- blobstore/blobhandler.go | 2 +- blobstore/metadata.go | 6 +++--- blobstore/object_content.go | 2 +- blobstore/presigned_url.go | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/blobstore/blobhandler.go b/blobstore/blobhandler.go index c7db7b6..7459274 100644 --- a/blobstore/blobhandler.go +++ b/blobstore/blobhandler.go @@ -313,7 +313,7 @@ func (bh *BlobHandler) GetS3ReadPermissions(c echo.Context, bucket string) ([]st return nil, false, http.StatusInternalServerError, fmt.Errorf("error fetching user permissions: %s", err.Error()) } if !fullAccess && len(permissions) == 0 { - return nil, false, http.StatusForbidden, fmt.Errorf("user does not have read permission to read the %s bucket", bucket) + return nil, false, http.StatusForbidden, fmt.Errorf("user does not have permission to read the %s bucket", bucket) } return permissions, fullAccess, http.StatusOK, nil } diff --git a/blobstore/metadata.go b/blobstore/metadata.go index f3ad7b3..ef58a4e 100644 --- a/blobstore/metadata.go +++ b/blobstore/metadata.go @@ -49,7 +49,7 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { return c.JSON(statusCode, err.Error()) } if !fullAccess && !IsPermittedPrefix(bucket, prefix, permissions) { - errMsg := fmt.Errorf("user does not have read permission to read this prefix %s", prefix) + errMsg := fmt.Errorf("user does not have permission to read the %s prefix", prefix) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) } @@ -119,7 +119,7 @@ func (bh *BlobHandler) HandleGetMetaData(c echo.Context) error { } if !fullAccess && !IsPermittedPrefix(bucket, key, permissions) { - errMsg := fmt.Errorf("user does not have read permission to read this key %s", key) + errMsg := fmt.Errorf("user does not have permission to read the %s key", key) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) } @@ -162,7 +162,7 @@ func (bh *BlobHandler) HandleGetObjExist(c echo.Context) error { } if !fullAccess && !IsPermittedPrefix(bucket, key, permissions) { - errMsg := fmt.Errorf("user does not have read permission to read this key %s", key) + errMsg := fmt.Errorf("user does not have permission to read the %s key", key) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) } diff --git a/blobstore/object_content.go b/blobstore/object_content.go index a43b04e..f74e4ae 100644 --- a/blobstore/object_content.go +++ b/blobstore/object_content.go @@ -54,7 +54,7 @@ func (bh *BlobHandler) HandleObjectContents(c echo.Context) error { } if !fullAccess && !IsPermittedPrefix(bucket, key, permissions) { - errMsg := fmt.Errorf("user does not have read permission to read this key %s", key) + errMsg := fmt.Errorf("user does not have permission to read the %s key", key) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) } diff --git a/blobstore/presigned_url.go 
b/blobstore/presigned_url.go index 56c7096..74a4305 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -131,7 +131,7 @@ func (bh *BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { } if !fullAccess && !IsPermittedPrefix(bucket, key, permissions) { - errMsg := fmt.Errorf("user does not have read permission to read this key %s", key) + errMsg := fmt.Errorf("user does not have permission to read the %s key", key) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) } From 4d29a62fe25ac00a0ed04e02b73904bf8b10c2f1 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Fri, 14 Jun 2024 10:58:12 -0400 Subject: [PATCH 12/23] add AUTH_LIMITED_READER_ROLE to .example.env --- .example.env | 1 + 1 file changed, 1 insertion(+) diff --git a/.example.env b/.example.env index d8965c7..f4db772 100644 --- a/.example.env +++ b/.example.env @@ -6,6 +6,7 @@ S3API_SERVICE_PORT='5005' KEYCLOAK_PUBLIC_KEYS_URL='public-keys-url-string' AUTH_LEVEL=1 # Options: [0, 1] corresponds to [no FGAC, FGAC]. This integer value configures the initialization mode in docker-compose. AUTH_LIMITED_WRITER_ROLE='s3_limited_writer' +AUTH_LIMITED_READER_ROLE='s3_limited_reader' ## DB for Auth: POSTGRES_CONN_STRING='postgres://user:password@postgres:5432/db?sslmode=disable' From eb7da81fd228c121f11a4993a9895422adb88188 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Fri, 14 Jun 2024 12:24:13 -0400 Subject: [PATCH 13/23] disable check permisisons endpoint when auth is off --- blobstore/blobhandler.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/blobstore/blobhandler.go b/blobstore/blobhandler.go index 7459274..d071546 100644 --- a/blobstore/blobhandler.go +++ b/blobstore/blobhandler.go @@ -323,6 +323,14 @@ func (bh *BlobHandler) HandleCheckS3UserPermission(c echo.Context) error { log.Info("Checked user permissions successfully") return c.JSON(http.StatusOK, true) } + initAuth := os.Getenv("INIT_AUTH") + + if initAuth == "0" { + errMsg := fmt.Errorf("this endpoint requires authentication information that is unavailable when authorization is disabled. Please enable authorization to use this functionality") + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + + } prefix := c.QueryParam("prefix") bucket := c.QueryParam("bucket") operation := c.QueryParam("operation") From 43031969662772c9d507826caea15506cb6f55c4 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Fri, 14 Jun 2024 12:34:02 -0400 Subject: [PATCH 14/23] update postman --- postman/S3api.postman_collection.json | 48 +++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/postman/S3api.postman_collection.json b/postman/S3api.postman_collection.json index ef1170b..e918b0d 100644 --- a/postman/S3api.postman_collection.json +++ b/postman/S3api.postman_collection.json @@ -1,11 +1,11 @@ { "info": { - "_postman_id": "89da2e0a-96d6-418f-964d-c00912cabda2", + "_postman_id": "e8b7d353-22af-417c-bdb6-b44d4be5288e", "name": "S3api", "description": "# 🚀 Get started here\n\nThis collection guides you through CRUD operations (GET, POST, PUT, DELETE), variables, and tests.\n\n## 🔖 **How to use this collection**\n\n#### **Step 1: Send requests**\n\nRESTful APIs allow you to perform CRUD operations using the POST, GET, PUT, and DELETE HTTP methods.\n\nThis collection contains each of these request types. 
Open each request and click \"Send\" to see what happens.\n\n#### **Step 2: View responses**\n\nObserve the response tab for status code (200 OK), response time, and size.\n\n#### **Step 3: Send new Body data**\n\nUpdate or add new data in \"Body\" in the POST request. Typically, Body data is also used in PUT and PATCH requests.\n\n```\n{\n \"name\": \"Add your name in the body\"\n}\n\n```\n\n#### **Step 4: Update the variable**\n\nVariables enable you to store and reuse values in Postman. We have created a variable called `base_url` with the sample request [https://postman-api-learner.glitch.me](https://postman-api-learner.glitch.me). Replace it with your API endpoint to customize this collection.\n\n#### **Step 5: Add tests in the \"Tests\" tab**\n\nTests help you confirm that your API is working as expected. You can write test scripts in JavaScript and view the output in the \"Test Results\" tab.\n\n\n\n## đŸ’Ē Pro tips\n\n- Use folders to group related requests and organize the collection.\n- Add more scripts in \"Tests\" to verify if the API works as expected and execute flows.\n \n\n## ℹī¸ Resources\n\n[Building requests](https://learning.postman.com/docs/sending-requests/requests/) \n[Authorizing requests](https://learning.postman.com/docs/sending-requests/authorization/) \n[Using variables](https://learning.postman.com/docs/sending-requests/variables/) \n[Managing environments](https://learning.postman.com/docs/sending-requests/managing-environments/) \n[Writing scripts](https://learning.postman.com/docs/writing-scripts/intro-to-scripts/)", "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", "_exporter_id": "18773467", - "_collection_link": "https://universal-comet-524706.postman.co/workspace/Dewberry~fe115dcb-2f48-4ca3-a618-e462c6ac4255/collection/18773467-89da2e0a-96d6-418f-964d-c00912cabda2?action=share&source=collection_link&creator=18773467" + "_collection_link": "https://universal-comet-524706.postman.co/workspace/Dewberry~fe115dcb-2f48-4ca3-a618-e462c6ac4255/collection/18773467-e8b7d353-22af-417c-bdb6-b44d4be5288e?action=share&source=collection_link&creator=18773467" }, "item": [ { @@ -630,7 +630,8 @@ "var jsonData = JSON.parse(responseBody);\r", "postman.setEnvironmentVariable(\"bearer_token\", jsonData.access_token);" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], @@ -719,6 +720,47 @@ }, "response": [] }, + { + "name": "check_user_permission", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{bearer_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "url": { + "raw": "{{url}}/check_user_permission?prefix=antonTestingFolder/anton_testing/10Files%20-%20Copy%20(3)/&operation=write&bucket=ffrd-trinity", + "host": [ + "{{url}}" + ], + "path": [ + "check_user_permission" + ], + "query": [ + { + "key": "prefix", + "value": "antonTestingFolder/anton_testing/10Files%20-%20Copy%20(3)/" + }, + { + "key": "operation", + "value": "write" + }, + { + "key": "bucket", + "value": "ffrd-trinity" + } + ] + } + }, + "response": [] + }, { "name": "DeleteListOfObjects", "request": { From 1de00ef7e3683ee3980aa43a826b49aea896f256 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Fri, 14 Jun 2024 12:55:33 -0400 Subject: [PATCH 15/23] add delimiter option for list with detail --- blobstore/list.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/blobstore/list.go b/blobstore/list.go index 8035a8a..cf521c0 100644 --- 
a/blobstore/list.go +++ b/blobstore/list.go @@ -143,6 +143,18 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { log.Error(errMsg) return c.JSON(statusCode, errMsg) } + delimiterParam := c.QueryParam("delimiter") + delimiter := true + if delimiterParam != "" { + delimiter, err = strconv.ParseBool(delimiterParam) + if err != nil { + errMsg := fmt.Errorf("error parsing `delimiter` param: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + } + + } + prefix = adjustedPrefix var results []ListResult @@ -191,8 +203,8 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { } return nil } - - err = s3Ctrl.GetListWithCallBack(bucket, prefix, true, processPage) + fmt.Println(delimiter) + err = s3Ctrl.GetListWithCallBack(bucket, prefix, delimiter, processPage) if err != nil { errMsg := fmt.Errorf("error processing objects: %s", err.Error()) log.Error(errMsg.Error()) From 5e628412d8ced7b10f20daa314ee03ef6bd9b269 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Fri, 14 Jun 2024 16:46:54 -0400 Subject: [PATCH 16/23] make list buckets more efficient with FGAC --- blobstore/buckets.go | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/blobstore/buckets.go b/blobstore/buckets.go index 4cf72a0..e813656 100644 --- a/blobstore/buckets.go +++ b/blobstore/buckets.go @@ -87,7 +87,11 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { bh.Mu.Lock() defer bh.Mu.Unlock() - fullAccess := false + // Check user's overall read access level + _, fullAccess, err := bh.GetUserS3ReadListPermission(c, "") + if err != nil { + return c.JSON(http.StatusInternalServerError, fmt.Errorf("error fetching user permissions: %s", err.Error())) + } for _, controller := range bh.S3Controllers { if bh.AllowAllBuckets { @@ -108,13 +112,14 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { // Extract the bucket names from the response and append to allBuckets for i, bucket := range controller.Buckets { - permissions, fullAccessTmp, err := bh.GetUserS3ReadListPermission(c, bucket) - if err != nil { - return c.JSON(http.StatusInternalServerError, fmt.Errorf("error fetching user permissions: %s", err.Error())) + canRead := fullAccess + if !fullAccess { + permissions, _, err := bh.GetUserS3ReadListPermission(c, bucket) + if err != nil { + return c.JSON(http.StatusInternalServerError, fmt.Errorf("error fetching user permissions: %s", err.Error())) + } + canRead = len(permissions) > 0 } - fullAccess = fullAccess || fullAccessTmp // Update full access based on any bucket returning full access - - canRead := len(permissions) > 0 || fullAccessTmp // Set canRead based on permissions or full access allBuckets = append(allBuckets, BucketInfo{ ID: i, Name: bucket, @@ -123,12 +128,6 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { } } - if fullAccess { // If full access is true, set CanRead to true for all buckets - for i := range allBuckets { - allBuckets[i].CanRead = true - } - } - // Sorting allBuckets slice by CanRead true first and then by Name field alphabetically sort.Slice(allBuckets, func(i, j int) bool { if allBuckets[i].CanRead == allBuckets[j].CanRead { From b83d4c4b97b80c95ca8338dc9b084b3e787823d2 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Fri, 14 Jun 2024 16:48:23 -0400 Subject: [PATCH 17/23] consistent delimiter retrieval --- blobstore/list.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git 
a/blobstore/list.go b/blobstore/list.go index cf521c0..8715c29 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -66,8 +66,8 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { } delimiterParam := c.QueryParam("delimiter") - var delimiter bool - if delimiterParam == "true" || delimiterParam == "false" { + delimiter := true + if delimiterParam != "" { delimiter, err = strconv.ParseBool(delimiterParam) if err != nil { errMsg := fmt.Errorf("error parsing `delimiter` param: %s", err.Error()) @@ -75,11 +75,6 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - } else { - errMsg := fmt.Errorf("request must include a `delimiter`, options are `true` or `false`") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } if delimiter && !strings.HasSuffix(prefix, "/") { prefix = prefix + "/" From 148624aac7f34ad2fd6ccbb6719e188a778627bc Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Fri, 14 Jun 2024 16:51:45 -0400 Subject: [PATCH 18/23] update e2e for 422 checks --- e2e-test/e2eCollection.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e-test/e2eCollection.json b/e2e-test/e2eCollection.json index dbf9f20..bb1bb1b 100644 --- a/e2e-test/e2eCollection.json +++ b/e2e-test/e2eCollection.json @@ -2043,7 +2043,7 @@ "method": "GET", "header": [], "url": { - "raw": "{{s3_api_root_url}}/prefix/list?bucket={{bucket}}", + "raw": "{{s3_api_root_url}}/prefix/list?bucket=", "host": [ "{{s3_api_root_url}}" ], From 64dfca8d196b6f271c6d9726acdaf04a0f6a790a Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Sat, 15 Jun 2024 18:13:31 -0400 Subject: [PATCH 19/23] remove bucket from e2e for failure testing --- e2e-test/e2eCollection.json | 4 ---- 1 file changed, 4 deletions(-) diff --git a/e2e-test/e2eCollection.json b/e2e-test/e2eCollection.json index bb1bb1b..756799c 100644 --- a/e2e-test/e2eCollection.json +++ b/e2e-test/e2eCollection.json @@ -2061,10 +2061,6 @@ "key": "delimiter", "value": "{{e2eoverride}}", "disabled": true - }, - { - "key": "bucket", - "value": "{{bucket}}" } ] } From 2c6cda0c03ce441645094fde39a80cfa5393bd13 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Mon, 17 Jun 2024 12:40:21 -0400 Subject: [PATCH 20/23] add checks for auth level to ensure claims exist --- blobstore/blobhandler.go | 10 +++++++--- blobstore/blobstore.go | 11 +++++++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/blobstore/blobhandler.go b/blobstore/blobhandler.go index d071546..ffdf153 100644 --- a/blobstore/blobhandler.go +++ b/blobstore/blobhandler.go @@ -5,6 +5,7 @@ import ( "net/http" "os" "strconv" + "strings" "sync" "github.com/Dewberry/s3api/auth" @@ -309,8 +310,13 @@ func (bh *BlobHandler) PingWithAuth(c echo.Context) error { func (bh *BlobHandler) GetS3ReadPermissions(c echo.Context, bucket string) ([]string, bool, int, error) { permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) + httpStatus := http.StatusInternalServerError if err != nil { - return nil, false, http.StatusInternalServerError, fmt.Errorf("error fetching user permissions: %s", err.Error()) + //TEMP solution before error library is implimented and string check ups become redundant + if strings.Contains(err.Error(), "this endpoint requires authentication information that is unavailable when authorization is disabled.") { + httpStatus = http.StatusForbidden + } + return nil, false, httpStatus, fmt.Errorf("error fetching user permissions: %s", 
err.Error()) } if !fullAccess && len(permissions) == 0 { return nil, false, http.StatusForbidden, fmt.Errorf("user does not have permission to read the %s bucket", bucket) @@ -324,12 +330,10 @@ func (bh *BlobHandler) HandleCheckS3UserPermission(c echo.Context) error { return c.JSON(http.StatusOK, true) } initAuth := os.Getenv("INIT_AUTH") - if initAuth == "0" { errMsg := fmt.Errorf("this endpoint requires authentication information that is unavailable when authorization is disabled. Please enable authorization to use this functionality") log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) - } prefix := c.QueryParam("prefix") bucket := c.QueryParam("bucket") diff --git a/blobstore/blobstore.go b/blobstore/blobstore.go index 11782c0..c9db565 100644 --- a/blobstore/blobstore.go +++ b/blobstore/blobstore.go @@ -3,6 +3,7 @@ package blobstore import ( "fmt" "net/http" + "os" "strings" "github.com/Dewberry/s3api/auth" @@ -83,6 +84,11 @@ func isIdenticalArray(array1, array2 []string) bool { func (bh *BlobHandler) CheckUserS3Permission(c echo.Context, bucket, prefix string, permissions []string) (int, error) { if bh.Config.AuthLevel > 0 { + initAuth := os.Getenv("INIT_AUTH") + if initAuth == "0" { + errMsg := fmt.Errorf("this requires authentication information that is unavailable when authorization is disabled. Please enable authorization to use this functionality") + return http.StatusForbidden, errMsg + } claims, ok := c.Get("claims").(*auth.Claims) if !ok { return http.StatusInternalServerError, fmt.Errorf("could not get claims from request context") @@ -111,6 +117,11 @@ func (bh *BlobHandler) GetUserS3ReadListPermission(c echo.Context, bucket string permissions := make([]string, 0) if bh.Config.AuthLevel > 0 { + initAuth := os.Getenv("INIT_AUTH") + if initAuth == "0" { + errMsg := fmt.Errorf("this endpoint requires authentication information that is unavailable when authorization is disabled. 
Please enable authorization to use this functionality") + return permissions, false, errMsg + } fullAccess := false claims, ok := c.Get("claims").(*auth.Claims) if !ok { From 0dd9b826da761a8550b5e83cbbc4a9a0a66e8bca Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Mon, 17 Jun 2024 12:48:21 -0400 Subject: [PATCH 21/23] add trailing / check to the detailed list endpoint --- blobstore/list.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/blobstore/list.go b/blobstore/list.go index 8715c29..73fcbc7 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -76,7 +76,8 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { } } - if delimiter && !strings.HasSuffix(prefix, "/") { + + if delimiter && prefix != "" && !strings.HasSuffix(prefix, "/") { prefix = prefix + "/" } @@ -138,6 +139,8 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { log.Error(errMsg) return c.JSON(statusCode, errMsg) } + prefix = adjustedPrefix + delimiterParam := c.QueryParam("delimiter") delimiter := true if delimiterParam != "" { @@ -150,7 +153,9 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { } - prefix = adjustedPrefix + if delimiter && prefix != "" && !strings.HasSuffix(prefix, "/") { + prefix = prefix + "/" + } var results []ListResult var count int @@ -198,7 +203,6 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { } return nil } - fmt.Println(delimiter) err = s3Ctrl.GetListWithCallBack(bucket, prefix, delimiter, processPage) if err != nil { errMsg := fmt.Errorf("error processing objects: %s", err.Error()) From fc43cf027cf09dd0eae32be6523b08b56e8cdb06 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Mon, 17 Jun 2024 14:29:05 -0400 Subject: [PATCH 22/23] move adjusted prefix before the delimiter check --- blobstore/list.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/blobstore/list.go b/blobstore/list.go index 73fcbc7..5f6906b 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -65,6 +65,13 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } + adjustedPrefix, errMsg, statusCode := CheckAndAdjustPrefix(s3Ctrl, bucket, prefix) + if errMsg != "" { + log.Error(errMsg) + return c.JSON(statusCode, errMsg) + } + prefix = adjustedPrefix + delimiterParam := c.QueryParam("delimiter") delimiter := true if delimiterParam != "" { @@ -81,13 +88,6 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { prefix = prefix + "/" } - adjustedPrefix, errMsg, statusCode := CheckAndAdjustPrefix(s3Ctrl, bucket, prefix) - if errMsg != "" { - log.Error(errMsg) - return c.JSON(statusCode, errMsg) - } - prefix = adjustedPrefix - var result []string permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) if err != nil { From dc2feb97ade0ba4ce8b6e73c3d02e57b2359b267 Mon Sep 17 00:00:00 2001 From: ShaneMPutnam Date: Mon, 17 Jun 2024 20:43:50 +0000 Subject: [PATCH 23/23] Move variable inside if statement --- blobstore/blobhandler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blobstore/blobhandler.go b/blobstore/blobhandler.go index ffdf153..15b2c99 100644 --- a/blobstore/blobhandler.go +++ b/blobstore/blobhandler.go @@ -310,9 +310,9 @@ func (bh *BlobHandler) PingWithAuth(c echo.Context) error { func (bh *BlobHandler) GetS3ReadPermissions(c echo.Context, bucket string) ([]string, bool, int, error) { permissions, fullAccess, err := 
bh.GetUserS3ReadListPermission(c, bucket) - httpStatus := http.StatusInternalServerError if err != nil { //TEMP solution before error library is implemented and string checks become redundant + httpStatus := http.StatusInternalServerError if strings.Contains(err.Error(), "this endpoint requires authentication information that is unavailable when authorization is disabled.") { httpStatus = http.StatusForbidden }
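
Taken together, patches 08-10 replace the per-handler permission boilerplate with GetS3ReadPermissions, IsPermittedPrefix, and the page-by-page GetListWithCallBack. The sketch below is not part of any patch in this series; it is a minimal illustration of how those helpers are intended to compose in a read handler. The handler name is hypothetical, and the sketch assumes the blobstore package's existing imports (fmt, net/http, aws, s3, echo, log) and types from the repository.

// handleListKeysSketch is an illustrative handler, not part of the patch series.
// It shows the post-refactor pattern: resolve the controller, fetch read
// permissions once, then filter each listing page through IsPermittedPrefix.
func (bh *BlobHandler) handleListKeysSketch(c echo.Context) error {
	bucket := c.QueryParam("bucket")
	prefix := c.QueryParam("prefix")

	s3Ctrl, err := bh.GetController(bucket)
	if err != nil {
		errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error())
		log.Error(errMsg.Error())
		return c.JSON(http.StatusUnprocessableEntity, errMsg.Error())
	}

	// One call replaces the repeated permission checks removed in patch 08;
	// it returns the prefixes the user may read plus a full-access flag.
	permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket)
	if err != nil {
		log.Error(err.Error())
		return c.JSON(statusCode, err.Error())
	}

	// Collect only the keys the caller is permitted to see, one page at a time.
	var keys []string
	processPage := func(page *s3.ListObjectsV2Output) error {
		for _, object := range page.Contents {
			if fullAccess || IsPermittedPrefix(bucket, *object.Key, permissions) {
				keys = append(keys, aws.StringValue(object.Key))
			}
		}
		return nil
	}

	if err := s3Ctrl.GetListWithCallBack(bucket, prefix, false, processPage); err != nil {
		errMsg := fmt.Errorf("error processing objects: %s", err.Error())
		log.Error(errMsg.Error())
		return c.JSON(http.StatusInternalServerError, errMsg.Error())
	}

	return c.JSON(http.StatusOK, keys)
}

Filtering each page inside the callback keeps the permission checks incremental, so large prefixes never require holding the full listing in memory, which is the same reasoning given in the GetListWithCallBack doc comment.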