Skip to content

Commit

Permalink
Separate access check from other operations (#5190)
Browse files Browse the repository at this point in the history
* Separate access check from other operations

---------

Co-authored-by: Simon Dumas <[email protected]>
  • Loading branch information
imsdu and Simon Dumas authored Oct 17, 2024
1 parent 1e76ea1 commit 22b5baf
Show file tree
Hide file tree
Showing 27 changed files with 212 additions and 202 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.{S3F
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.routes.StoragesRoutes
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.schemas.{storage => storagesSchemaId}
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages._
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.access.{RemoteStorageAccess, S3StorageAccess, StorageAccess}
import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.api.JsonLdApi
import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.{ContextValue, RemoteContextResolution}
import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering
Expand Down Expand Up @@ -78,12 +79,16 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
new S3LocationGenerator(prefix)
}

make[StorageAccess].from { (remoteClient: RemoteDiskStorageClient, s3Client: S3StorageClient) =>
StorageAccess(RemoteStorageAccess(remoteClient), S3StorageAccess(s3Client))
}

make[Storages]
.fromEffect {
(
fetchContext: FetchContext,
contextResolution: ResolverContextResolution,
fileOperations: FileOperations,
storageAccess: StorageAccess,
permissions: Permissions,
xas: Transactors,
cfg: StoragePluginConfig,
Expand All @@ -96,7 +101,7 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
fetchContext,
contextResolution,
permissions.fetchPermissionSet,
fileOperations,
storageAccess,
xas,
cfg.storages,
serviceAccount,
Expand Down Expand Up @@ -183,7 +188,7 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
}

make[FileOperations].from { (disk: DiskFileOperations, remoteDisk: RemoteDiskFileOperations, s3: S3FileOperations) =>
FileOperations.mk(disk, remoteDisk, s3)
FileOperations.apply(disk, remoteDisk, s3)
}

make[Files].from {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -513,7 +513,7 @@ final class Files(

private def saveFileToStorage(iri: Iri, storage: Storage, uploadRequest: FileUploadRequest): IO[FileAttributes] = {
for {
info <- formDataExtractor(iri, uploadRequest.entity, storage.storageValue.maxFileSize)
info <- formDataExtractor(uploadRequest.entity, storage.storageValue.maxFileSize)
description = FileDescription.from(info, uploadRequest.metadata)
storageMetadata <- fileOperations.save(storage, info, uploadRequest.contentLength)
} yield FileAttributes.from(description, storageMetadata)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@ import ch.epfl.bluebrain.nexus.delta.kernel.error.NotARejection
import ch.epfl.bluebrain.nexus.delta.kernel.http.MediaTypeDetectorConfig
import ch.epfl.bluebrain.nexus.delta.kernel.utils.FileUtils
import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection.{FileTooLarge, InvalidMultipartFieldName, WrappedAkkaRejection}
import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri

import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try
Expand All @@ -25,20 +24,14 @@ trait FormDataExtractor {
* Extracts the part with fieldName ''file'' from the passed ''entity'' MultiPart/FormData. Any other part is
* discarded.
*
* @param id
* the file id
* @param entity
* the Multipart/FormData payload
* @param maxFileSize
* the file size limit to be uploaded, provided by the storage
* @return
the file metadata plus the entity with the file content
*/
def apply(
id: Iri,
entity: HttpEntity,
maxFileSize: Long
): IO[UploadedFileInformation]
def apply(entity: HttpEntity, maxFileSize: Long): IO[UploadedFileInformation]
}

case class UploadedFileInformation(
Expand Down Expand Up @@ -74,15 +67,11 @@ object FormDataExtractor {
new FormDataExtractor {
implicit val ec: ExecutionContext = as.getDispatcher

override def apply(
id: Iri,
entity: HttpEntity,
maxFileSize: Long
): IO[UploadedFileInformation] = {
override def apply(entity: HttpEntity, maxFileSize: Long): IO[UploadedFileInformation] = {
for {
formData <- unmarshall(entity, maxFileSize)
fileOpt <- extractFile(formData, maxFileSize)
file <- IO.fromOption(fileOpt)(InvalidMultipartFieldName(id))
file <- IO.fromOption(fileOpt)(InvalidMultipartFieldName)
} yield file
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ final case class FileId(id: IdSegmentRef, project: ProjectRef) {
def expandRef(fetchContext: ProjectRef => IO[ProjectContext]): IO[ResourceRef] =
fetchContext(project).flatMap { pc =>
iriExpander(id.value, pc).map { iri =>
(iri, pc)
id match {
case IdSegmentRef.Latest(_) => ResourceRef.Latest(iri)
case IdSegmentRef.Revision(_, rev) => ResourceRef.Revision(iri, rev)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -135,8 +135,8 @@ object FileRejection {
* Rejection returned when attempting to create/update a file with a Multipart/Form-Data payload that does not
* contain a ''file'' fieldName
*/
final case class InvalidMultipartFieldName(id: Iri)
extends FileRejection(s"File '$id' payload a Multipart/Form-Data without a 'file' part.")
final case object InvalidMultipartFieldName
extends FileRejection(s"Multipart/Form-Data payload does not contain a 'file' part.")

final case object EmptyCustomMetadata extends FileRejection(s"No metadata was provided")

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,12 @@ import ch.epfl.bluebrain.nexus.delta.kernel.kamon.KamonMetricComponent
import ch.epfl.bluebrain.nexus.delta.kernel.utils.UUIDF
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.Storages._
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.StorageTypeConfig
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.access.StorageAccess
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageCommand._
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageEvent._
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection._
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.DiskStorageValue
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model._
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageAccess
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.schemas.{storage => storageSchema}
import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri
import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.api.JsonLdApi
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.access

import cats.effect.IO
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.AbsolutePath
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.StorageNotAccessible

import java.nio.file.Files

object DiskStorageAccess {

  /**
    * Verifies that the volume backing a disk storage is usable by the service.
    *
    * The checks run sequentially and fail fast: the volume must exist, be a directory and be
    * writable. The first failing check raises a [[StorageNotAccessible]] describing the problem.
    *
    * @param path
    *   the absolute path of the storage volume to validate
    */
  def checkVolumeExists(path: AbsolutePath): IO[Unit] = {
    // Runs a blocking filesystem predicate and raises a StorageNotAccessible when it does not hold
    def require(check: => Boolean, err: => String): IO[Unit] =
      IO.blocking(check).flatMap { ok =>
        IO.raiseUnless(ok)(StorageNotAccessible(err))
      }

    require(Files.exists(path.value), s"Volume '${path.value}' does not exist.") >>
      require(Files.isDirectory(path.value), s"Volume '${path.value}' is not a directory.") >>
      require(Files.isWritable(path.value), s"Volume '${path.value}' does not have write access.")
  }

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.access

import cats.syntax.all._
import cats.effect.IO
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.StorageNotAccessible
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient
import ch.epfl.bluebrain.nexus.delta.sdk.http.HttpClientError
import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label

/**
  * Access check for remote disk storages.
  */
trait RemoteStorageAccess {

  /**
    * Checks that the given folder is reachable on the remote storage service, failing with a
    * storage-not-accessible error otherwise.
    *
    * @param folder
    *   the label of the folder to check
    */
  def checkFolderExists(folder: Label): IO[Unit]

}

object RemoteStorageAccess {

  /**
    * Builds a [[RemoteStorageAccess]] backed by the given remote disk storage client.
    *
    * Client errors are translated into `StorageNotAccessible` rejections, keeping the error
    * details when the remote service provides them.
    */
  def apply(client: RemoteDiskStorageClient): RemoteStorageAccess =
    new RemoteStorageAccess {
      override def checkFolderExists(folder: Label): IO[Unit] =
        client.exists(folder).adaptError { case err: HttpClientError =>
          val message = err.details.fold(s"Folder '$folder' does not exist")(d => s"${err.reason}: $d")
          StorageNotAccessible(message)
        }
    }

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.access

import cats.effect.IO
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.StorageNotAccessible
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.client.S3StorageClient

/**
  * Access check for S3 storages.
  */
trait S3StorageAccess {

  /**
    * Checks that the given bucket exists, failing with a storage-not-accessible error otherwise.
    *
    * @param bucket
    *   the name of the bucket to check
    */
  def checkBucketExists(bucket: String): IO[Unit]

}

object S3StorageAccess {

  /**
    * Builds a [[S3StorageAccess]] backed by the given S3 client: the bucket existence is queried
    * and a missing bucket is surfaced as a `StorageNotAccessible` rejection.
    */
  def apply(client: S3StorageClient): S3StorageAccess =
    new S3StorageAccess {
      override def checkBucketExists(bucket: String): IO[Unit] =
        for {
          exists <- client.bucketExists(bucket)
          _      <- IO.raiseUnless(exists)(StorageNotAccessible(s"Bucket $bucket does not exist"))
        } yield ()
    }

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.access

import cats.effect.IO
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, RemoteDiskStorageValue, S3StorageValue}

/**
  * Validates that the system can reach the backend of a storage, whatever its flavour.
  */
trait StorageAccess {

  /**
    * Checks whether the system has access to the passed `storage`.
    *
    * @param storage
    *   the storage value whose backend (disk volume, remote folder or S3 bucket) is checked
    * @return
    *   a [[Unit]] if access has been verified successfully or signals an error
    *   `StorageNotAccessible` with the details about why the storage is not accessible
    */
  def validateStorageAccess(storage: StorageValue): IO[Unit]
}

object StorageAccess {

  /**
    * Assembles a [[StorageAccess]] dispatching on the storage flavour: disk volumes are checked
    * locally while remote-disk and S3 checks are delegated to the provided implementations.
    */
  def apply(remoteAccess: RemoteStorageAccess, s3Access: S3StorageAccess): StorageAccess =
    new StorageAccess {
      override def validateStorageAccess(storage: StorageValue): IO[Unit] =
        storage match {
          case disk: DiskStorageValue         => DiskStorageAccess.checkVolumeExists(disk.volume)
          case remote: RemoteDiskStorageValue => remoteAccess.checkFolderExists(remote.folder)
          case s3: S3StorageValue             => s3Access.checkBucketExists(s3.bucket)
        }
    }

}
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,8 @@ import akka.http.scaladsl.model.Uri
import cats.effect.IO
import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.UploadedFileInformation
import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{ComputedFileAttributes, FileAttributes, FileDelegationRequest, FileStorageMetadata}
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.{DiskStorage, RemoteDiskStorage, S3Storage}
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, RemoteDiskStorageValue, S3StorageValue}
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{Storage, StorageValue}
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.{DelegateFileOperation, FetchAttributeRejection, LinkFileRejection, MoveFileRejection}
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.UploadingFile.{DiskUploadingFile, RemoteUploadingFile, S3UploadingFile}
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk.DiskFileOperations
Expand All @@ -18,7 +17,7 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef

import java.util.UUID

trait FileOperations extends StorageAccess {
trait FileOperations {
def save(
storage: Storage,
info: UploadedFileInformation,
Expand All @@ -37,18 +36,12 @@ trait FileOperations extends StorageAccess {
}

object FileOperations {
def mk(
def apply(
diskFileOps: DiskFileOperations,
remoteDiskFileOps: RemoteDiskFileOperations,
s3FileOps: S3FileOperations
): FileOperations = new FileOperations {

override def validateStorageAccess(storage: StorageValue): IO[Unit] = storage match {
case s: DiskStorageValue => diskFileOps.checkVolumeExists(s.volume)
case s: S3StorageValue => s3FileOps.checkBucketExists(s.bucket)
case s: RemoteDiskStorageValue => remoteDiskFileOps.checkFolderExists(s.folder)
}

override def save(
storage: Storage,
info: UploadedFileInformation,
Expand Down

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -6,18 +6,12 @@ import akka.stream.scaladsl.FileIO
import cats.effect.IO
import ch.epfl.bluebrain.nexus.delta.kernel.utils.UUIDF
import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileStorageMetadata
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.AbsolutePath
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.StorageNotAccessible
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.FetchFileRejection
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.FetchFileRejection.UnexpectedLocationFormat
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.UploadingFile.DiskUploadingFile
import ch.epfl.bluebrain.nexus.delta.sdk.AkkaSource

import java.nio.file.Files

trait DiskFileOperations {
def checkVolumeExists(path: AbsolutePath): IO[Unit]

def fetch(path: Uri.Path): IO[AkkaSource]

def save(uploading: DiskUploadingFile): IO[FileStorageMetadata]
Expand All @@ -28,21 +22,6 @@ object DiskFileOperations {

private val saveFile = new DiskStorageSaveFile()

override def checkVolumeExists(path: AbsolutePath): IO[Unit] = {
def failWhen(condition: Boolean, err: => String) = {
IO.raiseWhen(condition)(StorageNotAccessible(err))
}

for {
exists <- IO.blocking(Files.exists(path.value))
_ <- failWhen(!exists, s"Volume '${path.value}' does not exist.")
isDirectory <- IO.blocking(Files.isDirectory(path.value))
_ <- failWhen(!isDirectory, s"Volume '${path.value}' is not a directory.")
isWritable <- IO.blocking(Files.isWritable(path.value))
_ <- failWhen(!isWritable, s"Volume '${path.value}' does not have write access.")
} yield ()
}

override def fetch(path: Uri.Path): IO[AkkaSource] = absoluteDiskPath(path).redeemWith(
e => IO.raiseError(UnexpectedLocationFormat(s"file://$path", e.getMessage)),
path =>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,21 +7,18 @@ import ch.epfl.bluebrain.nexus.delta.kernel.utils.UUIDF
import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin
import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{ComputedFileAttributes, FileStorageMetadata}
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.RemoteDiskStorage
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.StorageNotAccessible
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.FileOperations.intermediateFolders
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.FetchAttributeRejection.WrappedFetchRejection
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.FetchFileRejection
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.UploadingFile.RemoteUploadingFile
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.model.RemoteDiskStorageFileAttributes
import ch.epfl.bluebrain.nexus.delta.sdk.AkkaSource
import ch.epfl.bluebrain.nexus.delta.sdk.http.HttpClientError
import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef}

import java.util.UUID

trait RemoteDiskFileOperations {
def checkFolderExists(folder: Label): IO[Unit]

def legacyLink(storage: RemoteDiskStorage, sourcePath: Uri.Path, filename: String): IO[FileStorageMetadata]

Expand All @@ -36,15 +33,6 @@ object RemoteDiskFileOperations {

def mk(client: RemoteDiskStorageClient)(implicit uuidf: UUIDF): RemoteDiskFileOperations =
new RemoteDiskFileOperations {
override def checkFolderExists(folder: Label): IO[Unit] =
client
.exists(folder)
.adaptError { case err: HttpClientError =>
StorageNotAccessible(
err.details.fold(s"Folder '$folder' does not exist")(d => s"${err.reason}: $d")
)
}

override def fetch(folder: Label, path: Uri.Path): IO[AkkaSource] = client.getFile(folder, path)

override def save(uploading: RemoteUploadingFile): IO[FileStorageMetadata] =
Expand Down
Loading

0 comments on commit 22b5baf

Please sign in to comment.