diff --git a/frontend/javascripts/admin/api/mesh.ts b/frontend/javascripts/admin/api/mesh.ts index d64f2d7d592..673e5bf0aaa 100644 --- a/frontend/javascripts/admin/api/mesh.ts +++ b/frontend/javascripts/admin/api/mesh.ts @@ -44,6 +44,8 @@ export function getMeshfileChunksForSegment( // editableMappingTracingId should be the tracing id, not the editable mapping id. // If this is set, it is assumed that the request is about an editable mapping. editableMappingTracingId: string | null | undefined, + meshFileType: string | null | undefined, + meshFilePath: string | null | undefined, ): Promise { return doWithToken((token) => { const params = new URLSearchParams(); @@ -60,6 +62,8 @@ export function getMeshfileChunksForSegment( data: { meshFile, segmentId, + meshFileType, + meshFilePath, }, showErrorToast: false, }, @@ -70,10 +74,13 @@ export function getMeshfileChunksForSegment( type MeshChunkDataRequest = { byteOffset: number; byteSize: number; + segmentId: number | null; // Only relevant for neuroglancer precomputed meshes }; type MeshChunkDataRequestList = { meshFile: string; + meshFileType: string | null | undefined; + meshFilePath: string | null | undefined; requests: MeshChunkDataRequest[]; }; diff --git a/frontend/javascripts/oxalis/model/sagas/mesh_saga.ts b/frontend/javascripts/oxalis/model/sagas/mesh_saga.ts index 2fdf47b3710..0cb6f2972c2 100644 --- a/frontend/javascripts/oxalis/model/sagas/mesh_saga.ts +++ b/frontend/javascripts/oxalis/model/sagas/mesh_saga.ts @@ -846,6 +846,7 @@ function* loadPrecomputedMeshForSegmentId( scale, additionalCoordinates, mergeChunks, + id, ); try { @@ -882,7 +883,7 @@ function* _getChunkLoadingDescriptors( const { segmentMeshController } = getSceneController(); const version = meshFile.formatVersion; - const { meshFileName } = meshFile; + const { meshFileName, meshFileType, meshFilePath } = meshFile; const editableMapping = yield* select((state) => getEditableMappingForVolumeTracingId(state, segmentationLayer.tracingId), @@ -916,6 +917,8 @@ function* _getChunkLoadingDescriptors( // without a mapping. meshFile.mappingName == null ? mappingName : null, editableMapping != null && tracing ? 
tracing.tracingId : null, + meshFileType, + meshFilePath, ); scale = [segmentInfo.transform[0][0], segmentInfo.transform[1][1], segmentInfo.transform[2][2]]; segmentInfo.chunks.lods.forEach((chunks, lodIndex) => { @@ -951,9 +954,10 @@ function _getLoadChunksTasks( scale: Vector3 | null, additionalCoordinates: AdditionalCoordinate[] | null, mergeChunks: boolean, + segmentId: number, ) { const { segmentMeshController } = getSceneController(); - const { meshFileName } = meshFile; + const { meshFileName, meshFileType, meshFilePath } = meshFile; const loader = getDracoLoader(); return _.compact( _.flatten( @@ -981,8 +985,14 @@ function _getLoadChunksTasks( getBaseSegmentationName(segmentationLayer), { meshFile: meshFileName, + meshFileType, + meshFilePath, // Only extract the relevant properties - requests: chunks.map(({ byteOffset, byteSize }) => ({ byteOffset, byteSize })), + requests: chunks.map(({ byteOffset, byteSize }) => ({ + byteOffset, + byteSize, + segmentId: segmentId, + })), }, ); diff --git a/frontend/javascripts/types/api_flow_types.ts b/frontend/javascripts/types/api_flow_types.ts index 958651b32ec..63c514c0c76 100644 --- a/frontend/javascripts/types/api_flow_types.ts +++ b/frontend/javascripts/types/api_flow_types.ts @@ -896,6 +896,8 @@ export type APIMeshFile = { // 1-2 - the format should behave as v0 (refer to voxelytics for actual differences) // 3 - is the newer version with draco encoding. formatVersion: number; + meshFileType: string | null | undefined; + meshFilePath: string | null | undefined; }; export type APIConnectomeFile = { connectomeFileName: string; diff --git a/test/backend/MurmurHashTestSuite.scala b/test/backend/MurmurHashTestSuite.scala new file mode 100644 index 00000000000..add2f0cfc30 --- /dev/null +++ b/test/backend/MurmurHashTestSuite.scala @@ -0,0 +1,19 @@ +package backend + +import com.scalableminds.webknossos.datastore.datareaders.precomputed.MurmurHash3 +import org.scalatestplus.play.PlaySpec + +class MurmurHashTestSuite extends PlaySpec { + + "Murmur hash" should { + "return the correct hash" in { + val keyString = "Hello World!" 
+ val keyBytes = keyString.getBytes + val seed = 0 + val expectedHash = -1505357907696379773L + val actualHash = MurmurHash3.hash64(keyBytes, seed) + + assert(actualHash == expectedHash) + } + } +} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSMeshController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSMeshController.scala index 36d3b25eeec..1e2ae279a2d 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSMeshController.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DSMeshController.scala @@ -33,7 +33,11 @@ class DSMeshController @Inject()( urlOrHeaderToken(token, request)) { for { meshFiles <- meshFileService.exploreMeshFiles(organizationId, datasetDirectoryName, dataLayerName) - } yield Ok(Json.toJson(meshFiles)) + neuroglancerMeshFiles <- meshFileService.exploreNeuroglancerPrecomputedMeshes(organizationId, + datasetDirectoryName, + dataLayerName) + allMeshFiles = meshFiles ++ neuroglancerMeshFiles + } yield Ok(Json.toJson(allMeshFiles)) } } @@ -70,11 +74,18 @@ class DSMeshController @Inject()( omitMissing = false, urlOrHeaderToken(token, request) ) - chunkInfos <- meshFileService.listMeshChunksForSegmentsMerged(organizationId, - datasetDirectoryName, - dataLayerName, - request.body.meshFile, - segmentIds) + chunkInfos <- request.body.meshFileType match { + case Some("neuroglancerPrecomputed") => + meshFileService.listMeshChunksForNeuroglancerPrecomputedMesh( + request.body.meshFilePath, + request.body.segmentId) // TODO: Pass segmentIds here + case _ => + meshFileService.listMeshChunksForSegmentsMerged(organizationId, + datasetDirectoryName, + dataLayerName, + request.body.meshFile, + segmentIds) + } } yield Ok(Json.toJson(chunkInfos)) } } @@ -88,10 +99,12 @@ class DSMeshController @Inject()( UserAccessRequest.readDataSources(DataSourceId(datasetDirectoryName, organizationId)), urlOrHeaderToken(token, request)) { for { - (data, encoding) <- meshFileService.readMeshChunk(organizationId, - datasetDirectoryName, - dataLayerName, - request.body) ?~> "mesh.file.loadChunk.failed" + (data, encoding) <- request.body.meshFileType match { + case Some("neuroglancerPrecomputed") => + meshFileService.readMeshChunkForNeuroglancerPrecomputed(request.body.meshFilePath, request.body.requests) + case _ => + meshFileService.readMeshChunk(organizationId, datasetDirectoryName, dataLayerName, request.body) ?~> "mesh.file.loadChunk.failed" + } } yield { if (encoding.contains("gzip")) { Ok(data).withHeaders("Content-Encoding" -> "gzip") diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/MurmurHash3.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/MurmurHash3.scala new file mode 100644 index 00000000000..f0363c2e057 --- /dev/null +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/MurmurHash3.scala @@ -0,0 +1,118 @@ +package com.scalableminds.webknossos.datastore.datareaders.precomputed + +object MurmurHash3 { + + private def fmix(h: Int): Int = { + var hash = h + hash ^= (hash >>> 16) + hash = (hash * 0x85EBCA6B) & 0xFFFFFFFF + hash ^= (hash >>> 13) + hash = (hash * 0xC2B2AE35) & 0xFFFFFFFF + hash ^= (hash >>> 16) + hash + } + + private def hash128(key: Array[Byte], seed: Int): BigInt = { + val c1 = 0x239B961B + val c2 = 0xAB0E9789 + val c3 = 0x38B34AE5 + val c4 = 0xA1E38B93 + + val length = key.length + val 
nblocks = length / 16 + + var h1 = seed + var h2 = seed + var h3 = seed + var h4 = seed + + // Process blocks + for (i <- 0 until nblocks) { + val block = key.slice(i * 16, i * 16 + 16) + val k1 = BigInt(block.slice(0, 4).reverse).toInt + val k2 = BigInt(block.slice(4, 8).reverse).toInt + val k3 = BigInt(block.slice(8, 12).reverse).toInt + val k4 = BigInt(block.slice(12, 16).reverse).toInt + + h1 ^= Integer.rotateLeft((k1 * c1) & 0xFFFFFFFF, 15) * c2 & 0xFFFFFFFF + h1 = (Integer.rotateLeft(h1, 19) + h2) * 5 + 0x561CCD1B & 0xFFFFFFFF + + h2 ^= Integer.rotateLeft((k2 * c2) & 0xFFFFFFFF, 16) * c3 & 0xFFFFFFFF + h2 = (Integer.rotateLeft(h2, 17) + h3) * 5 + 0x0BCAA747 & 0xFFFFFFFF + + h3 ^= Integer.rotateLeft((k3 * c3) & 0xFFFFFFFF, 17) * c4 & 0xFFFFFFFF + h3 = (Integer.rotateLeft(h3, 15) + h4) * 5 + 0x96CD1C35 & 0xFFFFFFFF + + h4 ^= Integer.rotateLeft((k4 * c4) & 0xFFFFFFFF, 18) * c1 & 0xFFFFFFFF + h4 = (Integer.rotateLeft(h4, 13) + h1) * 5 + 0x32AC3B17 & 0xFFFFFFFF + } + + // Tail + val tail = key.slice(nblocks * 16, length) + var k1, k2, k3, k4 = 0 + + tail.zipWithIndex.foreach { + case (byte, i) => + val shift = (i % 4) * 8 + i / 4 match { + case 0 => k1 |= (byte & 0xFF) << shift + case 1 => k2 |= (byte & 0xFF) << shift + case 2 => k3 |= (byte & 0xFF) << shift + case 3 => k4 |= (byte & 0xFF) << shift + } + } + + if (tail.length > 0) { + k1 = (k1 * c1) & 0xFFFFFFFF + k1 = Integer.rotateLeft(k1, 15) * c2 & 0xFFFFFFFF + h1 ^= k1 + } + + if (tail.length > 4) { + k2 = (k2 * c2) & 0xFFFFFFFF + k2 = Integer.rotateLeft(k2, 16) * c3 & 0xFFFFFFFF + h2 ^= k2 + } + + if (tail.length > 8) { + k3 = (k3 * c3) & 0xFFFFFFFF + k3 = Integer.rotateLeft(k3, 17) * c4 & 0xFFFFFFFF + h3 ^= k3 + } + + if (tail.length > 12) { + k4 = (k4 * c4) & 0xFFFFFFFF + k4 = Integer.rotateLeft(k4, 18) * c1 & 0xFFFFFFFF + h4 ^= k4 + } + + // Finalization + h1 ^= length + h2 ^= length + h3 ^= length + h4 ^= length + + h1 = (h1 + h2 + h3 + h4) & 0xFFFFFFFF + h2 = (h1 + h2) & 0xFFFFFFFF + h3 = (h1 + h3) & 0xFFFFFFFF + h4 = (h1 + h4) & 0xFFFFFFFF + + h1 = fmix(h1) + h2 = fmix(h2) + h3 = fmix(h3) + h4 = fmix(h4) + + h1 = (h1 + h2 + h3 + h4) & 0xFFFFFFFF + h2 = (h1 + h2) & 0xFFFFFFFF + h3 = (h1 + h3) & 0xFFFFFFFF + h4 = (h1 + h4) & 0xFFFFFFFF + + BigInt(h4) << 96 | BigInt(h3) << 64 | BigInt(h2) << 32 | BigInt(h1) + } + + def hash64(key: Array[Byte], seed: Int = 0): Long = { + val hash128 = MurmurHash3.hash128(key, seed) + val low = (hash128 & BigInt("FFFFFFFFFFFFFFFF", 16)).toLong + low + } +} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/NeuroglancerPrecomputedShardingUtils.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/NeuroglancerPrecomputedShardingUtils.scala new file mode 100644 index 00000000000..1e964da75b4 --- /dev/null +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/NeuroglancerPrecomputedShardingUtils.scala @@ -0,0 +1,143 @@ +package com.scalableminds.webknossos.datastore.datareaders.precomputed + +import com.scalableminds.util.cache.AlfuCache +import com.scalableminds.util.io.ZipIO +import com.scalableminds.util.tools.Fox +import com.scalableminds.webknossos.datastore.datavault.VaultPath +import net.liftweb.common.Box +import net.liftweb.common.Box.tryo + +import java.nio.{ByteBuffer, ByteOrder} +import scala.collection.immutable.NumericRange +import scala.concurrent.ExecutionContext + +trait NeuroglancerPrecomputedShardingUtils { + + // SHARDING + // Implemented according to 
https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/sharded.md, + // directly adapted from https://github.com/scalableminds/webknossos-connect/blob/master/wkconnect/backends/neuroglancer/sharding.py. + + val shardingSpecification: ShardingSpecification + + private val minishardIndexCache: AlfuCache[(VaultPath, Int), Array[(Long, Long, Long)]] = + AlfuCache() + + private val shardIndexCache: AlfuCache[VaultPath, Array[Byte]] = + AlfuCache() + + private lazy val minishardCount = 1 << shardingSpecification.minishard_bits + + protected lazy val shardIndexRange: NumericRange.Exclusive[Long] = { + val end = minishardCount * 16 + Range.Long(0, end, 1) + } + + private def getShardIndex(shardPath: VaultPath)(implicit ec: ExecutionContext): Fox[Array[Byte]] = + shardIndexCache.getOrLoad(shardPath, readShardIndex) + + private def readShardIndex(shardPath: VaultPath)(implicit ec: ExecutionContext): Fox[Array[Byte]] = + shardPath.readBytes(Some(shardIndexRange)) + + private def parseShardIndex(index: Array[Byte]): Seq[(Long, Long)] = + // See https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/sharded.md#shard-index-format + index + .grouped(16) // 16 Bytes: 2 uint64 numbers: start_offset, end_offset + .map((bytes: Array[Byte]) => { + (BigInt(bytes.take(8).reverse).toLong, BigInt(bytes.slice(8, 16).reverse).toLong) // bytes reversed because they are stored little endian + }) + .toSeq + + private def getMinishardIndexRange(minishardNumber: Int, + parsedShardIndex: Seq[(Long, Long)]): NumericRange.Exclusive[Long] = { + val miniShardIndexStart: Long = (shardIndexRange.end) + parsedShardIndex(minishardNumber)._1 + val miniShardIndexEnd: Long = (shardIndexRange.end) + parsedShardIndex(minishardNumber)._2 + Range.Long(miniShardIndexStart, miniShardIndexEnd, 1) + } + + private def decodeMinishardIndex(bytes: Array[Byte]) = + shardingSpecification.minishard_index_encoding match { + case "gzip" => ZipIO.gunzip(bytes) + case _ => bytes + + } + + private def parseMinishardIndex(input: Array[Byte]): Box[Array[(Long, Long, Long)]] = tryo { + val bytes = decodeMinishardIndex(input) + /* + From: https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/sharded.md#minishard-index-format + The decoded "minishard index" is a binary string of 24*n bytes, specifying a contiguous C-order array of [3, n] + uint64le values. + */ + val n = bytes.length / 24 + val buf = ByteBuffer.allocate(bytes.length) + buf.put(bytes) + + val longArray = new Array[Long](n * 3) + buf.position(0) + buf.order(ByteOrder.LITTLE_ENDIAN) + buf.asLongBuffer().get(longArray) + // longArray is row major / C-order + /* + From: https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/sharded.md#minishard-index-format + Values array[0, 0], ..., array[0, n-1] specify the chunk IDs in the minishard, and are delta encoded, such that + array[0, 0] is equal to the ID of the first chunk, and the ID of chunk i is equal to the sum + of array[0, 0], ..., array[0, i]. + */ + val chunkIds = new Array[Long](n) + chunkIds(0) = longArray(0) + for (i <- 1 until n) { + chunkIds(i) = longArray(i) + chunkIds(i - 1) + } + /* + From: https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/sharded.md#minishard-index-format + The size of the data for chunk i is stored as array[2, i]. 
+ Values array[1, 0], ..., array[1, n-1] specify the starting offsets in the shard file of the data corresponding to + each chunk, and are also delta encoded relative to the end of the prior chunk, such that the starting offset of the + first chunk is equal to shard_index_end + array[1, 0], and the starting offset of chunk i is the sum of + shard_index_end + array[1, 0], ..., array[1, i] and array[2, 0], ..., array[2, i-1]. + */ + val chunkSizes = longArray.slice(2 * n, 3 * n) + val chunkStartOffsets = new Array[Long](n) + chunkStartOffsets(0) = longArray(n) + for (i <- 1 until n) { + val startOffsetIndex = i + n + chunkStartOffsets(i) = chunkStartOffsets(i - 1) + longArray(startOffsetIndex) + chunkSizes(i - 1) + } + + chunkIds.lazyZip(chunkStartOffsets).lazyZip(chunkSizes).toArray + } + + def getMinishardIndex(shardPath: VaultPath, minishardNumber: Int)( + implicit ec: ExecutionContext): Fox[Array[(Long, Long, Long)]] = + minishardIndexCache.getOrLoad((shardPath, minishardNumber), readMinishardIndex) + + private def readMinishardIndex(vaultPathAndMinishardNumber: (VaultPath, Int))( + implicit ec: ExecutionContext): Fox[Array[(Long, Long, Long)]] = { + val (vaultPath, minishardNumber) = vaultPathAndMinishardNumber + for { + index <- getShardIndex(vaultPath) + parsedIndex = parseShardIndex(index) + minishardIndexRange = getMinishardIndexRange(minishardNumber, parsedIndex) + indexRaw <- vaultPath.readBytes(Some(minishardIndexRange)) + minishardIndex <- parseMinishardIndex(indexRaw) + } yield minishardIndex + } + + def getChunkRange(chunkId: Long, minishardIndex: Array[(Long, Long, Long)])( + implicit ec: ExecutionContext): Fox[NumericRange.Exclusive[Long]] = + for { + chunkSpecification <- Fox.option2Fox(minishardIndex.find(_._1 == chunkId)) ?~> s"Could not find chunk id $chunkId in minishard index" + chunkStart = (shardIndexRange.end) + chunkSpecification._2 + chunkEnd = (shardIndexRange.end) + chunkSpecification._2 + chunkSpecification._3 + } yield Range.Long(chunkStart, chunkEnd, 1) + + def getChunk(chunkRange: NumericRange[Long], shardPath: VaultPath)(implicit ec: ExecutionContext): Fox[Array[Byte]] = + for { + rawBytes <- shardPath.readBytes(Some(chunkRange)) + bytes = shardingSpecification.data_encoding match { + // Check for GZIP Magic bytes to check if it was already decompressed + case "gzip" if rawBytes(0) == 31 && rawBytes(1) == -117 => ZipIO.gunzip(rawBytes) + case _ => rawBytes + } + } yield bytes +} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedArray.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedArray.scala index f7cc98ef8d1..c08944eed87 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedArray.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedArray.scala @@ -1,7 +1,6 @@ package com.scalableminds.webknossos.datastore.datareaders.precomputed import com.scalableminds.util.cache.AlfuCache -import com.scalableminds.util.io.ZipIO import com.scalableminds.util.tools.{Fox, FoxImplicits, JsonHelper} import com.scalableminds.webknossos.datastore.datareaders.{AxisOrder, DatasetArray} import com.scalableminds.webknossos.datastore.datavault.VaultPath @@ -10,12 +9,9 @@ import com.scalableminds.webknossos.datastore.models.datasource.AdditionalAxis import com.typesafe.scalalogging.LazyLogging import net.liftweb.common.Box.tryo -import 
java.nio.ByteOrder -import java.nio.ByteBuffer import scala.collection.immutable.NumericRange import scala.concurrent.ExecutionContext import com.scalableminds.util.tools.Fox.{box2Fox, option2Fox} -import net.liftweb.common.Box import ucar.ma2.{Array => MultiArray} object PrecomputedArray extends LazyLogging { @@ -65,7 +61,8 @@ class PrecomputedArray(vaultPath: VaultPath, additionalAxes, sharedChunkContentsCache) with FoxImplicits - with LazyLogging { + with LazyLogging + with NeuroglancerPrecomputedShardingUtils { lazy val voxelOffset: Array[Int] = header.precomputedScale.voxel_offset.getOrElse(Array(0, 0, 0)) override protected def getChunkFilename(chunkIndex: Array[Int]): String = { @@ -78,183 +75,17 @@ class PrecomputedArray(vaultPath: VaultPath, .mkString(header.dimension_separator.toString) } - // SHARDING - // Implemented according to https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/sharded.md, - // directly adapted from https://github.com/scalableminds/webknossos-connect/blob/master/wkconnect/backends/neuroglancer/sharding.py. - - private val shardIndexCache: AlfuCache[VaultPath, Array[Byte]] = - AlfuCache() - - private val minishardIndexCache: AlfuCache[(VaultPath, Int), Array[(Long, Long, Long)]] = - AlfuCache() + val shardingSpecification: ShardingSpecification = + header.precomputedScale.sharding.getOrElse(ShardingSpecification.empty) private def getHashForChunk(chunkIndex: Array[Int]): Long = CompressedMortonCode.encode(chunkIndex, header.gridSize) - private lazy val minishardMask = { - header.precomputedScale.sharding match { - case Some(shardingSpec: ShardingSpecification) => - if (shardingSpec.minishard_bits == 0) { - 0 - } else { - var minishardMask = 1L - for (_ <- 0 until shardingSpec.minishard_bits - 1) { - minishardMask <<= 1 - minishardMask |= 1 - } - minishardMask - } - case None => 0 - } - } - - private lazy val shardMask = { - header.precomputedScale.sharding match { - case Some(shardingSpec: ShardingSpecification) => - val oneMask = Long.MinValue // 0xFFFFFFFFFFFFFFFF - val cursor = shardingSpec.minishard_bits + shardingSpec.shard_bits - val shardMask = ~((oneMask >> cursor) << cursor) - shardMask & (~minishardMask) - case None => 0 - } - } - - private lazy val minishardCount = 1 << header.precomputedScale.sharding.map(_.minishard_bits).getOrElse(0) - - private lazy val shardIndexRange: NumericRange.Exclusive[Long] = { - val end = minishardCount * 16 - Range.Long(0, end, 1) - } - - private def decodeMinishardIndex(bytes: Array[Byte]) = - header.precomputedScale.sharding match { - case Some(shardingSpec: ShardingSpecification) => - shardingSpec.minishard_index_encoding match { - case "gzip" => ZipIO.gunzip(bytes) - case _ => bytes - } - case _ => bytes - } - - private def getShardIndex(shardPath: VaultPath)(implicit ec: ExecutionContext): Fox[Array[Byte]] = - shardIndexCache.getOrLoad(shardPath, readShardIndex) - - private def readShardIndex(shardPath: VaultPath)(implicit ec: ExecutionContext): Fox[Array[Byte]] = - shardPath.readBytes(Some(shardIndexRange)) - - private def parseShardIndex(index: Array[Byte]): Seq[(Long, Long)] = - // See https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/sharded.md#shard-index-format - index - .grouped(16) // 16 Bytes: 2 uint64 numbers: start_offset, end_offset - .map((bytes: Array[Byte]) => { - (BigInt(bytes.take(8).reverse).toLong, BigInt(bytes.slice(8, 16).reverse).toLong) // bytes reversed because they are stored little endian - }) - .toSeq - - 
private def getMinishardInfo(chunkHash: Long): (Long, Long) = - header.precomputedScale.sharding match { - case Some(shardingSpec: ShardingSpecification) => - val rawChunkIdentifier = chunkHash >> shardingSpec.preshift_bits - val chunkIdentifier = shardingSpec.hashFunction(rawChunkIdentifier) - val minishardNumber = chunkIdentifier & minishardMask - val shardNumber = (chunkIdentifier & shardMask) >> shardingSpec.minishard_bits - (shardNumber, minishardNumber) - case None => (0, 0) - } - - private def getPathForShard(shardNumber: Long): VaultPath = { - val shardBits = header.precomputedScale.sharding.map(_.shard_bits.toFloat).getOrElse(0f) - if (shardBits == 0) { - vaultPath / "0.shard" - } else { - val shardString = String.format(s"%1$$${(shardBits / 4).ceil.toInt}s", shardNumber.toHexString).replace(' ', '0') - vaultPath / s"$shardString.shard" - } - - } - - private def getMinishardIndexRange(minishardNumber: Int, - parsedShardIndex: Seq[(Long, Long)]): NumericRange.Exclusive[Long] = { - val miniShardIndexStart: Long = (shardIndexRange.end) + parsedShardIndex(minishardNumber)._1 - val miniShardIndexEnd: Long = (shardIndexRange.end) + parsedShardIndex(minishardNumber)._2 - Range.Long(miniShardIndexStart, miniShardIndexEnd, 1) - } - - private def parseMinishardIndex(input: Array[Byte]): Box[Array[(Long, Long, Long)]] = tryo { - val bytes = decodeMinishardIndex(input) - /* - From: https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/sharded.md#minishard-index-format - The decoded "minishard index" is a binary string of 24*n bytes, specifying a contiguous C-order array of [3, n] - uint64le values. - */ - val n = bytes.length / 24 - val buf = ByteBuffer.allocate(bytes.length) - buf.put(bytes) - - val longArray = new Array[Long](n * 3) - buf.position(0) - buf.order(ByteOrder.LITTLE_ENDIAN) - buf.asLongBuffer().get(longArray) - // longArray is row major / C-order - /* - From: https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/sharded.md#minishard-index-format - Values array[0, 0], ..., array[0, n-1] specify the chunk IDs in the minishard, and are delta encoded, such that - array[0, 0] is equal to the ID of the first chunk, and the ID of chunk i is equal to the sum - of array[0, 0], ..., array[0, i]. - */ - val chunkIds = new Array[Long](n) - chunkIds(0) = longArray(0) - for (i <- 1 until n) { - chunkIds(i) = longArray(i) + chunkIds(i - 1) - } - /* - From: https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/sharded.md#minishard-index-format - The size of the data for chunk i is stored as array[2, i]. - Values array[1, 0], ..., array[1, n-1] specify the starting offsets in the shard file of the data corresponding to - each chunk, and are also delta encoded relative to the end of the prior chunk, such that the starting offset of the - first chunk is equal to shard_index_end + array[1, 0], and the starting offset of chunk i is the sum of - shard_index_end + array[1, 0], ..., array[1, i] and array[2, 0], ..., array[2, i-1]. 
- */ - val chunkSizes = longArray.slice(2 * n, 3 * n) - val chunkStartOffsets = new Array[Long](n) - chunkStartOffsets(0) = longArray(n) - for (i <- 1 until n) { - val startOffsetIndex = i + n - chunkStartOffsets(i) = chunkStartOffsets(i - 1) + longArray(startOffsetIndex) + chunkSizes(i - 1) - } - - chunkIds.lazyZip(chunkStartOffsets).lazyZip(chunkSizes).toArray - } - - private def getMinishardIndex(shardPath: VaultPath, minishardNumber: Int)( - implicit ec: ExecutionContext): Fox[Array[(Long, Long, Long)]] = - minishardIndexCache.getOrLoad((shardPath, minishardNumber), readMinishardIndex) - - private def readMinishardIndex(vaultPathAndMinishardNumber: (VaultPath, Int))( - implicit ec: ExecutionContext): Fox[Array[(Long, Long, Long)]] = { - val (vaultPath, minishardNumber) = vaultPathAndMinishardNumber - for { - index <- getShardIndex(vaultPath) - parsedIndex = parseShardIndex(index) - minishardIndexRange = getMinishardIndexRange(minishardNumber, parsedIndex) - indexRaw <- vaultPath.readBytes(Some(minishardIndexRange)) - minishardIndex <- parseMinishardIndex(indexRaw) - } yield minishardIndex - } - - private def getChunkRange(chunkId: Long, minishardIndex: Array[(Long, Long, Long)])( - implicit ec: ExecutionContext): Fox[NumericRange.Exclusive[Long]] = - for { - chunkSpecification <- Fox.option2Fox(minishardIndex.find(_._1 == chunkId)) ?~> s"Could not find chunk id $chunkId in minishard index" - chunkStart = (shardIndexRange.end) + chunkSpecification._2 - chunkEnd = (shardIndexRange.end) + chunkSpecification._2 + chunkSpecification._3 - } yield Range.Long(chunkStart, chunkEnd, 1) - override def getShardedChunkPathAndRange(chunkIndex: Array[Int])( implicit ec: ExecutionContext): Fox[(VaultPath, NumericRange[Long])] = { val chunkIdentifier = getHashForChunk(chunkIndex) - val minishardInfo = getMinishardInfo(chunkIdentifier) - val shardPath = getPathForShard(minishardInfo._1) + val minishardInfo = shardingSpecification.getMinishardInfo(chunkIdentifier) + val shardPath = shardingSpecification.getPathForShard(vaultPath, minishardInfo._1) for { minishardIndex <- getMinishardIndex(shardPath, minishardInfo._2.toInt) ?~> f"Could not get minishard index for chunkIndex ${chunkIndex .mkString(",")}" diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedHeader.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedHeader.scala index 8ac64cfb075..ab536ed0cb0 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedHeader.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedHeader.scala @@ -1,10 +1,12 @@ package com.scalableminds.webknossos.datastore.datareaders.precomputed import com.scalableminds.util.geometry.Vec3Int +import com.scalableminds.util.tools.ByteUtils import com.scalableminds.webknossos.datastore.datareaders.ArrayDataType.ArrayDataType import com.scalableminds.webknossos.datastore.datareaders.ArrayOrder.ArrayOrder import com.scalableminds.webknossos.datastore.datareaders.DimensionSeparator.DimensionSeparator import com.scalableminds.webknossos.datastore.datareaders.{ArrayOrder, Compressor, DatasetHeader, DimensionSeparator} +import com.scalableminds.webknossos.datastore.datavault.VaultPath import com.scalableminds.webknossos.datastore.helpers.JsonImplicits import play.api.libs.json.{Format, JsResult, JsValue, Json} import play.api.libs.json.Json.WithDefaultValues @@ 
-79,11 +81,53 @@ case class ShardingSpecification(`@type`: String, minishard_bits: Int, shard_bits: Long, minishard_index_encoding: String = "raw", - data_encoding: String = "raw") { + data_encoding: String = "raw") + extends ByteUtils { def hashFunction(input: Long): Long = - if (hash == "identity") input - else ??? // not implemented: murmurhash3_x86_128 + hash match { + case "identity" => input + case "murmurhash3_x86_128" => MurmurHash3.hash64(longToBytes(input)) + case _ => throw new IllegalArgumentException(s"Unsupported hash function: $hash") + } + + private lazy val minishardMask = { + if (minishard_bits == 0) { + 0 + } else { + var minishardMask = 1L + for (_ <- 0 until minishard_bits - 1) { + minishardMask <<= 1 + minishardMask |= 1 + } + minishardMask + } + } + + private lazy val shardMask = { + val oneMask = 0xFFFFFFFFFFFFFFFFL + val cursor = minishard_bits + shard_bits + val shardMask = ~((oneMask >> cursor) << cursor) + shardMask & (~minishardMask) + } + + def getMinishardInfo(chunkHash: Long): (Long, Long) = { + val rawChunkIdentifier = chunkHash >> preshift_bits + val chunkIdentifier = hashFunction(rawChunkIdentifier) + val minishardNumber = chunkIdentifier & minishardMask + val shardNumber = (chunkIdentifier & shardMask) >> minishard_bits + (shardNumber, minishardNumber) + } + + def getPathForShard(base: VaultPath, shardNumber: Long): VaultPath = + if (shard_bits == 0) { + base / "0.shard" + } else { + val shardString = + String.format(s"%1$$${(shard_bits.toFloat / 4).ceil.toInt}s", shardNumber.toHexString).replace(' ', '0') + base / s"$shardString.shard" + } + } object ShardingSpecification extends JsonImplicits { @@ -94,6 +138,8 @@ object ShardingSpecification extends JsonImplicits { override def writes(shardingSpecification: ShardingSpecification): JsValue = Json.writes[ShardingSpecification].writes(shardingSpecification) } + + def empty: ShardingSpecification = ShardingSpecification("neuroglancer_uint64_sharded_v1", 0, "identity", 0, 0) } object PrecomputedScale extends JsonImplicits { diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSFullMeshService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSFullMeshService.scala index 11c560f31cd..2b3ac4c5cf2 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSFullMeshService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSFullMeshService.scala @@ -25,7 +25,9 @@ case class FullMeshRequest( editableMappingTracingId: Option[String], mag: Option[Vec3Int], // required for ad-hoc meshing seedPosition: Option[Vec3Int], // required for ad-hoc meshing - additionalCoordinates: Option[Seq[AdditionalCoordinate]] + additionalCoordinates: Option[Seq[AdditionalCoordinate]], + meshFilePath: Option[String], // required for remote neuroglancer precomputed mesh files + meshFileType: Option[String] ) object FullMeshRequest { @@ -55,6 +57,8 @@ class DSFullMeshService @Inject()(dataSourceRepository: DataSourceRepository, dataLayerName: String, fullMeshRequest: FullMeshRequest)(implicit ec: ExecutionContext, m: MessagesProvider): Fox[Array[Byte]] = fullMeshRequest.meshFileName match { + case Some(_) if fullMeshRequest.meshFilePath.isDefined => + loadFullMeshFromRemoteNeuroglancerMeshFile(organizationId, datasetDirectoryName, dataLayerName, fullMeshRequest) case Some(_) => loadFullMeshFromMeshfile(token, organizationId, datasetDirectoryName, dataLayerName, fullMeshRequest) case None => 
loadFullMeshFromAdHoc(organizationId, datasetDirectoryName, dataLayerName, fullMeshRequest) @@ -149,7 +153,10 @@ class DSFullMeshService @Inject()(dataSourceRepository: DataSourceRepository, layerName, meshFileName, chunkRange, - chunkInfos.transform) + chunkInfos.transform, + None, + None, + None) } stlOutput = combineEncodedChunksToStl(stlEncodedChunks) _ = logMeshingDuration(before, "meshfile", stlOutput.length) @@ -160,14 +167,28 @@ class DSFullMeshService @Inject()(dataSourceRepository: DataSourceRepository, layerName: String, meshfileName: String, chunkInfo: MeshChunk, - transform: Array[Array[Double]])(implicit ec: ExecutionContext): Fox[Array[Byte]] = + transform: Array[Array[Double]], + meshFileType: Option[String], + meshFilePath: Option[String], + segmentId: Option[Long])(implicit ec: ExecutionContext): Fox[Array[Byte]] = for { - (dracoMeshChunkBytes, encoding) <- meshFileService.readMeshChunk( - organizationId, - datasetDirectoryName, - layerName, - MeshChunkDataRequestList(meshfileName, List(MeshChunkDataRequest(chunkInfo.byteOffset, chunkInfo.byteSize))) - ) ?~> "mesh.file.loadChunk.failed" + (dracoMeshChunkBytes, encoding) <- meshFileType match { + case Some("neuroglancerPrecomputed") => + meshFileService.readMeshChunkForNeuroglancerPrecomputed( + meshFilePath, + Seq(MeshChunkDataRequest(chunkInfo.byteOffset, chunkInfo.byteSize, segmentId)) + ) ?~> "mesh.file.loadChunk.failed" + case _ => + meshFileService.readMeshChunk( + organizationId, + datasetDirectoryName, + layerName, + MeshChunkDataRequestList(meshfileName, + None, + None, + List(MeshChunkDataRequest(chunkInfo.byteOffset, chunkInfo.byteSize, None))) + ) ?~> "mesh.file.loadChunk.failed" + } _ <- bool2Fox(encoding == "draco") ?~> s"meshfile encoding is $encoding, only draco is supported" scale <- tryo(Vec3Double(transform(0)(0), transform(1)(1), transform(2)(2))) ?~> "could not extract scale from meshfile transform attribute" stlEncodedChunk <- tryo( @@ -180,4 +201,33 @@ class DSFullMeshService @Inject()(dataSourceRepository: DataSourceRepository, scale.z)) } yield stlEncodedChunk + private def loadFullMeshFromRemoteNeuroglancerMeshFile( + organizationId: String, + datasetDirectoryName: String, + layerName: String, + fullMeshRequest: FullMeshRequest)(implicit ec: ExecutionContext, m: MessagesProvider): Fox[Array[Byte]] = + for { + // TODO: Mapping, segmentIds + chunkInfos: WebknossosSegmentInfo <- meshFileService.listMeshChunksForNeuroglancerPrecomputedMesh( + fullMeshRequest.meshFilePath, + fullMeshRequest.segmentId + ) + selectedLod = fullMeshRequest.lod.getOrElse(0) + allChunkRanges: List[MeshChunk] = chunkInfos.chunks.lods(selectedLod).chunks + stlEncodedChunks: Seq[Array[Byte]] <- Fox.serialCombined(allChunkRanges) { chunkRange: MeshChunk => + readMeshChunkAsStl( + organizationId, + datasetDirectoryName, + layerName, + fullMeshRequest.meshFileName.get, + chunkRange, + Array(Array(1, 0, 0), Array(0, 1, 0), Array(0, 0, 1)), + fullMeshRequest.meshFileType, + fullMeshRequest.meshFilePath, + Some(fullMeshRequest.segmentId) + ) + } + stlOutput = combineEncodedChunksToStl(stlEncodedChunks) + } yield stlOutput + } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/MeshFileService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/MeshFileService.scala index eaf88ad19c0..70904d6cc19 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/MeshFileService.scala +++ 
b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/MeshFileService.scala @@ -1,20 +1,36 @@ package com.scalableminds.webknossos.datastore.services import com.google.common.io.LittleEndianDataInputStream +import com.scalableminds.util.cache.AlfuCache import com.scalableminds.util.geometry.{Vec3Float, Vec3Int} import com.scalableminds.util.io.PathUtils -import com.scalableminds.util.tools.JsonHelper.bool2Box -import com.scalableminds.util.tools.{ByteUtils, Fox, FoxImplicits} +import com.scalableminds.util.tools.{ByteUtils, Fox, FoxImplicits, JsonHelper} import com.scalableminds.webknossos.datastore.DataStoreConfig -import com.scalableminds.webknossos.datastore.storage.{CachedHdf5File, Hdf5FileCache} +import com.scalableminds.webknossos.datastore.datareaders.precomputed.ShardingSpecification +import com.scalableminds.webknossos.datastore.datavault.VaultPath +import com.scalableminds.webknossos.datastore.models.datasource.{ + Category, + DataFormat, + DataLayer, + DataLayerWithMagLocators, + GenericDataSource +} +import com.scalableminds.webknossos.datastore.storage.{ + CachedHdf5File, + DataVaultService, + Hdf5FileCache, + RemoteSourceDescriptor +} import com.typesafe.scalalogging.LazyLogging import net.liftweb.common.Box import net.liftweb.common.Box.tryo +import net.liftweb.common.Full import org.apache.commons.io.FilenameUtils import play.api.i18n.{Messages, MessagesProvider} import play.api.libs.json.{Json, OFormat} import java.io.ByteArrayInputStream +import java.net.URI import java.nio.file.{Path, Paths} import javax.inject.Inject import scala.collection.mutable.ListBuffer @@ -22,6 +38,8 @@ import scala.concurrent.ExecutionContext case class ListMeshChunksRequest( meshFile: String, + meshFilePath: Option[String], + meshFileType: Option[String], segmentId: Long ) @@ -31,11 +49,14 @@ object ListMeshChunksRequest { case class MeshChunkDataRequest( byteOffset: Long, - byteSize: Int + byteSize: Int, + segmentId: Option[Long] // Only relevant for neuroglancer precomputed meshes ) case class MeshChunkDataRequestList( meshFile: String, + meshFilePath: Option[String], + meshFileType: Option[String], requests: Seq[MeshChunkDataRequest] ) @@ -49,6 +70,8 @@ object MeshChunkDataRequestList { case class MeshFileInfo( meshFileName: String, + meshFilePath: Option[String], + meshFileType: Option[String], mappingName: Option[String], formatVersion: Long ) @@ -57,6 +80,19 @@ object MeshFileInfo { implicit val jsonFormat: OFormat[MeshFileInfo] = Json.format[MeshFileInfo] } +case class NeuroglancerPrecomputedMeshInfo( + lod_scale_multiplier: Double, + transform: Array[Double], + sharding: Option[ShardingSpecification], + vertex_quantization_bits: Int, +) { + def transform2DArray: Array[Array[Double]] = transform.grouped(4).toArray +} + +object NeuroglancerPrecomputedMeshInfo { + implicit val jsonFormat: OFormat[NeuroglancerPrecomputedMeshInfo] = Json.format[NeuroglancerPrecomputedMeshInfo] +} + case class NeuroglancerSegmentManifest(chunkShape: Vec3Float, gridOrigin: Vec3Float, numLods: Int, @@ -171,7 +207,8 @@ object WebknossosSegmentInfo { } -class MeshFileService @Inject()(config: DataStoreConfig)(implicit ec: ExecutionContext) +class MeshFileService @Inject()(config: DataStoreConfig, dataVaultService: DataVaultService)( + implicit ec: ExecutionContext) extends FoxImplicits with LazyLogging with Hdf5HashedArrayUtils @@ -209,7 +246,59 @@ class MeshFileService @Inject()(config: DataStoreConfig)(implicit ec: ExecutionC mappingNameOptions = mappingNameBoxes.map(_.toOption) zipped = 
meshFileNames.lazyZip(mappingNameOptions).lazyZip(meshFileVersions) - } yield zipped.map(MeshFileInfo(_, _, _)).toSet + } yield + zipped + .map({ + case (fileName, mappingName, fileVersion) => + MeshFileInfo(fileName, None, Some("local"), mappingName, fileVersion) + }) + .toSet + } + + private lazy val neuroglancerPrecomputedMeshInfoCache = AlfuCache[VaultPath, NeuroglancerPrecomputedMeshInfo](100) + + private def loadRemoteMeshInfo(meshPath: VaultPath): Fox[NeuroglancerPrecomputedMeshInfo] = + for { + _ <- Fox.successful(()) + meshInfoPath = meshPath / "info" + meshInfo <- meshInfoPath.parseAsJson[NeuroglancerPrecomputedMeshInfo] ?~> "Failed to read mesh info" + } yield meshInfo + + def exploreNeuroglancerPrecomputedMeshes(organizationId: String, + datasetName: String, + dataLayerName: String): Fox[Set[MeshFileInfo]] = { + def exploreMeshesForLayer(dataLayer: DataLayer): Fox[(NeuroglancerPrecomputedMeshInfo, VaultPath)] = + for { + _ <- Fox.successful(()) + dataLayerWithMagLocators <- tryo(dataLayer.asInstanceOf[DataLayerWithMagLocators]).toFox + firstMag <- dataLayerWithMagLocators.mags.headOption.toFox ?~> "No mags found" + magPath <- firstMag.path.toFox ?~> "Mag has no path" + remotePath <- dataVaultService.getVaultPath(RemoteSourceDescriptor(new URI(magPath), None)) + // We are assuming that meshes will be placed in /mesh directory. To be precise, we would first need to check the root info file. + meshPath = remotePath.parent / "mesh" + meshInfo <- neuroglancerPrecomputedMeshInfoCache.getOrLoad(meshPath, loadRemoteMeshInfo) + } yield (meshInfo, meshPath) + + def isDataLayerValid(d: DataLayer) = + d.name == dataLayerName && d.category == Category.segmentation && d.dataFormat == DataFormat.neuroglancerPrecomputed + + val datasetDir = dataBaseDir.resolve(organizationId).resolve(datasetName) + val datasetPropertiesFile = datasetDir.resolve("datasource-properties.json") + for { + datasetProperties <- JsonHelper + .validatedJsonFromFile[GenericDataSource[DataLayer]](datasetPropertiesFile, datasetDir) + .toFox + meshInfosAndInfoPaths = datasetProperties.dataLayers.filter(isDataLayerValid).map(exploreMeshesForLayer) + meshInfosResolved: List[(NeuroglancerPrecomputedMeshInfo, VaultPath)] <- Fox + .sequenceOfFulls(meshInfosAndInfoPaths) + .toFox + } yield + meshInfosResolved + .map({ + case (_, vaultPath) => + MeshFileInfo("mesh", Some(vaultPath.toString), Some("neuroglancerPrecomputed"), None, 7) + }) + .toSet } /* @@ -377,6 +466,26 @@ class MeshFileService @Inject()(config: DataStoreConfig)(implicit ec: ExecutionC (neuroglancerStart, neuroglancerEnd) } + def listMeshChunksForNeuroglancerPrecomputedMesh(meshFilePathOpt: Option[String], + segmentId: Long): Fox[WebknossosSegmentInfo] = + for { + meshFilePath <- meshFilePathOpt.toFox ?~> "No mesh file path provided" + vaultPath <- dataVaultService.getVaultPath(RemoteSourceDescriptor(new URI(meshFilePath), None)) + meshInfo <- neuroglancerPrecomputedMeshInfoCache.getOrLoad(vaultPath, loadRemoteMeshInfo) + mesh = NeuroglancerMesh(meshInfo) + minishardInfo = mesh.shardingSpecification.getMinishardInfo(segmentId) + shardUrl = mesh.shardingSpecification.getPathForShard(vaultPath, minishardInfo._1) + minishardIndex <- mesh.getMinishardIndex(shardUrl, minishardInfo._2.toInt) + chunkRange <- mesh.getChunkRange(segmentId, minishardIndex) + chunk <- mesh.getChunk(chunkRange, shardUrl) + segmentManifest = NeuroglancerSegmentManifest.fromBytes(chunk) + meshSegmentInfo = enrichSegmentInfo(segmentManifest, meshInfo.lod_scale_multiplier, chunkRange.start, 
segmentId) + transform = meshInfo.transform2DArray // Something is going wrong here, the meshes are far outside the other data + //transform = Array(Array(2.0, 0.0, 0.0, 0.0), Array(0.0, 2.0, 0.0, 0.0), Array(0.0, 0.0, 2.0, 0.0)) + encoding = "draco" + wkChunkInfos <- WebknossosSegmentInfo.fromMeshInfosAndMetadata(List(meshSegmentInfo), encoding, transform) + } yield wkChunkInfos + def readMeshChunk(organizationId: String, datasetDirectoryName: String, dataLayerName: String, @@ -403,20 +512,16 @@ class MeshFileService @Inject()(config: DataStoreConfig)(implicit ec: ExecutionC // Sort the requests by byte offset to optimize for spinning disk access val requestsReordered = meshChunkDataRequests.requests.zipWithIndex.sortBy(requestAndIndex => requestAndIndex._1.byteOffset).toList - val data: List[(Array[Byte], String, Int)] = requestsReordered.map { requestAndIndex => + val data: List[(Array[Byte], Int)] = requestsReordered.map { requestAndIndex => val meshChunkDataRequest = requestAndIndex._1 val data = cachedMeshFile.uint8Reader.readArrayBlockWithOffset("neuroglancer", meshChunkDataRequest.byteSize, meshChunkDataRequest.byteOffset) - (data, meshFormat, requestAndIndex._2) + (data, requestAndIndex._2) } - val dataSorted = data.sortBy(d => d._3) - for { - _ <- bool2Box(data.map(d => d._2).toSet.size == 1) ?~! "Different encodings for the same mesh chunk request found." - encoding <- data.map(d => d._2).headOption - output = dataSorted.flatMap(d => d._1).toArray - } yield (output, encoding) + val dataSorted = data.sortBy(d => d._2) + Full((dataSorted.flatMap(d => d._1).toArray, meshFormat)) } def clearCache(organizationId: String, datasetDirectoryName: String, layerNameOpt: Option[String]): Int = { @@ -425,4 +530,23 @@ class MeshFileService @Inject()(config: DataStoreConfig)(implicit ec: ExecutionC meshFileCache.clear(key => key.startsWith(relevantPath.toString)) } + def readMeshChunkForNeuroglancerPrecomputed( + meshFilePathOpt: Option[String], + meshChunkDataRequests: Seq[MeshChunkDataRequest]): Fox[(Array[Byte], String)] = + for { + meshFilePath <- meshFilePathOpt.toFox ?~> "Mesh file path is required" + vaultPath <- dataVaultService.getVaultPath(RemoteSourceDescriptor(new URI(meshFilePath), None)) + meshInfo <- neuroglancerPrecomputedMeshInfoCache.getOrLoad(vaultPath, loadRemoteMeshInfo) + mesh = NeuroglancerMesh(meshInfo) + + segmentId <- Fox.option2Fox(meshChunkDataRequests.head.segmentId) ?~> "Segment id parameter is required" // This assumes that all requests are for the same segment + + minishardInfo = mesh.shardingSpecification.getMinishardInfo(segmentId) + shardUrl = mesh.shardingSpecification.getPathForShard(vaultPath, minishardInfo._1) + chunks <- Fox.serialCombined(meshChunkDataRequests.toList)(request => + shardUrl.readBytes(Some(request.byteOffset until request.byteOffset + request.byteSize))) + encoding = "draco" + output = chunks.flatten.toArray + } yield (output, encoding) + } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/NeuroglancerMesh.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/NeuroglancerMesh.scala new file mode 100644 index 00000000000..20eec6bb956 --- /dev/null +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/NeuroglancerMesh.scala @@ -0,0 +1,11 @@ +package com.scalableminds.webknossos.datastore.services + +import com.scalableminds.webknossos.datastore.datareaders.precomputed.{ + NeuroglancerPrecomputedShardingUtils, + ShardingSpecification +} + +case class 
NeuroglancerMesh(meshInfo: NeuroglancerPrecomputedMeshInfo) extends NeuroglancerPrecomputedShardingUtils { + override val shardingSpecification: ShardingSpecification = meshInfo.sharding.get // TODO: Remove get + +}
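
Note on the sharded lookup flow introduced above: listMeshChunksForNeuroglancerPrecomputedMesh resolves a segment's mesh manifest by hashing the segment id, locating the shard and minishard, and then reading the chunk's byte range from the shard file. The following is a minimal sketch of that flow, assuming the helper signatures from NeuroglancerPrecomputedShardingUtils and ShardingSpecification as defined in this patch; meshVaultPath is a hypothetical VaultPath pointing at the remote mesh directory.

import com.scalableminds.util.tools.Fox
import com.scalableminds.webknossos.datastore.datavault.VaultPath
import com.scalableminds.webknossos.datastore.services.NeuroglancerMesh
import scala.concurrent.ExecutionContext

// Sketch only, not part of the patch: resolve the bytes of a segment's mesh manifest chunk.
def readSegmentManifestBytes(mesh: NeuroglancerMesh, meshVaultPath: VaultPath, segmentId: Long)(
    implicit ec: ExecutionContext): Fox[Array[Byte]] = {
  // Hash the (preshifted) segment id to obtain shard and minishard numbers.
  val (shardNumber, minishardNumber) = mesh.shardingSpecification.getMinishardInfo(segmentId)
  // Derive the shard file path, e.g. "005.shard", from the shard number.
  val shardPath = mesh.shardingSpecification.getPathForShard(meshVaultPath, shardNumber)
  for {
    // Read and parse the minishard index: (chunkId, startOffset, size) triples.
    minishardIndex <- mesh.getMinishardIndex(shardPath, minishardNumber.toInt)
    // Find the byte range of this segment's chunk within the shard file.
    chunkRange <- mesh.getChunkRange(segmentId, minishardIndex)
    // Read (and gunzip, if data_encoding is "gzip") the chunk holding the segment manifest.
    chunkBytes <- mesh.getChunk(chunkRange, shardPath)
  } yield chunkBytes
}

For example, with shard_bits = 10, getPathForShard pads the hexadecimal shard number to ceil(10 / 4) = 3 digits, so shard number 5 resolves to "005.shard" under the mesh directory.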
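
Note on the hash selection in ShardingSpecification.hashFunction: for "murmurhash3_x86_128", the neuroglancer sharded format hashes the 8-byte little-endian representation of the preshifted chunk id and uses only the low 64 bits of the 128-bit result, which is what MurmurHash3.hash64 returns. A minimal sketch, assuming longToBytes from ByteUtils produces that little-endian encoding (the conversion is written out explicitly here):

import java.nio.{ByteBuffer, ByteOrder}
import com.scalableminds.webknossos.datastore.datareaders.precomputed.MurmurHash3

// Sketch only, not part of the patch: hash a raw chunk identifier the way the
// "murmurhash3_x86_128" branch of hashFunction does.
def hashedChunkIdentifier(rawChunkIdentifier: Long): Long = {
  val littleEndianBytes =
    ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(rawChunkIdentifier).array()
  // hash64 keeps the low 64 bits of the 128-bit MurmurHash3_x86_128 value
  // (see MurmurHashTestSuite for a concrete expected value).
  MurmurHash3.hash64(littleEndianBytes, seed = 0)
}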
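
Note on parseMinishardIndex: the decoded minishard index is a C-order [3, n] array of uint64le values, where row 0 holds delta-encoded chunk ids, row 1 holds delta-encoded start offsets (relative to the end of the previous chunk), and row 2 holds chunk sizes. A small worked example under these rules, mirroring the decoding logic in the patch:

// Sketch only, not part of the patch: decoding a minishard index with n = 2 entries.
// Row 0 (ids, delta coded):      10, 5   -> chunk ids 10 and 15
// Row 1 (offsets, delta coded):   0, 8   -> start offsets 0 and 0 + 8 + 100 = 108
//                                           (both relative to shardIndexRange.end)
// Row 2 (sizes):                100, 50  -> chunk sizes 100 and 50
val longArray = Array(10L, 5L, 0L, 8L, 100L, 50L)
val n = 2
val chunkIds = longArray.take(n).scanLeft(0L)(_ + _).tail        // Array(10, 15)
val chunkSizes = longArray.slice(2 * n, 3 * n)                   // Array(100, 50)
val chunkStartOffsets = new Array[Long](n)
chunkStartOffsets(0) = longArray(n)                              // 0
for (i <- 1 until n)
  chunkStartOffsets(i) = chunkStartOffsets(i - 1) + longArray(n + i) + chunkSizes(i - 1) // 108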