This also allows you to change file formats per-session", + default: false, }, upload_to_dandi: { type: "boolean", - title: "Would you like to upload your data to DANDI?", + title: "Will you publish data on DANDI?", default: true, }, }; @@ -92,7 +115,7 @@ const getSchema = (questions) => { else Object.entries(deps).forEach(([dep, opts]) => { if (!acc[dep]) acc[dep] = []; - acc[dep].push({ name, ...opts }); + acc[dep].push({ name, default: info.default, ...opts }); }); } return acc; @@ -108,6 +131,11 @@ const getSchema = (questions) => { return acc; }, []); + const ignore = Object.entries(questions).reduce((acc, [name, info]) => { + if (info.ignore) acc[name] = true; + return acc; + }, {}); + const projectWorkflowSchema = { type: "object", properties: Object.entries(questions).reduce((acc, [name, info]) => { @@ -123,6 +151,7 @@ const getSchema = (questions) => { schema: structuredClone(projectWorkflowSchema), defaults, dependents, + ignore, }; }; @@ -142,9 +171,11 @@ export class GuidedPreform extends Page { subtitle: "Answer the following questions to simplify your workflow through the GUIDE", }; + #setWorkflow = () => (this.info.globalState.project.workflow = this.state); // NOTE: Defaults already populated + beforeSave = async () => { await this.form.validate(); - this.info.globalState.project.workflow = this.state; + this.#setWorkflow(); }; footer = { @@ -155,7 +186,7 @@ export class GuidedPreform extends Page { }; updateForm = () => { - const { schema, dependents, defaults } = getSchema(questions); + const { schema, dependents, defaults, ignore } = getSchema(questions); const projectState = this.info.globalState.project ?? {}; if (!projectState.workflow) projectState.workflow = {}; @@ -168,6 +199,7 @@ export class GuidedPreform extends Page { this.form = new JSONSchemaForm({ schema, + ignore, results: this.state, validateEmptyValues: false, // Only show errors after submission validateOnChange: function (name, parent, path, value) { @@ -193,28 +225,46 @@ export class GuidedPreform extends Page { condition = (v) => dependent.condition.some((condition) => v == condition); else console.warn("Invalid condition", dependent.condition); + // Is set to true if (uniformDeps.every(({ name }) => condition(parent[name]))) { dependentParent.removeAttribute(attr); if ("required" in dependent) dependentEl.required = dependent.required; if ("__cached" in dependent) dependentEl.updateData(dependent.__cached); - } else { + } + + // Is set to false + else { if (dependentEl.value !== undefined) dependent.__cached = dependentEl.value; dependentEl.updateData(dependent.default); dependentParent.setAttribute(attr, true); if ("required" in dependent) dependentEl.required = !dependent.required; } }); + + const { upload_to_dandi, file_format } = parent; + + // Only check file format because of global re-render + if (name === "file_format") { + if (upload_to_dandi === true && file_format === "zarr") + return [ + { + type: "error", + message: + "
Zarr files are not supported by DANDI
Please change the file format to HDF5 or disable DANDI upload.", + }, + ]; + } }, - // Immediately re-render boolean values + // Save all changes onUpdate: async (path, value) => { - if (typeof value === "boolean") { - this.unsavedUpdates = true; - this.info.globalState.project.workflow = this.state; - this.updateSections(); // Trigger section changes with new workflow - await this.save({}, false); // Save new workflow and section changes - } + const willUpdateFlow = typeof value === "boolean"; + this.unsavedUpdates = true; + this.#setWorkflow(); + if (willUpdateFlow) this.updateSections(); // Trigger section changes with new workflow + await this.save({}, false); // Save new workflow and section changes }, + onThrow, // groups: [ // { diff --git a/src/electron/frontend/core/components/pages/settings/SettingsPage.js b/src/electron/frontend/core/components/pages/settings/SettingsPage.js index d5ea234eab..5c70885068 100644 --- a/src/electron/frontend/core/components/pages/settings/SettingsPage.js +++ b/src/electron/frontend/core/components/pages/settings/SettingsPage.js @@ -32,7 +32,8 @@ import examplePipelines from "../../../../../../example_pipelines.yml"; import { run } from "../guided-mode/options/utils.js"; import { joinPath } from "../../../globals"; import { Modal } from "../../Modal"; -import { ProgressBar, humanReadableBytes } from "../../ProgressBar"; +import { ProgressBar } from "../../ProgressBar"; +import { humanReadableBytes } from "../../utils/size"; const DATA_OUTPUT_PATH = joinPath(testDataFolderPath, "single_session_data"); const DATASET_OUTPUT_PATH = joinPath(testDataFolderPath, "multi_session_dataset"); diff --git a/src/electron/frontend/core/components/table/cells/input.ts b/src/electron/frontend/core/components/table/cells/input.ts index 17572ead1d..4c02b508de 100644 --- a/src/electron/frontend/core/components/table/cells/input.ts +++ b/src/electron/frontend/core/components/table/cells/input.ts @@ -43,8 +43,6 @@ export class NestedEditor extends LitElement { const schema = this.schema - - console.log('schema', schema, 'data', data) const container = document.createElement('div') const input = this.#input = new JSONSchemaInput({ schema, diff --git a/src/electron/frontend/core/components/utils/size.ts b/src/electron/frontend/core/components/utils/size.ts new file mode 100644 index 0000000000..2e08881342 --- /dev/null +++ b/src/electron/frontend/core/components/utils/size.ts @@ -0,0 +1,20 @@ +export function humanReadableBytes(size: number | string) { + + // Define the units + const units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']; + + // Initialize the index to 0 + let index = 0; + + // Convert the size to a floating point number + size = parseFloat(size); + + // Loop until the size is less than 1024 and increment the unit + while (size >= 1000 && index < units.length - 1) { + size /= 1000; + index += 1; + } + + // Return the size formatted with 2 decimal places and the appropriate unit + return `${size.toFixed(2)} ${units[index]}`; +} diff --git a/src/electron/frontend/core/pages.js b/src/electron/frontend/core/pages.js index 11f042adcf..3371f27950 100644 --- a/src/electron/frontend/core/pages.js +++ b/src/electron/frontend/core/pages.js @@ -4,9 +4,13 @@ import { GuidedHomePage } from "./components/pages/guided-mode/GuidedHome"; import { GuidedNewDatasetPage } from "./components/pages/guided-mode/setup/GuidedNewDatasetInfo"; import { GuidedStructurePage } from "./components/pages/guided-mode/data/GuidedStructure"; import { sections } from 
"./components/pages/globals"; + import { GuidedSubjectsPage } from "./components/pages/guided-mode/setup/GuidedSubjects"; + import { GuidedSourceDataPage } from "./components/pages/guided-mode/data/GuidedSourceData"; import { GuidedMetadataPage } from "./components/pages/guided-mode/data/GuidedMetadata"; +import { GuidedBackendConfigurationPage } from "./components/pages/guided-mode/data/GuidedBackendConfiguration"; + import { GuidedUploadPage } from "./components/pages/guided-mode/options/GuidedUpload"; import { GuidedResultsPage } from "./components/pages/guided-mode/results/GuidedResults"; import { Dashboard } from "./components/Dashboard"; @@ -125,6 +129,12 @@ const pages = { section: sections[1], }), + backend: new GuidedBackendConfigurationPage({ + title: "Backend Configuration", + label: "Backend configuration", + section: sections[1], + }), + inspect: new GuidedInspectorPage({ title: "Inspector Report", label: "Validate metadata", diff --git a/src/electron/frontend/core/validation/backend-configuration.ts b/src/electron/frontend/core/validation/backend-configuration.ts new file mode 100644 index 0000000000..16431dc93c --- /dev/null +++ b/src/electron/frontend/core/validation/backend-configuration.ts @@ -0,0 +1,7 @@ +import { humanReadableBytes } from "../components/utils/size"; + +const prod = (arr: number[]) => arr.reduce((accumulator, currentValue) => accumulator * currentValue, 1); + +export const getResourceUsageBytes = (shape: number[], itemsize: number, scale=1e9) => prod(shape) * (itemsize / scale) // Default to GB + +export const getResourceUsage = (shape: number[], itemsize: number) => humanReadableBytes(getResourceUsageBytes(shape, itemsize, 1)) diff --git a/src/pyflask/app.py b/src/pyflask/app.py index 28b0f8a308..3301c21a88 100644 --- a/src/pyflask/app.py +++ b/src/pyflask/app.py @@ -107,10 +107,15 @@ def post(self): type = payload["type"] header = payload["header"] inputs = payload["inputs"] - traceback = payload["traceback"] + traceback = payload.get("traceback", "") + + message = f"{header}\n{'-'*len(header)}\n\n{json.dumps(inputs, indent=2)}\n" + + if traceback: + message += f"\n{traceback}\n" - message = f"{header}\n{'-'*len(header)}\n\n{json.dumps(inputs, indent=2)}\n\n{traceback}\n" selected_logger = getattr(api.logger, type) + api.logger.info(f"Logging {type} message: {header}") selected_logger(message) diff --git a/src/pyflask/manageNeuroconv/__init__.py b/src/pyflask/manageNeuroconv/__init__.py index 0793acb240..8401b0f374 100644 --- a/src/pyflask/manageNeuroconv/__init__.py +++ b/src/pyflask/manageNeuroconv/__init__.py @@ -7,6 +7,7 @@ generate_test_data, get_all_converter_info, get_all_interface_info, + get_backend_configuration, get_interface_alignment, get_metadata_schema, get_source_schema, diff --git a/src/pyflask/manageNeuroconv/manage_neuroconv.py b/src/pyflask/manageNeuroconv/manage_neuroconv.py index f875874459..2d7825411b 100644 --- a/src/pyflask/manageNeuroconv/manage_neuroconv.py +++ b/src/pyflask/manageNeuroconv/manage_neuroconv.py @@ -510,6 +510,9 @@ def on_recording_interface(name, recording_interface): if has_ecephys: + if "definitions" not in ecephys_schema: + ecephys_schema["definitions"] = ecephys_properties["definitions"] + has_electrodes = "ElectrodeColumns" in ecephys_metadata original_units_schema = ecephys_properties.pop("UnitProperties", None) @@ -863,52 +866,45 @@ def get_interface_alignment(info: dict) -> dict: ) -def convert_to_nwb( +def create_file( info: dict, - log_url=None, -) -> str: - """Function used to convert the source 
data to NWB format using the specified metadata.""" + log_url: Optional[str] = None, +) -> dict: import requests - from neuroconv import NWBConverter from tqdm_publisher import TQDMProgressSubscriber - url = info.get("url", None) - request_id = info.get("request_id", None) - - nwbfile_path = Path(info["nwbfile_path"]) - custom_output_directory = info.get("output_folder") project_name = info.get("project_name") + run_stub_test = info.get("stub_test", False) - default_output_base = STUB_SAVE_FOLDER_PATH if run_stub_test else CONVERSION_SAVE_FOLDER_PATH - default_output_directory = default_output_base / project_name - try: + overwrite = info.get("overwrite", False) - # add a subdirectory to a filepath if stub_test is true - resolved_output_base = Path(custom_output_directory) if custom_output_directory else default_output_base - resolved_output_directory = resolved_output_base / project_name - resolved_output_path = resolved_output_directory / nwbfile_path + # Progress update info + url = info.get("url") + request_id = info.get("request_id") - # Remove symlink placed at the default_output_directory if this will hold real data - if resolved_output_directory == default_output_directory and default_output_directory.is_symlink(): - default_output_directory.unlink() + # Backend configuration info + backend_configuration = info.get("configuration", {}) + backend = backend_configuration.get("backend", "hdf5") - resolved_output_path.parent.mkdir(exist_ok=True, parents=True) # Ensure all parent directories exist + converter, metadata, path_info = get_conversion_info(info) - resolved_source_data = replace_none_with_nan( - info["source_data"], resolve_references(get_custom_converter(info["interfaces"]).get_source_schema()) - ) + nwbfile_path = path_info["file"] - converter = instantiate_custom_converter( - source_data=resolved_source_data, - interface_class_dict=info["interfaces"], - alignment_info=info.get("alignment", dict()), - ) + try: + + # Delete files manually if using Zarr + if overwrite: + if nwbfile_path.exists(): + if nwbfile_path.is_dir(): + rmtree(nwbfile_path) + else: + nwbfile_path.unlink() def update_conversion_progress(message): update_dict = dict(request_id=request_id, **message) - if (url) or not run_stub_test: + if url or not run_stub_test: requests.post(url=url, json=update_dict) else: progress_handler.announce(update_dict) @@ -938,126 +934,273 @@ def update_conversion_progress(message): progress_bar_options=progress_bar_options, ) - # Ensure Ophys NaN values are resolved - resolved_metadata = replace_none_with_nan(info["metadata"], resolve_references(converter.get_metadata_schema())) + run_conversion_kwargs = dict( + metadata=metadata, + nwbfile_path=nwbfile_path, + overwrite=overwrite, + conversion_options=options, + backend=backend, + ) - ecephys_metadata = resolved_metadata.get("Ecephys") + if not run_stub_test: + run_conversion_kwargs.update(dict(backend_configuration=update_backend_configuration(info))) - if ecephys_metadata: + converter.run_conversion(**run_conversion_kwargs) - # Quick fix to remove units - has_units = "Units" in ecephys_metadata + except Exception as e: + if log_url: + requests.post( + url=log_url, + json=dict( + header=f"Conversion failed for {project_name} — {nwbfile_path} (convert_to_nwb)", + inputs=dict(info=info), + traceback=traceback.format_exc(), + type="error", + ), + ) - if has_units: + raise e - ## NOTE: Currently do not allow editing units properties - # shared_units_columns = ecephys_metadata["UnitColumns"] - # for interface_name, 
interface_unit_results in ecephys_metadata["Units"].items(): - # interface = converter.data_interface_objects[interface_name] - # update_sorting_properties_from_table_as_json( - # interface, - # unit_table_json=interface_unit_results, - # unit_column_info=shared_units_columns, - # ) +def update_backend_configuration(info: dict) -> dict: - # ecephys_metadata["UnitProperties"] = [ - # {"name": entry["name"], "description": entry["description"]} for entry in shared_units_columns - # ] + from neuroconv.tools.nwb_helpers import ( + get_default_backend_configuration, + make_nwbfile_from_metadata, + ) - del ecephys_metadata["Units"] - del ecephys_metadata["UnitColumns"] + PROPS_TO_IGNORE = ["full_shape"] - has_electrodes = "Electrodes" in ecephys_metadata - if has_electrodes: + info_from_frontend = info.get("configuration", {}) + backend = info_from_frontend.get("backend", "hdf5") + backend_configuration_from_frontend = info_from_frontend.get("results", {}).get(backend, {}) - shared_electrode_columns = ecephys_metadata["ElectrodeColumns"] + converter, metadata, __ = get_conversion_info(info) - for interface_name, interface_electrode_results in ecephys_metadata["Electrodes"].items(): - name_split = interface_name.split(" — ") + nwbfile = make_nwbfile_from_metadata(metadata=metadata) + converter.add_to_nwbfile(nwbfile, metadata=metadata) - if len(name_split) == 1: - sub_interface = name_split[0] - elif len(name_split) == 2: - sub_interface, sub_sub_interface = name_split + backend_configuration = get_default_backend_configuration(nwbfile=nwbfile, backend=backend) - interface_or_subconverter = converter.data_interface_objects[sub_interface] + for location_in_file, dataset_configuration in backend_configuration_from_frontend.items(): + for key, value in dataset_configuration.items(): + if key not in PROPS_TO_IGNORE: + # Pydantic models only allow setting of attributes + setattr(backend_configuration.dataset_configurations[location_in_file], key, value) - if isinstance(interface_or_subconverter, NWBConverter): - subconverter = interface_or_subconverter + return backend_configuration - update_recording_properties_from_table_as_json( - recording_interface=subconverter.data_interface_objects[sub_sub_interface], - electrode_table_json=interface_electrode_results, - electrode_column_info=shared_electrode_columns, - ) - else: - interface = interface_or_subconverter - update_recording_properties_from_table_as_json( - recording_interface=interface, - electrode_table_json=interface_electrode_results, - electrode_column_info=shared_electrode_columns, - ) +def get_backend_configuration(info: dict) -> dict: - ecephys_metadata["Electrodes"] = [ - {"name": entry["name"], "description": entry["description"]} for entry in shared_electrode_columns - ] + import numpy as np - del ecephys_metadata["ElectrodeColumns"] + PROPS_TO_REMOVE = [ + # Immutable + "object_id", + "dataset_name", + "location_in_file", + "dtype", + ] - # Correct timezone in metadata fields - resolved_metadata["NWBFile"]["session_start_time"] = datetime.fromisoformat( - resolved_metadata["NWBFile"]["session_start_time"] - ).replace(tzinfo=zoneinfo.ZoneInfo(info["timezone"])) + info["overwrite"] = True # Always overwrite the file - if "date_of_birth" in resolved_metadata["Subject"]: - resolved_metadata["Subject"]["date_of_birth"] = datetime.fromisoformat( - resolved_metadata["Subject"]["date_of_birth"] - ).replace(tzinfo=zoneinfo.ZoneInfo(info["timezone"])) + backend = info.get("backend", "hdf5") + configuration = update_backend_configuration(info) - # 
Actually run the conversion - converter.run_conversion( - metadata=resolved_metadata, - nwbfile_path=resolved_output_path, - overwrite=info.get("overwrite", False), - conversion_options=options, - ) + def custom_encoder(obj): + if isinstance(obj, np.ndarray): + return obj.tolist() + if isinstance(obj, np.dtype): + return str(obj) + raise TypeError(f"Object of type {obj.__class__.__name__} is not JSON serializable") - # Create a symlink between the fake data and custom data - if not resolved_output_directory == default_output_directory: - if default_output_directory.exists(): - # If default default_output_directory is not a symlink, delete all contents and create a symlink there - if not default_output_directory.is_symlink(): - rmtree(default_output_directory) + # Provide metadata on configuration dictionary + configuration_dict = configuration.dict() - # If the location is already a symlink, but points to a different output location - # remove the existing symlink before creating a new one - elif ( - default_output_directory.is_symlink() - and default_output_directory.readlink() is not resolved_output_directory - ): - default_output_directory.unlink() + itemsizes = {} + for key, dataset in configuration_dict["dataset_configurations"].items(): + itemsizes[key] = dataset["dtype"].itemsize - # Create a pointer to the actual conversion outputs - if not default_output_directory.exists(): - os.symlink(resolved_output_directory, default_output_directory) + serialized = json.loads(json.dumps(configuration_dict, default=custom_encoder)) - return dict(file=str(resolved_output_path)) + dataset_configurations = serialized["dataset_configurations"] # Only provide dataset configurations - except Exception as e: - if log_url: - requests.post( - url=log_url, - json=dict( - header=f"Conversion failed for {project_name} — {nwbfile_path} (convert_to_nwb)", - inputs=dict(info=info), - traceback=traceback.format_exc(), - type="error", - ), - ) + for dataset in dataset_configurations.values(): + for key in PROPS_TO_REMOVE: + del dataset[key] - raise e + schema = list(configuration.schema()["$defs"].values())[0] + for key in PROPS_TO_REMOVE: + existed = schema["properties"].pop(key, None) # Why is dtype not included but the rest are? 
+ if existed: + schema["required"].remove(key) + + return dict(results=dataset_configurations, schema=schema, backend=backend, itemsizes=itemsizes) + + +def get_conversion_path_info(info: dict) -> dict: + """Function used to resolve the path details for the conversion.""" + + nwbfile_path = Path(info["nwbfile_path"]) + custom_output_directory = info.get("output_folder") + project_name = info.get("project_name") + run_stub_test = info.get("stub_test", False) + default_output_base = STUB_SAVE_FOLDER_PATH if run_stub_test else CONVERSION_SAVE_FOLDER_PATH + default_output_directory = default_output_base / project_name + + # add a subdirectory to a filepath if stub_test is true + resolved_output_base = Path(custom_output_directory) if custom_output_directory else default_output_base + resolved_output_directory = resolved_output_base / project_name + resolved_output_path = resolved_output_directory / nwbfile_path + + return dict(file=resolved_output_path, directory=resolved_output_directory, default=default_output_directory) + + +def get_conversion_info(info: dict) -> dict: + """Function used to organize the required information for conversion.""" + + from neuroconv import NWBConverter + + path_info = get_conversion_path_info(info) + resolved_output_path = path_info["file"] + resolved_output_directory = path_info["directory"] + default_output_directory = path_info["default"] + + # Remove symlink placed at the default_output_directory if this will hold real data + if resolved_output_directory == default_output_directory and default_output_directory.is_symlink(): + default_output_directory.unlink() + + resolved_output_path.parent.mkdir(exist_ok=True, parents=True) # Ensure all parent directories exist + + resolved_source_data = replace_none_with_nan( + info["source_data"], resolve_references(get_custom_converter(info["interfaces"]).get_source_schema()) + ) + + converter = instantiate_custom_converter( + source_data=resolved_source_data, + interface_class_dict=info["interfaces"], + alignment_info=info.get("alignment", dict()), + ) + + # Ensure Ophys NaN values are resolved + resolved_metadata = replace_none_with_nan(info["metadata"], resolve_references(converter.get_metadata_schema())) + + ecephys_metadata = resolved_metadata.get("Ecephys") + + if ecephys_metadata: + + # Quick fix to remove units + has_units = "Units" in ecephys_metadata + + if has_units: + + ## NOTE: Currently do not allow editing units properties + # shared_units_columns = ecephys_metadata["UnitColumns"] + # for interface_name, interface_unit_results in ecephys_metadata["Units"].items(): + # interface = converter.data_interface_objects[interface_name] + + # update_sorting_properties_from_table_as_json( + # interface, + # unit_table_json=interface_unit_results, + # unit_column_info=shared_units_columns, + # ) + + # ecephys_metadata["UnitProperties"] = [ + # {"name": entry["name"], "description": entry["description"]} for entry in shared_units_columns + # ] + + del ecephys_metadata["Units"] + del ecephys_metadata["UnitColumns"] + + has_electrodes = "Electrodes" in ecephys_metadata + if has_electrodes: + + shared_electrode_columns = ecephys_metadata["ElectrodeColumns"] + + for interface_name, interface_electrode_results in ecephys_metadata["Electrodes"].items(): + name_split = interface_name.split(" — ") + + if len(name_split) == 1: + sub_interface = name_split[0] + elif len(name_split) == 2: + sub_interface, sub_sub_interface = name_split + + interface_or_subconverter = converter.data_interface_objects[sub_interface] + + if 
isinstance(interface_or_subconverter, NWBConverter): + subconverter = interface_or_subconverter + + update_recording_properties_from_table_as_json( + recording_interface=subconverter.data_interface_objects[sub_sub_interface], + electrode_table_json=interface_electrode_results, + electrode_column_info=shared_electrode_columns, + ) + else: + interface = interface_or_subconverter + + update_recording_properties_from_table_as_json( + recording_interface=interface, + electrode_table_json=interface_electrode_results, + electrode_column_info=shared_electrode_columns, + ) + + ecephys_metadata["Electrodes"] = [ + {"name": entry["name"], "description": entry["description"]} for entry in shared_electrode_columns + ] + + del ecephys_metadata["ElectrodeColumns"] + + # Correct timezone in metadata fields + resolved_metadata["NWBFile"]["session_start_time"] = datetime.fromisoformat( + resolved_metadata["NWBFile"]["session_start_time"] + ).replace(tzinfo=zoneinfo.ZoneInfo(info["timezone"])) + + if "date_of_birth" in resolved_metadata["Subject"]: + resolved_metadata["Subject"]["date_of_birth"] = datetime.fromisoformat( + resolved_metadata["Subject"]["date_of_birth"] + ).replace(tzinfo=zoneinfo.ZoneInfo(info["timezone"])) + + return ( + converter, + resolved_metadata, + path_info, + ) + + +def convert_to_nwb( + info: dict, + log_url: Optional[str] = None, +) -> str: + """Function used to convert the source data to NWB format using the specified metadata.""" + + path_info = get_conversion_path_info(info) + output_path = path_info["file"] + resolved_output_directory = path_info["directory"] + default_output_directory = path_info["default"] + + create_file(info, log_url=log_url) + + # Create a symlink between the fake data and custom data + if not resolved_output_directory == default_output_directory: + if default_output_directory.exists(): + # If default default_output_directory is not a symlink, delete all contents and create a symlink there + if not default_output_directory.is_symlink(): + rmtree(default_output_directory) + + # If the location is already a symlink, but points to a different output location + # remove the existing symlink before creating a new one + elif ( + default_output_directory.is_symlink() + and default_output_directory.readlink() is not resolved_output_directory + ): + default_output_directory.unlink() + + # Create a pointer to the actual conversion outputs + if not default_output_directory.exists(): + os.symlink(resolved_output_directory, default_output_directory) + + return dict(file=str(output_path)) def convert_all_to_nwb( diff --git a/src/pyflask/namespaces/neuroconv.py b/src/pyflask/namespaces/neuroconv.py index 3d37dcc156..fc3fba9814 100644 --- a/src/pyflask/namespaces/neuroconv.py +++ b/src/pyflask/namespaces/neuroconv.py @@ -7,6 +7,7 @@ convert_all_to_nwb, get_all_converter_info, get_all_interface_info, + get_backend_configuration, get_interface_alignment, get_metadata_schema, get_source_schema, @@ -93,6 +94,13 @@ def post(self): return get_interface_alignment(neuroconv_namespace.payload) +@neuroconv_namespace.route("/configuration") +class GetBackendConfiguration(Resource): + @neuroconv_namespace.doc(responses={200: "Success", 400: "Bad Request", 500: "Internal server error"}) + def post(self): + return get_backend_configuration(neuroconv_namespace.payload) + + validate_parser = neuroconv_namespace.parser() validate_parser.add_argument("parent", type=dict, required=True) validate_parser.add_argument("function_name", type=str, required=True) diff --git 
a/src/schemas/backend-configuration.schema.ts b/src/schemas/backend-configuration.schema.ts new file mode 100644 index 0000000000..2b36b5aedf --- /dev/null +++ b/src/schemas/backend-configuration.schema.ts @@ -0,0 +1,80 @@ +import { getResourceUsage } from "../../src/electron/frontend/core/validation/backend-configuration" + +export const resolveBackendResults = (schema, results, itemsize) => { + const copy = structuredClone(schema) + + // results.buffer_shape = results.chunk_shape.map(() => null); // Provide an unspecified buffer shape for now + + // Do not handle compression options or any filter options for now + if (copy.properties.compression_options) results.compression_options = null; + if (copy.properties.filter_methods) results.filter_methods = [] + if (copy.properties.filter_options) results.filter_options = null; + + + const { full_shape } = results; + if (copy.properties.filter_methods) copy.properties.filter_methods.description = "The ordered collection of filtering methods to apply to this dataset prior to compression.Set blank to disable filtering" + copy.properties.compression_method.description = "The specified compression method to apply to this dataset.
Set blank to disable compression"
+    delete copy.properties.compression_method.default // Remove gzip as the default compression method
+    copy.description = `Full Shape: ${full_shape.join(' x ')}. Source size: ${getResourceUsage(full_shape, itemsize)}`; // This is static
+
+    updateSchema(copy, results, itemsize)
+
+    return { schema: copy, resolved: results }
+}
+
+
+const propertiesToUpdate = [
+    'chunk_shape',
+    // 'buffer_shape'
+]
+
+// const bufferShapeDescription = (value, itemsize) => {
+//     return `Expected RAM usage: ${getResourceUsage(value, itemsize)}.`;
+// }
+
+const chunkShapeDescription = (value, itemsize) => {
+    const hasNull = value.includes(null) || value.includes(undefined); // Both null after JSON processing
+    const diskSpaceMessage = hasNull ? 'Disk space usage will be determined automatically' : `Disk space usage per chunk: ${getResourceUsage(value, itemsize)}`;
+    return `${diskSpaceMessage}
Leave blank to auto-specify the axis`; +} + + +export const updateSchema = (schema, results, itemsize) => { + + const { + chunk_shape, + // buffer_shape, + full_shape + } = results; + + + const chunkSchema = schema.properties.chunk_shape; + const chunkArraySchema = chunkSchema.anyOf?.[0] || chunkSchema; + // const bufferSchema = schema.properties.buffer_shape; + + const shapeMax = full_shape[0] + + if (propertiesToUpdate.includes('chunk_shape')) { + chunkArraySchema.items.minimum = 1 + chunkArraySchema.maxItems = chunkArraySchema.minItems = chunk_shape.length; + chunkArraySchema.items.maximum = shapeMax + chunkArraySchema.description = chunkShapeDescription( + chunk_shape, + itemsize + ); + + } + + // if (propertiesToUpdate.includes('buffer_shape')) { + // bufferSchema.items.minimum = 1 + // bufferSchema.items.maximum = shapeMax + // bufferSchema.items.step = chunk_shape[0] // Constrain to increments of chunk size + // bufferSchema.strict = true + + // bufferSchema.maxItems = bufferSchema.minItems = buffer_shape.length; + // bufferSchema.description = bufferShapeDescription( + // buffer_shape, + // itemsize + // ); + // } +} diff --git a/src/schemas/base-metadata.schema.ts b/src/schemas/base-metadata.schema.ts index 466993ebe3..1dfa0de588 100644 --- a/src/schemas/base-metadata.schema.ts +++ b/src/schemas/base-metadata.schema.ts @@ -174,10 +174,11 @@ export const preprocessMetadataSchema = (schema: any = baseMetadataSchema, globa order: ["channel_name", "group_name", "shank_electrode_number", ...UV_PROPERTIES] }) - if (ecephys.properties["Units"]) { + const units = ecephys.properties["Units"] - ecephys.properties["Units"].title = "Summarized Units" + if (units) { + units.title = "Summarized Units" updateEcephysTable("Units", copy, { properties: { @@ -187,7 +188,6 @@ export const preprocessMetadataSchema = (schema: any = baseMetadataSchema, globa }, order: ["unit_id", "unit_name", "clu_id", "group_id"] }) - } } diff --git a/src/schemas/dandi-upload.schema.ts b/src/schemas/dandi-upload.schema.ts index d39a05234b..16b416a8db 100644 --- a/src/schemas/dandi-upload.schema.ts +++ b/src/schemas/dandi-upload.schema.ts @@ -32,12 +32,12 @@ onServerOpen(async () => { .then((res) => res.json()) .then(({ physical, logical }) => { const { number_of_jobs, number_of_threads } = additionalSettings as any; - number_of_jobs.max = physical; - number_of_threads.max = logical / physical; + number_of_jobs.maximum = physical; + number_of_threads.maximum = logical / physical; setReady.cpus({ number_of_jobs, number_of_threads }) }) .catch(() => { - if (isStorybook) setReady.cpus({ number_of_jobs: { max: 1, default: 1 }, number_of_threads: { max: 1, default: 1 } }) + if (isStorybook) setReady.cpus({ number_of_jobs: { maximum: 1, default: 1 }, number_of_threads: { maximum: 1, default: 1 } }) }); }); diff --git a/src/schemas/json/dandi/create.json b/src/schemas/json/dandi/create.json index aeca70a26b..faedd35041 100644 --- a/src/schemas/json/dandi/create.json +++ b/src/schemas/json/dandi/create.json @@ -27,7 +27,8 @@ "main": "Main Archive" }, "enum": ["main", "staging"], - "description": "Which DANDI server to upload to.
Note: The Development Server is recommended for developers, or users learning to use DANDI"
+            "description": "Which DANDI server to upload to. Note: The Development Server is recommended for developers, or users learning to use DANDI",
+            "strict": true
         },

         "description": {
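
For context on the size math used throughout this patch: below is a minimal, self-contained TypeScript sketch (not part of the patch) of how the new helpers in src/electron/frontend/core/components/utils/size.ts and src/electron/frontend/core/validation/backend-configuration.ts combine to produce the "Source size" and per-chunk disk-usage text shown in the backend-configuration form. The helpers are re-declared locally in simplified form rather than imported, and the shape, chunk, and itemsize values are hypothetical.

// Simplified mirror of humanReadableBytes (utils/size.ts):
// decimal (1000-based) units, formatted to two decimal places.
const UNITS = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"];

function humanReadableBytes(size: number): string {
    let index = 0;
    while (size >= 1000 && index < UNITS.length - 1) {
        size /= 1000;
        index += 1;
    }
    return `${size.toFixed(2)} ${UNITS[index]}`;
}

// Mirror of getResourceUsageBytes with scale = 1: total bytes = product(shape) * itemsize.
const getResourceUsageBytes = (shape: number[], itemsize: number): number =>
    shape.reduce((acc, dim) => acc * dim, 1) * itemsize;

// Hypothetical recording-like dataset: 600 s at 30 kHz across 64 channels of int16 (2-byte) samples.
const fullShape = [30_000 * 600, 64];
const chunkShape = [30_000, 64];
const itemsize = 2;

// Corresponds to the "Source size" text added to each dataset description.
console.log(`Source size: ${humanReadableBytes(getResourceUsageBytes(fullShape, itemsize))}`); // 2.30 GB

// Corresponds to the per-chunk estimate from chunkShapeDescription when every axis is specified.
console.log(`Disk space usage per chunk: ${humanReadableBytes(getResourceUsageBytes(chunkShape, itemsize))}`); // 3.84 MB

If any axis of chunk_shape is left blank (null), chunkShapeDescription instead reports that disk usage will be determined automatically, per the hasNull branch in the patch.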