diff --git a/+file/fillClass.m b/+file/fillClass.m index 79f3acb5..21a10f95 100644 --- a/+file/fillClass.m +++ b/+file/fillClass.m @@ -1,119 +1,141 @@ function template = fillClass(name, namespace, processed, classprops, inherited) -%name is the name of the scheme -%namespace is the namespace context for this class + %name is the name of the scheme + %namespace is the namespace context for this class -%% PROCESSING -class = processed(1); + %% PROCESSING + class = processed(1); -allprops = keys(classprops); -required = {}; -optional = {}; -readonly = {}; -defaults = {}; -dependent = {}; -%separate into readonly, required, and optional properties -for i=1:length(allprops) - pnm = allprops{i}; - prop = classprops(pnm); + allProperties = keys(classprops); + required = {}; + optional = {}; + readonly = {}; + defaults = {}; + dependent = {}; + hidden = {}; % special hidden properties for hard-coded workarounds + %separate into readonly, required, and optional properties + for iGroup = 1:length(allProperties) + propertyName = allProperties{iGroup}; + prop = classprops(propertyName); - isRequired = ischar(prop) || isa(prop, 'containers.Map') || isstruct(prop); - iIsPropertyRequired = false; - if isa(prop, 'file.interface.HasProps') - iIsPropertyRequired = false(size(prop)); - for iProp = 1:length(prop) - p = prop(iProp); - iIsPropertyRequired(iProp) = p.required; - end - end - - if isRequired || all(iIsPropertyRequired) - required = [required {pnm}]; - else - optional = [optional {pnm}]; - end - - if isa(prop, 'file.Attribute') - if prop.readonly - readonly = [readonly {pnm}]; + isRequired = ischar(prop) || isa(prop, 'containers.Map') || isstruct(prop); + isPropertyRequired = false; + if isa(prop, 'file.interface.HasProps') + isPropertyRequired = false(size(prop)); + for iProp = 1:length(prop) + p = prop(iProp); + isPropertyRequired(iProp) = p.required; + end end - - if ~isempty(prop.value) - defaults = [defaults {pnm}]; + + if isRequired || all(isPropertyRequired) + required = [required {propertyName}]; + else + optional = [optional {propertyName}]; end - - if ~isempty(prop.dependent) - %extract prefix - parentName = strrep(pnm, ['_' prop.name], ''); - parent = classprops(parentName); - if ~parent.required - dependent = [dependent {pnm}]; + + if isa(prop, 'file.Attribute') + if prop.readonly + readonly = [readonly {propertyName}]; + end + + if ~isempty(prop.value) + defaults = [defaults {propertyName}]; + end + + if ~isempty(prop.dependent) + %extract prefix + parentName = strrep(propertyName, ['_' prop.name], ''); + parent = classprops(parentName); + if ~parent.required + dependent = [dependent {propertyName}]; + end + end + + if strcmp(namespace.name, 'hdmf_common') ... + && strcmp(name, 'VectorData') ... 
+ && any(strcmp(prop.name, {'unit', 'sampling_rate', 'resolution'})) + hidden{end+1} = propertyName; end end end -end -nonInherited = setdiff(allprops, inherited); -readonly = intersect(readonly, nonInherited); -required = intersect(required, nonInherited); -optional = intersect(optional, nonInherited); + nonInherited = setdiff(allProperties, inherited); + readonly = intersect(readonly, nonInherited); + exclusivePropertyGroups = union(readonly, hidden); + required = setdiff(intersect(required, nonInherited), exclusivePropertyGroups); + optional = setdiff(intersect(optional, nonInherited), exclusivePropertyGroups); -%% CLASSDEF -if length(processed) <= 1 - depnm = 'types.untyped.MetaClass'; %WRITE -else - parentName = processed(2).type; %WRITE - depnm = namespace.getFullClassName(parentName); -end + %% CLASSDEF + if length(processed) <= 1 + depnm = 'types.untyped.MetaClass'; %WRITE + else + parentName = processed(2).type; %WRITE + depnm = namespace.getFullClassName(parentName); + end -if isa(processed, 'file.Group') - classTag = 'types.untyped.GroupClass'; -else - classTag = 'types.untyped.DatasetClass'; -end + if isa(processed, 'file.Group') + classTag = 'types.untyped.GroupClass'; + else + classTag = 'types.untyped.DatasetClass'; + end -%% return classfile string -classDef = [... - 'classdef ' name ' < ' depnm ' & ' classTag newline... %header, dependencies - '% ' upper(name) ' ' class.doc]; %name, docstr -propgroups = {... - @()file.fillProps(classprops, readonly, 'PropertyAttributes', 'SetAccess=protected')... - @()file.fillProps(classprops, setdiff(required, readonly), 'IsRequired', true)... - @()file.fillProps(classprops, setdiff(optional, readonly))... - }; -docsep = {... - '% READONLY PROPERTIES'... - '% REQUIRED PROPERTIES'... - '% OPTIONAL PROPERTIES'... - }; -propsDef = ''; -for i=1:length(propgroups) - pg = propgroups{i}; - pdef = pg(); - if ~isempty(pdef) - propsDef = strjoin({propsDef docsep{i} pdef}, newline); + %% return classfile string + classDefinitionHeader = [... + 'classdef ' name ' < ' depnm ' & ' classTag newline... %header, dependencies + '% ' upper(name) ' ' class.doc]; %name, docstr + hiddenAndReadonly = intersect(hidden, readonly); + hidden = setdiff(hidden, hiddenAndReadonly); + readonly = setdiff(readonly, hiddenAndReadonly); + PropertyGroups = struct(... + 'Function', {... + @()file.fillProps(classprops, hiddenAndReadonly, 'PropertyAttributes', 'Hidden, SetAccess = protected') ... + , @()file.fillProps(classprops, hidden, 'PropertyAttributes', 'Hidden') ... + , @()file.fillProps(classprops, readonly, 'PropertyAttributes', 'SetAccess = protected') ... + , @()file.fillProps(classprops, required, 'IsRequired', true) ... + , @()file.fillProps(classprops, optional)... + } ... + , 'Separator', { ... + '% HIDDEN READONLY PROPERTIES' ... + , '% HIDDEN PROPERTIES' ... + , '% READONLY PROPERTIES' ... + , '% REQUIRED PROPERTIES' ... + , '% OPTIONAL PROPERTIES' ... + } ... + ); + fullPropertyDefinition = ''; + for iGroup = 1:length(PropertyGroups) + Group = PropertyGroups(iGroup); + propertyDefinitionBody = Group.Function(); + if isempty(propertyDefinitionBody) + continue; + end + fullPropertyDefinition = strjoin({... + fullPropertyDefinition ... + , Group.Separator ... + , propertyDefinitionBody ... + }, newline); end -end -constructorBody = file.fillConstructor(... - name,... - depnm,... - defaults,... %all defaults, regardless of inheritance - classprops,... 
- namespace); -setterFcns = file.fillSetters(setdiff(nonInherited, readonly)); -validatorFcns = file.fillValidators(allprops, classprops, namespace); -exporterFcns = file.fillExport(nonInherited, class, depnm); -methodBody = strjoin({constructorBody... - '%% SETTERS' setterFcns... - '%% VALIDATORS' validatorFcns... - '%% EXPORT' exporterFcns}, newline); + constructorBody = file.fillConstructor(... + name,... + depnm,... + defaults,... %all defaults, regardless of inheritance + classprops,... + namespace); + setterFcns = file.fillSetters(setdiff(nonInherited, union(readonly, hiddenAndReadonly))); + validatorFcns = file.fillValidators(allProperties, classprops, namespace); + exporterFcns = file.fillExport(nonInherited, class, depnm); + methodBody = strjoin({constructorBody... + '%% SETTERS' setterFcns... + '%% VALIDATORS' validatorFcns... + '%% EXPORT' exporterFcns}, newline); -if strcmp(name, 'DynamicTable') - methodBody = strjoin({methodBody, '%% TABLE METHODS', file.fillDynamicTableMethods()}, newline); -end + if strcmp(name, 'DynamicTable') + methodBody = strjoin({methodBody, '%% TABLE METHODS', file.fillDynamicTableMethods()}, newline); + end -fullMethodBody = strjoin({'methods' ... - file.addSpaces(methodBody, 4) 'end'}, newline); -template = strjoin({classDef propsDef fullMethodBody 'end'}, ... - [newline newline]); + fullMethodBody = strjoin({'methods' ... + file.addSpaces(methodBody, 4) 'end'}, newline); + template = strjoin({classDefinitionHeader fullPropertyDefinition fullMethodBody 'end'}, ... + [newline newline]); end diff --git a/+file/fillExport.m b/+file/fillExport.m index 1f590e5b..1fb5a1c2 100644 --- a/+file/fillExport.m +++ b/+file/fillExport.m @@ -1,206 +1,244 @@ -function festr = fillExport(propnames, raw, parentName) -hdrstr = 'function refs = export(obj, fid, fullpath, refs)'; -if isa(raw, 'file.Dataset') - propnames = propnames(~strcmp(propnames, 'data')); -end +function festr = fillExport(propertyNames, RawClass, parentName) + exportHeader = 'function refs = export(obj, fid, fullpath, refs)'; + if isa(RawClass, 'file.Dataset') + propertyNames = propertyNames(~strcmp(propertyNames, 'data')); + end -bodystr = {strjoin({... - ['refs = export@' parentName '(obj, fid, fullpath, refs);']... - 'if any(strcmp(refs, fullpath))'... - ' return;'... - 'end'... - }, newline)}; - -if isa(raw, 'file.Group') && strcmp(raw.type, 'NWBFile') - %NWBFile is technically the root `group`, which in HDF5 is a single `/` - % this messes with property creation so we reassign the path here to - % empty string so concatenation looks right - bodystr = [bodystr {'fullpath = '''';'}]; -end + exportBody = {strjoin({... + ['refs = export@' parentName '(obj, fid, fullpath, refs);']... + 'if any(strcmp(refs, fullpath))'... + ' return;'... + 'end'... 
+ }, newline)}; -for i = 1:length(propnames) - pnm = propnames{i}; - pathProps = traverseRaw(pnm, raw); - if isempty(pathProps) - keyboard; -% bodystr{end+1} = fillDataExport(pnm, + if isa(RawClass, 'file.Group') && strcmp(RawClass.type, 'NWBFile') + %NWBFile is technically the root `group`, which in HDF5 is a single `/` + % this messes with property creation so we reassign the path here to + % empty string so concatenation looks right + exportBody = [exportBody {'fullpath = '''';'}]; end - prop = pathProps{end}; - elideProps = pathProps(1:end-1); - elisions = cell(length(elideProps),1); - %Construct elisions - for j = 1:length(elideProps) - elisions{j} = elideProps{j}.name; - end - - elisions = strjoin(elisions, '/'); - if ~isempty(elideProps) && all(cellfun('isclass', elideProps, 'file.Group')) - bodystr{end+1} = ['io.writeGroup(fid, [fullpath ''/' elisions ''']);']; - end - bodystr{end+1} = fillDataExport(pnm, prop, elisions); -end - -festr = strjoin({hdrstr... - file.addSpaces(strjoin(bodystr, newline), 4)... - 'end'}, newline); -end -function path = traverseRaw(propname, raw) -path = {}; -switch class(raw) - case 'file.Group' - %get names of both subgroups and datasets - gnames = {}; - dsnames = {}; - lnknames = {}; - anames = {}; - - if ~isempty(raw.attributes) - anames = {raw.attributes.name}; + for i = 1:length(propertyNames) + propertyName = propertyNames{i}; + pathProps = traverseRaw(propertyName, RawClass); + if isempty(pathProps) + keyboard; end - - if ~isempty(raw.subgroups) - gnames = {raw.subgroups.name}; - lowerGroupTypes = lower({raw.subgroups.type}); - useLower = [raw.subgroups.isConstrainedSet] | cellfun('isempty', gnames); - gnames(useLower) = lowerGroupTypes(useLower); + prop = pathProps{end}; + elideProps = pathProps(1:end-1); + elisions = cell(length(elideProps),1); + % Construct elisions + for j = 1:length(elideProps) + elisions{j} = elideProps{j}.name; end - - if ~isempty(raw.datasets) - dsnames = {raw.datasets.name}; - lowerDsTypes = lower({raw.datasets.type}); - useLower = [raw.datasets.isConstrainedSet] | cellfun('isempty', dsnames); - dsnames(useLower) = lowerDsTypes(useLower); + + elisions = strjoin(elisions, '/'); + if ~isempty(elideProps) && all(cellfun('isclass', elideProps, 'file.Group')) + exportBody{end+1} = ['io.writeGroup(fid, [fullpath ''/' elisions ''']);']; end - - if ~isempty(raw.links) - lnknames = {raw.links.name}; + + if strcmp(propertyName, 'unit') && strcmp(RawClass.type, 'VectorData') + exportBody{end+1} = fillVectorDataUnitConditional(); + elseif strcmp(propertyName, 'sampling_rate') && strcmp(RawClass.type, 'VectorData') + exportBody{end+1} = fillVectorDataSamplingRateConditional(); + elseif strcmp(propertyName, 'resolution') && strcmp(RawClass.type, 'VectorData') + exportBody{end+1} = fillVectorDataResolutionConditional(); + else + exportBody{end+1} = fillDataExport(propertyName, prop, elisions); end - - if any(strcmp([anames gnames dsnames lnknames], propname)) - amatch = strcmp(anames, propname); - gmatch = strcmp(gnames, propname); - dsmatch = strcmp(dsnames, propname); - lnkmatch = strcmp(lnknames, propname); - if any(amatch) - path = {raw.attributes(amatch)}; - elseif any(gmatch) - path = {raw.subgroups(gmatch)}; - elseif any(dsmatch) - path = {raw.datasets(dsmatch)}; - elseif any(lnkmatch) - path = {raw.links(lnkmatch)}; - end + end + + festr = strjoin({ ... + exportHeader ... + , file.addSpaces(strjoin(exportBody, newline), 4) ... + , 'end' ... 
+ }, newline); +end + +function exportBody = fillVectorDataResolutionConditional() + exportBody = strjoin({... + 'if ~isempty(obj.resolution) && any(endsWith(fullpath, ''units/spike_times''))' ... + , ' io.writeAttribute(fid, [fullpath ''/resolution''], obj.resolution);' ... + , 'end'}, newline); +end + +function exportBody = fillVectorDataUnitConditional() + exportBody = strjoin({... + 'validUnitPaths = strcat(''units/'', {''waveform_mean'', ''waveform_sd'', ''waveforms''});' ... + , 'if ~isempty(obj.unit) && any(endsWith(fullpath, validUnitPaths))' ... + , ' io.writeAttribute(fid, [fullpath ''/unit''], obj.unit);' ... + , 'end'}, newline); +end + +function exportBody = fillVectorDataSamplingRateConditional() + exportBody = strjoin({... + 'validDataSamplingPaths = strcat(''units/'', {''waveform_mean'', ''waveform_sd'', ''waveforms''});' ... + , 'if ~isempty(obj.sampling_rate) && any(endsWith(fullpath, validDataSamplingPaths))' ... + , ' io.writeAttribute(fid, [fullpath ''/sampling_rate''], obj.sampling_rate);' ... + , 'end'}, newline); +end + +function path = traverseRaw(propertyName, RawClass) + % returns a cell array of named tokens which may or may not indicate an identifier. + % these tokens are relative to the Raw class + path = {}; + + if isa(RawClass, 'file.Dataset') + if isempty(RawClass.attributes) return; end - - %find true path for elided property - if startsWith(propname, dsnames) || startsWith(propname, gnames) - for i=1:length(gnames) - nm = gnames{i}; - suffix = propname(length(nm)+2:end); - if startsWith(propname, nm) - res = traverseRaw(suffix, raw.subgroups(i)); - if ~isempty(res) - path = [{raw.subgroups(i)} res]; - return; - end + matchesAttribute = strcmp({RawClass.attributes.name}, propertyName); + path = {RawClass.attributes(matchesAttribute)}; + return; + end + + % probably a file.Group + + % get names of both subgroups and datasets + subgroupNames = {}; + datasetNames = {}; + linkNames = {}; + attributeNames = {}; + + if ~isempty(RawClass.attributes) + attributeNames = {RawClass.attributes.name}; + end + + if ~isempty(RawClass.subgroups) + subgroupNames = {RawClass.subgroups.name}; + lowerGroupTypes = lower({RawClass.subgroups.type}); + useLower = [RawClass.subgroups.isConstrainedSet] | cellfun('isempty', subgroupNames); + subgroupNames(useLower) = lowerGroupTypes(useLower); + end + + if ~isempty(RawClass.datasets) + datasetNames = {RawClass.datasets.name}; + lowerDsTypes = lower({RawClass.datasets.type}); + useLower = [RawClass.datasets.isConstrainedSet] | cellfun('isempty', datasetNames); + datasetNames(useLower) = lowerDsTypes(useLower); + end + + if ~isempty(RawClass.links) + linkNames = {RawClass.links.name}; + end + + if any(strcmp([attributeNames subgroupNames datasetNames linkNames], propertyName)) + isAttribute = strcmp(attributeNames, propertyName); + isGroup = strcmp(subgroupNames, propertyName); + isDataset = strcmp(datasetNames, propertyName); + isLink = strcmp(linkNames, propertyName); + if any(isAttribute) + path = {RawClass.attributes(isAttribute)}; + elseif any(isGroup) + path = {RawClass.subgroups(isGroup)}; + elseif any(isDataset) + path = {RawClass.datasets(isDataset)}; + elseif any(isLink) + path = {RawClass.links(isLink)}; + end + return; + end + + % find true path for elided property + if startsWith(propertyName, datasetNames) || startsWith(propertyName, subgroupNames) + for i=1:length(subgroupNames) + name = subgroupNames{i}; + suffix = propertyName(length(name)+2:end); + if startsWith(propertyName, name) + res = traverseRaw(suffix, 
RawClass.subgroups(i)); + if ~isempty(res) + path = [{RawClass.subgroups(i)} res]; + return; end end - for i=1:length(dsnames) - nm = dsnames{i}; - suffix = propname(length(nm)+2:end); - if startsWith(propname, nm) - res = traverseRaw(suffix, raw.datasets(i)); - if ~isempty(res) - path = [{raw.datasets(i)} res]; - return; - end + end + for i=1:length(datasetNames) + name = datasetNames{i}; + suffix = propertyName(length(name)+2:end); + if startsWith(propertyName, name) + res = traverseRaw(suffix, RawClass.datasets(i)); + if ~isempty(res) + path = [{RawClass.datasets(i)} res]; + return; end end end - case 'file.Dataset' - if isempty(raw.attributes) - return; - end - attrmatch = strcmp({raw.attributes.name}, propname); - path = {raw.attributes(attrmatch)}; -end + end end function fde = fillDataExport(name, prop, elisions) -if isempty(elisions) - fullpath = ['[fullpath ''/' prop.name ''']']; - elisionpath = 'fullpath'; -else - fullpath = ['[fullpath ''/' elisions '/' prop.name ''']']; - elisionpath = ['[fullpath ''/' elisions ''']']; -end - -if (isa(prop, 'file.Group') || isa(prop, 'file.Dataset')) && prop.isConstrainedSet - fde = ['refs = obj.' name '.export(fid, ' elisionpath ', refs);']; -elseif isa(prop, 'file.Link') || isa(prop, 'file.Group') ||... - (isa(prop, 'file.Dataset') && ~isempty(prop.type)) - % obj, loc_id, path, refs - fde = ['refs = obj.' name '.export(fid, ' fullpath ', refs);']; -elseif isa(prop, 'file.Dataset') %untyped dataset - options = {}; - - % special case due to unique behavior of file_create_date - if strcmp(name, 'file_create_date') - options = [options {'''forceChunking''', '''forceArray'''}]; - elseif ~prop.scalar - options = [options {'''forceArray'''}]; - end - - - % untyped compound - if isstruct(prop.dtype) - writerStr = 'io.writeCompound'; + if isempty(elisions) + fullpath = ['[fullpath ''/' prop.name ''']']; + elisionpath = 'fullpath'; else - writerStr = 'io.writeDataset'; + fullpath = ['[fullpath ''/' elisions '/' prop.name ''']']; + elisionpath = ['[fullpath ''/' elisions ''']']; end - - % just to guarantee optional arguments are correct syntax - nameProp = sprintf('obj.%s', name); - nameArgs = [{nameProp} options]; - nameArgs = strjoin(nameArgs, ', '); - fde = strjoin({... - ['if startsWith(class(obj.' name '), ''types.untyped.'')']... - [' refs = obj.' name '.export(fid, ' fullpath ', refs);']... - ['elseif ~isempty(obj.' name ')']... - [sprintf(' %s(fid, %s, %s);', writerStr, fullpath, nameArgs)]... - 'end'... - }, newline); -else - if prop.scalar - forceArrayFlag = ''; + + if (isa(prop, 'file.Group') || isa(prop, 'file.Dataset')) && prop.isConstrainedSet + % is a sub-object (with an export function) + fde = ['refs = obj.' name '.export(fid, ' elisionpath ', refs);']; + elseif isa(prop, 'file.Link') || isa(prop, 'file.Group') ||... + (isa(prop, 'file.Dataset') && ~isempty(prop.type)) + % obj, loc_id, path, refs + fde = ['refs = obj.' 
name '.export(fid, ' fullpath ', refs);']; + elseif isa(prop, 'file.Dataset') %untyped dataset + options = {}; + + % special case due to unique behavior of file_create_date + if strcmp(name, 'file_create_date') + options = [options {'''forceChunking''', '''forceArray'''}]; + elseif ~prop.scalar + options = [options {'''forceArray'''}]; + end + + % untyped compound + if isstruct(prop.dtype) + writerStr = 'io.writeCompound'; + else + writerStr = 'io.writeDataset'; + end + + % just to guarantee optional arguments are correct syntax + nameProp = sprintf('obj.%s', name); + nameArgs = [{nameProp} options]; + nameArgs = strjoin(nameArgs, ', '); + fde = strjoin({... + ['if startsWith(class(obj.' name '), ''types.untyped.'')']... + [' refs = obj.' name '.export(fid, ' fullpath ', refs);']... + ['elseif ~isempty(obj.' name ')']... + [sprintf(' %s(fid, %s, %s);', writerStr, fullpath, nameArgs)]... + 'end'... + }, newline); else - forceArrayFlag = ', ''forceArray'''; + if prop.scalar + forceArrayFlag = ''; + else + forceArrayFlag = ', ''forceArray'''; + end + fde = sprintf('io.writeAttribute(fid, %1$s, obj.%2$s%3$s);',... + fullpath, name, forceArrayFlag); end - fde = sprintf('io.writeAttribute(fid, %1$s, obj.%2$s%3$s);',... - fullpath, name, forceArrayFlag); -end -propertyChecks = {}; + propertyChecks = {}; -if isa(prop, 'file.Attribute') && ~isempty(prop.dependent) - %if attribute is dependent, check before writing - if isempty(elisions) || strcmp(elisions, prop.dependent) - depPropname = prop.dependent; - else - flattened = strfind(elisions, '/'); - flattened = strrep(elisions(1:flattened(end)), '/', '_'); - depPropname = [flattened prop.dependent]; + if isa(prop, 'file.Attribute') && ~isempty(prop.dependent) + %if attribute is dependent, check before writing + if isempty(elisions) || strcmp(elisions, prop.dependent) + depPropname = prop.dependent; + else + flattened = strfind(elisions, '/'); + flattened = strrep(elisions(1:flattened(end)), '/', '_'); + depPropname = [flattened prop.dependent]; + end + propertyChecks{end+1} = ['~isempty(obj.' depPropname ')']; end - propertyChecks{end+1} = ['~isempty(obj.' depPropname ')']; -end -if ~prop.required - propertyChecks{end+1} = ['~isempty(obj.' name ')']; -end + if ~prop.required + propertyChecks{end+1} = ['~isempty(obj.' name ')']; + end -if ~isempty(propertyChecks) - fde = ['if ' strjoin(propertyChecks, ' && ') newline file.addSpaces(fde, 4) newline 'end']; -end + if ~isempty(propertyChecks) + fde = ['if ' strjoin(propertyChecks, ' && ') newline file.addSpaces(fde, 4) newline 'end']; + end end \ No newline at end of file diff --git a/+file/fillProps.m b/+file/fillProps.m index 97b0fdfc..a68aa15f 100644 --- a/+file/fillProps.m +++ b/+file/fillProps.m @@ -1,6 +1,12 @@ function s = fillProps(props, names, varargin) - validateattributes(props, {'containers.Map'}, {}, mfilename, 'props', 1); - validateattributes(names, {'cell'}, {'vector'}, mfilename, 'names', 2); + % Fills the property list in the classdef + assert(isa(props, 'containers.Map')); + assert(iscellstr(names) || isstring(names)); + s = ''; + if isempty(names) + return; + end + p = inputParser; p.addParameter('PropertyAttributes', '', ... @(x)validateattributes(x, {'char'}, {'scalartext'}, mfilename, 'Attribute')); @@ -26,14 +32,10 @@ options = ['(' p.Results.PropertyAttributes ')']; end - if isempty(proplines) - s = ''; - else - s = strjoin({... - ['properties' options]... - file.addSpaces(strjoin(proplines, newline), 4)... - 'end'}, newline); - end + s = strjoin({... 
+ ['properties' options]... + file.addSpaces(strjoin(proplines, newline), 4)... + 'end'}, newline); end function propStr = getPropStr(prop, propName) diff --git a/+file/fillSetters.m b/+file/fillSetters.m index 9e945898..941e60eb 100644 --- a/+file/fillSetters.m +++ b/+file/fillSetters.m @@ -3,7 +3,7 @@ for i=1:length(propnames) nm = propnames{i}; fsstr{i} = strjoin({... - ['function obj = set.' nm '(obj, val)']... + ['function set.' nm '(obj, val)']... [' obj.' nm ' = obj.validate_' nm '(val);']... 'end'}, newline); end diff --git a/+file/processClass.m b/+file/processClass.m index 6f288a0a..1ccfc21e 100644 --- a/+file/processClass.m +++ b/+file/processClass.m @@ -1,42 +1,90 @@ function [Processed, classprops, inherited] = processClass(name, namespace, pregen) -inherited = {}; -branch = [{namespace.getClass(name)} namespace.getRootBranch(name)]; -branchNames = cell(size(branch)); -TYPEDEF_KEYS = {'neurodata_type_def', 'data_type_def'}; -for i = 1:length(branch) - hasTypeDefs = isKey(branch{i}, TYPEDEF_KEYS); - branchNames{i} = branch{i}(TYPEDEF_KEYS{hasTypeDefs}); -end + inherited = {}; + branch = [{namespace.getClass(name)} namespace.getRootBranch(name)]; + branchNames = cell(size(branch)); + TYPEDEF_KEYS = {'neurodata_type_def', 'data_type_def'}; + for i = 1:length(branch) + hasTypeDefs = isKey(branch{i}, TYPEDEF_KEYS); + branchNames{i} = branch{i}(TYPEDEF_KEYS{hasTypeDefs}); + end + + for iAncestor = 1:length(branch) + node = branch{iAncestor}; + hasTypeDefs = isKey(node, TYPEDEF_KEYS); + nodename = node(TYPEDEF_KEYS{hasTypeDefs}); -for iAncestor = 1:length(branch) - node = branch{iAncestor}; - hasTypeDefs = isKey(node, TYPEDEF_KEYS); - nodename = node(TYPEDEF_KEYS{hasTypeDefs}); - - if ~isKey(pregen, nodename) - switch node('class_type') - case 'groups' - class = file.Group(node); - case 'datasets' - class = file.Dataset(node); - otherwise - error('NWB:FileGen:InvalidClassType',... - 'Class type %s is invalid', node('class_type')); + if ~isKey(pregen, nodename) + switch node('class_type') + case 'groups' + class = file.Group(node); + case 'datasets' + class = file.Dataset(node); + otherwise + error('NWB:FileGen:InvalidClassType',... + 'Class type %s is invalid', node('class_type')); + end + if strcmp(nodename, 'VectorData') && strcmp(namespace.name, 'hdmf_common') + class = patchVectorData(class); + end + props = class.getProps(); + + pregen(nodename) = struct('class', class, 'props', props); + end + try + Processed(iAncestor) = pregen(nodename).class; + catch + keyboard; end - props = class.getProps(); - pregen(nodename) = struct('class', class, 'props', props); end - try - Processed(iAncestor) = pregen(nodename).class; - catch - keyboard; + classprops = pregen(name).props; + names = keys(classprops); + for iAncestor = 2:length(Processed) + pname = Processed(iAncestor).type; + parentPropNames = keys(pregen(pname).props); + inherited = union(inherited, intersect(names, parentPropNames)); end end -classprops = pregen(name).props; -names = keys(classprops); -for iAncestor=2:length(Processed) - pname = Processed(iAncestor).type; - parentPropNames = keys(pregen(pname).props); - inherited = union(inherited, intersect(names, parentPropNames)); -end + +function class = patchVectorData(class) + %% Unit Attribute + % derived from schema 2.6.0 + source = containers.Map(); + source('name') = 'unit'; + source('doc') = ['NOTE: this is a special value for compatibility with the Units table and is ' ... + 'only written to file when detected to be in that specific HDF5 Group. ' ... 
+ 'The value must be ''volts''']; + source('dtype') = 'text'; + source('value') = 'volts'; + source('required') = false; + class.attributes(end+1) = file.Attribute(source); + + %% Sampling Rate Attribute + % derived from schema 2.6.0 + + source = containers.Map(); + source('name') = 'sampling_rate'; + source('doc') = ['NOTE: this is a special value for compatibility with the Units table and is ' ... + 'only written to file when detected to be in that specific HDF5 Group. ' ... + 'Must be Hertz']; + source('dtype') = 'float32'; + source('required') = false; + + class.attributes(end+1) = file.Attribute(source); + %% Resolution Attribute + % derived from schema 2.6.0 + + source = containers.Map(); + source('name') = 'resolution'; + source('doc') = ['NOTE: this is a special value for compatibility with the Units table and is ' ... + 'only written to file when detected to be in that specific HDF5 Group. ' ... + 'The smallest possible difference between two spike times. ' ... + 'Usually 1 divided by the acquisition sampling rate from which spike times were extracted, ' ... + 'but could be larger if the acquisition time series was downsampled or smaller if the ' ... + 'acquisition time series was smoothed/interpolated ' ... + 'and it is possible for the spike time to be between samples.' ... + ]; + source('dtype') = 'float64'; + source('required') = false; + + class.attributes(end+1) = file.Attribute(source); end \ No newline at end of file diff --git a/+io/writeCompound.m b/+io/writeCompound.m index c7db74b1..503b03b2 100644 --- a/+io/writeCompound.m +++ b/+io/writeCompound.m @@ -1,119 +1,119 @@ function writeCompound(fid, fullpath, data, varargin) -%convert to a struct -if istable(data) - data = table2struct(data); -elseif isa(data, 'containers.Map') - names = keys(data); - vals = values(data, names); + %convert to a struct + if istable(data) + data = table2struct(data); + elseif isa(data, 'containers.Map') + names = keys(data); + vals = values(data, names); - s = struct(); - for i=1:length(names) - s.(misc.str2validName(names{i})) = vals{i}; + s = struct(); + for i=1:length(names) + s.(misc.str2validName(names{i})) = vals{i}; + end + data = s; end - data = s; -end -%convert to scalar struct -names = fieldnames(data); -if isempty(names) - numrows = 0; -elseif isscalar(data) - if ischar(data.(names{1})) - numrows = 1; + %convert to scalar struct + names = fieldnames(data); + if isempty(names) + numrows = 0; + elseif isscalar(data) + if ischar(data.(names{1})) + numrows = 1; + else + numrows = length(data.(names{1})); + end else - numrows = length(data.(names{1})); + numrows = length(data); + s = struct(); + for i=1:length(names) + s.(names{i}) = {data.(names{i})}; + end + data = s; end -else - numrows = length(data); - s = struct(); + + %check for references and construct tid. + classes = cell(length(names), 1); + tids = cell(size(classes)); + sizes = zeros(size(classes)); for i=1:length(names) - s.(names{i}) = {data.(names{i})}; - end - data = s; -end + val = data.(names{i}); + if iscell(val) && ~iscellstr(val) + data.(names{i}) = [val{:}]; + val = val{1}; + end -%check for references and construct tid. 
-classes = cell(length(names), 1); -tids = cell(size(classes)); -sizes = zeros(size(classes)); -for i=1:length(names) - val = data.(names{i}); - if iscell(val) && ~iscellstr(val) - data.(names{i}) = [val{:}]; - val = val{1}; + classes{i} = class(val); + tids{i} = io.getBaseType(classes{i}); + sizes(i) = H5T.get_size(tids{i}); end - classes{i} = class(val); - tids{i} = io.getBaseType(classes{i}); - sizes(i) = H5T.get_size(tids{i}); -end - -tid = H5T.create('H5T_COMPOUND', sum(sizes)); -for i=1:length(names) - %insert columns into compound type - H5T.insert(tid, names{i}, sum(sizes(1:i-1)), tids{i}); -end -%close custom type ids (errors if char base type) -isH5ml = tids(cellfun('isclass', tids, 'H5ML.id')); -for i=1:length(isH5ml) - H5T.close(isH5ml{i}); -end -%optimizes for type size -H5T.pack(tid); + tid = H5T.create('H5T_COMPOUND', sum(sizes)); + for i=1:length(names) + %insert columns into compound type + H5T.insert(tid, names{i}, sum(sizes(1:i-1)), tids{i}); + end + %close custom type ids (errors if char base type) + isH5ml = tids(cellfun('isclass', tids, 'H5ML.id')); + for i=1:length(isH5ml) + H5T.close(isH5ml{i}); + end + %optimizes for type size + H5T.pack(tid); -ref_i = strcmp(classes, 'types.untyped.ObjectView') |... - strcmp(classes, 'types.untyped.RegionView'); + isReferenceClass = strcmp(classes, 'types.untyped.ObjectView') |... + strcmp(classes, 'types.untyped.RegionView'); -% convert logical values -boolNames = names(strcmp(classes, 'logical')); -for iField = 1:length(boolNames) - data.(boolNames{iField}) = strcmp('TRUE', data.(boolNames{iField})); -end + % convert logical values + boolNames = names(strcmp(classes, 'logical')); + for iField = 1:length(boolNames) + data.(boolNames{iField}) = strcmp('TRUE', data.(boolNames{iField})); + end -%transpose numeric column arrays to row arrays -% reference and str arrays are handled below -transposeNames = names(~ref_i); -for i=1:length(transposeNames) - nm = transposeNames{i}; - if iscolumn(data.(nm)) - data.(nm) = data.(nm) .'; + %transpose numeric column arrays to row arrays + % reference and str arrays are handled below + transposeNames = names(~isReferenceClass); + for i=1:length(transposeNames) + nm = transposeNames{i}; + if iscolumn(data.(nm)) + data.(nm) = data.(nm) .'; + end end -end -%attempt to convert raw reference information -refNames = names(ref_i); -for i=1:length(refNames) - data.(refNames{i}) = io.getRefData(fid, data.(refNames{i})); -end + %attempt to convert raw reference information + referenceNames = names(isReferenceClass); + for i=1:length(referenceNames) + data.(referenceNames{i}) = io.getRefData(fid, data.(referenceNames{i})); + end -try - sid = H5S.create_simple(1, numrows, []); - did = H5D.create(fid, fullpath, tid, sid, 'H5P_DEFAULT'); -catch ME - if contains(ME.message, 'name already exists') - did = H5D.open(fid, fullpath); - create_plist = H5D.get_create_plist(did); - edit_sid = H5D.get_space(did); - [~, edit_dims, ~] = H5S.get_simple_extent_dims(edit_sid); - layout = H5P.get_layout(create_plist); - is_chunked = layout == H5ML.get_constant_value('H5D_CHUNKED'); - is_same_dims = all(edit_dims == numrows); + try + sid = H5S.create_simple(1, numrows, []); + did = H5D.create(fid, fullpath, tid, sid, 'H5P_DEFAULT'); + catch ME + if contains(ME.message, 'name already exists') + did = H5D.open(fid, fullpath); + create_plist = H5D.get_create_plist(did); + edit_sid = H5D.get_space(did); + [~, edit_dims, ~] = H5S.get_simple_extent_dims(edit_sid); + layout = H5P.get_layout(create_plist); + is_chunked = layout == 
H5ML.get_constant_value('H5D_CHUNKED'); + is_same_dims = all(edit_dims == numrows); - if ~is_same_dims - if is_chunked - H5D.set_extent(did, dims); - else - warning('Attempted to change size of continuous compound `%s`. Skipping.',... - fullpath); + if ~is_same_dims + if is_chunked + H5D.set_extent(did, dims); + else + warning('Attempted to change size of continuous compound `%s`. Skipping.',... + fullpath); + end end + H5P.close(create_plist); + H5S.close(edit_sid); + else + rethrow(ME); end - H5P.close(create_plist); - H5S.close(edit_sid); - else - rethrow(ME); end -end -H5D.write(did, tid, sid, sid, 'H5P_DEFAULT', data); -H5D.close(did); -H5S.close(sid); + H5D.write(did, tid, sid, sid, 'H5P_DEFAULT', data); + H5D.close(did); + H5S.close(sid); end \ No newline at end of file diff --git a/+tests/+system/PyNWBIOTest.py b/+tests/+system/PyNWBIOTest.py index ac614dec..2962409d 100644 --- a/+tests/+system/PyNWBIOTest.py +++ b/+tests/+system/PyNWBIOTest.py @@ -9,6 +9,7 @@ from pynwb import get_manager, NWBFile, TimeSeries from pynwb.ecephys import ElectricalSeries, Clustering from pynwb.ophys import OpticalChannel, TwoPhotonSeries +from pynwb.misc import Units from hdmf.backends.hdf5 import HDF5IO from hdmf.container import Container, Data @@ -57,7 +58,11 @@ def assertContainerEqual(self, container1, container2): # noqa: C901 type1 = type(container1) type2 = type(container2) self.assertEqual(type1, type2) - for nwbfield in container1.__nwbfields__: + try: + container_fields = container1.__nwbfields__ + except AttributeError: + container_fields = container1.__fields__ + for nwbfield in container_fields: with self.subTest(nwbfield=nwbfield, container_type=type1.__name__): f1 = getattr(container1, nwbfield) f2 = getattr(container2, nwbfield) @@ -200,3 +205,12 @@ def addContainer(self, file): def getContainer(self, file): return file + + +class UnitTimesIOTest(PyNWBIOTest): + def addContainer(self, file): + self.file.units = Units('units', waveform_rate=1., resolution=3.) + self.file.units.add_unit(waveform_mean=[5], waveform_sd=[7], waveforms=np.full((1, 1), 9), + spike_times=[11]) + def getContainer(self, file): + return file.units diff --git a/+tests/+system/UnitTimesIOTest.m b/+tests/+system/UnitTimesIOTest.m index 79509fd0..47671e27 100644 --- a/+tests/+system/UnitTimesIOTest.m +++ b/+tests/+system/UnitTimesIOTest.m @@ -1,16 +1,56 @@ -classdef UnitTimesIOTest < tests.system.RoundTripTest +classdef UnitTimesIOTest < tests.system.PyNWBIOTest methods function addContainer(~, file) file.units = types.core.Units( ... - 'colnames', {'spike_times', 'waveforms'}, 'description', 'test Units'); + 'colnames', {'spike_times'; 'waveform_mean'; 'waveform_sd'; 'waveforms'} ... + , 'description', 'data on spiking units' ... + ); - file.units.addRow('spike_times', rand(), 'waveforms', rand()); - file.units.addRow('spike_times', rand(3,1), 'waveforms', rand(3,1)); - file.units.addRow('spike_times', rand(5,1), 'waveforms', {rand(5,1);rand(7,1)}); + file.units.addRow( ... + 'spike_times', 11 ... + , 'waveforms', int32(9) ... + , 'waveform_sd', 7 ... + , 'waveform_mean', 5 ... + ); + % the following is solely for a2a comparison with files. + file.units.spike_times.description = 'the spike times for each unit'; + file.units.waveforms.description = ['Individual waveforms for each spike. ' ... + 'If the dataset is three-dimensional, the third dimension shows the response ' ... + 'from different electrodes that all observe this unit simultaneously. ' ... 
+ 'In this case, the `electrodes` column of this Units table should be used to ' ... + 'indicate which electrodes are associated with this unit, ' ... + 'and the electrodes dimension here should be in the same order as the ' ... + 'electrodes referenced in the `electrodes` column of this table.']; + file.units.waveform_sd.description = ['the spike waveform standard deviation for each ' ... + 'spike unit']; + file.units.waveform_mean.description = 'the spike waveform mean for each spike unit'; + file.units.spike_times_index = types.hdmf_common.VectorIndex( ... + 'target', types.untyped.ObjectView(file.units.spike_times) ... + , 'description', 'Index for VectorData ''spike_times''' ... + , 'data', 1 ... + ); + file.units.waveforms_index = types.hdmf_common.VectorIndex( ... + 'target', types.untyped.ObjectView(file.units.waveforms) ... + , 'description', 'Index for VectorData ''waveforms''' ... + , 'data', 1 ... + ); + file.units.waveforms_index_index = types.hdmf_common.VectorIndex( ... + 'target', types.untyped.ObjectView(file.units.waveforms_index) ... + , 'description', 'Index for VectorData ''waveforms_index''' ... + , 'data', 1 ... + ); + + % set optional hidden vector data attributes + file.units.spike_times.resolution = 3; + Units = file.units; + [Units.waveform_mean.sampling_rate ... + , Units.waveform_sd.sampling_rate ... + , Units.waveforms.sampling_rate ... + ] = deal(1); end function c = getContainer(~, file) - c = file.units.spike_times; + c = file.units; end end end \ No newline at end of file diff --git a/+tests/+util/verifyContainerEqual.m b/+tests/+util/verifyContainerEqual.m index f51084c1..1bf3179f 100644 --- a/+tests/+util/verifyContainerEqual.m +++ b/+tests/+util/verifyContainerEqual.m @@ -53,18 +53,10 @@ function verifyContainerEqual(testCase, actual, expected, ignoreList) testCase.verifyGreaterThanOrEqual(actualNtfs, expectedNtfs - 10, failmsg); testCase.verifyLessThanOrEqual(actualNtfs, expectedNtfs + 10, failmsg); end - elseif startsWith(class(expectedVal), 'int') || startsWith(class(expectedVal), 'uint') - actualTypeSize = regexp(class(actualVal), 'int(\d+)', 'once', 'tokens'); - expectedTypeSize = regexp(class(expectedVal), 'int(\d+)', 'once', 'tokens'); - testCase.verifyGreaterThanOrEqual(... - str2double(actualTypeSize{1}),... - str2double(expectedTypeSize{1})); - - if startsWith(class(expectedVal), 'int') - testCase.verifyEqual(int64(actualVal), int64(expectedVal), failmsg); - else - testCase.verifyEqual(uint64(actualVal), uint64(expectedVal), failmsg); - end + elseif startsWith(class(expectedVal), 'int') + testCase.verifyEqual(int64(actualVal), int64(expectedVal), failmsg); + elseif startsWith(class(expectedVal), 'uint') + testCase.verifyEqual(uint64(actualVal), uint64(expectedVal), failmsg); else testCase.verifyEqual(actualVal, expectedVal, failmsg); end diff --git a/+types/+untyped/DataStub.m b/+types/+untyped/DataStub.m index 54bf4b0b..6e82f53e 100644 --- a/+types/+untyped/DataStub.m +++ b/+types/+untyped/DataStub.m @@ -349,8 +349,8 @@ end io.writeCompound(fid, fullpath, data); - else - %copy data over and return destination + elseif ~H5L.exists(fid, fullpath, 'H5P_DEFAULT') + % copy data over and return destination. 
ocpl = H5P.create('H5P_OBJECT_COPY'); lcpl = H5P.create('H5P_LINK_CREATE'); H5O.copy(src_fid, obj.path, fid, fullpath, ocpl, lcpl); diff --git a/+types/+untyped/MetaClass.m b/+types/+untyped/MetaClass.m index 11ed19b7..6d7840a9 100644 --- a/+types/+untyped/MetaClass.m +++ b/+types/+untyped/MetaClass.m @@ -26,11 +26,11 @@ io.writeDataset(fid, fullpath, obj.data, 'forceArray'); end catch ME - refs = obj.capture_ref_errors(ME, fullpath, refs); + refs = obj.captureReferenceErrors(ME, fullpath, refs); end end - function refs = capture_ref_errors(~, ME, fullpath, refs) + function refs = captureReferenceErrors(~, ME, fullpath, refs) if any(strcmp(ME.identifier, {... 'NWB:getRefData:InvalidPath',... 'NWB:ObjectView:MissingPath'})) @@ -58,7 +58,7 @@ try io.getRefData(fid, props{i}); catch ME - refs = obj.capture_ref_errors(ME, fullpath, refs); + refs = obj.captureReferenceErrors(ME, fullpath, refs); end end diff --git a/+types/+util/checkUnset.m b/+types/+util/checkUnset.m index ee90d6fd..8e27ddb8 100644 --- a/+types/+util/checkUnset.m +++ b/+types/+util/checkUnset.m @@ -1,18 +1,22 @@ function checkUnset(obj, argin) -props = properties(obj); -anonNames = {}; -for i = 1:length(props) - p = obj.(props{i}); - if isa(p, 'types.untyped.Anon') - anonNames = [anonNames;{p.name}]; - elseif isa(p, 'types.untyped.Set') - anonNames = [anonNames;keys(p) .']; + publicProperties = properties(obj); + objMetaClass = metaclass(obj); + isHiddenProperty = logical([objMetaClass.PropertyList.Hidden]); + hiddenProperties = {objMetaClass.PropertyList(isHiddenProperty).Name}; + allProperties = union(publicProperties, hiddenProperties); + anonNames = {}; + for i = 1:length(allProperties) + p = obj.(allProperties{i}); + if isa(p, 'types.untyped.Anon') + anonNames = [anonNames;{p.name}]; + elseif isa(p, 'types.untyped.Set') + anonNames = [anonNames;keys(p) .']; + end end -end -dropped = setdiff(argin, [props;anonNames]); -assert(isempty(dropped),... - 'NWB:CheckUnset:InvalidProperties',... - ['Unexpected properties {%s}. '... + dropped = setdiff(argin, union(allProperties, anonNames)); + assert(isempty(dropped),... + 'NWB:CheckUnset:InvalidProperties',... + ['Unexpected properties {%s}. '... '\n\nYour schema version may be incompatible with the file. '... 'Consider checking the schema version of the file with '... '`util.getSchemaVersion(filename)` '... 
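Context for the +types/+util/checkUnset.m hunk above: the rewritten body folds Hidden classdef properties into the known-property set via the metaclass, so the hidden VectorData workaround properties no longer trip the unexpected-property assertion. A minimal standalone sketch of that discovery pattern, usable on any classdef instance (the function name listAllProperties is illustrative, not part of this patch):

function allProperties = listAllProperties(obj)
    % Combine public properties with Hidden ones reported by the metaclass,
    % mirroring the lookup now performed in types.util.checkUnset.
    publicProperties = properties(obj);
    objMetaClass = metaclass(obj);
    isHiddenProperty = logical([objMetaClass.PropertyList.Hidden]);
    hiddenProperties = {objMetaClass.PropertyList(isHiddenProperty).Name};
    allProperties = union(publicProperties, hiddenProperties);
end

On a regenerated types.hdmf_common.VectorData instance, this should list the hidden unit, sampling_rate, and resolution properties alongside the public ones.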
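Relatedly, the fillVectorData*Conditional helpers in the +file/fillExport.m hunk further up emit path-guarded attribute writes in place of the usual unconditional fillDataExport output. Assembled from the string literals in that hunk, the code generated into the VectorData export method reads as follows (resolution and unit shown; sampling_rate follows the same shape as unit):

if ~isempty(obj.resolution) && any(endsWith(fullpath, 'units/spike_times'))
    io.writeAttribute(fid, [fullpath '/resolution'], obj.resolution);
end
validUnitPaths = strcat('units/', {'waveform_mean', 'waveform_sd', 'waveforms'});
if ~isempty(obj.unit) && any(endsWith(fullpath, validUnitPaths))
    io.writeAttribute(fid, [fullpath '/unit'], obj.unit);
end

This guard is what keeps the patched attributes from leaking onto VectorData columns that live outside a Units table.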
diff --git a/NwbFile.m b/NwbFile.m index 76c3c288..24e5dc6f 100644 --- a/NwbFile.m +++ b/NwbFile.m @@ -10,7 +10,7 @@ % nwbExport(nwb, 'epoch.nwb'); % % See also NWBREAD, GENERATECORE, GENERATEEXTENSION - + methods function obj = NwbFile(varargin) obj = obj@types.core.NWBFile(varargin{:}); @@ -18,14 +18,14 @@ types.util.checkUnset(obj, unique(varargin(1:2:end))); end end - + function export(obj, filename) %add to file create date current_time = datetime('now', 'TimeZone', 'local'); if isa(obj.file_create_date, 'types.untyped.DataStub') obj.file_create_date = obj.file_create_date.load(); end - + if isempty(obj.file_create_date) obj.file_create_date = current_time; elseif iscell(obj.file_create_date) @@ -33,12 +33,12 @@ function export(obj, filename) else obj.file_create_date = {obj.file_create_date current_time}; end - + %equate reference time to session_start_time if empty if isempty(obj.timestamps_reference_time) obj.timestamps_reference_time = obj.session_start_time; end - + try output_file_id = H5F.create(filename); isEditingFile = false; @@ -49,14 +49,14 @@ function export(obj, filename) else isEditingFile = strcmp(ME.identifier, 'MATLAB:imagesci:hdf5io:resourceAlreadyExists'); end - + if isEditingFile output_file_id = H5F.open(filename, 'H5F_ACC_RDWR', 'H5P_DEFAULT'); else rethrow(ME); end end - + try obj.embedSpecifications(output_file_id); refs = export@types.core.NWBFile(obj, output_file_id, '/', {}); @@ -71,7 +71,7 @@ function export(obj, filename) rethrow(ME); end end - + function o = resolve(obj, path) if ischar(path) path = {path}; @@ -84,7 +84,7 @@ function export(obj, filename) o = o{1}; end end - + function objectMap = searchFor(obj, typename, varargin) % Searches this NwbFile object for a given typename % Including the full namespace is optional. @@ -98,7 +98,7 @@ function export(obj, filename) varargin{:}); end end - + %% PRIVATE methods(Access=private) function resolveReferences(obj, fid, references) @@ -111,10 +111,8 @@ function resolveReferences(obj, fid, references) exportSuccess = isempty(unresolvedRefs); resolved(iRef) = exportSuccess; end - - if any(resolved) - references(resolved) = []; - else + + if ~any(resolved) errorFormat = ['object(s) could not be created:\n%s\n\nThe '... 'listed object(s) above contain an ObjectView, '... 'RegionView , or SoftLink object that has failed to resolve itself. '... @@ -124,9 +122,11 @@ function resolveReferences(obj, fid, references) error('NWB:NwbFile:UnresolvedReferences',... errorFormat, file.addSpaces(unresolvedRefs, 4)); end + + references(resolved) = []; end end - + function embedSpecifications(~, fid) try attrId = H5A.open(fid, '/.specloc'); @@ -138,7 +138,7 @@ function embedSpecifications(~, fid) specView = types.untyped.ObjectView(specLocation); io.writeAttribute(fid, '/.specloc', specView); end - + JsonData = schemes.exportJson(); for iJson = 1:length(JsonData) JsonDatum = JsonData(iJson); @@ -164,80 +164,80 @@ function embedSpecifications(~, fid) io.writeDataset(fid, path, Json(name)); end end - - function versionNames = getVersionNames(namespaceGroupId) - [~, ~, versionNames] = H5L.iterate(namespaceGroupId,... - 'H5_INDEX_NAME', 'H5_ITER_NATIVE',... 
- 0, @removeGroups, {}); - function [status, versionNames] = removeGroups(~, name, versionNames) - versionNames{end+1} = name; - status = 0; - end - end end end end -function tf = metaHasType(mc, typeSuffix) -assert(isa(mc, 'meta.class')); -tf = false; -if contains(mc.Name, typeSuffix, 'IgnoreCase', true) - tf = true; - return; +function versionNames = getVersionNames(namespaceGroupId) + [~, ~, versionNames] = H5L.iterate(namespaceGroupId,... + 'H5_INDEX_NAME', 'H5_ITER_NATIVE',... + 0, @removeGroups, {}); + function [status, versionNames] = removeGroups(~, name, versionNames) + versionNames{end+1} = name; + status = 0; + end end -for i = 1:length(mc.SuperclassList) - sc = mc.SuperclassList(i); - if metaHasType(sc, typeSuffix) +function tf = metaHasType(mc, typeSuffix) + assert(isa(mc, 'meta.class')); + tf = false; + if contains(mc.Name, typeSuffix, 'IgnoreCase', true) tf = true; return; end -end + + for i = 1:length(mc.SuperclassList) + sc = mc.SuperclassList(i); + if metaHasType(sc, typeSuffix) + tf = true; + return; + end + end end function pathToObjectMap = searchProperties(... - pathToObjectMap,... - obj,... - basePath,... - typename,... - varargin) -assert(all(iscellstr(varargin)),... - 'NWB:NwbFile:SearchProperties:InvalidVariableArguments',... - 'Optional keywords for searchFor must be char arrays.'); -shouldSearchSuperClasses = any(strcmpi(varargin, 'includeSubClasses')); - -if isa(obj, 'types.untyped.MetaClass') - propertyNames = properties(obj); - getProperty = @(x, prop) x.(prop); -elseif isa(obj, 'types.untyped.Set') - propertyNames = obj.keys(); - getProperty = @(x, prop) x.get(prop); -elseif isa(obj, 'types.untyped.Anon') - propertyNames = {obj.name}; - getProperty = @(x, prop) x.value; -else - error('NWB:NwbFile:SearchProperties:InvalidType',... - 'Invalid object type passed %s', class(obj)); -end + pathToObjectMap,... + obj,... + basePath,... + typename,... + varargin) + assert(all(iscellstr(varargin)),... + 'NWB:NwbFile:SearchProperties:InvalidVariableArguments',... + 'Optional keywords for searchFor must be char arrays.'); + shouldSearchSuperClasses = any(strcmpi(varargin, 'includeSubClasses')); -searchTypename = @(obj, typename) contains(class(obj), typename, 'IgnoreCase', true); -if shouldSearchSuperClasses - searchTypename = @(obj, typename) metaHasType(metaclass(obj), typename); -end + if isa(obj, 'types.untyped.MetaClass') + propertyNames = properties(obj); + getProperty = @(x, prop) x.(prop); + elseif isa(obj, 'types.untyped.Set') + propertyNames = obj.keys(); + getProperty = @(x, prop) x.get(prop); + elseif isa(obj, 'types.untyped.Anon') + propertyNames = {obj.name}; + getProperty = @(x, prop) x.value; + else + error('NWB:NwbFile:SearchProperties:InvalidType',... + 'Invalid object type passed %s', class(obj)); + end -for i = 1:length(propertyNames) - propName = propertyNames{i}; - propValue = getProperty(obj, propName); - fullPath = [basePath '/' propName]; - if searchTypename(propValue, typename) - pathToObjectMap(fullPath) = propValue; + searchTypename = @(obj, typename) contains(class(obj), typename, 'IgnoreCase', true); + if shouldSearchSuperClasses + searchTypename = @(obj, typename) metaHasType(metaclass(obj), typename); end - - if isa(propValue, 'types.untyped.GroupClass')... - || isa(propValue, 'types.untyped.Set')... - || isa(propValue, 'types.untyped.Anon') - % recursible (even if there is a match!) 
- searchProperties(pathToObjectMap, propValue, fullPath, typename, varargin{:}); + + for i = 1:length(propertyNames) + propName = propertyNames{i}; + propValue = getProperty(obj, propName); + fullPath = [basePath '/' propName]; + if searchTypename(propValue, typename) + pathToObjectMap(fullPath) = propValue; + end + + if isa(propValue, 'types.untyped.GroupClass')... + || isa(propValue, 'types.untyped.Set')... + || isa(propValue, 'types.untyped.Anon') + % recursible (even if there is a match!) + searchProperties(pathToObjectMap, propValue, fullPath, typename, varargin{:}); + end end -end end \ No newline at end of file
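End-to-end, the pieces above combine so that the hidden attributes round-trip only when a column actually resolves to a path under /units. A hedged usage sketch mirroring UnitTimesIOTest/addContainer above, assuming classes regenerated with the patched hdmf_common VectorData (the identifier and output file name are illustrative):

% Write a Units table whose hidden spike_times resolution reaches disk.
nwb = NwbFile( ...
    'identifier', 'UNITS_DEMO' ...
    , 'session_description', 'hidden VectorData attribute demo' ...
    , 'session_start_time', datetime('now', 'TimeZone', 'local'));
nwb.units = types.core.Units( ...
    'colnames', {'spike_times'} ...
    , 'description', 'data on spiking units');
nwb.units.addRow('spike_times', 11);
nwb.units.spike_times_index = types.hdmf_common.VectorIndex( ...
    'target', types.untyped.ObjectView(nwb.units.spike_times) ...
    , 'description', 'Index for VectorData ''spike_times''' ...
    , 'data', 1);
nwb.units.spike_times.resolution = 3; % hidden property from patchVectorData
% On export, fullpath for this column ends in 'units/spike_times', so the
% generated conditional shown earlier writes the resolution attribute.
nwbExport(nwb, 'unitsDemo.nwb');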