diff --git a/docker/rest-server.py b/docker/rest-server.py
index 159cbb4..0a80e4b 100644
--- a/docker/rest-server.py
+++ b/docker/rest-server.py
@@ -17,7 +17,7 @@ class NIDMRest(Resource):
def get(self, all): # noqa: A002
query_bits = []
for a in request.args.keys():
- query_bits.append("{}={}".format(a, request.args.get(a)))
+ query_bits.append(f"{a}={request.args.get(a)}")
query = "&".join(query_bits)
files = getTTLFiles()
@@ -29,9 +29,7 @@ def get(self, all): # noqa: A002
output_format=RestParser.OBJECT_FORMAT, verbosity_level=5
)
- json_str = json.dumps(
- restParser.run(files, "{}?{}".format(all, query)), indent=2
- )
+ json_str = json.dumps(restParser.run(files, f"{all}?{query}"), indent=2)
response = app.response_class(
response=json_str, status=200, mimetype="application/json"
)
@@ -42,9 +40,7 @@ def get(self, all): # noqa: A002
class Instructions(Resource):
def get(self):
return {
- "message": "You probably want to start at {}projects See instructions at PyNIDM/docker/README.md for details on the API and loading data.".format(
- request.url_root
- )
+ "message": f"You probably want to start at {request.url_root}projects See instructions at PyNIDM/docker/README.md for details on the API and loading data."
}
diff --git a/src/nidm/core/dot.py b/src/nidm/core/dot.py
index 602d2b0..235da42 100644
--- a/src/nidm/core/dot.py
+++ b/src/nidm/core/dot.py
@@ -221,7 +221,7 @@
def htlm_link_if_uri(value):
try:
uri = value.uri
- return '<a href="%s">%s</a>' % (uri, str(value))
+ return f'<a href="{uri}">{value}</a>'
except AttributeError:
return str(value)
@@ -279,7 +279,7 @@ def _attach_attribute_annotation(node, record):
% (
attr.uri,
escape(str(attr)),
- ' href="%s"' % value.uri if isinstance(value, Identifier) else "",
+ f' href="{value.uri}"' if isinstance(value, Identifier) else "",
escape(
str(value)
if not isinstance(value, datetime)
@@ -291,7 +291,7 @@ def _attach_attribute_annotation(node, record):
ann_rows.append(ANNOTATION_END_ROW)
count[3] += 1
annotations = pydot.Node(
- "ann%d" % count[3], label="\n".join(ann_rows), **ANNOTATION_STYLE
+ f"ann{count[3]}", label="\n".join(ann_rows), **ANNOTATION_STYLE
)
dot.add_node(annotations)
dot.add_edge(pydot.Edge(annotations, node, **ANNOTATION_LINK_STYLE))
@@ -299,58 +299,50 @@ def _attach_attribute_annotation(node, record):
def _add_bundle(bundle):
count[2] += 1
subdot = pydot.Cluster(
- graph_name="c%d" % count[2], URL='"%s"' % bundle.identifier.uri
+ graph_name=f"c{count[2]}", URL=f'"{bundle.identifier.uri}"'
)
if use_labels:
if bundle.label == bundle.identifier:
- bundle_label = '"%s"' % str(bundle.label)
+ bundle_label = f'"{bundle.label}"'
else:
# Fancier label if both are different. The label will be
# the main node text, whereas the identifier will be a
# kind of subtitle.
bundle_label = (
- "<%s<br />"
+ f"<{bundle.label}<br />"
'<font color="#333333" point-size="10">'
- "%s</font>>"
+ f"{bundle.identifier}</font>>"
)
- bundle_label = bundle_label % (
- str(bundle.label),
- str(bundle.identifier),
- )
- subdot.set_label('"%s"' % str(bundle_label))
+ subdot.set_label(f'"{bundle_label}"')
else:
- subdot.set_label('"%s"' % str(bundle.identifier))
+ subdot.set_label(f'"{bundle.identifier}"')
_bundle_to_dot(subdot, bundle)
dot.add_subgraph(subdot)
return subdot
def _add_node(record):
count[0] += 1
- node_id = "n%d" % count[0]
+ node_id = f"n{count[0]}"
if use_labels:
if record.label == record.identifier:
- node_label = '"%s"' % str(record.label)
+ node_label = f'"{record.label}"'
else:
# Fancier label if both are different. The label will be
# the main node text, whereas the identifier will be a
# kind of subtitle.
node_label = (
- "<%s<br />"
+ f"<{record.label}<br />"
'<font color="#333333" point-size="10">'
- "%s</font>>"
- )
- node_label = node_label % (
- str(record.label),
- str(record.identifier),
+ f"{record.identifier}</font>>"
)
else:
- node_label = '"%s"' % str(record.identifier)
+ node_label = f'"{record.identifier}"'
uri = record.identifier.uri
print("record type: ", record.get_type())
style = DOT_PROVONE_STYLE[record.get_type()]
print("style: ", style)
- node = pydot.Node(node_id, label=node_label, URL='"%s"' % uri, **style)
+ node = pydot.Node(node_id, label=node_label, URL=f'"{uri}"', **style)
node_map[uri] = node
dot.add_node(node)
@@ -360,19 +352,19 @@ def _add_node(record):
def _add_generic_node(qname):
count[0] += 1
- node_id = "n%d" % count[0]
- node_label = '"%s"' % str(qname)
+ node_id = f"n{count[0]}"
+ node_label = f'"{qname}"'
uri = qname.uri
style = DOT_PROVONE_STYLE[0]
- node = pydot.Node(node_id, label=node_label, URL='"%s"' % uri, **style)
+ node = pydot.Node(node_id, label=node_label, URL=f'"{uri}"', **style)
node_map[uri] = node
dot.add_node(node)
return node
def _get_bnode():
count[1] += 1
- bnode_id = "b%d" % count[1]
+ bnode_id = f"b{count[1]}"
bnode = pydot.Node(bnode_id, label='""', shape="point", color="gray")
dot.add_node(bnode)
return bnode
diff --git a/src/nidm/core/serializers/__init__.py b/src/nidm/core/serializers/__init__.py
index 738c9e7..016d18f 100644
--- a/src/nidm/core/serializers/__init__.py
+++ b/src/nidm/core/serializers/__init__.py
@@ -73,4 +73,4 @@ def get(format_name):
try:
return Registry.serializers[format_name]
except KeyError:
- raise DoNotExist('No serializer available for the format "%s"' % format_name)
+ raise DoNotExist(f'No serializer available for the format "{format_name}"')
diff --git a/src/nidm/core/serializers/provonerdf.py b/src/nidm/core/serializers/provonerdf.py
index 1d330c7..6a29f39 100644
--- a/src/nidm/core/serializers/provonerdf.py
+++ b/src/nidm/core/serializers/provonerdf.py
@@ -62,7 +62,7 @@ def __init__(self):
def get_anon_id(self, obj, local_prefix="id"):
if obj not in self._cache:
self._count += 1
- self._cache[obj] = pm.Identifier("_:%s%d" % (local_prefix, self._count)).uri
+ self._cache[obj] = pm.Identifier(f"_:{local_prefix}{self._count}").uri
return self._cache[obj]
@@ -178,7 +178,7 @@ def decode_rdf_representation(self, literal, graph):
if datatype == XSD["gYearMonth"]:
parsed_info = dateutil.parser.parse(literal)
return pm.Literal(
- "{0}-{1:02d}".format(parsed_info.year, parsed_info.month),
+ f"{parsed_info.year}-{parsed_info.month:02d}",
datatype=self.valid_identifier(datatype),
)
else:
diff --git a/src/nidm/experiment/CDE.py b/src/nidm/experiment/CDE.py
index ff2f763..a65b59c 100644
--- a/src/nidm/experiment/CDE.py
+++ b/src/nidm/experiment/CDE.py
@@ -12,7 +12,7 @@ def download_cde_files():
cde_dir = tempfile.gettempdir()
for url in Constants.CDE_FILE_LOCATIONS:
- urlretrieve(url, "{}/{}".format(cde_dir, url.split("/")[-1]))
+ urlretrieve(url, f"{cde_dir}/{url.split('/')[-1]}")
return cde_dir
@@ -25,7 +25,7 @@ def getCDEs(file_list=None):
hasher.update(str(file_list).encode("utf-8"))
h = hasher.hexdigest()
- cache_file_name = tempfile.gettempdir() + "/cde_graph.{}.pickle".format(h)
+ cache_file_name = tempfile.gettempdir() + f"/cde_graph.{h}.pickle"
if path.isfile(cache_file_name):
with open(cache_file_name, "rb") as fp:
@@ -50,7 +50,7 @@ def getCDEs(file_list=None):
file_list = []
for f in ["ants_cde.ttl", "fs_cde.ttl", "fsl_cde.ttl"]:
- fname = "{}/{}".format(cde_dir, f)
+ fname = f"{cde_dir}/{f}"
if path.isfile(fname):
file_list.append(fname)
diff --git a/src/nidm/experiment/Core.py b/src/nidm/experiment/Core.py
index 5af9f89..e047f84 100644
--- a/src/nidm/experiment/Core.py
+++ b/src/nidm/experiment/Core.py
@@ -250,8 +250,9 @@ def addLiteralAttribute(
)
except KeyError as e:
print(
- '\nPredicate namespace identifier " %s " not found! \n'
- % (str(e).split("'")[1])
+ '\nPredicate namespace identifier "',
+ str(e).split("'")[1],
+ '" not found! \n',
)
print(
"Use addNamespace method to add namespace before adding literal attribute \n"
@@ -364,7 +365,7 @@ def get_metadata_dict(self, NIDM_TYPE):
uri = s
if uri is None:
- print("Error finding %s in NIDM-Exp Graph" % NIDM_TYPE)
+ print(f"Error finding {NIDM_TYPE} in NIDM-Exp Graph")
return metadata
# Cycle through metadata and add to json
@@ -531,7 +532,7 @@ def save_DotGraph(self, filename, format=None): # noqa: A002
"""
qres = rdf_graph.query(query)
for row in qres:
- print("project uuid = %s" % row)
+ print(f"project uuid = {row}")
# parse uuid from project URI
# project_uuid = str(row[0]).rsplit('/', 1)[-1]
project_uuid = str(row[0])
@@ -557,7 +558,7 @@ def save_DotGraph(self, filename, format=None): # noqa: A002
dot.obj_dict["nodes"][key][0]["attributes"]["URL"]
):
session_node = key
- # print("session node = %s" %key)
+ # print(f"session node = {key}")
# add to DOT structure edge between project_node and session_node
dot.add_edge(Edge(session_node, project_node, **style))
@@ -574,7 +575,7 @@ def save_DotGraph(self, filename, format=None): # noqa: A002
]
):
acquisition_node = key
- # print("acquisition node = %s" %key)
+ # print(f"acquisition node = {key}")
dot.add_edge(
Edge(
diff --git a/src/nidm/experiment/Navigate.py b/src/nidm/experiment/Navigate.py
index 01e2964..52f443e 100644
--- a/src/nidm/experiment/Navigate.py
+++ b/src/nidm/experiment/Navigate.py
@@ -538,7 +538,7 @@ def GetDataelementDetails(nidm_files_tuple, dataelement):
if d in rdf_graph.subjects(
predicate=isa, object=Constants.NIDM["Project"]
):
- result["inProjects"].add("{} ({})".format(str(d), file))
+ result["inProjects"].add(f"{d} ({file})")
return result # found it, we are done
diff --git a/src/nidm/experiment/Query.py b/src/nidm/experiment/Query.py
index 4b09146..7f4bd80 100644
--- a/src/nidm/experiment/Query.py
+++ b/src/nidm/experiment/Query.py
@@ -86,9 +86,7 @@ def sparql_query_nidm(nidm_file_list, query, output_file=None, return_graph=Fals
except Exception as e:
print(
- "Exception while communicating with blazegraph at {}: {}".format(
- environ["BLAZEGRAPH_URL"], e
- )
+ f"Exception while communicating with blazegraph at {environ['BLAZEGRAPH_URL']}: {e}"
)
# query result list
@@ -231,8 +229,7 @@ def testprojectmeta(nidm_file_list):
def GetProjectSessionsMetadata(nidm_file_list, project_uuid):
import json
- query = (
- """
+ query = f"""
prefix nidm:
prefix rdf:
@@ -240,14 +237,12 @@ def GetProjectSessionsMetadata(nidm_file_list, project_uuid):
select distinct ?session_uuid ?p ?o
- where {
- ?session_uuid dct:isPartOf <%s> ;
+ where {{
+ ?session_uuid dct:isPartOf <{project_uuid}> ;
?p ?o .
- }
+ }}
"""
- % project_uuid
- )
df = sparql_query_nidm(nidm_file_list, query, output_file=None)
@@ -296,8 +291,7 @@ def GetProjectInstruments(nidm_file_list, project_id):
:param project_id: identifier of project you'd like to search for unique instruments
:return: Dataframe of instruments and project titles
"""
- query = (
- """
+ query = f"""
PREFIX prov:
PREFIX sio:
PREFIX dct:
@@ -305,7 +299,7 @@ def GetProjectInstruments(nidm_file_list, project_id):
prefix dctypes:
SELECT DISTINCT ?project_title ?assessment_type
- WHERE {
+ WHERE {{
?entity rdf:type onli:assessment-instrument ;
rdf:type ?assessment_type .
?entity prov:wasGeneratedBy/dct:isPartOf/dct:isPartOf ?project .
@@ -314,11 +308,9 @@ def GetProjectInstruments(nidm_file_list, project_id):
- FILTER( (!regex(str(?assessment_type), "http://www.w3.org/ns/prov#Entity")) && (!regex(str(?assessment_type), "http://purl.org/nidash/nidm#AcquisitionObject")) && (regex(str(?project), "%s")) )
- }
+ FILTER( (!regex(str(?assessment_type), "http://www.w3.org/ns/prov#Entity")) && (!regex(str(?assessment_type), "http://purl.org/nidash/nidm#AcquisitionObject")) && (regex(str(?project), "{project_id}")) )
+ }}
"""
- % project_id
- )
logging.info("Query: %s", query)
df = sparql_query_nidm(nidm_file_list, query, output_file=None)
results = df.to_dict()
@@ -334,8 +326,7 @@ def GetInstrumentVariables(nidm_file_list, project_id):
:param project_id: identifier of project you'd like to search for unique instruments
:return: Dataframe of instruments, project titles, and variables
"""
- query = (
- """
+ query = f"""
PREFIX prov:
PREFIX sio:
PREFIX dct:
@@ -343,7 +334,7 @@ def GetInstrumentVariables(nidm_file_list, project_id):
prefix dctypes:
SELECT DISTINCT ?project_title ?assessment_type ?variables
- WHERE {
+ WHERE {{
?entity rdf:type onli:assessment-instrument ;
rdf:type ?assessment_type ;
?variables ?value .
@@ -353,11 +344,9 @@ def GetInstrumentVariables(nidm_file_list, project_id):
- FILTER( (!regex(str(?assessment_type), "http://www.w3.org/ns/prov#Entity")) && (!regex(str(?assessment_type), "http://purl.org/nidash/nidm#AcquisitionObject")) && (regex(str(?project), "%s")) )
- }
+ FILTER( (!regex(str(?assessment_type), "http://www.w3.org/ns/prov#Entity")) && (!regex(str(?assessment_type), "http://purl.org/nidash/nidm#AcquisitionObject")) && (regex(str(?project), "{project_id}")) )
+ }}
"""
- % project_id
- )
logging.info("Query: %s", query)
df = sparql_query_nidm(nidm_file_list, query, output_file=None)
results = df.to_dict()
@@ -373,7 +362,7 @@ def GetParticipantIDs(nidm_file_list, output_file=None):
:return: list of Constants.NIDM_PARTICIPANT UUIDs and Constants.NIDM_SUBJECTID
"""
- query = """
+ query = f"""
PREFIX prov:
PREFIX sio:
@@ -381,21 +370,18 @@ def GetParticipantIDs(nidm_file_list, output_file=None):
PREFIX rdf:
SELECT DISTINCT ?uuid ?ID
- WHERE {
+ WHERE {{
?activity rdf:type prov:Activity ;
prov:qualifiedAssociation _:blanknode .
- _:blanknode prov:hadRole %s ;
+ _:blanknode prov:hadRole {Constants.NIDM_PARTICIPANT} ;
prov:agent ?uuid .
- ?uuid %s ?ID .
+ ?uuid {Constants.NIDM_SUBJECTID} ?ID .
- }
- """ % (
- Constants.NIDM_PARTICIPANT,
- Constants.NIDM_SUBJECTID,
- )
+ }}
+ """
df = sparql_query_nidm(nidm_file_list, query, output_file=output_file)
@@ -413,7 +399,7 @@ def GetParticipantIDFromAcquisition(nidm_file_list, acquisition, output_file=Non
:return: a dataframe subject ID and prov:Agent UUID of participant with qualified association
"""
- query = """
+ query = f"""
PREFIX prov:
PREFIX sio:
@@ -422,22 +408,18 @@ def GetParticipantIDFromAcquisition(nidm_file_list, acquisition, output_file=Non
PREFIX prov:
SELECT DISTINCT ?uuid ?ID
- WHERE {
+ WHERE {{
- <%s> rdf:type prov:Activity ;
+ <{acquisition}> rdf:type prov:Activity ;
prov:qualifiedAssociation _:blanknode .
- _:blanknode prov:hadRole %s ;
+ _:blanknode prov:hadRole {Constants.NIDM_PARTICIPANT} ;
prov:agent ?uuid .
- ?uuid %s ?ID .
+ ?uuid {Constants.NIDM_SUBJECTID} ?ID .
- }
- """ % (
- acquisition,
- Constants.NIDM_PARTICIPANT,
- Constants.NIDM_SUBJECTID,
- )
+ }}
+ """
df = sparql_query_nidm(nidm_file_list, query, output_file=output_file)
@@ -451,7 +433,7 @@ def GetParticipantDetails(nidm_file_list, project_id, participant_id, output_fil
:return: list of Constants.NIDM_PARTICIPANT UUIDs and Constants.NIDM_SUBJECTID
"""
- query = """
+ query = f"""
PREFIX prov:
PREFIX sio:
@@ -462,28 +444,24 @@ def GetParticipantDetails(nidm_file_list, project_id, participant_id, output_fil
SELECT DISTINCT ?uuid ?id ?activity
- WHERE {
+ WHERE {{
?activity rdf:type prov:Activity ;
prov:qualifiedAssociation _:blanknode .
- _:blanknode prov:hadRole %s ;
+ _:blanknode prov:hadRole {Constants.NIDM_PARTICIPANT} ;
prov:agent ?uuid .
- ?uuid %s ?id .
+ ?uuid {Constants.NIDM_SUBJECTID} ?id .
?proj a nidm:Project .
?sess dct:isPartOf ?proj .
?activity dct:isPartOf ?sess .
- FILTER(regex(str(?uuid), "%s")).
+ FILTER(regex(str(?uuid), "{participant_id}")).
- }
- """ % (
- Constants.NIDM_PARTICIPANT,
- Constants.NIDM_SUBJECTID,
- participant_id,
- )
+ }}
+ """
df = sparql_query_nidm(nidm_file_list, query, output_file=output_file)
data = df.values
@@ -1209,13 +1187,11 @@ def matchPrefix(possible_URI, short=False) -> str:
if short:
return k
else:
- return "{}:{}".format(k, possible_URI.replace(n, ""))
+ return f"{k}:{possible_URI.replace(n, '')}"
# also check the prov prefix
if possible_URI.startswith("http://www.w3.org/ns/prov#"):
- return "{}:{}".format(
- "prov", possible_URI.replace("http://www.w3.org/ns/prov#", "")
- )
+ return f"prov:{possible_URI.replace('http://www.w3.org/ns/prov#', '')}"
return possible_URI
@@ -1405,14 +1381,14 @@ def OpenGraph(file):
try:
with open(file) as f:
data = f.read()
- logging.debug("Sending {} to blazegraph".format(file))
+ logging.debug("Sending %s to blazegraph", file)
requests.post(
url=environ["BLAZEGRAPH_URL"],
data=data,
headers={"Content-type": "application/x-turtle"},
)
except Exception as e:
- logging.error("Exception {} loading {} into Blazegraph.".format(e, file))
+ logging.error("Exception %s loading %s into Blazegraph.", e, file)
BLOCKSIZE = 65536
hasher = hashlib.md5()
@@ -1423,7 +1399,7 @@ def OpenGraph(file):
buf = afile.read(BLOCKSIZE)
digest = hasher.hexdigest()
- pickle_file = "{}/rdf_graph.{}.pickle".format(tempfile.gettempdir(), digest)
+ pickle_file = f"{tempfile.gettempdir()}/rdf_graph.{digest}.pickle"
if path.isfile(pickle_file):
with open(pickle_file, "rb") as fp:
return pickle.load(fp)
@@ -1491,7 +1467,7 @@ def download_cde_files():
cde_dir = tempfile.gettempdir()
for url in Constants.CDE_FILE_LOCATIONS:
- urlretrieve(url, "{}/{}".format(cde_dir, url.split("/")[-1]))
+ urlretrieve(url, f"{cde_dir}/{url.split('/')[-1]}")
return cde_dir
@@ -1504,7 +1480,7 @@ def getCDEs(file_list=None):
hasher.update(str(file_list).encode("utf-8"))
h = hasher.hexdigest()
- cache_file_name = tempfile.gettempdir() + "/cde_graph.{}.pickle".format(h)
+ cache_file_name = tempfile.gettempdir() + f"/cde_graph.{h}.pickle"
if path.isfile(cache_file_name):
with open(cache_file_name, "rb") as fp:
@@ -1530,7 +1506,7 @@ def getCDEs(file_list=None):
# TODO: the list of file names should be it's own constant or derived from CDE_FILE_LOCATIONS
file_list = []
for f in ["ants_cde.ttl", "fs_cde.ttl", "fsl_cde.ttl"]:
- fname = "{}/{}".format(cde_dir, f)
+ fname = f"{cde_dir}/{f}"
if os.path.isfile(fname):
file_list.append(fname)
diff --git a/src/nidm/experiment/Session.py b/src/nidm/experiment/Session.py
index 3536f21..65deb16 100644
--- a/src/nidm/experiment/Session.py
+++ b/src/nidm/experiment/Session.py
@@ -72,7 +72,7 @@ def acquisition_exist(self, uuid):
:param uuid: full uuid of acquisition
:return: True if exists, False otherwise
"""
- # print("Query uuid: %s" %uuid)
+ # print(f"Query uuid: {uuid}")
for acquisitions in self._acquisitions:
# print(acquisitions._identifier._localpart)
if str(uuid) == acquisitions._identifier._localpart:
diff --git a/src/nidm/experiment/Utils.py b/src/nidm/experiment/Utils.py
index 2c8aaad..73c769c 100644
--- a/src/nidm/experiment/Utils.py
+++ b/src/nidm/experiment/Utils.py
@@ -96,7 +96,7 @@ def read_nidm(nidmDoc):
proj_id = s
if proj_id is None:
- print("Error reading NIDM-Exp Document %s, Must have Project Object" % nidmDoc)
+ print(f"Error reading NIDM-Exp Document {nidmDoc}, Must have Project Object")
print()
create_obj = input("Should read_nidm create a Project object for you [yes]: ")
if create_obj == "yes" or create_obj == "":
@@ -142,12 +142,12 @@ def read_nidm(nidmDoc):
for s in rdf_graph_parse.subjects(
predicate=RDF.type, object=URIRef(Constants.NIDM_SESSION.uri)
):
- # print("session: %s" % s)
+ # print(f"session: {s}")
# Split subject URI for session into namespace, uuid
nm, session_uuid = split_uri(s)
- # print("session uuid= %s" %session_uuid)
+ # print(f"session uuid= {session_uuid}")
# instantiate session with this uuid
session = Session(project=project, uuid=session_uuid, add_default_type=False)
@@ -165,7 +165,7 @@ def read_nidm(nidmDoc):
):
# Split subject URI for session into namespace, uuid
nm, acq_uuid = split_uri(acq)
- # print("acquisition uuid: %s" %acq_uuid)
+ # print(f"acquisition uuid: {acq_uuid}")
# query for whether this is an AssessmentAcquisition of other Acquisition, etc.
for rdf_type in rdf_graph_parse.objects(subject=acq, predicate=RDF.type):
@@ -177,7 +177,7 @@ def read_nidm(nidmDoc):
):
# Split subject URI for acquisition object (entity) into namespace, uuid
nm, acq_obj_uuid = split_uri(acq_obj)
- # print("acquisition object uuid: %s" %acq_obj_uuid)
+ # print(f"acquisition object uuid: {acq_obj_uuid}")
# query for whether this is an MRI acquisition by way of looking at the generated entity and determining
# if it has the tuple [uuid Constants.NIDM_ACQUISITION_MODALITY Constants.NIDM_MRI]
@@ -233,7 +233,7 @@ def read_nidm(nidmDoc):
) in rdf_graph:
# Split subject URI for associated acquisition entity for nidm:StimulusResponseFile into namespace, uuid
nm, assoc_acq_uuid = split_uri(assoc_acq)
- # print("associated acquisition object (stimulus file) uuid: %s" % assoc_acq_uuid)
+ # print(f"associated acquisition object (stimulus file) uuid: {assoc_acq_uuid}")
# if so then add this entity and associate it with acquisition activity and MRI entity
events_obj = AcquisitionObject(
acquisition=acquisition, uuid=assoc_acq_uuid
@@ -413,8 +413,7 @@ def read_nidm(nidmDoc):
)
# now we need to check if there are labels for data element isAbout entries, if so add them.
- query2 = (
- """
+ query2 = f"""
prefix nidm:
prefix rdfs:
@@ -422,16 +421,14 @@ def read_nidm(nidmDoc):
prefix prov:
select distinct ?id ?label
- where {
- <%s> nidm:isAbout ?id .
+ where {{
+ <{row["uuid"]}> nidm:isAbout ?id .
?id rdf:type prov:Entity ;
rdfs:label ?label .
- }
+ }}
"""
- % row["uuid"]
- )
# print(query2)
qres2 = rdf_graph_parse.query(query2)
@@ -723,70 +720,192 @@ def QuerySciCrunchElasticSearch(
sys.exit(1)
# Add check for internet connection, if not then skip this query...return empty dictionary
- headers = {
- "Content-Type": "application/json",
- }
-
params = (("key", os.environ["INTERLEX_API_KEY"]),)
if type == "cde":
if anscestors:
- data = (
- '\n{\n "query": {\n "bool": {\n "must" : [\n { "term" : { "type" : "cde" } },\n { "terms" : { "ancestors.ilx" : ["ilx_0115066" , "ilx_0103210", "ilx_0115072", "ilx_0115070"] } },\n { "multi_match" : {\n "query": "%s", \n "fields": [ "label", "definition" ] \n } }\n]\n }\n }\n}\n'
- % query_string
- )
+ data = {
+ "query": {
+ "bool": {
+ "must": [
+ {"term": {"type": "cde"}},
+ {
+ "terms": {
+ "ancestors.ilx": [
+ "ilx_0115066",
+ "ilx_0103210",
+ "ilx_0115072",
+ "ilx_0115070",
+ ]
+ }
+ },
+ {
+ "multi_match": {
+ "query": query_string,
+ "fields": ["label", "definition"],
+ }
+ },
+ ]
+ }
+ }
+ }
else:
- data = (
- '\n{\n "query": {\n "bool": {\n "must" : [\n { "term" : { "type" : "cde" } },\n { "multi_match" : {\n "query": "%s", \n "fields": [ "label", "definition" ] \n } }\n]\n }\n }\n}\n'
- % query_string
- )
+ data = {
+ "query": {
+ "bool": {
+ "must": [
+ {"term": {"type": "cde"}},
+ {
+ "multi_match": {
+ "query": query_string,
+ "fields": ["label", "definition"],
+ }
+ },
+ ]
+ }
+ }
+ }
elif type == "pde":
if anscestors:
- data = (
- '\n{\n "query": {\n "bool": {\n "must" : [\n { "term" : { "type" : "pde" } },\n { "terms" : { "ancestors.ilx" : ["ilx_0115066" , "ilx_0103210", "ilx_0115072", "ilx_0115070"] } },\n { "multi_match" : {\n "query": "%s", \n "fields": [ "label", "definition" ] \n } }\n]\n }\n }\n}\n'
- % query_string
- )
+ data = {
+ "query": {
+ "bool": {
+ "must": [
+ {"term": {"type": "pde"}},
+ {
+ "terms": {
+ "ancestors.ilx": [
+ "ilx_0115066",
+ "ilx_0103210",
+ "ilx_0115072",
+ "ilx_0115070",
+ ]
+ }
+ },
+ {
+ "multi_match": {
+ "query": query_string,
+ "fields": ["label", "definition"],
+ }
+ },
+ ]
+ }
+ }
+ }
else:
- data = (
- '\n{\n "query": {\n "bool": {\n "must" : [\n { "term" : { "type" : "pde" } },\n { "multi_match" : {\n "query": "%s", \n "fields": [ "label", "definition" ] \n } }\n]\n }\n }\n}\n'
- % query_string
- )
+ data = {
+ "query": {
+ "bool": {
+ "must": [
+ {"term": {"type": "pde"}},
+ {
+ "multi_match": {
+ "query": query_string,
+ "fields": ["label", "definition"],
+ }
+ },
+ ]
+ }
+ }
+ }
elif type == "fde":
if anscestors:
- data = (
- '\n{\n "query": {\n "bool": {\n "must" : [\n { "term" : { "type" : "fde" } },\n { "terms" : { "ancestors.ilx" : ["ilx_0115066" , "ilx_0103210", "ilx_0115072", "ilx_0115070"] } },\n { "multi_match" : {\n "query": "%s", \n "fields": [ "label", "definition" ] \n } }\n]\n }\n }\n}\n'
- % query_string
- )
+ data = {
+ "query": {
+ "bool": {
+ "must": [
+ {"term": {"type": "fde"}},
+ {
+ "terms": {
+ "ancestors.ilx": [
+ "ilx_0115066",
+ "ilx_0103210",
+ "ilx_0115072",
+ "ilx_0115070",
+ ]
+ }
+ },
+ {
+ "multi_match": {
+ "query": query_string,
+ "fields": ["label", "definition"],
+ }
+ },
+ ]
+ }
+ }
+ }
else:
- data = (
- '\n{\n "query": {\n "bool": {\n "must" : [\n { "term" : { "type" : "fde" } },\n { "multi_match" : {\n "query": "%s", \n "fields": [ "label", "definition" ] \n } }\n]\n }\n }\n}\n'
- % query_string
- )
+ data = {
+ "query": {
+ "bool": {
+ "must": [
+ {"term": {"type": "fde"}},
+ {
+ "multi_match": {
+ "query": query_string,
+ "fields": ["label", "definition"],
+ }
+ },
+ ]
+ }
+ }
+ }
elif type == "term":
if anscestors:
- data = (
- '\n{\n "query": {\n "bool": {\n "must" : [\n { "term" : { "type" : "term" } },\n { "terms" : { "ancestors.ilx" : ["ilx_0115066" , "ilx_0103210", "ilx_0115072", "ilx_0115070"] } },\n { "multi_match" : {\n "query": "%s", \n "fields": [ "label", "definition" ] \n } }\n]\n }\n }\n}\n'
- % query_string
- )
+ data = {
+ "query": {
+ "bool": {
+ "must": [
+ {"term": {"type": "term"}},
+ {
+ "terms": {
+ "ancestors.ilx": [
+ "ilx_0115066",
+ "ilx_0103210",
+ "ilx_0115072",
+ "ilx_0115070",
+ ]
+ }
+ },
+ {
+ "multi_match": {
+ "query": query_string,
+ "fields": ["label", "definition"],
+ }
+ },
+ ]
+ }
+ }
+ }
else:
- data = (
- '\n{\n "query": {\n "bool": {\n "must" : [\n { "term" : { "type" : "term" } },\n { "multi_match" : {\n "query": "%s", \n "fields": [ "label", "definition" ] \n } }\n]\n }\n }\n}\n'
- % query_string
- )
+ data = {
+ "query": {
+ "bool": {
+ "must": [
+ {"term": {"type": "term"}},
+ {
+ "multi_match": {
+ "query": query_string,
+ "fields": ["label", "definition"],
+ }
+ },
+ ]
+ }
+ }
+ }
else:
print(
- "ERROR: Valid types for SciCrunch query are 'cde','pde', or 'fde'. You set type: %s "
- % type
+ f"ERROR: Valid types for SciCrunch query are 'cde','pde', or 'fde'. You set type: {type} "
)
print("ERROR: in function Utils.py/QuerySciCrunchElasticSearch")
exit(1)
response = requests.post(
"https://scicrunch.org/api/1/elastic-ilx/interlex/term/_search#",
- headers=headers,
params=params,
- data=data,
+ json=data,
)
return json.loads(response.text)
@@ -969,7 +1088,7 @@ def load_nidm_terms_concepts():
r.raise_for_status()
concept_graph = r.json()
except Exception:
- logging.info("Error opening %s used concepts file..continuing" % concept_url)
+ logging.info("Error opening %s used concepts file..continuing", concept_url)
return None
return concept_graph
@@ -1015,7 +1134,7 @@ def load_nidm_owl_files():
# union_graph=union_graph+temp_graph
#
# except Exception:
- # logging.info("Error opening %s import file..continuing" %os.path.join(imports_path,resource))
+ # logging.info("Error opening %s import file..continuing", os.path.join(imports_path,resource))
# continue
owls = [
@@ -1041,7 +1160,7 @@ def load_nidm_owl_files():
temp_graph.parse(location=resource, format="turtle")
union_graph = union_graph + temp_graph
except Exception:
- logging.info("Error opening %s owl file..continuing" % resource)
+ logging.info("Error opening %s owl file..continuing", resource)
continue
return union_graph
@@ -1186,7 +1305,7 @@ def getSubjIDColumn(column_to_terms, df):
if id_field is None:
option = 1
for column in df.columns:
- print("%d: %s" % (option, column))
+ print(f"{option}: {column}")
option = option + 1
selection = input("Please select the subject ID field from the list above: ")
id_field = df.columns[int(selection) - 1]
@@ -1330,7 +1449,7 @@ def map_variables_to_terms(
with open(json_source, "r") as f:
json_map = json.load(f)
else:
- print("ERROR: Can't open json mapping file: %s" % (json_source))
+ print(f"ERROR: Can't open json mapping file: {json_source}")
exit()
except Exception:
# if not then it's a json structure already
@@ -1417,8 +1536,7 @@ def map_variables_to_terms(
column_to_terms[current_tuple]["label"] = ""
print(
"No label or source_variable or sourceVariable keys found in json mapping file for variable "
- "%s. Consider adding these to the json file as they are important"
- % json_key[0]
+ f"{json_key[0]}. Consider adding these to the json file as they are important"
)
else:
column_to_terms[current_tuple]["label"] = json_map[json_key[0]][
@@ -1440,67 +1558,63 @@ def map_variables_to_terms(
print("\n" + ("*" * 85))
print(
- "Column %s already annotated in user supplied JSON mapping file"
- % column
- )
- print("label: %s" % column_to_terms[current_tuple]["label"])
- print(
- "description: %s"
- % column_to_terms[current_tuple]["description"]
+ f"Column {column} already annotated in user supplied JSON mapping file"
)
+ print("label:", column_to_terms[current_tuple]["label"])
+ print("description:", column_to_terms[current_tuple]["description"])
if "url" in json_map[json_key[0]]:
column_to_terms[current_tuple]["url"] = json_map[json_key[0]][
"url"
]
- print("url: %s" % column_to_terms[current_tuple]["url"])
- # print("Variable: %s" %column_to_terms[current_tuple]['variable'])
+ print("url:", column_to_terms[current_tuple]["url"])
+ # print("Variable:", column_to_terms[current_tuple]['variable'])
if "sameAs" in json_map[json_key[0]]:
column_to_terms[current_tuple]["sameAs"] = json_map[
json_key[0]
]["sameAs"]
- print("sameAs: %s" % column_to_terms[current_tuple]["sameAs"])
+ print("sameAs:", column_to_terms[current_tuple]["sameAs"])
if "url" in json_map[json_key[0]]:
column_to_terms[current_tuple]["url"] = json_map[json_key[0]][
"url"
]
- print("url: %s" % column_to_terms[current_tuple]["url"])
+ print("url:", column_to_terms[current_tuple]["url"])
if "source_variable" in json_map[json_key[0]]:
column_to_terms[current_tuple]["source_variable"] = json_map[
json_key[0]
]["source_variable"]
print(
- "source variable: %s"
- % column_to_terms[current_tuple]["source_variable"]
+ "source variable:",
+ column_to_terms[current_tuple]["source_variable"],
)
elif "sourceVariable" in json_map[json_key[0]]:
column_to_terms[current_tuple]["source_variable"] = json_map[
json_key[0]
]["sourceVariable"]
print(
- "source variable: %s"
- % column_to_terms[current_tuple]["source_variable"]
+ "source variable:",
+ column_to_terms[current_tuple]["source_variable"],
)
else:
# add source variable if not there...
column_to_terms[current_tuple]["source_variable"] = str(column)
- print("Added source variable (%s) to annotations" % column)
+ print(f"Added source variable ({column}) to annotations")
if "associatedWith" in json_map[json_key[0]]:
column_to_terms[current_tuple]["associatedWith"] = json_map[
json_key[0]
]["associatedWith"]
print(
- "associatedWith: %s"
- % column_to_terms[current_tuple]["associatedWith"]
+ "associatedWith:",
+ column_to_terms[current_tuple]["associatedWith"],
)
if "allowableValues" in json_map[json_key[0]]:
column_to_terms[current_tuple]["allowableValues"] = json_map[
json_key[0]
]["allowableValues"]
print(
- "allowableValues: %s"
- % column_to_terms[current_tuple]["allowableValues"]
+ "allowableValues:",
+ column_to_terms[current_tuple]["allowableValues"],
)
# added to support ReproSchema json format
@@ -1523,10 +1637,10 @@ def map_variables_to_terms(
"valueType"
]
print(
- "valueType: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "valueType:",
+ column_to_terms[current_tuple]["responseOptions"][
"valueType"
- ]
+ ],
)
elif "minValue" in subkey:
@@ -1542,10 +1656,10 @@ def map_variables_to_terms(
"minValue"
] = json_map[json_key[0]]["responseOptions"]["minValue"]
print(
- "minValue: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "minValue:",
+ column_to_terms[current_tuple]["responseOptions"][
"minValue"
- ]
+ ],
)
elif "maxValue" in subkey:
@@ -1561,10 +1675,10 @@ def map_variables_to_terms(
"maxValue"
] = json_map[json_key[0]]["responseOptions"]["maxValue"]
print(
- "maxValue: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "maxValue:",
+ column_to_terms[current_tuple]["responseOptions"][
"maxValue"
- ]
+ ],
)
elif "choices" in subkey:
if (
@@ -1579,10 +1693,10 @@ def map_variables_to_terms(
"choices"
] = json_map[json_key[0]]["responseOptions"]["choices"]
print(
- "levels: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "levels:",
+ column_to_terms[current_tuple]["responseOptions"][
"choices"
- ]
+ ],
)
elif "hasUnit" in subkey:
if (
@@ -1597,10 +1711,10 @@ def map_variables_to_terms(
"unitCode"
] = json_map[json_key[0]]["responseOptions"]["hasUnit"]
print(
- "units: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "units:",
+ column_to_terms[current_tuple]["responseOptions"][
"unitCode"
- ]
+ ],
)
elif "unitCode" in subkey:
if (
@@ -1615,10 +1729,10 @@ def map_variables_to_terms(
"unitCode"
] = json_map[json_key[0]]["responseOptions"]["unitCode"]
print(
- "units: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "units:",
+ column_to_terms[current_tuple]["responseOptions"][
"unitCode"
- ]
+ ],
)
if "levels" in json_map[json_key[0]]:
@@ -1632,10 +1746,10 @@ def map_variables_to_terms(
"choices"
] = json_map[json_key[0]]["levels"]
print(
- "choices: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "choices:",
+ column_to_terms[current_tuple]["responseOptions"][
"choices"
- ]
+ ],
)
elif "Levels" in json_map[json_key[0]]:
# upgrade 'levels' to 'responseOptions'->'choices'
@@ -1648,10 +1762,10 @@ def map_variables_to_terms(
"choices"
] = json_map[json_key[0]]["Levels"]
print(
- "levels: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "levels:",
+ column_to_terms[current_tuple]["responseOptions"][
"choices"
- ]
+ ],
)
if "valueType" in json_map[json_key[0]]:
@@ -1665,10 +1779,10 @@ def map_variables_to_terms(
"valueType"
] = json_map[json_key[0]]["valueType"]
print(
- "valueType: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "valueType:",
+ column_to_terms[current_tuple]["responseOptions"][
"valueType"
- ]
+ ],
)
if "minValue" in json_map[json_key[0]]:
@@ -1682,10 +1796,10 @@ def map_variables_to_terms(
"minValue"
] = json_map[json_key[0]]["minValue"]
print(
- "minValue: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "minValue:",
+ column_to_terms[current_tuple]["responseOptions"][
"minValue"
- ]
+ ],
)
elif "minimumValue" in json_map[json_key[0]]:
# upgrade 'minValue' to 'responseOptions'->'minValue
@@ -1698,10 +1812,10 @@ def map_variables_to_terms(
"minValue"
] = json_map[json_key[0]]["minimumValue"]
print(
- "minValue: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "minValue:",
+ column_to_terms[current_tuple]["responseOptions"][
"minValue"
- ]
+ ],
)
if "maxValue" in json_map[json_key[0]]:
@@ -1715,10 +1829,10 @@ def map_variables_to_terms(
"maxValue"
] = json_map[json_key[0]]["maxValue"]
print(
- "maxValue: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "maxValue:",
+ column_to_terms[current_tuple]["responseOptions"][
"maxValue"
- ]
+ ],
)
elif "maximumValue" in json_map[json_key[0]]:
# upgrade 'maxValue' to 'responseOptions'->'maxValue
@@ -1731,10 +1845,10 @@ def map_variables_to_terms(
"maxValue"
] = json_map[json_key[0]]["maximumValue"]
print(
- "maxValue: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "maxValue:",
+ column_to_terms[current_tuple]["responseOptions"][
"maxValue"
- ]
+ ],
)
if "hasUnit" in json_map[json_key[0]]:
# upgrade 'hasUnit' to 'responseOptions'->'unitCode
@@ -1747,10 +1861,10 @@ def map_variables_to_terms(
"unitCode"
] = json_map[json_key[0]]["hasUnit"]
print(
- "unitCode: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "unitCode:",
+ column_to_terms[current_tuple]["responseOptions"][
"unitCode"
- ]
+ ],
)
elif "Units" in json_map[json_key[0]]:
# upgrade 'Units' to 'responseOptions'->'unitCode
@@ -1763,10 +1877,10 @@ def map_variables_to_terms(
"unitCode"
] = json_map[json_key[0]]["Units"]
print(
- "unitCode: %s"
- % column_to_terms[current_tuple]["responseOptions"][
+ "unitCode:",
+ column_to_terms[current_tuple]["responseOptions"][
"unitCode"
- ]
+ ],
)
if "isAbout" in json_map[json_key[0]]:
@@ -1805,24 +1919,16 @@ def map_variables_to_terms(
}
)
print(
- "isAbout: %s = %s, %s = %s"
- % (
- "@id",
- subdict["@id"],
- "label",
- subdict["label"],
- )
+ f"isAbout: @id = {subdict['@id']}, label = {subdict['label']}"
)
else:
column_to_terms[current_tuple][
"isAbout"
].append({"@id": subdict["@id"]})
- print(
- "isAbout: %s = %s" % ("@id", subdict["@id"])
- )
+ print(f"isAbout: @id = {subdict['@id']}")
# for isabout_key,isabout_value in subdict.items():
# column_to_terms[current_tuple]['isAbout'].append({isabout_key:isabout_value})
- # print("isAbout: %s = %s" %(isabout_key, isabout_value))
+ # print(f"isAbout: {isabout_key} = {isabout_value}")
# if isAbout is a dictionary then we only have 1 isAbout...we'll upgrade it to a list
# to be consistent moving forward
else:
@@ -1861,13 +1967,7 @@ def map_variables_to_terms(
)
print(
- "isAbout: %s = %s, %s = %s"
- % (
- "@id",
- column_to_terms[current_tuple]["isAbout"]["@id"],
- "label",
- column_to_terms[current_tuple]["isAbout"]["label"],
- )
+ f"isAbout: @id = {column_to_terms[current_tuple]['isAbout']['@id']}, label = {column_to_terms[current_tuple]['isAbout']['label']}"
)
else:
# if user ran in mode where they want to associate concepts and this isn't the participant
@@ -1936,15 +2036,12 @@ def map_variables_to_terms(
# column_to_terms[subjid_tuple]['variable'] = str(column)
print(
- "Variable %s automatically mapped to participant/subject identifier"
- % search_term
- )
- print("Label: %s" % column_to_terms[subjid_tuple]["label"])
- print("Description: %s" % column_to_terms[subjid_tuple]["description"])
- # print("Url: %s" %column_to_terms[subjid_tuple]['url'])
- print(
- "Source Variable: %s" % column_to_terms[subjid_tuple]["source_variable"]
+ f"Variable {search_term} automatically mapped to participant/subject identifier"
)
+ print("Label:", column_to_terms[subjid_tuple]["label"])
+ print("Description:", column_to_terms[subjid_tuple]["description"])
+ # print("Url:", column_to_terms[subjid_tuple]['url'])
+ print("Source Variable:", column_to_terms[subjid_tuple]["source_variable"])
print("-" * 87)
continue
# if we haven't already found an annotation for this column then have user create one.
@@ -2063,7 +2160,7 @@ def write_json_mapping_file(source_variable_annotations, output_file, bids=False
) as fp:
json.dump(new_dict, fp, indent=4)
else:
- # logging.info("saving json mapping file: %s" %os.path.join(os.path.basename(output_file), \
+ # logging.info("saving json mapping file: %s", os.path.join(os.path.basename(output_file), \
# os.path.splitext(output_file)[0]+".json"))
with open(
os.path.join(
@@ -2123,7 +2220,7 @@ def find_concept_interactive(
option = 1
print()
print("Concept Association")
- print("Query String: %s " % search_term)
+ print(f"Query String: {search_term} ")
# modified by DBK 5/14/21 to start with nidm-terms used concepts
if nidmterms_concepts is not None:
@@ -2140,13 +2237,12 @@ def find_concept_interactive(
first_nidm_term = False
print(
- "%d: Label: %s \t Definition: %s \t URL: %s"
- % (
- option,
- nidmterms_concepts_query[key]["label"],
- nidmterms_concepts_query[key]["definition"],
- nidmterms_concepts_query[key]["url"],
- )
+ f"{option}: Label:",
+ nidmterms_concepts_query[key]["label"],
+ "\t Definition:",
+ nidmterms_concepts_query[key]["definition"],
+ "\t URL:",
+ nidmterms_concepts_query[key]["url"],
)
search_result[key] = {}
search_result[key]["label"] = nidmterms_concepts_query[key]["label"]
@@ -2167,20 +2263,19 @@ def find_concept_interactive(
)
# temp = ilx_result.copy()
- # print("Search Term: %s" %search_term)
+ # print("Search Term:", search_term)
if len(ilx_result) != 0:
print("InterLex:")
print()
# print("Search Results: ")
for key, _ in ilx_result.items():
print(
- "%d: Label: %s \t Definition: %s \t Preferred URL: %s "
- % (
- option,
- ilx_result[key]["label"],
- ilx_result[key]["definition"],
- ilx_result[key]["preferred_url"],
- )
+ f"{option}: Label:",
+ ilx_result[key]["label"],
+ "\t Definition:",
+ ilx_result[key]["definition"],
+ "\t Preferred URL:",
+ ilx_result[key]["preferred_url"],
)
search_result[key] = {}
@@ -2207,14 +2302,10 @@ def find_concept_interactive(
first_cogatlas_concept = False
print(
- "%d: Label: %s \t Definition: %s "
- % (
- option,
- cogatlas_concepts_query[key]["label"],
- cogatlas_concepts_query[key]["definition"].rstrip(
- "\r\n"
- ),
- )
+ f"{option}: Label:",
+ cogatlas_concepts_query[key]["label"],
+ "\t Definition: ",
+ cogatlas_concepts_query[key]["definition"].rstrip("\r\n"),
)
search_result[key] = {}
search_result[key]["label"] = cogatlas_concepts_query[key][
@@ -2239,14 +2330,10 @@ def find_concept_interactive(
for key, _ in cogatlas_disorders_query.items():
if cogatlas_disorders_query[key]["score"] > min_match_score + 20:
print(
- "%d: Label: %s \t Definition: %s "
- % (
- option,
- cogatlas_disorders_query[key]["label"],
- cogatlas_disorders_query[key]["definition"].rstrip(
- "\r\n"
- ),
- )
+ f"{option}: Label:",
+ cogatlas_disorders_query[key]["label"],
+ "\t Definition: ",
+ cogatlas_disorders_query[key]["definition"].rstrip("\r\n"),
)
search_result[key] = {}
search_result[key]["label"] = cogatlas_disorders_query[key][
@@ -2281,13 +2368,12 @@ def find_concept_interactive(
first_nidm_term = False
print(
- "%d: Label: %s \t Definition: %s \t URL: %s"
- % (
- option,
- nidm_constants_query[key]["label"],
- nidm_constants_query[key]["definition"],
- nidm_constants_query[key]["url"],
- )
+ f"{option}: Label:",
+ nidm_constants_query[key]["label"],
+ "\t Definition:",
+ nidm_constants_query[key]["definition"],
+ "\t URL:",
+ nidm_constants_query[key]["url"],
)
search_result[key] = {}
search_result[key]["label"] = nidm_constants_query[key]["label"]
@@ -2304,37 +2390,35 @@ def find_concept_interactive(
if ancestor:
# Broaden Interlex search
print(
- "%d: Broaden Search (includes interlex, cogatlas, and nidm ontology) "
- % option
+ f"{option}: Broaden Search (includes interlex, cogatlas, and nidm ontology) "
)
else:
# Narrow Interlex search
print(
- "%d: Narrow Search (includes nidm-terms previously used concepts) "
- % option
+ f"{option}: Narrow Search (includes nidm-terms previously used concepts) "
)
option = option + 1
# Add option to change query string
- print('%d: Change query string from: "%s"' % (option, search_term))
+ print(f'{option}: Change query string from: "{search_term}"')
# ####### DEFINE NEW CONCEPT COMMENTED OUT RIGHT NOW ##################
# # Add option to define your own term
# option = option + 1
- # print("%d: Define my own concept for this variable" % option)
+ # print(f"{option}: Define my own concept for this variable")
# ####### DEFINE NEW CONCEPT COMMENTED OUT RIGHT NOW ##################
# Add option to define your own term
option = option + 1
- print("%d: No concept needed for this variable" % option)
+ print(f"{option}: No concept needed for this variable")
print("*" * 87)
# Wait for user input
- selection = input("Please select an option (1:%d) from above: \t" % option)
+ selection = input(f"Please select an option (1:{option}) from above: \t")
# Make sure user selected one of the options. If not present user with selection input again
while (not selection.isdigit()) or (int(selection) > int(option)):
# Wait for user input
- selection = input("Please select an option (1:%d) from above: \t" % option)
+ selection = input(f"Please select an option (1:{option}) from above: \t")
# toggle use of ancestors in interlex query or not
if int(selection) == (option - 2):
@@ -2343,8 +2427,7 @@ def find_concept_interactive(
elif int(selection) == (option - 1):
# ask user for new search string
search_term = input(
- "Please input new search string for CSV column: %s \t:"
- % source_variable
+ f"Please input new search string for CSV column: {source_variable} \t:"
)
print("*" * 87)
@@ -2372,19 +2455,17 @@ def find_concept_interactive(
"label": search_result[search_result[selection]]["label"],
}
)
- print(
- "\nConcept annotation added for source variable: %s" % source_variable
- )
+ print("\nConcept annotation added for source variable:", source_variable)
go_loop = False
def define_new_concept(source_variable, ilx_obj):
# user wants to define their own term. Ask for term label and definition
- print("\nYou selected to enter a new concept for CSV column: %s" % source_variable)
+ print("\nYou selected to enter a new concept for CSV column:", source_variable)
# collect term information from user
concept_label = input(
- "Please enter a label for the new concept [%s]:\t" % source_variable
+ f"Please enter a label for the new concept [{source_variable}]:\t"
)
concept_definition = input("Please enter a definition for this concept:\t")
@@ -2408,13 +2489,13 @@ def annotate_data_element(source_variable, current_tuple, source_variable_annota
# user instructions
print(
- "\nYou will now be asked a series of questions to annotate your term: %s"
- % source_variable
+ "\nYou will now be asked a series of questions to annotate your term:",
+ source_variable,
)
# collect term information from user
term_label = input(
- "Please enter a full name to associate with the term [%s]:\t" % source_variable
+ f"Please enter a full name to associate with the term [{source_variable}]:\t"
)
if term_label == "":
term_label = source_variable
@@ -2501,11 +2582,10 @@ def annotate_data_element(source_variable, current_tuple, source_variable_annota
for category in range(1, int(num_categories) + 1):
# term category dictionary has labels as keys and value associated with label as value
cat_label = input(
- "Please enter the text string label for the category %d:\t"
- % category
+ f"Please enter the text string label for the category {category}:\t"
)
cat_value = input(
- 'Please enter the value associated with label "%s":\t' % cat_label
+ f'Please enter the value associated with label "{cat_label}":\t'
)
term_category[cat_label] = cat_value
@@ -2515,8 +2595,7 @@ def annotate_data_element(source_variable, current_tuple, source_variable_annota
for category in range(1, int(num_categories) + 1):
# term category dictionary has labels as keys and value associated with label as value
cat_label = input(
- "Please enter the text string label for the category %d:\t"
- % category
+ f"Please enter the text string label for the category {category}:\t"
)
term_category.append(cat_label)
@@ -2580,39 +2659,39 @@ def annotate_data_element(source_variable, current_tuple, source_variable_annota
# print mappings
print("\n" + ("*" * 85))
- print("Stored mapping: %s -> " % source_variable)
- print("label: %s" % source_variable_annotations[current_tuple]["label"])
+ print(f"Stored mapping: {source_variable} -> ")
+ print("label:", source_variable_annotations[current_tuple]["label"])
print(
- "source variable: %s"
- % source_variable_annotations[current_tuple]["source_variable"]
+ "source variable:",
+ source_variable_annotations[current_tuple]["source_variable"],
)
- print("description: %s" % source_variable_annotations[current_tuple]["description"])
+ print("description:", source_variable_annotations[current_tuple]["description"])
print(
- "valueType: %s"
- % source_variable_annotations[current_tuple]["responseOptions"]["valueType"]
+ "valueType:",
+ source_variable_annotations[current_tuple]["responseOptions"]["valueType"],
)
# left for legacy purposes
if "hasUnit" in source_variable_annotations[current_tuple]:
- print("hasUnit: %s" % source_variable_annotations[current_tuple]["hasUnit"])
+ print("hasUnit:", source_variable_annotations[current_tuple]["hasUnit"])
elif "unitCode" in source_variable_annotations[current_tuple]["responseOptions"]:
print(
- "hasUnit: %s"
- % source_variable_annotations[current_tuple]["responseOptions"]["unitCode"]
+ "hasUnit:",
+ source_variable_annotations[current_tuple]["responseOptions"]["unitCode"],
)
if "minValue" in source_variable_annotations[current_tuple]["responseOptions"]:
print(
- "minimumValue: %s"
- % source_variable_annotations[current_tuple]["responseOptions"]["minValue"]
+ "minimumValue:",
+ source_variable_annotations[current_tuple]["responseOptions"]["minValue"],
)
if "maxValue" in source_variable_annotations[current_tuple]["responseOptions"]:
print(
- "maximumValue: %s"
- % source_variable_annotations[current_tuple]["responseOptions"]["maxValue"]
+ "maximumValue:",
+ source_variable_annotations[current_tuple]["responseOptions"]["maxValue"],
)
if term_datatype == URIRef(Constants.XSD["complexType"]):
print(
- "choices: %s"
- % source_variable_annotations[current_tuple]["responseOptions"]["choices"]
+ "choices:",
+ source_variable_annotations[current_tuple]["responseOptions"]["choices"],
)
print("-" * 87)
@@ -2888,7 +2967,7 @@ def addGitAnnexSources(obj, bids_root, filepath=None):
return len(sources)
except Exception:
# if "No annex found at" not in str(e):
- # print("Warning, error with AnnexRepo (Utils.py, addGitAnnexSources): %s" %str(e))
+ # print("Warning, error with AnnexRepo (Utils.py, addGitAnnexSources):", e)
return 0
diff --git a/src/nidm/experiment/tools/bidsmri2nidm.py b/src/nidm/experiment/tools/bidsmri2nidm.py
index ebad76a..b3b9985 100755
--- a/src/nidm/experiment/tools/bidsmri2nidm.py
+++ b/src/nidm/experiment/tools/bidsmri2nidm.py
@@ -159,7 +159,7 @@ def main():
level=logging.DEBUG,
)
# add some logging info
- logging.info("bidsmri2nidm %s" % args)
+ logging.info("bidsmri2nidm %s", args)
# if args.owl is None:
# args.owl = 'nidm'
@@ -234,7 +234,7 @@ def main():
def addbidsignore(directory, filename_to_add):
- logging.info("Adding file %s to %s/.bidsignore..." % (filename_to_add, directory))
+ logging.info("Adding file %s to %s/.bidsignore...", filename_to_add, directory)
# adds filename_to_add to .bidsignore file in directory
if not isfile(os.path.join(directory, ".bidsignore")):
with open(os.path.join(directory, ".bidsignore"), "w") as text_file:
@@ -309,8 +309,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
- % file_tpl.entities["suffix"]
+ "WARNING: No matching image contrast type found in BIDS_Constants.py for %s",
+ file_tpl.entities["suffix"],
)
# add image usage type
@@ -324,8 +324,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
- % file_tpl.entities["datatype"]
+ "WARNING: No matching image usage type found in BIDS_Constants.py for %s",
+ file_tpl.entities["datatype"],
)
# add file link
# make relative link to
@@ -364,8 +364,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files..."
- % join(directory, file_tpl.dirname, file_tpl.filename)
+ "WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files...",
+ join(directory, file_tpl.dirname, file_tpl.filename),
)
# get associated JSON file if exists
# There is T1w.json file with information
@@ -419,7 +419,7 @@ def addimagingsessions(
else:
logging.critical(
- "Error: BIDS directory %s does not exist!" % os.path.join(directory)
+ "Error: BIDS directory %s does not exist!", os.path.join(directory)
)
exit(-1)
@@ -450,8 +450,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
- % file_tpl.entities["suffix"]
+ "WARNING: No matching image contrast type found in BIDS_Constants.py for %s",
+ file_tpl.entities["suffix"],
)
# add image usage type
@@ -465,8 +465,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
- % file_tpl.entities["datatype"]
+ "WARNING: No matching image usage type found in BIDS_Constants.py for %s",
+ file_tpl.entities["datatype"],
)
# make relative link to
acq_obj.add_attributes(
@@ -505,8 +505,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files..."
- % join(directory, file_tpl.dirname, file_tpl.filename)
+ "WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files...",
+ join(directory, file_tpl.dirname, file_tpl.filename),
)
if "run" in file_tpl.entities:
@@ -614,7 +614,7 @@ def addimagingsessions(
dataset = {}
else:
logging.critical(
- "Error: BIDS directory %s does not exist!" % os.path.join(directory)
+ "Error: BIDS directory %s does not exist!", os.path.join(directory)
)
exit(-1)
@@ -650,8 +650,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
- % file_tpl.entities["suffix"]
+ "WARNING: No matching image contrast type found in BIDS_Constants.py for %s",
+ file_tpl.entities["suffix"],
)
# add image usage type
if file_tpl.entities["datatype"] in BIDS_Constants.scans:
@@ -660,8 +660,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
- % file_tpl.entities["datatype"]
+ "WARNING: No matching image usage type found in BIDS_Constants.py for %s",
+ file_tpl.entities["datatype"],
)
# make relative link to
acq_obj.add_attributes(
@@ -682,8 +682,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files..."
- % join(directory, file_tpl.dirname, file_tpl.filename)
+ "WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files...",
+ join(directory, file_tpl.dirname, file_tpl.filename),
)
# add git-annex/datalad info if exists
@@ -749,8 +749,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
- % file_tpl.entities["suffix"]
+ "WARNING: No matching image contrast type found in BIDS_Constants.py for %s",
+ file_tpl.entities["suffix"],
)
# add image usage type
@@ -760,8 +760,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
- % file_tpl.entities["datatype"]
+ "WARNING: No matching image usage type found in BIDS_Constants.py for %s",
+ file_tpl.entities["datatype"],
)
# make relative link to
acq_obj.add_attributes(
@@ -782,8 +782,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files..."
- % join(directory, file_tpl.dirname, file_tpl.filename)
+ "WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files...",
+ join(directory, file_tpl.dirname, file_tpl.filename),
)
# add git-annex/datalad info if exists
@@ -883,8 +883,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files..."
- % join(directory, file_tpl.dirname, file_tpl.filename)
+ "WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files...",
+ join(directory, file_tpl.dirname, file_tpl.filename),
)
acq_obj_bvec = AcquisitionObject(acq)
acq_obj_bvec.add_attributes({PROV_TYPE: BIDS_Constants.scans["bvec"]})
@@ -938,8 +938,8 @@ def addimagingsessions(
)
else:
logging.info(
- "WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files..."
- % join(directory, file_tpl.dirname, file_tpl.filename)
+ "WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files...",
+ join(directory, file_tpl.dirname, file_tpl.filename),
)
# link bval and bvec acquisition object entities together or is their association with DWI scan...
@@ -961,7 +961,7 @@ def bidsmri2project(directory, args):
exit("-1")
else:
logging.critical(
- "Error: BIDS directory %s does not exist!" % os.path.join(directory)
+ "Error: BIDS directory %s does not exist!", os.path.join(directory)
)
exit("-1")
@@ -1279,7 +1279,7 @@ def bidsmri2project(directory, args):
# create acquisition objects for each scan for each subject
# loop through all subjects in dataset
for subject_id in bids_layout.get_subjects():
- logging.info("Converting subject: %s" % subject_id)
+ logging.info("Converting subject: %s", subject_id)
# skip .git directories...added to support datalad datasets
if subject_id.startswith("."):
continue
diff --git a/src/nidm/experiment/tools/csv2nidm.py b/src/nidm/experiment/tools/csv2nidm.py
index 76251a1..f5af966 100644
--- a/src/nidm/experiment/tools/csv2nidm.py
+++ b/src/nidm/experiment/tools/csv2nidm.py
@@ -195,7 +195,7 @@ def main():
level=logging.DEBUG,
)
# add some logging info
- logging.info("csv2nidm %s" % args)
+ logging.info("csv2nidm %s", args)
# If user has added an existing NIDM file as a command line parameter then add to existing file for subjects who exist in the NIDM file
if args.nidm_file:
@@ -232,7 +232,7 @@ def main():
if id_field is None:
option = 1
for column in df.columns:
- print("%d: %s" % (option, column))
+ print(f"{option}: {column}")
option = option + 1
selection = input(
"Please select the subject ID field from the list above: "
@@ -242,7 +242,6 @@ def main():
# Wait for user input
selection = input(
"Please select the subject ID field from the list above: \t"
- % option
)
id_field = df.columns[int(selection) - 1]
# make sure id_field is a string for zero-padded subject ids
@@ -269,7 +268,7 @@ def main():
# qres = rdf_graph.query(query)
for _, row in qres.iterrows():
- logging.info("participant in NIDM file %s \t %s" % (row[0], row[1]))
+ logging.info("participant in NIDM file %s \t %s", row[0], row[1])
# find row in CSV file with subject id matching agent from NIDM file
# csv_row = df.loc[df[id_field]==type(df[id_field][0])(row[1])]
@@ -390,7 +389,7 @@ def main():
if id_field is None:
option = 1
for column in df.columns:
- print("%d: %s" % (option, column))
+ print(f"{option}: {column}")
option = option + 1
selection = input(
"Please select the subject ID field from the list above: "
@@ -400,7 +399,6 @@ def main():
# Wait for user input
selection = input(
"Please select the subject ID field from the list above: \t"
- % option
)
id_field = df.columns[int(selection) - 1]
# make sure id_field is a string for zero-padded subject ids
diff --git a/src/nidm/experiment/tools/nidm2bids.py b/src/nidm/experiment/tools/nidm2bids.py
index 0e19fa0..aa4774b 100644
--- a/src/nidm/experiment/tools/nidm2bids.py
+++ b/src/nidm/experiment/tools/nidm2bids.py
@@ -41,7 +41,6 @@
from os import mkdir, system
from os.path import basename, isdir, isfile, join, splitext
from shutil import copyfile
-import sys
import tempfile
import urllib.parse
import datalad.api as dl
@@ -69,7 +68,7 @@ def GetImageFromAWS(location, output_file, args):
:return: None if file not downloaded else will return True
"""
- print("Trying AWS S3 for dataset: %s" % location)
+ print(f"Trying AWS S3 for dataset: {location}")
# modify location to remove everything before the dataset name
# problem is we don't know the dataset identifier inside the path string because
# it doesn't have any constraints. For openneuro datasets they start with "ds" so
@@ -154,7 +153,7 @@ def GetImageFromURL(url):
temp.flush()
return temp.name
except Exception:
- print("ERROR! Can't open url: %s" % url)
+ print(f"ERROR! Can't open url: {url}")
return -1
@@ -165,22 +164,19 @@ def GetDataElementMetadata(nidm_graph, de_uuid):
"""
# query nidm_graph for Constants.NIIRI[de_uuid] rdf:type PersonalDataElement
- query = (
- """
+ query = f"""
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX niiri: <http://iri.nidash.org/>
PREFIX nidm: <http://purl.org/nidash/nidm#>
select distinct ?p ?o
- where {
+ where {{
- <%s> rdf:type nidm:PersonalDataElement ;
+ <{Constants.NIIRI[de_uuid]}> rdf:type nidm:PersonalDataElement ;
?p ?o .
- }
+ }}
"""
- % Constants.NIIRI[de_uuid]
- )
# print(query)
qres = nidm_graph.query(query)
@@ -220,21 +216,18 @@ def GetDataElementMetadata(nidm_graph, de_uuid):
# if this is a list we have to loop through the entries and store the url and labels
for entry in value:
# query for label for this isAbout URL
- query = (
- """
+ query = f"""
prefix prov: <http://www.w3.org/ns/prov#>
prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
select distinct ?label
- where {
- <%s> rdf:type prov:Entity ;
+ where {{
+ <{entry}> rdf:type prov:Entity ;
rdfs:label ?label .
- }
+ }}
"""
- % entry
- )
# print(query)
qres = nidm_graph.query(query)
@@ -245,21 +238,18 @@ def GetDataElementMetadata(nidm_graph, de_uuid):
else:
# only 1 isAbout entry
# query for label for this isAbout URL
- query = (
- """
+ query = f"""
prefix prov: <http://www.w3.org/ns/prov#>
prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
select distinct ?label
- where {
- <%s> rdf:type prov:Entity ;
+ where {{
+ <{value}> rdf:type prov:Entity ;
rdfs:label ?label .
- }
+ }}
"""
- % value
- )
# print(query)
qres = nidm_graph.query(query)
for row in qres:
@@ -348,7 +338,7 @@ def CreateBIDSParticipantFile(nidm_graph, output_file, participant_fields):
#
# Steps(1):(3)
- query = """
+ query = f"""
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX onli: <http://neurolog.unice.fr/ontoneurolog/v3.0/instrument.owl#>
@@ -356,20 +346,17 @@ def CreateBIDSParticipantFile(nidm_graph, output_file, participant_fields):
PREFIX niiri: <http://iri.nidash.org/>
SELECT DISTINCT ?pred ?value
- WHERE {
+ WHERE {{
?asses_activity prov:qualifiedAssociation ?_blank .
?_blank rdf:type prov:Association ;
- prov:agent <%s> ;
+ prov:agent <{subj_uri}> ;
prov:hadRole sio:Subject .
?entities prov:wasGeneratedBy ?asses_activity ;
rdf:type onli:assessment-instrument ;
?pred ?value .
- FILTER (regex(str(?pred) ,"%s","i" ))
- }""" % (
- subj_uri,
- fields,
- )
+ FILTER (regex(str(?pred) ,"{fields}","i" ))
+ }}"""
# print(query)
qres = nidm_graph.query(query)
@@ -431,8 +418,8 @@ def NIDMProject2BIDSDatasetDescriptor(nidm_graph, output_directory):
# iterate over the temporary dictionary and delete items from the original
for proj_key, _ in project_metadata_tmp.items():
key_found = 0
- # print("proj_key = %s " % proj_key)
- # print("project_metadata[proj_key] = %s" %project_metadata[proj_key])
+ # print(f"proj_key = {proj_key} ")
+ # print(f"project_metadata[proj_key] = {project_metadata[proj_key]}")
for key, _ in BIDS_Constants.dataset_description.items():
if BIDS_Constants.dataset_description[key]._uri == proj_key:
@@ -464,15 +451,12 @@ def AddMetadataToImageSidecar(graph_entity, graph, output_directory, image_filen
"""
# query graph for metadata associated with graph_entity
- query = (
- """
+ query = f"""
Select DISTINCT ?p ?o
- WHERE {
- <%s> ?p ?o .
- }
+ WHERE {{
+ <{graph_entity}> ?p ?o .
+ }}
"""
- % graph_entity
- )
qres = graph.query(query)
# dictionary to store metadata
@@ -560,12 +544,11 @@ def ProcessFiles(graph, scan_type, output_directory, project_location, args):
ret = GetImageFromURL(location)
if ret == -1:
print(
- "ERROR! Can't download file: %s from url: %s, trying to copy locally...."
- % (filename, location)
+ f"ERROR! Can't download file: {filename} from url: {location}, trying to copy locally...."
)
if "file" in location:
location = str(location).lstrip("file:")
- print("Trying to copy file from %s" % (location))
+ print(f"Trying to copy file from {location}")
try:
copyfile(
location,
@@ -579,23 +562,20 @@ def ProcessFiles(graph, scan_type, output_directory, project_location, args):
except Exception:
print(
- "ERROR! Failed to find file %s on filesystem..."
- % location
+ f"ERROR! Failed to find file {location} on filesystem..."
)
if not args.no_downloads:
try:
print(
- "Running datalad get command on dataset: %s"
- % location
+ f"Running datalad get command on dataset: {location}"
)
dl.Dataset(os.path.dirname(location)).get(
recursive=True, jobs=1
)
- except Exception:
+ except Exception as e:
print(
- "ERROR! Datalad returned error: %s for dataset %s."
- % (sys.exc_info()[0], location)
+ f"ERROR! Datalad returned error: {type(e)} for dataset {location}."
)
GetImageFromAWS(
location=location,
@@ -635,19 +615,16 @@ def ProcessFiles(graph, scan_type, output_directory, project_location, args):
# if this is a DWI scan then we should copy over the b-value and b-vector files
if bids_ext == "dwi":
# search for entity uuid with rdf:type nidm:b-value that was generated by activity
- query = (
- """
+ query = f"""
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX nidm: <http://purl.org/nidash/nidm#>
SELECT DISTINCT ?entity
- WHERE {
+ WHERE {{
?entity rdf:type ;
- prov:wasGeneratedBy <%s> .
- }"""
- % activity
- )
+ prov:wasGeneratedBy <{activity}> .
+ }}"""
# print(query)
qres = graph.query(query)
@@ -663,12 +640,11 @@ def ProcessFiles(graph, scan_type, output_directory, project_location, args):
ret = GetImageFromURL(location)
if ret == -1:
print(
- "ERROR! Can't download file: %s from url: %s, trying to copy locally...."
- % (filename, location)
+ f"ERROR! Can't download file: {filename} from url: {location}, trying to copy locally...."
)
if "file" in location:
location = str(location).lstrip("file:")
- print("Trying to copy file from %s" % (location))
+ print(f"Trying to copy file from {location}")
try:
copyfile(
location,
@@ -681,23 +657,20 @@ def ProcessFiles(graph, scan_type, output_directory, project_location, args):
)
except Exception:
print(
- "ERROR! Failed to find file %s on filesystem..."
- % location
+ f"ERROR! Failed to find file {location} on filesystem..."
)
if not args.no_downloads:
try:
print(
- "Running datalad get command on dataset: %s"
- % location
+ f"Running datalad get command on dataset: {location}"
)
dl.Dataset(os.path.dirname(location)).get(
recursive=True, jobs=1
)
- except Exception:
+ except Exception as e:
print(
- "ERROR! Datalad returned error: %s for dataset %s."
- % (sys.exc_info()[0], location)
+ f"ERROR! Datalad returned error: {type(e)} for dataset {location}."
)
GetImageFromAWS(
location=location,
@@ -710,19 +683,16 @@ def ProcessFiles(graph, scan_type, output_directory, project_location, args):
args=args,
)
# search for entity uuid with rdf:type nidm:b-value that was generated by activity
- query = (
- """
+ query = f"""
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX nidm: <http://purl.org/nidash/nidm#>
SELECT DISTINCT ?entity
- WHERE {
+ WHERE {{
?entity rdf:type ;
- prov:wasGeneratedBy <%s> .
- }"""
- % activity
- )
+ prov:wasGeneratedBy <{activity}> .
+ }}"""
# print(query)
qres = graph.query(query)
@@ -738,12 +708,11 @@ def ProcessFiles(graph, scan_type, output_directory, project_location, args):
ret = GetImageFromURL(location)
if ret == -1:
print(
- "ERROR! Can't download file: %s from url: %s, trying to copy locally...."
- % (filename, location)
+ f"ERROR! Can't download file: {filename} from url: {location}, trying to copy locally...."
)
if "file" in location:
location = str(location).lstrip("file:")
- print("Trying to copy file from %s" % (location))
+ print(f"Trying to copy file from {location}")
try:
copyfile(
location,
@@ -756,23 +725,20 @@ def ProcessFiles(graph, scan_type, output_directory, project_location, args):
)
except Exception:
print(
- "ERROR! Failed to find file %s on filesystem..."
- % location
+ f"ERROR! Failed to find file {location} on filesystem..."
)
if not args.no_downloads:
try:
print(
- "Running datalad get command on dataset: %s"
- % location
+ f"Running datalad get command on dataset: {location}"
)
dl.Dataset(os.path.dirname(location)).get(
recursive=True, jobs=1
)
- except Exception:
+ except Exception as e:
print(
- "ERROR! Datalad returned error: %s for dataset %s."
- % (sys.exc_info()[0], location)
+ f"ERROR! Datalad returned error: {type(e)} for dataset {location}."
)
GetImageFromAWS(
location=location,
@@ -897,7 +863,7 @@ def main():
format_found = False
for fmt in "turtle", "xml", "n3", "trix", "rdfa":
try:
- print("Reading RDF file as %s..." % fmt)
+ print(f"Reading RDF file as {fmt}...")
# load NIDM graph into NIDM-Exp API objects
nidm_project = read_nidm(rdf_file)
# temporary save nidm_project
@@ -907,7 +873,7 @@ def main():
format_found = True
break
except Exception:
- print("File: %s appears to be an invalid %s RDF file" % (rdf_file, fmt))
+ print(f"File: {rdf_file} appears to be an invalid {fmt} RDF file")
if not format_found:
print(
diff --git a/src/nidm/experiment/tools/nidm_affinity_propagation.py b/src/nidm/experiment/tools/nidm_affinity_propagation.py
index f10bcc5..c3a8020 100644
--- a/src/nidm/experiment/tools/nidm_affinity_propagation.py
+++ b/src/nidm/experiment/tools/nidm_affinity_propagation.py
@@ -366,7 +366,7 @@ def ap():
labels = af.labels_
n_clusters_ = len(cluster_center_indices)
- print("Estimated number of clusters: %d" % n_clusters_)
+ print(f"Estimated number of clusters: {n_clusters_}")
# print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
# print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
# print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
diff --git a/src/nidm/experiment/tools/nidm_linreg.py b/src/nidm/experiment/tools/nidm_linreg.py
index 064b107..1af6d6f 100644
--- a/src/nidm/experiment/tools/nidm_linreg.py
+++ b/src/nidm/experiment/tools/nidm_linreg.py
@@ -650,7 +650,7 @@ def contrasting():
# Defining the Simple class
def _name_levels(prefix, levels):
- return ["[%s%s]" % (prefix, level) for level in levels]
+ return [f"[{prefix}{level}]" for level in levels]
class Simple(object):
def _simple_contrast(self, levels):
@@ -789,10 +789,9 @@ def regularizing():
lassoModelChosen.fit(X, y)
print("\nLasso regression model:")
print(
- "Alpha with maximum likelihood (range: 1 to %d) = %f"
- % (MAX_ALPHA, max_cross_val_alpha)
+ f"Alpha with maximum likelihood (range: 1 to {MAX_ALPHA}) = {max_cross_val_alpha}"
)
- print("Current Model Score = %f" % (lassoModelChosen.score(X, y)))
+ print(f"Current Model Score = {lassoModelChosen.score(X, y)}")
index = 0
print("\nCoefficients:")
if o is not None:
@@ -800,21 +799,20 @@ def regularizing():
with open(o, "a") as f:
f.write("\n\nLasso regression model:")
f.write(
- "\nAlpha with maximum likelihood (range: 1 to %d) = %f"
- % (MAX_ALPHA, max_cross_val_alpha)
+ f"\nAlpha with maximum likelihood (range: 1 to {MAX_ALPHA}) = {max_cross_val_alpha}"
)
- f.write("\nCurrent Model Score = %f" % (lassoModelChosen.score(X, y)))
+ f.write(f"\nCurrent Model Score = {lassoModelChosen.score(X, y)}")
f.write("\n\nCoefficients:")
for var in full_model_variable_list:
- print("%s \t %f" % (var, lassoModelChosen.coef_[index]))
+ print(f"{var} \t {lassoModelChosen.coef_[index]}")
if o is not None:
with open(o, "a") as f:
- f.write("\n%s \t %f" % (var, lassoModelChosen.coef_[index]))
+ f.write(f"\n{var} \t {lassoModelChosen.coef_[index]}")
index = index + 1
- print("Intercept: %f" % (lassoModelChosen.intercept_))
+ print(f"Intercept: {lassoModelChosen.intercept_}")
if o is not None:
with open(o, "a") as f:
- f.write("\nIntercept: %f" % (lassoModelChosen.intercept_))
+ f.write(f"\nIntercept: {lassoModelChosen.intercept_}")
print()
if r == ("L2" or "Ridge" or "l2" or "Ridge") and not (
@@ -840,10 +838,9 @@ def regularizing():
ridgeModelChosen.fit(X, y)
print("\nRidge regression model:")
print(
- "Alpha with maximum likelihood (range: 1 to %d) = %f"
- % (MAX_ALPHA, max_cross_val_alpha)
+ f"Alpha with maximum likelihood (range: 1 to {MAX_ALPHA}) = {max_cross_val_alpha}"
)
- print("Current Model Score = %f" % (ridgeModelChosen.score(X, y)))
+ print(f"Current Model Score = {ridgeModelChosen.score(X, y)}")
index = 0
"""This numpy_conversion part was necessary because for the ridge model, all the coefficients get stored in a
numpy array, and the conversion is necessary to get the coefficients. However, it is only needed if the model
@@ -857,37 +854,36 @@ def regularizing():
with open(o, "a") as f:
f.write("\n\nRidge regression model:")
f.write(
- "\nAlpha with maximum likelihood (range: 1 to %d) = %f"
- % (MAX_ALPHA, max_cross_val_alpha)
+ f"\nAlpha with maximum likelihood (range: 1 to {MAX_ALPHA}) = {max_cross_val_alpha}"
)
- f.write("\nCurrent Model Score = %f" % (ridgeModelChosen.score(X, y)))
+ f.write(f"\nCurrent Model Score = {ridgeModelChosen.score(X, y)}")
f.write("\n\nCoefficients:")
print("\nCoefficients:")
if numpy_conversion:
coeff_list = ridgeModelChosen.coef_[index].tolist()
coeff_list.pop(0)
for var in full_model_variable_list:
- print("%s \t %f" % (var, coeff_list[index]))
+ print(f"{var} \t {coeff_list[index]}")
if o is not None:
with open(o, "a") as f:
- f.write("\n%s \t %f" % (var, coeff_list[index]))
+ f.write(f"\n{var} \t {coeff_list[index]}")
index = index + 1
- print("Intercept: %f" % (ridgeModelChosen.intercept_))
+ print(f"Intercept: {ridgeModelChosen.intercept_}")
if o is not None:
with open(o, "a") as f:
- f.write("\nIntercept: %f" % (ridgeModelChosen.intercept_))
+ f.write(f"\nIntercept: {ridgeModelChosen.intercept_}")
print()
else:
for var in full_model_variable_list:
- print("%s \t %f" % (var, ridgeModelChosen.coef_[index]))
+ print(f"{var} \t {ridgeModelChosen.coef_[index]}")
if o is not None:
with open(o, "a") as f:
- f.write("\n%s \t %f" % (var, ridgeModelChosen.coef_[index]))
+ f.write(f"\n{var} \t {ridgeModelChosen.coef_[index]}")
index = index + 1
- print("Intercept: %f" % (ridgeModelChosen.intercept_))
+ print(f"Intercept: {ridgeModelChosen.intercept_}")
if o is not None:
with open(o, "a") as f:
- f.write("\nIntercept: %f" % (ridgeModelChosen.intercept_))
+ f.write(f"\nIntercept: {ridgeModelChosen.intercept_}")
print()
diff --git a/src/nidm/experiment/tools/nidm_merge.py b/src/nidm/experiment/tools/nidm_merge.py
index 3b0bd7f..9e78940 100644
--- a/src/nidm/experiment/tools/nidm_merge.py
+++ b/src/nidm/experiment/tools/nidm_merge.py
@@ -86,8 +86,7 @@ def merge(nidm_file_list, s, out_file):
# for each UUID / subject ID look in graph and see if you can find the same ID. If so get the UUID of
# that prov:agent and change all the UUIDs in nidm_file to match then concatenate the two graphs.
- query = (
- """
+ query = f"""
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX sio: <http://semanticscience.org/ontology/sio.owl#>
@@ -96,14 +95,12 @@ def merge(nidm_file_list, s, out_file):
PREFIX prov: <http://www.w3.org/ns/prov#>
SELECT DISTINCT ?uuid ?ID
- WHERE {
+ WHERE {{
?uuid a prov:Agent ;
- %s ?ID .
+ {Constants.NIDM_SUBJECTID} ?ID .
FILTER(?ID =
"""
- % Constants.NIDM_SUBJECTID
- )
# add filters to above query to only look for subject IDs which are in the first file to merge into
temp = True
@@ -135,15 +132,15 @@ def merge(nidm_file_list, s, out_file):
for s, p, o in graph.triples((None, None, None)):
if s == row["uuid"]:
- # print("replacing subject in triple %s %s %s with %s" %(s,p,o,uuid_to_replace))
+ # print(f"replacing subject in triple {s} {p} {o} with {uuid_to_replace}")
graph.add((uuid_replacement, p, o))
graph.remove((row["uuid"], p, o))
elif o == row["uuid"]:
- # print("replacing object in triple %s %s %s with %s" %(s,p,o,uuid_to_replace))
+ # print(f"replacing object in triple {s} {p} {o} with {uuid_to_replace}")
graph.add((s, p, uuid_replacement))
graph.remove((s, p, row["uuid"]))
elif p == row["uuid"]:
- # print("replacing predicate in triple %s %s %s with %s" %(s,p,o,uuid_to_replace))
+ # print(f"replacing predicate in triple {s} {p} {o} with {uuid_to_replace}")
graph.add((s, uuid_replacement, o))
graph.remove((s, row["uuid"], o))
diff --git a/src/nidm/experiment/tools/nidm_query.py b/src/nidm/experiment/tools/nidm_query.py
index 00921e4..358dffe 100644
--- a/src/nidm/experiment/tools/nidm_query.py
+++ b/src/nidm/experiment/tools/nidm_query.py
@@ -167,7 +167,7 @@ def query(
if blaze:
os.environ["BLAZEGRAPH_URL"] = blaze
- print("setting BLAZEGRAPH_URL to {}".format(blaze))
+ print(f"setting BLAZEGRAPH_URL to {blaze}")
if get_participants:
df = GetParticipantIDs(nidm_file_list.split(","), output_file=output_file)
diff --git a/src/nidm/experiment/tools/nidm_version.py b/src/nidm/experiment/tools/nidm_version.py
index fbfd3f9..99d1830 100644
--- a/src/nidm/experiment/tools/nidm_version.py
+++ b/src/nidm/experiment/tools/nidm_version.py
@@ -8,7 +8,7 @@ def version():
"""
This function will print the version of pynidm.
"""
- print("PyNIDM Version: %s" % __version__)
+ print(f"PyNIDM Version: {__version__}")
if __name__ == "__main__":
diff --git a/src/nidm/experiment/tools/repronim_simple2_brainvolumes.py b/src/nidm/experiment/tools/repronim_simple2_brainvolumes.py
index aab74a1..5348696 100644
--- a/src/nidm/experiment/tools/repronim_simple2_brainvolumes.py
+++ b/src/nidm/experiment/tools/repronim_simple2_brainvolumes.py
@@ -289,7 +289,7 @@ def add_brainvolume_data(
qres = rdf_graph_parse.query(query)
for row in qres:
- print("%s \t %s" % (row[2], row[1]))
+ print(f"{row[2]} \t {row[1]}")
# find row in CSV file with subject id matching agent from NIDM file
# csv_row = df.loc[df[id_field]==type(df[id_field][0])(row[1])]
@@ -304,7 +304,7 @@ def add_brainvolume_data(
# if there was data about this subject in the NIDM file already (i.e. an agent already exists with this subject id)
# then add this brain volumes data to NIDM file, else skip it....
if not (len(csv_row.index) == 0):
- print("found other data for participant %s" % row[1])
+ print(f"found other data for participant {row[1]}")
# Here we're sure we have an agent in the NIDM graph that corresponds to the participant in the
# brain volumes data. We don't know which AcquisitionObject (entity) describes the T1-weighted scans
@@ -325,7 +325,7 @@ def add_brainvolume_data(
if row_variable == id_field:
# store participant id for later use in processing the data for this row
participant_id = row_data.values[0]
- print("participant id: %s" % participant_id)
+ print(f"participant id: {participant_id}")
continue
else:
# get source software matching this column deal with duplicate variables in source_row and pandas changing duplicate names
@@ -691,7 +691,7 @@ def main():
# qres = rdf_graph_parse.query(query)
# for row in qres:
- # print('%s \t %s' %(row[0],row[1]))
+ # print(f'{row[0]} \t {row[1]}')
# #find row in CSV file with subject id matching agent from NIDM file
# #csv_row = df.loc[df[id_field]==type(df[id_field][0])(row[1])]
diff --git a/src/nidm/experiment/tools/rest.py b/src/nidm/experiment/tools/rest.py
index cec4aba..9627bd3 100644
--- a/src/nidm/experiment/tools/rest.py
+++ b/src/nidm/experiment/tools/rest.py
@@ -34,11 +34,11 @@ class RestParser:
def __init__(self, verbosity_level=0, output_format=0):
self.verbosity_level = verbosity_level
self.output_format = output_format
- self.restLog("Setting output format {}".format(self.output_format), 4)
+ self.restLog(f"Setting output format {self.output_format}", 4)
def setOutputFormat(self, output_format):
self.output_format = output_format
- self.restLog("Setting output format {}".format(self.output_format), 4)
+ self.restLog(f"Setting output format {self.output_format}", 4)
#####################
# Standard formatters
@@ -191,7 +191,7 @@ def projectSummaryFormat(self, result):
]
field_table = tabulate(fh_rows, fh_header)
# added by DBK, if they asked for fields then just give them the fields
- return "{}".format(field_table)
+ return str(field_table)
else:
field_table = ""
@@ -222,9 +222,7 @@ def projectSummaryFormat(self, result):
return self.format(result)
def formatDerivatives(self, derivative):
- self.restLog(
- "formatting derivatives in format {}".format(self.output_format), 5
- )
+ self.restLog(f"formatting derivatives in format {self.output_format}", 5)
if self.output_format == self.CLI_FORMAT:
table = []
for uri in derivative:
@@ -335,7 +333,7 @@ def subjectSummaryFormat(self, result):
)
derivatives = self.formatDerivatives(result["derivatives"])
- return "{}\n\n{}\n\n{}".format(tabulate(toptable), derivatives, instruments)
+ return f"{tabulate(toptable)}\n\n{derivatives}\n\n{instruments}"
else:
return self.format(result)
@@ -365,7 +363,7 @@ def subjectSummaryFormat_v2(self, result):
instruments = self.activityDataTableFormat(result["instruments"])
derivatives = self.activityDataTableFormat(result["derivatives"])
- return "{}\n\n{}\n\n{}".format(tabulate(toptable), derivatives, instruments)
+ return f"{tabulate(toptable)}\n\n{derivatives}\n\n{instruments}"
else:
return self.format(result)
@@ -413,7 +411,7 @@ def projects(self):
if "fields" in self.query and len(self.query["fields"]) > 0:
subjects_set = set()
dataelements_set = set()
- self.restLog("Using fields {}".format(self.query["fields"]), 2)
+ self.restLog(f"Using fields {self.query['fields']}", 2)
# result['field_values'] = []
for proj in projects:
@@ -533,7 +531,7 @@ def projectStats(self):
match = re.match(r"^/?statistics/projects/([^/]+)\??$", path)
id_ = parse.unquote(str(match.group(1)))
- self.restLog("Returning project {} stats metadata".format(id_), 2)
+ self.restLog(f"Returning project {id_} stats metadata", 2)
meta_data = Query.GetProjectsMetadata(self.nidm_files)
self.ExpandProjectMetaData(meta_data)
@@ -639,7 +637,7 @@ def addFieldStats(self, result, project, subjects, field, type): # noqa: A002
def projectSummary(self):
match = re.match(r"^/?projects/([^/]+)$", self.command)
pid = parse.unquote(str(match.group(1)))
- self.restLog("Returning project {} summary".format(pid), 2)
+ self.restLog(f"Returning project {pid} summary", 2)
result = nidm.experiment.Navigate.GetProjectAttributes(
self.nidm_files, project_id=pid
@@ -654,7 +652,7 @@ def projectSummary(self):
# if we got fields, drill into each subject and pull out the field data
# subject details -> derivatives / instrument -> values -> element
if "fields" in self.query and len(self.query["fields"]) > 0:
- self.restLog("Using fields {}".format(self.query["fields"]), 2)
+ self.restLog(f"Using fields {self.query['fields']}", 2)
result["field_values"] = []
# get all the synonyms for all the fields
field_synonyms = functools.reduce(
@@ -687,9 +685,7 @@ def subjectsList(self):
match = re.match(r"^/?projects/([^/]+)/subjects/?$", self.command)
project = match.group((1))
self.restLog(
- "Returning all agents matching filter '{}' for project {}".format(
- self.query["filter"], project
- ),
+ f"Returning all agents matching filter '{self.query['filter']}' for project {project}",
2,
)
# result = Query.GetParticipantUUIDsForProject(self.nidm_files, project, self.query['filter'], None)
@@ -712,7 +708,7 @@ def subjectsList(self):
def projectSubjectSummary(self):
match = re.match(r"^/?projects/([^/]+)/subjects/([^/]+)/?$", self.command)
subject = Navigate.normalizeSingleSubjectToUUID(self.nidm_files, match.group(2))
- self.restLog("Returning info about subject {}".format(match.group(2)), 2)
+ self.restLog(f"Returning info about subject {match[2]}", 2)
return self.subjectSummaryFormat(
Query.GetParticipantDetails(self.nidm_files, match.group(1), subject)
)
@@ -782,7 +778,7 @@ def subjects(self):
def subjectSummary(self):
match = re.match(r"^/?subjects/([^/]+)/?$", self.command)
- self.restLog("Returning info about subject {}".format(match.group(1)), 2)
+ self.restLog(f"Returning info about subject {match[1]}", 2)
sid = match.group(1)
# if we were passed in a sub_id rather than a UUID, lookup the associated UUID. (we might get multiple!)
@@ -816,7 +812,7 @@ def instrumentsList(self):
match = re.match(
r"^/?projects/([^/]+)/subjects/([^/]+)/instruments/?$", self.command
)
- self.restLog("Returning instruments in subject {}".format(match.group(2)), 2)
+ self.restLog(f"Returning instruments in subject {match[2]}", 2)
subject = Navigate.normalizeSingleSubjectToUUID(self.nidm_files, match.group(2))
instruments = Query.GetParticipantInstrumentData(
self.nidm_files, match.group(1), subject
@@ -830,9 +826,7 @@ def instrumentSummary(self):
r"^/?projects/([^/]+)/subjects/([^/]+)/instruments/([^/]+)$", self.command
)
self.restLog(
- "Returning instrument {} in subject {}".format(
- match.group(3), match.group(2)
- ),
+ f"Returning instrument {match[3]} in subject {match[2]}",
2,
)
subject = Navigate.normalizeSingleSubjectToUUID(self.nidm_files, match.group(2))
@@ -844,7 +838,7 @@ def instrumentSummary(self):
def derivativesList(self):
result = []
match = re.match(r"^/?projects/([^/]+)/subjects/([^/]+)", self.command)
- self.restLog("Returning derivatives in subject {}".format(match.group(2)), 2)
+ self.restLog(f"Returning derivatives in subject {match[2]}", 2)
subject = Navigate.normalizeSingleSubjectToUUID(self.nidm_files, match.group(2))
derivatives = Query.GetDerivativesDataForSubject(
self.nidm_files, match.group(1), subject
@@ -859,7 +853,7 @@ def derivativeSummary(self):
)
subject = Navigate.normalizeSingleSubjectToUUID(self.nidm_files, match.group(2))
uri = match.group(3)
- self.restLog("Returning stat {} in subject {}".format(uri, match.group(2)), 2)
+ self.restLog(f"Returning stat {uri} in subject {match[2]}", 2)
derivatives = Query.GetDerivativesDataForSubject(
self.nidm_files, match.group(1), subject
)
@@ -874,9 +868,7 @@ def run(self, nidm_files, command):
try:
self.restLog("parsing command " + command, 1)
self.restLog("Files to read:" + str(nidm_files), 1)
- self.restLog(
- "Using {} as the graph cache directory".format(gettempdir()), 1
- )
+ self.restLog(f"Using {gettempdir()} as the graph cache directory", 1)
self.nidm_files = tuple(nidm_files)
# replace # marks with %23 - they are sometimes used in the is_about terms
@@ -898,7 +890,7 @@ def run(self, nidm_files, command):
return self.route()
except ValueError as ve:
- logging.error("Exception: {}".format(ve))
+ logging.error("Exception: %s", ve)
return self.format(
{"error": "One of the supplied field terms was not found."}
)
diff --git a/tests/experiment/read_nidm.py b/tests/experiment/read_nidm.py
index 756e602..c69b656 100644
--- a/tests/experiment/read_nidm.py
+++ b/tests/experiment/read_nidm.py
@@ -13,29 +13,29 @@ def main():
project = read_nidm(args.nidm_file)
- print("Project: \n %s" % project.get_uuid())
+ print(f"Project: \n {project.get_uuid()}")
sessions = project.get_sessions()
- print("Sessions:\n %s" % sessions)
+ print(f"Sessions:\n {sessions}")
acquisitions = []
for session in sessions:
acquisitions = session.get_acquisitions()
- print("Acquisitions:\n %s" % acquisitions)
+ print(f"Acquisitions:\n {acquisitions}")
for acq in acquisitions:
acquisition_objects = acq.get_acquisition_objects()
- print("Acquisition Objects:\n %s" % acquisition_objects)
+ print(f"Acquisition Objects:\n {acquisition_objects}")
# check for data elements
- print("Data Elements: \n %s" % project.get_dataelements())
+ print(f"Data Elements: \n {project.get_dataelements()}")
# derivatives
# and for derivatives
- print("Derivatives: \n %s" % project.get_derivatives())
+ print(f"Derivatives: \n {project.get_derivatives()}")
for deriv in project.get_derivatives():
derivobj = deriv.get_derivative_objects()
- print("Derivative Objects: \n %s" % derivobj)
+ print(f"Derivative Objects: \n {derivobj}")
with open(args.outfile, "w") as f:
# serialize project for comparison with the original
diff --git a/tests/experiment/termsearch.py b/tests/experiment/termsearch.py
index c8b3759..0b5fe78 100644
--- a/tests/experiment/termsearch.py
+++ b/tests/experiment/termsearch.py
@@ -38,7 +38,7 @@ def main():
# for items in term['_source']['existing_ids']:
# if items['preferred']=='1':
# preferred_url=items['iri']
- # print("Label = %s \t Definition = %s \t Preferred URL = %s " %(term['_source']['label'],term['_source']['definition'],preferred_url))
+ # print(f"Label = {term['_source']['label']} \t Definition = {term['_source']['definition']} \t Preferred URL = {preferred_url} ")
# example of uber elastic search query returns dictionary of label, definition, and preferred_url
print("\n\n-------------------------------------------")
@@ -46,12 +46,7 @@ def main():
results = Utils.GetNIDMTermsFromSciCrunch(args.key, args.query_string)
for key, _ in results.items():
print(
- "Label: %s \t Definition: %s \t Preferred URL: %s "
- % (
- results[key]["label"],
- results[key]["definition"],
- results[key]["preferred_url"],
- )
+ f"Label: {results[key]['label']} \t Definition: {results[key]['definition']} \t Preferred URL: {results[key]['preferred_url']} "
)
diff --git a/tests/experiment/test_query.py b/tests/experiment/test_query.py
index 9c9d111..4899f53 100644
--- a/tests/experiment/test_query.py
+++ b/tests/experiment/test_query.py
@@ -203,7 +203,7 @@ def saveProject(file_name, project):
# save a turtle file
with open(file_name, "w") as f:
f.write(project.serializeTurtle())
- return "nidm:_123_{}".format(file_name)
+ return f"nidm:_123_{file_name}"
def makeProjectTestFile(filename):
diff --git a/tests/experiment/tools/test_rest.py b/tests/experiment/tools/test_rest.py
index 1c82f67..aae13fd 100644
--- a/tests/experiment/tools/test_rest.py
+++ b/tests/experiment/tools/test_rest.py
@@ -396,7 +396,7 @@ def test_CheckSubjectMatchesFilter(brain_vol: BrainVol) -> None:
break
# find an actual stat and build a matching filter to make sure our matcher passes it
- filter_str = "derivatives.{} eq {}".format(dt, val)
+ filter_str = f"derivatives.{dt} eq {val}"
assert Query.CheckSubjectMatchesFilter(
brain_vol.files, project, subject, filter_str
)
diff --git a/tests/experiment/tools/test_rest_dataelements.py b/tests/experiment/tools/test_rest_dataelements.py
index 3142de2..a53a0c0 100644
--- a/tests/experiment/tools/test_rest_dataelements.py
+++ b/tests/experiment/tools/test_rest_dataelements.py
@@ -85,7 +85,7 @@ def test_dataelement_details(openneuro_files: list[str]) -> None:
rest_parser = RestParser(output_format=RestParser.OBJECT_FORMAT)
# result = rest_parser.run(openneuro_files, '/dataelements')
#
- # dti = rest_parser.run(openneuro_files, '/dataelements/{}'.format(result["data_elements"]["label"][0]))
+ # dti = rest_parser.run(openneuro_files, f'/dataelements/{result["data_elements"]["label"][0]}')
#
# assert "label" in dti
# assert "description" in dti
@@ -94,7 +94,7 @@ def test_dataelement_details(openneuro_files: list[str]) -> None:
#
# # make sure the text formatter doesn't fail horribly
# rest_parser.setOutputFormat(RestParser.CLI_FORMAT)
- # txt = rest_parser.run(openneuro_files, '/dataelements/{}'.format(result["data_elements"]["label"][0]))
+ # txt = rest_parser.run(openneuro_files, f'/dataelements/{result["data_elements"]["label"][0]}')
#
dti = rest_parser.run(
@@ -106,7 +106,7 @@ def test_dataelement_details(openneuro_files: list[str]) -> None:
def test_dataelement_details_in_projects_field(brain_vol_files: list[str]) -> None:
rest_parser = RestParser(output_format=RestParser.OBJECT_FORMAT)
# result = rest_parser.run(openneuro_files, '/dataelements')
- # dti = rest_parser.run(openneuro_files, '/dataelements/{}'.format(result["data_elements"]["label"][0]))
+ # dti = rest_parser.run(openneuro_files, f'/dataelements/{result["data_elements"]["label"][0]}')
# assert len(dti['inProjects']) >= 1
# find a data element that we are using for at least one subject