(PR #36) Visualizer support
- Restructured the Tomato parser to better handle the keys available in the raw data - resolves missing current in OCV runs
- Implemented a guard for fluctuating current (sign-change segments shorter than 10 samples are ignored) - resolves incorrect cycle counts
- Updated the cycling analysis script to support collapsible raw-data dataframe views in visualizer panels
- Now pre-computing Q as a function of t - used in the V vs. C plot
edan-bainglass authored Nov 1, 2023
2 parents a524633 + ff03392 commit 0753d5a
Showing 8 changed files with 110 additions and 134 deletions.
41 changes: 21 additions & 20 deletions aiida_aurora/parsers.py
@@ -120,30 +120,31 @@ def parse_tomato_results(data_dic, logger=None):
The name of each array is: `'step{step_number}_{raw_quantity_name}_{identifier}'`
"""
array_dic = {}
for imstep, mstep in enumerate(data_dic["steps"]): # method step
raw_qty_names = list(mstep["data"][0]["raw"].keys())
if logger:
logger.debug(f"parse_tomato_results: step {imstep}: {list(raw_qty_names)}")
for raw_qty_name in raw_qty_names:
# substitute any special character with underscores
raw_qty_name_cleaned = re.sub("[^0-9a-zA-Z_]", "_", raw_qty_name)
if isinstance(mstep["data"][0]["raw"][raw_qty_name], dict):
for identifier in mstep["data"][0]["raw"][raw_qty_name].keys():
array_dic[f"step{imstep}_{raw_qty_name_cleaned}_{identifier}"] = np.array([
step["raw"][raw_qty_name][identifier] for step in mstep["data"]
])
else:
array_dic[f"step{imstep}_{raw_qty_name_cleaned}"] = np.array([
step["raw"][raw_qty_name] for step in mstep["data"]
])
array_dic[f"step{imstep}_uts"] = np.array([step["uts"] for step in mstep["data"]])

parsed = {}

data = data_dic["steps"][0]["data"]

keys = ["Ewe", "I"] # HACK hardcoded
fill = {"n": np.nan, "s": np.nan, "u": ""}

if logger:
logger.debug(f"parse_tomato_results: arrays stored: {list(array_dic.keys())}")
logger.debug(f"parse_tomato_results: storing {keys}")

for key in keys:
clean_key = re.sub("[^0-9a-zA-Z_]", "_", key) # TODO necessary?
for id in ("n", "s", "u"):
values = [step["raw"].get(key, fill)[id] for step in data]
parsed[f"step0_{clean_key}_{id}"] = np.array(values)

parsed["step0_uts"] = np.array([step["uts"] for step in data])

node = ArrayData()
for key, value in array_dic.items():
for key, value in parsed.items():
node.set_array(key, value)
node.set_attribute_many(data_dic["metadata"])

if logger:
logger.debug(f"parse_tomato_results: {list(parsed.keys())} stored")

return node
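For orientation, here is a minimal self-contained sketch of the new fill-based lookup (the timestep layout and values below are hypothetical, condensed from the hunk above): raw quantities absent from a timestep, such as `I` during an OCV hold, are padded with NaN via `dict.get` instead of raising a `KeyError`.

import re

import numpy as np

# Hypothetical raw timesteps: the second lacks "I", as in an OCV run
data = [
    {"uts": 0.0, "raw": {"Ewe": {"n": 3.70, "s": 0.01, "u": "V"}, "I": {"n": 0.1, "s": 0.001, "u": "A"}}},
    {"uts": 1.0, "raw": {"Ewe": {"n": 3.69, "s": 0.01, "u": "V"}}},
]

keys = ["Ewe", "I"]
fill = {"n": np.nan, "s": np.nan, "u": ""}

parsed = {}
for key in keys:
    clean_key = re.sub("[^0-9a-zA-Z_]", "_", key)
    for id in ("n", "s", "u"):
        # a missing quantity falls back to the NaN fill instead of raising
        values = [step["raw"].get(key, fill)[id] for step in data]
        parsed[f"step0_{clean_key}_{id}"] = np.array(values)

parsed["step0_uts"] = np.array([step["uts"] for step in data])

print(parsed["step0_I_n"])  # [0.1 nan] - the OCV timestep is padded, not dropped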
3 changes: 1 addition & 2 deletions aiida_aurora/utils/__init__.py
@@ -1,6 +1,5 @@
from . import cycling_analysis, plot
from . import cycling_analysis

__all__ = [
'cycling_analysis',
'plot',
]
3 changes: 1 addition & 2 deletions aiida_aurora/utils/analyzers.py
@@ -134,8 +134,7 @@ def _get_capacities(self, snapshot: dict):
"""
try:
data = get_data_from_raw(snapshot)
capacities = data['Qd'] if self.is_discharge else data['Qc']
return capacities / 3.6 # As -> mAh
return data['Qd'] if self.is_discharge else data['Qc']
except KeyError as err:
self.logger.error(f"missing '{str(err)}' in snapshot")
return []
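Note: the As -> mAh conversion (division by 3.6) is not dropped here; it moves into post_process_data (see the aiida_aurora/utils/parsers.py diff below), which now returns Qd, Qc, and the cumulative Q already converted, so _get_capacities can return the values as-is.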
96 changes: 58 additions & 38 deletions aiida_aurora/utils/cycling_analysis.py
@@ -1,13 +1,17 @@
from __future__ import annotations

import json
from typing import Dict, Optional, Tuple

from pandas import DataFrame
from pandas.io.formats.style import Styler

from aiida.orm import CalcJobNode, QueryBuilder, RemoteData, SinglefileData

from aiida_aurora.data import BatterySampleData
from aiida_aurora.utils.parsers import get_data_from_raw, get_data_from_results


def cycling_analysis(node: CalcJobNode) -> Tuple[dict, str]:
def cycling_analysis(node: CalcJobNode) -> tuple[dict, str, DataFrame]:
"""Perform post-processing of cycling experiments results.
Used by the frontend Aurora app for plotting.
@@ -23,7 +27,7 @@ def cycling_analysis(node: CalcJobNode) -> Tuple[dict, str]:
Returns
-------
`Tuple[dict, str]`
`tuple[dict, str, DataFrame]`
Post-processed data and an analysis report.
Raises
@@ -35,28 +39,28 @@ def cycling_analysis(node: CalcJobNode) -> Tuple[dict, str]:
if node.process_type != "aiida.calculations:aurora.cycler":
raise TypeError("`node` is not a `BatteryCyclerExperiment`")

report = f"CalcJob: <{node.pk}> '{node.label}'\n"
log = f"CalcJob: <{node.pk}> '{node.label}'\n"

sample: BatterySampleData = node.inputs.battery_sample
report += f"Sample: {sample.label}\n"
log += f"Sample: {sample.label}\n"

report += "Monitored: "
log += "Monitored: "

if monitors := get_monitors(node):
report += "True\n"
report += add_monitor_details(monitors)
log += "True"
log += add_monitor_details(monitors)
else:
report += "False\n"
log += "False"

try:
data, analysis = process_data(node)
data, warning, raw = process_data(node)
except Exception as err:
data, analysis = {}, f"*** ERROR ***\n\n{str(err)}"
data, warning, raw = {}, f"*** ERROR ***\n\n{str(err)}", None

return (data, f"{report}\n{analysis}")
return (data, f"{log}\n{warning}", raw)


def get_monitors(node: CalcJobNode) -> Dict[str, dict]:
def get_monitors(node: CalcJobNode) -> dict[str, dict]:
"""Fetch the monitor dictionary.
The function is backwards compatible, capable of fetching the
@@ -72,7 +76,7 @@ def get_monitors(node: CalcJobNode) -> Dict[str, dict]:
Returns
-------
`Dict[str, dict]`
`dict[str, dict]`
A dictionary of monitors.
"""

@@ -85,7 +89,7 @@ def get_monitors(node: CalcJobNode) -> Dict[str, dict]:
return convert_to_new_monitor_format(monitor) if monitor else {}


def get_node_monitor_calcjob(node: CalcJobNode) -> Optional[CalcJobNode]:
def get_node_monitor_calcjob(node: CalcJobNode) -> CalcJobNode | None:
"""Fetch the monitor calcjob associated with the calculation
`node`.
@@ -99,7 +103,7 @@ def get_node_monitor_calcjob(node: CalcJobNode) -> Optional[CalcJobNode]:
Returns
-------
`Optional[CalcJobNode]`
`CalcJobNode | None`
The associated monitor calcjob node, `None` if not found.
"""

Expand Down Expand Up @@ -134,7 +138,7 @@ def get_node_monitor_calcjob(node: CalcJobNode) -> Optional[CalcJobNode]:
return results[0] if results else None


def convert_to_new_monitor_format(monitor: CalcJobNode) -> Dict[str, dict]:
def convert_to_new_monitor_format(monitor: CalcJobNode) -> dict[str, dict]:
"""Convert monitor calcjob attributes to AiiDA 2.x format.
For more details, see
@@ -148,7 +152,7 @@ def convert_to_new_monitor_format(monitor: CalcJobNode) -> Dict[str, dict]:
Returns
-------
`Dict[str, dict]`
`dict[str, dict]`
The formatted monitor dictionary.
"""

@@ -182,7 +186,7 @@ def convert_to_new_monitor_format(monitor: CalcJobNode) -> Dict[str, dict]:
}


def add_monitor_details(monitors: Dict[str, dict]) -> str:
def add_monitor_details(monitors: dict[str, dict]) -> str:
"""Return monitor details.
Details include the following:
@@ -194,7 +198,7 @@ def add_monitor_details(monitors: Dict[str, dict]) -> str:
Parameters
----------
`monitors` : `Dict[str, dict]`
`monitors` : `dict[str, dict]`
A dictionary of monitors.
Returns
@@ -207,9 +211,7 @@ def add_monitor_details(monitors: Dict[str, dict]) -> str:

for label, params in monitors.items():
details += f"\nMonitor: {label}\n"
entry_point = params.get("entry_point", "aiida-calcmonitor plugin")
refresh_rate = params.get("minimum_poll_interval", 600)
details += f" Entry point: {entry_point}\n"
details += f" Interval (s): {refresh_rate}\n"

if "kwargs" in params:
@@ -255,7 +257,7 @@ def add_monitor_settings(
return _settings


def process_data(node: CalcJobNode) -> Tuple[dict, str]:
def process_data(node: CalcJobNode) -> tuple[dict, str, Styler | str]:
"""Analyze the results of the cycling experiment.
The analysis is performed on the results `ArrayNode`, if one
@@ -272,20 +274,20 @@ def process_data(node: CalcJobNode) -> Tuple[dict, str]:
Returns
-------
`Tuple[dict, str]`
Post-processed data and an analysis report.
`tuple[dict, str, Styler | str]`
Post-processed data, warning, and analysis | error message.
"""

if node.process_state and "finished" not in node.process_state.value:
return {}, f"Job terminated with message '{node.process_status}'"
return {}, f"Job terminated with message '{node.process_status}'", ""

report = ""
warning = ""

if node.exit_status:
report += "WARNING: "
warning += "WARNING: "
generic = "job killed by monitor"
report += f"{node.exit_message}" if node.exit_message else generic
report += "\n\n"
warning += f"{node.exit_message}" if node.exit_message else generic
warning += "\n\n"

if "results" in node.outputs:
data = get_data_from_results(node.outputs.results)
@@ -298,10 +300,7 @@ def process_data(node: CalcJobNode) -> Tuple[dict, str]:
else:
data = {}

# TODO extract data summary/statistics
report += add_analysis(data)

return data, report
return data, warning, add_analysis(data)


def get_data_from_file(source: SinglefileData) -> dict:
@@ -347,7 +346,7 @@ def get_data_from_remote(source: RemoteData) -> dict:
return {}


def add_analysis(data: dict) -> str:
def add_analysis(data: dict) -> Styler | str:
"""Return analysis details.
Parameters
@@ -357,8 +356,29 @@ def add_analysis(data: dict) -> str:
Returns
-------
`str`
`Styler | str`
The details of the analysis.
"""
# TODO replace str(data) with something insightful, and clean!
return str(data) if data else "ERROR! Failed to find or parse output"

if data:

COLUMNS = {
"time": "Time (s)",
"I": "I (A)",
"Ewe": "Ewe (V)",
}

selected_keys = {key: data[key] for key in COLUMNS}
df = DataFrame(selected_keys).rename(columns=COLUMNS)

return df.style.set_properties(width="100vw").set_table_styles([
dict(
selector="th, td",
props=[
("text-align", "center"),
],
),
]).hide(axis="index")

else:
return "ERROR! Failed to find or parse output"
49 changes: 22 additions & 27 deletions aiida_aurora/utils/parsers.py
@@ -1,12 +1,15 @@
import numpy as np
from scipy.integrate import cumtrapz

from aiida.orm import ArrayData


def get_data_from_raw(jsdata):
def get_data_from_raw(jsdata) -> dict:
"Extract raw data from json file."

if not isinstance(jsdata, dict):
raise TypeError('jsdata should be a dictionary')

if len(jsdata["steps"]) > 1:
raise NotImplementedError('Analysis of multiple steps is not implemented.')

@@ -17,32 +20,12 @@ def get_data_from_raw(jsdata):
Ewe = np.array([ts["raw"]["Ewe"]["n"] for ts in raw_data])
I = np.array([ts["raw"]["I"]["n"] for ts in raw_data])

# find indices of sign changes in I
idx = np.where(np.diff(np.sign(I)) != 0)[0]
return post_process_data(t, Ewe, I)

# integrate and store charge and discharge currents
Qc, Qd = [], []
for ii in range(len(idx) - 1):
i0, ie = idx[ii], idx[ii + 1]
q = np.trapz(I[i0:ie], t[i0:ie])
if q > 0:
Qc.append(q)
else:
Qd.append(abs(q))

return {
'time': t,
'Ewe': Ewe,
'I': I,
'cn': len(Qd),
'time-cycles': t[idx[2::2]],
'Qd': np.array(Qd),
'Qc': np.array(Qc),
}


def get_data_from_results(array_node):
def get_data_from_results(array_node) -> dict:
"Extract data from parsed ArrayData node."

if not isinstance(array_node, ArrayData):
raise TypeError('array_node should be an ArrayData')

@@ -52,13 +35,24 @@ def get_data_from_results(array_node):
Ewe = array_node.get_array('step0_Ewe_n')
I = array_node.get_array('step0_I_n')

# find indices of sign changes in I
return post_process_data(t, Ewe, I)


def post_process_data(t: np.ndarray, Ewe: np.ndarray, I: np.ndarray) -> dict:
"""docstring"""

# find half-cycle markers
# add last point if not already a marker
idx = np.where(np.diff(np.sign(I)) != 0)[0]
if (final := len(I) - 1) not in idx:
idx = np.append(idx, final)

# integrate and store charge and discharge currents
Qc, Qd = [], []
for ii in range(len(idx) - 1):
i0, ie = idx[ii], idx[ii + 1]
if ie - i0 < 10:
continue
q = np.trapz(I[i0:ie], t[i0:ie])
if q > 0:
Qc.append(q)
Expand All @@ -71,6 +65,7 @@ def get_data_from_results(array_node):
'I': I,
'cn': len(Qd),
'time-cycles': t[idx[2::2]],
'Qd': np.array(Qd),
'Qc': np.array(Qc),
'Q': cumtrapz(I, t, axis=0, initial=0) / 3.6,
'Qd': np.array(Qd) / 3.6,
'Qc': np.array(Qc) / 3.6,
}
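To illustrate the new guard, a self-contained sketch with a synthetic current trace (all numbers are illustrative; note cumtrapz was later renamed cumulative_trapezoid in SciPy): a single noisy sample flips the sign mid-discharge, creating segments shorter than 10 samples; the ie - i0 < 10 filter skips them, so the discharge count stays at 2.

import numpy as np
from scipy.integrate import cumtrapz

# Synthetic trace: 0.1 A charge / -0.1 A discharge half-cycles of 25 samples,
# plus one noisy sample that briefly flips the sign mid-discharge
t = np.arange(100.0)
I = np.where((t // 25) % 2 == 0, 0.1, -0.1)
I[40] = 0.05

# find half-cycle markers, adding the last point if not already a marker
idx = np.where(np.diff(np.sign(I)) != 0)[0]
if (final := len(I) - 1) not in idx:
    idx = np.append(idx, final)

Qc, Qd = [], []
for ii in range(len(idx) - 1):
    i0, ie = idx[ii], idx[ii + 1]
    if ie - i0 < 10:  # guard: too short to be a genuine half-cycle
        continue
    q = np.trapz(I[i0:ie], t[i0:ie])
    if q > 0:
        Qc.append(q)
    else:
        Qd.append(abs(q))

print(len(Qd))  # 2 - without the guard, the spurious segments inflate this
Q = cumtrapz(I, t, initial=0) / 3.6  # cumulative capacity (mAh) per timestep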