From aaec06dd5397ef66958cfccfb27ddf39060e473b Mon Sep 17 00:00:00 2001
From: alchem0x2A <alchem0x2a@gmail.com>
Date: Wed, 20 Nov 2024 18:11:23 +0000
Subject: [PATCH] deploy: 0b7c166cc6f26f31bae9177f7ac1627879b83cff

---
 _static/htmlcov/class_index.html              |  435 ++++
 _static/htmlcov/coverage_html_cb_6fb7b396.js  |  733 ++++++
 _static/htmlcov/favicon_32_cb_58284776.png    |  Bin 0 -> 1732 bytes
 _static/htmlcov/function_index.html           | 2099 +++++++++++++++++
 _static/htmlcov/index.html                    |  244 ++
 _static/htmlcov/keybd_closed_cb_ce680311.png  |  Bin 0 -> 9004 bytes
 _static/htmlcov/status.json                   |    1 +
 _static/htmlcov/style_cb_8e611ae1.css         |  337 +++
 .../z_e32f35a0016f670d___init___py.html       |  150 ++
 .../htmlcov/z_e32f35a0016f670d_api_py.html    |  411 ++++
 .../z_e32f35a0016f670d_calculator_py.html     | 1419 +++++++++++
 .../htmlcov/z_e32f35a0016f670d_common_py.html |  103 +
 .../z_e32f35a0016f670d_docparser_py.html      |  866 +++++++
 .../z_e32f35a0016f670d_download_data_py.html  |  186 ++
 _static/htmlcov/z_e32f35a0016f670d_io_py.html | 1277 ++++++++++
 .../z_e32f35a0016f670d_quicktest_py.html      |  513 ++++
 .../z_e32f35a0016f670d_socketio_py.html       |  451 ++++
 .../htmlcov/z_e32f35a0016f670d_utils_py.html  |  489 ++++
 .../z_ef57e6186893c87e___init___py.html       |   97 +
 .../htmlcov/z_ef57e6186893c87e_aimd_py.html   |  273 +++
 .../htmlcov/z_ef57e6186893c87e_atoms_py.html  |  494 ++++
 .../htmlcov/z_ef57e6186893c87e_geopt_py.html  |  240 ++
 .../htmlcov/z_ef57e6186893c87e_inpt_py.html   |  203 ++
 .../htmlcov/z_ef57e6186893c87e_ion_py.html    |  323 +++
 .../htmlcov/z_ef57e6186893c87e_out_py.html    |  360 +++
 ...z_ef57e6186893c87e_pseudopotential_py.html |  289 +++
 .../htmlcov/z_ef57e6186893c87e_static_py.html |  329 +++
 .../htmlcov/z_ef57e6186893c87e_utils_py.html  |  190 ++
 28 files changed, 12512 insertions(+)
 create mode 100644 _static/htmlcov/class_index.html
 create mode 100644 _static/htmlcov/coverage_html_cb_6fb7b396.js
 create mode 100644 _static/htmlcov/favicon_32_cb_58284776.png
 create mode 100644 _static/htmlcov/function_index.html
 create mode 100644 _static/htmlcov/index.html
 create mode 100644 _static/htmlcov/keybd_closed_cb_ce680311.png
 create mode 100644 _static/htmlcov/status.json
 create mode 100644 _static/htmlcov/style_cb_8e611ae1.css
 create mode 100644 _static/htmlcov/z_e32f35a0016f670d___init___py.html
 create mode 100644 _static/htmlcov/z_e32f35a0016f670d_api_py.html
 create mode 100644 _static/htmlcov/z_e32f35a0016f670d_calculator_py.html
 create mode 100644 _static/htmlcov/z_e32f35a0016f670d_common_py.html
 create mode 100644 _static/htmlcov/z_e32f35a0016f670d_docparser_py.html
 create mode 100644 _static/htmlcov/z_e32f35a0016f670d_download_data_py.html
 create mode 100644 _static/htmlcov/z_e32f35a0016f670d_io_py.html
 create mode 100644 _static/htmlcov/z_e32f35a0016f670d_quicktest_py.html
 create mode 100644 _static/htmlcov/z_e32f35a0016f670d_socketio_py.html
 create mode 100644 _static/htmlcov/z_e32f35a0016f670d_utils_py.html
 create mode 100644 _static/htmlcov/z_ef57e6186893c87e___init___py.html
 create mode 100644 _static/htmlcov/z_ef57e6186893c87e_aimd_py.html
 create mode 100644 _static/htmlcov/z_ef57e6186893c87e_atoms_py.html
 create mode 100644 _static/htmlcov/z_ef57e6186893c87e_geopt_py.html
 create mode 100644 _static/htmlcov/z_ef57e6186893c87e_inpt_py.html
 create mode 100644 _static/htmlcov/z_ef57e6186893c87e_ion_py.html
 create mode 100644 _static/htmlcov/z_ef57e6186893c87e_out_py.html
 create mode 100644 _static/htmlcov/z_ef57e6186893c87e_pseudopotential_py.html
 create mode 100644 _static/htmlcov/z_ef57e6186893c87e_static_py.html
 create mode 100644 _static/htmlcov/z_ef57e6186893c87e_utils_py.html

diff --git a/_static/htmlcov/class_index.html b/_static/htmlcov/class_index.html
new file mode 100644
index 00000000..ccf7f314
--- /dev/null
+++ b/_static/htmlcov/class_index.html
@@ -0,0 +1,435 @@
Coverage report: 74%
Files | Functions | Classes
coverage.py v7.6.7, created at 2024-11-20 18:11 +0000
File | class | statements | missing | excluded | coverage
sparc/__init__.py | SPARCMissingDeps | 2 | 2 | 0 | 0%
sparc/__init__.py | (no class) | 22 | 7 | 0 | 68%
sparc/api.py | SparcAPI | 117 | 20 | 0 | 83%
sparc/api.py | (no class) | 26 | 0 | 0 | 100%
sparc/calculator.py | SPARC | 541 | 252 | 0 | 53%
sparc/calculator.py | (no class) | 128 | 0 | 0 | 100%
sparc/common.py | (no class) | 4 | 0 | 0 | 100%
sparc/docparser.py | SparcDocParser | 169 | 32 | 0 | 81%
sparc/docparser.py | (no class) | 194 | 25 | 0 | 87%
sparc/download_data.py | (no class) | 50 | 6 | 0 | 88%
sparc/io.py | SparcBundle | 296 | 33 | 0 | 89%
sparc/io.py | (no class) | 179 | 97 | 0 | 46%
sparc/quicktest.py | BaseTest | 38 | 9 | 0 | 76%
sparc/quicktest.py | ImportTest | 6 | 1 | 0 | 83%
sparc/quicktest.py | PspTest | 28 | 9 | 0 | 68%
sparc/quicktest.py | ApiTest | 13 | 5 | 0 | 62%
sparc/quicktest.py | CommandTest | 29 | 13 | 0 | 55%
sparc/quicktest.py | FileIOCalcTest | 14 | 3 | 0 | 79%
sparc/quicktest.py | SocketCalcTest | 21 | 3 | 0 | 86%
sparc/quicktest.py | (no class) | 67 | 1 | 0 | 99%
sparc/socketio.py | SPARCProtocol | 71 | 71 | 0 | 0%
sparc/socketio.py | SPARCSocketServer | 32 | 23 | 0 | 28%
sparc/socketio.py | SPARCSocketClient | 59 | 59 | 0 | 0%
sparc/socketio.py | (no class) | 37 | 0 | 0 | 100%
sparc/sparc_parsers/__init__.py | (no class) | 0 | 0 | 0 | 100%
sparc/sparc_parsers/aimd.py | (no class) | 104 | 11 | 0 | 89%
sparc/sparc_parsers/atoms.py | (no class) | 195 | 5 | 0 | 97%
sparc/sparc_parsers/geopt.py | (no class) | 76 | 9 | 0 | 88%
sparc/sparc_parsers/inpt.py | (no class) | 54 | 3 | 0 | 94%
sparc/sparc_parsers/ion.py | InvalidSortingComment | 1 | 0 | 0 | 100%
sparc/sparc_parsers/ion.py | (no class) | 99 | 3 | 0 | 97%
sparc/sparc_parsers/out.py | (no class) | 140 | 12 | 0 | 91%
sparc/sparc_parsers/pseudopotential.py | NotPSP8Format | 1 | 0 | 0 | 100%
sparc/sparc_parsers/pseudopotential.py | NoMatchingPseudopotential | 1 | 0 | 0 | 100%
sparc/sparc_parsers/pseudopotential.py | MultiplePseudoPotentialFiles | 1 | 0 | 0 | 100%
sparc/sparc_parsers/pseudopotential.py | (no class) | 89 | 0 | 0 | 100%
sparc/sparc_parsers/static.py | (no class) | 115 | 11 | 0 | 90%
sparc/sparc_parsers/utils.py | (no class) | 53 | 1 | 0 | 98%
sparc/utils.py | TimeoutException | 0 | 0 | 0 | 100%
sparc/utils.py | ProcessReturned | 0 | 0 | 0 | 100%
sparc/utils.py | (no class) | 221 | 138 | 0 | 38%
Total | | 3293 | 864 | 0 | 74%
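
For reference when reading these tables: the coverage column is simply the share of executable statements that were executed, shown with whole-percent precision (status.json later in this patch records "precision": 0). A minimal JavaScript sketch reproducing the Total row — illustration only, coverage.py computes this in Python and guards more edge cases:

    // Sketch: the coverage percent as displayed in the tables above.
    function coveragePercent(statements, missing) {
        if (statements === 0) return 100;  // entries with no statements report 100%
        return Math.round(100 * (statements - missing) / statements);
    }
    console.log(coveragePercent(3293, 864));  // 74, matching the Total row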
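The file added next (coverage_html_cb_6fb7b396.js) is coverage.py's stock browser code for these report pages: table sorting, the filter box, keyboard shortcuts, and scroll markers. Its debounce() helper wraps the filter handler so the handler is deferred until input events stop arriving. A usage sketch — the 250 ms wait is an arbitrary example value, and debounce is assumed already loaded from the file below:

    // Sketch: debouncing an input handler with the helper defined below.
    const onFilter = debounce((text) => console.log("filter:", text), 250);
    onFilter("s");
    onFilter("sp");
    onFilter("sparc");  // only this call survives; logs "filter: sparc" after ~250 ms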
+ + + diff --git a/_static/htmlcov/coverage_html_cb_6fb7b396.js b/_static/htmlcov/coverage_html_cb_6fb7b396.js new file mode 100644 index 00000000..1face13d --- /dev/null +++ b/_static/htmlcov/coverage_html_cb_6fb7b396.js @@ -0,0 +1,733 @@ +// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +// For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +// Coverage.py HTML report browser code. +/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */ +/*global coverage: true, document, window, $ */ + +coverage = {}; + +// General helpers +function debounce(callback, wait) { + let timeoutId = null; + return function(...args) { + clearTimeout(timeoutId); + timeoutId = setTimeout(() => { + callback.apply(this, args); + }, wait); + }; +}; + +function checkVisible(element) { + const rect = element.getBoundingClientRect(); + const viewBottom = Math.max(document.documentElement.clientHeight, window.innerHeight); + const viewTop = 30; + return !(rect.bottom < viewTop || rect.top >= viewBottom); +} + +function on_click(sel, fn) { + const elt = document.querySelector(sel); + if (elt) { + elt.addEventListener("click", fn); + } +} + +// Helpers for table sorting +function getCellValue(row, column = 0) { + const cell = row.cells[column] // nosemgrep: eslint.detect-object-injection + if (cell.childElementCount == 1) { + var child = cell.firstElementChild; + if (child.tagName === "A") { + child = child.firstElementChild; + } + if (child instanceof HTMLDataElement && child.value) { + return child.value; + } + } + return cell.innerText || cell.textContent; +} + +function rowComparator(rowA, rowB, column = 0) { + let valueA = getCellValue(rowA, column); + let valueB = getCellValue(rowB, column); + if (!isNaN(valueA) && !isNaN(valueB)) { + return valueA - valueB; + } + return valueA.localeCompare(valueB, undefined, {numeric: true}); +} + +function sortColumn(th) { + // Get the current sorting direction of the selected header, + // clear state on other headers and then set the new sorting direction. + const currentSortOrder = th.getAttribute("aria-sort"); + [...th.parentElement.cells].forEach(header => header.setAttribute("aria-sort", "none")); + var direction; + if (currentSortOrder === "none") { + direction = th.dataset.defaultSortOrder || "ascending"; + } + else if (currentSortOrder === "ascending") { + direction = "descending"; + } + else { + direction = "ascending"; + } + th.setAttribute("aria-sort", direction); + + const column = [...th.parentElement.cells].indexOf(th) + + // Sort all rows and afterwards append them in order to move them in the DOM. + Array.from(th.closest("table").querySelectorAll("tbody tr")) + .sort((rowA, rowB) => rowComparator(rowA, rowB, column) * (direction === "ascending" ? 1 : -1)) + .forEach(tr => tr.parentElement.appendChild(tr)); + + // Save the sort order for next time. + if (th.id !== "region") { + let th_id = "file"; // Sort by file if we don't have a column id + let current_direction = direction; + const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE); + if (stored_list) { + ({th_id, direction} = JSON.parse(stored_list)) + } + localStorage.setItem(coverage.INDEX_SORT_STORAGE, JSON.stringify({ + "th_id": th.id, + "direction": current_direction + })); + if (th.id !== th_id || document.getElementById("region")) { + // Sort column has changed, unset sorting by function or class. 
+ localStorage.setItem(coverage.SORTED_BY_REGION, JSON.stringify({ + "by_region": false, + "region_direction": current_direction + })); + } + } + else { + // Sort column has changed to by function or class, remember that. + localStorage.setItem(coverage.SORTED_BY_REGION, JSON.stringify({ + "by_region": true, + "region_direction": direction + })); + } +} + +// Find all the elements with data-shortcut attribute, and use them to assign a shortcut key. +coverage.assign_shortkeys = function () { + document.querySelectorAll("[data-shortcut]").forEach(element => { + document.addEventListener("keypress", event => { + if (event.target.tagName.toLowerCase() === "input") { + return; // ignore keypress from search filter + } + if (event.key === element.dataset.shortcut) { + element.click(); + } + }); + }); +}; + +// Create the events for the filter box. +coverage.wire_up_filter = function () { + // Populate the filter and hide100 inputs if there are saved values for them. + const saved_filter_value = localStorage.getItem(coverage.FILTER_STORAGE); + if (saved_filter_value) { + document.getElementById("filter").value = saved_filter_value; + } + const saved_hide100_value = localStorage.getItem(coverage.HIDE100_STORAGE); + if (saved_hide100_value) { + document.getElementById("hide100").checked = JSON.parse(saved_hide100_value); + } + + // Cache elements. + const table = document.querySelector("table.index"); + const table_body_rows = table.querySelectorAll("tbody tr"); + const no_rows = document.getElementById("no_rows"); + + // Observe filter keyevents. + const filter_handler = (event => { + // Keep running total of each metric, first index contains number of shown rows + const totals = new Array(table.rows[0].cells.length).fill(0); + // Accumulate the percentage as fraction + totals[totals.length - 1] = { "numer": 0, "denom": 0 }; // nosemgrep: eslint.detect-object-injection + + var text = document.getElementById("filter").value; + // Store filter value + localStorage.setItem(coverage.FILTER_STORAGE, text); + const casefold = (text === text.toLowerCase()); + const hide100 = document.getElementById("hide100").checked; + // Store hide value. + localStorage.setItem(coverage.HIDE100_STORAGE, JSON.stringify(hide100)); + + // Hide / show elements. + table_body_rows.forEach(row => { + var show = false; + // Check the text filter. + for (let column = 0; column < totals.length; column++) { + cell = row.cells[column]; + if (cell.classList.contains("name")) { + var celltext = cell.textContent; + if (casefold) { + celltext = celltext.toLowerCase(); + } + if (celltext.includes(text)) { + show = true; + } + } + } + + // Check the "hide covered" filter. 
+ if (show && hide100) { + const [numer, denom] = row.cells[row.cells.length - 1].dataset.ratio.split(" "); + show = (numer !== denom); + } + + if (!show) { + // hide + row.classList.add("hidden"); + return; + } + + // show + row.classList.remove("hidden"); + totals[0]++; + + for (let column = 0; column < totals.length; column++) { + // Accumulate dynamic totals + cell = row.cells[column] // nosemgrep: eslint.detect-object-injection + if (cell.classList.contains("name")) { + continue; + } + if (column === totals.length - 1) { + // Last column contains percentage + const [numer, denom] = cell.dataset.ratio.split(" "); + totals[column]["numer"] += parseInt(numer, 10); // nosemgrep: eslint.detect-object-injection + totals[column]["denom"] += parseInt(denom, 10); // nosemgrep: eslint.detect-object-injection + } + else { + totals[column] += parseInt(cell.textContent, 10); // nosemgrep: eslint.detect-object-injection + } + } + }); + + // Show placeholder if no rows will be displayed. + if (!totals[0]) { + // Show placeholder, hide table. + no_rows.style.display = "block"; + table.style.display = "none"; + return; + } + + // Hide placeholder, show table. + no_rows.style.display = null; + table.style.display = null; + + const footer = table.tFoot.rows[0]; + // Calculate new dynamic sum values based on visible rows. + for (let column = 0; column < totals.length; column++) { + // Get footer cell element. + const cell = footer.cells[column]; // nosemgrep: eslint.detect-object-injection + if (cell.classList.contains("name")) { + continue; + } + + // Set value into dynamic footer cell element. + if (column === totals.length - 1) { + // Percentage column uses the numerator and denominator, + // and adapts to the number of decimal places. + const match = /\.([0-9]+)/.exec(cell.textContent); + const places = match ? match[1].length : 0; + const { numer, denom } = totals[column]; // nosemgrep: eslint.detect-object-injection + cell.dataset.ratio = `${numer} ${denom}`; + // Check denom to prevent NaN if filtered files contain no statements + cell.textContent = denom + ? `${(numer * 100 / denom).toFixed(places)}%` + : `${(100).toFixed(places)}%`; + } + else { + cell.textContent = totals[column]; // nosemgrep: eslint.detect-object-injection + } + } + }); + + document.getElementById("filter").addEventListener("input", debounce(filter_handler)); + document.getElementById("hide100").addEventListener("input", debounce(filter_handler)); + + // Trigger change event on setup, to force filter on page refresh + // (filter value may still be present). + document.getElementById("filter").dispatchEvent(new Event("input")); + document.getElementById("hide100").dispatchEvent(new Event("input")); +}; +coverage.FILTER_STORAGE = "COVERAGE_FILTER_VALUE"; +coverage.HIDE100_STORAGE = "COVERAGE_HIDE100_VALUE"; + +// Set up the click-to-sort columns. 
+coverage.wire_up_sorting = function () { + document.querySelectorAll("[data-sortable] th[aria-sort]").forEach( + th => th.addEventListener("click", e => sortColumn(e.target)) + ); + + // Look for a localStorage item containing previous sort settings: + let th_id = "file", direction = "ascending"; + const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE); + if (stored_list) { + ({th_id, direction} = JSON.parse(stored_list)); + } + let by_region = false, region_direction = "ascending"; + const sorted_by_region = localStorage.getItem(coverage.SORTED_BY_REGION); + if (sorted_by_region) { + ({ + by_region, + region_direction + } = JSON.parse(sorted_by_region)); + } + + const region_id = "region"; + if (by_region && document.getElementById(region_id)) { + direction = region_direction; + } + // If we are in a page that has a column with id of "region", sort on + // it if the last sort was by function or class. + let th; + if (document.getElementById(region_id)) { + th = document.getElementById(by_region ? region_id : th_id); + } + else { + th = document.getElementById(th_id); + } + th.setAttribute("aria-sort", direction === "ascending" ? "descending" : "ascending"); + th.click() +}; + +coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2"; +coverage.SORTED_BY_REGION = "COVERAGE_SORT_REGION"; + +// Loaded on index.html +coverage.index_ready = function () { + coverage.assign_shortkeys(); + coverage.wire_up_filter(); + coverage.wire_up_sorting(); + + on_click(".button_prev_file", coverage.to_prev_file); + on_click(".button_next_file", coverage.to_next_file); + + on_click(".button_show_hide_help", coverage.show_hide_help); +}; + +// -- pyfile stuff -- + +coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS"; + +coverage.pyfile_ready = function () { + // If we're directed to a particular line number, highlight the line. + var frag = location.hash; + if (frag.length > 2 && frag[1] === "t") { + document.querySelector(frag).closest(".n").classList.add("highlight"); + coverage.set_sel(parseInt(frag.substr(2), 10)); + } + else { + coverage.set_sel(0); + } + + on_click(".button_toggle_run", coverage.toggle_lines); + on_click(".button_toggle_mis", coverage.toggle_lines); + on_click(".button_toggle_exc", coverage.toggle_lines); + on_click(".button_toggle_par", coverage.toggle_lines); + + on_click(".button_next_chunk", coverage.to_next_chunk_nicely); + on_click(".button_prev_chunk", coverage.to_prev_chunk_nicely); + on_click(".button_top_of_page", coverage.to_top); + on_click(".button_first_chunk", coverage.to_first_chunk); + + on_click(".button_prev_file", coverage.to_prev_file); + on_click(".button_next_file", coverage.to_next_file); + on_click(".button_to_index", coverage.to_index); + + on_click(".button_show_hide_help", coverage.show_hide_help); + + coverage.filters = undefined; + try { + coverage.filters = localStorage.getItem(coverage.LINE_FILTERS_STORAGE); + } catch(err) {} + + if (coverage.filters) { + coverage.filters = JSON.parse(coverage.filters); + } + else { + coverage.filters = {run: false, exc: true, mis: true, par: true}; + } + + for (cls in coverage.filters) { + coverage.set_line_visibilty(cls, coverage.filters[cls]); // nosemgrep: eslint.detect-object-injection + } + + coverage.assign_shortkeys(); + coverage.init_scroll_markers(); + coverage.wire_up_sticky_header(); + + document.querySelectorAll("[id^=ctxs]").forEach( + cbox => cbox.addEventListener("click", coverage.expand_contexts) + ); + + // Rebuild scroll markers when the window height changes. 
+ window.addEventListener("resize", coverage.build_scroll_markers); +}; + +coverage.toggle_lines = function (event) { + const btn = event.target.closest("button"); + const category = btn.value + const show = !btn.classList.contains("show_" + category); + coverage.set_line_visibilty(category, show); + coverage.build_scroll_markers(); + coverage.filters[category] = show; + try { + localStorage.setItem(coverage.LINE_FILTERS_STORAGE, JSON.stringify(coverage.filters)); + } catch(err) {} +}; + +coverage.set_line_visibilty = function (category, should_show) { + const cls = "show_" + category; + const btn = document.querySelector(".button_toggle_" + category); + if (btn) { + if (should_show) { + document.querySelectorAll("#source ." + category).forEach(e => e.classList.add(cls)); + btn.classList.add(cls); + } + else { + document.querySelectorAll("#source ." + category).forEach(e => e.classList.remove(cls)); + btn.classList.remove(cls); + } + } +}; + +// Return the nth line div. +coverage.line_elt = function (n) { + return document.getElementById("t" + n)?.closest("p"); +}; + +// Set the selection. b and e are line numbers. +coverage.set_sel = function (b, e) { + // The first line selected. + coverage.sel_begin = b; + // The next line not selected. + coverage.sel_end = (e === undefined) ? b+1 : e; +}; + +coverage.to_top = function () { + coverage.set_sel(0, 1); + coverage.scroll_window(0); +}; + +coverage.to_first_chunk = function () { + coverage.set_sel(0, 1); + coverage.to_next_chunk(); +}; + +coverage.to_prev_file = function () { + window.location = document.getElementById("prevFileLink").href; +} + +coverage.to_next_file = function () { + window.location = document.getElementById("nextFileLink").href; +} + +coverage.to_index = function () { + location.href = document.getElementById("indexLink").href; +} + +coverage.show_hide_help = function () { + const helpCheck = document.getElementById("help_panel_state") + helpCheck.checked = !helpCheck.checked; +} + +// Return a string indicating what kind of chunk this line belongs to, +// or null if not a chunk. +coverage.chunk_indicator = function (line_elt) { + const classes = line_elt?.className; + if (!classes) { + return null; + } + const match = classes.match(/\bshow_\w+\b/); + if (!match) { + return null; + } + return match[0]; +}; + +coverage.to_next_chunk = function () { + const c = coverage; + + // Find the start of the next colored chunk. + var probe = c.sel_end; + var chunk_indicator, probe_line; + while (true) { + probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + chunk_indicator = c.chunk_indicator(probe_line); + if (chunk_indicator) { + break; + } + probe++; + } + + // There's a next chunk, `probe` points to it. + var begin = probe; + + // Find the end of this chunk. + var next_indicator = chunk_indicator; + while (next_indicator === chunk_indicator) { + probe++; + probe_line = c.line_elt(probe); + next_indicator = c.chunk_indicator(probe_line); + } + c.set_sel(begin, probe); + c.show_selection(); +}; + +coverage.to_prev_chunk = function () { + const c = coverage; + + // Find the end of the prev colored chunk. + var probe = c.sel_begin-1; + var probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + var chunk_indicator = c.chunk_indicator(probe_line); + while (probe > 1 && !chunk_indicator) { + probe--; + probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + chunk_indicator = c.chunk_indicator(probe_line); + } + + // There's a prev chunk, `probe` points to its last line. 
+ var end = probe+1; + + // Find the beginning of this chunk. + var prev_indicator = chunk_indicator; + while (prev_indicator === chunk_indicator) { + probe--; + if (probe <= 0) { + return; + } + probe_line = c.line_elt(probe); + prev_indicator = c.chunk_indicator(probe_line); + } + c.set_sel(probe+1, end); + c.show_selection(); +}; + +// Returns 0, 1, or 2: how many of the two ends of the selection are on +// the screen right now? +coverage.selection_ends_on_screen = function () { + if (coverage.sel_begin === 0) { + return 0; + } + + const begin = coverage.line_elt(coverage.sel_begin); + const end = coverage.line_elt(coverage.sel_end-1); + + return ( + (checkVisible(begin) ? 1 : 0) + + (checkVisible(end) ? 1 : 0) + ); +}; + +coverage.to_next_chunk_nicely = function () { + if (coverage.selection_ends_on_screen() === 0) { + // The selection is entirely off the screen: + // Set the top line on the screen as selection. + + // This will select the top-left of the viewport + // As this is most likely the span with the line number we take the parent + const line = document.elementFromPoint(0, 0).parentElement; + if (line.parentElement !== document.getElementById("source")) { + // The element is not a source line but the header or similar + coverage.select_line_or_chunk(1); + } + else { + // We extract the line number from the id + coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10)); + } + } + coverage.to_next_chunk(); +}; + +coverage.to_prev_chunk_nicely = function () { + if (coverage.selection_ends_on_screen() === 0) { + // The selection is entirely off the screen: + // Set the lowest line on the screen as selection. + + // This will select the bottom-left of the viewport + // As this is most likely the span with the line number we take the parent + const line = document.elementFromPoint(document.documentElement.clientHeight-1, 0).parentElement; + if (line.parentElement !== document.getElementById("source")) { + // The element is not a source line but the header or similar + coverage.select_line_or_chunk(coverage.lines_len); + } + else { + // We extract the line number from the id + coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10)); + } + } + coverage.to_prev_chunk(); +}; + +// Select line number lineno, or if it is in a colored chunk, select the +// entire chunk +coverage.select_line_or_chunk = function (lineno) { + var c = coverage; + var probe_line = c.line_elt(lineno); + if (!probe_line) { + return; + } + var the_indicator = c.chunk_indicator(probe_line); + if (the_indicator) { + // The line is in a highlighted chunk. + // Search backward for the first line. + var probe = lineno; + var indicator = the_indicator; + while (probe > 0 && indicator === the_indicator) { + probe--; + probe_line = c.line_elt(probe); + if (!probe_line) { + break; + } + indicator = c.chunk_indicator(probe_line); + } + var begin = probe + 1; + + // Search forward for the last line. 
+ probe = lineno; + indicator = the_indicator; + while (indicator === the_indicator) { + probe++; + probe_line = c.line_elt(probe); + indicator = c.chunk_indicator(probe_line); + } + + coverage.set_sel(begin, probe); + } + else { + coverage.set_sel(lineno); + } +}; + +coverage.show_selection = function () { + // Highlight the lines in the chunk + document.querySelectorAll("#source .highlight").forEach(e => e.classList.remove("highlight")); + for (let probe = coverage.sel_begin; probe < coverage.sel_end; probe++) { + coverage.line_elt(probe).querySelector(".n").classList.add("highlight"); + } + + coverage.scroll_to_selection(); +}; + +coverage.scroll_to_selection = function () { + // Scroll the page if the chunk isn't fully visible. + if (coverage.selection_ends_on_screen() < 2) { + const element = coverage.line_elt(coverage.sel_begin); + coverage.scroll_window(element.offsetTop - 60); + } +}; + +coverage.scroll_window = function (to_pos) { + window.scroll({top: to_pos, behavior: "smooth"}); +}; + +coverage.init_scroll_markers = function () { + // Init some variables + coverage.lines_len = document.querySelectorAll("#source > p").length; + + // Build html + coverage.build_scroll_markers(); +}; + +coverage.build_scroll_markers = function () { + const temp_scroll_marker = document.getElementById("scroll_marker") + if (temp_scroll_marker) temp_scroll_marker.remove(); + // Don't build markers if the window has no scroll bar. + if (document.body.scrollHeight <= window.innerHeight) { + return; + } + + const marker_scale = window.innerHeight / document.body.scrollHeight; + const line_height = Math.min(Math.max(3, window.innerHeight / coverage.lines_len), 10); + + let previous_line = -99, last_mark, last_top; + + const scroll_marker = document.createElement("div"); + scroll_marker.id = "scroll_marker"; + document.getElementById("source").querySelectorAll( + "p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par" + ).forEach(element => { + const line_top = Math.floor(element.offsetTop * marker_scale); + const line_number = parseInt(element.querySelector(".n a").id.substr(1)); + + if (line_number === previous_line + 1) { + // If this solid missed block just make previous mark higher. + last_mark.style.height = `${line_top + line_height - last_top}px`; + } + else { + // Add colored line in scroll_marker block. 
+ last_mark = document.createElement("div");
+ last_mark.id = `m${line_number}`;
+ last_mark.classList.add("marker");
+ last_mark.style.height = `${line_height}px`;
+ last_mark.style.top = `${line_top}px`;
+ scroll_marker.append(last_mark);
+ last_top = line_top;
+ }
+
+ previous_line = line_number;
+ });
+
+ // Append last to prevent layout calculation
+ document.body.append(scroll_marker);
+};
+
+coverage.wire_up_sticky_header = function () {
+ const header = document.querySelector("header");
+ const header_bottom = (
+ header.querySelector(".content h2").getBoundingClientRect().top -
+ header.getBoundingClientRect().top
+ );
+
+ function updateHeader() {
+ if (window.scrollY > header_bottom) {
+ header.classList.add("sticky");
+ }
+ else {
+ header.classList.remove("sticky");
+ }
+ }
+
+ window.addEventListener("scroll", updateHeader);
+ updateHeader();
+};
+
+coverage.expand_contexts = function (e) {
+ var ctxs = e.target.parentNode.querySelector(".ctxs");
+
+ if (!ctxs.classList.contains("expanded")) {
+ var ctxs_text = ctxs.textContent;
+ var width = Number(ctxs_text[0]);
+ ctxs.textContent = "";
+ for (var i = 1; i < ctxs_text.length; i += width) {
+ key = ctxs_text.substring(i, i + width).trim();
+ ctxs.appendChild(document.createTextNode(contexts[key]));
+ ctxs.appendChild(document.createElement("br"));
+ }
+ ctxs.classList.add("expanded");
+ }
+};
+
+document.addEventListener("DOMContentLoaded", () => {
+ if (document.body.classList.contains("indexfile")) {
+ coverage.index_ready();
+ }
+ else {
+ coverage.pyfile_ready();
+ }
+});

diff --git a/_static/htmlcov/favicon_32_cb_58284776.png b/_static/htmlcov/favicon_32_cb_58284776.png
new file mode 100644
index 0000000000000000000000000000000000000000..8649f0475d8d20793b2ec431fe25a186a414cf10
GIT binary patch
literal 1732
[1732 bytes of binary PNG data omitted]
literal 0
HcmV?d00001

diff --git a/_static/htmlcov/function_index.html b/_static/htmlcov/function_index.html
new file mode 100644
--- /dev/null
+++ b/_static/htmlcov/function_index.html
@@ -0,0 +1,2099 @@
Coverage report: 74%
Files | Functions | Classes
coverage.py v7.6.7, created at 2024-11-20 18:11 +0000
File | function | statements | missing | excluded | coverage
sparc/__init__.py | _missing_deps_func | 1 | 1 | 0 | 0%
sparc/__init__.py | SPARCMissingDeps.__init__ | 1 | 1 | 0 | 0%
sparc/__init__.py | SPARCMissingDeps.__getattr__ | 1 | 1 | 0 | 0%
sparc/__init__.py | (no function) | 21 | 6 | 0 | 71%
sparc/api.py | SparcAPI.__init__ | 10 | 0 | 0 | 100%
sparc/api.py | SparcAPI.get_parameter_dict | 4 | 0 | 0 | 100%
sparc/api.py | SparcAPI.help_info | 3 | 0 | 0 | 100%
sparc/api.py | SparcAPI.validate_input | 43 | 6 | 0 | 86%
sparc/api.py | SparcAPI.convert_string_to_value | 36 | 8 | 0 | 78%
sparc/api.py | SparcAPI.convert_value_to_string | 21 | 6 | 0 | 71%
sparc/api.py | _array_to_string | 10 | 0 | 0 | 100%
sparc/api.py | (no function) | 16 | 0 | 0 | 100%
sparc/calculator.py | SPARC.__init__ | 22 | 0 | 0 | 100%
sparc/calculator.py | SPARC._compare_system_state | 12 | 2 | 0 | 83%
sparc/calculator.py | SPARC._compare_calc_parameters | 20 | 7 | 0 | 65%
sparc/calculator.py | SPARC._dump_system_state | 2 | 0 | 0 | 100%
sparc/calculator.py | SPARC.ensure_socket | 13 | 2 | 0 | 85%
sparc/calculator.py | SPARC.__enter__ | 4 | 4 | 0 | 0%
sparc/calculator.py | SPARC.__exit__ | 3 | 3 | 0 | 0%
sparc/calculator.py | SPARC.use_socket | 1 | 0 | 0 | 100%
sparc/calculator.py | SPARC.socket_mode | 7 | 3 | 0 | 57%
sparc/calculator.py | SPARC._indir | 1 | 0 | 0 | 100%
sparc/calculator.py | SPARC.log | 1 | 0 | 0 | 100%
sparc/calculator.py | SPARC.log | 4 | 1 | 0 | 75%
sparc/calculator.py | SPARC.in_socket_filename | 3 | 3 | 0 | 0%
sparc/calculator.py | SPARC.directory | 3 | 0 | 0 | 100%
sparc/calculator.py | SPARC.directory | 4 | 0 | 0 | 100%
sparc/calculator.py | SPARC.label | 3 | 1 | 0 | 67%
sparc/calculator.py | SPARC.label | 4 | 4 | 0 | 0%
sparc/calculator.py | SPARC.sort | 3 | 3 | 0 | 0%
sparc/calculator.py | SPARC.resort | 3 | 3 | 0 | 0%
sparc/calculator.py | SPARC.check_state | 13 | 0 | 0 | 100%
sparc/calculator.py | SPARC._make_command | 15 | 11 | 0 | 27%
sparc/calculator.py | SPARC.check_input_atoms | 21 | 1 | 0 | 95%
sparc/calculator.py | SPARC.calculate | 21 | 8 | 0 | 62%
sparc/calculator.py | SPARC._calculate_as_server | 9 | 9 | 0 | 0%
sparc/calculator.py | SPARC._calculate_with_socket | 28 | 25 | 0 | 11%
sparc/calculator.py | SPARC.get_stress | 3 | 3 | 0 | 0%
sparc/calculator.py | SPARC._check_input_exclusion | 11 | 1 | 0 | 91%
sparc/calculator.py | SPARC._check_minimal_input | 5 | 2 | 0 | 60%
sparc/calculator.py | SPARC._generate_inpt_state | 10 | 1 | 0 | 90%
sparc/calculator.py | SPARC.write_input | 12 | 0 | 0 | 100%
sparc/calculator.py | SPARC.execute | 16 | 6 | 0 | 62%
sparc/calculator.py | SPARC.close | 17 | 17 | 0 | 0%
sparc/calculator.py | SPARC._send_mpi_signal | 22 | 22 | 0 | 0%
sparc/calculator.py | SPARC._reset_process | 4 | 4 | 0 | 0%
sparc/calculator.py | SPARC.pid | 3 | 3 | 0 | 0%
sparc/calculator.py | SPARC.raw_results | 1 | 0 | 0 | 100%
sparc/calculator.py | SPARC.raw_results | 2 | 2 | 0 | 0%
sparc/calculator.py | SPARC.read_results | 3 | 0 | 0 | 100%
sparc/calculator.py | SPARC._restart | 14 | 12 | 0 | 14%
sparc/calculator.py | SPARC.get_fermi_level | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.detect_sparc_version | 19 | 7 | 0 | 63%
sparc/calculator.py | SPARC.run_client | 3 | 3 | 0 | 0%
sparc/calculator.py | SPARC.detect_socket_compatibility | 11 | 3 | 0 | 73%
sparc/calculator.py | SPARC.set | 24 | 7 | 0 | 71%
sparc/calculator.py | SPARC._sanitize_kwargs | 25 | 2 | 0 | 92%
sparc/calculator.py | SPARC._convert_special_params | 63 | 20 | 0 | 68%
sparc/calculator.py | SPARC.print_sysinfo | 8 | 2 | 0 | 75%
sparc/calculator.py | SPARC.interpret_grid_input | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.interpret_kpoint_input | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.interpret_downsampling_input | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.interpret_kpoint_shift | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.get_pseudopotential_directory | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.get_nstates | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.setup_parallel_env | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.generate_command | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.estimate_memory | 23 | 23 | 0 | 0%
sparc/calculator.py | SPARC.get_scf_steps | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.get_geometric_steps | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.get_runtime | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.get_fermi_level | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.concatinate_output | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.read_line | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.parse_output | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.parse_relax | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.parse_MD | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.parse_input_args | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.recover_index_order_from_ion_file | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.atoms_dict | 1 | 1 | 0 | 0%
sparc/calculator.py | SPARC.dict_atoms | 1 | 1 | 0 | 0%
sparc/calculator.py | (no function) | 128 | 0 | 0 | 100%
sparc/common.py | (no function) | 4 | 0 | 0 | 100%
sparc/docparser.py | SparcDocParser.__init__ | 10 | 0 | 0 | 100%
sparc/docparser.py | SparcDocParser.find_main_file | 4 | 0 | 0 | 100%
sparc/docparser.py | SparcDocParser.get_include_files | 12 | 0 | 0 | 100%
sparc/docparser.py | SparcDocParser.parse_version | 27 | 6 | 0 | 78%
sparc/docparser.py | SparcDocParser.__parse_parameter_from_frame | 22 | 0 | 0 | 100%
sparc/docparser.py | SparcDocParser.__parse_frames_from_text | 3 | 0 | 0 | 100%
sparc/docparser.py | SparcDocParser.__parse_intro_file | 22 | 1 | 0 | 95%
sparc/docparser.py | SparcDocParser.__parse_all_included_files | 12 | 0 | 0 | 100%
sparc/docparser.py | SparcDocParser.parse_parameters | 17 | 2 | 0 | 88%
sparc/docparser.py | SparcDocParser.postprocess | 4 | 0 | 0 | 100%
sparc/docparser.py | SparcDocParser.to_dict | 7 | 0 | 0 | 100%
sparc/docparser.py | SparcDocParser.json_from_directory | 15 | 9 | 0 | 40%
sparc/docparser.py | SparcDocParser.json_from_repo | 14 | 14 | 0 | 0%
sparc/docparser.py | convert_tex_parameter | 1 | 0 | 0 | 100%
sparc/docparser.py | convert_tex_example | 10 | 0 | 0 | 100%
sparc/docparser.py | convert_tex_default | 21 | 1 | 0 | 95%
sparc/docparser.py | convert_comment | 10 | 0 | 0 | 100%
sparc/docparser.py | text2value | 33 | 0 | 0 | 100%
sparc/docparser.py | is_array | 4 | 0 | 0 | 100%
sparc/docparser.py | contain_only_bool | 8 | 0 | 0 | 100%
sparc/docparser.py | sanitize_description | 8 | 0 | 0 | 100%
sparc/docparser.py | sanitize_default | 6 | 0 | 0 | 100%
sparc/docparser.py | sanitize_type | 38 | 5 | 0 | 87%
sparc/docparser.py | (no function) | 55 | 19 | 0 | 65%
sparc/download_data.py | download_psp | 22 | 4 | 0 | 82%
sparc/download_data.py | checksum_all | 10 | 0 | 0 | 100%
sparc/download_data.py | is_psp_download_complete | 1 | 0 | 0 | 100%
sparc/download_data.py | (no function) | 17 | 2 | 0 | 88%
sparc/io.py | SparcBundle.__init__ | 12 | 0 | 0 | 100%
sparc/io.py | SparcBundle._find_files | 1 | 1 | 0 | 0%
sparc/io.py | SparcBundle._make_label | 17 | 0 | 0 | 100%
sparc/io.py | SparcBundle.__find_psp_dir | 12 | 4 | 0 | 67%
sparc/io.py | SparcBundle._indir | 7 | 0 | 0 | 100%
sparc/io.py | SparcBundle._read_ion_and_inpt | 5 | 0 | 0 | 100%
sparc/io.py | SparcBundle._write_ion_and_inpt | 26 | 0 | 0 | 100%
sparc/io.py | SparcBundle.read_raw_results | 23 | 0 | 0 | 100%
sparc/io.py | SparcBundle._read_results_from_index | 19 | 1 | 0 | 95%
sparc/io.py | SparcBundle.convert_to_ase | 20 | 0 | 0 | 100%
sparc/io.py | SparcBundle._make_singlepoint | 12 | 0 | 0 | 100%
sparc/io.py | SparcBundle._extract_static_results | 40 | 6 | 0 | 85%
sparc/io.py | SparcBundle._extract_geopt_results | 49 | 10 | 0 | 80%
sparc/io.py | SparcBundle._extract_aimd_results | 27 | 4 | 0 | 85%
sparc/io.py | SparcBundle.sort | 6 | 6 | 0 | 0%
sparc/io.py | SparcBundle.resort | 6 | 1 | 0 | 83%
sparc/io.py | SparcBundle.read_psp_info | 14 | 0 | 0 | 100%
sparc/io.py | read_sparc | 4 | 0 | 0 | 100%
sparc/io.py | write_sparc | 10 | 0 | 0 | 100%
sparc/io.py | read_sparc_ion | 5 | 5 | 0 | 0%
sparc/io.py | write_sparc_ion | 6 | 6 | 0 | 0%
sparc/io.py | read_sparc_static | 8 | 8 | 0 | 0%
sparc/io.py | read_sparc_geopt | 8 | 8 | 0 | 0%
sparc/io.py | read_sparc_aimd | 8 | 8 | 0 | 0%
sparc/io.py | __register_new_filetype | 8 | 8 | 0 | 0%
sparc/io.py | __register_new_filetype._new_filetype | 8 | 8 | 0 | 0%
sparc/io.py | register_ase_io_sparc | 35 | 35 | 0 | 0%
sparc/io.py | (no function) | 79 | 11 | 0 | 86%
sparc/quicktest.py | BaseTest.__init__ | 4 | 0 | 0 | 100%
sparc/quicktest.py | BaseTest.dislay_name | 1 | 1 | 0 | 0%
sparc/quicktest.py | BaseTest.display_docstring | 22 | 3 | 0 | 86%
sparc/quicktest.py | BaseTest.make_test | 1 | 1 | 0 | 0%
sparc/quicktest.py | BaseTest.run_test | 10 | 4 | 0 | 60%
sparc/quicktest.py | ImportTest.make_test | 6 | 1 | 0 | 83%
sparc/quicktest.py | PspTest.make_test | 28 | 9 | 0 | 68%
sparc/quicktest.py | ApiTest.make_test | 13 | 5 | 0 | 62%
sparc/quicktest.py | CommandTest.make_test | 29 | 13 | 0 | 55%
sparc/quicktest.py | FileIOCalcTest.make_test | 14 | 3 | 0 | 79%
sparc/quicktest.py | SocketCalcTest.make_test | 21 | 3 | 0 | 86%
sparc/quicktest.py | main | 35 | 0 | 0 | 100%
sparc/quicktest.py | (no function) | 32 | 1 | 0 | 97%
sparc/socketio.py | generate_random_socket_name | 2 | 0 | 0 | 100%
sparc/socketio.py | SPARCProtocol.send_string | 8 | 8 | 0 | 0%
sparc/socketio.py | SPARCProtocol.send_object | 14 | 14 | 0 | 0%
sparc/socketio.py | SPARCProtocol.recv_object | 14 | 14 | 0 | 0%
sparc/socketio.py | SPARCProtocol.send_param | 7 | 7 | 0 | 0%
sparc/socketio.py | SPARCProtocol.sendinit | 8 | 8 | 0 | 0%
sparc/socketio.py | SPARCProtocol.recvinit | 1 | 1 | 0 | 0%
sparc/socketio.py | SPARCProtocol.calculate_new_protocol | 19 | 19 | 0 | 0%
sparc/socketio.py | SPARCSocketServer.__init__ | 7 | 1 | 0 | 86%
sparc/socketio.py | SPARCSocketServer.socket_filename | 1 | 1 | 0 | 0%
sparc/socketio.py | SPARCSocketServer.proc | 3 | 1 | 0 | 67%
sparc/socketio.py | SPARCSocketServer.proc | 1 | 0 | 0 | 100%
sparc/socketio.py | SPARCSocketServer._accept | 7 | 7 | 0 | 0%
sparc/socketio.py | SPARCSocketServer.send_atoms_and_params | 5 | 5 | 0 | 0%
sparc/socketio.py | SPARCSocketServer.calculate_origin_protocol | 4 | 4 | 0 | 0%
sparc/socketio.py | SPARCSocketServer.calculate_new_protocol | 4 | 4 | 0 | 0%
sparc/socketio.py | SPARCSocketClient.__init__ | 4 | 4 | 0 | 0%
sparc/socketio.py | SPARCSocketClient.calculate | 3 | 3 | 0 | 0%
sparc/socketio.py | SPARCSocketClient.irun | 48 | 48 | 0 | 0%
sparc/socketio.py | SPARCSocketClient.run | 4 | 4 | 0 | 0%
sparc/socketio.py | (no function) | 35 | 0 | 0 | 100%
sparc/sparc_parsers/__init__.py | (no function) | 0 | 0 | 0 | 100%
sparc/sparc_parsers/aimd.py | _read_aimd | 7 | 0 | 0 | 100%
sparc/sparc_parsers/aimd.py | _read_aimd_step | 85 | 10 | 0 | 88%
sparc/sparc_parsers/aimd.py | _write_aimd | 1 | 1 | 0 | 0%
sparc/sparc_parsers/aimd.py | (no function) | 11 | 0 | 0 | 100%
sparc/sparc_parsers/atoms.py | atoms_to_dict | 48 | 0 | 0 | 100%
sparc/sparc_parsers/atoms.py | dict_to_atoms | 39 | 2 | 0 | 95%
sparc/sparc_parsers/atoms.py | count_symbols | 12 | 0 | 0 | 100%
sparc/sparc_parsers/atoms.py | constraints_from_relax | 23 | 0 | 0 | 100%
sparc/sparc_parsers/atoms.py | relax_from_constraint | 16 | 0 | 0 | 100%
sparc/sparc_parsers/atoms.py | relax_from_all_constraints | 9 | 0 | 0 | 100%
sparc/sparc_parsers/atoms.py | modify_atoms_bc | 16 | 1 | 0 | 94%
sparc/sparc_parsers/atoms.py | atoms_bc_to_sparc | 14 | 2 | 0 | 86%
sparc/sparc_parsers/atoms.py | (no function) | 18 | 0 | 0 | 100%
sparc/sparc_parsers/geopt.py | _read_geopt | 6 | 0 | 0 | 100%
sparc/sparc_parsers/geopt.py | _read_geopt_step | 57 | 8 | 0 | 86%
sparc/sparc_parsers/geopt.py | _write_geopt | 1 | 1 | 0 | 0%
sparc/sparc_parsers/geopt.py | (no function) | 12 | 0 | 0 | 100%
sparc/sparc_parsers/inpt.py | _read_inpt | 4 | 0 | 0 | 100%
sparc/sparc_parsers/inpt.py | _write_inpt | 22 | 2 | 0 | 91%
sparc/sparc_parsers/inpt.py | _inpt_cell_to_ase_cell | 17 | 1 | 0 | 94%
sparc/sparc_parsers/inpt.py | (no function) | 11 | 0 | 0 | 100%
sparc/sparc_parsers/ion.py | InvalidSortingComment.__init__ | 1 | 0 | 0 | 100%
sparc/sparc_parsers/ion.py | _read_ion | 6 | 0 | 0 | 100%
sparc/sparc_parsers/ion.py | _write_ion | 36 | 1 | 0 | 97%
sparc/sparc_parsers/ion.py | _ion_coord_to_ase_pos | 15 | 1 | 0 | 93%
sparc/sparc_parsers/ion.py | _read_sort_comment | 26 | 1 | 0 | 96%
sparc/sparc_parsers/ion.py | (no function) | 16 | 0 | 0 | 100%
sparc/sparc_parsers/out.py | _read_out | 7 | 0 | 0 | 100%
sparc/sparc_parsers/out.py | _read_sparc_version | 15 | 5 | 0 | 67%
sparc/sparc_parsers/out.py | _read_input_params | 3 | 0 | 0 | 100%
sparc/sparc_parsers/out.py | _read_run_info | 14 | 0 | 0 | 100%
sparc/sparc_parsers/out.py | _read_scfs | 75 | 6 | 0 | 92%
sparc/sparc_parsers/out.py | _get_block_text | 7 | 0 | 0 | 100%
sparc/sparc_parsers/out.py | _write_out | 1 | 1 | 0 | 0%
sparc/sparc_parsers/out.py | (no function) | 18 | 0 | 0 | 100%
sparc/sparc_parsers/pseudopotential.py | NotPSP8Format.__init__ | 1 | 0 | 0 | 100%
sparc/sparc_parsers/pseudopotential.py | NoMatchingPseudopotential.__init__ | 1 | 0 | 0 | 100%
sparc/sparc_parsers/pseudopotential.py | MultiplePseudoPotentialFiles.__init__ | 1 | 0 | 0 | 100%
sparc/sparc_parsers/pseudopotential.py | parse_psp8_header | 29 | 0 | 0 | 100%
sparc/sparc_parsers/pseudopotential.py | infer_pseudo_path | 18 | 0 | 0 | 100%
sparc/sparc_parsers/pseudopotential.py | copy_psp_file | 10 | 0 | 0 | 100%
sparc/sparc_parsers/pseudopotential.py | find_pseudo_path | 15 | 0 | 0 | 100%
sparc/sparc_parsers/pseudopotential.py | (no function) | 17 | 0 | 0 | 100%
sparc/sparc_parsers/static.py | _read_static | 8 | 0 | 0 | 100%
sparc/sparc_parsers/static.py | _read_static_block | 36 | 6 | 0 | 83%
sparc/sparc_parsers/static.py | _read_static_step | 43 | 3 | 0 | 93%
sparc/sparc_parsers/static.py | _add_cell_info | 14 | 1 | 0 | 93%
sparc/sparc_parsers/static.py | _write_static | 1 | 1 | 0 | 0%
sparc/sparc_parsers/static.py | (no function) | 13 | 0 | 0 | 100%
sparc/sparc_parsers/utils.py | get_label | 1 | 1 | 0 | 0%
sparc/sparc_parsers/utils.py | strip_comments | 9 | 0 | 0 | 100%
sparc/sparc_parsers/utils.py | bisect_and_strip | 2 | 0 | 0 | 100%
sparc/sparc_parsers/utils.py | read_block_input | 31 | 0 | 0 | 100%
sparc/sparc_parsers/utils.py | make_reverse_mapping | 4 | 0 | 0 | 100%
sparc/sparc_parsers/utils.py | (no function) | 6 | 0 | 0 | 100%
sparc/utils.py | deprecated | 2 | 0 | 0 | 100%
sparc/utils.py | deprecated.decorator | 2 | 0 | 0 | 100%
sparc/utils.py | deprecated.decorator.new_func | 2 | 2 | 0 | 0%
sparc/utils.py | compare_dict | 6 | 2 | 0 | 67%
sparc/utils.py | string2index | 14 | 5 | 0 | 64%
sparc/utils.py | _find_default_sparc | 13 | 13 | 0 | 0%
sparc/utils.py | h2gpts | 5 | 5 | 0 | 0%
sparc/utils.py | cprint | 13 | 2 | 0 | 85%
sparc/utils.py | locate_api | 21 | 5 | 0 | 76%
sparc/utils.py | time_limit | 6 | 6 | 0 | 0%
sparc/utils.py | time_limit.signal_handler | 1 | 1 | 0 | 0%
sparc/utils.py | monitor_process | 10 | 10 | 0 | 0%
sparc/utils.py | monitor_process.signal_handler | 1 | 1 | 0 | 0%
sparc/utils.py | monitor_process.check_process | 8 | 8 | 0 | 0%
sparc/utils.py | _find_mpi_process | 32 | 32 | 0 | 0%
sparc/utils.py | _get_slurm_jobid | 4 | 4 | 0 | 0%
sparc/utils.py | _locate_slurm_step | 24 | 24 | 0 | 0%
sparc/utils.py | _slurm_signal | 9 | 9 | 0 | 0%
sparc/utils.py | _run_process | 9 | 9 | 0 | 0%
sparc/utils.py | (no function) | 39 | 0 | 0 | 100%
Total | | 3293 | 864 | 0 | 74%
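
As with the class index, the script added above makes this table sortable and remembers the last sort across visits in localStorage, under the keys it defines (COVERAGE_INDEX_SORT_2 and COVERAGE_SORT_REGION). A sketch of inspecting that saved state in the browser console — the object shape is exactly what sortColumn() writes above:

    // Sketch: read back the persisted sort order for the index pages.
    const savedSort = JSON.parse(localStorage.getItem("COVERAGE_INDEX_SORT_2") || "{}");
    console.log(savedSort.th_id, savedSort.direction);  // e.g. "file", "ascending"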

diff --git a/_static/htmlcov/index.html b/_static/htmlcov/index.html
new file mode 100644
index 00000000..4a6a2761
--- /dev/null
+++ b/_static/htmlcov/index.html
@@ -0,0 +1,244 @@
Coverage report: 74%
Files | Functions | Classes
coverage.py v7.6.7, created at 2024-11-20 18:11 +0000
File | statements | missing | excluded | coverage
sparc/__init__.py | 24 | 9 | 0 | 62%
sparc/api.py | 143 | 20 | 0 | 86%
sparc/calculator.py | 669 | 252 | 0 | 62%
sparc/common.py | 4 | 0 | 0 | 100%
sparc/docparser.py | 363 | 57 | 0 | 84%
sparc/download_data.py | 50 | 6 | 0 | 88%
sparc/io.py | 475 | 130 | 0 | 73%
sparc/quicktest.py | 216 | 44 | 0 | 80%
sparc/socketio.py | 199 | 153 | 0 | 23%
sparc/sparc_parsers/__init__.py | 0 | 0 | 0 | 100%
sparc/sparc_parsers/aimd.py | 104 | 11 | 0 | 89%
sparc/sparc_parsers/atoms.py | 195 | 5 | 0 | 97%
sparc/sparc_parsers/geopt.py | 76 | 9 | 0 | 88%
sparc/sparc_parsers/inpt.py | 54 | 3 | 0 | 94%
sparc/sparc_parsers/ion.py | 100 | 3 | 0 | 97%
sparc/sparc_parsers/out.py | 140 | 12 | 0 | 91%
sparc/sparc_parsers/pseudopotential.py | 92 | 0 | 0 | 100%
sparc/sparc_parsers/static.py | 115 | 11 | 0 | 90%
sparc/sparc_parsers/utils.py | 53 | 1 | 0 | 98%
sparc/utils.py | 221 | 138 | 0 | 38%
Total | 3293 | 864 | 0 | 74%
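
The per-file numbers above are duplicated in machine-readable form in status.json, added near the end of this patch; summing its per-file nums reproduces the Total row. A sketch, assuming Node.js with this htmlcov directory as the working directory (Node is not part of the patch):

    // Sketch: recompute the report totals from status.json.
    const status = require("./status.json");
    let statements = 0, missing = 0;
    for (const entry of Object.values(status.files)) {
        statements += entry.index.nums.n_statements;
        missing += entry.index.nums.n_missing;
    }
    console.log(statements, missing);  // 3293 864 -> the 74% overall figure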

diff --git a/_static/htmlcov/keybd_closed_cb_ce680311.png b/_static/htmlcov/keybd_closed_cb_ce680311.png
new file mode 100644
index 0000000000000000000000000000000000000000..ba119c47df81ed2bbd27a06988abf700139c4f99
GIT binary patch
literal 9004
[9004 bytes of binary PNG data omitted]
literal 0
HcmV?d00001

diff --git a/_static/htmlcov/status.json b/_static/htmlcov/status.json
new file mode 100644
index 00000000..fce3d59b
--- /dev/null
+++ b/_static/htmlcov/status.json
@@ -0,0 +1 @@
+{"note":"This file is an internal implementation detail to speed up HTML report generation. Its format can change at any time.
You might be looking for the JSON report: https://coverage.rtfd.io/cmd.html#cmd-json","format":5,"version":"7.6.7","globals":"31e9ed74ee2851f711db0107a95a4d87","files":{"z_e32f35a0016f670d___init___py":{"hash":"0c2494ce83d62c7e6b6c0af30924b493","index":{"url":"z_e32f35a0016f670d___init___py.html","file":"sparc/__init__.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":24,"n_excluded":0,"n_missing":9,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_e32f35a0016f670d_api_py":{"hash":"2c940e42442c9cd0e05f66af001521f5","index":{"url":"z_e32f35a0016f670d_api_py.html","file":"sparc/api.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":143,"n_excluded":0,"n_missing":20,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_e32f35a0016f670d_calculator_py":{"hash":"83092c6c6914c51304dd198e8589ef6f","index":{"url":"z_e32f35a0016f670d_calculator_py.html","file":"sparc/calculator.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":669,"n_excluded":0,"n_missing":252,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_e32f35a0016f670d_common_py":{"hash":"2ce5b2745f7c70b2c1c04752d57b7480","index":{"url":"z_e32f35a0016f670d_common_py.html","file":"sparc/common.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":4,"n_excluded":0,"n_missing":0,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_e32f35a0016f670d_docparser_py":{"hash":"b13dfadec7e2f2b193a5e9ceac185db0","index":{"url":"z_e32f35a0016f670d_docparser_py.html","file":"sparc/docparser.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":363,"n_excluded":0,"n_missing":57,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_e32f35a0016f670d_download_data_py":{"hash":"b0ec9cd8a2bb03e46d80486009247e0a","index":{"url":"z_e32f35a0016f670d_download_data_py.html","file":"sparc/download_data.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":50,"n_excluded":0,"n_missing":6,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_e32f35a0016f670d_io_py":{"hash":"054ba36796fa0a508c4c39b4a270c5c4","index":{"url":"z_e32f35a0016f670d_io_py.html","file":"sparc/io.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":475,"n_excluded":0,"n_missing":130,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_e32f35a0016f670d_quicktest_py":{"hash":"dc96fe6ec8b413dff38484b9c6a075fb","index":{"url":"z_e32f35a0016f670d_quicktest_py.html","file":"sparc/quicktest.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":216,"n_excluded":0,"n_missing":44,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_e32f35a0016f670d_socketio_py":{"hash":"86cc619f16449988ed72f5328a065cb4","index":{"url":"z_e32f35a0016f670d_socketio_py.html","file":"sparc/socketio.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":199,"n_excluded":0,"n_missing":153,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_ef57e6186893c87e___init___py":{"hash":"3c77fc9ef7f887ac2508d4109cf92472","index":{"url":"z_ef57e6186893c87e___init___py.html","file":"sparc/sparc_parsers/__init__.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":0,"n_excluded":0,"n_missing":0,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_ef57e6186893c87e_aimd_py":{"hash":"1448590a3e10dbf81a7ac494f9e3ae3b","index":{"url":"z_ef57e6186893c87e_aimd_py.html","file":"sparc/sparc_parsers/aimd.py","description":"","nums":{"precisi
on":0,"n_files":1,"n_statements":104,"n_excluded":0,"n_missing":11,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_ef57e6186893c87e_atoms_py":{"hash":"39e04b17e57f5e3f491731561291ec01","index":{"url":"z_ef57e6186893c87e_atoms_py.html","file":"sparc/sparc_parsers/atoms.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":195,"n_excluded":0,"n_missing":5,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_ef57e6186893c87e_geopt_py":{"hash":"93fbdd2034535afbfa49d8d8b9c7f896","index":{"url":"z_ef57e6186893c87e_geopt_py.html","file":"sparc/sparc_parsers/geopt.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":76,"n_excluded":0,"n_missing":9,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_ef57e6186893c87e_inpt_py":{"hash":"b09070117d7dd31f3c1965a02c4565e1","index":{"url":"z_ef57e6186893c87e_inpt_py.html","file":"sparc/sparc_parsers/inpt.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":54,"n_excluded":0,"n_missing":3,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_ef57e6186893c87e_ion_py":{"hash":"6c4457f4dd864e3f70e5a4803035da47","index":{"url":"z_ef57e6186893c87e_ion_py.html","file":"sparc/sparc_parsers/ion.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":100,"n_excluded":0,"n_missing":3,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_ef57e6186893c87e_out_py":{"hash":"402386765b1246c079a627c77fa33433","index":{"url":"z_ef57e6186893c87e_out_py.html","file":"sparc/sparc_parsers/out.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":140,"n_excluded":0,"n_missing":12,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_ef57e6186893c87e_pseudopotential_py":{"hash":"56d08cd71fbd818a0194ae05f0946581","index":{"url":"z_ef57e6186893c87e_pseudopotential_py.html","file":"sparc/sparc_parsers/pseudopotential.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":92,"n_excluded":0,"n_missing":0,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_ef57e6186893c87e_static_py":{"hash":"f744da7115bbe43786355923775a70ca","index":{"url":"z_ef57e6186893c87e_static_py.html","file":"sparc/sparc_parsers/static.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":115,"n_excluded":0,"n_missing":11,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_ef57e6186893c87e_utils_py":{"hash":"1b2508faf343787aa934f5ec0335653f","index":{"url":"z_ef57e6186893c87e_utils_py.html","file":"sparc/sparc_parsers/utils.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":53,"n_excluded":0,"n_missing":1,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}},"z_e32f35a0016f670d_utils_py":{"hash":"c5bbea186b105ab48275670020ebcb77","index":{"url":"z_e32f35a0016f670d_utils_py.html","file":"sparc/utils.py","description":"","nums":{"precision":0,"n_files":1,"n_statements":221,"n_excluded":0,"n_missing":138,"n_branches":0,"n_partial_branches":0,"n_missing_branches":0}}}}} \ No newline at end of file diff --git a/_static/htmlcov/style_cb_8e611ae1.css b/_static/htmlcov/style_cb_8e611ae1.css new file mode 100644 index 00000000..3cdaf05a --- /dev/null +++ b/_static/htmlcov/style_cb_8e611ae1.css @@ -0,0 +1,337 @@ +@charset "UTF-8"; +/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ +/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ +/* Don't edit this .css file. Edit the .scss file instead! 
*/ +html, body, h1, h2, h3, p, table, td, th { margin: 0; padding: 0; border: 0; font-weight: inherit; font-style: inherit; font-size: 100%; font-family: inherit; vertical-align: baseline; } + +body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-size: 1em; background: #fff; color: #000; } + +@media (prefers-color-scheme: dark) { body { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { body { color: #eee; } } + +html > body { font-size: 16px; } + +a:active, a:focus { outline: 2px dashed #007acc; } + +p { font-size: .875em; line-height: 1.4em; } + +table { border-collapse: collapse; } + +td { vertical-align: top; } + +table tr.hidden { display: none !important; } + +p#no_rows { display: none; font-size: 1.15em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; } + +a.nav { text-decoration: none; color: inherit; } + +a.nav:hover { text-decoration: underline; color: inherit; } + +.hidden { display: none; } + +header { background: #f8f8f8; width: 100%; z-index: 2; border-bottom: 1px solid #ccc; } + +@media (prefers-color-scheme: dark) { header { background: black; } } + +@media (prefers-color-scheme: dark) { header { border-color: #333; } } + +header .content { padding: 1rem 3.5rem; } + +header h2 { margin-top: .5em; font-size: 1em; } + +header h2 a.button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; background: #eee; color: inherit; text-decoration: none; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; } + +@media (prefers-color-scheme: dark) { header h2 a.button { background: #333; } } + +@media (prefers-color-scheme: dark) { header h2 a.button { border-color: #444; } } + +header h2 a.button.current { border: 2px solid; background: #fff; border-color: #999; cursor: default; } + +@media (prefers-color-scheme: dark) { header h2 a.button.current { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { header h2 a.button.current { border-color: #777; } } + +header p.text { margin: .5em 0 -.5em; color: #666; font-style: italic; } + +@media (prefers-color-scheme: dark) { header p.text { color: #aaa; } } + +header.sticky { position: fixed; left: 0; right: 0; height: 2.5em; } + +header.sticky .text { display: none; } + +header.sticky h1, header.sticky h2 { font-size: 1em; margin-top: 0; display: inline-block; } + +header.sticky .content { padding: 0.5rem 3.5rem; } + +header.sticky .content p { font-size: 1em; } + +header.sticky ~ #source { padding-top: 6.5em; } + +main { position: relative; z-index: 1; } + +footer { margin: 1rem 3.5rem; } + +footer .content { padding: 0; color: #666; font-style: italic; } + +@media (prefers-color-scheme: dark) { footer .content { color: #aaa; } } + +#index { margin: 1rem 0 0 3.5rem; } + +h1 { font-size: 1.25em; display: inline-block; } + +#filter_container { float: right; margin: 0 2em 0 0; line-height: 1.66em; } + +#filter_container #filter { width: 10em; padding: 0.2em 0.5em; border: 2px solid #ccc; background: #fff; color: #000; } + +@media (prefers-color-scheme: dark) { #filter_container #filter { border-color: #444; } } + +@media (prefers-color-scheme: dark) { #filter_container #filter { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { #filter_container #filter { color: #eee; } } + +#filter_container #filter:focus { border-color: #007acc; } + +#filter_container :disabled ~ label { color: #ccc; } + 
+@media (prefers-color-scheme: dark) { #filter_container :disabled ~ label { color: #444; } } + +#filter_container label { font-size: .875em; color: #666; } + +@media (prefers-color-scheme: dark) { #filter_container label { color: #aaa; } } + +header button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; background: #eee; color: inherit; text-decoration: none; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; } + +@media (prefers-color-scheme: dark) { header button { background: #333; } } + +@media (prefers-color-scheme: dark) { header button { border-color: #444; } } + +header button:active, header button:focus { outline: 2px dashed #007acc; } + +header button.run { background: #eeffee; } + +@media (prefers-color-scheme: dark) { header button.run { background: #373d29; } } + +header button.run.show_run { background: #dfd; border: 2px solid #00dd00; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.run.show_run { background: #373d29; } } + +header button.mis { background: #ffeeee; } + +@media (prefers-color-scheme: dark) { header button.mis { background: #4b1818; } } + +header button.mis.show_mis { background: #fdd; border: 2px solid #ff0000; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.mis.show_mis { background: #4b1818; } } + +header button.exc { background: #f7f7f7; } + +@media (prefers-color-scheme: dark) { header button.exc { background: #333; } } + +header button.exc.show_exc { background: #eee; border: 2px solid #808080; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.exc.show_exc { background: #333; } } + +header button.par { background: #ffffd5; } + +@media (prefers-color-scheme: dark) { header button.par { background: #650; } } + +header button.par.show_par { background: #ffa; border: 2px solid #bbbb00; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.par.show_par { background: #650; } } + +#help_panel, #source p .annotate.long { display: none; position: absolute; z-index: 999; background: #ffffcc; border: 1px solid #888; border-radius: .2em; color: #333; padding: .25em .5em; } + +#source p .annotate.long { white-space: normal; float: right; top: 1.75em; right: 1em; height: auto; } + +#help_panel_wrapper { float: right; position: relative; } + +#keyboard_icon { margin: 5px; } + +#help_panel_state { display: none; } + +#help_panel { top: 25px; right: 0; padding: .75em; border: 1px solid #883; color: #333; } + +#help_panel .keyhelp p { margin-top: .75em; } + +#help_panel .legend { font-style: italic; margin-bottom: 1em; } + +.indexfile #help_panel { width: 25em; } + +.pyfile #help_panel { width: 18em; } + +#help_panel_state:checked ~ #help_panel { display: block; } + +kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em .35em; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-weight: bold; background: #eee; border-radius: 3px; } + +#source { padding: 1em 0 1em 3.5rem; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; } + +#source p { position: relative; white-space: pre; } + +#source p * { box-sizing: border-box; } + +#source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; user-select: none; } + +@media (prefers-color-scheme: dark) { #source p .n { color: #777; } } + +#source p .n.highlight { background: #ffdd00; } + +#source p .n a { scroll-margin-top: 6em; 
text-decoration: none; color: #999; } + +@media (prefers-color-scheme: dark) { #source p .n a { color: #777; } } + +#source p .n a:hover { text-decoration: underline; color: #999; } + +@media (prefers-color-scheme: dark) { #source p .n a:hover { color: #777; } } + +#source p .t { display: inline-block; width: 100%; box-sizing: border-box; margin-left: -.5em; padding-left: 0.3em; border-left: 0.2em solid #fff; } + +@media (prefers-color-scheme: dark) { #source p .t { border-color: #1e1e1e; } } + +#source p .t:hover { background: #f2f2f2; } + +@media (prefers-color-scheme: dark) { #source p .t:hover { background: #282828; } } + +#source p .t:hover ~ .r .annotate.long { display: block; } + +#source p .t .com { color: #008000; font-style: italic; line-height: 1px; } + +@media (prefers-color-scheme: dark) { #source p .t .com { color: #6a9955; } } + +#source p .t .key { font-weight: bold; line-height: 1px; } + +#source p .t .str { color: #0451a5; } + +@media (prefers-color-scheme: dark) { #source p .t .str { color: #9cdcfe; } } + +#source p.mis .t { border-left: 0.2em solid #ff0000; } + +#source p.mis.show_mis .t { background: #fdd; } + +@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t { background: #4b1818; } } + +#source p.mis.show_mis .t:hover { background: #f2d2d2; } + +@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t:hover { background: #532323; } } + +#source p.run .t { border-left: 0.2em solid #00dd00; } + +#source p.run.show_run .t { background: #dfd; } + +@media (prefers-color-scheme: dark) { #source p.run.show_run .t { background: #373d29; } } + +#source p.run.show_run .t:hover { background: #d2f2d2; } + +@media (prefers-color-scheme: dark) { #source p.run.show_run .t:hover { background: #404633; } } + +#source p.exc .t { border-left: 0.2em solid #808080; } + +#source p.exc.show_exc .t { background: #eee; } + +@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t { background: #333; } } + +#source p.exc.show_exc .t:hover { background: #e2e2e2; } + +@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t:hover { background: #3c3c3c; } } + +#source p.par .t { border-left: 0.2em solid #bbbb00; } + +#source p.par.show_par .t { background: #ffa; } + +@media (prefers-color-scheme: dark) { #source p.par.show_par .t { background: #650; } } + +#source p.par.show_par .t:hover { background: #f2f2a2; } + +@media (prefers-color-scheme: dark) { #source p.par.show_par .t:hover { background: #6d5d0c; } } + +#source p .r { position: absolute; top: 0; right: 2.5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; } + +#source p .annotate { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; color: #666; padding-right: .5em; } + +@media (prefers-color-scheme: dark) { #source p .annotate { color: #ddd; } } + +#source p .annotate.short:hover ~ .long { display: block; } + +#source p .annotate.long { width: 30em; right: 2.5em; } + +#source p input { display: none; } + +#source p input ~ .r label.ctx { cursor: pointer; border-radius: .25em; } + +#source p input ~ .r label.ctx::before { content: "â–¶ "; } + +#source p input ~ .r label.ctx:hover { background: #e8f4ff; color: #666; } + +@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { background: #0f3a42; } } + +@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { color: #aaa; } } + +#source p input:checked ~ .r label.ctx { 
background: #d0e8ff; color: #666; border-radius: .75em .75em 0 0; padding: 0 .5em; margin: -.25em 0; } + +@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { background: #056; } } + +@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { color: #aaa; } } + +#source p input:checked ~ .r label.ctx::before { content: "â–¼ "; } + +#source p input:checked ~ .ctxs { padding: .25em .5em; overflow-y: scroll; max-height: 10.5em; } + +#source p label.ctx { color: #999; display: inline-block; padding: 0 .5em; font-size: .8333em; } + +@media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } } + +#source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; text-align: right; } + +@media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } } + +#index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; } + +#index table.index { margin-left: -.5em; } + +#index td, #index th { text-align: right; padding: .25em .5em; border-bottom: 1px solid #eee; } + +@media (prefers-color-scheme: dark) { #index td, #index th { border-color: #333; } } + +#index td.name, #index th.name { text-align: left; width: auto; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; min-width: 15em; } + +#index th { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-style: italic; color: #333; cursor: pointer; } + +@media (prefers-color-scheme: dark) { #index th { color: #ddd; } } + +#index th:hover { background: #eee; } + +@media (prefers-color-scheme: dark) { #index th:hover { background: #333; } } + +#index th .arrows { color: #666; font-size: 85%; font-family: sans-serif; font-style: normal; pointer-events: none; } + +#index th[aria-sort="ascending"], #index th[aria-sort="descending"] { white-space: nowrap; background: #eee; padding-left: .5em; } + +@media (prefers-color-scheme: dark) { #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { background: #333; } } + +#index th[aria-sort="ascending"] .arrows::after { content: " â–²"; } + +#index th[aria-sort="descending"] .arrows::after { content: " â–¼"; } + +#index td.name { font-size: 1.15em; } + +#index td.name a { text-decoration: none; color: inherit; } + +#index td.name .no-noun { font-style: italic; } + +#index tr.total td, #index tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; } + +#index tr.region:hover { background: #eee; } + +@media (prefers-color-scheme: dark) { #index tr.region:hover { background: #333; } } + +#index tr.region:hover td.name { text-decoration: underline; color: inherit; } + +#scroll_marker { position: fixed; z-index: 3; right: 0; top: 0; width: 16px; height: 100%; background: #fff; border-left: 1px solid #eee; will-change: transform; } + +@media (prefers-color-scheme: dark) { #scroll_marker { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { #scroll_marker { border-color: #333; } } + +#scroll_marker .marker { background: #ccc; position: absolute; min-height: 3px; width: 100%; } + +@media (prefers-color-scheme: dark) { #scroll_marker .marker { background: #444; } } diff --git 
a/_static/htmlcov/z_e32f35a0016f670d___init___py.html b/_static/htmlcov/z_e32f35a0016f670d___init___py.html new file mode 100644 index 00000000..4f01894c --- /dev/null +++ b/_static/htmlcov/z_e32f35a0016f670d___init___py.html @@ -0,0 +1,150 @@ + + + + + Coverage for sparc/__init__.py: 62% + + + + + +
+
+

+ Coverage for sparc/__init__.py: + 62% +

+ +

+ 24 statements   + + + +

+

+ « prev     + ^ index     + » next +       + coverage.py v7.6.7, + created at 2024-11-20 18:11 +0000 +

+ +
+
+
+

1"""Initialization of sparc-x-api 

+

2 

+

3For submodules like download_data and api, ase / numpy may be ignored, 

+

4and run using standard python libraries. This may be useful for cases like 

+

5conda build and CI where not all dependencies are present 

+

6""" 

+

7 

+

8 

+

9def _missing_deps_func(*args, **kwargs): 

+

10 raise ImportError("Importing ase / numpy failed!") 

+

11 

+

12 

+

13class SPARCMissingDeps: 

+

14 def __init__(self, *args, **kwargs): 

+

15 raise ImportError( 

+

16 "Cannot initialize sparc.SPARC because the required dependencies (ase and numpy) are not available." 

+

17 ) 

+

18 

+

19 def __getattr__(self, name): 

+

20 raise ImportError( 

+

21 f"Cannot access '{name}' on sparc.SPARC because the required dependencies (ase and numpy) are not available." 

+

22 ) 

+

23 

+

24 

+

25try: 

+

26 import ase 

+

27 import numpy 

+

28 

+

29 _import_complete = True 

+

30except ImportError: 

+

31 _import_complete = False 

+

32 

+

33if _import_complete: 

+

34 from packaging import version 

+

35 

+

36 from .calculator import SPARC 

+

37 from .io import read_sparc, register_ase_io_sparc, write_sparc 

+

38 

+

39 # If the ase version is less than 3.23, use the manual register function 

+

40 # Otherwise use the new entry point 

+

41 if version.parse(ase.__version__) < version.parse("3.23"): 

+

42 register_ase_io_sparc() 

+

43 else: 

+

44 # register calculator class <experimental> 

+

45 from ase.calculators.calculator import register_calculator_class 

+

46 

+

47 register_calculator_class("sparc", SPARC) 

+

48else: 

+

49 # If importing is not complete, any code trying to directly import 

+

50 # the following attributes will raise ImportError 

+

51 read_sparc = _missing_deps_func 

+

52 write_sparc = _missing_deps_func 

+

53 SPARC = SPARCMissingDeps 

+
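The rendered sparc/__init__.py above guards every public name behind an ase/numpy availability probe. Below is a minimal, standalone sketch of that deferred-import pattern, assuming nothing beyond the standard library; the helper and fallback names are illustrative and not part of sparc itself.

def _make_missing(name):
    # Factory for placeholders that fail loudly on first use
    def _missing(*args, **kwargs):
        raise ImportError(f"'{name}' requires ase and numpy to be installed!")
    return _missing

try:
    import ase  # noqa: F401
    import numpy  # noqa: F401
    _have_deps = True
except ImportError:
    _have_deps = False

if not _have_deps:
    # Public names degrade to loud ImportError placeholders
    read_sparc = _make_missing("read_sparc")
    write_sparc = _make_missing("write_sparc")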
+ + + diff --git a/_static/htmlcov/z_e32f35a0016f670d_api_py.html b/_static/htmlcov/z_e32f35a0016f670d_api_py.html new file mode 100644 index 00000000..8351da15 --- /dev/null +++ b/_static/htmlcov/z_e32f35a0016f670d_api_py.html @@ -0,0 +1,411 @@ + + + + + Coverage for sparc/api.py: 86% + + + + + +
+
+

+ Coverage for sparc/api.py: + 86% +

+ +

+ 143 statements   + + + +

+

+ « prev     + ^ index     + » next +       + coverage.py v7.6.7, + created at 2024-11-20 18:11 +0000 +

+ +
+
+
+

1import json 

+

2from io import StringIO 

+

3from pathlib import Path 

+

4from warnings import warn 

+

5 

+

6import numpy as np 

+

7 

+

8curdir = Path(__file__).parent 

+

9default_api_dir = curdir / "sparc_json_api" 

+

10default_json_api = default_api_dir / "parameters.json" 

+

11 

+

12 

+

13class SparcAPI: 

+

14 """ 

+

15 An interface to the parameter settings in the SPARC-X calculator. Users can use the 

+

16 SparcAPI instance to validate and translate parameters that match a certain 

+

17 version of the SPARC-X code. 

+

18 

+

19 Attributes: 

+

20 sparc_version (str): Version of SPARC. 

+

21 categories (dict): Categories of parameters. 

+

22 parameters (dict): Detailed parameters information. 

+

23 other_parameters (dict): Additional parameters. 

+

24 data_types (dict): Supported data types. 

+

25 

+

26 Methods: 

+

27 get_parameter_dict(parameter): Retrieves dictionary for a specific parameter. 

+

28 help_info(parameter): Provides detailed information about a parameter. 

+

29 validate_input(parameter, input): Validates user input against the expected parameter type. 

+

30 convert_string_to_value(parameter, string): Converts string input to the appropriate data type. 

+

31 convert_value_to_string(parameter, value): Converts a value to a string representation. 

+

32 """ 

+

33 

+

34 def __init__(self, json_api=None): 

+

35 """ """ 

+

36 if json_api is None: 

+

37 json_api = Path(default_json_api) 

+

38 else: 

+

39 json_api = Path(json_api) 

+

40 

+

41 json_data = json.load(open(json_api, "r")) 

+

42 self.sparc_version = json_data["sparc_version"] 

+

43 self.categories = json_data["categories"] 

+

44 self.parameters = json_data["parameters"] 

+

45 self.other_parameters = json_data["other_parameters"] 

+

46 self.data_types = json_data["data_types"] 

+

47 # TT: 2024-10-31 add the sources to trace the origin 

+

48 # locate_api can modify self.source if it is derived from LaTeX 

+

49 # at runtime 

+

50 self.source = {"path": json_api.as_posix(), "type": "json"} 

+

51 

+

52 def get_parameter_dict(self, parameter): 

+

53 """ 

+

54 Retrieves the dictionary for a specified parameter. 

+

55 

+

56 Args: 

+

57 parameter (str): The name of the parameter. 

+

58 

+

59 Returns: 

+

60 dict: Dictionary containing details of the parameter. 

+

61 

+

62 Raises: 

+

63 KeyError: If the parameter is not known to the SPARC version. 

+

64 """ 

+

65 parameter = parameter.upper() 

+

66 if parameter not in self.parameters.keys(): 

+

67 raise KeyError( 

+

68 f"Parameter {parameter} is not known to " f"SPARC {self.sparc_version}!" 

+

69 ) 

+

70 return self.parameters[parameter] 

+

71 

+

72 def help_info(self, parameter): 

+

73 """Provides a detailed information string for a given parameter. 

+

74 

+

75 Args: 

+

76 parameter (str): The name of the parameter to get information for. 

+

77 

+

78 Returns: 

+

79 str: A formatted string with detailed information about the parameter. 

+

80 """ 

+

81 pdict = self.get_parameter_dict(parameter) 

+

82 message = "\n".join( 

+

83 [ 

+

84 f"{key}: {pdict[key]}" 

+

85 for key in ( 

+

86 "symbol", 

+

87 "category", 

+

88 "type", 

+

89 "unit", 

+

90 "default", 

+

91 "example", 

+

92 "description", 

+

93 "remark", 

+

94 "allow_bool_input", 

+

95 ) 

+

96 ] 

+

97 ) 

+

98 return message 

+

99 

+

100 def validate_input(self, parameter, input): 

+

101 """ 

+

102 Validates if the given input is appropriate for the specified parameter's type. 

+

103 

+

104 Args: 

+

105 parameter (str): The name of the parameter. 

+

106 input: The input to validate, can be of various types (string, int, float, numpy types). 

+

107 

+

108 Returns: 

+

109 bool: True if input is valid, False otherwise. 

+

110 

+

111 Raises: 

+

112 ValueError: If the data type of the parameter is not supported. 

+

113 """ 

+

114 is_input_string = isinstance(input, str) 

+

115 pdict = self.get_parameter_dict(parameter) 

+

116 dtype = pdict["type"] 

+

117 if dtype == "string": 

+

118 return is_input_string 

+

119 elif dtype == "other": 

+

120 # Do nothing for the "other" types but simply 

+

121 # rely on the str() method 

+

122 if not is_input_string: 

+

123 warn( 

+

124 f"Parameter {parameter} has 'other' data type " 

+

125 "and your input is not a string. " 

+

126 "I hope you know what you're doing!" 

+

127 ) 

+

128 return True 

+

129 elif dtype == "integer": 

+

130 try: 

+

131 int(input) 

+

132 return True 

+

133 except (TypeError, ValueError): 

+

134 return False 

+

135 elif dtype == "double": 

+

136 try: 

+

137 float(input) 

+

138 return True 

+

139 except (TypeError, ValueError): 

+

140 try: 

+

141 float(input.split()[0]) 

+

142 return True 

+

143 except Exception: 

+

144 return False 

+

145 elif "array" in dtype: 

+

146 if is_input_string: 

+

147 if ("." in input) and ("integer" in dtype): 

+

148 warn( 

+

149 ( 

+

150 f"Input {input} for parameter " 

+

151 f"{parameter} it not strictly integer. " 

+

152 "I may still perform the conversion " 

+

153 "but be aware of data loss" 

+

154 ) 

+

155 ) 

+

156 try: 

+

157 arr = np.genfromtxt(input.splitlines(), dtype=float, ndmin=1) 

+

158 # Invalid input with nan 

+

159 if np.isnan(arr).any(): 

+

160 arr = np.array(0.0) 

+

161 except Exception: 

+

162 arr = np.array(0.0) 

+

163 else: 

+

164 try: 

+

165 arr = np.atleast_1d(np.asarray(input)) 

+

166 if (arr.dtype not in (int, bool)) and ("integer" in dtype): 

+

167 warn( 

+

168 ( 

+

169 f"Input {input} for parameter {parameter} is" 

+

170 " not strictly integer. " 

+

171 "I can still perform the conversion but " 

+

172 "be aware of data loss" 

+

173 ) 

+

174 ) 

+

175 except Exception: 

+

176 arr = np.array(0.0) 

+

177 return len(arr.shape) > 0 

+

178 else: 

+

179 raise ValueError(f"Data type {dtype} is not supported!") 

+

180 

+

181 def convert_string_to_value(self, parameter, string): 

+

182 """ 

+

183 Converts a string input to the appropriate value type of the parameter. 

+

184 

+

185 Args: 

+

186 parameter (str): The name of the parameter. 

+

187 string (str): The string input to convert. 

+

188 

+

189 Returns: 

+

190 The converted value, type depends on parameter's expected type. 

+

191 

+

192 Raises: 

+

193 TypeError: If the input is not a string. 

+

194 ValueError: If the string is not a valid input for the parameter. 

+

195 """ 

+

196 

+

197 # Special case, the string may be a multiline string-array! 

+

198 if isinstance(string, list): 

+

199 # Make sure there is a line break at the end, for cases like ["2."] 

+

200 string.append("") 

+

201 string = [s.strip() for s in string] 

+

202 string = "\n".join(string) 

+

203 

+

204 is_input_string = isinstance(string, str) 

+

205 if not is_input_string: 

+

206 raise TypeError("Please give a string input!") 

+

207 

+

208 if not self.validate_input(parameter, string): 

+

209 raise ValueError(f"{string} is not a valid input for {parameter}") 

+

210 

+

211 pdict = self.get_parameter_dict(parameter) 

+

212 dtype = pdict["type"] 

+

213 allow_bool_input = pdict.get("allow_bool_input", False) 

+

214 

+

215 if dtype == "string": 

+

216 value = string.strip() 

+

217 elif dtype == "integer": 

+

218 value = int(string) 

+

219 if allow_bool_input: 

+

220 value = bool(value) 

+

221 elif dtype == "double": 

+

222 # Some inputs, like TARGET_PRESSURE, may be accepted with a unit 

+

223 # like 0.0 GPa. Only accept the first part 

+

224 try: 

+

225 value = float(string) 

+

226 except ValueError as e: 

+

227 try: 

+

228 value = float(string.split()[0]) 

+

229 except Exception: 

+

230 raise e 

+

231 elif dtype == "integer array": 

+

232 value = np.genfromtxt(string.splitlines(), dtype=int, ndmin=1) 

+

233 if allow_bool_input: 

+

234 value = value.astype(bool) 

+

235 elif dtype == "double array": 

+

236 value = np.genfromtxt(string.splitlines(), dtype=float, ndmin=1) 

+

237 elif dtype == "other": 

+

238 value = string 

+

239 # should not happen since validate_input has gatekeeping 

+

240 else: 

+

241 raise ValueError(f"Unsupported type {dtype}") 

+

242 

+

243 return value 

+

244 

+

245 def convert_value_to_string(self, parameter, value): 

+

246 """ 

+

247 Converts a value to its string representation based on the parameter type. 

+

248 

+

249 Args: 

+

250 parameter (str): The name of the parameter. 

+

251 value: The value to convert. 

+

252 

+

253 Returns: 

+

254 str: The string representation of the value. 

+

255 

+

256 Raises: 

+

257 ValueError: If the value is not valid for the parameter. 

+

258 """ 

+

259 

+

260 is_input_string = isinstance(value, str) 

+

261 if not self.validate_input(parameter, value): 

+

262 raise ValueError(f"{value} is not a valid input for {parameter}") 

+

263 

+

264 # Do not convert, just return the non-padded string 

+

265 if is_input_string: 

+

266 return value.strip() 

+

267 

+

268 pdict = self.get_parameter_dict(parameter) 

+

269 dtype = pdict["type"] 

+

270 # allow_bool_input = pdict.get("allow_bool_input", False) 

+

271 

+

272 if dtype == "string": 

+

273 string = str(value).strip() 

+

274 elif dtype == "integer": 

+

275 # Be aware of bool values! 

+

276 string = str(int(value)) 

+

277 elif dtype == "double": 

+

278 string = "{:.14f}".format(float(value)) 

+

279 elif dtype in ("integer array", "double array"): 

+

280 string = _array_to_string(value, dtype) 

+

281 elif dtype == "other": 

+

282 if not is_input_string: 

+

283 raise ValueError("Only support string value when datatype is other") 

+

284 string = value 

+

285 else: 

+

286 # should not happen since validate_input has gatekeeping 

+

287 raise ValueError(f"Unsupported type {dtype}") 

+

288 

+

289 return string 

+

290 

+

291 

+

292def _array_to_string(arr, format): 

+

293 """ 

+

294 Converts an array to a string representation based on the specified format. 

+

295 

+

296 Args: 

+

297 arr (array): The array to convert. 

+

298 format (str): The format type ('integer array', 'double array', etc.). 

+

299 

+

300 Returns: 

+

301 str: String representation of the array. 

+

302 """ 

+

303 arr = np.array(arr) 

+

304 if arr.ndim == 1: 

+

305 arr = arr.reshape(1, -1) 

+

306 buf = StringIO() 

+

307 if format in ("integer array", "integer"): 

+

308 fmt = "%d" 

+

309 elif format in ("double array", "double"): 

+

310 fmt = "%.14f" 

+

311 np.savetxt(buf, arr, delimiter=" ", fmt=fmt, header="", footer="", newline="\n") 

+

312 # Return the string output of the buffer with 

+

313 # whitespaces removed 

+

314 return buf.getvalue().strip() 

+
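As a quick orientation for the sparc/api.py page above, here is a hypothetical usage sketch of SparcAPI. It assumes the bundled parameters.json ships with the package and defines KPOINT_GRID as an integer-array parameter; the values are purely illustrative.

from sparc.api import SparcAPI

api = SparcAPI()  # loads the default bundled JSON API
print(api.sparc_version)

# Validation follows the "type" field of the parameter entry
assert api.validate_input("KPOINT_GRID", "3 3 3")

# String --> typed value, and back to the .inpt string form
kpts = api.convert_string_to_value("KPOINT_GRID", "3 3 3")
line = api.convert_value_to_string("KPOINT_GRID", kpts)
print(kpts, line)  # e.g. [3 3 3] and "3 3 3"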
+ + + diff --git a/_static/htmlcov/z_e32f35a0016f670d_calculator_py.html b/_static/htmlcov/z_e32f35a0016f670d_calculator_py.html new file mode 100644 index 00000000..fb26efcd --- /dev/null +++ b/_static/htmlcov/z_e32f35a0016f670d_calculator_py.html @@ -0,0 +1,1419 @@ + + + + + Coverage for sparc/calculator.py: 62% + + + + + +
+
+

+ Coverage for sparc/calculator.py: + 62% +

+ +

+ 669 statements   + + + +

+

+ « prev     + ^ index     + » next +       + coverage.py v7.6.7, + created at 2024-11-20 18:11 +0000 +

+ +
+
+
+

1import datetime 

+

2import os 

+

3import signal 

+

4import subprocess 

+

5import tempfile 

+

6from pathlib import Path 

+

7from warnings import warn, warn_explicit 

+

8 

+

9import numpy as np 

+

10import psutil 

+

11from ase.atoms import Atoms 

+

12from ase.calculators.calculator import Calculator, FileIOCalculator, all_changes 

+

13from ase.parallel import world 

+

14from ase.stress import full_3x3_to_voigt_6_stress 

+

15from ase.units import Bohr, GPa, Hartree, eV 

+

16from ase.utils import IOContext 

+

17 

+

18from .api import SparcAPI 

+

19from .io import SparcBundle 

+

20from .socketio import ( 

+

21 SPARCProtocol, 

+

22 SPARCSocketClient, 

+

23 SPARCSocketServer, 

+

24 generate_random_socket_name, 

+

25) 

+

26from .utils import ( 

+

27 _find_default_sparc, 

+

28 _find_mpi_process, 

+

29 _get_slurm_jobid, 

+

30 _locate_slurm_step, 

+

31 _slurm_signal, 

+

32 compare_dict, 

+

33 deprecated, 

+

34 h2gpts, 

+

35 locate_api, 

+

36 monitor_process, 

+

37 time_limit, 

+

38) 

+

39 

+

40# Below are a list of ASE-compatible calculator input parameters that are 

+

41# in Angstrom/eV units 

+

42# Ideas are taken from GPAW calculator 

+

43sparc_python_inputs = [ 

+

44 "xc", 

+

45 "h", 

+

46 "kpts", 

+

47 "convergence", 

+

48 "gpts", 

+

49 "nbands", 

+

50] 

+

51 

+

52# The socket mode in SPARC calculator uses a relay-based mechanism 

+

53# Several scenarios: 

+

54# 1) use_socket = False --> Turn off all socket communications. SPARC runs from cold-start 

+

55# 2) use_socket = True, port < 0 --> Only connect the sparc binary using ephemeral unix socket. Interface appears as if it is a normal calculator 

+

56# 3) use_socket = True, port > 0 --> Use an out-going socket to relay information 

+

57# 4) use_socket = True, server_only = True --> Act as a SocketServer 

+

58# We do not support an outgoing unix socket because of the limited use cases 

+

59default_socket_params = { 

+

60 "use_socket": False, # Main switch to use socket or not 

+

61 "host": "localhost", # Name of the socket host (only outgoing) 

+

62 "port": -1, # Port number of the outgoing socket 

+

63 "allow_restart": True, # If True, allow the socket server to restart 

+

64 "server_only": False, # Start the calculator as a server 

+

65} 

+

66 

+
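The four scenarios enumerated in the comment block above translate into socket_params roughly as in this hedged sketch; the host and port values are illustrative.

# 1) plain file-IO calculator, no socket at all
file_io_only = {"use_socket": False}
# 2) ephemeral unix socket; behaves like a normal calculator
local_unix = {"use_socket": True, "port": -1}
# 3) outgoing socket relaying to a remote server
relay_client = {"use_socket": True, "host": "remote.host", "port": 12345}
# 4) act purely as a socket server
server_only = {"use_socket": True, "port": 12345, "server_only": True}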

67 

+

68class SPARC(FileIOCalculator, IOContext): 

+

69 """Calculator interface to the SPARC codes via the FileIOCalculator""" 

+

70 

+

71 implemented_properties = ["energy", "forces", "fermi", "stress"] 

+

72 name = "sparc" 

+

73 ase_objtype = "sparc_calculator" # For JSON storage 

+

74 special_inputs = sparc_python_inputs 

+

75 default_params = { 

+

76 "xc": "pbe", 

+

77 "kpts": (1, 1, 1), 

+

78 "h": 0.25, # Angstrom equivalent to MESH_SPACING = 0.47 

+

79 } 

+
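As a quick check of the comment on the default h above: converting 0.25 Angstrom to Bohr with ase.units reproduces the quoted MESH_SPACING of roughly 0.47.

from ase.units import Bohr  # Bohr ~ 0.529177 Angstrom

print(0.25 / Bohr)  # ~ 0.4724, i.e. MESH_SPACING ~ 0.47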

80 # TODO: ASE 3.23 compatibility. should use profile 

+

81 # TODO: remove the legacy command check for future releases 

+

82 _legacy_default_command = "sparc not initialized" 

+

83 

+

84 def __init__( 

+

85 self, 

+

86 restart=None, 

+

87 directory=".", 

+

88 *, 

+

89 label=None, 

+

90 atoms=None, 

+

91 command=None, 

+

92 psp_dir=None, 

+

93 log="sparc.log", 

+

94 sparc_json_file=None, 

+

95 sparc_doc_path=None, 

+

96 check_version=False, 

+

97 keep_old_files=True, 

+

98 use_socket=False, 

+

99 socket_params={}, 

+

100 **kwargs, 

+

101 ): 

+

102 """ 

+

103 Initialize the SPARC calculator similar to FileIOCalculator. The validator uses the JSON API guessed 

+

104 from sparc_json_file or sparc_doc_path. 

+

105 

+

106 Arguments: 

+

107 restart (str or None): Path to the directory for restarting a calculation. If None, starts a new calculation. 

+

108 directory (str or Path): Directory for SPARC calculation files. 

+

109 label (str, optional): Custom label for identifying calculation files. 

+

110 atoms (Atoms, optional): ASE Atoms object representing the system to be calculated. 

+

111 command (str, optional): Command to execute SPARC. If None, it will be determined automatically. 

+

112 psp_dir (str or Path, optional): Directory containing pseudopotentials. 

+

113 log (str, optional): Name of the log file. 

+

114 sparc_json_file (str, optional): Path to a JSON file with SPARC parameters. 

+

115 sparc_doc_path (str, optional): Path to the SPARC doc LaTeX code for parsing parameters. 

+

116 check_version (bool): Check if SPARC and document versions match 

+

117 keep_old_files (bool): Whether older SPARC output files should be preserved. 

+

118 If True, SPARC program will rewrite the output files 

+

119 with suffix like .out_01, .out_02 etc 

+

120 use_socket (bool): Main switch for the socket mode. Alias for socket_params["use_socket"] 

+

121 socket_params (dict): Parameters to control the socket behavior. Please check default_socket_params 

+

122 **kwargs: Additional keyword arguments to set up the calculator. 

+

123 """ 

+

124 self.validator = locate_api(json_file=sparc_json_file, doc_path=sparc_doc_path) 

+

125 self.valid_params = {} 

+

126 self.special_params = {} 

+

127 self.inpt_state = {} # Store the inpt file states 

+

128 self.system_state = {} # Store the system parameters (directory, bundle etc) 

+

129 FileIOCalculator.__init__( 

+

130 self, 

+

131 restart=None, 

+

132 label=None, 

+

133 atoms=atoms, 

+

134 command=command, 

+

135 directory=directory, 

+

136 **kwargs, 

+

137 ) 

+

138 

+

139 # sparc bundle will set the label. self.label will be available after the init 

+

140 if label is None: 

+

141 label = "SPARC" if restart is None else None 

+

142 

+

143 self.sparc_bundle = SparcBundle( 

+

144 directory=Path(self.directory), 

+

145 mode="w", 

+

146 atoms=self.atoms, 

+

147 label=label, # The order is tricky here. Use label not self.label 

+

148 psp_dir=psp_dir, 

+

149 validator=self.validator, 

+

150 ) 

+

151 

+

152 # Try restarting from an old calculation and set results 

+

153 self._restart(restart=restart) 

+

154 

+

155 # self.log = self.directory / log if log is not None else None 

+

156 self.log = log 

+

157 self.keep_old_files = keep_old_files 

+

158 if check_version: 

+

159 self.sparc_version = self.detect_sparc_version() 

+

160 else: 

+

161 self.sparc_version = None 

+

162 

+

163 # Partially update the socket params, so that when setting use_socket = True, 

+

164 # the user can directly use the socket client 

+

165 self.socket_params = default_socket_params.copy() 

+

166 # Everything in the socket_params argument will overwrite the defaults 

+

167 self.socket_params.update(use_socket=use_socket) 

+

168 self.socket_params.update(**socket_params) 

+

169 

+

170 # TODO: check parameter compatibility with socket params 

+

171 self.process = None 

+

172 # self.pid = None 

+

173 

+

174 # Initialize the socket settings 

+

175 self.in_socket = None 

+

176 self.out_socket = None 

+

177 self.ensure_socket() 

+

178 

+

179 def _compare_system_state(self): 

+

180 """Check if system parameters like command etc have changed 

+

181 

+

182 Returns: 

+

183 bool: True if all parameters are the same otherwise False 

+

184 """ 

+

185 old_state = self.system_state.copy() 

+

186 new_state = self._dump_system_state() 

+

187 for key, val in old_state.items(): 

+

188 new_val = new_state.pop(key, None) 

+

189 if isinstance(new_val, dict): 

+

190 if not compare_dict(val, new_val): 

+

191 return False 

+

192 else: 

+

193 if not val == new_val: 

+

194 return False 

+

195 if new_state == {}: 

+

196 return True 

+

197 else: 

+

198 return False 

+

199 

+

200 def _compare_calc_parameters(self, atoms, properties): 

+

201 """Check if SPARC calculator parameters have changed 

+

202 

+

203 Returns: 

+

204 bool: True if no change, otherwise False 

+

205 """ 

+

206 _old_inpt_state = self.inpt_state.copy() 

+

207 _new_inpt_state = self._generate_inpt_state(atoms, properties) 

+

208 result = True 

+

209 if set(_new_inpt_state.keys()) != set(_old_inpt_state.keys()): 

+

210 result = False 

+

211 else: 

+

212 for key, old_val in _old_inpt_state.items(): 

+

213 new_val = _new_inpt_state[key] 

+

214 # TODO: clean up bool 

+

215 if isinstance(new_val, (str, bool)): 

+

216 if new_val != old_val: 

+

217 result = False 

+

218 break 

+

219 elif isinstance(new_val, (int, float)): 

+

220 if not np.isclose(new_val, old_val): 

+

221 result = False 

+

222 break 

+

223 elif isinstance(new_val, (list, np.ndarray)): 

+

224 if not np.isclose(new_val, old_val).all(): 

+

225 result = False 

+

226 break 

+

227 return result 

+

228 

+

229 def _dump_system_state(self): 

+

230 """Returns a dict with current system parameters 

+

231 

+

232 changing these parameters will cause the calculator to reload 

+

233 especially in the use_socket = True case 

+

234 """ 

+

235 system_state = { 

+

236 "label": self.label, 

+

237 "directory": self.directory, 

+

238 "command": self.command, 

+

239 "log": self.log, 

+

240 "socket_params": self.socket_params, 

+

241 } 

+

242 return system_state 

+

243 

+

244 def ensure_socket(self): 

+

245 # TODO: move the ensure-directory logic to another place? 

+

246 if not self.directory.is_dir(): 

+

247 os.makedirs(self.directory, exist_ok=True) 

+

248 if not self.use_socket: 

+

249 return 

+

250 if self.in_socket is None: 

+

251 if self.socket_mode == "server": 

+

252 # TODO: Exception for wrong port 

+

253 self.in_socket = SPARCSocketServer( 

+

254 port=self.socket_params["port"], 

+

255 log=self.openfile( 

+

256 file=self._indir(ext=".log", label="socket"), 

+

257 comm=world, 

+

258 mode="w", 

+

259 ), 

+

260 parent=self, 

+

261 ) 

+

262 else: 

+

263 socket_name = generate_random_socket_name() 

+

264 print(f"Creating a socket server with name {socket_name}") 

+

265 self.in_socket = SPARCSocketServer( 

+

266 unixsocket=socket_name, 

+

267 # TODO: make the log fd persistent 

+

268 log=self.openfile( 

+

269 file=self._indir(ext=".log", label="socket"), 

+

270 comm=world, 

+

271 mode="w", 

+

272 ), 

+

273 parent=self, 

+

274 ) 

+

275 # TODO: add the outbound socket client 

+

276 # TODO: we may need to check an actual socket server at host:port?! 

+

277 # At this stage, we will need to wait the actual client to join 

+

278 if self.out_socket is None: 

+

279 if self.socket_mode == "client": 

+

280 self.out_socket = SPARCSocketClient( 

+

281 host=self.socket_params["host"], 

+

282 port=self.socket_params["port"], 

+

283 # TODO: change later 

+

284 log=self.openfile(file="out_socket.log", comm=world), 

+

285 # TODO: add the log and timeout part 

+

286 parent_calc=self, 

+

287 ) 

+

288 

+

289 def __enter__(self): 

+

290 """Reset upon entering the context.""" 

+

291 IOContext.__enter__(self) 

+

292 self.reset() 

+

293 self.close() 

+

294 return self 

+

295 

+

296 def __exit__(self, type, value, traceback): 

+

297 """Exiting the context manager and reset process""" 

+

298 IOContext.__exit__(self, type, value, traceback) 

+

299 self.close() 

+

300 return 

+

301 

+

302 @property 

+

303 def use_socket(self): 

+

304 return self.socket_params["use_socket"] 

+

305 

+

306 @property 

+

307 def socket_mode(self): 

+

308 """The mode of the socket calculator: 

+

309 

+

310 disabled: pure SPARC file IO interface 

+

311 local: Serves as a local SPARC calculator with socket support 

+

312 client: Relay the SPARC calculation to an outgoing socket server 

+

313 server: Act as a remote socket server only 

+

314 """ 

+

315 if self.use_socket: 

+

316 if self.socket_params["port"] > 0: 

+

317 if self.socket_params["server_only"]: 

+

318 return "server" 

+

319 else: 

+

320 return "client" 

+

321 else: 

+

322 return "local" 

+

323 else: 

+

324 return "disabled" 

+
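For clarity, the decision logic of the socket_mode property above can be restated as a pure function; this is a standalone sketch, not part of the sparc package.

def resolve_socket_mode(use_socket, port, server_only):
    # Mirrors SPARC.socket_mode: disabled / local / client / server
    if not use_socket:
        return "disabled"
    if port > 0:
        return "server" if server_only else "client"
    return "local"

assert resolve_socket_mode(False, -1, False) == "disabled"
assert resolve_socket_mode(True, -1, False) == "local"
assert resolve_socket_mode(True, 12345, False) == "client"
assert resolve_socket_mode(True, 12345, True) == "server"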

325 

+

326 def _indir(self, ext, label=None, occur=0, d_format="{:02d}"): 

+

327 return self.sparc_bundle._indir( 

+

328 ext=ext, label=label, occur=occur, d_format=d_format 

+

329 ) 

+

330 

+

331 @property 

+

332 def log(self): 

+

333 return self.directory / self._log 

+

334 

+

335 @log.setter 

+

336 def log(self, log): 

+

337 # Strip the parent directory information 

+

338 if log is not None: 

+

339 self._log = Path(log).name 

+

340 else: 

+

341 self._log = "sparc.log" 

+

342 return 

+

343 

+

344 @property 

+

345 def in_socket_filename(self): 

+

346 # The actual socket name for inbound socket 

+

347 # Return name as /tmp/ipi_sparc_<hex> 

+

348 if self.in_socket is None: 

+

349 return "" 

+

350 else: 

+

351 return self.in_socket.socket_filename 

+

352 

+

353 @property 

+

354 def directory(self): 

+

355 if hasattr(self, "sparc_bundle"): 

+

356 return Path(self.sparc_bundle.directory) 

+

357 else: 

+

358 return Path(self._directory) 

+

359 

+

360 @directory.setter 

+

361 def directory(self, directory): 

+

362 if hasattr(self, "sparc_bundle"): 

+

363 self.sparc_bundle.directory = Path(directory) 

+

364 else: 

+

365 self._directory = Path(directory) 

+

366 return 

+

367 

+

368 @property 

+

369 def label(self): 

+

370 """Rewrite the label from Calculator class, since we don't want to contain pathsep""" 

+

371 if hasattr(self, "sparc_bundle"): 

+

372 return self.sparc_bundle.label 

+

373 else: 

+

374 return getattr(self, "_label", None) 

+

375 

+

376 @label.setter 

+

377 def label(self, label): 

+

378 """Rewrite the label from Calculator class, 

+

379 since we don't want it to contain pathsep 

+

380 """ 

+

381 label = str(label) 

+

382 if hasattr(self, "sparc_bundle"): 

+

383 self.sparc_bundle.label = self.sparc_bundle._make_label(label) 

+

384 else: 

+

385 self._label = label 

+

386 

+

387 @property 

+

388 def sort(self): 

+

389 """Like Vasp calculator 

+

390 ASE atoms --> sort --> SPARC 

+

391 """ 

+

392 if self.sparc_bundle.sorting is None: 

+

393 return None 

+

394 else: 

+

395 return self.sparc_bundle.sorting["sort"] 

+

396 

+

397 @property 

+

398 def resort(self): 

+

399 """Like Vasp calculator 

+

400 SPARC --> resort --> ASE atoms 

+

401 """ 

+

402 if self.sparc_bundle.sorting is None: 

+

403 return None 

+

404 else: 

+

405 return self.sparc_bundle.sorting["resort"] 

+

406 

+

407 def check_state(self, atoms, tol=1e-8): 

+

408 """Updated check_state method. 

+

409 By default self.atoms (cached from output files) contains the initial_magmoms, 

+

410 so we add zero magmoms to the atoms for comparison if they do not exist. 

+

411 

+

412 reading a result from the .out file only has precision up to 10 digits 

+

413 

+

414 

+

415 """ 

+

416 atoms_copy = atoms.copy() 

+

417 if "initial_magmoms" not in atoms_copy.arrays: 

+

418 atoms_copy.set_initial_magnetic_moments( 

+

419 [ 

+

420 0, 

+

421 ] 

+

422 * len(atoms_copy) 

+

423 ) 

+

424 system_changes = FileIOCalculator.check_state(self, atoms_copy, tol=tol) 

+

425 # A few hard-coded rules. Wrapping should only affect the positions 

+

426 if "positions" in system_changes: 

+

427 atoms_copy.wrap(eps=tol) 

+

428 new_system_changes = FileIOCalculator.check_state(self, atoms_copy, tol=tol) 

+

429 if "positions" not in new_system_changes: 

+

430 system_changes.remove("positions") 

+

431 

+

432 system_state_changed = not self._compare_system_state() 

+

433 if system_state_changed: 

+

434 system_changes.append("system_state") 

+

435 return system_changes 

+

436 

+

437 def _make_command(self, extras=""): 

+

438 """Use $ASE_SPARC_COMMAND or self.command to determine the command 

+

439 as a last resort, if `sparc` exists in the PATH, use that information 

+

440 

+

441 Extras will add additional arguments to the self.command, 

+

442 e.g. -name, -socket etc 

+

443 

+

444 2024.09.05 @alchem0x2a 

+

445 Note in ase>=3.23 the FileIOCalculator.command will fallback 

+

446 to self._legacy_default_command, which we should set to an invalid value for now. 

+

447 """ 

+

448 if isinstance(extras, (list, tuple)): 

+

449 extras = " ".join(extras) 

+

450 else: 

+

451 extras = extras.strip() 

+

452 if (self.command is None) or (self.command == SPARC._legacy_default_command): 

+

453 command_env = os.environ.get("ASE_SPARC_COMMAND", None) 

+

454 if command_env is None: 

+

455 sparc_exe, mpi_exe, num_cores = _find_default_sparc() 

+

456 if sparc_exe is None: 

+

457 raise EnvironmentError( 

+

458 "Cannot find your sparc setup via $ASE_SPARC_COMMAND, SPARC.command, or " 

+

459 "infer from your $PATH. Please refer to the manual!" 

+

460 ) 

+

461 if mpi_exe is not None: 

+

462 command_env = f"{mpi_exe} -n {num_cores} {sparc_exe}" 

+

463 else: 

+

464 command_env = f"{sparc_exe}" 

+

465 warn( 

+

466 f"Your sparc command is inferred to be {command_env}, " 

+

467 "If this is not correct, " 

+

468 "please manually set $ASE_SPARC_COMMAND or SPARC.command!" 

+

469 ) 

+

470 self.command = command_env 

+

471 return f"{self.command} {extras}" 

+
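_make_command above falls back to $ASE_SPARC_COMMAND when self.command is unset, and only infers from the PATH as a last resort. A minimal sketch of the environment-variable route, with an illustrative binary path:

import os

# Hypothetical path; point this at your actual SPARC build
os.environ["ASE_SPARC_COMMAND"] = "mpirun -n 4 /path/to/sparc"
# Subsequent SPARC()._make_command() calls would pick this value up and
# append extras such as "-name <label>" or "-socket <name>:unix".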

472 

+

473 def check_input_atoms(self, atoms): 

+

474 """Check if input atoms are valid for SPARC inputs. 

+

475 Raises: 

+

476 ValueError: if the atoms structure is not suitable for SPARC input file 

+

477 """ 

+

478 # Check if the user accidentally provides an atoms unit cell without vacuum 

+

479 if atoms and np.any(atoms.cell.cellpar()[:3] == 0): 

+

480 msg = "Cannot setup SPARC calculation because at least one of the lattice dimension is zero!" 

+

481 if any([not bc_ for bc_ in atoms.pbc]): 

+

482 msg += " Please add a vacuum in the non-periodic direction of your input structure." 

+

483 raise ValueError(msg) 

+

484 # SPARC only supports orthogonal lattice when Dirichlet BC is used 

+

485 if any([not bc_ for bc_ in atoms.pbc]): 

+

486 if not np.isclose(atoms.cell.angles(), [90.0, 90.0, 90.0], 1.0e-4).all(): 

+

487 raise ValueError( 

+

488 ( 

+

489 "SPARC only supports orthogonal lattice when Dirichlet BC is used! " 

+

490 "Please modify your atoms structures" 

+

491 ) 

+

492 ) 

+

493 for i, bc_ in enumerate(atoms.pbc): 

+

494 if bc_: 

+

495 continue 

+

496 direction = "xyz"[i] 

+

497 min_pos, max_pos = atoms.positions[:, i].min(), atoms.positions[:, i].max() 

+

498 cell_len = atoms.cell.lengths()[i] 

+

499 if (min_pos < 0) or (max_pos > cell_len): 

+

500 raise ValueError( 

+

501 ( 

+

502 f"You have Dirichlet BC enabled for {direction}-direction, " 

+

503 "but atoms positions are out of domain. " 

+

504 "SPARC calculator cannot continue. " 

+

505 "Please consider using atoms.center() to reposition your atoms." 

+

506 ) 

+

507 ) 

+

508 # Additionally, we should not allow the user to calculate pbc=False with CALC_STRESS=1 

+

509 if all([not bc_ for bc_ in atoms.pbc]): # All Dirichlet 

+

510 calc_stress = self.parameters.get("calc_stress", False) 

+

511 if calc_stress: 

+

512 raise ValueError( 

+

513 "Cannot set CALC_STRESS=1 for non-periodic system in SPARC!" 

+

514 ) 

+

515 return 

+
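check_input_atoms above rejects zero-length lattice vectors, non-orthogonal cells under Dirichlet BC, and out-of-domain positions. A hedged example of a structure that passes all three checks, using standard ase.build calls:

from ase.build import molecule

# Water molecule in a padded, centered box: 6 A vacuum on each side
atoms = molecule("H2O", vacuum=6.0)
atoms.pbc = False  # Dirichlet BC in all three directions
# Note: with all directions non-periodic, CALC_STRESS must stay unset.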

516 

+

517 def calculate(self, atoms=None, properties=["energy"], system_changes=all_changes): 

+

518 """Perform a calculation step""" 

+

519 

+

520 self.check_input_atoms(atoms) 

+

521 Calculator.calculate(self, atoms, properties, system_changes) 

+

522 

+

523 # Extra check for inpt parameters since check_state won't accept properties 

+

524 # inpt should only change when write_inpt is actually called 

+

525 param_changed = not self._compare_calc_parameters(atoms, properties) 

+

526 if param_changed: 

+

527 system_changes.append("parameters") 

+

528 

+

529 if self.socket_mode in ("local", "client"): 

+

530 self._calculate_with_socket( 

+

531 atoms=atoms, properties=properties, system_changes=system_changes 

+

532 ) 

+

533 return 

+

534 

+

535 if self.socket_mode == "server": 

+

536 self._calculate_as_server( 

+

537 atoms=atoms, properties=properties, system_changes=system_changes 

+

538 ) 

+

539 return 

+

540 self.write_input(self.atoms, properties, system_changes) 

+

541 self.execute() 

+

542 self.read_results() 

+

543 # Extra step: copy the atoms back to the original atoms, if it's a 

+

544 # geopt or aimd calculation 

+

545 # This will not occur for socket calculator because it's using the static files 

+

546 if ("geopt" in self.raw_results) or ("aimd" in self.raw_results): 

+

547 # Update the parent atoms 

+

548 atoms.set_positions(self.atoms.positions, apply_constraint=False) 

+

549 atoms.cell = self.atoms.cell 

+

550 atoms.constraints = self.atoms.constraints 

+

551 atoms.pbc = self.atoms.pbc 

+

552 # copy init magmom just to avoid check_state issue 

+

553 if "initial_magmoms" in self.atoms.arrays: 

+

554 atoms.set_initial_magnetic_moments( 

+

555 self.atoms.get_initial_magnetic_moments() 

+

556 ) 

+

557 

+

558 def _calculate_as_server( 

+

559 self, atoms=None, properties=["energy"], system_changes=all_changes 

+

560 ): 

+

561 """Use the server component to send instructions to socket""" 

+

562 ret, raw_results = self.in_socket.calculate_new_protocol( 

+

563 atoms=atoms, params=self.parameters 

+

564 ) 

+

565 self.raw_results = raw_results 

+

566 if "stress" not in self.results: 

+

567 virial_from_socket = ret.get("virial", np.zeros(6)) 

+

568 stress_from_socket = ( 

+

569 -full_3x3_to_voigt_6_stress(virial_from_socket) / atoms.get_volume() 

+

570 ) 

+

571 self.results["stress"] = stress_from_socket 

+

572 # Energy and forces returned in this case do not need 

+

573 # resorting, since they are already in the same format 

+

574 self.results["energy"] = ret["energy"] 

+

575 self.results["forces"] = ret["forces"] 

+

576 return 

+

577 

+

578 def _calculate_with_socket( 

+

579 self, atoms=None, properties=["energy"], system_changes=all_changes 

+

580 ): 

+

581 """Perform one socket single point calculation""" 

+

582 # TODO: merge this part 

+

583 if self.process is None: 

+

584 if self.detect_socket_compatibility() is not True: 

+

585 raise RuntimeError( 

+

586 "Your sparc binary is not compiled with socket support!" 

+

587 ) 

+

588 

+

589 if any( 

+

590 [ 

+

591 p in system_changes 

+

592 for p in ("numbers", "pbc", "parameters", "system_state") 

+

593 ] 

+

594 ): 

+

595 if self.process is not None: 

+

596 if not self.socket_params["allow_restart"]: 

+

597 raise RuntimeError( 

+

598 ( 

+

599 f"System has changed {system_changes} and the " 

+

600 "calculator needs to be restarted!\n" 

+

601 "Please set socket_params['allow_restart'] = True " 

+

602 "if you want to continue" 

+

603 ) 

+

604 ) 

+

605 else: 

+

606 print( 

+

607 f"{system_changes} have changed since last calculation. Restart the socket process." 

+

608 ) 

+

609 self.close(keep_out_socket=True) 

+

610 

+

611 if self.process is None: 

+

612 self.ensure_socket() 

+

613 self.write_input(atoms) 

+

614 cmds = self._make_command( 

+

615 extras=f"-socket {self.in_socket_filename}:unix -name {self.label}" 

+

616 ) 

+

617 # Use the IOContext class's lazy context manager 

+

618 # TODO what if self.log is None 

+

619 fd_log = self.openfile(file=self.log, comm=world) 

+

620 self.process = subprocess.Popen( 

+

621 cmds, 

+

622 shell=True, 

+

623 stdout=fd_log, 

+

624 stderr=fd_log, 

+

625 cwd=self.directory, 

+

626 universal_newlines=True, 

+

627 bufsize=0, 

+

628 ) 

+

629 # in_socket is a server 

+

630 ret = self.in_socket.calculate_origin_protocol(atoms[self.sort]) 

+

631 # The results are parsed from file outputs (.static + .out) 

+

632 # Except for stress, they should be exactly the same as the socket-returned results 

+

633 self.read_results() # 

+

634 assert np.isclose( 

+

635 ret["energy"], self.results["energy"] 

+

636 ), "Energy values from socket communication and output file are different! Please contact the developers." 

+

637 try: 

+

638 assert np.isclose( 

+

639 ret["forces"][self.resort], self.results["forces"] 

+

640 ).all(), "Force values from socket communication and output file are different! Please contact the developers." 

+

641 except KeyError: 

+

642 print("Force values cannot be accessed via the results dictionary. They may not be available in the output file. Ensure PRINT_FORCES: 1\nResults:\n",self.results) 

+

643 # For stress information, we make sure that the stress is always present 

+

644 if "stress" not in self.results: 

+

645 virial_from_socket = ret.get("virial", np.zeros(6)) 

+

646 stress_from_socket = ( 

+

647 -full_3x3_to_voigt_6_stress(virial_from_socket) / atoms.get_volume() 

+

648 ) 

+

649 self.results["stress"] = stress_from_socket 

+

650 self.system_state = self._dump_system_state() 

+

651 return 

+

652 

+
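# A minimal sketch of the virial-to-stress conversion used above, assuming
# ASE's ase.stress.full_3x3_to_voigt_6_stress and hypothetical values:
#
#   import numpy as np
#   from ase.stress import full_3x3_to_voigt_6_stress
#
#   virial = np.diag([1.0, 2.0, 3.0])  # hypothetical 3x3 virial (eV)
#   volume = 100.0                     # hypothetical cell volume (Angstrom^3)
#   stress = -full_3x3_to_voigt_6_stress(virial) / volume
#   # -> Voigt order (xx, yy, zz, yz, xz, xy): [-0.01, -0.02, -0.03, 0, 0, 0]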

653 def get_stress(self, atoms=None): 

+

654 """Warn user the dimensionality change when using stress""" 

+

655 if "stress_equiv" in self.results: 

+

656 raise NotImplementedError( 

+

657 "You're requesting stress in a low-dimensional system. Please use `calc.results['stress_equiv']` instead!" 

+

658 ) 

+

659 return super().get_stress(atoms) 

+

660 

+
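# Note: for low-dimensional (non 3D-periodic) systems the equivalent stress is
# stored separately and must be read directly, e.g.:
#
#   # calc.results["stress_equiv"]  # instead of calc.get_stress()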

661 def _check_input_exclusion(self, input_parameters, atoms=None): 

+

662 """Check if mutually exclusive parameters are provided 

+

663 

+

664 The exclusion rules are taken from the SPARC manual and currently hard-coded. 

+

665 We may need to have a clever way to do the automatic rule conversion in API 

+

666 """ 

+

667 # Rule 1: ECUT, MESH_SPACING, FD_GRID 

+

668 count = 0 

+

669 for key in ["ECUT", "MESH_SPACING", "FD_GRID"]: 

+

670 if key in input_parameters: 

+

671 count += 1 

+

672 if count > 1: 

+

673 raise ValueError( 

+

674 "ECUT, MESH_SPACING, FD_GRID cannot be specified simultaneously!" 

+

675 ) 

+

676 

+

677 # Rule 2: LATVEC_SCALE, CELL 

+

678 if ("LATVEC_SCALE" in input_parameters) and ("CELL" in input_parameters): 

+

679 raise ValueError( 

+

680 "LATVEC_SCALE and CELL cannot be specified simultaneously!" 

+

681 ) 

+

682 

+

683 # When the cell is provided via the ase Atoms object, we forbid the user to provide 

+

684 # LATVEC, LATVEC_SCALE or CELL 

+

685 if atoms is not None: 

+

686 if any([p in input_parameters for p in ["LATVEC", "LATVEC_SCALE", "CELL"]]): 

+

687 raise ValueError( 

+

688 ( 

+

689 "When passing an ase atoms object, LATVEC, LATVEC_SCALE or CELL cannot be set simultaneously! \n" 

+

690 "Please set atoms.cell instead" 

+

691 ) 

+

692 ) 

+

693 

+
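# A quick illustration of the exclusion rules above (hypothetical values,
# assuming `calc` is a SPARC calculator instance):
#
#   calc._check_input_exclusion({"ECUT": 30, "FD_GRID": [25, 25, 25]})
#   # -> ValueError: ECUT, MESH_SPACING, FD_GRID cannot be specified simultaneously!
#   calc._check_input_exclusion({"MESH_SPACING": 0.4})  # passes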

694 def _check_minimal_input(self, input_parameters): 

+

695 """Check if the minimal input set is satisfied""" 

+

696 for param in ["EXCHANGE_CORRELATION", "KPOINT_GRID"]: 

+

697 if param not in input_parameters: 

+

698 raise ValueError(f"Parameter {param} is not provided.") 

+

699 # At least one from ECUT, MESH_SPACING and FD_GRID must be provided 

+

700 if not any( 

+

701 [param in input_parameters for param in ("ECUT", "MESH_SPACING", "FD_GRID")] 

+

702 ): 

+

703 raise ValueError( 

+

704 "You should provide at least one of ECUT, MESH_SPACING or FD_GRID." 

+

705 ) 

+

706 

+
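# A minimal parameter set that satisfies the check above (hypothetical values):
#
#   minimal = {
#       "EXCHANGE_CORRELATION": "GGA_PBE",
#       "KPOINT_GRID": (1, 1, 1),
#       "MESH_SPACING": 0.4,  # one of ECUT / MESH_SPACING / FD_GRID
#   }
#   calc._check_minimal_input(minimal)  # raises no ValueError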

707 def _generate_inpt_state(self, atoms, properties=[]): 

+

708 """Return a key:value pair to be written to inpt file 

+

709 This is an immutable dict as the ground truth 

+

710 """ 

+

711 converted_params = self._convert_special_params(atoms=atoms) 

+

712 input_parameters = converted_params.copy() 

+

713 input_parameters.update(self.valid_params) 

+

714 

+

715 # Make sure desired properties are always requested, but we don't modify the user inputs 

+

716 if "forces" in properties: 

+

717 input_parameters["PRINT_FORCES"] = True 

+

718 

+

719 if "stress" in properties: 

+

720 input_parameters["CALC_STRESS"] = True 

+

721 

+

722 self._check_input_exclusion(input_parameters, atoms=atoms) 

+

723 self._check_minimal_input(input_parameters) 

+

724 return input_parameters 

+

725 

+

726 def write_input(self, atoms, properties=[], system_changes=[]): 

+

727 """Create input files via SparcBundle 

+

728 Will use the self.keep_old_files option to keep old output files 

+

729 like .out_01, .out_02 etc 

+

730 """ 

+

731 # import pdb; pdb.set_trace() 

+

732 FileIOCalculator.write_input(self, atoms, properties, system_changes) 

+

733 input_parameters = self._generate_inpt_state(atoms, properties=properties) 

+

734 

+

735 # TODO: make sure the sorting reset is justified (i.e. what about restarting?) 

+

736 self.sparc_bundle.sorting = None 

+

737 self.sparc_bundle._write_ion_and_inpt( 

+

738 atoms=atoms, 

+

739 label=self.label, 

+

740 # Pass the remaining parameters from the calculator! 

+

741 direct=False, 

+

742 sort=True, 

+

743 ignore_constraints=False, 

+

744 wrap=False, 

+

745 # Below are the parameters from v1 

+

746 # scaled -> direct, ignore_constraints --> not add_constraints 

+

747 scaled=False, 

+

748 add_constraints=True, 

+

749 copy_psp=True, 

+

750 comment="", 

+

751 input_parameters=input_parameters, 

+

752 ) 

+

753 

+

754 output_patterns = [".out", ".static", ".eigen", ".aimd", ".geopt"] 

+

755 # We just remove the output files, in case the user has psp files manually copied 

+

756 if self.keep_old_files is False: 

+

757 for f in self.directory.glob("*"): 

+

758 if (f.is_file()) and any( 

+

759 [f.suffix.startswith(p) for p in output_patterns] 

+

760 ): 

+

761 os.remove(f) 

+

762 self.inpt_state = input_parameters 

+

763 self.system_state = self._dump_system_state() 

+

764 return 

+

765 

+

766 def execute(self): 

+

767 """Make a normal SPARC calculation without socket. Note we probably need to use a better handling of background process!""" 

+

768 extras = f"-name {self.label}" 

+

769 command = self._make_command(extras=extras) 

+

770 self.print_sysinfo(command) 

+

771 

+

772 try: 

+

773 if self.log is not None: 

+

774 with open(self.log, "a") as fd: 

+

775 self.process = subprocess.run( 

+

776 command, shell=True, cwd=self.directory, stdout=fd 

+

777 ) 

+

778 else: 

+

779 self.process = subprocess.run( 

+

780 command, shell=True, cwd=self.directory, stdout=None 

+

781 ) 

+

782 except OSError as err: 

+

783 msg = 'Failed to execute "{}"'.format(command) 

+

784 raise EnvironmentError(msg) from err 

+

785 

+

786 # We probably don't want to wait for the process here 

+

787 errorcode = self.process.returncode 

+

788 

+

789 if errorcode > 0: 

+

790 msg = f"SPARC failed with command {command}" f"with error code {errorcode}" 

+

791 raise RuntimeError(msg) 

+

792 

+

793 return 

+

794 

+

795 def close(self, keep_out_socket=False): 

+

796 """Close the socket communication, the SPARC process etc""" 

+

797 if not self.use_socket: 

+

798 return 

+

799 if self.in_socket is not None: 

+

800 self.in_socket.close() 

+

801 

+

802 if (self.out_socket is not None) and (not keep_out_socket): 

+

803 self.out_socket.close() 

+

804 

+

805 # In most cases if in_socket is closed, the SPARC process should also exit 

+

806 if self.process: 

+

807 with time_limit(5): 

+

808 ret = self.process.poll() 

+

809 if ret is None: 

+

810 print("Force terminate the sparc process!") 

+

811 self._send_mpi_signal(signal.SIGKILL) 

+

812 else: 

+

813 print(f"SPARC process exists with code {ret}") 

+

814 

+

815 # TODO: check if in_socket should be merged 

+

816 self.in_socket = None 

+

817 if not keep_out_socket: 

+

818 self.out_socket = None 

+

819 self._reset_process() 

+

820 

+

821 def _send_mpi_signal(self, sig): 

+

822 """Send signal to the mpi process within self.process 

+

823 If the process cannot be found, return without affecting the state 

+

824 

+

825 This is a method taken from the vasp_interactive project 

+

826 """ 

+

827 try: 

+

828 pid = self.process.pid 

+

829 psutil_proc = psutil.Process(pid) 

+

830 except Exception as e: 

+

831 warn("SPARC process no longer exists. Will reset the calculator.") 

+

832 self._reset_process() 

+

833 return 

+

834 

+

835 if (self.pid == pid) and getattr(self, "mpi_match", None) is not None: 

+

836 match = self.mpi_match 

+

837 else: 

+

838 # self.pid = pid 

+

839 match = _find_mpi_process(pid) 

+

840 self.mpi_match = match 

+

841 if (match["type"] is None) or (match["process"] is None): 

+

842 warn( 

+

843 "Cannot find the mpi process or you're using different ompi wrapper. Will not send stop signal to mpi." 

+

844 ) 

+

845 return 

+

846 elif match["type"] == "mpi": 

+

847 mpi_process = match["process"] 

+

848 mpi_process.send_signal(sig) 

+

849 elif match["type"] == "slurm": 

+

850 slurm_step = match["process"] 

+

851 _slurm_signal(slurm_step, sig) 

+

852 else: 

+

853 raise ValueError("Unsupported process type!") 

+

854 return 

+

855 

+

856 def _reset_process(self): 

+

857 """Reset the record for process in the calculator. 

+

858 Useful if the process is missing or when resetting the calculator. 

+

859 """ 

+

860 # Reset process tracker 

+

861 self.process = None 

+

862 # self.pid = None 

+

863 if hasattr(self, "mpi_match"): 

+

864 self.mpi_match = None 

+

865 self.mpi_state = None 

+

866 

+

867 @property 

+

868 def pid(self): 

+

869 """The pid for the stored process""" 

+

870 if self.process is None: 

+

871 return None 

+

872 else: 

+

873 return self.process.pid 

+

874 

+

875 @property 

+

876 def raw_results(self): 

+

877 return getattr(self.sparc_bundle, "raw_results", {}) 

+

878 

+

879 @raw_results.setter 

+

880 def raw_results(self, value): 

+

881 self.sparc_bundle.raw_results = value 

+

882 return 

+

883 

+

884 def read_results(self): 

+

885 """Parse from the SparcBundle""" 

+

886 # self.sparc_bundle.read_raw_results() 

+

887 last = self.sparc_bundle.convert_to_ase(indices=-1, include_all_files=False) 

+

888 self.atoms = last.copy() 

+

889 self.results.update(last.calc.results) 

+

890 

+

891 def _restart(self, restart=None): 

+

892 """Reload the input parameters and atoms from previous calculation. 

+

893 

+

894 If self.parameters is already set, the parameters will not be loaded 

+

895 If self.atoms is already set, the atoms will not be read 

+

896 """ 

+

897 if restart is None: 

+

898 return 

+

899 reload_atoms = self.atoms is None 

+

900 reload_parameters = len(self.parameters) == 0 

+

901 

+

902 self.read_results() 

+

903 if not reload_atoms: 

+

904 self.atoms = None 

+

905 if reload_parameters: 

+

906 self.parameters = self.raw_results["inpt"]["params"] 

+

907 

+

908 if (not reload_parameters) or (not reload_atoms): 

+

909 warn( 

+

910 "Extra parameters or atoms are provided when restarting the SPARC calculator, " 

+

911 "previous results will be cleared." 

+

912 ) 

+

913 self.results.clear() 

+

914 self.sparc_bundle.raw_results.clear() 

+

915 return 

+

916 

+

917 def get_fermi_level(self): 

+

918 """Extra get-method for Fermi level, if calculated""" 

+

919 return self.results.get("fermi", None) 

+

920 

+

921 def detect_sparc_version(self): 

+

922 """Run a short sparc test to determine which sparc is used""" 

+

923 try: 

+

924 cmd = self._make_command() 

+

925 except EnvironmentError: 

+

926 return None 

+

927 print("Running a short calculation to determine SPARC version....") 

+

928 # check_version must be set to False to avoid recursive calling 

+

929 new_calc = SPARC( 

+

930 command=self.command, psp_dir=self.sparc_bundle.psp_dir, check_version=False 

+

931 ) 

+

932 with tempfile.TemporaryDirectory() as tmpdir: 

+

933 new_calc.set(xc="pbe", h=0.3, kpts=(1, 1, 1), maxit_scf=1, directory=tmpdir) 

+

934 atoms = Atoms(["H"], positions=[[0.0, 0.0, 0.0]], cell=[2, 2, 2], pbc=False) 

+

935 try: 

+

936 new_calc.calculate(atoms) 

+

937 version = new_calc.raw_results["out"]["sparc_version"] 

+

938 except Exception as e: 

+

939 print("Error handling simple calculation: ", e) 

+

940 version = None 

+

941 # Warning information about version mismatch between binary and JSON API 

+

942 # only when both are not None 

+

943 if (version is not None) and (self.validator.sparc_version is not None): 

+

944 if version != self.validator.sparc_version: 

+

945 warn( 

+

946 ( 

+

947 f"SPARC binary version {version} does not match JSON API version {self.validator.sparc_version}. " 

+

948 "You can set $SPARC_DOC_PATH to the SPARC documentation location." 

+

949 ) 

+

950 ) 

+

951 return version 

+

952 

+

953 def run_client(self, atoms=None, use_stress=False): 

+

954 """Main method to start the client code""" 

+

955 if self.socket_mode != "client": 

+

956 raise RuntimeError( 

+

957 "Cannot use SPARC.run_client if the calculator is not configured in client mode!" 

+

958 ) 

+

959 

+

960 self.out_socket.run(atoms, use_stress) 

+

961 

+

962 def detect_socket_compatibility(self): 

+

963 """Test if the sparc binary supports socket mode""" 

+

964 try: 

+

965 cmd = self._make_command() 

+

966 except EnvironmentError: 

+

967 return False 

+

968 with tempfile.TemporaryDirectory() as tmpdir: 

+

969 proc = subprocess.run(cmd, shell=True, cwd=tmpdir, capture_output=True) 

+

970 output = proc.stdout.decode("ascii") 

+

971 if "USAGE:" not in output: 

+

972 raise EnvironmentError( 

+

973 "Cannot find the sparc executable! Please make sure you have the correct setup" 

+

974 ) 

+

975 compatibility = "-socket" in output 

+

976 return compatibility 

+

977 

+

978 def set(self, **kwargs): 

+

979 """Overwrite the initial parameters""" 

+

980 # Do not use JSON Schema for these arguments 

+

981 if "label" in kwargs: 

+

982 self.label = kwargs.pop("label") 

+

983 

+

984 if "directory" in kwargs: 

+

985 # str() call to deal with pathlib objects 

+

986 self.directory = str(kwargs.pop("directory")) 

+

987 

+

988 if "log" in kwargs: 

+

989 self.log = kwargs.pop("log") 

+

990 

+

991 if "check_version" in kwargs: 

+

992 self.check_version = bool(kwargs.pop("check_version")) 

+

993 

+

994 if "keep_old_files" in kwargs: 

+

995 self.keep_old_files = kwargs.pop("keep_old_files") 

+

996 

+

997 if "atoms" in kwargs: 

+

998 self.atoms = kwargs.pop("atoms") # Resets results 

+

999 

+

1000 if "command" in kwargs: 

+

1001 self.command = kwargs.pop("command") 

+

1002 

+

1003 # For now we don't let the user to hot-swap socket 

+

1004 if ("use_socket" in kwargs) or ("socket_params" in kwargs): 

+

1005 raise NotImplementedError("Hot swapping socket parameter is not supported!") 

+

1006 

+

1007 self._sanitize_kwargs(**kwargs) 

+

1008 set_params = {} 

+

1009 set_params.update(self.special_params) 

+

1010 set_params.update(self.valid_params) 

+

1011 changed = super().set(**set_params) 

+

1012 if changed != {}: 

+

1013 self.reset() 

+

1014 

+

1015 return changed 

+

1016 

+

1017 def _sanitize_kwargs(self, **kwargs): 

+

1018 """Convert known parameters from JSON API""" 

+

1019 validator = self.validator 

+

1020 if self.special_params == {}: 

+

1021 init = True 

+

1022 self.special_params = self.default_params.copy() 

+

1023 else: 

+

1024 init = False 

+

1025 # User input gpts will overwrite default h 

+

1026 # but user cannot put h and gpts both 

+

1027 if "gpts" in kwargs: 

+

1028 h = self.special_params.pop("h", None) 

+

1029 if (h is not None) and (not init): 

+

1030 warn("Parameter gpts will overwrite previously set parameter h.") 

+

1031 elif "h" in kwargs: 

+

1032 gpts = self.special_params.pop("gpts", None) 

+

1033 if (gpts is not None) and (not init): 

+

1034 warn("Parameter h will overwrite previously set parameter gpts.") 

+

1035 

+

1036 upper_valid_params = set() # Valid SPARC parameters in upper case 

+

1037 # SPARC API is case insensitive 

+

1038 for key, value in kwargs.items(): 

+

1039 if key in self.special_inputs: 

+

1040 # Special case: ignore h when gpts provided 

+

1041 

+

1042 self.special_params[key] = value 

+

1043 else: 

+

1044 key = key.upper() 

+

1045 if key in upper_valid_params: 

+

1046 warn(f"Parameter {key} (case-insentive) appears multiple times!") 

+

1047 if validator.validate_input(key, value): 

+

1048 self.valid_params[key] = value 

+

1049 upper_valid_params.add(key) 

+

1050 else: 

+

1051 raise ValueError( 

+

1052 f"Value {value} for parameter {key} (case-insensitive) is invalid!" 

+

1053 ) 

+

1054 return 

+

1055 

+

1056 def _convert_special_params(self, atoms=None): 

+

1057 """Convert ASE-compatible parameters to SPARC compatible ones 

+

1058 parameters like `h`, `nbands` may need atoms information 

+

1059 

+

1060 Special rules: 

+

1061 h <--> gpts <--> FD_GRID, only when none of FD_GRID, ECUT, or MESH_SPACING is provided 

+

1062 """ 

+

1063 converted_sparc_params = {} 

+

1064 validator = self.validator 

+

1065 params = self.special_params.copy() 

+

1066 

+

1067 # xc --> EXCHANGE_CORRELATION 

+

1068 if "xc" in params: 

+

1069 xc = params.pop("xc") 

+

1070 if xc.lower() == "pbe": 

+

1071 converted_sparc_params["EXCHANGE_CORRELATION"] = "GGA_PBE" 

+

1072 elif xc.lower() == "lda": 

+

1073 converted_sparc_params["EXCHANGE_CORRELATION"] = "LDA_PZ" 

+

1074 elif xc.lower() == "rpbe": 

+

1075 converted_sparc_params["EXCHANGE_CORRELATION"] = "GGA_RPBE" 

+

1076 elif xc.lower() == "pbesol": 

+

1077 converted_sparc_params["EXCHANGE_CORRELATION"] = "GGA_PBEsol" 

+

1078 elif xc.lower() == "pbe0": 

+

1079 converted_sparc_params["EXCHANGE_CORRELATION"] = "PBE0" 

+

1080 elif xc.lower() == "hf": 

+

1081 converted_sparc_params["EXCHANGE_CORRELATION"] = "HF" 

+

1082 # backward compatibility for HSE03. Note HSE06 is not supported yet 

+

1083 elif (xc.lower() == "hse") or (xc.lower() == "hse03"): 

+

1084 converted_sparc_params["EXCHANGE_CORRELATION"] = "HSE" 

+

1085 # backward compatibility for VASP-style XCs 

+

1086 elif ( 

+

1087 (xc.lower() == "vdwdf1") 

+

1088 or (xc.lower() == "vdw-df") 

+

1089 or (xc.lower() == "vdw-df1") 

+

1090 ): 

+

1091 converted_sparc_params["EXCHANGE_CORRELATION"] = "vdWDF1" 

+

1092 elif (xc.lower() == "vdwdf2") or (xc.lower() == "vdw-df2"): 

+

1093 converted_sparc_params["EXCHANGE_CORRELATION"] = "vdWDF2" 

+

1094 elif xc.lower() == "scan": 

+

1095 converted_sparc_params["EXCHANGE_CORRELATION"] = "SCAN" 

+

1096 else: 

+

1097 raise ValueError(f"xc keyword value {xc} is invalid!") 

+

1098 

+

1099 # h --> gpts 

+

1100 if "h" in params: 

+

1101 if "gpts" in params: 

+

1102 raise KeyError( 

+

1103 "h and gpts cannot be provided together in SPARC calculator!" 

+

1104 ) 

+

1105 h = params.pop("h") 

+

1106 # if atoms is None: 

+

1107 # raise ValueError( 

+

1108 # "Must have an active atoms object to convert h --> gpts!" 

+

1109 # ) 

+

1110 if any( 

+

1111 [p in self.valid_params for p in ("FD_GRID", "ECUT", "MESH_SPACING")] 

+

1112 ): 

+

1113 warn( 

+

1114 "You have specified one of FD_GRID, ECUT or MESH_SPACING, " 

+

1115 "conversion of h to mesh grid is ignored." 

+

1116 ) 

+

1117 else: 

+

1118 # gpts = h2gpts(h, atoms.cell) 

+

1119 # params["gpts"] = gpts 

+

1120 # Use MESH_SPACING instead of FD_GRID so the conversion does not depend on the cell 

+

1121 converted_sparc_params["MESH_SPACING"] = h / Bohr 

+

1122 

+

1123 # gpts --> FD_GRID 

+

1124 if "gpts" in params: 

+

1125 gpts = params.pop("gpts") 

+

1126 if validator.validate_input("FD_GRID", gpts): 

+

1127 converted_sparc_params["FD_GRID"] = gpts 

+

1128 else: 

+

1129 raise ValueError(f"Input parameter gpts has invalid value {gpts}") 

+

1130 

+

1131 # kpts 

+

1132 if "kpts" in params: 

+

1133 kpts = params.pop("kpts") 

+

1134 if validator.validate_input("KPOINT_GRID", kpts): 

+

1135 converted_sparc_params["KPOINT_GRID"] = kpts 

+

1136 else: 

+

1137 raise ValueError(f"Input parameter kpts has invalid value {kpts}") 

+

1138 

+

1139 # nbands 

+

1140 if "nbands" in params: 

+

1141 # TODO: Check if the nbands are correct in current system 

+

1142 # TODO: default $N_e/2 \\times 1.2 + 5$ 

+

1143 nbands = params.pop("nbands") 

+

1144 if validator.validate_input("NSTATES", nbands): 

+

1145 converted_sparc_params["NSTATES"] = nbands 

+

1146 else: 

+

1147 raise ValueError(f"Input parameter nbands has invalid value {nbands}") 

+

1148 

+

1149 # convergence is a dict 

+

1150 if "convergence" in params: 

+

1151 convergence = params.pop("convergence") 

+

1152 tol_e = convergence.get("energy", None) 

+

1153 if tol_e: 

+

1154 # TOL SCF: Ha / atom <--> energy tol: eV / atom 

+

1155 converted_sparc_params["TOL_SCF"] = tol_e / Hartree 

+

1156 

+

1157 tol_f = convergence.get("relax", None) 

+

1158 if tol_f: 

+

1159 # TOL_RELAX: Ha / Bohr <--> force tol: eV / Angstrom 

+

1160 converted_sparc_params["TOL_RELAX"] = tol_f / Hartree * Bohr 

+

1161 

+

1162 tol_dens = convergence.get("density", None) 

+

1163 if tol_dens: 

+

1164 # TOL SCF: electrons / atom 

+

1165 converted_sparc_params["TOL_PSEUDOCHARGE"] = tol_dens 

+

1166 

+

1167 tol_stress = convergence.get("stress", None) 

+

1168 if tol_stress: 

+

1169 # TOL_RELAX_CELL: converted from GPa 

+

1170 converted_sparc_params["TOL_RELAX_CELL"] = tol_stress / GPa 

+

1171 

+

1172 return converted_sparc_params 

+

1173 

+
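# A short numeric sketch of the conversions above, assuming ase.units
# (Bohr ~ 0.529 Angstrom, Hartree ~ 27.211 eV):
#
#   from ase.units import Bohr, Hartree
#
#   h = 0.2                 # grid spacing in Angstrom
#   h / Bohr                # -> ~0.378, written as MESH_SPACING (Bohr)
#   tol_e = 1e-5            # energy tol in eV/atom
#   tol_e / Hartree         # -> ~3.7e-7, written as TOL_SCF (Ha/atom)
#   tol_f = 0.01            # force tol in eV/Angstrom
#   tol_f / Hartree * Bohr  # -> ~1.9e-4, written as TOL_RELAX (Ha/Bohr)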

1174 def print_sysinfo(self, command=None): 

+

1175 """Record current runtime information""" 

+

1176 now = datetime.datetime.now().isoformat() 

+

1177 if command is None: 

+

1178 command = self.command 

+

1179 msg = ( 

+

1180 "\n" + "*" * 80 + "\n" 

+

1181 f"SPARC program started by SPARC-X-API at {now}\n" 

+

1182 f"command: {command}\n" 

+

1183 ) 

+

1184 if self.log is None: 

+

1185 print(msg) 

+

1186 else: 

+

1187 with open(self.log, "a") as fd: 

+

1188 print(msg, file=fd) 

+

1189 

+

1190 ############################################### 

+

1191 # Below are deprecated functions from v1 

+

1192 ############################################### 

+

1193 @deprecated("Please use SPARC.set instead for setting grid") 

+

1194 def interpret_grid_input(self, atoms, **kwargs): 

+

1195 return None 

+

1196 

+

1197 @deprecated("Please use SPARC.set instead for setting kpoints") 

+

1198 def interpret_kpoint_input(self, atoms, **kwargs): 

+

1199 return None 

+

1200 

+

1201 @deprecated("Please use SPARC.set instead for setting downsampling parameter") 

+

1202 def interpret_downsampling_input(self, atoms, **kwargs): 

+

1203 return None 

+

1204 

+

1205 @deprecated("Please use SPARC.set instead for setting kpoint shift") 

+

1206 def interpret_kpoint_shift(self, atoms, **kwargs): 

+

1207 return None 

+

1208 

+

1209 @deprecated("Please use SPARC.psp_dir instead") 

+

1210 def get_pseudopotential_directory(self, pseudo_dir=None, **kwargs): 

+

1211 return self.sparc_bundle.psp_dir 

+

1212 

+

1213 def get_nstates(self): 

+

1214 raise NotImplementedError("Parsing nstates is not yet implemented.") 

+

1215 

+

1216 @deprecated("Please set the variables separatedly") 

+

1217 def setup_parallel_env(self): 

+

1218 return None 

+

1219 

+

1220 @deprecated("Please use SPARC._make_command instead") 

+

1221 def generate_command(self): 

+

1222 return self._make_command(f"-name {self.label}") 

+

1223 

+

1224 def estimate_memory(self, atoms=None, units="GB", **kwargs): 

+

1225 """ 

+

1226 a function to estimate the amount of memory required to run 

+

1227 the selected calculation. This function takes in **kwargs, 

+

1228 but if none are passed in, it will fall back on the parameters 

+

1229 input when the class was instantiated 

+

1230 """ 

+

1231 conversion_dict = { 

+

1232 "MB": 1e-6, 

+

1233 "GB": 1e-9, 

+

1234 "B": 1, 

+

1235 "byte": 1, 

+

1236 "KB": 1e-3, 

+

1237 } 

+

1238 if kwargs == {}: 

+

1239 kwargs = self.parameters 

+

1240 if atoms is None: 

+

1241 atoms = self.atoms 

+

1242 

+

1243 nstates = kwargs.get("NSTATES") 

+

1244 if nstates is None: 

+

1245 nstates = self.get_nstates(atoms=atoms, **kwargs) 

+

1246 

+

1247 # some annoying code to figure out if it's a spin system 

+

1248 spin_polarized = kwargs.get("nstates") 

+

1249 if spin_polarized is not None: 

+

1250 spin_polarized = int(spin_polarized) 

+

1251 else: 

+

1252 spin_polarized = 1 

+

1253 if spin_polarized == 2: 

+

1254 spin_factor = 2 

+

1255 else: 

+

1256 spin_factor = 1 

+

1257 

+

1258 if "MESH_SPACING" in kwargs: 

+

1259 # MESH_SPACING: Bohr; h: angstrom 

+

1260 kwargs["h"] = kwargs.pop("MESH_SPACING") / Bohr 

+

1261 npoints = np.prod(self.interpret_grid_input(atoms, **kwargs)) 

+

1262 

+

1263 kpt_grid = self.interpret_kpoint_input(atoms, **kwargs) 

+

1264 kpt_factor = np.ceil(np.prod(kpt_grid) / 2) 

+

1265 

+

1266 # this is a pretty generous over-estimate 

+

1267 estimate = 5 * npoints * nstates * kpt_factor * spin_factor * 8 # bytes 

+

1268 converted_estimate = estimate * conversion_dict[units] 

+

1269 return converted_estimate 

+

1270 

+
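# A worked example of the estimate above (hypothetical sizes):
#
#   npoints = 50**3      # 125,000 grid points
#   nstates = 20
#   kpt_factor = 1       # ceil(1 kpoint / 2)
#   spin_factor = 1
#   5 * npoints * nstates * kpt_factor * spin_factor * 8
#   # -> 1.0e8 bytes, i.e. 0.1 with units="GB" (factor 1e-9)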

1271 def get_scf_steps(self, include_uncompleted_last_step=False): 

+

1272 raise NotImplementedError 

+

1273 

+

1274 @deprecated("Use SPARC.get_number_of_ionic_steps instead") 

+

1275 def get_geometric_steps(self, include_uncompleted_last_step=False): 

+

1276 raise NotImplementedError 

+

1277 

+

1278 def get_runtime(self): 

+

1279 raise NotImplementedError 

+

1280 

+

1281 def get_fermi_level(self): 

+

1282 raise NotImplementedError 

+

1283 

+

1284 @deprecated 

+

1285 def concatinate_output(self): 

+

1286 raise DeprecationWarning("Functionality moved in sparc.SparcBundle.") 

+

1287 

+

1288 @deprecated 

+

1289 def read_line(self, **kwargs): 

+

1290 raise DeprecationWarning( 

+

1291 "Parsers for individual files have been moved to sparc.sparc_parsers module" 

+

1292 ) 

+

1293 

+

1294 @deprecated 

+

1295 def parse_output(self, **kwargs): 

+

1296 raise DeprecationWarning("Use SPARC.read_results for parsing results!") 

+

1297 

+

1298 @deprecated 

+

1299 def parse_relax(self, *args, **kwargs): 

+

1300 raise DeprecationWarning("Use SPARC.read_results for parsing results!") 

+

1301 

+

1302 @deprecated 

+

1303 def parse_MD(self, *args, **kwargs): 

+

1304 raise DeprecationWarning("Use SPARC.read_results for parsing results!") 

+

1305 

+

1306 @deprecated 

+

1307 def parse_input_args(self, input_block): 

+

1308 raise DeprecationWarning("Use SPARC.set for argument handling!") 

+

1309 

+

1310 @deprecated 

+

1311 def recover_index_order_from_ion_file(self, label): 

+

1312 raise DeprecationWarning( 

+

1313 "Use SPARC.sort and SPARC.resort for atomic index sorting!" 

+

1314 ) 

+

1315 

+

1316 @deprecated 

+

1317 def atoms_dict(self, *args, **kwargs): 

+

1318 raise DeprecationWarning("") 

+

1319 

+

1320 @deprecated 

+

1321 def dict_atoms(self, *args, **kwargs): 

+

1322 raise DeprecationWarning("") 

+
diff --git a/_static/htmlcov/z_e32f35a0016f670d_common_py.html b/_static/htmlcov/z_e32f35a0016f670d_common_py.html new file mode 100644 index 00000000..af6b14e4 --- /dev/null +++ b/_static/htmlcov/z_e32f35a0016f670d_common_py.html @@ -0,0 +1,103 @@
Coverage for sparc/common.py: 100% (4 statements), coverage.py v7.6.7, created at 2024-11-20 18:11 +0000
1from pathlib import Path 

+

2 

+

3import pkg_resources 

+

4 

+

5repo_dir = Path(pkg_resources.resource_filename("sparc", ".")) 

+

6psp_dir = Path(pkg_resources.resource_filename("sparc", "psp")) 

+
diff --git a/_static/htmlcov/z_e32f35a0016f670d_docparser_py.html b/_static/htmlcov/z_e32f35a0016f670d_docparser_py.html new file mode 100644 index 00000000..1450971e --- /dev/null +++ b/_static/htmlcov/z_e32f35a0016f670d_docparser_py.html @@ -0,0 +1,866 @@
Coverage for sparc/docparser.py: 84% (363 statements), coverage.py v7.6.7, created at 2024-11-20 18:11 +0000
1# -*- coding: utf-8 -*- 

+

2""" 

+

3A module to parse the latex documents provided by SPARC 

+

4and convert to its Python API 

+

5 

+

6Created on Wed Mar 1 15:32:31 EST 2023 

+

7 

+

8Tian Tian (alchem0x2a@gmail.com) 

+

9""" 

+

10import json 

+

11import re 

+

12from copy import copy 

+

13from datetime import datetime 

+

14from pathlib import Path 

+

15from warnings import warn 

+

16 

+

17import numpy as np 

+

18 

+

19# Some fields in master SPARC doc may cause auto type detection 

+

20# to fail, need hard-coded post-processing for now 

+

21postprocess_items = { 

+

22 "RELAX_FLAG": {"allow_bool_input": False}, 

+

23 "NPT_SCALE_CONSTRAINTS": {"type": "string"}, 

+

24 "NPT_SCALE_VECS": {"type": "integer array"}, 

+

25 "TOL_POISSON": {"type": "double"}, 

+

26} 

+

27 

+

28sparc_repo_url = "https://github.com/SPARC-X/SPARC.git" 

+

29 

+

30 

+

31class SparcDocParser(object): 

+

32 """Parses LaTeX documentation of SPARC-X and converts it into a Python API. 

+

33 

+

34 This class extracts parameter information from LaTeX source files, 

+

35 organizing it into a structured format that can be easily used in 

+

36 Python. It supports parsing of version details, parameter types, 

+

37 units, and other relevant information. 

+

38 

+

39 Attributes: 

+

40 version (str): Parsed SPARC version, based on the documentation. 

+

41 parameter_categories (list): Categories of parameters extracted. 

+

42 parameters (dict): Extracted parameters with detailed information. 

+

43 other_parameters (dict): Additional parameters not categorized. 

+

44 

+

45 Methods: 

+

46 find_main_file(main_file_pattern): Finds the main LaTeX file based on a pattern. 

+

47 get_include_files(): Retrieves a list of included LaTeX files. 

+

48 parse_version(parse): Parses and sets the SPARC version. 

+

49 parse_parameters(): Extracts parameters from LaTeX files. 

+

50 postprocess(): Applies hard-coded post-processing to some parameters. 

+

51 to_dict(): Converts parsed information into a dictionary. 

+

52 json_from_directory(directory, include_subdirs, **kwargs): Class method to create JSON from a directory. 

+

53 json_from_repo(url, version, include_subdirs, **kwargs): Class method to create JSON from a repository. 

+

54 

+

55 """ 

+

56 

+

57 def __init__( 

+

58 self, 

+

59 directory=".", 

+

60 main_file="*Manual.tex", 

+

61 intro_file="Introduction.tex", 

+

62 params_from_intro=True, 

+

63 parse_version=True, 

+

64 ): 

+

65 """Create the doc parser pointing to the root of the doc file of SPARC 

+

66 

+

67 The SPARC doc is organized as follows: 

+

68 SPARC/doc/.LaTeX/ 

+

69 |---- Manual.tex 

+

70 |---- Introduction.tex 

+

71 |---- {Section}.tex 

+

72 

+

73 For parameters additional to the standard SPARC options, such as the SQ / cyclix 

+

74 options, we merge the dict from the sub-dirs 

+

75 

+

76 Args: 

+

77 directory: root directory to the LaTeX files, may look like `SPARC/doc/.LaTeX` 

+

78 main_file: main LaTeX file for the manual 

+

79 intro_file: LaTeX file for the introduction 

+

80 params_from_intro: only contain the parameters that can be parsed in `intro_file` 

+

81 parse_version: get the SPARC version by date 

+

82 """ 

+

83 self.root = Path(directory) 

+

84 self.main_file = self.find_main_file(main_file) 

+

85 self.intro_file = self.root / intro_file 

+

86 if not self.intro_file.is_file(): 

+

87 raise FileNotFoundError(f"Introduction file {intro_file} is missing!") 

+

88 self.include_files = self.get_include_files() 

+

89 self.params_from_intro = params_from_intro 

+

90 self.parse_version(parse_version) 

+

91 self.parse_parameters() 

+

92 self.postprocess() 

+

93 

+

94 def find_main_file(self, main_file_pattern): 

+

95 """ 

+

96 Finds the main LaTeX file that matches the given pattern, e.g. Manual.tex or Manual_cyclix.tex 

+

97 

+

98 Args: 

+

99 main_file_pattern (str): Pattern to match the main LaTeX file name. 

+

100 

+

101 Returns: 

+

102 Path: Path to the main LaTeX file. 

+

103 

+

104 Raises: 

+

105 FileNotFoundError: If no or multiple files match the pattern. 

+

106 """ 

+

107 candidates = list(self.root.glob(main_file_pattern)) 

+

108 if len(candidates) != 1: 

+

109 raise FileNotFoundError( 

+

110 f"Main file {main_file_pattern} is missing or more than 1 exists!" 

+

111 ) 

+

112 return candidates[0] 

+

113 

+

114 def get_include_files(self): 

+

115 """ 

+

116 Retrieves a list of LaTeX files included in the main LaTeX document, e.g. Manual.tex. 

+

117 

+

118 Returns: 

+

119 list: A list of paths to the included LaTeX files. 

+

120 """ 

+

121 pattern = r"\\begin\{document\}(.*?)\\end\{document\}" 

+

122 text = open(self.main_file, "r", encoding="utf8").read() 

+

123 # Only the first begin/end document will be matched 

+

124 match = re.findall(pattern, text, re.DOTALL)[0] 

+

125 pattern_include = r"\\include\{(.+?)\}" 

+

126 include = re.findall(pattern_include, match, re.DOTALL) 

+

127 include_files = [] 

+

128 for name in include: 

+

129 tex_file = self.root / f"{name}.tex" 

+

130 if tex_file.is_file(): 

+

131 include_files.append(tex_file) 

+

132 else: 

+

133 warn( 

+

134 ( 

+

135 f"TeX file {tex_file} is missing! It may be a typo in the document, " 

+

136 "ignore parameters from this file." 

+

137 ) 

+

138 ) 

+

139 return include_files 

+

140 

+
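# A small sketch of the \include extraction above, run on a trimmed,
# hypothetical Manual.tex body:
#
#   import re
#   text = r"\begin{document} \include{Introduction} \include{SCF} \end{document}"
#   body = re.findall(r"\\begin\{document\}(.*?)\\end\{document\}", text, re.DOTALL)[0]
#   re.findall(r"\\include\{(.+?)\}", body, re.DOTALL)
#   # -> ['Introduction', 'SCF']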

141 def parse_version(self, parse=True): 

+

142 """ 

+

143 Parses and sets the SPARC version based on the C-source file, if possible. 

+

144 The date for the SPARC code is parsed from initialization.c in the "YYYY.MM.DD" 

+

145 format. 

+

146 

+

147 Args: 

+

148 parse (bool): Whether to parse the version from the documentation. 

+

149 

+

150 Sets: 

+

151 self.version (str): The parsed version in 'YYYY.MM.DD' format or None, 

+

152 if either parse=False, or the C-source code is missing 

+

153 """ 

+

154 if parse is False: 

+

155 self.version = None 

+

156 return 

+

157 init_c = self.root.parents[1] / "src" / "initialization.c" 

+

158 if not init_c.is_file(): 

+

159 warn( 

+

160 'Cannot find the c source file "initialization.c", skip version parsing!' 

+

161 ) 

+

162 self.version = None 

+

163 return 

+

164 text = open(init_c, "r", encoding="utf8").read() 

+

165 pattern_version = r"SPARC\s+\(\s*?version(.*?)\)" 

+

166 match = re.findall(pattern_version, text) 

+

167 if len(match) != 1: 

+

168 warn( 

+

169 'Parsing c source file "initialization.c" for version is unsuccessful!' 

+

170 ) 

+

171 self.version = None 

+

172 return 

+

173 # We need more lenient whitespace matching in case the source code includes extra spaces 

+

174 date_str = re.sub(r"\s+", " ", match[0].strip().replace(",", " ")) 

+

175 # Older version of SPARC doc may contain abbreviated month format 

+

176 date_version = None 

+

177 for fmt in ("%b %d %Y", "%B %d %Y"): 

+

178 try: 

+

179 date_version = datetime.strptime(date_str, fmt).strftime("%Y.%m.%d") 

+

180 break 

+

181 except Exception: 

+

182 continue 

+

183 if date_version is None: 

+

184 raise ValueError(f"Cannot parse date time {date_str}") 

+

185 self.version = date_version 

+

186 return 

+

187 

+
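# A sketch of the date normalization above, using hypothetical date strings:
#
#   from datetime import datetime
#   datetime.strptime("Nov 20 2023", "%b %d %Y").strftime("%Y.%m.%d")
#   # -> '2023.11.20'  (abbreviated month)
#   datetime.strptime("November 20 2023", "%B %d %Y").strftime("%Y.%m.%d")
#   # -> '2023.11.20'  (full month)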

188 def __parse_parameter_from_frame(self, frame): 

+

189 """Parse the parameters from a single LaTeX frame 

+

190 

+

191 Args: 

+

192 frame (str): a string containing the LaTeX frame (e.g. \\begin{frame} ... \\end{frame}) 

+

193 

+

194 Returns: 

+

195 dict: a key-value paired dict parsed from the frame. Some field names include: 

+

196 name: TOL_POISSON 

+

197 type: Double | Integer | String | Character | Double array 

+

198 unit: specified in the doc 

+

199 """ 

+

200 pattern_label = r"\\texttt\{(.*?)\}.*?\\label\{(.*?)\}" 

+

201 pattern_block = r"\\begin\{block\}\{(.*?)\}([\s\S]*?)\\end\{block\}" 

+

202 match_label = re.findall(pattern_label, frame, re.DOTALL | re.MULTILINE) 

+

203 if len(match_label) != 1: 

+

204 warn("Provided a non-structured frame for parsing, skip.") 

+

205 return {} 

+

206 symbol, label = ( 

+

207 convert_tex_parameter(match_label[0][0].strip()), 

+

208 match_label[0][1].strip(), 

+

209 ) 

+

210 # Every match contains the (name, content) pair of the blocks 

+

211 matches = re.findall(pattern_block, frame, re.DOTALL | re.MULTILINE) 

+

212 param_dict = {"symbol": symbol, "label": label} 

+

213 # TODO: add more type definition 

+

214 for key, content in matches: 

+

215 key = key.lower() 

+

216 content = content.strip() 

+

217 # Do not parse commented-out values 

+

218 

+

219 if (key == "type") and (content.startswith("%")): 

+

220 warn(f"Parameter {symbol} is disabled in the doc, ignore!") 

+

221 return {} 

+

222 if key in ("example",): 

+

223 content = convert_tex_example(content) 

+

224 param_dict[key] = content 

+

225 # Sanitize 1: Convert types 

+

226 param_dict = sanitize_type(param_dict) 

+

227 # Sanitize 2: Convert default values 

+

228 param_dict = sanitize_default(param_dict) 

+

229 # Sanitize 3: Remove TeX components in description and remark 

+

230 param_dict = sanitize_description(param_dict) 

+

231 

+

232 return param_dict 

+

233 

+

234 def __parse_frames_from_text(self, text): 

+

235 """Extract all the frames that aren't commented in the text 

+

236 

+

237 Arguments: 

+

238 text (str): Full LaTeX text 

+

239 Returns: 

+

240 list: Matched LaTeX Beamer frame fragments 

+

241 """ 

+

242 pattern_frame = r"\\begin\{frame\}(.*?)\\end\{frame\}" 

+

243 matches = re.findall(pattern_frame, text, re.DOTALL | re.MULTILINE) 

+

244 return matches 

+

245 

+

246 def __parse_intro_file(self): 

+

247 """Parse the introduction file 

+

248 

+

249 Returns: 

+

250 parameter_dict (dict): dictionary using the parameter category as the main key 

+

251 (following order in Introduction.tex) 

+

252 parameter_categories (list): list of categories 

+

253 """ 

+

254 text_intro = open(self.intro_file, "r", encoding="utf8").read() 

+

255 pattern_params = ( 

+

256 r"^\\begin\{frame\}.*?\{Input file options\}.*?$(.*?)\\end\{frame\}" 

+

257 ) 

+

258 pattern_block = r"\\begin\{block\}\{(.*?)\}([\s\S]*?)\\end\{block\}" 

+

259 pattern_line = r"\\hyperlink\{(.*?)\}{\\texttt\{(.*?)\}\}" 

+

260 text_params = re.findall(pattern_params, text_intro, re.DOTALL | re.MULTILINE)[ 

+

261 0 

+

262 ] 

+

263 parameter_categories = [] 

+

264 parameter_dict = {} 

+

265 for match in re.findall(pattern_block, text_params): 

+

266 cat = match[0].lower() 

+

267 # print(cat) 

+

268 if cat in parameter_categories: 

+

269 raise ValueError( 

+

270 f"Key {cat} already exists! You might have a wrong LaTeX doc file!" 

+

271 ) 

+

272 parameter_categories.append(cat) 

+

273 parameter_dict[cat] = [] 

+

274 param_lines = match[1].split("\n") 

+

275 for line in param_lines: 

+

276 matches = re.findall(pattern_line, line) 

+

277 if len(matches) == 0: 

+

278 continue 

+

279 # Each match should contain 2 items, the "Link" that matches a reference in included-tex files 

+

280 # symbol is the actual symbol name (in text-format) 

+

281 # In most cases the link and symbol should be the same 

+

282 for match in matches: 

+

283 label, symbol = match[0].strip(), convert_tex_parameter( 

+

284 match[1].strip() 

+

285 ) 

+

286 parameter_dict[cat].append({"label": label, "symbol": symbol}) 

+

287 return parameter_categories, parameter_dict 

+

288 

+

289 def __parse_all_included_files(self): 

+

290 """Pop up all known parameters from included files 

+

291 Returns: 

+

292 dict: All known parameters from included files 

+

293 """ 

+

294 all_params = {} 

+

295 for f in self.include_files: 

+

296 # Do not parse the intro file since it's a waste of time 

+

297 if f.resolve() == self.intro_file.resolve(): 

+

298 continue 

+

299 text = open(f, "r", encoding="utf8").read() 

+

300 frames = self.__parse_frames_from_text(text) 

+

301 for frame in frames: 

+

302 dic = self.__parse_parameter_from_frame(frame) 

+

303 if len(dic) > 0: 

+

304 label = dic["label"] 

+

305 all_params[label] = dic 

+

306 return all_params 

+

307 

+

308 def parse_parameters(self): 

+

309 """The actual thing for parsing parameters 

+

310 

+

311 Sets: 

+

312 parameters (dict): All parsed parameters 

+

313 parameter_categories (list): List of categories 

+

314 other_parameters (dict): Any parameters that are not included in the categories 

+

315 """ 

+

316 parameter_categories, parameter_dict = self.__parse_intro_file() 

+

317 all_params = self.__parse_all_included_files() 

+

318 self.parameter_categories = parameter_categories 

+

319 # parameters contain only the "valid" ones that are shown in the intro 

+

320 # all others are clustered in "other_parameters" 

+

321 self.parameters = {} 

+

322 for cat, params in parameter_dict.items(): 

+

323 for p in params: 

+

324 label = p["label"] 

+

325 symbol = p["symbol"] 

+

326 param_details = all_params.pop(label, {}) 

+

327 if param_details != {}: 

+

328 param_details["category"] = cat 

+

329 self.parameters[symbol] = param_details 

+

330 

+

331 self.other_parameters = {} 

+

332 for param_details in all_params.values(): 

+

333 symbol = param_details["symbol"] 

+

334 self.other_parameters[symbol] = param_details 

+

335 return 

+

336 

+

337 def postprocess(self): 

+

338 """Use the hardcoded dict prostprocess_items to fix some issues""" 

+

339 for param, fix in postprocess_items.items(): 

+

340 if param in self.parameters: 

+

341 self.parameters[param].update(**fix) 

+

342 return 

+

343 

+

344 def to_dict(self): 

+

345 """Output a json dict from current document parser 

+

346 

+

347 Returns: 

+

348 dict: All API schemes in dict 

+

349 """ 

+

350 doc = {} 

+

351 doc["sparc_version"] = self.version 

+

352 doc["categories"] = self.parameter_categories 

+

353 doc["parameters"] = {k: v for k, v in sorted(self.parameters.items())} 

+

354 doc["other_parameters"] = { 

+

355 k: v for k, v in sorted(self.other_parameters.items()) 

+

356 } 

+

357 doc["data_types"] = sorted(set([p["type"] for p in self.parameters.values()])) 

+

358 return doc 

+

359 

+

360 @classmethod 

+

361 def json_from_directory(cls, directory=".", include_subdirs=True, **kwargs): 

+

362 """ 

+

363 Recursively add parameters from all Manual files 

+

364 Arguments: 

+

365 directory (str or PosixPath): The directory to the LaTeX files, e.g. <sparc-root>/doc/.LaTeX 

+

366 include_subdirs (bool): If true, also parse the manual files in submodules, e.g. cyclix, highT 

+

367 Returns: 

+

368 str: Formatted json-string of the API 

+

369 """ 

+

370 directory = Path(directory) 

+

371 root_dict = cls(directory=directory, **kwargs).to_dict() 

+

372 if include_subdirs: 

+

373 for sub_manual_tex in directory.glob("*/*Manual.tex"): 

+

374 subdir = sub_manual_tex.parent 

+

375 try: 

+

376 sub_dict = cls(directory=subdir, parse_version=False).to_dict() 

+

377 except FileNotFoundError: 

+

378 print(subdir, " Latex files not found. Check naming conventions for Manual.tex. Expects format *Manual.tex") 

+

379 continue 

+

380 for param, param_desc in sub_dict["parameters"].items(): 

+

381 if param not in root_dict["parameters"]: 

+

382 root_dict["parameters"][param] = param_desc 

+

383 json_string = json.dumps(root_dict, indent=True) 

+

384 return json_string 

+

385 

+

386 @classmethod 

+

387 def json_from_repo( 

+

388 cls, url=sparc_repo_url, version="master", include_subdirs=True, **kwargs 

+

389 ): 

+

390 """ 

+

391 Download the source code from git and use json_from_directory to parse 

+

392 Arguments: 

+

393 url (str): URL for the repository of SPARC, default is "https://github.com/SPARC-X/SPARC.git" 

+

394 version (str): Git version or commit hash of the SPARC repo 

+

395 include_subdirs (bool): If true, also parse the manual files in submodules, e.g. cyclix, highT 

+

396 Returns: 

+

397 str: Formatted json-string of the API 

+

398 """ 

+

399 import tempfile 

+

400 from subprocess import run 

+

401 

+

402 with tempfile.TemporaryDirectory() as tmpdir: 

+

403 tmpdir = Path(tmpdir) 

+

404 download_dir = tmpdir / "SPARC" 

+

405 download_cmds = ["git", "clone", "--depth", "1", str(url), "SPARC"] 

+

406 run(download_cmds, cwd=tmpdir) 

+

407 if version not in ["master", "HEAD"]: 

+

408 fetch_cmds = ["git", "fetch", "--depth", "1", str(version)] 

+

409 run(fetch_cmds, cwd=download_dir) 

+

410 checkout_cmds = ["git", "checkout", str(version)] 

+

411 run(checkout_cmds, cwd=download_dir) 

+

412 json_string = cls.json_from_directory( 

+

413 directory=download_dir / "doc" / ".LaTeX", 

+

414 include_subdirs=include_subdirs, 

+

415 **kwargs, 

+

416 ) 

+

417 return json_string 

+

418 

+
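# Typical usage of the two class methods above, assuming network access to the
# default SPARC repo for the git variant:
#
#   json_string = SparcDocParser.json_from_repo(version="master")
#   # or, for a local checkout:
#   json_string = SparcDocParser.json_from_directory("SPARC/doc/.LaTeX")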

419 

+

420def convert_tex_parameter(text): 

+

421 """ 

+

422 Convert a TeX string to a non-escaped name (for parameter only) 

+

423 Arguments: 

+

424 text (str): Parameter name in LaTeX format 

+

425 Returns: 

+

426 str: Text with sanitized parameter 

+

427 """ 

+

428 return text.strip().replace("\_", "_") 

+

429 

+

430 

+

431def convert_tex_example(text): 

+

432 """Convert TeX codes of examples as much as possible 

+

433 The examples follow the format 

+

434 SYMBOL: values (may contain new lines) 

+

435 Arguments: 

+

436 text (str): Single or multiline LaTeX contents 

+

437 Returns: 

+

438 str: Sanitized literal text 

+

439 """ 

+

440 mapper = {"\\texttt{": "", "\_": "_", "}": "", "\\": "\n"} 

+

441 new_text = copy(text) 

+

442 for m, r in mapper.items(): 

+

443 new_text = new_text.replace(m, r) 

+

444 

+

445 symbol, values = new_text.split(":") 

+

446 symbol = symbol.strip() 

+

447 values = re.sub("\n+", "\n", values.strip()) 

+

448 # Remove all comment lines 

+

449 values = "\n".join( 

+

450 [l for l in values.splitlines() if not l.lstrip().startswith("%")] 

+

451 ) 

+

452 new_text = f"{symbol}: {values}" 

+

453 return new_text 

+

454 

+
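# For instance, a hypothetical doc snippet converts like:
#
#   convert_tex_example(r"\texttt{MESH\_SPACING}: \texttt{0.4}")
#   # -> 'MESH_SPACING: 0.4'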

455 

+

456def convert_tex_default(text, desired_type=None): 

+

457 """Convert default values as much as possible. 

+

458 The desired type is used to convert the default value 

+

459 to the closest format 

+

460 

+

461 Currently supported conversions 

+

462 1. Remove all surrounding text modifiers (texttt) 

+

463 2. Remove all symbol wrappers $ 

+

464 3. Convert value to single or array 

+

465 

+

466 Arguments: 

+

467 text (str): Raw text string for value 

+

468 desired_type (str or None): Data type to be converted to. If None, preserve the string format 

+

469 

+

470 Returns: 

+

471 converted: Value converted from raw text 

+

472 """ 

+

473 mapper = { 

+

474 "\\texttt{": "", 

+

475 "}": "", 

+

476 "{": "", 

+

477 "\\_": "_", 

+

478 "\_": "_", 

+

479 "\\\\": "\n", 

+

480 "$": "", 

+

481 } 

+

482 text = text.strip() 

+

483 text = re.sub(r"\\hyperlink\{.*?\}", "", text) 

+

484 text = re.sub(r"\\times", "x", text) 

+

485 for m, r in mapper.items(): 

+

486 text = text.replace(m, r) 

+

487 text = re.sub(r"\n+", "\n", text) 

+

488 # Remove all comment lines 

+

489 text = "\n".join([l for l in text.splitlines() if not l.lstrip().startswith("%")]) 

+

490 

+

491 # print(text) 

+

492 converted = None 

+

493 if "none" in text.lower(): 

+

494 converted = None 

+

495 elif "no default" in text.lower(): 

+

496 converted = None 

+

497 elif "automat" in text.lower(): 

+

498 converted = "auto" 

+

499 else: 

+

500 # try type conversion 

+

501 if desired_type is None: 

+

502 converted = text 

+

503 elif desired_type == "string": 

+

504 converted = text 

+

505 else: 

+

506 converted = text2value(text, desired_type) 

+

507 return converted 

+

508 

+

509 

+

510def convert_comment(text): 

+

511 """Used to remove TeX-specific commands in description and remarks 

+

512 as much as possible 

+

513 

+

514 Arguments: 

+

515 text (str): Raw LaTeX code for the comment section in manual 

+

516 

+

517 Returns: 

+

518 str: Sanitized plain text 

+

519 """ 

+

520 mapper = { 

+

521 "\\texttt{": "", 

+

522 "}": "", 

+

523 "{": "", 

+

524 "\\_": "_", 

+

525 "\_": "_", 

+

526 "\\\\": "\n", 

+

527 "$": "", 

+

528 } 

+

529 text = text.strip() 

+

530 text = re.sub(r"\\hyperlink\{.*?\}", "", text) 

+

531 text = re.sub(r"\\href\{.*?\}", "", text) 

+

532 text = re.sub(r"\\times", "x", text) 

+

533 for m, r in mapper.items(): 

+

534 text = text.replace(m, r) 

+

535 text = re.sub(r"\n+", "\n", text) 

+

536 # Remove all comment lines 

+

537 text = "\n".join([l for l in text.splitlines() if not l.lstrip().startswith("%")]) 

+

538 return text 

+

539 

+

540 

+

541def text2value(text, desired_type): 

+

542 """Convert raw text to a desired type 

+

543 

+

544 Arguments: 

+

545 text (str): Text contents for the value 

+

546 desired_type (str): Target data type from 'string', 'integer', 

+

547 'integer array', 'double', 'double array', 

+

548 'bool', 'bool array' 

+

549 Returns: 

+

550 converted: Value converted to the desired type 

+

551 """ 

+

552 if desired_type is None: 

+

553 return text 

+

554 desired_type = desired_type.lower() 

+

555 if desired_type == "string": 

+

556 return text.strip() 

+

557 

+

558 try: 

+

559 arr = np.genfromtxt(text.splitlines(), delimiter=" ", dtype=float) 

+

560 if np.isnan(arr).any(): 

+

561 warn( 

+

562 f"Some fields in {text} cannot converted to a numerical array, will skip conversion." 

+

563 ) 

+

564 arr = None 

+

565 except Exception as e: 

+

566 warn( 

+

567 f"Cannot transform {text} to array, skip converting. Error message is:\n {e}" 

+

568 ) 

+

569 arr = None 

+

570 

+

571 if arr is None: 

+

572 return None 

+

573 

+

574 # Upshape ndarray to at least 1D 

+

575 if arr.shape == (): 

+

576 arr = np.reshape(arr, [1]) 

+

577 

+

578 converted = None 

+

579 from contextlib import suppress 

+

580 

+

581 # Ignore all failures and make conversion None 

+

582 with suppress(Exception): 

+

583 if desired_type == "integer": 

+

584 converted = int(arr[0]) 

+

585 elif desired_type == "bool": 

+

586 converted = bool(arr[0]) 

+

587 elif desired_type == "double": 

+

588 converted = float(arr[0]) 

+

589 elif desired_type == "integer array": 

+

590 converted = np.ndarray.tolist(arr.astype(int)) 

+

591 elif desired_type == "bool array": 

+

592 converted = np.ndarray.tolist(arr.astype(bool)) 

+

593 elif desired_type == "double array": 

+

594 converted = np.ndarray.tolist(arr.astype(float)) 

+

595 return converted 

+

596 

+
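# A few hypothetical conversions with text2value:
#
#   text2value("25 25 25", "integer array")  # -> [25, 25, 25]
#   text2value("0.5", "double")              # -> 0.5
#   text2value("1 0 1", "bool array")        # -> [True, False, True]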

597 

+

598def is_array(text): 

+

599 """Simply try to convert a string into a numpy array and compare if length is larger than 1 

+

600 It is only used to distinguish a single float / int value from an array 

+

601 """ 

+

602 val = np.fromstring(text, sep=" ") 

+

603 if len(val) == 1: 

+

604 return False 

+

605 else: 

+

606 return True 

+

607 

+

608 

+

609def contain_only_bool(text): 

+

610 """Check if a string only contains 0 1 or spaces""" 

+

611 if any([c in text for c in (".", "+", "-", "e", "E")]): 

+

612 return False 

+

613 digits = re.findall(r"[-+e\d]+", text, re.DOTALL) 

+

614 for d in digits: 

+

615 val = int(d) 

+

616 if val not in (0, 1): 

+

617 return False 

+

618 return True 

+

619 

+
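# For example (hypothetical inputs):
#
#   contain_only_bool("1 0 1")  # -> True
#   contain_only_bool("0.5 1")  # -> False (contains ".")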

620 

+

621def sanitize_description(param_dict): 

+

622 """Sanitize the description and remark field 

+

623 

+

624 Arguments: 

+

625 param_dict (dict): Raw dict for one parameter entry 

+

626 

+

627 Returns: 

+

628 dict: Sanitized parameter dict with comment, remark and description 

+

629 converted to human-readable formats 

+

630 """ 

+

631 sanitized_dict = param_dict.copy() 

+

632 

+

633 original_desc = sanitized_dict["description"] 

+

634 sanitized_dict["description_raw"] = original_desc 

+

635 

+

636 original_remark = sanitized_dict.get("remark", "") 

+

637 sanitized_dict["remark_raw"] = original_remark 

+

638 

+

639 sanitized_dict["description"] = convert_comment(original_desc) 

+

640 sanitized_dict["remark"] = convert_comment(original_remark) 

+

641 return sanitized_dict 

+

642 

+

643 

+

644def sanitize_default(param_dict): 

+

645 """Sanitize the default field 

+

646 1. Create an extra field `default_remark` that copies original default 

+

647 2. Use `convert_tex_default` to convert values as much as possible 

+

648 

+

649 This function should be called after sanitize_type 

+

650 """ 

+

651 sanitized_dict = param_dict.copy() 

+

652 original_default = sanitized_dict["default"] 

+

653 sanitized_dict["default_remark"] = original_default 

+

654 converted_default = convert_tex_default(original_default, param_dict["type"]) 

+

655 sanitized_dict["default"] = converted_default 

+

656 return sanitized_dict 

+

657 

+

658 

+

659def sanitize_type(param_dict): 

+

660 """Sanitize the param dict so that the type are more consistent 

+

661 

+

662 For example, if type is Double / Integer, 

+

663 but parameter is a vector, 

+

664 make a double vector or integer vector 

+

665 """ 

+

666 sanitized_dict = param_dict.copy() 

+

667 symbol = param_dict["symbol"] 

+

668 origin_type = param_dict.get("type", None) 

+

669 if origin_type is None: 

+

670 print("Dict does not have type!") 

+

671 return sanitized_dict 

+

672 origin_type = origin_type.lower() 

+

673 

+

674 sanitized_type = None 

+

675 sanitized_dict["allow_bool_input"] = False 

+

676 # First pass, remove all singular types 

+

677 if origin_type == "0 or 1": 

+

678 origin_type = "integer" 

+

679 elif "permutation" in origin_type: 

+

680 sanitized_type = "integer" 

+

681 elif origin_type in ("string", "character"): 

+

682 sanitized_type = "string" 

+

683 elif "array" in origin_type: 

+

684 sanitized_type = origin_type 

+

685 

+

686 # Pass 2, test if int values are arrays 

+

687 if (origin_type in ["int", "integer", "double"]) and (sanitized_type is None): 

+

688 if "int" in origin_type: 

+

689 origin_type = "integer" 

+

690 # Test if the value from example is a single value or array 

+

691 try: 

+

692 example_value = param_dict["example"].split(":")[1] 

+

693 default = param_dict["default"] 

+

694 _array_test = is_array(example_value) 

+

695 _bool_test = contain_only_bool(example_value) and contain_only_bool(default) 

+

696 except Exception as e: 

+

697 warn( 

+

698 f"Array conversion failed for {example_value}, ignore." 

+

699 f"The error is {e}" 

+

700 ) 

+

701 _array_test = _bool_test = False # Fall back to a scalar, non-bool type 

+

702 

+

703 if _array_test is True: 

+

704 sanitized_type = f"{origin_type} array" 

+

705 else: 

+

706 sanitized_type = origin_type 

+

707 

+

708 # Pass 3: int to boolean test. This should be done very tight 

+

709 if _bool_test and ("integer" in sanitized_type): 

+

710 sanitized_dict["allow_bool_input"] = True 

+

711 

+

712 if sanitized_type is None: 

+

713 # Currently there is only one NPT_NH_QMASS has this type 

+

714 # TODO: think of a way to format a mixed array? 

+

715 warn(f"Type of {symbol} if not standard digit or array, mark as others.") 

+

716 sanitized_type = "other" 

+

717 # TODO: how about provide a true / false type? 

+

718 sanitized_dict["type"] = sanitized_type 

+

719 return sanitized_dict 

+

720 

+

721 

+

722if __name__ == "__main__": 

+

723 # Run the module as independent script to extract a json-formatted parameter list 

+

724 from argparse import ArgumentParser 

+

725 

+

726 argp = ArgumentParser(description="Parse the LaTeX doc to json") 

+

727 argp.add_argument( 

+

728 "-o", 

+

729 "--output", 

+

730 default="parameters.json", 

+

731 help="Output file name (json-formatted)", 

+

732 ) 

+

733 argp.add_argument( 

+

734 "--include-subdirs", 

+

735 action="store_true", 

+

736 help="Parse manual parameters from subdirs", 

+

737 ) 

+

738 argp.add_argument("--git", action="store_true") 

+

739 argp.add_argument( 

+

740 "--version", 

+

741 default="master", 

+

742 help="Version of the doc. Only works when using git repo", 

+

743 ) 

+

744 argp.add_argument( 

+

745 "root", 

+

746 nargs="?", 

+

747 help=( 

+

748 "Root of the SPARC doc LaTeX files, or remote git repo link. If not provided and --git is enables, use the default github repo" 

+

749 ), 

+

750 ) 

+

751 

+

752 args = argp.parse_args() 

+

753 output = Path(args.output).with_suffix(".json") 

+

754 if args.git: 

+

755 if args.root is None: 

+

756 root = sparc_repo_url 

+

757 else: 

+

758 root = args.root 

+

759 json_string = SparcDocParser.json_from_repo( 

+

760 url=root, version=args.version, include_subdirs=args.include_subdirs 

+

761 ) 

+

762 else: 

+

763 json_string = SparcDocParser.json_from_directory( 

+

764 directory=Path(args.root), include_subdirs=args.include_subdirs 

+

765 ) 

+

766 with open(output, "w", encoding="utf8") as fd: 

+

767 fd.write(json_string) 

+

768 print(f"SPARC parameter specifications written to {output}!") 

+

769 print("If you need to fintune the definitions, please edit them manually.") 

+
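For reference, the same extraction can be driven from Python instead of the CLI above; a minimal sketch, assuming the module path `sparc.docparser` for this listing:

```
from pathlib import Path

from sparc.docparser import SparcDocParser, sparc_repo_url

# Pull the LaTeX doc from the default git repo and dump the parameter
# spec, mirroring `python -m sparc.docparser --git`
json_string = SparcDocParser.json_from_repo(
    url=sparc_repo_url, version="master", include_subdirs=True
)
Path("parameters.json").write_text(json_string, encoding="utf8")
```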
diff --git a/_static/htmlcov/z_e32f35a0016f670d_download_data_py.html b/_static/htmlcov/z_e32f35a0016f670d_download_data_py.html
new file mode 100644
index 00000000..13f1b9f3
--- /dev/null
+++ b/_static/htmlcov/z_e32f35a0016f670d_download_data_py.html
@@ -0,0 +1,186 @@
[coverage.py v7.6.7 report header: sparc/download_data.py, 88% coverage, 50 statements, created 2024-11-20 18:11 +0000]

1"""Download the pseudopotential and other related files after sparc-x-api is installed 

+

2 

+

3Run: 

+

4 

+

5python -m sparc.download_data 

+

6""" 

+

7 

+

8import hashlib 

+

9import shutil 

+

10import tempfile 

+

11import zipfile 

+

12from io import BytesIO 

+

13from pathlib import Path 

+

14 

+

15# import urllib.request 

+

16from urllib.request import urlopen 

+

17 

+

18from .common import psp_dir 

+

19 

+

20sparc_tag = "b702c1061400a2d23c0e223e32182609d7958156" 

+

21sparc_source_url = "https://github.com/SPARC-X/SPARC/archive/{sparc_tag}.zip" 

+

22# MD5 checksum over all the bundled .psp8 files (see checksum_all below) 

+

23all_psp8_checksum = "5ef42c4a81733a90b0e080b771c5a73a" 

+

24 

+

25 

+

26def download_psp(sparc_tag=sparc_tag, psp_dir=psp_dir): 

+

27 """Download the external PSPs into the sparc/psp folder 

+

28 

+

29 Arguments: 

+

30 sparc_tag (str): Commit hash or git tag for the psp files 

+

31 psp_dir (str or PosixPath): Directory to download the psp files 

+

32 """ 

+

33 if is_psp_download_complete(): 

+

34 print("PSPs have been successfully downloaded!") 

+

35 return 

+

36 download_url = sparc_source_url.format(sparc_tag=sparc_tag) 

+

37 print(f"Download link: {download_url}") 

+

38 with tempfile.TemporaryDirectory() as tmpdir: 

+

39 tmpdir = Path(tmpdir) 

+

40 with urlopen(download_url) as zipresp: 

+

41 with zipfile.ZipFile(BytesIO(zipresp.read())) as zfile: 

+

42 zfile.extractall(tmpdir) 

+

43 # print(list(os.walk(tmpdir))) 

+

44 source_dir = next(tmpdir.glob("SPARC-*/psps")) 

+

45 print(f"Found source_dir at {source_dir}") 

+

46 if not source_dir.is_dir(): 

+

47 raise FileNotFoundError("Error downloading or extracting zip") 

+

48 print(f"Moving psp files to {psp_dir}") 

+

49 for ext in ("*.psp8", "*.psp", "*.pot"): 

+

50 for pspf in source_dir.glob(ext): 

+

51 print(f"Found {pspf} --> {psp_dir}") 

+

52 shutil.copy(pspf, psp_dir) 

+

53 if not is_psp_download_complete(psp_dir): 

+

54 raise RuntimeError(f"Files downloaded to {psp_dir} have different checksums!") 

+

55 return 

+

56 

+

57 

+

58def checksum_all(psp_dir=psp_dir, extension="*.psp8"): 

+

59 """Checksum all the files under the psp_dir to make sure the psp8 files 

+

60 are the same as intended 

+

61 

+

62 Arguments: 

+

63 psp_dir (str or PosixPath): Directory for the psp files 

+

64 extension (str): Glob pattern for the psp files, e.g. '*.psp', '*.psp8' or '*.pot' 

+

65 

+

66 Returns: 

+

67 str: Checksum for all the files concatenated 

+

68 """ 

+

69 checker = hashlib.md5() 

+

70 psp_dir = Path(psp_dir) 

+

71 # Use sorted to make sure file order is correct 

+

72 for filename in sorted(psp_dir.glob(extension)): 

+

73 # Hash each file individually and fold its digest into the group checksum 

+

74 with open(filename, "r") as f: 

+

75 f_checker = hashlib.md5() 

+

76 content = f.read().encode("utf8") 

+

77 f_checker.update(content) 

+

78 checker.update(f_checker.hexdigest().encode("ascii")) 

+

79 final_checksum = checker.hexdigest() 

+

80 return final_checksum 

+

81 
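Note the digest-of-digests design: each file is hashed on its own and the hex digests are folded into one md5 over `sorted()` filenames, so the group checksum is stable against directory iteration order. A standalone sketch of the same scheme (`group_md5` is a hypothetical helper, not part of the module; the real checksum_all reads text and re-encodes it, so digests may differ across line endings):

```
import hashlib
from pathlib import Path

def group_md5(psp_dir, pattern="*.psp8"):
    """Digest-of-digests over sorted files, as in checksum_all (sketch)."""
    checker = hashlib.md5()
    for filename in sorted(Path(psp_dir).glob(pattern)):
        digest = hashlib.md5(filename.read_bytes()).hexdigest()
        checker.update(digest.encode("ascii"))
    return checker.hexdigest()
```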

+

82 

+

83def is_psp_download_complete(psp_dir=psp_dir): 

+

84 return checksum_all(psp_dir) == all_psp8_checksum 

+

85 

+

86 

+

87if __name__ == "__main__": 

+

88 print("Running command-line psp downloader") 

+

89 download_psp() 

+
diff --git a/_static/htmlcov/z_e32f35a0016f670d_io_py.html b/_static/htmlcov/z_e32f35a0016f670d_io_py.html
new file mode 100644
index 00000000..fe129f7b
--- /dev/null
+++ b/_static/htmlcov/z_e32f35a0016f670d_io_py.html
@@ -0,0 +1,1277 @@
[coverage.py v7.6.7 report header: sparc/io.py, 73% coverage, 475 statements, created 2024-11-20 18:11 +0000]

1"""Providing a new bundled SPARC file format 

+

2""" 

+

3import os 

+

4import re 

+

5from pathlib import Path 

+

6from warnings import warn 

+

7 

+

8import numpy as np 

+

9from ase.atoms import Atoms 

+

10from ase.calculators.singlepoint import SinglePointDFTCalculator 

+

11 

+

12# various io formatters 

+

13from .api import SparcAPI 

+

14from .common import psp_dir as default_psp_dir 

+

15from .download_data import is_psp_download_complete 

+

16from .sparc_parsers.aimd import _read_aimd 

+

17from .sparc_parsers.atoms import atoms_to_dict, dict_to_atoms 

+

18from .sparc_parsers.geopt import _read_geopt 

+

19from .sparc_parsers.inpt import _read_inpt, _write_inpt 

+

20from .sparc_parsers.ion import _read_ion, _write_ion 

+

21from .sparc_parsers.out import _read_out 

+

22from .sparc_parsers.pseudopotential import copy_psp_file, parse_psp8_header 

+

23from .sparc_parsers.static import _add_cell_info, _read_static 

+

24from .utils import deprecated, locate_api, string2index 

+

25 

+

26# from .sparc_parsers.ion import read_ion, write_ion 

+

27defaultAPI = locate_api() 

+

28 

+

29 

+

30class SparcBundle: 

+

31 """Provide access to a calculation folder of SPARC as a simple bundle 

+

32 

+

33 The bundle can be optionally named as .sparc following the ASE's 

+

34 .bundle format 

+

35 

+

36 Currently the write method only supports 1 image, while the read 

+

37 method supports reading atoms results under the following conditions: 

+

38 

+

39 1) No calculation (minimal): .ion + .inpt file --> 1 image 

+

40 2) Single point calculation: .ion + .inpt + .out + .static --> 1 

+

41 image with calc 

+

42 3) Multiple SP calculations: chain all .out_{digits} and 

+

43 .static_{digits} outputs 

+

44 4) Relaxation: read from .geopt and .out (supports chaining) 

+

45 5) AIMD: read from .aimd and .out (supports chaining) 

+

46 

+

47 

+

48 Attributes: 

+

49 directory (Path): Path to the directory containing SPARC files. 

+

50 mode (str): File access mode ('r', 'w', or 'a'). 

+

51 label (str): Name of the main SPARC file. 

+

52 init_atoms (Atoms): Initial atomic configuration. 

+

53 init_inputs (dict): Initial input parameters. 

+

54 psp_data (dict): Pseudopotential data. 

+

55 raw_results (dict): Raw results from SPARC calculations. 

+

56 psp_dir (Path): Directory containing pseudopotentials. 

+

57 sorting (list): Sort order for atoms. 

+

58 last_image (int): Index of the last image in a series of calculations. 

+

59 validator (SparcAPI): API validator for SPARC calculations. 

+

60 

+

61 Methods: 

+

62 __find_psp_dir(psp_dir=None): Finds the directory for SPARC pseudopotentials. 

+

63 _find_files(): Finds all files matching the bundle label. 

+

64 _make_label(label=None): Infers or sets the label for the SPARC bundle. 

+

65 _indir(ext, label=None, occur=0, d_format="{:02d}"): Finds a file with a specific extension in the bundle. 

+

66 _read_ion_and_inpt(): Reads .ion and .inpt files together. 

+

67 _write_ion_and_inpt(): Writes .ion and .inpt files to the bundle. 

+

68 _read_results_from_index(index, d_format="{:02d}"): Reads results from a specific calculation index. 

+

69 _make_singlepoint(calc_results, images, raw_results): Converts results and images to SinglePointDFTCalculators. 

+

70 _extract_static_results(raw_results, index=":"): Extracts results from static calculations. 

+

71 _extract_geopt_results(raw_results, index=":"): Extracts results from geometric optimization calculations. 

+

72 _extract_aimd_results(raw_results, index=":"): Extracts results from AIMD calculations. 

+

73 convert_to_ase(index=-1, include_all_files=False, **kwargs): Converts raw results to ASE Atoms with calculators. 

+

74 read_raw_results(include_all_files=False): Parses all files in the bundle and merges results. 

+

75 read_psp_info(): Parses pseudopotential information from the inpt file. 

+

76 """ 

+

77 

+

78 psp_env = ["SPARC_PSP_PATH", "SPARC_PP_PATH"] 

+

79 

+

80 def __init__( 

+

81 self, 

+

82 directory, 

+

83 mode="r", 

+

84 atoms=None, 

+

85 label=None, 

+

86 psp_dir=None, 

+

87 validator=defaultAPI, 

+

88 ): 

+

89 """ 

+

90 Initializes a SparcBundle for accessing SPARC calculation data. 

+

91 

+

92 Args: 

+

93 directory (str or Path): The path to the directory containing the SPARC files. 

+

94 mode (str, optional): The file access mode. Can be 'r' (read), 'w' (write), or 'a' (append). Defaults to 'r'. 

+

95 atoms (Atoms, optional): The initial atomic configuration. Only relevant in write mode. 

+

96 label (str, optional): A custom label for the bundle. If None, the label is inferred from the directory or files. 

+

97 psp_dir (str or Path, optional): Path to the directory containing pseudopotentials. If None, the path is inferred. 

+

98 validator (SparcAPI, optional): An instance of SparcAPI for validating and parsing SPARC parameters. Defaults to a default SparcAPI instance. 

+

99 

+

100 Raises: 

+

101 AssertionError: If an invalid mode is provided. 

+

102 ValueError: If multiple .ion files are found and no label is specified. 

+

103 Warning: If no .ion file is found in read-mode, or illegal characters are in the label. 

+

104 """ 

+

105 self.directory = Path(directory) 

+

106 self.mode = mode.lower() 

+

107 assert self.mode in ( 

+

108 "r", 

+

109 "w", 

+

110 "a", 

+

111 ), f"Invalid mode {self.mode}! Must one of 'r', 'w' or 'a'" 

+

112 self.label = self._make_label(label) 

+

113 self.init_atoms = atoms.copy() if atoms is not None else None 

+

114 self.init_inputs = {} 

+

115 self.psp_data = {} 

+

116 self.raw_results = {} 

+

117 self.psp_dir = self.__find_psp_dir(psp_dir) 

+

118 # Sorting should be consistent across the whole bundle! 

+

119 self.sorting = None 

+

120 self.last_image = -1 

+

121 self.validator = validator 

+

122 

+

123 def _find_files(self): 

+

124 """Find all files matching '{label}.*'""" 

+

125 return list(self.directory.glob(f"{self.label}.*")) 

+

126 

+

127 def _make_label(self, label=None): 

+

128 """Infer the label from the bundle 

+

129 

+

130 Special cases if label is None: 

+

131 1. read mode --> get the ion file name 

+

132 2. write mode --> infer from the directory 

+

133 

+

134 Arguments: 

+

135 label (str or None): Label to be used to write the .ion, .inpt files 

+

136 """ 

+

137 prefix = self.directory.resolve().with_suffix("").name 

+

138 

+

139 illegal_chars = '\\/:*?"<>|' 

+

140 if label is not None: 

+

141 label_ = label 

+

142 elif self.mode == "w": 

+

143 label_ = prefix 

+

144 else: 

+

145 # read 

+

146 match_ion = list(self.directory.glob("*.ion")) 

+

147 if len(match_ion) > 1: 

+

148 raise ValueError( 

+

149 "Cannot read sparc bundle with multiple ion files without specifying the label!" 

+

150 ) 

+

151 elif len(match_ion) == 1: 

+

152 label_ = match_ion[0].name.split(".")[0] 

+

153 else: 

+

154 # No file found, possibly an empty bundle 

+

155 warn("No .ion file found in the read-mode bundle.") 

+

156 label_ = prefix 

+

157 

+

158 if any([c in label_ for c in illegal_chars]): 

+

159 warn( 

+

160 f"Label name {label_} contains illegal characters! I'll make it 'SPARC'" 

+

161 ) 

+

162 label_ = "SPARC" 

+

163 return label_ 

+

164 

+

165 def __find_psp_dir(self, psp_dir=None): 

+

166 """Use environmental variable to find the directory for SPARC 

+

167 pseudopotentials 

+

168 

+

169 Searching priority: 

+

170 1. User defined psp_dir 

+

171 2. $SPARC_PSP_PATH 

+

172 3. $SPARC_PP_PATH 

+

173 4. psp files bundled with sparc-x-api 

+

174 

+

175 Arguments: 

+

176 psp_dir (str or PosixPath or None): the specific directory to search the psp files. 

+

177 Each element can only have 1 psp file under psp_dir 

+

178 Returns: 

+

179 PosixPath: Location of psp files 

+

180 """ 

+

181 if psp_dir is not None: 

+

182 return Path(psp_dir) 

+

183 else: 

+

184 for var in self.psp_env: 

+

185 env_psp_dir = os.environ.get(var, None) 

+

186 if env_psp_dir: 

+

187 return Path(env_psp_dir) 

+

188 # At this point, we try to use the psp files bundled with sparc 

+

189 if is_psp_download_complete(default_psp_dir): 

+

190 return default_psp_dir 

+

191 else: 

+

192 warn( 

+

193 ( 

+

194 "PSP directory bundled with SPARC-X-API is broken! " 

+

195 "Please use `sparc.download_data` to re-download them!" 

+

196 ) 

+

197 ) 

+

198 

+

199 # Not found 

+

200 if self.mode == "w": 

+

201 warn( 

+

202 ( 

+

203 "No pseudopotential searching path was set and " 

+

204 "neither of $SPARC_PSP_PATH nor $SPARC_PP_PATH is set.\n" 

+

205 "Please explicitly provide the pseudopotentials parameter when writing the sparc bundle." 

+

206 ) 

+

207 ) 

+

208 return None 

+

209 

+

210 def _indir(self, ext, label=None, occur=0, d_format="{:02d}"): 

+

211 """Find the file with {label}.{ext} under current dir, 

+

212 if label is None, use the default 

+

213 

+

214 Arguments: 

+

215 ext (str): Extension of file, e.g. '.ion' or 'ion' 

+

216 label (str or None): Label for the file. If None, use the parent directory name for searching 

+

217 occur (int): Occurrence index of the file; if occur > 0, search for files with suffix like 'SPARC.out_01' 

+

218 d_format (str): Format for the index 

+

219 

+

220 Returns: 

+

221 PosixPath: Path to the target file under self.directory 

+

222 """ 

+

223 label = self.label if label is None else label 

+

224 if not ext.startswith("."): 

+

225 ext = "." + ext 

+

226 if occur == 0: 

+

227 target = self.directory / f"{label}{ext}" 

+

228 else: 

+

229 target = self.directory / f"{label}{ext}_{d_format.format(occur)}" 

+

230 return target 

+

231 
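To make the `occur`/`d_format` convention concrete, a sketch of the filenames `_indir` resolves (the bundle path is hypothetical; an empty bundle infers the label from the directory name):

```
sb = SparcBundle(directory="SPARC.sparc")  # label inferred as "SPARC"
sb._indir(".out")              # -> SPARC.sparc/SPARC.out
sb._indir(".out", occur=1)     # -> SPARC.sparc/SPARC.out_01
sb._indir("static", occur=12)  # -> SPARC.sparc/SPARC.static_12
```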

+

232 def _read_ion_and_inpt(self): 

+

233 """Read the ion and inpt files together to obtain basic atomstic data. 

+

234 

+

235 Returns: 

+

236 Atoms: atoms object from .ion and .inpt file 

+

237 """ 

+

238 f_ion, f_inpt = self._indir(".ion"), self._indir(".inpt") 

+

239 ion_data = _read_ion(f_ion, validator=self.validator) 

+

240 inpt_data = _read_inpt(f_inpt, validator=self.validator) 

+

241 merged_data = {**ion_data, **inpt_data} 

+

242 return dict_to_atoms(merged_data) 

+

243 

+

244 def _write_ion_and_inpt( 

+

245 self, 

+

246 atoms=None, 

+

247 label=None, 

+

248 direct=False, 

+

249 sort=True, 

+

250 ignore_constraints=False, 

+

251 wrap=False, 

+

252 # Below are the parameters from v1 

+

253 # scaled -> direct, ignore_constraints --> not add_constraints 

+

254 scaled=False, 

+

255 add_constraints=True, 

+

256 copy_psp=False, 

+

257 comment="", 

+

258 input_parameters={}, 

+

259 # Parameters that do not require type conversion 

+

260 **kwargs, 

+

261 ): 

+

262 """Write the ion and inpt files to a bundle. This method only 

+

263 supports writing 1 image. If input_parameters are empty, 

+

264 there will only be .ion writing the positions and .inpt 

+

265 writing a minimal cell information 

+

266 

+

267 Args: 

+

268 atoms (Atoms, optional): The Atoms object to write. If None, uses initialized atoms associated with SparcBundle. 

+

269 label (str, optional): Custom label for the written files. 

+

270 direct (bool, optional): If True, writes positions in direct coordinates. 

+

271 sort (bool, optional): If True, sorts atoms before writing. 

+

272 ignore_constraints (bool, optional): If True, ignores constraints on atoms. 

+

273 wrap (bool, optional): If True, wraps atoms into the unit cell. 

+

274 **kwargs: Additional keyword arguments for writing. 

+

275 

+

276 Raises: 

+

277 ValueError: If the bundle is not in write mode. 

+

278 """ 

+

279 if self.mode != "w": 

+

280 raise ValueError( 

+

281 "Cannot write input files while sparc bundle is opened in read or append mode!" 

+

282 ) 

+

283 os.makedirs(self.directory, exist_ok=True) 

+

284 atoms = self.atoms.copy() if atoms is None else atoms.copy() 

+

285 pseudopotentials = kwargs.pop("pseudopotentials", {}) 

+

286 

+

287 if sort: 

+

288 if self.sorting is not None: 

+

289 old_sort = self.sorting.get("sort", None) 

+

290 if old_sort: 

+

291 sort = old_sort 

+

292 

+

293 data_dict = atoms_to_dict( 

+

294 atoms, 

+

295 direct=direct, 

+

296 sort=sort, 

+

297 ignore_constraints=ignore_constraints, 

+

298 psp_dir=self.psp_dir, 

+

299 pseudopotentials=pseudopotentials, 

+

300 ) 

+

301 merged_inputs = input_parameters.copy() 

+

302 merged_inputs.update(kwargs) 

+

303 data_dict["inpt"]["params"].update(merged_inputs) 

+

304 

+

305 # If copy_psp, change the PSEUDO_POT field and copy the files 

+

306 if copy_psp: 

+

307 for block in data_dict["ion"]["atom_blocks"]: 

+

308 if "PSEUDO_POT" in block: 

+

309 origin_psp = block["PSEUDO_POT"] 

+

310 target_dir = self.directory 

+

311 target_fname = copy_psp_file(origin_psp, target_dir) 

+

312 block["PSEUDO_POT"] = target_fname 

+

313 

+

314 _write_ion(self._indir(".ion"), data_dict, validator=self.validator) 

+

315 _write_inpt(self._indir(".inpt"), data_dict, validator=self.validator) 

+

316 # Update the sorting information 

+

317 ion_dict = _read_ion(self._indir(".ion"))["ion"] 

+

318 self.sorting = ion_dict.get("sorting", None) 

+

319 return 

+

320 

+

321 def read_raw_results(self, include_all_files=False): 

+

322 """Parse all files using the given self.label. 

+

323 The results are merged dict from all file formats 

+

324 

+

325 Arguments: 

+

326 include_all_files (bool): Whether to include output files with different suffixes 

+

327 If true: include all files (e.g. SPARC.out, SPARC.out_01, 

+

328 SPARC.out_02, etc). 

+

329 Returns: 

+

330 dict or List: Dict containing all raw results. Only some of them will appear in the calculator's results 

+

331 

+

332 Sets: 

+

333 self.raw_results (dict or List): the same as the return value 

+

334 

+

335 #TODO: @TT 2024-11-01 allow accepting indices 

+

336 #TODO: @TT last_image is a bad name, it should refer to the occurrence of images 

+

337 the same goes with num_calculations 

+

338 """ 

+

339 # Find the max output index 

+

340 out_files = self.directory.glob(f"{self.label}.out*") 

+

341 valid_out_files = [ 

+

342 f 

+

343 for f in out_files 

+

344 if (re.fullmatch(r"^\.out(?:_\d+)?$", f.suffix) is not None) 

+

345 ] 

+

346 # Combine and sort the file lists 

+

347 last_out = sorted(valid_out_files, reverse=True) 

+

348 # No output file, only ion / inpt 

+

349 if len(last_out) == 0: 

+

350 self.last_image = -1 

+

351 else: 

+

352 suffix = last_out[0].suffix 

+

353 if suffix == ".out": 

+

354 self.last_image = 0 

+

355 else: 

+

356 self.last_image = int(suffix.split("_")[1]) 

+

357 self.num_calculations = self.last_image + 1 

+

358 

+

359 # Always make sure ion / inpt results are parsed regardless of actual calculations 

+

360 if include_all_files: 

+

361 if self.num_calculations > 0: 

+

362 results = [ 

+

363 self._read_results_from_index(index) 

+

364 for index in range(self.num_calculations) 

+

365 ] 

+

366 else: 

+

367 results = [self._read_results_from_index(self.last_image)] 

+

368 else: 

+

369 results = self._read_results_from_index(self.last_image) 

+

370 

+

371 self.raw_results = results 

+

372 

+

373 if include_all_files: 

+

374 init_raw_results = self.raw_results[0] 

+

375 else: 

+

376 init_raw_results = self.raw_results.copy() 

+

377 

+

378 self.init_atoms = dict_to_atoms(init_raw_results) 

+

379 self.init_inputs = { 

+

380 "ion": init_raw_results["ion"], 

+

381 "inpt": init_raw_results["inpt"], 

+

382 } 

+

383 self.psp_data = self.read_psp_info() 

+

384 return self.raw_results 

+

385 

+

386 def _read_results_from_index(self, index, d_format="{:02d}"): 

+

387 """Read the results from one calculation index, and return a 

+

388 single raw result dict, e.g. for index=0 --> .static 

+

389 and index=1 --> .static_01. 

+

390 

+

391 Arguments: 

+

392 index (int): Index of image to return the results 

+

393 d_format (str): Format for the index suffix 

+

394 

+

395 Returns: 

+

396 dict: Results for single image 

+

397 

+

398 #TODO: @TT should we call index --> occurrence? 

+

399 

+

400 """ 

+

401 results_dict = {} 

+

402 

+

403 for ext in ("ion", "inpt"): 

+

404 f = self._indir(ext, occur=0) 

+

405 if f.is_file(): 

+

406 data_dict = globals()[f"_read_{ext}"](f) 

+

407 results_dict.update(data_dict) 

+

408 for ext in ("geopt", "static", "aimd", "out"): 

+

409 f = self._indir(ext, occur=index, d_format=d_format) 

+

410 if f.is_file(): 

+

411 data_dict = globals()[f"_read_{ext}"](f) 

+

412 results_dict.update(data_dict) 

+

413 

+

414 # Must have files: ion, inpt 

+

415 if ("ion" not in results_dict) or ("inpt" not in results_dict): 

+

416 raise RuntimeError( 

+

417 "Either ion or inpt files are missing from the bundle! " 

+

418 "Your SPARC calculation may be corrupted." 

+

419 ) 

+

420 

+

421 # Copy the sorting information, if not existing 

+

422 sorting = results_dict["ion"].get("sorting", None) 

+

423 if sorting is not None: 

+

424 if self.sorting is None: 

+

425 self.sorting = sorting 

+

426 else: 

+

427 # Compare stored sorting 

+

428 assert (tuple(self.sorting["sort"]) == tuple(sorting["sort"])) and ( 

+

429 tuple(self.sorting["resort"]) == tuple(sorting["resort"]) 

+

430 ), "Sorting information changed!" 

+

431 return results_dict 

+

432 

+

433 def convert_to_ase(self, index=-1, include_all_files=False, **kwargs): 

+

434 """Read the raw results from the bundle and create atoms with 

+

435 single point calculators 

+

436 

+

437 Arguments: 

+

438 index (int or str): Index or slice of the image(s) to convert. Uses the same format as ase.io.read 

+

439 include_all_files (bool): If true, also read results with indexed suffixes 

+

440 

+

441 Returns: 

+

442 Atoms or List[Atoms]: ASE-atoms or images with single point results 

+

443 

+

444 """ 

+

445 # Convert to images! 

+

446 # TODO: @TT 2024-11-01 read_raw_results should implement a more 

+

447 # robust behavior handling index, as it is the entry point for all 

+

448 rs = self.read_raw_results(include_all_files=include_all_files) 

+

449 if isinstance(rs, dict): 

+

450 raw_results = [rs] 

+

451 else: 

+

452 raw_results = list(rs) 

+

453 res_images = [] 

+

454 for entry in raw_results: 

+

455 if "static" in entry: 

+

456 calc_results, images = self._extract_static_results(entry, index=":") 

+

457 elif "geopt" in entry: 

+

458 calc_results, images = self._extract_geopt_results(entry, index=":") 

+

459 elif "aimd" in entry: 

+

460 calc_results, images = self._extract_aimd_results(entry, index=":") 

+

461 else: 

+

462 calc_results, images = None, [self.init_atoms.copy()] 

+

463 

+

464 if images is not None: 

+

465 if calc_results is not None: 

+

466 images = self._make_singlepoint(calc_results, images, entry) 

+

467 res_images.extend(images) 

+

468 

+

469 if isinstance(index, int): 

+

470 return res_images[index] 

+

471 else: 

+

472 return res_images[string2index(index)] 

+

473 
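A usage sketch of the dispatch above (the bundle directory is hypothetical); `index` follows the ase.io.read convention:

```
sb = SparcBundle(directory="run.sparc")
last_image = sb.convert_to_ase(index=-1)                           # single Atoms
trajectory = sb.convert_to_ase(index=":", include_all_files=True)  # all images
```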

+

474 def _make_singlepoint(self, calc_results, images, raw_results): 

+

475 """Convert a calculator dict and images of Atoms to list of 

+

476 SinglePointDFTCalculators 

+

477 

+

478 The calculator also takes parameters from ion, inpt that exist 

+

479 in self.raw_results. 

+

480 

+

481 Arguments: 

+

482 calc_results (List): Calculation results for all images 

+

483 images (List): Corresponding Atoms images 

+

484 raw_results (List): Full raw results dict to obtain additional information 

+

485 

+

486 Returns: 

+

487 List(Atoms): ASE-atoms images with single point calculators attached 

+

488 

+

489 """ 

+

490 converted_images = [] 

+

491 for res, _atoms in zip(calc_results, images): 

+

492 atoms = _atoms.copy() 

+

493 sp = SinglePointDFTCalculator(atoms) 

+

494 # Res can be empty at this point, leading to incomplete calc 

+

495 sp.results.update(res) 

+

496 sp.name = "sparc" 

+

497 sp.kpts = raw_results["inpt"].get("params", {}).get("KPOINT_GRID", None) 

+

498 # There may be a better way handling the parameters... 

+

499 sp.parameters = raw_results["inpt"].get("params", {}) 

+

500 sp.raw_parameters = { 

+

501 "ion": raw_results["ion"], 

+

502 "inpt": raw_results["inpt"], 

+

503 } 

+

504 atoms.calc = sp 

+

505 converted_images.append(atoms) 

+

506 return converted_images 

+

507 

+

508 def _extract_static_results(self, raw_results, index=":"): 

+

509 """Extract the static calculation results and atomic 

+

510 structure(s) Returns: calc_results: dict with at least energy 

+

511 value atoms: ASE atoms object The priority is to parse 

+

512 position from static file first, then fallback from ion + inpt 

+

513 

+

514 Note: make all energy / forces resorted! 

+

515 

+

516 Arguments: 

+

517 raw_results (dict): Raw results parsed from self.read_raw_results 

+

518 index (str or int): Index or slice of images 

+

519 

+

520 Returns: 

+

521 List[results], List[Atoms] 

+

522 

+

523 """ 

+

524 static_results = raw_results.get("static", []) 

+

525 calc_results = [] 

+

526 # Use extra lattice information to construct the positions 

+

527 cell = self.init_atoms.cell 

+

528 # import pdb; pdb.set_trace() 

+

529 static_results = _add_cell_info(static_results, cell) 

+

530 

+

531 if isinstance(index, int): 

+

532 _images = [static_results[index]] 

+

533 elif isinstance(index, str): 

+

534 _images = static_results[string2index(index)] 

+

535 

+

536 ase_images = [] 

+

537 for static_results in _images: 

+

538 partial_results = {} 

+

539 if "free energy" in static_results: 

+

540 partial_results["energy"] = static_results["free energy"] 

+

541 partial_results["free energy"] = static_results["free energy"] 

+

542 

+

543 if "forces" in static_results: 

+

544 partial_results["forces"] = static_results["forces"][self.resort] 

+

545 

+

546 if "atomic_magnetization" in static_results: 

+

547 partial_results["magmoms"] = static_results["atomic_magnetization"][ 

+

548 self.resort 

+

549 ] 

+

550 

+

551 if "net_magnetization" in static_results: 

+

552 partial_results["magmom"] = static_results["net_magnetization"] 

+

553 

+

554 if "stress" in static_results: 

+

555 partial_results["stress"] = static_results["stress"] 

+

556 

+

557 if "stress_equiv" in static_results: 

+

558 partial_results["stress_equiv"] = static_results["stress_equiv"] 

+

559 

+

560 atoms = self.init_atoms.copy() 

+

561 # import pdb; pdb.set_trace() 

+

562 if "atoms" in static_results: 

+

563 atoms_dict = static_results["atoms"] 

+

564 

+

565 # The socket mode case. Reset all cell and positions 

+

566 # Be careful, 

+

567 if "lattice" in static_results: 

+

568 lat = static_results["lattice"] 

+

569 atoms.set_cell(lat, scale_atoms=False) 

+

570 if "coord" not in atoms_dict: 

+

571 raise KeyError( 

+

572 "Coordination conversion failed in socket static output!" 

+

573 ) 

+

574 atoms.set_positions( 

+

575 atoms_dict["coord"][self.resort], apply_constraint=False 

+

576 ) 

+

577 else: # Do not change cell information (normal static file) 

+

578 if "coord_frac" in atoms_dict: 

+

579 atoms.set_scaled_positions( 

+

580 atoms_dict["coord_frac"][self.resort] 

+

581 ) 

+

582 elif "coord" in atoms_dict: 

+

583 atoms.set_positions( 

+

584 atoms_dict["coord"][self.resort], apply_constraint=False 

+

585 ) 

+

586 ase_images.append(atoms) 

+

587 calc_results.append(partial_results) 

+

588 return calc_results, ase_images 

+

589 

+

590 def _extract_geopt_results(self, raw_results, index=":"): 

+

591 """Extract the static calculation results and atomic 

+

592 structure(s) Returns: calc_results: dict with at least energy 

+

593 value atoms: ASE atoms object The priority is to parse 

+

594 position from static file first, then fallback from ion + inpt 

+

595 

+

596 Arguments: 

+

597 raw_results (dict): Raw results parsed from self.read_raw_results 

+

598 index (str or int): Index or slice of images 

+

599 

+

600 Returns: 

+

601 List[results], List[Atoms] 

+

602 

+

603 """ 

+

604 # print("RAW_RES: ", raw_results) 

+

605 geopt_results = raw_results.get("geopt", []) 

+

606 calc_results = [] 

+

607 if len(geopt_results) == 0: 

+

608 warn( 

+

609 "Geopt file is empty! This is not an error if the calculation is continued from restart. " 

+

610 ) 

+

611 return None, None 

+

612 

+

613 if isinstance(index, int): 

+

614 _images = [geopt_results[index]] 

+

615 elif isinstance(index, str): 

+

616 _images = geopt_results[string2index(index)] 

+

617 

+

618 ase_images = [] 

+

619 for result in _images: 

+

620 atoms = self.init_atoms.copy() 

+

621 partial_result = {} 

+

622 if "energy" in result: 

+

623 partial_result["energy"] = result["energy"] 

+

624 partial_result["free energy"] = result["energy"] 

+

625 

+

626 if "forces" in result: 

+

627 partial_result["forces"] = result["forces"][self.resort] 

+

628 

+

629 if "stress" in result: 

+

630 partial_result["stress"] = result["stress"] 

+

631 

+

632 # Modify the atoms copy 

+

633 if "positions" in result: 

+

634 atoms.set_positions( 

+

635 result["positions"][self.resort], apply_constraint=False 

+

636 ) 

+

637 if "ase_cell" in result: 

+

638 atoms.set_cell(result["ase_cell"]) 

+

639 else: 

+

640 # For geopt and RELAX=2 (cell relaxation), 

+

641 # the positions may not be written in .geopt file 

+

642 relax_flag = raw_results["inpt"]["params"].get("RELAX_FLAG", 0) 

+

643 if relax_flag != 2: 

+

644 raise ValueError( 

+

645 ".geopt file missing positions while RELAX!=2. " 

+

646 "Please check your setup ad output files." 

+

647 ) 

+

648 if "ase_cell" not in result: 

+

649 raise ValueError( 

+

650 "Cannot recover positions from .geopt file due to missing cell information. " 

+

651 "Please check your setup ad output files." 

+

652 ) 

+

653 atoms.set_cell(result["ase_cell"], scale_atoms=True) 

+

654 

+

655 # Unlike low-dimensional stress in static calculations, we need to convert 

+

656 # stress_1d / stress_2d to stress_equiv using the non-periodic cell dimension(s) 

+

657 # This has to be done when the actual cell information is loaded 

+

658 if "stress_1d" in result: 

+

659 stress_1d = result["stress_1d"] 

+

660 assert ( 

+

661 np.count_nonzero(atoms.pbc) == 1 

+

662 ), "Dimension of stress and PBC mismatch!" 

+

663 for i, bc in enumerate(atoms.pbc): 

+

664 if not bc: 

+

665 stress_1d /= atoms.cell.cellpar()[i] 

+

666 stress_equiv = stress_1d 

+

667 partial_result["stress_equiv"] = stress_equiv 

+

668 

+

669 if "stress_2d" in result: 

+

670 stress_2d = result["stress_2d"] 

+

671 assert ( 

+

672 np.count_nonzero(atoms.pbc) == 2 

+

673 ), "Dimension of stress and PBC mismatch!" 

+

674 for i, bc in enumerate(atoms.pbc): 

+

675 if not bc: 

+

676 stress_2d /= atoms.cell.cellpar()[i] 

+

677 stress_equiv = stress_2d 

+

678 partial_result["stress_equiv"] = stress_equiv 

+

679 

+

680 calc_results.append(partial_result) 

+

681 ase_images.append(atoms) 

+

682 

+

683 return calc_results, ase_images 

+

684 

+

685 def _extract_aimd_results(self, raw_results, index=":"): 

+

686 """Extract energy / forces from aimd results 

+

687 

+

688 For calculator, we only need the last image 

+

689 

+

690 We probably want more information for the AIMD calculations, 

+

691 but I'll keep them for now 

+

692 

+

693 Arguments: 

+

694 raw_results (dict): Raw results parsed from self.read_raw_results 

+

695 index (str or int): Index or slice of images 

+

696 

+

697 Returns: 

+

698 List[results], List[Atoms] 

+

699 

+

700 """ 

+

701 aimd_results = raw_results.get("aimd", []) 

+

702 calc_results = [] 

+

703 if len(aimd_results) == 0: 

+

704 warn( 

+

705 "Aimd file is empty! " 

+

706 "This is not an error if the calculation " 

+

707 "is continued from restart. " 

+

708 ) 

+

709 return None, None 

+

710 

+

711 if isinstance(index, int): 

+

712 _images = [aimd_results[index]] 

+

713 elif isinstance(index, str): 

+

714 _images = aimd_results[string2index(index)] 

+

715 

+

716 ase_images = [] 

+

717 for result in _images: 

+

718 partial_result = {} 

+

719 atoms = self.init_atoms.copy() 

+

720 if "total energy per atom" in result: 

+

721 partial_result["energy"] = result["total energy per atom"] * len(atoms) 

+

722 if "free energy per atom" in result: 

+

723 partial_result["free energy"] = result["free energy per atom"] * len( 

+

724 atoms 

+

725 ) 

+

726 

+

727 if "forces" in result: 

+

728 # The forces are already re-sorted! 

+

729 partial_result["forces"] = result["forces"][self.resort] 

+

730 

+

731 # Modify the atoms in-place 

+

732 if "positions" not in result: 

+

733 raise ValueError("Cannot have aimd without positions information!") 

+

734 

+

735 atoms.set_positions( 

+

736 result["positions"][self.resort], apply_constraint=False 

+

737 ) 

+

738 

+

739 if "velocities" in result: 

+

740 atoms.set_velocities(result["velocities"][self.resort]) 

+

741 

+

742 ase_images.append(atoms) 

+

743 calc_results.append(partial_result) 

+

744 return calc_results, ase_images 

+

745 

+

746 @property 

+

747 def sort(self): 

+

748 """Wrap the self.sorting dict. If sorting information does not exist, 

+

749 use the default slicing 

+

750 """ 

+

751 

+

752 if self.sorting is None: 

+

753 return slice(None, None, None) 

+

754 sort = self.sorting.get("sort", []) 

+

755 if len(sort) > 0: 

+

756 return sort 

+

757 else: 

+

758 return slice(None, None, None) 

+

759 

+

760 @property 

+

761 def resort(self): 

+

762 """Wrap the self.sorting dict. If sorting information does not exist, 

+

763 use the default slicing 

+

764 """ 

+

765 

+

766 if self.sorting is None: 

+

767 return slice(None, None, None) 

+

768 resort = self.sorting.get("resort", []) 

+

769 if len(resort) > 0: 

+

770 return resort 

+

771 else: 

+

772 return slice(None, None, None) 

+

773 
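`sort` and `resort` are inverse permutations between ASE's atom order and the element-grouped order in the SPARC files; a toy sketch of how the extractors above use them:

```
import numpy as np

sort = np.array([2, 0, 1])   # ASE order -> SPARC file order (toy data)
resort = np.argsort(sort)    # SPARC file order -> ASE order
forces_sparc = np.arange(9.0).reshape(3, 3)
forces_ase = forces_sparc[resort]  # what result["forces"][self.resort] does
assert np.allclose(forces_ase[sort], forces_sparc)
```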

+

774 def read_psp_info(self): 

+

775 """Parse the psp information from inpt file options 

+

776 The psp file locations are relative to the bundle. 

+

777 

+

778 If the files cannot be found, the dict will only contain 

+

779 the path 

+

780 """ 

+

781 inpt = self.init_inputs.get("ion", {}) 

+

782 blocks = inpt.get("atom_blocks", []) 

+

783 psp_info = {} 

+

784 for block in blocks: 

+

785 element = block["ATOM_TYPE"] 

+

786 pseudo_path = block["PSEUDO_POT"] 

+

787 real_path = (self.directory / pseudo_path).resolve() 

+

788 psp_info[element] = {"rel_path": pseudo_path} 

+

789 if not real_path.is_file(): 

+

790 warn(f"Cannot locate pseudopotential {pseudo_path}. ") 

+

791 else: 

+

792 header = open(real_path, "r").read() 

+

793 psp_data = parse_psp8_header(header) 

+

794 psp_info[element].update(psp_data) 

+

795 return psp_info 

+

796 

+

797 

+

798def read_sparc(filename, index=-1, include_all_files=True, **kwargs): 

+

799 """Parse a SPARC bundle, return an Atoms object or list of Atoms (image) 

+

800 with embedded calculator result. 

+

801 

+

802 Arguments: 

+

803 filename (str or PosixPath): Filename to the sparc bundle 

+

804 index (int or str): Index or slice of the images, following the ase.io.read convention 

+

805 include_all_files (bool): If true, parse all output files with indexed suffices 

+

806 **kwargs: Additional parameters 

+

807 

+

808 Returns: 

+

809 Atoms or List[Atoms] 

+

810 

+

811 """ 

+

812 # We rely on minimal api version choose, i.e. default or set from env 

+

813 api = locate_api() 

+

814 sb = SparcBundle(directory=filename, validator=api) 

+

815 atoms_or_images = sb.convert_to_ase( 

+

816 index=index, include_all_files=include_all_files, **kwargs 

+

817 ) 

+

818 return atoms_or_images 

+

819 
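A usage sketch for the reader (the bundle path is hypothetical):

```
from sparc.io import read_sparc

final = read_sparc("run.sparc")              # last image (index=-1)
images = read_sparc("run.sparc", index=":")  # full series, e.g. a relaxation
```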

+

820 

+

821def write_sparc(filename, images, **kwargs): 

+

822 """Write sparc file. Images can only be Atoms object 

+

823 or list of length 1 

+

824 

+

825 Arguments: 

+

826 filename (str or PosixPath): Filename to the output sparc directory 

+

827 images (Atoms or List[Atoms]): Atoms object to be written. Only supports writing 1 Atoms 

+

828 **kwargs: Additional parameters 

+

829 """ 

+

830 if isinstance(images, Atoms): 

+

831 atoms = images 

+

832 elif isinstance(images, list): 

+

833 if len(images) > 1: 

+

834 raise ValueError("SPARC format only supports writing one atoms object!") 

+

835 atoms = images[0] 

+

836 api = locate_api() 

+

837 sb = SparcBundle(directory=filename, mode="w", validator=api) 

+

838 sb._write_ion_and_inpt(atoms, **kwargs) 

+

839 return 

+

840 

+

841 

+

842@deprecated( 

+

843 "Reading individual .ion file is not recommended. Please use read_sparc instead." 

+

844) 

+

845def read_sparc_ion(filename, **kwargs): 

+

846 """Parse an .ion file inside the SPARC bundle using a wrapper around SparcBundle 

+

847 The reader works only when other files (.inpt) exist. 

+

848 

+

849 The returned Atoms object of read_ion method only contains the initial positions 

+

850 

+

851 Arguments: 

+

852 filename (str or PosixPath): Filename to the .ion file 

+

853 (an accompanying .inpt file must exist in the same directory) 

+

854 **kwargs: Additional parameters 

+

855 

+

856 Returns: 

+

857 Atoms or List[Atoms] 

+

858 """ 

+

859 api = locate_api() 

+

860 parent_dir = Path(filename).parent 

+

861 sb = SparcBundle(directory=parent_dir, validator=api) 

+

862 atoms = sb._read_ion_and_inpt() 

+

863 return atoms 

+

864 

+

865 

+

866# Backward compatibility 

+

867read_ion = read_sparc_ion 

+

868 

+

869 

+

870@deprecated( 

+

871 "Writing individual .ion file is not recommended. Please use write_sparc instead." 

+

872) 

+

873def write_sparc_ion(filename, atoms, **kwargs): 

+

874 """Write .ion file using the SparcBundle wrapper. This method will also create the .inpt file 

+

875 

+

876 This is only for backward compatibility 

+

877 

+

878 Arguments: 

+

879 filename (str or PosixPath): Filename to the .ion file 

+

880 atoms (Atoms): atoms to be written 

+

881 **kwargs: Additional parameters 

+

882 """ 

+

883 label = Path(filename).with_suffix("").name 

+

884 parent_dir = Path(filename).parent 

+

885 api = locate_api() 

+

886 sb = SparcBundle(directory=parent_dir, label=label, mode="w", validator=api) 

+

887 sb._write_ion_and_inpt(atoms, **kwargs) 

+

888 return atoms 

+

889 

+

890 

+

891# Backward compatibility 

+

892write_ion = write_sparc_ion 

+

893 

+

894 

+

895@deprecated( 

+

896 "Reading individual .static file is not recommended. Please use read_sparc instead." 

+

897) 

+

898def read_sparc_static(filename, index=-1, **kwargs): 

+

899 """Parse a .static file bundle using a wrapper around SparcBundle 

+

900 The reader works only when other files (.ion, .inpt) exist. 

+

901 

+

902 Arguments: 

+

903 filename (str or PosixPath): Filename to the .static file 

+

904 index (int or str): Index or slice of the images, following the ase.io.read convention 

+

905 **kwargs: Additional parameters 

+

906 

+

907 Returns: 

+

908 Atoms or List[Atoms] 

+

909 """ 

+

910 parent_dir = Path(filename).parent 

+

911 api = locate_api() 

+

912 sb = SparcBundle(directory=parent_dir, validator=api) 

+

913 # In most of the cases the user wants to inspect all images 

+

914 kwargs = kwargs.copy() 

+

915 if "include_all_files" not in kwargs: 

+

916 kwargs.update(include_all_files=True) 

+

917 atoms_or_images = sb.convert_to_ase(index=index, **kwargs) 

+

918 return atoms_or_images 

+

919 

+

920 

+

921# Backward compatibility 

+

922read_static = read_sparc_static 

+

923 

+

924 

+

925@deprecated( 

+

926 "Reading individual .geopt file is not recommended. Please use read_sparc instead." 

+

927) 

+

928def read_sparc_geopt(filename, index=-1, **kwargs): 

+

929 """Parse a .geopt file bundle using a wrapper around SparcBundle 

+

930 The reader works only when other files (.ion, .inpt) exist. 

+

931 

+

932 Arguments: 

+

933 filename (str or PosixPath): Filename to the .geopt file 

+

934 index (int or str): Index or slice of the images, following the ase.io.read convention 

+

935 **kwargs: Additional parameters 

+

936 

+

937 Returns: 

+

938 Atoms or List[Atoms] 

+

939 """ 

+

940 parent_dir = Path(filename).parent 

+

941 api = locate_api() 

+

942 sb = SparcBundle(directory=parent_dir, validator=api) 

+

943 kwargs = kwargs.copy() 

+

944 if "include_all_files" not in kwargs: 

+

945 kwargs.update(include_all_files=True) 

+

946 atoms_or_images = sb.convert_to_ase(index=index, **kwargs) 

+

947 return atoms_or_images 

+

948 

+

949 

+

950# Backward compatibility 

+

951read_geopt = read_sparc_geopt 

+

952 

+

953 

+

954@deprecated( 

+

955 "Reading individual .aimd file is not recommended. Please use read_sparc instead." 

+

956) 

+

957def read_sparc_aimd(filename, index=-1, **kwargs): 

+

958 """Parse a .static file bundle using a wrapper around SparcBundle 

+

959 The reader works only when other files (.ion, .inpt) exist. 

+

960 

+

961 Arguments: 

+

962 filename (str or PosixPath): Filename to the .aimd file 

+

963 index (int or str): Index or slice of the images, following the ase.io.read convention 

+

964 **kwargs: Additional parameters 

+

965 

+

966 Returns: 

+

967 Atoms or List[Atoms] 

+

968 """ 

+

969 parent_dir = Path(filename).parent 

+

970 api = locate_api() 

+

971 sb = SparcBundle(directory=parent_dir, validator=api) 

+

972 kwargs = kwargs.copy() 

+

973 if "include_all_files" not in kwargs: 

+

974 kwargs.update(include_all_files=True) 

+

975 atoms_or_images = sb.convert_to_ase(index=index, **kwargs) 

+

976 return atoms_or_images 

+

977 

+

978 

+

979# Backward compatibility 

+

980read_aimd = read_sparc_aimd 

+

981 

+

982 

+

983def __register_new_filetype(): 

+

984 """Register the filetype() function that allows recognizing .sparc as directory 

+

985 This method should only be called for ase==3.22 compatibility and for ase-gui 

+

986 In future versions of ase gui where format is supported, this method should be removed 

+

987 """ 

+

988 import sys 

+

989 

+

990 from ase.io import formats as hacked_formats 

+

991 from ase.io.formats import filetype as _old_filetype 

+

992 from ase.io.formats import ioformats 

+

993 

+

994 def _new_filetype(filename, read=True, guess=True): 

+

995 """A hacked solution for the auto format recovery""" 

+

996 path = Path(filename) 

+

997 ext = path.name 

+

998 if ".sparc" in ext: 

+

999 return "sparc" 

+

1000 else: 

+

1001 if path.is_dir(): 

+

1002 if (len(list(path.glob("*.ion"))) > 0) and ( 

+

1003 len(list(path.glob("*.inpt"))) > 0 

+

1004 ): 

+

1005 return "sparc" 

+

1006 return _old_filetype(filename, read, guess) 

+

1007 

+

1008 hacked_formats.filetype = _new_filetype 

+

1009 sys.modules["ase.io.formats"] = hacked_formats 

+

1010 return 

+

1011 

+

1012 

+

1013@deprecated( 

+

1014 "register_ase_io_sparc will be deprecated for future releases. Please upgrade ase>=3.23." 

+

1015) 

+

1016def register_ase_io_sparc(name="sparc"): 

+

1017 """ 

+

1018 **Legacy register of io-formats for ase==3.22** 

+

1019 **For ase>=3.23, use the package entrypoint registration** 

+

1020 Monkey patching the ase.io and ase.io.formats 

+

1021 So that the following formats can be used 

+

1022 after `import sparc` 

+

1023 

+

1024 ``` 

+

1025 import ase.io 

+

1026 atoms = ase.io.read("test.sparc") 

+

1027 atoms.write("test.sparc") 

+

1028 ``` 

+

1029 

+

1030 The register method only aims to work for ase 3.22 

+

1031 the development version of ase provides a much more powerful 

+

1032 registration mechanism, so we can wait. 

+

1033 """ 

+

1034 import sys 

+

1035 from warnings import warn 

+

1036 

+

1037 import pkg_resources 

+

1038 from ase.io.formats import define_io_format as F 

+

1039 from ase.io.formats import ioformats 

+

1040 

+

1041 name = name.lower() 

+

1042 if name in ioformats.keys(): 

+

1043 return 

+

1044 desc = "SPARC .sparc bundle" 

+

1045 

+

1046 # Step 1: patch the ase.io.sparc module 

+

1047 try: 

+

1048 entry_points = next( 

+

1049 ep for ep in pkg_resources.iter_entry_points("ase.io") if ep.name == "sparc" 

+

1050 ) 

+

1051 _monkey_mod = entry_points.load() 

+

1052 except Exception as e: 

+

1053 warn( 

+

1054 ( 

+

1055 "Failed to load entrypoint `ase.io.sparc`, " 

+

1056 "you may need to reinstall sparc python api.\n" 

+

1057 "You may still use `sparc.read_sparc` and " 

+

1058 "`sparc.write_sparc` methods, " 

+

1059 "but not `ase.io.read`\n", 

+

1060 f"The error is {e}", 

+

1061 ) 

+

1062 ) 

+

1063 return 

+

1064 

+

1065 sys.modules[f"ase.io.{name}"] = _monkey_mod 

+

1066 __register_new_filetype() 

+

1067 

+

1068 # Step 2: define a new format 

+

1069 F( 

+

1070 name, 

+

1071 desc=desc, 

+

1072 code="+S", # read_sparc has multi-image support 

+

1073 ext="sparc", 

+

1074 ) 

+

1075 

+

1076 if name not in ioformats.keys(): 

+

1077 warn( 

+

1078 ( 

+

1079 "Registering .sparc format with ase.io failed. " 

+

1080 "You may still use `sparc.read_sparc` and " 

+

1081 "`sparc.write_sparc` methods. \n" 

+

1082 "Please contact the developer to report this issue." 

+

1083 ) 

+

1084 ) 

+

1085 return 

+

1086 

+

1087 import tempfile 

+

1088 

+

1089 from ase.io import read 

+

1090 

+

1091 with tempfile.TemporaryDirectory(suffix=".sparc") as tmpdir: 

+

1092 try: 

+

1093 read(tmpdir) 

+

1094 except Exception as e: 

+

1095 emsg = str(e).lower() 

+

1096 if "bundletrajectory" in emsg: 

+

1097 warn( 

+

1098 "Atomatic format inference for sparc is not correctly registered. " 

+

1099 "You may need to use format=sparc in ase.io.read and ase.io.write. " 

+

1100 ) 

+

1101 # Add additional formats including .ion (r/w), .static, .geopt, .aimd 

+

1102 F( 

+

1103 "ion", 

+

1104 desc="SPARC .ion file", 

+

1105 module="sparc", 

+

1106 code="1S", 

+

1107 ext="ion", 

+

1108 ) 

+

1109 F( 

+

1110 "static", 

+

1111 desc="SPARC single point results", 

+

1112 module="sparc", 

+

1113 code="+S", 

+

1114 ext="static", 

+

1115 ) 

+

1116 F( 

+

1117 "geopt", 

+

1118 desc="SPARC geometric optimization results", 

+

1119 module="sparc", 

+

1120 code="+S", 

+

1121 ext="geopt", 

+

1122 ) 

+

1123 F("aimd", desc="SPARC AIMD results", module="sparc", code="+S", ext="aimd") 

+

1124 

+

1125 # TODO: remove print options as it may be redundant 

+

1126 print("Successfully registered sparc formats with ase.io!") 

+

1127 

+

1128 

+

1129# ase>=3.23 uses new ExternalIOFormat as registered entrypoints 

+

1130# Please do not use from ase.io.formats import ExternalIOFormat! 

+

1131# This causes circular import 

+

1132try: 

+

1133 from ase.utils.plugins import ExternalIOFormat as EIF 

+

1134except ImportError: 

+

1135 # Backward Compatibility 

+

1136 from typing import List, NamedTuple, Optional, Union 

+

1137 

+

1138 # Copy definition from 3.23 

+

1139 # Name is defined in the entry point 

+

1140 class ExternalIOFormat(NamedTuple): 

+

1141 desc: str 

+

1142 code: str 

+

1143 module: Optional[str] = None 

+

1144 glob: Optional[Union[str, List[str]]] = None 

+

1145 ext: Optional[Union[str, List[str]]] = None 

+

1146 magic: Optional[Union[bytes, List[bytes]]] = None 

+

1147 magic_regex: Optional[bytes] = None 

+

1148 

+

1149 EIF = ExternalIOFormat 

+

1150 

+

1151format_sparc = EIF( 

+

1152 desc="SPARC .sparc bundle", 

+

1153 module="sparc.io", 

+

1154 code="+S", # read_sparc has multi-image support 

+

1155 ext="sparc", 

+

1156) 

+

1157format_ion = EIF( 

+

1158 desc="SPARC .ion file", 

+

1159 module="sparc.io", 

+

1160 code="1S", 

+

1161 ext="ion", 

+

1162) 

+

1163format_static = EIF( 

+

1164 desc="SPARC single point results", 

+

1165 module="sparc.io", 

+

1166 code="+S", 

+

1167 glob=["*.static", "*.static_*"], 

+

1168) 

+

1169format_geopt = EIF( 

+

1170 desc="SPARC geometric optimization results", 

+

1171 module="sparc.io", 

+

1172 code="+S", 

+

1173 glob=["*.geopt", "*.geopt_*"], 

+

1174) 

+

1175format_aimd = EIF( 

+

1176 desc="SPARC AIMD results", 

+

1177 module="sparc", 

+

1178 code="+S", 

+

1179 glob=["*.aimd*", "*.geopt_*"], 

+

1180) 
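These EIF objects are meant to be exposed through package entry points. A setup.py sketch: the "ase.io" group matches the legacy lookup in register_ase_io_sparc above, while the "ase.ioformats" group name for ase>=3.23 is an assumption based on ASE's plugin mechanism, not taken from this file:

```
# setup.py sketch; see the lead-in for which group names are assumptions
from setuptools import setup

setup(
    name="sparc-x-api",
    entry_points={
        "ase.io": ["sparc = sparc.io"],
        "ase.ioformats": ["sparc = sparc.io:format_sparc"],
    },
)
```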

+
diff --git a/_static/htmlcov/z_e32f35a0016f670d_quicktest_py.html b/_static/htmlcov/z_e32f35a0016f670d_quicktest_py.html
new file mode 100644
index 00000000..b4c3a50f
--- /dev/null
+++ b/_static/htmlcov/z_e32f35a0016f670d_quicktest_py.html
@@ -0,0 +1,513 @@
[coverage.py v7.6.7 report header: sparc/quicktest.py, 80% coverage, 216 statements, created 2024-11-20 18:11 +0000]

1"""A simple test module for sparc python api 

+

2Usage: 

+

3python -m sparc.quicktest 

+

4""" 

+

5from pathlib import Path 

+

6 

+

7from ase.data import chemical_symbols 

+

8 

+

9from .utils import cprint 

+

10 

+

11 

+

12class BaseTest(object): 

+

13 """Base class for all tests providing functionalities 

+

14 

+

15 Each child class will implement its own `run_test` method to 

+

16 update the `result`, `error_handling` and `info` fields. 

+

17 

+

18 If you wish to include a simple error handling message for each 

+

19 child class, add a line starting `Error handling` follows by the 

+

20 helper message at the end of the docstring 

+

21 """ 

+

22 

+

23 def __init__(self): 

+

24 self.result = None 

+

25 self.error_msg = "" 

+

26 self.error_handling = "" 

+

27 self.info = {} 

+

28 

+

29 @property 

+

30 @classmethod 

+

31 def display_name(cls): 

+

32 return cls.__name__ 

+

33 

+

34 def display_docstring(self): 

+

35 """Convert the class's docstring to error handling""" 

+

36 doc = self.__class__.__doc__ 

+

37 error_handling_lines = [] 

+

38 begin_record = False 

+

39 indent = 0 # indentation for the "Error handling" line 

+

40 if doc: 

+

41 for line in doc.splitlines(): 

+

42 if line.lstrip().startswith("Error handling"): 

+

43 if begin_record is True: 

+

44 msg = ( 

+

45 "There are multiple Error handlings " 

+

46 "in the docstring of " 

+

47 f"{self.__class__.__name__}." 

+

48 ) 

+

49 raise ValueError(msg) 

+

50 begin_record = True 

+

51 indent = len(line) - len(line.lstrip()) 

+

52 elif begin_record is True: 

+

53 current_indent = len(line) - len(line.lstrip()) 

+

54 line = line.strip() 

+

55 if len(line) > 0: # Only add non-empty lines 

+

56 # Compensate for the extra indentation 

+

57 # if current_indent > indent 

+

58 spaces = max(0, current_indent - indent) * " " 

+

59 error_handling_lines.append(spaces + line) 

+

60 else: 

+

61 pass 

+

62 else: 

+

63 pass 

+

64 error_handling_string = "\n".join(error_handling_lines) 

+

65 return error_handling_string 

+

66 
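A minimal sketch of the docstring convention this parser expects (the test class is hypothetical):

```
class DummyTest(BaseTest):
    """Example test

    Error handling:
    - first remedy line
    - second remedy line
    """

    def make_test(self):
        self.result = False

print(DummyTest().display_docstring())
# -> "- first remedy line\n- second remedy line"
```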

+

67 def make_test(self): 

+

68 """Each class should implement ways to update `result` and `info`""" 

+

69 raise NotImplementedError 

+

70 

+

71 def run_test(self): 

+

72 """Run test and update result etc. 

+

73 If result is False, update the error handling message 

+

74 """ 

+

75 try: 

+

76 self.make_test() 

+

77 except Exception as e: 

+

78 self.result = False 

+

79 self.error_msg = str(e) 

+

80 

+

81 if self.result is None: 

+

82 raise ValueError( 

+

83 "Test result is not updated for " f"{self.__class__.__name__} !" 

+

84 ) 

+

85 if self.result is False: 

+

86 self.error_handling = self.display_docstring() 

+

87 return 

+

88 

+

89 

+

90class ImportTest(BaseTest): 

+

91 """Check if external io format `sparc` can be registered in ASE 

+

92 

+

93 Error handling: 

+

94 - Make sure SPARC-X-API is installed via conda / pip / setuptools 

+

95 - If you wish to work on SPARC-X-API source code, use `pip install -e` 

+

96 instead of setting up $PYTHON_PATH 

+

97 """ 

+

98 

+

99 display_name = "Import" 

+

100 

+

101 def make_test(self): 

+

102 cprint("Testing import...", color="COMMENT") 

+

103 from ase.io.formats import ioformats 

+

104 

+

105 self.result = "sparc" in ioformats.keys() 

+

106 if self.result is False: 

+

107 self.error_msg = ( 

+

108 "Cannot find `sparc` as a valid " "external ioformat for ASE." 

+

109 ) 

+

110 return 

+

111 

+

112 

+

113class PspTest(BaseTest): 

+

114 """Check at least one directory of Pseudopotential files exist 

+

115 info[`psp_dir`] contains the first psp dir found on system 

+

116 # TODO: check if all psp files can be located 

+

117 #TODO: update to the ASE 3.23 config method 

+

118 

+

119 Error handling: 

+

120 - Default version of psp files can be downloaded by 

+

121 `python -m sparc.download_data` 

+

122 - Alternatively, specify the variable $SPARC_PSP_PATH 

+

123 to the custom pseudopotential files 

+

124 """ 

+

125 

+

126 display_name = "Pseudopotential" 

+

127 

+

128 def make_test(self): 

+

129 cprint("Testing pseudo potential path...", color="COMMENT") 

+

130 import tempfile 

+

131 

+

132 from .io import SparcBundle 

+

133 from .sparc_parsers.pseudopotential import find_pseudo_path 

+

134 

+

135 with tempfile.TemporaryDirectory() as tmpdir: 

+

136 sb = SparcBundle(directory=tmpdir) 

+

137 psp_dir = sb.psp_dir 

+

138 

+

139 if psp_dir is not None: 

+

140 psp_dir = Path(psp_dir) 

+

141 self.info["psp_dir"] = f"{psp_dir.resolve()}" 

+

142 if not psp_dir.is_dir(): 

+

143 self.result = False 

+

144 self.error_msg = ( 

+

145 "Pseudopotential files path " f"{psp_dir.resolve()} does not exist." 

+

146 ) 

+

147 else: 

+

148 missing_elements = [] 

+

149 # Default psp file are 1-57 + 72-83 

+

150 spms_elements = chemical_symbols[1:58] + chemical_symbols[72:84] 

+

151 for element in spms_elements: 

+

152 try: 

+

153 find_pseudo_path(element, psp_dir) 

+

154 except Exception: 

+

155 missing_elements.append(element) 

+

156 if len(missing_elements) == 0: 

+

157 self.result = True 

+

158 else: 

+

159 self.result = False 

+

160 self.error_msg = ( 

+

161 "Pseudopotential files for " 

+

162 f"{len(missing_elements)} elements are " 

+

163 "missing or incompatible: \n" 

+

164 f"{missing_elements}" 

+

165 ) 

+

166 else: 

+

167 self.info["psp_dir"] = "None" 

+

168 self.result = False 

+

169 self.error_msg = ( 

+

170 "Pseudopotential file path not defined and/or " 

+

171 "default psp files are incomplete." 

+

172 ) 

+

173 return 

+

174 

+

175 

+

176class ApiTest(BaseTest): 

+

177 """Check if the API can be loaded, and store the Schema version. 

+

178 

+

179 # TODO: consider change to schema instead of api 

+

180 # TODO: allow config to change json file path 

+

181 Error handling: 

+

182 - Check if default JSON schema exists in 

+

183 `<sparc-x-api-root>/sparc_json_api/parameters.json` 

+

184 - Use $SPARC_DOC_PATH to specify the raw LaTeX files 

+

185 """ 

+

186 

+

187 display_name = "JSON API" 

+

188 

+

189 def make_test(self): 

+

190 from .utils import locate_api 

+

191 

+

192 try: 

+

193 api = locate_api() 

+

194 version = api.sparc_version 

+

195 self.result = True 

+

196 self.info["api_version"] = version 

+

197 self.info["api_source"] = api.source 

+

198 except Exception as e: 

+

199 self.result = False 

+

200 self.info["api_version"] = "NaN" 

+

201 self.info["api_source"] = "not found" 

+

202 self.error_msg = ( 

+

203 "Error when locating a JSON schema or " 

+

204 f"LaTeX source files for SPARC. Error is {e}" 

+

205 ) 

+

206 return 

+

207 

+

208 

+

209class CommandTest(BaseTest): 

+

210 """Check validity of command to run SPARC calculation. This test 

+

211 also checks sparc version and socket compatibility 

+

212 

+

213 # TODO: check ase 3.23 config with separate binary 

+

214 Error handling: 

+

215 - The command prefix to run SPARC calculation should look like 

+

216 `<mpi instructions> <sparc binary>` 

+

217 - Use $ASE_SPARC_COMMAND to set the command string 

+

218 - Check HPC resources and compatibility (e.g. `srun` on a login node) 

+

219 """ 

+

220 

+

221 display_name = "SPARC Command" 

+

222 

+

223 def make_test(self): 

+

224 import tempfile 

+

225 

+

226 from sparc.calculator import SPARC 

+

227 

+

228 self.info["command"] = "" 

+

229 self.info["sparc_version"] = "" 

+

230 

+

231 with tempfile.TemporaryDirectory() as tmpdir: 

+

232 calc = SPARC(directory=tmpdir) 

+

233 # Step 1: validity of sparc command 

+

234 try: 

+

235 test_cmd = calc._make_command() 

+

236 self.result = True 

+

237 self.info["command"] = test_cmd 

+

238 except Exception as e: 

+

239 self.result = False 

+

240 self.info["command"] = "not found" 

+

241 self.error_msg = f"Error setting SPARC command:\n{e}" 

+

242 

+

243 # Step 2: check SPARC binary version 

+

244 try: 

+

245 sparc_version = calc.detect_sparc_version() 

+

246 # Version may be None if failed to retrieve 

+

247 if sparc_version: 

+

248 self.result = self.result & True 

+

249 self.info["sparc_version"] = sparc_version 

+

250 else: 

+

251 self.result = False 

+

252 self.info["sparc_version"] = "NaN" 

+

253 self.error_msg += "\n" if len(self.error_msg) > 0 else "" 

+

254 self.error_msg += "Error detecting SPARC version" 

+

255 except Exception as e: 

+

256 self.result = False 

+

257 self.info["sparc_version"] = "NaN" 

+

258 self.error_msg += "\n" if len(self.error_msg) > 0 else "" 

+

259 self.error_msg += f"\nError detecting SPARC version:\n{e}" 

+

260 return 

+

261 

+

262 

+

263class FileIOCalcTest(BaseTest): 

+

264 """Run a simple calculation in File IO mode. 

+

265 

+

266 # TODO: check ase 3.23 config 

+

267 Error handling: 

+

268 - Check if settings for pseudopotential files are correct 

+

269 - Check if SPARC binary exists and functional 

+

270 - Check if specific HPC requirements are met: 

+

271 (module files, libraries, parallel settings, resources) 

+

272 """ 

+

273 

+

274 display_name = "Calculation (File I/O)" 

+

275 

+

276 def make_test(self): 

+

277 import tempfile 

+

278 

+

279 from ase.build import bulk 

+

280 

+

281 from sparc.calculator import SPARC 

+

282 

+

283 # 1x Al atoms with super bad calculation condition 

+

284 al = bulk("Al", cubic=False) 

+

285 

+

286 with tempfile.TemporaryDirectory() as tmpdir: 

+

287 calc = SPARC(h=0.3, kpts=(1, 1, 1), tol_scf=1e-3, directory=tmpdir) 

+

288 try: 

+

289 al.calc = calc 

+

290 al.get_potential_energy() 

+

291 self.result = True 

+

292 except Exception as e: 

+

293 self.result = False 

+

294 self.error_msg = "Simple calculation in file I/O mode failed: \n" f"{e}" 

+

295 return 

+

296 

+

297 

+

298class SocketCalcTest(BaseTest): 

+

299 """Run a simple calculation in Socket mode (UNIX socket). 

+

300 

+

301 # TODO: check ase 3.23 config 

+

302 Error handling: 

+

303 - The same as error handling in file I/O calculation test 

+

304 - Check if SPARC binary supports socket 

+

305 """ 

+

306 

+

307 display_name = "Calculation (UNIX socket)" 

+

308 

+

309 def make_test(self): 

+

310 import tempfile 

+

311 

+

312 from ase.build import bulk 

+

313 

+

314 from sparc.calculator import SPARC 

+

315 

+

316 # Check SPARC binary socket compatibility 

+

317 with tempfile.TemporaryDirectory() as tmpdir: 

+

318 calc = SPARC(directory=tmpdir) 

+

319 try: 

+

320 sparc_compat = calc.detect_socket_compatibility() 

+

321 self.info["sparc_socket_compatibility"] = sparc_compat 

+

322 except Exception: 

+

323 self.info["sparc_socket_compatibility"] = False 

+

324 

+

325 # 1x Al atoms with super bad calculation condition 

+

326 al = bulk("Al", cubic=False) 

+

327 

+

328 with tempfile.TemporaryDirectory() as tmpdir: 

+

329 calc = SPARC( 

+

330 h=0.3, kpts=(1, 1, 1), tol_scf=1e-3, use_socket=True, directory=tmpdir 

+

331 ) 

+

332 try: 

+

333 al.calc = calc 

+

334 al.get_potential_energy() 

+

335 self.result = True 

+

336 except Exception as e: 

+

337 self.result = False 

+

338 self.error_msg = ( 

+

339 "Simple calculation in socket mode (UNIX socket) failed: \n" f"{e}" 

+

340 ) 

+

341 return 

+

342 

+

343 

+

344def main(): 

+

345 cprint( 

+

346 ("Performing a quick test on your " "SPARC and python API setup"), 

+

347 color=None, 

+

348 ) 

+

349 

+

350 test_classes = [ 

+

351 ImportTest(), 

+

352 PspTest(), 

+

353 ApiTest(), 

+

354 CommandTest(), 

+

355 FileIOCalcTest(), 

+

356 SocketCalcTest(), 

+

357 ] 

+

358 

+

359 system_info = {} 

+

360 for test in test_classes: 

+

361 test.run_test() 

+

362 system_info.update(test.info) 

+

363 

+

364 # Header section 

+

365 print("-" * 80) 

+

366 cprint( 

+

367 "Summary", 

+

368 bold=True, 

+

369 color="HEADER", 

+

370 ) 

+

371 print("-" * 80) 

+

372 cprint("Configuration", bold=True, color="HEADER") 

+

373 for key, val in system_info.items(): 

+

374 print(f"{key}: {val}") 

+

375 

+

376 print("-" * 80) 

+

377 # Body section 

+

378 cprint("Tests", bold=True, color="HEADER") 

+

379 

+

380 print_wiki = False 

+

381 for test in test_classes: 

+

382 cprint(f"{test.display_name}:", bold=True, end="") 

+

383 if test.result is True: 

+

384 cprint(" PASS", color="OKGREEN") 

+

385 else: 

+

386 cprint(" FAIL", color="FAIL") 

+

387 print_wiki = True 

+

388 

+

389 print("-" * 80) 

+

390 # Error information section 

+

391 has_print_error_header = False 

+

392 for test in test_classes: 

+

393 if (test.result is False) and (test.error_handling): 

+

394 if has_print_error_header is False: 

+

395 cprint( 

+

396 ("Some tests failed! " "Please check the following information.\n"), 

+

397 color="FAIL", 

+

398 ) 

+

399 has_print_error_header = True 

+

400 cprint(f"{test.display_name}:", bold=True) 

+

401 cprint(f"{test.error_msg}", color="FAIL") 

+

402 print(test.error_handling) 

+

403 print("\n") 

+

404 

+

405 if print_wiki: 

+

406 print("-" * 80) 

+

407 cprint( 

+

408 "Please check additional information from:\n" 

+

409 "1. SPARC's documentation: https://github.com/SPARC-X/SPARC/blob/master/doc/Manual.pdf \n" 

+

410 "2. Python API documentation: https://github.com/alchem0x2A/SPARC-X-API/blob/master/README.md\n", 

+

411 color=None, 

+

412 ) 

+

413 

+

414 

+

415if __name__ == "__main__": 

+

416 main() 
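The listing above ends with a conventional `main()` entry point, so the whole diagnostic suite can be launched as a script. A minimal usage sketch (assuming the package layout shown in this patch, where the file is importable as `sparc.quicktest`; the `run_test`/`info` attributes come from the `BaseTest` machinery used by `main()` above):

# Run the complete diagnostic suite from a shell
#     python -m sparc.quicktest

# Or drive a single check programmatically
from sparc.quicktest import ApiTest

test = ApiTest()
test.run_test()
print(test.result, test.info)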

diff --git a/_static/htmlcov/z_e32f35a0016f670d_socketio_py.html b/_static/htmlcov/z_e32f35a0016f670d_socketio_py.html
new file mode 100644
index 00000000..2b87d0c3
--- /dev/null
+++ b/_static/htmlcov/z_e32f35a0016f670d_socketio_py.html
@@ -0,0 +1,451 @@
[Coverage report page for sparc/socketio.py: 23% of 199 statements covered; coverage.py v7.6.7, created at 2024-11-20 18:11 +0000. Page navigation chrome omitted; the rendered source listing follows.]

1"""A i-PI compatible socket protocol implemented in SPARC 

+

2""" 

+

3import hashlib 

+

4import io 

+

5import os 

+

6import pickle 

+

7import random 

+

8import socket 

+

9import string 

+

10 

+

11import numpy as np 

+

12from ase.calculators.socketio import ( 

+

13 IPIProtocol, 

+

14 SocketClient, 

+

15 SocketClosed, 

+

16 SocketServer, 

+

17 actualunixsocketname, 

+

18) 

+

19 

+

20 

+

21def generate_random_socket_name(prefix="sparc_", length=6): 

+

22 """Generate a random socket name with the given prefix and a specified length of random hex characters.""" 

+

23 random_chars = "".join(random.choices(string.hexdigits.lower(), k=length)) 

+

24 return prefix + random_chars 

+

25 

+

26 

+

27class SPARCProtocol(IPIProtocol): 

+

28 """Extending the i-PI protocol to support extra routines""" 

+

29 

+

30 def send_string(self, msg, msglen=None): 

+

31 self.log(" send string", repr(msg)) 

+

32 # assert msg in self.statements, msg 

+

33 if msglen is None: 

+

34 msglen = len(msg) 

+

35 assert msglen >= len(msg) 

+

36 msg = msg.encode("ascii").ljust(msglen) 

+

37 self.send(msglen, np.int32) 

+

38 self.socket.sendall(msg) 

+

39 return 

+

40 

+

41 def send_object(self, obj): 

+

42 """Send an object dumped into pickle""" 

+

43 # We can use the highese protocol since the 

+

44 # python requirement >= 3.8 

+

45 pkl_bytes = pickle.dumps(obj, protocol=5) 

+

46 nbytes = len(pkl_bytes) 

+

47 md5_checksum = hashlib.md5(pkl_bytes) 

+

48 checksum_digest, checksum_count = ( 

+

49 md5_checksum.digest(), 

+

50 md5_checksum.digest_size, 

+

51 ) 

+

52 self.sendmsg("PKLOBJ") # To distinguish from other methods like INIT 

+

53 self.log(" pickle bytes to send: ", str(nbytes)) 

+

54 self.send(nbytes, np.int32) 

+

55 self.log(" sending pickle object....") 

+

56 self.socket.sendall(pkl_bytes) 

+

57 self.log(" sending md5 sum of size: ", str(checksum_count)) 

+

58 self.send(checksum_count, np.int32) 

+

59 self.log(" sending md5 sum..... ", str(checksum_count)) 

+

60 self.socket.sendall(checksum_digest) 

+

61 return 

+

62 

+

63 def recv_object(self, include_header=True): 

+

64 """Return a decoded file 

+

65 

+

66 include_header: should we receive the header or not 

+

67 """ 

+

68 if include_header: 

+

69 msg = self.recvmsg() 

+

70 assert ( 

+

71 msg.strip() == "PKLOBJ" 

+

72 ), f"Incorrect header {msg} received when calling recv_object method! Please contact the developers" 

+

73 nbytes = int(self.recv(1, np.int32)) 

+

74 self.log(" Will receive pickle object with n-bytes: ", nbytes) 

+

75 bytes_received = self._recvall(nbytes) 

+

76 checksum_nbytes = int(self.recv(1, np.int32)) 

+

77 self.log(" Will receive cheksum digest of nbytes:", checksum_nbytes) 

+

78 digest_received = self._recvall(checksum_nbytes) 

+

79 digest_calc = hashlib.md5(bytes_received).digest() 

+

80 minlen = min(len(digest_calc), len(digest_received)) 

+

81 assert ( 

+

82 digest_calc[:minlen] == digest_received[:minlen] 

+

83 ), "MD5 checksum for the received object does not match!" 

+

84 obj = pickle.loads(bytes_received) 

+

85 return obj 

+

86 

+

87 def send_param(self, name, value): 

+

88 """Send a specific param setting to SPARC 

+

89 This is just a test function to see how things may work 

+

90 

+

91 TODO: 

+

92 1) test with just 2 string values to see if SPARC can receive 

+

93 """ 

+

94 self.log(f"Setup param {name}, {value}") 

+

95 msg = self.status() 

+

96 assert msg == "READY", msg 

+

97 # Send message 

+

98 self.sendmsg("SETPARAM") 

+

99 # Send name 

+

100 self.send_string(str(name)) 

+

101 # Send value 

+

102 self.send_string(str(value)) 

+

103 # After this step, socket client should return READY 

+

104 return 

+

105 

+

106 def sendinit(self): 

+

107 """Mimick the old sendinit method but to provide atoms and params 

+

108 to the calculator instance. 

+

109 The actual behavior regarding how the calculator would be (re)-initialized, dependends on the implementation of recvinit 

+

110 """ 

+

111 self.log(" New sendinit for SPARC protocol") 

+

112 self.sendmsg("INIT") 

+

113 self.send(0, np.int32) # fallback 

+

114 msg_chars = [ord(c) for c in "NEWPROTO"] 

+

115 len_msg = len(msg_chars) 

+

116 self.send(len_msg, np.int32) 

+

117 self.send(msg_chars, np.byte) # initialization string 

+

118 return 

+

119 

+

120 def recvinit(self): 

+

121 """Fallback recvinit method""" 

+

122 return super().recvinit() 

+

123 

+

124 def calculate_new_protocol(self, atoms, params): 

+

125 atoms = atoms.copy() 

+

126 atoms.calc = None 

+

127 self.log(" calculate with new protocol") 

+

128 msg = self.status() 

+

129 # We don't know how NEEDINIT is supposed to work, but some codes 

+

130 # seem to be okay if we skip it and send the positions instead. 

+

131 if msg == "NEEDINIT": 

+

132 self.sendinit() 

+

133 self.send_object((atoms, params)) 

+

134 msg = self.status() 

+

135 cell = atoms.get_cell() 

+

136 positions = atoms.get_positions() # Original order 

+

137 assert msg == "READY", msg 

+

138 icell = np.linalg.pinv(cell).transpose() 

+

139 self.sendposdata(cell, icell, positions) 

+

140 msg = self.status() 

+

141 assert msg == "HAVEDATA", msg 

+

142 e, forces, virial, morebytes = self.sendrecv_force() 

+

143 r = dict(energy=e, forces=forces, virial=virial, morebytes=morebytes) 

+

144 # Additional data (e.g. parsed from file output) 

+

145 moredata = self.recv_object() 

+

146 return r, moredata 

+

147 

+

148 

+

149# TODO: make sure both calc are ok 

+

150 

+

151 

+

152class SPARCSocketServer(SocketServer): 

+

153 """We only implement the unix socket version due to simplicity 

+

154 

+

155 parent: the SPARC parent calculator 

+

156 """ 

+

157 

+

158 def __init__( 

+

159 self, 

+

160 port=None, 

+

161 unixsocket=None, 

+

162 timeout=None, 

+

163 log=None, 

+

164 parent=None 

+

165 # launch_client=None, 

+

166 ): 

+

167 super().__init__(port=port, unixsocket=unixsocket, timeout=timeout, log=log) 

+

168 self.parent = parent 

+

169 print("Parent : ", self.parent) 

+

170 if self.parent is not None: 

+

171 self.proc = self.parent.process 

+

172 else: 

+

173 self.proc = None 

+

174 print(self.proc) 

+

175 

+

176 # TODO: guard cases for non-unix sockets 

+

177 @property 

+

178 def socket_filename(self): 

+

179 return self.serversocket.getsockname() 

+

180 

+

181 @property 

+

182 def proc(self): 

+

183 if self.parent: 

+

184 return self.parent.process 

+

185 else: 

+

186 return None 

+

187 

+

188 @proc.setter 

+

189 def proc(self, value): 

+

190 return 

+

191 

+

192 def _accept(self): 

+

193 """Use the SPARCProtocol instead""" 

+

194 print(self.proc) 

+

195 super()._accept() 

+

196 print(self.proc) 

+

197 old_protocol = self.protocol 

+

198 # Swap the protocol 

+

199 if old_protocol: 

+

200 self.protocol = SPARCProtocol(self.clientsocket, txt=self.log) 

+

201 return 

+

202 

+

203 def send_atoms_and_params(self, atoms, params): 

+

204 """Update the atoms and parameters for the SPARC calculator 

+

205 The params should be assignable to SPARC.set 

+

206 

+

207 The calc for atoms is stripped for simplicity 

+

208 """ 

+

209 atoms.calc = None 

+

210 params = dict(params) 

+

211 pair = (atoms, params) 

+

212 self.protocol.send_object(pair) 

+

213 return 

+

214 

+

215 def calculate_origin_protocol(self, atoms): 

+

216 """Send geometry to client and return calculated things as dict. 

+

217 

+

218 This will block until client has established connection, then 

+

219 wait for the client to finish the calculation.""" 

+

220 assert not self._closed 

+

221 

+

222 # If we have not established connection yet, we must block 

+

223 # until the client catches up: 

+

224 if self.protocol is None: 

+

225 self._accept() 

+

226 return self.protocol.calculate(atoms.positions, atoms.cell) 

+

227 

+

228 def calculate_new_protocol(self, atoms, params={}): 

+

229 assert not self._closed 

+

230 

+

231 # If we have not established connection yet, we must block 

+

232 # until the client catches up: 

+

233 if self.protocol is None: 

+

234 self._accept() 

+

235 return self.protocol.calculate_new_protocol(atoms, params) 

+

236 

+

237 

+

238class SPARCSocketClient(SocketClient): 

+

239 def __init__( 

+

240 self, 

+

241 host="localhost", 

+

242 port=None, 

+

243 unixsocket=None, 

+

244 timeout=None, 

+

245 log=None, 

+

246 parent_calc=None 

+

247 # use_v2_protocol=True # If we should use the v2 SPARC protocol 

+

248 ): 

+

249 """Reload the socket client and use SPARCProtocol""" 

+

250 super().__init__( 

+

251 host=host, 

+

252 port=port, 

+

253 unixsocket=unixsocket, 

+

254 timeout=timeout, 

+

255 log=log, 

+

256 ) 

+

257 sock = self.protocol.socket 

+

258 self.protocol = SPARCProtocol(sock, txt=log) 

+

259 self.parent_calc = parent_calc # Track the actual calculator 

+

260 # TODO: make sure the client is compatible with the default socketclient 

+

261 

+

262 # We shall make NEEDINIT to be the default state 

+

263 # self.state = "NEEDINIT" 

+

264 

+

265 def calculate(self, atoms, use_stress): 

+

266 """Use the calculator instance""" 

+

267 if atoms.calc is None: 

+

268 atoms.calc = self.parent_calc 

+

269 return super().calculate(atoms, use_stress) 

+

270 

+

271 def irun(self, atoms, use_stress=True): 

+

272 """Reimplement single step calculation 

+

273 

+

274 We're free to implement the INIT method in socket protocol as most 

+

275 calculators do not involve using these. We can let the C-SPARC to spit out 

+

276 error about needinit error. 

+

277 """ 

+

278 # Discard positions received from POSDATA 

+

279 # if the server has send positions through recvinit method 

+

280 discard_posdata = False 

+

281 new_protocol = False 

+

282 try: 

+

283 while True: 

+

284 try: 

+

285 msg = self.protocol.recvmsg() 

+

286 except SocketClosed: 

+

287 # Server closed the connection, but we want to 

+

288 # exit gracefully anyway 

+

289 msg = "EXIT" 

+

290 

+

291 if msg == "EXIT": 

+

292 # Send stop signal to clients: 

+

293 self.comm.broadcast(np.ones(1, bool), 0) 

+

294 # (When otherwise exiting, things crashed and we should 

+

295 # let MPI_ABORT take care of the mess instead of trying 

+

296 # to synchronize the exit) 

+

297 return 

+

298 elif msg == "STATUS": 

+

299 self.protocol.sendmsg(self.state) 

+

300 elif msg == "POSDATA": 

+

301 assert self.state == "READY" 

+

302 assert ( 

+

303 atoms is not None 

+

304 ), "Your SPARCSocketClient isn't properly initialized!" 

+

305 cell, icell, positions = self.protocol.recvposdata() 

+

306 if not discard_posdata: 

+

307 atoms.cell[:] = cell 

+

308 atoms.positions[:] = positions 

+

309 

+

310 # At this stage, we should only rely on self.calculate 

+

311 # to continue the socket calculation or restart 

+

312 self.comm.broadcast(np.zeros(1, bool), 0) 

+

313 energy, forces, virial = self.calculate(atoms, use_stress) 

+

314 

+

315 self.state = "HAVEDATA" 

+

316 yield 

+

317 elif msg == "GETFORCE": 

+

318 assert self.state == "HAVEDATA", self.state 

+

319 self.protocol.sendforce(energy, forces, virial) 

+

320 if new_protocol: 

+

321 # TODO: implement more raw results 

+

322 raw_results = self.parent_calc.raw_results 

+

323 self.protocol.send_object(raw_results) 

+

324 self.state = "NEEDINIT" 

+

325 elif msg == "INIT": 

+

326 assert self.state == "NEEDINIT" 

+

327 # Fall back to the default socketio 

+

328 bead_index, initbytes = self.protocol.recvinit() 

+

329 # The parts below use the new sparc protocol 

+

330 print("Init bytes: ", initbytes) 

+

331 init_msg = "".join([chr(d) for d in initbytes]) 

+

332 if init_msg.startswith("NEWPROTO"): 

+

333 new_protocol = True 

+

334 recv_atoms, params = self.protocol.recv_object() 

+

335 print(recv_atoms, params) 

+

336 if params != {}: 

+

337 self.parent_calc.set(**params) 

+

338 # TODO: should we update the atoms directly or keep copy? 

+

339 atoms = recv_atoms 

+

340 atoms.calc = self.parent_calc 

+

341 discard_posdata = True 

+

342 self.state = "READY" 

+

343 else: 

+

344 raise KeyError("Bad message", msg) 

+

345 finally: 

+

346 self.close() 

+

347 

+

348 def run(self, atoms=None, use_stress=False): 

+

349 """Socket mode in SPARC should allow arbitrary start""" 

+

350 # As a default we shall start the SPARCSocketIO always in needinit mode 

+

351 if atoms is None: 

+

352 self.state = "NEEDINIT" 

+

353 for _ in self.irun(atoms=atoms, use_stress=use_stress): 

+

354 pass 
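The `send_object`/`recv_object` pair above defines a small pickle-over-socket framing: an int32 byte count, the pickled payload, an int32 digest size, and an MD5 digest. Below is a self-contained sketch of that framing. The names `frame_object`/`unframe_object` are illustrative and not part of SPARC-X-API, and little-endian int32 is assumed (matching numpy's native order on common x86/ARM hosts):

# Minimal sketch of the PKLOBJ wire framing used by SPARCProtocol above
import hashlib
import pickle
import struct


def frame_object(obj):
    """Bytes equivalent to what send_object puts on the wire after the
    PKLOBJ header: int32 length, pickle payload, int32 digest size, digest."""
    payload = pickle.dumps(obj, protocol=5)
    digest = hashlib.md5(payload).digest()
    return (
        struct.pack("<i", len(payload))
        + payload
        + struct.pack("<i", len(digest))
        + digest
    )


def unframe_object(stream):
    """Inverse of frame_object; verifies the MD5 checksum like recv_object does."""
    (nbytes,) = struct.unpack("<i", stream[:4])
    payload = stream[4 : 4 + nbytes]
    (ndigest,) = struct.unpack("<i", stream[4 + nbytes : 8 + nbytes])
    digest = stream[8 + nbytes : 8 + nbytes + ndigest]
    assert hashlib.md5(payload).digest() == digest, "MD5 checksum mismatch"
    return pickle.loads(payload)


# Round-trip check
assert unframe_object(frame_object({"h": 0.3, "kpts": (1, 1, 1)})) == {
    "h": 0.3,
    "kpts": (1, 1, 1),
}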

diff --git a/_static/htmlcov/z_e32f35a0016f670d_utils_py.html b/_static/htmlcov/z_e32f35a0016f670d_utils_py.html
new file mode 100644
index 00000000..669c0184
--- /dev/null
+++ b/_static/htmlcov/z_e32f35a0016f670d_utils_py.html
@@ -0,0 +1,489 @@
[Coverage report page for sparc/utils.py: 38% of 221 statements covered; coverage.py v7.6.7, created at 2024-11-20 18:11 +0000. Page navigation chrome omitted; the rendered source listing follows.]

1"""Utilities that are loosely related to core sparc functionalities 

+

2""" 

+

3import _thread 

+

4import io 

+

5import os 

+

6import re 

+

7import shutil 

+

8import signal 

+

9import subprocess 

+

10import sys 

+

11import tempfile 

+

12import threading 

+

13import time 

+

14from contextlib import contextmanager 

+

15from pathlib import Path 

+

16from typing import List, Optional, Union 

+

17from warnings import warn 

+

18 

+

19import numpy as np 

+

20import psutil 

+

21 

+

22from .api import SparcAPI 

+

23from .docparser import SparcDocParser 

+

24 

+

25 

+

26def deprecated(message): 

+

27 def decorator(func): 

+

28 def new_func(*args, **kwargs): 

+

29 warn( 

+

30 "Function {} is deprecated! {}".format(func.__name__, message), 

+

31 category=DeprecationWarning, 

+

32 ) 

+

33 return func(*args, **kwargs) 

+

34 

+

35 return new_func 

+

36 

+

37 return decorator 

+

38 

+

39 

+

40def compare_dict(d1, d2): 

+

41 """Helper function to compare dictionaries""" 

+

42 # Use symmetric difference to find keys which aren't shared 

+

43 # for python 2.7 compatibility 

+

44 if set(d1.keys()) ^ set(d2.keys()): 

+

45 return False 

+

46 

+

47 # Check for differences in values 

+

48 for key, value in d1.items(): 

+

49 if np.any(value != d2[key]): 

+

50 return False 

+

51 return True 

+

52 

+

53 

+

54def string2index(string: str) -> Union[int, slice, str]: 

+

55 """Convert index string to either int or slice 

+

56 This method is a copy of ase.io.formats.string2index 

+

57 """ 

+

58 # A quick fix for slice 

+

59 if isinstance(string, (list, slice)): 

+

60 return string 

+

61 if ":" not in string: 

+

62 # may contain database accessor 

+

63 try: 

+

64 return int(string) 

+

65 except ValueError: 

+

66 return string 

+

67 i: List[Optional[int]] = [] 

+

68 for s in string.split(":"): 

+

69 if s == "": 

+

70 i.append(None) 

+

71 else: 

+

72 i.append(int(s)) 

+

73 i += (3 - len(i)) * [None] 

+

74 return slice(*i) 

+

75 

+

76 

+

77def _find_default_sparc(): 

+

78 """Find the default sparc by $PATH and mpi location""" 

+

79 sparc_exe = shutil.which("sparc") 

+

80 

+

81 mpi_exe = shutil.which("mpirun") 

+

82 # TODO: more examples on pbs / lsf 

+

83 if mpi_exe is not None: 

+

84 try: 

+

85 num_cores = int( 

+

86 os.environ.get( 

+

87 "OMPI_COMM_WORLD_SIZE", 

+

88 os.environ.get( 

+

89 "OMPI_UNIVERSE_SIZE", 

+

90 os.environ.get("MPICH_RANK_REORDER_METHOD", ""), 

+

91 ).split(":")[-1], 

+

92 ) 

+

93 ) 

+

94 except Exception: 

+

95 num_cores = 1 

+

96 return sparc_exe, mpi_exe, num_cores 

+

97 

+

98 mpi_exe = shutil.which("srun") 

+

99 if mpi_exe is not None: 

+

100 # If srun is available, get the number of cores from the environment 

+

101 num_cores = int(os.environ.get("SLURM_JOB_CPUS_PER_NODE", 1)) 

+

102 return sparc_exe, mpi_exe, num_cores 

+

103 

+

104 return sparc_exe, None, 1 

+

105 

+

106 

+

107def h2gpts(h, cell_cv, idiv=4): 

+

108 """Convert a h-parameter (Angstrom) to gpts""" 

+

109 cell_cv = np.array(cell_cv) 

+

110 cell_lengths = np.linalg.norm(cell_cv, axis=1) 

+

111 grid = np.ceil(cell_lengths / h) 

+

112 grid = np.maximum(idiv, grid) 

+

113 return [int(a) for a in grid] 

+

114 

+

115 

+

116def cprint(content, color=None, bold=False, underline=False, **kwargs): 

+

117 """Color print wrapper for ansi terminal. 

+

118 Only a few color names are provided 

+

119 """ 

+

120 ansi_color = dict( 

+

121 HEADER="\033[95m", 

+

122 COMMENT="\033[90m", 

+

123 OKBLUE="\033[94m", 

+

124 OKGREEN="\033[92m", 

+

125 OKCYAN="\033[96m", 

+

126 WARNING="\033[93m", 

+

127 FAIL="\033[91m", 

+

128 ENDC="\033[0m", 

+

129 ) 

+

130 

+

131 style_codes = {"BOLD": "\033[1m", "UNDERLINE": "\033[4m"} 

+

132 

+

133 if color is None: 

+

134 output = content 

+

135 elif color.upper() in ansi_color.keys() and color.upper() != "ENDC": 

+

136 output = ansi_color[color.upper()] + content + ansi_color["ENDC"] 

+

137 else: 

+

138 raise ValueError( 

+

139 f"Unknown ANSI color name. Allowed values are {list(ansi_color.keys())}" 

+

140 ) 

+

141 

+

142 if bold: 

+

143 output = style_codes["BOLD"] + output + ansi_color["ENDC"] 

+

144 

+

145 if underline: 

+

146 output = style_codes["UNDERLINE"] + output + ansi_color["ENDC"] 

+

147 

+

148 print(output, **kwargs) 

+

149 return 

+

150 

+

151 

+

152def locate_api(json_file=None, doc_path=None): 

+

153 """Find the default api in the following order 

+

154 1) User-provided json file path 

+

155 2) User-provided path to the doc 

+

156 3) If none of the above is provided, try to use SPARC_DOC_PATH 

+

157 4) Fallback to the as-shipped json api 

+

158 """ 

+

159 if json_file is not None: 

+

160 api = SparcAPI(json_file) 

+

161 return api 

+

162 

+

163 if doc_path is None: 

+

164 doc_path = os.environ.get("SPARC_DOC_PATH", None) 

+

165 

+

166 if (doc_path is not None) and Path(doc_path).is_dir(): 

+

167 try: 

+

168 with tempfile.TemporaryDirectory() as tmpdir: 

+

169 tmpdir = Path(tmpdir) 

+

170 tmpfile = tmpdir / "parameters.json" 

+

171 with open(tmpfile, "w") as fd: 

+

172 fd.write( 

+

173 SparcDocParser.json_from_directory( 

+

174 Path(doc_path), include_subdirs=True 

+

175 ) 

+

176 ) 

+

177 api = SparcAPI(tmpfile) 

+

178 api.source["path"] = Path(doc_path).resolve().as_posix() 

+

179 api.source["type"] = "latex" 

+

180 return api 

+

181 except Exception as e: 

+

182 warn(f"Cannot load JSON schema from env {doc_path}, the error is {e}.") 

+

183 pass 

+

184 

+

185 api = SparcAPI() 

+

186 return api 

+

187 

+

188 

+

189# Utilities taken from vasp_interactive project 

+

190 

+

191 

+

192class TimeoutException(Exception): 

+

193 """Simple class for timeout""" 

+

194 

+

195 pass 

+

196 

+

197 

+

198@contextmanager 

+

199def time_limit(seconds): 

+

200 """Usage: 

+

201 try: 

+

202 with time_limit(60): 

+

203 do_something() 

+

204 except TimeoutException: 

+

205 raise 

+

206 """ 

+

207 

+

208 def signal_handler(signum, frame): 

+

209 raise TimeoutException("Timed out closing sparc process.") 

+

210 

+

211 signal.signal(signal.SIGALRM, signal_handler) 

+

212 signal.alarm(seconds) 

+

213 try: 

+

214 yield 

+

215 finally: 

+

216 signal.alarm(0) 

+

217 

+

218 

+

219class ProcessReturned(Exception): 

+

220 """Simple class for process that has returned""" 

+

221 

+

222 pass 

+

223 

+

224 

+

225@contextmanager 

+

226def monitor_process(self, interval=1.0): 

+

227 """Usage: 

+

228 try: 

+

229 with monitor_process(process): 

+

230 do_something() 

+

231 except TimeoutException: 

+

232 raise 

+

233 """ 

+

234 

+

235 def signal_handler(signum, frame): 

+

236 raise ProcessReturned( 

+

237 f"Process {self.process.pid} has returned with exit code {self.process.poll()}!" 

+

238 ) 

+

239 

+

240 def check_process(): 

+

241 while True: 

+

242 if self.process.poll() is not None: 

+

243 # signal.alarm(0) 

+

244 print("The process has exited") 

+

245 self.in_socket.close() 

+

246 print(self.in_socket) 

+

247 signal(signal.SIGALRM) 

+

248 raise ProcessReturned( 

+

249 f"Process {self.process.pid} has returned with exit code {self.process.poll()}!" 

+

250 ) 

+

251 time.sleep(interval) 

+

252 

+

253 if self.process is None: 

+

254 raise RuntimeError("No process selected!") 

+

255 

+

256 signal.signal(signal.SIGALRM, signal_handler) 

+

257 monitor = threading.Thread(target=check_process) 

+

258 monitor.start() 

+

259 try: 

+

260 yield 

+

261 finally: 

+

262 monitor.join() 

+

263 

+

264 

+

265def _find_mpi_process(pid, mpi_program="mpirun", sparc_program="sparc"): 

+

266 """Recursively search children processes with PID=pid and return the one 

+

267 that mpirun (or synonyms) are the main command. 

+

268 

+

269 If srun is found as the process, need to use `scancel` to pause / resume the job step 

+

270 """ 

+

271 allowed_names = set(["mpirun", "mpiexec", "orterun", "oshrun", "shmemrun"]) 

+

272 allowed_sparc_names = set(["sparc"]) 

+

273 if mpi_program: 

+

274 allowed_names.add(mpi_program) 

+

275 if sparc_program: 

+

276 allowed_sparc_names.add(sparc_program) 

+

277 try: 

+

278 process_list = [psutil.Process(pid)] 

+

279 except psutil.NoSuchProcess: 

+

280 warn( 

+

281 "Psutil cannot locate the pid. Your sparc program may have already exited." 

+

282 ) 

+

283 match = {"type": None, "process": None} 

+

284 return match 

+

285 

+

286 process_list.extend(process_list[0].children(recursive=True)) 

+

287 mpi_candidates = [] 

+

288 match = {"type": None, "process": None} 

+

289 for proc in process_list: 

+

290 name = proc.name() 

+

291 if name in ["srun"]: 

+

292 match["type"] = "slurm" 

+

293 match["process"] = _locate_slurm_step(program=sparc_program) 

+

294 break 

+

295 elif proc.name() in allowed_names: 

+

296 # are the mpi process's direct children sparc binaries? 

+

297 children = proc.children() 

+

298 if len(children) > 0: 

+

299 if children[0].name() in allowed_sparc_names: 

+

300 mpi_candidates.append(proc) 

+

301 if len(mpi_candidates) > 1: 

+

302 warn( 

+

303 "More than 1 mpi processes are created. This may be a bug. I'll use the last one" 

+

304 ) 

+

305 if len(mpi_candidates) > 0: 

+

306 match["type"] = "mpi" 

+

307 match["process"] = mpi_candidates[-1] 

+

308 

+

309 return match 

+

310 

+

311 

+

312def _get_slurm_jobid(): 

+

313 jobid = os.environ.get("SLURM_JOB_ID", None) 

+

314 if jobid is None: 

+

315 jobid = os.environ.get("SLURM_JOBID", None) 

+

316 return jobid 

+

317 

+

318 

+

319def _locate_slurm_step(program="sparc"): 

+

320 """If slurm job system is involved, search for the slurm step id 

+

321 that matches vasp_std (or other vasp commands) 

+

322 

+

323 Steps: 

+

324 1. Look for SLURM_JOB_ID in current env 

+

325 2. Use `squeue` to locate the sparc step (latest) 

+

326 

+

327 squeue 

+

328 """ 

+

329 allowed_names = set(["sparc"]) 

+

330 if program: 

+

331 allowed_names.add(program) 

+

332 jobid = _get_slurm_jobid() 

+

333 if jobid is None: 

+

334 # TODO: convert warn to logger 

+

335 warn(("Cannot locate the SLURM job id.")) 

+

336 return None 

+

337 # Only 2 column output (jobid and jobname) 

+

338 cmds = ["squeue", "-s", "--job", str(jobid), "-o", "%.30i %.30j"] 

+

339 proc = _run_process(cmds, capture_output=True) 

+

340 output = proc.stdout.decode("utf8").split("\n") 

+

341 # print(output) 

+

342 candidates = [] 

+

343 # breakpoint() 

+

344 for line in output[1:]: 

+

345 try: 

+

346 stepid, name = line.strip().split() 

+

347 except Exception: 

+

348 continue 

+

349 if any([v in name for v in allowed_names]): 

+

350 candidates.append(stepid) 

+

351 

+

352 if len(candidates) > 1: 

+

353 warn("More than 1 slurm steps are found. I'll use the most recent one") 

+

354 if len(candidates) > 0: 

+

355 proc = candidates[0] 

+

356 else: 

+

357 proc = None 

+

358 return proc 

+

359 

+

360 

+

361def _slurm_signal(stepid, sig=signal.SIGTSTP): 

+

362 if isinstance(sig, (str,)): 

+

363 sig = str(sig) 

+

364 elif isinstance(sig, (int,)): 

+

365 sig = signal.Signals(sig).name 

+

366 else: 

+

367 sig = sig.name 

+

368 cmds = ["scancel", "-s", sig, str(stepid)] 

+

369 proc = _run_process(cmds, capture_output=True) 

+

370 output = proc.stdout.decode("utf8").split("\n") 

+

371 return 

+

372 

+

373 

+

374def _run_process(commands, shell=False, print_cmd=True, cwd=".", capture_output=False): 

+

375 """Wrap around subprocess.run 

+

376 Returns the process object 

+

377 """ 

+

378 full_cmd = " ".join(commands) 

+

379 if print_cmd: 

+

380 print(" ".join(commands)) 

+

381 if shell is False: 

+

382 proc = subprocess.run( 

+

383 commands, shell=shell, cwd=cwd, capture_output=capture_output 

+

384 ) 

+

385 else: 

+

386 proc = subprocess.run( 

+

387 full_cmd, shell=shell, cwd=cwd, capture_output=capture_output 

+

388 ) 

+

389 if proc.returncode == 0: 

+

390 return proc 

+

391 else: 

+

392 raise RuntimeError(f"Running {full_cmd} returned error code {proc.returncode}") 
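As a quick sanity check of `h2gpts` above: the grid count per lattice vector is ceil(length / h), floored at `idiv`. For a 10 Å cubic cell with h = 0.3 Å that gives ceil(10 / 0.3) = 34 points per direction. A short snippet (assumes the `h2gpts` defined in the listing above is in scope):

import numpy as np

# ceil(10.0 / 0.3) = 34 > idiv = 4, so the floor does not kick in here
cell = np.eye(3) * 10.0
assert h2gpts(0.3, cell) == [34, 34, 34]

# For a tiny 1 Angstrom cell the idiv floor dominates: ceil(1 / 0.3) = 4
assert h2gpts(0.3, np.eye(3) * 1.0) == [4, 4, 4]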

diff --git a/_static/htmlcov/z_ef57e6186893c87e___init___py.html b/_static/htmlcov/z_ef57e6186893c87e___init___py.html
new file mode 100644
index 00000000..7666bbee
--- /dev/null
+++ b/_static/htmlcov/z_ef57e6186893c87e___init___py.html
@@ -0,0 +1,97 @@
[Coverage report page for sparc/sparc_parsers/__init__.py: 100% of 0 statements; coverage.py v7.6.7, created at 2024-11-20 18:11 +0000. The module is empty, so there is no source listing.]
diff --git a/_static/htmlcov/z_ef57e6186893c87e_aimd_py.html b/_static/htmlcov/z_ef57e6186893c87e_aimd_py.html
new file mode 100644
index 00000000..84f7088e
--- /dev/null
+++ b/_static/htmlcov/z_ef57e6186893c87e_aimd_py.html
@@ -0,0 +1,273 @@
[Coverage report page for sparc/sparc_parsers/aimd.py: 89% of 104 statements covered; coverage.py v7.6.7, created at 2024-11-20 18:11 +0000. Page navigation chrome omitted; the rendered source listing follows.]

1""" 

+

2Created on Thu Oct 18 14:16:21 2018 

+

3 

+

4Ben Comer (Georgia Tech) 

+

5 

+

6This file has been heavily modified since SPARC 0.1 

+

7 

+

8TODO: more descriptions about this file io parser 

+

9""" 

+

10from warnings import warn 

+

11 

+

12import numpy as np 

+

13from ase.units import AUT, Angstrom, Bohr, GPa, Hartree, fs 

+

14 

+

15# Safe wrappers for both string and fd 

+

16from ase.utils import reader, writer 

+

17 

+

18from ..api import SparcAPI 

+

19from .utils import strip_comments 

+

20 

+

21 

+

22@reader 

+

23def _read_aimd(fileobj): 

+

24 """Parse the aimd information Each geopt is similar to the static 

+

25 block, except that the field name is started by ':' The 

+

26 relaxations are separated by ':MDSTEP:' seperators 

+

27 

+

28 """ 

+

29 contents = fileobj.read() 

+

30 # label = get_label(fileobj, ".ion") 

+

31 # The geopt comments are simply discarded 

+

32 stripped, comments = strip_comments(contents) 

+

33 # Do not include the description lines 

+

34 data = [line for line in stripped if ":Desc" not in line] 

+

35 

+

36 # find the index for all atom type lines. They should be at the 

+

37 # top of their block 

+

38 step_bounds = [i for i, x in enumerate(data) if ":MDSTEP:" in x] + [len(data)] 

+

39 raw_aimd_blocks = [ 

+

40 data[start:end] for start, end in zip(step_bounds[:-1], step_bounds[1:]) 

+

41 ] 

+

42 aimd_steps = [_read_aimd_step(step) for step in raw_aimd_blocks] 

+

43 

+

44 return {"aimd": aimd_steps} 

+

45 

+

46 

+

47def _read_aimd_step(raw_aimd_text): 

+

48 """Parse a geopt step and compose the data dict 

+

49 

+

50 Arguments 

+

51 raw_aimd_text: list of lines within the step 

+

52 

+

53 Most values are just presented in their output format, 

+

54 higher level function calling _read_aimd_step and _read_aimd 

+

55 should implement how to use the values, 

+

56 e.g. E_tot = E_tot_per_atom * N_atoms 

+

57 

+

58 """ 

+

59 header, body = raw_aimd_text[0], raw_aimd_text[1:] 

+

60 if ":MDSTEP:" not in header: 

+

61 raise ValueError("Wrong aimd format! The :MDSTEP: label is missing.") 

+

62 # Geopt file uses 1-indexed step names, convert to 0-indexed 

+

63 step = int(header.split(":MDSTEP:")[-1]) - 1 

+

64 print("Step ", step) 

+

65 bounds = [i for i, x in enumerate(body) if ":" in x] + [len(body)] 

+

66 blocks = [body[start:end] for start, end in zip(bounds[:-1], bounds[1:])] 

+

67 data = {} 

+

68 for block in blocks: 

+

69 header_block, body_block = block[0], block[1:] 

+

70 header_name = header_block.split(":")[1] 

+

71 header_data = header_block.split(":")[-1].strip() 

+

72 if len(header_data) > 0: 

+

73 block_raw_data = [header_data] + body_block 

+

74 else: 

+

75 block_raw_data = body_block 

+

76 # import pdb; pdb.set_trace() 

+

77 raw_value = np.genfromtxt(block_raw_data, dtype=float) 

+

78 # The type definitions from MD may be treated from API again? 

+

79 if header_name == "R": 

+

80 name = "positions" 

+

81 value = raw_value.reshape((-1, 3)) * Bohr 

+

82 elif header_name == "V": 

+

83 name = "velocities" 

+

84 value = raw_value.reshape((-1, 3)) * Bohr / AUT 

+

85 elif header_name == "F": 

+

86 name = "forces" 

+

87 value = raw_value.reshape((-1, 3)) * Hartree / Bohr 

+

88 elif header_name == "MDTM": 

+

89 # This is not the md integration time! 

+

90 name = "md_walltime" 

+

91 value = float(raw_value) 

+

92 elif header_name == "TEL": 

+

93 name = "electron temp" 

+

94 value = float(raw_value) 

+

95 elif header_name == "TIO": 

+

96 name = "ion temp" 

+

97 value = float(raw_value) 

+

98 elif header_name == "TEN": 

+

99 # Note it's the total energy per atom! 

+

100 # TODO: shall we convert to ase fashion? 

+

101 name = "total energy per atom" 

+

102 value = float(raw_value) * Hartree 

+

103 elif header_name == "KEN": 

+

104 # Note it's the total energy per atom! 

+

105 # TODO: shall we convert to ase fashion? 

+

106 name = "kinetic energy per atom" 

+

107 value = float(raw_value) * Hartree 

+

108 elif header_name == "KENIG": 

+

109 # Note it's the total energy per atom! 

+

110 # TODO: shall we convert to ase fashion? 

+

111 name = "kinetic energy (ideal gas) per atom" 

+

112 value = float(raw_value) * Hartree 

+

113 elif header_name == "FEN": 

+

114 # Note it's the total energy per atom! 

+

115 # TODO: shall we convert to ase fashion? 

+

116 name = "free energy per atom" 

+

117 value = float(raw_value) * Hartree 

+

118 elif header_name == "UEN": 

+

119 # Note it's the total energy per atom! 

+

120 # TODO: shall we convert to ase fashion? 

+

121 name = "internal energy per atom" 

+

122 value = float(raw_value) * Hartree 

+

123 elif header_name == "TSEN": 

+

124 # Note it's the total energy per atom! 

+

125 # TODO: shall we convert to ase fashion? 

+

126 name = "entropy*T per atom" 

+

127 value = float(raw_value) * Hartree 

+

128 elif header_name == "STRESS": 

+

129 # Same rule as STRESS in geopt 

+

130 # no conversion to Voigt form yet 

+

131 dim = raw_value.shape[0] 

+

132 if dim == 3: 

+

133 name = "stress" 

+

134 value = raw_value * GPa 

+

135 elif dim == 2: 

+

136 name = "stress_2d" 

+

137 value = raw_value * Hartree / Bohr**2 

+

138 elif dim == 1: 

+

139 name = "stress_1d" 

+

140 value = raw_value * Hartree / Bohr 

+

141 else: 

+

142 raise ValueError("Incorrect stress matrix dimension!") 

+

143 elif header_name == "STRIO": 

+

144 # Don't do the volume conversion now 

+

145 name = "stress (ion-kinetic)" 

+

146 value = raw_value * GPa 

+

147 elif header_name == "PRES": 

+

148 # Don't do the volume conversion now 

+

149 name = "pressure" 

+

150 value = raw_value * GPa 

+

151 elif header_name == "PRESIO": 

+

152 # Don't do the volume conversion now 

+

153 name = "pressure (ion-kinetic)" 

+

154 value = raw_value * GPa 

+

155 elif header_name == "PRESIG": 

+

156 # Don't do the volume conversion now 

+

157 name = "pressure (ideal gas)" 

+

158 value = raw_value * GPa 

+

159 elif header_name in ("AVGV", "MAXV", "MIND"): 

+

160 warn(f"MD output keyword {header_name} will not be parsed.") 

+

161 value = None 

+

162 else: 

+

163 warn(f"MD output keyword {header_name} not known to SPARC. " "Ignore.") 

+

164 value = None 

+

165 if value is not None: 

+

166 data[name] = value 

+

167 data["step"] = step 

+

168 return data 

+

169 

+

170 

+

171@writer 

+

172def _write_aimd( 

+

173 fileobj, 

+

174 data_dict, 

+

175): 

+

176 raise NotImplementedError("Writing aimd file from SPARC-X-API " "not supported!") 
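To make the block structure concrete, here is a hedged sketch of feeding `_read_aimd` an in-memory two-step `.aimd` fragment. The numeric values are fabricated for illustration; the field names and unit handling follow the parser branches above, and `io.StringIO` works because the `@reader` wrapper passes file-like objects through:

import io

fragment = """:MDSTEP: 1
:TEN: -4.1646102530E+00
:TIO: 8.0000000000E+02
:MDSTEP: 2
:TEN: -4.1646102983E+00
:TIO: 7.8520000000E+02
"""
steps = _read_aimd(io.StringIO(fragment))["aimd"]
assert len(steps) == 2
assert steps[0]["step"] == 0  # 1-indexed in the file, 0-indexed here
assert "total energy per atom" in steps[0] and "ion temp" in steps[0]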

diff --git a/_static/htmlcov/z_ef57e6186893c87e_atoms_py.html b/_static/htmlcov/z_ef57e6186893c87e_atoms_py.html
new file mode 100644
index 00000000..4d48b614
--- /dev/null
+++ b/_static/htmlcov/z_ef57e6186893c87e_atoms_py.html
@@ -0,0 +1,494 @@
[Coverage report page for sparc/sparc_parsers/atoms.py: 97% of 195 statements covered; coverage.py v7.6.7, created at 2024-11-20 18:11 +0000. Page navigation chrome omitted; the rendered source listing follows.]

1"""Convert ase atoms to structured dict following SPARC format 

+

2and vice versa 

+

3""" 

+

4 

+

5from copy import deepcopy 

+

6from warnings import warn 

+

7 

+

8import numpy as np 

+

9from ase import Atom, Atoms 

+

10from ase.constraints import FixAtoms, FixedLine, FixedPlane 

+

11from ase.units import Bohr 

+

12 

+

13from .inpt import _inpt_cell_to_ase_cell 

+

14from .ion import _ion_coord_to_ase_pos 

+

15from .pseudopotential import find_pseudo_path 

+

16from .utils import make_reverse_mapping 

+

17 

+

18# from .sparc_parsers.ion import read_ion, write_ion 

+

19 

+

20 

+

21def atoms_to_dict( 

+

22 atoms, 

+

23 sort=True, 

+

24 direct=False, 

+

25 wrap=False, 

+

26 ignore_constraints=False, 

+

27 psp_dir=None, 

+

28 pseudopotentials={}, 

+

29 comments="", 

+

30): 

+

31 """Given an ASE Atoms object, convert to SPARC ion and inpt data dict 

+

32 

+

33 psp_dir: search path for psp8 files pseudopotentials: a mapping 

+

34 between symbol and psp file names, similar to QE like 'Na': 

+

35 'Na-pbe.psp8'. If the file name does not contain path information, 

+

36 use psp_dir / filname, otherwise use the file path. 

+

37 

+

38 We don't do any env variable replace ment for psp_dir, it should be handled by the 

+

39 explicit _write_ion_and_inpt() function 

+

40 

+

41 At this step, the copy_psp is not applied, since we don't yet know the location to write 

+

42 

+

43 """ 

+

44 # Step 1: if we should sort the atoms? 

+

45 # origin_atoms = atoms.copy() 

+

46 # sort = True re-calculate the sorting information 

+

47 # sort = list re-uses the sorting information 

+

48 if sort: 

+

49 if isinstance(sort, list): 

+

50 sort_ = np.array(sort) 

+

51 resort_ = make_reverse_mapping(sort_) 

+

52 else: 

+

53 sort_ = np.argsort(atoms.get_chemical_symbols(), kind="stable") 

+

54 resort_ = make_reverse_mapping(sort_) 

+

55 # This is the sorted atoms object 

+

56 atoms = atoms[sort_] 

+

57 else: 

+

58 sort_ = [] 

+

59 resort_ = [] 

+

60 

+

61 # Step 2: determine the counts of each element 

+

62 symbol_counts = count_symbols(atoms.get_chemical_symbols()) 

+

63 write_spin = np.any(atoms.get_initial_magnetic_moments() != 0) 

+

64 has_charge = np.any(atoms.get_initial_charges() != 0) 

+

65 if has_charge: 

+

66 warn( 

+

67 "SPARC currently doesn't support changing total number of electrons! " 

+

68 "via nomimal charges. The initial charges in the structure will be ignored." 

+

69 ) 

+

70 

+

71 relax_mask = relax_from_all_constraints(atoms.constraints, len(atoms)) 

+

72 write_relax = (len(relax_mask) > 0) and (not ignore_constraints) 

+

73 

+

74 atom_blocks = [] 

+

75 # Step 3: write each block 

+

76 for symbol, start, end in symbol_counts: 

+

77 block_dict = {} 

+

78 block_dict["ATOM_TYPE"] = symbol 

+

79 block_dict["N_TYPE_ATOM"] = end - start 

+

80 # TODO: make pseudo finding work 

+

81 # TODO: write comment that psp file may not exist 

+

82 try: 

+

83 psp_file = find_pseudo_path(symbol, psp_dir, pseudopotentials) 

+

84 # TODO: add option to determine if psp file exists! 

+

85 block_dict["PSEUDO_POT"] = psp_file.resolve().as_posix() 

+

86 

+

87 except Exception: 

+

88 warn( 

+

89 ( 

+

90 f"Failed to find pseudo potential file for symbol {symbol}. I will use a dummy file name" 

+

91 ) 

+

92 ) 

+

93 block_dict[ 

+

94 "PSEUDO_POT" 

+

95 ] = f"{symbol}-dummy.psp8 # Please replace with real psp file name!" 

+

96 # TODO: atomic mass? 

+

97 p_atoms = atoms[start:end] 

+

98 if direct: 

+

99 pos = p_atoms.get_scaled_positions(wrap=wrap) 

+

100 block_dict["COORD_FRAC"] = pos 

+

101 else: 

+

102 # TODO: should we use default converter? 

+

103 pos = p_atoms.get_positions(wrap=wrap) / Bohr 

+

104 block_dict["COORD"] = pos 

+

105 if write_spin: 

+

106 # TODO: should we process atoms with already calculated magmoms? 

+

107 n_atom = len(p_atoms) 

+

108 block_dict["SPIN"] = p_atoms.get_initial_magnetic_moments().reshape(n_atom,-1) 

+

109 if write_relax: 

+

110 relax_this_block = relax_mask[start:end] 

+

111 block_dict["RELAX"] = relax_this_block 

+

112 # TODO: get write_relax 

+

113 atom_blocks.append(block_dict) 

+

114 

+

115 # Step 4: inpt part 

+

116 # TODO: what if atoms does not have cell? 

+

117 cell_au = atoms.cell / Bohr 

+

118 inpt_blocks = {"LATVEC": cell_au, "LATVEC_SCALE": [1.0, 1.0, 1.0]} 

+

119 

+

120 # Step 5: retrieve boundary condition information 

+

121 # TODO: have to use space to join the single keywords 

+

122 # breakpoint() 

+

123 inpt_blocks.update(atoms_bc_to_sparc(atoms)) 

+

124 

+

125 if not isinstance(comments, list): 

+

126 comments = comments.split("\n") 

+

127 ion_data = { 

+

128 "atom_blocks": atom_blocks, 

+

129 "comments": comments, 

+

130 "sorting": {"sort": sort_, "resort": resort_}, 

+

131 } 

+

132 inpt_data = {"params": inpt_blocks, "comments": []} 

+

133 return {"ion": ion_data, "inpt": inpt_data} 

+

134 

+

135 

+

136def dict_to_atoms(data_dict): 

+

137 """Given a SPARC struct dict, construct the ASE atoms object 

+

138 

+

139 Note: this method supports only 1 Atoms at a time 

+

140 """ 

+

141 ase_cell = _inpt_cell_to_ase_cell(data_dict) 

+

142 new_data_dict = deepcopy(data_dict) 

+

143 _ion_coord_to_ase_pos(new_data_dict, ase_cell) 

+

144 # Now the real thing to construct an atom object 

+

145 atoms = Atoms() 

+

146 atoms.cell = ase_cell 

+

147 relax_dict = {} 

+

148 

+

149 atoms_count = 0 

+

150 atom_blocks = new_data_dict["ion"]["atom_blocks"] 

+

151 for block in atom_blocks: 

+

152 element = block["ATOM_TYPE"] 

+

153 positions = block["_ase_positions"] 

+

154 if positions.ndim == 1: 

+

155 positions = positions.reshape(1, -1) 

+

156 # Consider moving spins to another function 

+

157 spins = block.get("SPIN", None) 

+

158 if spins is None: 

+

159 spins = np.zeros(len(positions)) 

+

160 for pos, spin in zip(positions, spins): 

+

161 # TODO: What about charge? 

+

162 atoms.append(Atom(symbol=element, position=pos, magmom=spin)) 

+

163 relax = block.get("RELAX", np.array([])) 

+

164 # Reshape relax into 2d array 

+

165 relax = relax.reshape((-1, 3)) 

+

166 for i, r in enumerate(relax, start=atoms_count): 

+

167 relax_dict[i] = r 

+

168 atoms_count += len(positions) 

+

169 

+

170 if "sorting" in data_dict["ion"]: 

+

171 resort = data_dict["ion"]["sorting"].get("resort", np.arange(len(atoms))) 

+

172 # Resort may be None 

+

173 if len(resort) == 0: 

+

174 resort = np.arange(len(atoms)) 

+

175 else: 

+

176 resort = np.arange(len(atoms)) 

+

177 

+

178 if len(resort) != len(atoms): 

+

179 # TODO: new exception 

+

180 raise ValueError( 

+

181 "Length of resort mapping is different from the number of atoms!" 

+

182 ) 

+

183 # TODO: check if this mapping is correct 

+

184 # print(relax_dict) 

+

185 sort = make_reverse_mapping(resort) 

+

186 # print(resort, sort) 

+

187 sorted_relax_dict = {sort[i]: r for i, r in relax_dict.items()} 

+

188 # Now we do a sort on the atom indices. The atom positions read from 

+

189 # .ion correspond to the `sort` and we use `resort` to transform 

+

190 

+

191 # TODO: should we store the sorting information in SparcBundle? 

+

192 

+

193 atoms = atoms[resort] 

+

194 constraints = constraints_from_relax(sorted_relax_dict) 

+

195 atoms.constraints = constraints 

+

196 

+

197 # @2023.08.31 add support for PBC 

+

198 # TODO: move to a more modular function 

+

199 # TODO: Datatype for BC in the API, should it be string, or string array? 

+

200 sparc_bc = new_data_dict["inpt"]["params"].get("BC", "P P P").split() 

+

201 twist_angle = float(new_data_dict["inpt"]["params"].get("TWIST_ANGLE", 0)) 

+

202 modify_atoms_bc(atoms, sparc_bc, twist_angle) 

+

203 

+

204 return atoms 

+

205 

+

206 

+

207def count_symbols(symbols): 

+

208 """Count the number of consecutive elements. 

+

209 Output tuple is: element, start, end 

+

210 For example, "CHCHHO" --> [('C', 0, 1), ('H', 1, 2), ('C', 2, 3), ('H', 3, 5), ('O', 5, 6)] 

+

211 """ 

+

212 counts = [] 

+

213 current_count = 1 

+

214 current_symbol = symbols[0] 

+

215 for i, symbol in enumerate(symbols[1:], start=1): 

+

216 if symbol == current_symbol: 

+

217 current_count += 1 

+

218 else: 

+

219 counts.append((current_symbol, i - current_count, i)) 

+

220 current_count = 1 

+

221 current_symbol = symbol 

+

222 end = len(symbols) 

+

223 counts.append((current_symbol, end - current_count, end)) 

+

224 return counts 

+

225 

+

226 

+

227def constraints_from_relax(relax_dict): 

+

228 """ 

+

229 Convert the SPARC RELAX fields to ASE's constraints 

+

230 

+

231 Arguments 

+

232 relax: bool vector of size Nx3, i.e. [[True, True, True], [True, False, False]] 

+

233 

+

234 Supported ase constraints will be FixAtoms, FixedLine and FixedPlane. 

+

235 For constraints in the same direction, all indices will be gathered. 

+

236 

+

237 Note: ase>=3.22 will have FixedLine and FixedPlane accepting only 1 index at a time! 

+

238 

+

239 The relax vector must be already sorted! 

+

240 """ 

+

241 if len(relax_dict) == 0: 

+

242 return [] 

+

243 

+

244 cons_list = [] 

+

245 # gathered_indices is an intermediate dict that contains 

+

246 # key: relax mask if not all True 

+

247 # indices: indices that share the same mask 

+

248 # 

+

249 gathered_indices = {} 

+

250 

+

251 # breakpoint() 

+

252 for i, r in relax_dict.items(): 

+

253 r = np.array(r) 

+

254 r = tuple(np.ndarray.tolist(r.astype(bool))) 

+

255 if np.all(r): 

+

256 continue 

+

257 

+

258 if r not in gathered_indices: 

+

259 gathered_indices[r] = [i] 

+

260 else: 

+

261 gathered_indices[r].append(i) 

+

262 

+

263 for relax_type, indices in gathered_indices.items(): 

+

264 degree_freedom = 3 - relax_type.count(False) 

+

265 

+

266 # DegreeF == 0 --> fix atom 

+

267 if degree_freedom == 0: 

+

268 cons_list.append(FixAtoms(indices=indices)) 

+

269 # DegreeF == 1 --> move along line, fix line 

+

270 elif degree_freedom == 1: 

+

271 for ind in indices: 

+

272 cons_list.append(FixedLine(ind, np.array(relax_type).astype(int))) 

+

273 # DegreeF == 1 --> move along line, fix plane 

+

274 elif degree_freedom == 2: 

+

275 for ind in indices: 

+

276 cons_list.append(FixedPlane(ind, (~np.array(relax_type)).astype(int))) 

+

277 return cons_list 

+

278 

+

279 

+

280def relax_from_constraint(constraint): 

+

281 """returns dict of {atom_index: relax_dimensions} for the given constraint""" 

+

282 type_name = constraint.todict()["name"] 

+

283 if isinstance(constraint, FixAtoms): 

+

284 dimensions = [False] * 3 

+

285 expected_free = 0 

+

286 elif isinstance(constraint, FixedLine): 

+

287 # Only supports orthogonal basis! 

+

288 dimensions = [d == 1 for d in constraint.dir] 

+

289 expected_free = 1 

+

290 elif isinstance(constraint, FixedPlane): 

+

291 dimensions = [d != 1 for d in constraint.dir] 

+

292 expected_free = 2 

+

293 else: 

+

294 warn( 

+

295 f"The constraint type {type_name} is not supported by" 

+

296 " SPARC's .ion format. This constraint will be" 

+

297 " ignored" 

+

298 ) 

+

299 return {} 

+

300 if dimensions.count(True) != expected_free: 

+

301 warn( 

+

302 "SPARC's .ion filetype can only support freezing entire " 

+

303 f"dimensions (x,y,z). The {type_name} constraint will be ignored" 

+

304 ) 

+

305 return {} 

+

306 return {i: dimensions for i in constraint.get_indices()} # atom indices 

+

307 

+

308 

+

309def relax_from_all_constraints(constraints, natoms): 

+

310 """converts ASE atom constraints to SPARC relaxed dimensions for the atoms""" 

+

311 if len(constraints) == 0: 

+

312 return [] 

+

313 

+

314 relax = [ 

+

315 [True, True, True], 

+

316 ] * natoms # assume relaxed in all dimensions for all atoms 

+

317 for c in constraints: 

+

318 for atom_index, rdims in relax_from_constraint(c).items(): 

+

319 if atom_index >= natoms: 

+

320 raise ValueError( 

+

321 ( 

+

322 "Number of total atoms smaller than the constraint indices!\n" 

+

323 "Please check your input" 

+

324 ) 

+

325 ) 

+

326 # There might be multiple constraints applied on one index, 

+

327 # always make it more constrained 

+

328 relax[atom_index] = list(np.bitwise_and(relax[atom_index], rdims)) 

+

329 return relax 

+

330 

+

331 

+

332def modify_atoms_bc(atoms, sparc_bc, twist_angle=0): 

+

333 """Modify the atoms boundary conditions in-place from the bc information 

+

334 sparc_bc is a keyword from inpt 

+

335 twist_angle is the helix twist angle in inpt 

+

336 

+

337 conversion rules: 

+

338 BC: P --> pbc=True 

+

339 BC: D, H, C --> pbc=False 

+

340 """ 

+

341 ase_bc = [] 

+

342 # print(sparc_bc, type(sparc_bc)) 

+

343 for bc_ in sparc_bc: 

+

344 if bc_.upper() in ["C", "H"]: 

+

345 warn( 

+

346 ( 

+

347 "Parsing SPARC's helix or cyclic boundary conditions" 

+

348 " into ASE atoms is only partially supported. " 

+

349 "Saving the atoms object into other format may cause " 

+

350 "data-loss of the SPARC-specific BC information." 

+

351 ) 

+

352 ) 

+

353 pbc = ( 

+

354 False # Do not confuse ase-gui, we'll manually handle the visualization 

+

355 ) 

+

356 elif bc_.upper() == "D": 

+

357 pbc = False 

+

358 elif bc_.upper() == "P": 

+

359 pbc = True 

+

360 else: 

+

361 raise ValueError("Unknown BC keyword values!") 

+

362 ase_bc.append(pbc) 

+

363 atoms.info["sparc_bc"] = [bc_.upper() for bc_ in sparc_bc] 

+

364 if twist_angle != 0: 

+

365 atoms.info["twist_angle (rad/Bohr)"] = twist_angle 

+

366 atoms.pbc = ase_bc 

+

367 return 

+

368 

+

369 

+

370def atoms_bc_to_sparc(atoms): 

+

371 """Use atoms' internal pbc and info to construct inpt blocks 

+

372 

+

373 Returns: 

+

374 a dict containing 'BC' or 'TWIST_ANGLE' 

+

375 """ 

+

376 sparc_bc = ["P" if bc_ else "D" for bc_ in atoms.pbc] 

+

377 

+

378 # If "sparc_bc" info is stored in the atoms object, convert again 

+

379 if "sparc_bc" in atoms.info.keys(): 

+

380 converted_bc = [] 

+

381 stored_sparc_bc = atoms.info["sparc_bc"] 

+

382 for bc1, bc2 in zip(sparc_bc, stored_sparc_bc): 

+

383 # We store helix and cyclic BCs as non-periodic in ase-atoms 

+

384 print(bc1, bc2) 

+

385 if ((bc1 == "D") and (bc2 != "P")) or ((bc1 == "P") and (bc2 == "P")): 

+

386 converted_bc.append(bc2) 

+

387 else: 

+

388 raise ValueError( 

+

389 "Boundary conditions stored in ASE " 

+

390 "atoms.pbc and atoms.info['sparc_bc'] " 

+

391 "are different!" 

+

392 ) 

+

393 sparc_bc = converted_bc 

+

394 block = {"BC": " ".join(sparc_bc)} 

+

395 if "twist_angle" in atoms.info.keys(): 

+

396 block["TWIST_ANGLE"] = atoms.info["twist_angle (rad/Bohr)"] 

+

397 return block 
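The constraint helpers above round-trip between ASE constraints and SPARC RELAX masks. An illustrative check (assumes ASE is installed and the functions from the listing above are in scope): a FixAtoms constraint becomes an all-False RELAX row, and `constraints_from_relax` recovers an equivalent FixAtoms.

from ase.build import bulk
from ase.constraints import FixAtoms

atoms = bulk("Cu") * (2, 1, 1)
atoms.set_constraint(FixAtoms(indices=[0]))

relax = relax_from_all_constraints(atoms.constraints, len(atoms))
assert relax[0] == [False, False, False]  # atom 0 fully fixed
assert relax[1] == [True, True, True]     # atom 1 fully relaxed

cons = constraints_from_relax({i: r for i, r in enumerate(relax)})
assert isinstance(cons[0], FixAtoms)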

diff --git a/_static/htmlcov/z_ef57e6186893c87e_geopt_py.html b/_static/htmlcov/z_ef57e6186893c87e_geopt_py.html
new file mode 100644
index 00000000..8ee598fa
--- /dev/null
+++ b/_static/htmlcov/z_ef57e6186893c87e_geopt_py.html
@@ -0,0 +1,240 @@
[Coverage report page for sparc/sparc_parsers/geopt.py: 88% of 76 statements covered; coverage.py v7.6.7, created at 2024-11-20 18:11 +0000. Page navigation chrome omitted; the rendered source listing follows.]

1""" 

+

2Created on Thu Oct 18 14:16:21 2018 

+

3 

+

4Ben Comer (Georgia Tech) 

+

5 

+

6This file has been heavily modified since SPARC 0.1 

+

7 

+

8TODO: more descriptions about this file io parser 

+

9""" 

+

10from warnings import warn 

+

11 

+

12import numpy as np 

+

13from ase.units import Bohr, GPa, Hartree 

+

14 

+

15# Safe wrappers for both string and fd 

+

16from ase.utils import reader, writer 

+

17 

+

18from ..api import SparcAPI 

+

19from .utils import strip_comments 

+

20 

+

21# TODO: should allow user to select the api 

+

22defaultAPI = SparcAPI() 

+

23 

+

24 

+

25@reader 

+

26def _read_geopt(fileobj): 

+

27 """ 

+

28 Parse the geopt information 

+

29 Each geopt is similar to the static block, except that the field name is started by 

+

30 ':' 

+

31 The relaxations are separated by ':RELAXSTEP:' seperators 

+

32 """ 

+

33 contents = fileobj.read() 

+

34 # label = get_label(fileobj, ".ion") 

+

35 # The geopt comments are simply discarded 

+

36 data, comments = strip_comments(contents) 

+

37 

+

38 # find the index for all atom type lines. They should be at the 

+

39 # top of their block 

+

40 step_bounds = [i for i, x in enumerate(data) if ":RELAXSTEP:" in x] + [len(data)] 

+

41 raw_geopt_blocks = [ 

+

42 data[start:end] for start, end in zip(step_bounds[:-1], step_bounds[1:]) 

+

43 ] 

+

44 geopt_steps = [_read_geopt_step(step) for step in raw_geopt_blocks] 

+

45 

+

46 return {"geopt": geopt_steps} 

+

47 

+

48 

+

49def _read_geopt_step(raw_step_text): 

+

50 """Parse a geopt step and compose the data dict 

+

51 

+

52 Arguments 

+

53 raw_step_text: list of lines within the step 

+

54 """ 

+

55 header, body = raw_step_text[0], raw_step_text[1:] 

+

56 if ":RELAXSTEP:" not in header: 

+

57 raise ValueError("Wrong geopt format! The :RELAXSTEP: label is missing.") 

+

58 # Geopt file uses 1-indexed step names, convert to 0-indexed 

+

59 step = int(header.split(":RELAXSTEP:")[-1]) - 1 

+

60 bounds = [i for i, x in enumerate(body) if ":" in x] + [len(body)] 

+

61 blocks = [body[start:end] for start, end in zip(bounds[:-1], bounds[1:])] 

+

62 data = {} 

+

63 for block in blocks: 

+

64 header_block, body_block = block[0], block[1:] 

+

65 header_name = header_block.split(":")[1] 

+

66 header_data = header_block.split(":")[-1].strip() 

+

67 if len(header_data) > 0: 

+

68 block_raw_data = [header_data] + body_block 

+

69 else: 

+

70 block_raw_data = body_block 

+

71 # import pdb; pdb.set_trace() 

+

72 raw_value = np.genfromtxt(block_raw_data, dtype=float) 

+

73 if "R(Bohr)" in header_name: 

+

74 name = "positions" 

+

75 value = raw_value.reshape((-1, 3)) * Bohr 

+

76 elif "E(Ha)" in header_name: 

+

77 name = "energy" 

+

78 value = float(raw_value) * Hartree 

+

79 elif "F(Ha/Bohr)" in header_name: 

+

80 name = "forces" 

+

81 value = raw_value.reshape((-1, 3)) * Hartree / Bohr 

+

82 elif "CELL" in header_name: 

+

83 name = "cell" 

+

84 value = raw_value * Bohr 

+

85 elif "VOLUME" in header_name: 

+

86 name = "volume" 

+

87 value = raw_value * Bohr**3 

+

88 elif "LATVEC" in header_name: 

+

89 # TODO: the LATVEC is ambiguous. Are the results a unit cell, or full cell? 

+

90 name = "latvec" 

+

91 value = raw_value * Bohr 

+

92 elif "STRESS" in header_name: 

+

93 # Stress handling in geopt output can be different 

+

94 # on low-dimensional systems. If the stress matrix is 3x3, 

+

95 # the unit is GPa, while lower dimensional stress matrices 

+

96 # are using Hartree / Bohr**2 or Hartree / Bohr 

+

97 dim = raw_value.shape[0] 

+

98 if dim == 3: 

+

99 name = "stress" 

+

100 stress_ev_a3 = raw_value * GPa 

+

101 # Standard stress value, use Voigt representation 

+

102 value = np.array( 

+

103 [ 

+

104 stress_ev_a3[0, 0], 

+

105 stress_ev_a3[1, 1], 

+

106 stress_ev_a3[2, 2], 

+

107 stress_ev_a3[1, 2], 

+

108 stress_ev_a3[0, 2], 

+

109 stress_ev_a3[0, 1], 

+

110 ] 

+

111 ) 

+

112 elif dim == 2: 

+

113 name = "stress_2d" 

+

114 value = raw_value * Hartree / Bohr**2 

+

115 elif dim == 1: 

+

116 name = "stress_1d" 

+

117 value = raw_value * Hartree / Bohr 

+

118 else: 

+

119 raise ValueError("Incorrect stress matrix dimension!") 

+

120 else: 

+

121 warn( 

+

122 f"Field {header_name} is not known to geopt! I'll use the results as is." 

+

123 ) 

+

124 name = header_name 

+

125 value = raw_value 

+

126 data[name] = value 

+

127 # Special treatment for latvec & cell 

+

128 if ("cell" in data) and ("latvec" in data): 

+

129 # TODO: check 

+

130 cell_, lat_ = data["cell"], data["latvec"] 

+

131 unit_lat = lat_ / np.linalg.norm(lat_, axis=1, keepdims=True) 

+

132 cell = (unit_lat.T * cell_).T 

+

133 data["ase_cell"] = cell 

+

134 data["step"] = step 

+

135 return data 

+

136 

+

137 

+

138@writer 

+

139def _write_geopt( 

+

140 fileobj, 

+

141 data_dict, 

+

142): 

+

143 raise NotImplementedError("Writing geopt file from SPARC-X-API not supported!") 

+
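A hedged sketch of consuming the parser output; the file name is hypothetical and the available fields depend on the SPARC print flags:

# Hypothetical example: inspect the final relaxation step of a .geopt file
steps = _read_geopt("sparc_calc.geopt")["geopt"]
final = steps[-1]
print(final["step"])          # 0-indexed step counter
print(final["energy"])        # total energy in eV (converted from Ha)
print(final["forces"].shape)  # (natoms, 3) in eV/Angstrom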
diff --git a/_static/htmlcov/z_ef57e6186893c87e_inpt_py.html b/_static/htmlcov/z_ef57e6186893c87e_inpt_py.html
new file mode 100644
index 00000000..9c669715
--- /dev/null
+++ b/_static/htmlcov/z_ef57e6186893c87e_inpt_py.html
Coverage for sparc/sparc_parsers/inpt.py: 94% (54 statements; coverage.py v7.6.7, created at 2024-11-20 18:11 +0000)
import numpy as np
from ase.units import Bohr

# Safe wrappers for both string and fd
from ase.utils import reader, writer

from ..api import SparcAPI
from .utils import read_block_input, strip_comments

defaultAPI = SparcAPI()


@reader
def _read_inpt(fileobj, validator=defaultAPI):
    contents = fileobj.read()
    # label = get_label(fileobj, ".ion")
    data, comments = strip_comments(contents)
    # We do not read the cell at this time!

    # Parse the key-value blocks from the cleaned lines
    inpt_blocks = read_block_input(data, validator=validator)
    return {"inpt": {"params": inpt_blocks, "comments": comments}}


@writer
def _write_inpt(fileobj, data_dict, validator=defaultAPI):
    if "inpt" not in data_dict:
        raise ValueError("Your dict does not contain an inpt section!")

    inpt_dict = data_dict["inpt"]

    if "params" not in inpt_dict:
        raise ValueError("Input dict for inpt file does not have a `params` field!")

    comments = inpt_dict.get("comments", [])
    banner = "Input File Generated By SPARC ASE Calculator"
    if len(comments) == 0:
        comments = [banner]
    elif "ASE" not in comments[0]:
        comments = [banner] + comments
    for line in comments:
        fileobj.write(f"# {line}\n")
    fileobj.write("\n")
    params = inpt_dict["params"]
    for key, val in params.items():
        # TODO: can we add a multiline argument?
        val_string = validator.convert_value_to_string(key, val)
        if (val_string.count("\n") > 0) or (
            key
            in [
                "LATVEC",
            ]
        ):
            output = f"{key}:\n{val_string}\n"
        else:
            output = f"{key}: {val_string}\n"
        fileobj.write(output)
    return


def _inpt_cell_to_ase_cell(data_dict):
    """Convert the inpt cell convention to a real cell (in ASE Angstrom unit)

    Arguments:
    data_dict: an already validated dict of inpt file blocks
               (i.e. parsed by _read_inpt)

    Returns:
    cell in ASE unit (Angstrom)
    """
    inpt_blocks = data_dict["inpt"]["params"]
    if ("CELL" in inpt_blocks) and ("LATVEC_SCALE" in inpt_blocks):
        # TODO: customize the exception class
        # TODO: how do we convert the rule from doc?
        raise ValueError("LATVEC_SCALE and CELL cannot be specified simultaneously!")

    if "LATVEC" not in inpt_blocks:
        if ("CELL" in inpt_blocks) or ("LATVEC_SCALE" in inpt_blocks):
            lat_array = np.eye(3) * Bohr
        else:
            raise KeyError(
                "LATVEC is missing in inpt file and no CELL / LATVEC_SCALE provided!"
            )
    else:
        lat_array = np.array(inpt_blocks["LATVEC"]) * Bohr

    # LATVEC_SCALE: just multiplies each lattice vector
    if "LATVEC_SCALE" in inpt_blocks:
        scale = np.array(inpt_blocks["LATVEC_SCALE"])
        cell = (lat_array.T * scale).T

    # CELL: the lengths are measured along the LATVEC directions
    # TODO: the documentation about CELL is a bit messy. Is CELL always orthogonal?
    # Anyway, the default lat_array used when LATVEC is absent should be ok
    elif "CELL" in inpt_blocks:
        scale = np.array(inpt_blocks["CELL"])
        unit_lat_array = (
            lat_array / np.linalg.norm(lat_array, axis=1, keepdims=True) * Bohr
        )
        cell = (unit_lat_array.T * scale).T
    else:
        cell = lat_array
    return cell
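A short sketch of the CELL convention implemented above (the values are made up; the LATVEC rows are unit vectors here, so CELL simply sets the lengths in Bohr):

# Hypothetical example: CELL lengths applied along the LATVEC directions
import numpy as np
from ase.units import Bohr

data_dict = {
    "inpt": {
        "params": {
            "LATVEC": np.eye(3),                   # unit lattice vectors
            "CELL": np.array([10.0, 10.0, 20.0]),  # lengths in Bohr
        },
        "comments": [],
    }
}
cell = _inpt_cell_to_ase_cell(data_dict)
print(np.diag(cell) / Bohr)  # -> [10. 10. 20.]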
diff --git a/_static/htmlcov/z_ef57e6186893c87e_ion_py.html b/_static/htmlcov/z_ef57e6186893c87e_ion_py.html
new file mode 100644
index 00000000..9af346ac
--- /dev/null
+++ b/_static/htmlcov/z_ef57e6186893c87e_ion_py.html
Coverage for sparc/sparc_parsers/ion.py: 97% (100 statements; coverage.py v7.6.7, created at 2024-11-20 18:11 +0000)
1""" 

+

2Created on Thu Oct 18 14:16:21 2018 

+

3 

+

4Ben Comer (Georgia Tech) 

+

5 

+

6This file has been heavily modified since SPARC 0.1 

+

7 

+

8TODO: more descriptions about this file io parser 

+

9""" 

+

10import textwrap 

+

11from warnings import warn 

+

12 

+

13import numpy as np 

+

14from ase.units import Bohr 

+

15 

+

16# Safe wrappers for both string and fd 

+

17from ase.utils import reader, writer 

+

18 

+

19from ..api import SparcAPI 

+

20from .utils import ( 

+

21 bisect_and_strip, 

+

22 make_reverse_mapping, 

+

23 read_block_input, 

+

24 strip_comments, 

+

25) 

+

26 

+

27 

+

28class InvalidSortingComment(ValueError): 

+

29 def __init__(self, message): 

+

30 self.message = message 

+

31 

+

32 

+

33defaultAPI = SparcAPI() 

+

34 

+

35 

+

36@reader 

+

37def _read_ion(fileobj, validator=defaultAPI): 

+

38 """ 

+

39 Read information from the .ion file. Note, this method does not return an atoms object, 

+

40 but rather return a dict. Thus the label option is not necessary to keep 

+

41 

+

42 

+

43 Reads an ion file. Because some of the information necessary to create 

+

44 an atoms object is found in the .inpt file, this function also attemtps to read 

+

45 that as a source of data. If the file is not found or the information is invalid, 

+

46 it will look for it in the comments of the ion file, as written. 

+

47 """ 

+

48 contents = fileobj.read() 

+

49 # label = get_label(fileobj, ".ion") 

+

50 data, comments = strip_comments(contents) 

+

51 # We do not read the cell at this time! 

+

52 sort, resort, new_comments = _read_sort_comment(comments) 

+

53 

+

54 # find the index for all atom type lines. They should be at the top of their block 

+

55 atom_type_bounds = [i for i, x in enumerate(data) if "ATOM_TYPE" in x] + [len(data)] 

+

56 atom_blocks = [ 

+

57 read_block_input(data[start:end], validator=validator) 

+

58 for start, end in zip(atom_type_bounds[:-1], atom_type_bounds[1:]) 

+

59 ] 

+

60 

+

61 return { 

+

62 "ion": { 

+

63 "atom_blocks": atom_blocks, 

+

64 "comments": new_comments, 

+

65 "sorting": {"sort": sort, "resort": resort}, 

+

66 } 

+

67 } 

+

68 

+

69 

+

70@writer 

+

71def _write_ion( 

+

72 fileobj, 

+

73 data_dict, 

+

74 validator=defaultAPI, 

+

75): 

+

76 """ 

+

77 Writes the ion file content from the atom_dict 

+

78 

+

79 Please note this is not a Atoms-compatible function! 

+

80 

+

81 The data_dict takes similar format as _read_ion 

+

82 

+

83 Basically, we want to ensure 

+

84 data_dict = _read_ion("some.ion") 

+

85 _write_ion("some.ion", data_dict) 

+

86 shows the same format 

+

87 """ 

+

88 ion_dict = data_dict.get("ion", None) 

+

89 if ion_dict is None: 

+

90 raise ValueError("No ion data provided in the input!") 

+

91 if "atom_blocks" not in ion_dict: 

+

92 raise ValueError( 

+

93 "Must provide a data-section in the data_dict (blocks of atomic information)" 

+

94 ) 

+

95 

+

96 comments = ion_dict.get("comments", []) 

+

97 banner = "Ion File Generated by SPARC ASE Calculator" 

+

98 if len(comments) == 0: 

+

99 comments = [banner] 

+

100 elif "ASE" not in comments[0]: 

+

101 comments = [banner] + comments 

+

102 

+

103 # Handle the sorting mapping 

+

104 # the line wrap is 80 words 

+

105 if "sorting" in ion_dict: 

+

106 # print(ion_dict["sorting"]) 

+

107 resort = ion_dict["sorting"].get("resort", []) 

+

108 # Write resort information only when it's actually useful 

+

109 if len(resort) > 0: 

+

110 comments.append("ASE-SORT:") 

+

111 index_lines = textwrap.wrap(" ".join(map(str, resort)), width=80) 

+

112 comments.extend(index_lines) 

+

113 comments.append("END ASE-SORT") 

+

114 

+

115 for line in comments: 

+

116 fileobj.write(f"# {line}\n") 

+

117 

+

118 fileobj.write("\n") 

+

119 blocks = ion_dict["atom_blocks"] 

+

120 for block in blocks: 

+

121 for key in [ 

+

122 "ATOM_TYPE", 

+

123 "N_TYPE_ATOM", 

+

124 "PSEUDO_POT", 

+

125 "COORD_FRAC", 

+

126 "COORD", 

+

127 "SPIN", 

+

128 "RELAX", 

+

129 ]: 

+

130 val = block.get(key, None) 

+

131 # print(key, val) 

+

132 if (key not in ["RELAX", "COORD", "COORD_FRAC", "SPIN"]) and (val is None): 

+

133 raise ValueError(f"Key {key} is not provided! Abort writing ion file") 

+

134 # TODO: change the API version 

+

135 if val is None: 

+

136 continue 

+

137 

+

138 val_string = validator.convert_value_to_string(key, val) 

+

139 # print(val_string) 

+

140 # TODO: make sure 1 line is accepted 

+

141 # TODO: write pads to vector lines 

+

142 if (val_string.count("\n") > 0) or ( 

+

143 key in ["COORD_FRAC", "COORD", "RELAX", "SPIN"] 

+

144 ): 

+

145 output = f"{key}:\n{val_string}\n" 

+

146 else: 

+

147 output = f"{key}: {val_string}\n" 

+

148 fileobj.write(output) 

+

149 # TODO: check extra keys 

+

150 # TODO: how to handle multiple psp files? 

+

151 # Write a split line 

+

152 # TODO: do we need to distinguish the last line? 

+

153 fileobj.write("\n") 

+

154 return 

+

155 

+

156 

+

157def _ion_coord_to_ase_pos(data_dict, cell=None): 

+

158 """Convert the COORD or COORD_FRAC from atom blocks to ASE's positions 

+

159 

+

160 Arguments: 

+

161 cell: a unit cell in ASE-unit (i.e. parsed from inpt._inpt_cell_to_ase_cell) 

+

162 

+

163 This function modifies the data_dict in-place to add a field '_ase_positions' 

+

164 to the atom_blocks 

+

165 """ 

+

166 treated_blocks = [] 

+

167 can_have_coord_frac = cell is not None 

+

168 ion_atom_blocks = data_dict["ion"]["atom_blocks"] 

+

169 for i, block in enumerate(ion_atom_blocks): 

+

170 if ("COORD" in block.keys()) and ("COORD_FRAC" in block.keys()): 

+

171 raise KeyError("COORD and COORD_FRAC cannot co-exist!") 

+

172 if (not can_have_coord_frac) and ("COORD_FRAC" in block.keys()): 

+

173 raise KeyError("COORD_FRAC must be acompanied by a cell!") 

+

174 coord = block.get("COORD", None) 

+

175 if coord is not None: 

+

176 coord = coord * Bohr 

+

177 else: 

+

178 coord_frac = block["COORD_FRAC"] 

+

179 # Cell is already in Bohr 

+

180 coord = np.dot(coord_frac, cell) 

+

181 data_dict["ion"]["atom_blocks"][i]["_ase_positions"] = coord 

+

182 return 

+

183 

+

184 

+

185def _read_sort_comment(lines): 

+

186 """Parse the atom sorting info from the comment lines 

+

187 Format 

+

188 

+

189 ASE-SORT: 

+

190 r_i r_j r_k .... 

+

191 END ASE-SORT 

+

192 where r_i etc are the indices in the original ASE atoms object 

+

193 """ 

+

194 i = 0 

+

195 resort = [] 

+

196 record = False 

+

197 new_lines = [] 

+

198 while i < len(lines): 

+

199 line = lines[i] 

+

200 key, value = bisect_and_strip(line, ":") 

+

201 i += 1 

+

202 if key == "ASE-SORT": 

+

203 record = True 

+

204 elif key == "END ASE-SORT": 

+

205 record = False 

+

206 break 

+

207 elif record is True: 

+

208 resort += list(map(int, line.strip().split(" "))) 

+

209 else: 

+

210 # Put original lines in new_lines 

+

211 new_lines.append(line) 

+

212 # Put all remaining lines in new_lines 

+

213 for j in range(i, len(lines)): 

+

214 line = lines[j] 

+

215 if "ASE-SORT" in line: 

+

216 raise InvalidSortingComment( 

+

217 "There appears to be multiple sorting information in the ion comment section!" 

+

218 ) 

+

219 new_lines.append(line) 

+

220 if record: 

+

221 warn( 

+

222 "ASE atoms resort comment block is not properly formatted, this may cause data loss!" 

+

223 ) 

+

224 sort = make_reverse_mapping(resort) 

+

225 assert set(sort) == set(resort), "Sort and resort info are of different length!" 

+

226 return sort, resort, new_lines 

+
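A small sketch of the ASE-SORT comment handling (the comment lines are made up); `sort` is the inverse permutation of `resort`:

# Hypothetical example: recover the sorting maps from .ion comment lines
comments = ["ASE-SORT:", "2 0 1", "END ASE-SORT", "a user comment"]
sort, resort, rest = _read_sort_comment(comments)
print(resort)  # [2, 0, 1]  (SPARC order -> original ASE order)
print(sort)    # [1, 2, 0]  (inverse mapping)
print(rest)    # ['a user comment']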
diff --git a/_static/htmlcov/z_ef57e6186893c87e_out_py.html b/_static/htmlcov/z_ef57e6186893c87e_out_py.html
new file mode 100644
index 00000000..1b09e8c0
--- /dev/null
+++ b/_static/htmlcov/z_ef57e6186893c87e_out_py.html
Coverage for sparc/sparc_parsers/out.py: 91% (140 statements; coverage.py v7.6.7, created at 2024-11-20 18:11 +0000)
1""" 

+

2Created on Thu Oct 18 14:16:21 2018 

+

3 

+

4Ben Comer (Georgia Tech) 

+

5 

+

6This file has been heavily modified since SPARC 0.1 

+

7 

+

8TODO: more descriptions about this file io parser 

+

9""" 

+

10import re 

+

11from datetime import datetime 

+

12from warnings import warn 

+

13 

+

14import numpy as np 

+

15from ase.units import Bohr, GPa, Hartree 

+

16 

+

17# Safe wrappers for both string and fd 

+

18from ase.utils import reader, writer 

+

19 

+

20from ..api import SparcAPI 

+

21from .utils import bisect_and_strip, read_block_input 

+

22 

+

23# TODO: should allow user to select the api 

+

24defaultAPI = SparcAPI() 

+

25 

+

26 

+

27@reader 

+

28def _read_out(fileobj): 

+

29 """ 

+

30 Read the .out file content 

+

31 

+

32 The output file is just stdout. The blocks are read using re-patterns rather than the way .static / .geopt or .aimd are parsed 

+

33 """ 

+

34 contents = fileobj.read() 

+

35 sparc_version = _read_sparc_version(contents[:4096]) 

+

36 # print(sparc_version) 

+

37 # TODO: use the sparc version to construct the API 

+

38 output_dict = {"sparc_version": sparc_version} 

+

39 # Combine the input parameters and parallelization sections 

+

40 output_dict["parameters"] = _read_input_params(contents) 

+

41 

+

42 # Parse the Initialization and timing info, and if calculation 

+

43 # successfully finished 

+

44 # Note: not all information are converted! 

+

45 output_dict["run_info"] = _read_run_info(contents) 

+

46 # List of scf information, 

+

47 # including scf convergence, energy etc 

+

48 output_dict["ionic_steps"] = _read_scfs(contents) 

+

49 return {"out": output_dict} 

+

50 

+

51 

+

52def _read_sparc_version(header): 

+

53 """Read the sparc version from the output file header. 

+

54 

+

55 This function should live outside the _read_output since some other functions may use it 

+

56 

+

57 TODO: combine it with the version from initialization.c 

+

58 """ 

+

59 pattern_version = r"SPARC\s+\(\s*?version(.*?)\)" 

+

60 match = re.findall(pattern_version, header) 

+

61 if len(match) != 1: 

+

62 warn("Header does not contain SPARC version information!") 

+

63 return None 

+

64 date_str = match[0].strip().replace(",", " ") 

+

65 # Accept both abbreviate and full month name 

+

66 try: 

+

67 date_version = datetime.strptime(date_str, "%B %d %Y").strftime("%Y.%m.%d") 

+

68 except ValueError: 

+

69 try: 

+

70 date_version = datetime.strptime(date_str, "%b %d %Y").strftime("%Y.%m.%d") 

+

71 except ValueError: 

+

72 warn("Cannot fetch SPARC version information!") 

+

73 date_version = None 

+

74 return date_version 

+

75 

+

76 

+

77def _read_input_params(contents, validator=defaultAPI): 

+

78 """Parse the Input parameters and Paral""" 

+

79 lines = "\n".join( 

+

80 _get_block_text(contents, "Input parameters") 

+

81 + _get_block_text(contents, "Parallelization") 

+

82 ).split("\n") 

+

83 # print(lines) 

+

84 params = read_block_input(lines, validator=validator) 

+

85 return params 

+

86 

+

87 

+

88def _read_run_info(contents): 

+

89 """Parse the run info sections 

+

90 Note due to the complexity of the run info, 

+

91 the types are not directly converted 

+

92 """ 

+

93 lines = "\n".join( 

+

94 _get_block_text(contents, "Timing info") 

+

95 + _get_block_text(contents, "Initialization") 

+

96 ).split("\n") 

+

97 block_dict = {"raw_info": lines} 

+

98 # Select key fields to store 

+

99 for line in lines: 

+

100 if ":" not in line: 

+

101 continue 

+

102 key, value = bisect_and_strip(line, ":") 

+

103 key = key.lower() 

+

104 if key in block_dict: 

+

105 if key not in ("pseudopotential",): 

+

106 warn( 

+

107 f"Key {key} from run information appears multiple times in your outputfile!" 

+

108 ) 

+

109 # For keys like pseudopotential, we make it a list 

+

110 else: 

+

111 origin_value = list(block_dict[key]) 

+

112 value = origin_value + [value] 

+

113 

+

114 block_dict[key] = value 

+

115 return block_dict 

+

116 

+

117 

+

118def _read_scfs(contents): 

+

119 """Parse the ionic steps 

+

120 

+

121 Return: 

+

122 List of ionic steps information 

+

123 

+

124 

+

125 """ 

+

126 convergence_info = _get_block_text(contents, r"Self Consistent Field \(SCF.*?\)") 

+

127 results_info = _get_block_text(contents, "Energy and force calculation") 

+

128 

+

129 # Should not happen 

+

130 if len(convergence_info) > len(results_info) + 1: 

+

131 raise ValueError( 

+

132 "Error, length of convergence information and energy calculation mismatch!" 

+

133 ) 

+

134 elif len(convergence_info) == len(results_info) + 1: 

+

135 warn("Last ionic SCF has not finished! The results may be incomplete") 

+

136 else: 

+

137 pass 

+

138 

+

139 # Stick to the convergence information as the main section 

+

140 n_steps = len(convergence_info) 

+

141 steps = [] 

+

142 # for i, step in enumerate(zip(convergence_info, results_info)): 

+

143 for i in range(n_steps): 

+

144 conv = convergence_info[i] 

+

145 # Solution for incomplete calculations 

+

146 if i >= len(results_info): 

+

147 res = "" # Empty lines 

+

148 else: 

+

149 res = results_info[i] 

+

150 current_step = {"scf_step": i} 

+

151 # TODO: add support for convergence fields 

+

152 conv_lines = conv.splitlines() 

+

153 # conv_header is normally 4-column table 

+

154 conv_header = re.split(r"\s{3,}", conv_lines[0]) 

+

155 

+

156 scf_sub_steps = [] 

+

157 # For ground-state calculations, the output will be only 1 block 

+

158 # For hybrid (HSE/PBE0) calculations the EXX loops will also be included 

+

159 # General rule: we search for the line "Total number of SCF: N", read back N(+1) lines 

+

160 for lino, line in enumerate(conv_lines): 

+

161 if "Total number of SCF:" not in line: 

+

162 continue 

+

163 scf_num = int(line.split(":")[-1]) 

+

164 conv_array = np.genfromtxt( 

+

165 [ 

+

166 l 

+

167 for l in conv_lines[lino - scf_num : lino] 

+

168 if l.split()[0].isdigit() 

+

169 ], 

+

170 dtype=float, 

+

171 ndmin=2, 

+

172 ) 

+

173 conv_dict = {} 

+

174 for i, field in enumerate(conv_header): 

+

175 field = field.strip() 

+

176 value = conv_array[:, i] 

+

177 # TODO: re-use the value conversion function in res part 

+

178 if "Ha/atom" in field: 

+

179 value *= Hartree 

+

180 field.replace("Ha/atom", "eV/atom") 

+

181 if "Iteration" in field: 

+

182 value = value.astype(int) 

+

183 conv_dict[field] = value 

+

184 # Determine if the current block is a ground-state or EXX 

+

185 name_line = conv_lines[lino - scf_num - 1] 

+

186 if "Iteration" in name_line: 

+

187 name = "ground state" 

+

188 else: 

+

189 name = name_line 

+

190 

+

191 conv_dict["name"] = name 

+

192 scf_sub_steps.append(conv_dict) 

+

193 

+

194 current_step["convergence"] = scf_sub_steps 

+

195 

+

196 res = res.splitlines() 

+

197 for line in res: 

+

198 if ":" not in line: 

+

199 continue 

+

200 key, value = bisect_and_strip(line, ":") 

+

201 key = key.lower() 

+

202 if key in current_step: 

+

203 warn( 

+

204 f"Key {key} appears multiples in one energy / force calculation, your output file may be incorrect." 

+

205 ) 

+

206 # Conversion of values are relatively easy 

+

207 pattern_value = r"([+\-\d.Ee]+)\s+\((.*?)\)" 

+

208 match = re.findall(pattern_value, value) 

+

209 raw_value, unit = float(match[0][0]), match[0][1] 

+

210 if unit == "Ha": 

+

211 converted_value = raw_value * Hartree 

+

212 converted_unit = "eV" 

+

213 elif unit == "Ha/atom": 

+

214 converted_value = raw_value * Hartree 

+

215 converted_unit = "eV/atom" 

+

216 elif unit == "Ha/Bohr": 

+

217 converted_value = raw_value * Hartree / Bohr 

+

218 converted_unit = "eV/Angstrom" 

+

219 elif unit == "GPa": 

+

220 converted_value = raw_value * GPa 

+

221 converted_unit = "eV/Angstrom^3" 

+

222 elif unit == "sec": 

+

223 converted_value = raw_value * 1 

+

224 converted_unit = "sec" 

+

225 elif unit == "Bohr magneton": 

+

226 converted_value = raw_value 

+

227 converted_unit = "Bohr magneton" 

+

228 else: 

+

229 warn(f"Conversion for unit {unit} unknown! Treat as unit") 

+

230 converted_value = raw_value 

+

231 converted_unit = unit 

+

232 current_step[key] = { 

+

233 "value": converted_value, 

+

234 "unit": converted_unit, 

+

235 } 

+

236 steps.append(current_step) 

+

237 return steps 

+

238 

+

239 

+

240def _get_block_text(text, block_name): 

+

241 """Get an output 'block' with a specific block name 

+

242 

+

243 the outputs are not line-split 

+

244 """ 

+

245 # Add the ending separator so matching is possible for partial-complete 

+

246 # .out file from socket calculations 

+

247 text = text + ("=" * 68) + "\n" 

+

248 pattern_block = ( 

+

249 r"[\*=]{50,}\s*?\n\s*?BLOCK_NAME\s*?\n[\*=]{50,}\s*\n(.*?)[\*=]{50,}" 

+

250 ) 

+

251 pattern = pattern_block.replace("BLOCK_NAME", block_name) 

+

252 match = re.findall(pattern, text, re.DOTALL | re.MULTILINE) 

+

253 if len(match) == 0: 

+

254 warn(f"Block {block_name} cannot be parsed from current text!") 

+

255 return match 

+

256 

+

257 

+

258@writer 

+

259def _write_out( 

+

260 fileobj, 

+

261 data_dict, 

+

262): 

+

263 raise NotImplementedError("Writing output file from SPARC-X-API not supported!") 

+
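A quick sketch of the version detection; the banner line is a made-up stand-in for a real SPARC .out header:

# Hypothetical example: the version string is the banner date, reformatted
header = "*            SPARC (version Nov 20, 2024)            *"
print(_read_sparc_version(header))  # -> '2024.11.20'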
diff --git a/_static/htmlcov/z_ef57e6186893c87e_pseudopotential_py.html b/_static/htmlcov/z_ef57e6186893c87e_pseudopotential_py.html
new file mode 100644
index 00000000..e27e887d
--- /dev/null
+++ b/_static/htmlcov/z_ef57e6186893c87e_pseudopotential_py.html
Coverage for sparc/sparc_parsers/pseudopotential.py: 100% (92 statements; coverage.py v7.6.7, created at 2024-11-20 18:11 +0000)
1"""Provide a simple parser for pseudo potentials 

+

2 

+

3The psp8 format is defined in abinit manual 

+

4https://docs.abinit.org/developers/psp8_info/ 

+

5 

+

6The first 

+

7 

+

8""" 

+

9 

+

10import os 

+

11import re 

+

12import shutil 

+

13from pathlib import Path 

+

14from warnings import warn 

+

15 

+

16import numpy as np 

+

17from ase.data import atomic_names, chemical_symbols 

+

18 

+

19 

+

20class NotPSP8Format(Exception): 

+

21 def __init__(self, message): 

+

22 self.message = message 

+

23 

+

24 

+

25class NoMatchingPseudopotential(FileNotFoundError): 

+

26 def __init__(self, message): 

+

27 self.message = message 

+

28 

+

29 

+

30class MultiplePseudoPotentialFiles(Exception): 

+

31 def __init__(self, message): 

+

32 self.message = message 

+

33 

+

34 

+

35def parse_psp8_header(text): 

+

36 """Parse the first 4 lines of psp8 text 

+

37 if parsing failed, raise exception 

+

38 """ 

+

39 header = text.split("\n")[:4] 

+

40 # Line 1 

+

41 

+

42 psp8_magic = r"^\s*(?P<symbol>[\w]+)\s+ONCVPSP-(?P<psp8ver>[\d.\w]+)\s+r\_core=(?P<r_core>.*?)$" 

+

43 psp8_data = {} 

+

44 match = re.search(psp8_magic, header[0]) 

+

45 if match is None: 

+

46 raise NotPSP8Format(f"The pseudopotential file is not in PSP8 format!") 

+

47 mgroup = match.groupdict() 

+

48 psp8_data["symbol"] = mgroup["symbol"].strip() 

+

49 psp8_data["psp8ver"] = mgroup["psp8ver"].strip() 

+

50 psp8_data["r_core"] = np.fromstring(mgroup["r_core"].strip(), sep=" ", dtype=float) 

+

51 # Line 2 

+

52 zatom, zion, pspd, *_ = header[1].split() 

+

53 psp8_data["zatom"] = float(zatom) 

+

54 psp8_data["zion"] = float(zion) 

+

55 # TODO: should we make date in datetime object? 

+

56 psp8_data["pspd"] = str(pspd).strip() 

+

57 

+

58 # Line 3 

+

59 pspcod, pspxc, lmax, lloc, mmax, r2well, *_ = header[2].split() 

+

60 psp8_data["pspcod"] = int(pspcod) 

+

61 psp8_data["pspxc"] = int(pspxc) 

+

62 psp8_data["lmax"] = int(lmax) 

+

63 psp8_data["lloc"] = int(lloc) 

+

64 psp8_data["mmax"] = int(mmax) 

+

65 psp8_data["r2well"] = int(r2well) 

+

66 

+

67 # Line 4 

+

68 rchrg, fchrg, qchrg, *_ = header[3].split() 

+

69 psp8_data["rchrg"] = float(rchrg) 

+

70 psp8_data["fchrg"] = float(fchrg) 

+

71 psp8_data["qchrg"] = float(qchrg) 

+

72 

+

73 # Sanity check the symbol and zatom 

+

74 int_zatom = int(psp8_data["zatom"]) 

+

75 if chemical_symbols[int_zatom] != psp8_data["symbol"]: 

+

76 raise NotPSP8Format( 

+

77 ( 

+

78 f"The symbol defined in pseudo potential {psp8_data['symbol']} does not match " 

+

79 f"the Z={int_zatom}!" 

+

80 ) 

+

81 ) 

+

82 return psp8_data 

+

83 

+

84 

+

85def infer_pseudo_path(symbol, search_path): 

+

86 """Given an element symbol like 'Na', get the file name 

+

87 of the search_path (resolved) that search through the search path 

+

88 

+

89 TODO: shall we support multiple directories? 

+

90 TODO: add a `setup` option like VASP? 

+

91 """ 

+

92 search_path = Path(search_path).resolve() 

+

93 potfiles = ( 

+

94 list(search_path.glob("*.psp8")) 

+

95 + list(search_path.glob("*.psp")) 

+

96 + list(search_path.glob("*.pot")) 

+

97 ) 

+

98 candidates = [] 

+

99 for pf in potfiles: 

+

100 try: 

+

101 psp8_data = parse_psp8_header(open(pf, "r").read()) 

+

102 except Exception as e: 

+

103 print(e) 

+

104 psp8_data = None 

+

105 

+

106 if psp8_data: 

+

107 if psp8_data["symbol"] == symbol: 

+

108 candidates.append(pf) 

+

109 if len(candidates) == 0: 

+

110 raise NoMatchingPseudopotential( 

+

111 ( 

+

112 f"No pseudopotential file for {symbol} found " 

+

113 "under the search path {search_path}!" 

+

114 ) 

+

115 ) 

+

116 elif len(candidates) > 1: 

+

117 msg = ( 

+

118 f"There are multiple psp8 files for {symbol}:\n" 

+

119 f"{candidates}. Please select the desired pseudopotential file!" 

+

120 ) 

+

121 raise MultiplePseudoPotentialFiles(msg) 

+

122 

+

123 else: 

+

124 return candidates[0] 

+

125 

+

126 

+

127def copy_psp_file(source_pot, target_dir, use_symbol=False): 

+

128 """Copy the pseudo potential file `source_pot` under `target_dir` 

+

129 

+

130 if use_symbol is True, rename the potential to '{symbol}.psp8' 

+

131 

+

132 the function returns the name of the pseudo potential file 

+

133 """ 

+

134 

+

135 source_pot = Path(source_pot) 

+

136 target_dir = Path(target_dir) 

+

137 psp8_data = parse_psp8_header(open(source_pot, "r").read()) 

+

138 symbol = psp8_data["symbol"] 

+

139 if use_symbol: 

+

140 potname = f"{symbol}.psp8" 

+

141 else: 

+

142 potname = source_pot.name 

+

143 

+

144 target_pot = target_dir / potname 

+

145 # shutil will copy 

+

146 shutil.copy(source_pot, target_pot) 

+

147 return potname 

+

148 

+

149 

+

150def find_pseudo_path(symbol, search_path=None, pseudopotential_mapping={}): 

+

151 """Get the pseudo potential file at best effort 

+

152 

+

153 Searching priorities 

+

154 1) if pseudopotential_mapping has symbol as key, use the file name 

+

155 There are two possibilities 

+

156 i) filename does not contain directory information: i.e. Na-pbe.pot 

+

157 use search_path / filename for the mapping 

+

158 ii) filename contains directory information, directly use the file name 

+

159 2) No pseudopotential_mapping is given, get the psp from search_path 

+

160 """ 

+

161 mapping_psp = pseudopotential_mapping.get(symbol, None) 

+

162 if mapping_psp is None: 

+

163 if search_path is None: 

+

164 raise NoMatchingPseudopotential( 

+

165 ( 

+

166 f"No psudopotentials found for {symbol} " 

+

167 "because neither search_path nor psp name is provided." 

+

168 ) 

+

169 ) 

+

170 return infer_pseudo_path(symbol, search_path) 

+

171 else: 

+

172 str_psp = str(mapping_psp) 

+

173 mapping_psp = Path(mapping_psp) 

+

174 # if psp contains any path information (/, \\), treat is as a direct file 

+

175 is_node_file_name = (mapping_psp.name == str_psp) and (os.sep not in str_psp) 

+

176 if is_node_file_name: 

+

177 if search_path is None: 

+

178 raise NoMatchingPseudopotential( 

+

179 ( 

+

180 f"You provide the pseudopotential name {mapping_psp} but no search path is defined. I cannot locate the pseudopotential file." 

+

181 ) 

+

182 ) 

+

183 mapping_psp = Path(search_path) / str_psp 

+

184 

+

185 if not mapping_psp.is_file(): 

+

186 warn( 

+

187 ( 

+

188 f"Pseudopotential file {mapping_psp} is defined by user input but cannot be found!\n" 

+

189 "Please check your setup. I'll write the .ion file anyway." 

+

190 ) 

+

191 ) 

+

192 return mapping_psp 

+
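A minimal sketch of the header parser; the four header lines below are fabricated but follow the psp8 layout assumed above:

# Hypothetical example: parse a made-up psp8 header
sample = (
    "Na    ONCVPSP-3.3.0  r_core=   1.20  1.35\n"
    "11.0000   9.0000   170503   zatom,zion,pspd\n"
    "8   11   1   4   600   0   pspcod,pspxc,lmax,lloc,mmax,r2well\n"
    "5.99   0.00   0.00   rchrg,fchrg,qchrg\n"
)
info = parse_psp8_header(sample)
print(info["symbol"], info["zion"], info["lmax"])  # -> Na 9.0 1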
diff --git a/_static/htmlcov/z_ef57e6186893c87e_static_py.html b/_static/htmlcov/z_ef57e6186893c87e_static_py.html
new file mode 100644
index 00000000..287701ee
--- /dev/null
+++ b/_static/htmlcov/z_ef57e6186893c87e_static_py.html
Coverage for sparc/sparc_parsers/static.py: 90% (115 statements; coverage.py v7.6.7, created at 2024-11-20 18:11 +0000)
1""" 

+

2Created on Thu Oct 18 14:16:21 2018 

+

3 

+

4Ben Comer (Georgia Tech) 

+

5 

+

6This file has been heavily modified since SPARC 0.1 

+

7 

+

8TODO: more descriptions about this file io parser 

+

9""" 

+

10from warnings import warn 

+

11 

+

12import numpy as np 

+

13from ase.units import Bohr, GPa, Hartree 

+

14 

+

15# Safe wrappers for both string and fd 

+

16from ase.utils import reader, writer 

+

17 

+

18from ..api import SparcAPI 

+

19from .utils import strip_comments 

+

20 

+

21# TODO: should allow user to select the api 

+

22# defaultAPI = SparcAPI() 

+

23 

+

24 

+

25@reader 

+

26def _read_static(fileobj): 

+

27 """ 

+

28 Read the .static file content 

+

29 

+

30 Each .static file should only host 1 or more images 

+

31 (if socket mode is enabled), but the output may vary 

+

32 a lot depending on the flags (e.g. PRINT_ATOMS, PRINT_FORCES etc) 

+

33 """ 

+

34 contents = fileobj.read() 

+

35 data, comments = strip_comments(contents) 

+

36 # Most static files should containe the Atom positions lines 

+

37 # this can happen for both a single- or multi-image file 

+

38 step_bounds = [i for i, x in enumerate(data) if "Atom positions" in x] + [len(data)] 

+

39 raw_static_steps = [ 

+

40 data[start:end] for start, end in zip(step_bounds[:-1], step_bounds[1:]) 

+

41 ] 

+

42 # In some cases (e.g. PRINT_ATOMS=0), the static file may not contain Atom positions 

+

43 # All existing lines will be regarded as one step 

+

44 if len(raw_static_steps) == 0: 

+

45 raw_static_steps = [data] 

+

46 static_steps = [_read_static_step(step) for step in raw_static_steps] 

+

47 return {"static": static_steps} 

+

48 

+

49 

+

50def _read_static_block(raw_block): 

+

51 """Parse ONE static data block, this will return dict with keys: 

+

52 {"name": PARAM, "value": value} 

+

53 

+

54 Arguments: 

+

55 raw_block: un-parsed block as list of strings 

+

56 """ 

+

57 header, body = raw_block[0], raw_block[1:] 

+

58 header_name, header_rest = header.split(":") 

+

59 if len(header_rest.strip()) > 0: 

+

60 body = [header_rest.strip()] + body 

+

61 

+

62 try: 

+

63 value = np.genfromtxt(body, dtype=float) 

+

64 if np.isnan(value).any(): 

+

65 warn( 

+

66 ( 

+

67 f"Field contains data that are not parsable by numpy!\n" 

+

68 f"Contents are: {body}" 

+

69 ) 

+

70 ) 

+

71 except Exception: 

+

72 value = body 

+

73 

+

74 # name = None 

+

75 if "Total free energy" in header_name: 

+

76 name = "free energy" 

+

77 elif "Atomic forces" in header_name: 

+

78 name = "forces" 

+

79 elif "Net magnetization" in header_name: 

+

80 name = "net_magnetization" 

+

81 elif "Atomic magnetization" in header_name: 

+

82 name = "atomic_magnetization" 

+

83 elif "Stress (GPa)" in header_name: 

+

84 name = "stress" 

+

85 elif "Stress equiv." in header_name: 

+

86 name = "stress_equiv" 

+

87 elif "Stress (Ha/Bohr)" in header_name: 

+

88 name = "stress_1d" 

+

89 elif "Stress (Ha/Bohr**2)" in header_name: 

+

90 name = "stress_2d" 

+

91 elif "Fractional coordinates" in header_name: 

+

92 # Fractional coordinates of Si -- > name=coord_frac symbol="Si" 

+

93 name = "coord_frac" 

+

94 symbol = header_name.split("of")[1].strip() 

+

95 clean_array = value.reshape((-1, 3)) 

+

96 value = {"value": clean_array, "symbol": symbol} 

+

97 # Exclusive to the socket mode 

+

98 elif "Lattice (Bohr)" in header_name: 

+

99 name = "lattice" 

+

100 value = value.reshape((3, 3)) 

+

101 else: 

+

102 name = header_name.strip() 

+

103 

+

104 return {"name": name, "value": value} 

+

105 

+

106 

+

107def _read_static_step(step): 

+

108 """Parse all the lines in one step and compose a dict containing sanitized blocks 

+

109 Args: 

+

110 step (list): Lines of raw lines in one step 

+

111 """ 

+

112 separator = "*" * 60 # Make the separator long enough 

+

113 # Clean up boundary lines 

+

114 data = [ 

+

115 line 

+

116 for line in step 

+

117 if ("Atom positions" not in line) and (separator not in line) 

+

118 ] 

+

119 block_bounds = [i for i, x in enumerate(data) if ":" in x] + [len(data)] 

+

120 raw_blocks = [ 

+

121 data[start:end] for start, end in zip(block_bounds[:-1], block_bounds[1:]) 

+

122 ] 

+

123 # import pdb; pdb.set_trace() 

+

124 block_dict = {} 

+

125 coord_dict = {} 

+

126 block_contents = [_read_static_block(block) for block in raw_blocks] 

+

127 for bc in block_contents: 

+

128 name, raw_value = bc["name"], bc["value"] 

+

129 # Coord frac needs to be collected in all positions 

+

130 if name == "coord_frac": 

+

131 value = None 

+

132 symbol, coord_f = raw_value["symbol"], raw_value["value"] 

+

133 pos_count = len(coord_f) 

+

134 if coord_dict == {}: 

+

135 coord_dict.update( 

+

136 { 

+

137 "symbols": [ 

+

138 symbol, 

+

139 ] 

+

140 * pos_count, 

+

141 "coord_frac": coord_f, 

+

142 } 

+

143 ) 

+

144 else: 

+

145 coord_dict["symbols"] += [ 

+

146 symbol, 

+

147 ] * pos_count 

+

148 # import pdb; pdb.set_trace() 

+

149 coord_dict["coord_frac"] = np.vstack( 

+

150 [coord_dict["coord_frac"], coord_f] 

+

151 ) 

+

152 

+

153 elif name == "free energy": 

+

154 value = raw_value * Hartree 

+

155 elif name == "forces": 

+

156 value = raw_value * Hartree / Bohr 

+

157 elif name == "atomic_magnetization": 

+

158 value = raw_value 

+

159 elif name == "net_magnetization": 

+

160 value = raw_value 

+

161 elif name == "stress": 

+

162 # Stress is in eV/Ang^3, may need to convert to Virial later when cell is known 

+

163 # For low-dimension stress info, use stress_equiv 

+

164 stress_ev_a3 = raw_value * GPa 

+

165 if stress_ev_a3.shape != (3, 3): 

+

166 raise ValueError("Stress from static file is not a 3x3 matrix!") 

+

167 # make the stress in voigt notation 

+

168 # TODO: check the order! 

+

169 value = np.array( 

+

170 [ 

+

171 stress_ev_a3[0, 0], 

+

172 stress_ev_a3[1, 1], 

+

173 stress_ev_a3[2, 2], 

+

174 stress_ev_a3[1, 2], 

+

175 stress_ev_a3[0, 2], 

+

176 stress_ev_a3[0, 1], 

+

177 ] 

+

178 ) 

+

179 elif name == "stress_equiv": 

+

180 # Only store the size up to the max. periodic directions, 

+

181 # let the atom parser decide how to transform the matrix 

+

182 value = raw_value * GPa 

+

183 elif name == "stress_1d": 

+

184 value = raw_value * Hartree / Bohr 

+

185 elif name == "stress_2d": 

+

186 value = raw_value * Hartree / (Bohr**2) 

+

187 elif name == "lattice": 

+

188 value = raw_value * Bohr 

+

189 

+

190 # Non-frac coord 

+

191 if value is not None: 

+

192 block_dict[name] = value 

+

193 # Finally, update the atomic positions 

+

194 # TODO: should we keep a default? 

+

195 if coord_dict != {}: 

+

196 block_dict["atoms"] = coord_dict 

+

197 

+

198 return block_dict 

+

199 

+

200 

+

201def _add_cell_info(static_steps, cell=None): 

+

202 """Use the cell information to convert positions 

+

203 if lattice exists in each step, use it to convert coord_frac 

+

204 else use the external cell (for example from inpt file) 

+

205 Args: 

+

206 static_steps: raw list of steps 

+

207 cell: external lattice information 

+

208 """ 

+

209 new_steps = [] 

+

210 for step in static_steps: 

+

211 new_step = step.copy() 

+

212 if "lattice" in step: 

+

213 lat = step["lattice"] 

+

214 elif cell is not None: 

+

215 lat = cell 

+

216 else: 

+

217 lat = None 

+

218 

+

219 if (lat is not None) and (step.get("atoms", None) is not None): 

+

220 coord_frac = new_step["atoms"]["coord_frac"] 

+

221 coord_cart = np.dot(coord_frac, lat) 

+

222 new_step["atoms"]["coord"] = coord_cart 

+

223 new_steps.append(new_step) 

+

224 return new_steps 

+

225 

+

226 

+

227@writer 

+

228def _write_static( 

+

229 fileobj, 

+

230 data_dict, 

+

231): 

+

232 raise NotImplementedError("Writing static file from SPARC-X-API not supported!") 

+
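A short sketch of `_add_cell_info` (all values invented): fractional coordinates gain a Cartesian `coord` entry once a cell is supplied:

# Hypothetical example: convert coord_frac to Cartesian with an external cell
import numpy as np

steps = [
    {"atoms": {"symbols": ["H", "H"],
               "coord_frac": np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])}}
]
new_steps = _add_cell_info(steps, cell=np.eye(3) * 4.0)  # 4 Angstrom cube
print(new_steps[0]["atoms"]["coord"][1])  # -> [2. 2. 2.]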
diff --git a/_static/htmlcov/z_ef57e6186893c87e_utils_py.html b/_static/htmlcov/z_ef57e6186893c87e_utils_py.html
new file mode 100644
index 00000000..83d90739
--- /dev/null
+++ b/_static/htmlcov/z_ef57e6186893c87e_utils_py.html
Coverage for sparc/sparc_parsers/utils.py: 98% (53 statements; coverage.py v7.6.7, created at 2024-11-20 18:11 +0000)
from warnings import warn


def get_label(fileobj, ext):
    """Return the label of file by stripping the extension (e.g. .ion)"""
    return fileobj.name.rsplit(ext, 1)[0]


def strip_comments(rawtext, symbol="#"):
    """Strip comments from the text, including trailing comments"""
    stripped = []
    comments = []
    for line in rawtext.splitlines():
        data, comment = bisect_and_strip(line, symbol)
        if data:
            stripped.append(data)
        if comment:
            comments.append(comment)
    return stripped, comments


def bisect_and_strip(text, delimiter):
    """Split a string in two at the first occurrence of a delimiter and strip whitespace.
    Useful for separating comments from data, keys from values, etc.
    """
    # wrap around to len(text) if not found (-1)
    index = text.find(delimiter) % (len(text) + 1)
    return text[:index].strip(), text[index + len(delimiter) :].strip()


def read_block_input(block, validator=None):
    """Read blocks of inputs from ion or inpt file and convert with validator

    the following inputs are accepted:
    1) single-line input: KEY: VALUE
    2) multiline input: KEY: VALUE1 \n VALUE2 --> (concatenate the values)
    3) multiline input w/ blank first line: KEY: \n VALUE1 \n VALUE2 --> (append the values)
    """
    block_dict = {}
    multiline_key = ""
    concat = False
    use_validator = validator is not None
    for line in block:
        if ":" not in line:
            # no key, assume multiline value.
            # be careful not to add blank lines
            if multiline_key:
                if concat:
                    block_dict[multiline_key] = (
                        block_dict[multiline_key] + f" {line.strip()}"
                    )
                else:
                    block_dict[multiline_key].append(line.strip())
            continue
        key, value = bisect_and_strip(line, ":")
        key = key.upper()

        if key and value:
            block_dict[key] = value
            multiline_key = key
            concat = True
        elif key:
            # no value, assume that this key has a list of values
            # in the following lines
            block_dict[key] = []
            multiline_key = key
            concat = False
    for key, val in block_dict.items():
        _use_validator_this_key = use_validator
        if _use_validator_this_key:
            if key not in validator.parameters.keys():
                warn(
                    f"Key {key} not in the validator's parameter list; ignoring value conversion!"
                )
                _use_validator_this_key = False
        if _use_validator_this_key:
            val = validator.convert_string_to_value(key, val)
        block_dict[key] = val
    return block_dict


def make_reverse_mapping(mapping):
    """Given a mapping list, get its reverse mapping

    For example:
    mapping = [0, 2, 3, 1, 5, 4]
    reverse = [0, 3, 1, 2, 5, 4]
    """
    reverse = [0] * len(mapping)
    for i, j in enumerate(mapping):
        reverse[j] = i
    return reverse
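A tiny self-check of the inverse-permutation helper, reusing the docstring example:

# The reverse mapping undoes the original permutation
mapping = [0, 2, 3, 1, 5, 4]
reverse = make_reverse_mapping(mapping)
assert reverse == [0, 3, 1, 2, 5, 4]
assert [mapping[i] for i in reverse] == sorted(mapping)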