diff --git a/.github/workflows/pypi.yaml b/.github/workflows/pypi.yaml index 4c4ec152..1fe5b515 100644 --- a/.github/workflows/pypi.yaml +++ b/.github/workflows/pypi.yaml @@ -88,9 +88,6 @@ jobs: - ["cp310", "3.10"] - ["cp311", "3.11"] - ["cp312", "3.12"] - exclude: - - buildplat: [macos-latest, macosx_x86_64, macosx_10_14_x86_64] - version: ["cp312", "3.12"] steps: - uses: actions/checkout@v4 @@ -105,15 +102,9 @@ jobs: name: wheels path: dist - # TODO: Remove the manual h5py building once h5py 3.10 has been released with cp312 wheels - name: Install dependencies run: | WHL_NAME=$(python scripts/get_whl_name.py dist ${{ matrix.buildplat[2] }}) - if [[ "${{ matrix.version[1] }}" == '3.12' ]]; then - sudo apt-get update - sudo apt-get install libhdf5-dev - pip install git+https://github.com/h5py/h5py@89e1e2e78d7fb167d2a67c9a8354ced6491160fe - fi pip install "$WHL_NAME"[test] --prefer-binary - name: Python info @@ -163,6 +154,7 @@ jobs: - ["cp39", "python3.9"] - ["cp310", "python3.10"] - ["cp311", "python3.11"] + - ["cp312", "python3.12"] steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 4a46d28d..f2d93bf9 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -24,7 +24,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - version: ["3.8", "3.9", "3.10", "3.11"] + version: ["3.8", "3.9", "3.10", "3.11", "3.12"] special: - ["", ""] include: @@ -35,7 +35,7 @@ jobs: - os: ubuntu-latest version: "3.11" special: ["GCC", "12"] - # CP2K 6.1/7.1/8.2/9.1 tests + # CP2K 6.1/7.1/8.2/9.1/2022.1 tests - os: ubuntu-latest version: "3.11" special: ["CP2K", "6.1"] @@ -153,15 +153,9 @@ jobs: with: python-version: ${{ matrix.version }} - # TODO: Remove the manual h5py building once h5py 3.10 has been released with cp312 wheels - name: Install dependencies run: | WHL_NAME=$(python scripts/get_whl_name.py wheelhouse manylinux2014_x86_64) - if [[ "${{ matrix.version }}" == '3.12' ]]; 
then - sudo apt-get update - sudo apt-get install libhdf5-dev - pip install git+https://github.com/h5py/h5py@89e1e2e78d7fb167d2a67c9a8354ced6491160fe - fi pip install "$WHL_NAME"[test] - name: Python info diff --git a/nanoqm/common.py b/nanoqm/common.py index dd83e50c..28f8721e 100644 --- a/nanoqm/common.py +++ b/nanoqm/common.py @@ -59,7 +59,7 @@ _path_valence_electrons = Path(nanoqm_path[0]) / "basis" / "valence_electrons.json" _path_aux_fit = Path(nanoqm_path[0]) / "basis" / "aux_fit.json" -with open(_path_valence_electrons, 'r') as f1, open(_path_aux_fit, 'r') as f2: +with open(_path_valence_electrons, 'r', encoding="utf8") as f1, open(_path_aux_fit, 'r', encoding="utf8") as f2: # noqa valence_electrons: "dict[str, int]" = json.load(f1) aux_fit: "dict[str, list[int]]" = json.load(f2) @@ -395,7 +395,7 @@ def read_cell_parameters_as_array( """Read the cell parameters as a numpy array.""" arr = np.loadtxt(file_cell_parameters, skiprows=1) - with open(file_cell_parameters, 'r') as f: + with open(file_cell_parameters, 'r', encoding="utf8") as f: header = f.readline() return header, arr diff --git a/nanoqm/schedule/components.py b/nanoqm/schedule/components.py index c17af951..c3e887cf 100644 --- a/nanoqm/schedule/components.py +++ b/nanoqm/schedule/components.py @@ -307,7 +307,7 @@ def create_point_folder( def split_file_geometries(path_xyz: PathLike) -> list[str]: """Read a set of molecular geometries in xyz format.""" # Read Cartesian Coordinates - with open(path_xyz) as f: + with open(path_xyz, "r", encoding="utf8") as f: xss = iter(f.readlines()) data = [] diff --git a/nanoqm/schedule/scheduleCP2K.py b/nanoqm/schedule/scheduleCP2K.py index 2df02282..c81e1153 100644 --- a/nanoqm/schedule/scheduleCP2K.py +++ b/nanoqm/schedule/scheduleCP2K.py @@ -136,7 +136,7 @@ def print_cp2k_error(path_dir: str | os.PathLike[str], prefix: str) -> str: err_file = next(Path(path_dir).glob(f"*{prefix}"), None) msg = "" if err_file is not None: - with open(err_file, 'r') as handler: + 
with open(err_file, 'r', encoding="utf8") as handler: err = handler.read() msg = f"CP2K {prefix} file:\n{err}\n" logger.error(msg) diff --git a/nanoqm/workflows/distribute_jobs.py b/nanoqm/workflows/distribute_jobs.py index 0682b4e8..68df04ea 100644 --- a/nanoqm/workflows/distribute_jobs.py +++ b/nanoqm/workflows/distribute_jobs.py @@ -27,6 +27,7 @@ from __future__ import annotations +import sys import copy import argparse import os @@ -42,6 +43,8 @@ from .input_validation import process_input from .. import _data +__all__ = ["distribute_jobs"] + def read_cmd_line() -> str: """Read the input file and the workflow name from the command line.""" @@ -59,8 +62,12 @@ def main() -> None: """Distribute the user specified by the user.""" # command line argument input_file = read_cmd_line() + distribute_jobs(input_file) + - with open(input_file, 'r') as f: +def distribute_jobs(input_file: str) -> None: + """Distribute the jobs specified by the user.""" + with open(input_file, 'r', encoding="utf8") as f: args = yaml.load(f, Loader=UniqueSafeLoader) # Read and process input @@ -132,6 +139,13 @@ def distribute_computations(config: _data.Distribute, hamiltonians: bool = False path_ham = f"{config.orbitals_type}_hamiltonians" dict_input.hamiltonians_dir = join(copy_config.scratch_path, path_ham) + # Disable keys that imply the necessity of pre-processing in the newly chunked jobs + # (as that's already done in this function) + for name in ["stride", "multiplicity"]: + # Attributes set to `NotImplemented` are ignored when writing the input + if hasattr(copy_config, name): + setattr(copy_config, name, NotImplemented) + # Write input file write_input(folder_path, copy_config) @@ -170,7 +184,7 @@ def write_input(folder_path: str | os.PathLike[str], original_config: _data.Dist } workflow_type = config["workflow"].lower() config['workflow'] = dict_distribute[workflow_type] - with open(file_path, "w") as f: + with open(file_path, "w", encoding="utf8") as f: yaml.dump(config, f,
default_flow_style=False, allow_unicode=True) @@ -210,7 +224,7 @@ def write_slurm_script( content = format_slurm_parameters(slurm_config) + python + mkdir + copy # Write the script - with open(join(dict_input.folder_path, "launch.sh"), 'w') as f: + with open(join(dict_input.folder_path, "launch.sh"), 'w', encoding="utf8") as f: f.write(content) @@ -237,11 +251,12 @@ def format_slurm_parameters(slurm: _data.JobScheduler) -> str: def compute_number_of_geometries(file_name: str | os.PathLike[str]) -> int: """Count the number of geometries in XYZ formant in a given file.""" - with open(file_name, 'r') as f: + with open(file_name, 'r', encoding="utf8") as f: numat = int(f.readline()) cmd = f"wc -l {os.fspath(file_name)}" - wc = subprocess.getoutput(cmd).split()[0] + kwargs = {"encoding": "utf8"} if sys.version_info >= (3, 11) else {} + wc = subprocess.getoutput(cmd, **kwargs).split()[0] lines_per_geometry = numat + 2 diff --git a/nanoqm/workflows/initialization.py b/nanoqm/workflows/initialization.py index 1b0dd6d2..bcc174c2 100644 --- a/nanoqm/workflows/initialization.py +++ b/nanoqm/workflows/initialization.py @@ -52,7 +52,8 @@ def initialize(config: _data.GeneralOptions) -> None: """Initialize all the data required to schedule the workflows.""" with EnableFileHandler(f'{config.project_name}.log'): - logger.info(f"Using nano-qmflows version: {qmflows.__version__} ") + logger.info(f"Using qmflows version: {qmflows.__version__} ") + logger.info(f"Using nano-qmflows version: {__version__} ") logger.info(f"nano-qmflows path is: {nanoqm_path[0]}") logger.info(f"Working directory is: {os.path.abspath('.')}") logger.info(f"Data will be stored in HDF5 file: {config.path_hdf5}") @@ -213,7 +214,7 @@ def split_trajectory(path: str | Path, nblocks: int, pathOut: str | os.PathLike[ list of paths to the xyz geometries """ - with open(path, 'r') as f: + with open(path, 'r', encoding="utf8") as f: # Read First line ls = f.readline() numat = int(ls.split()[0]) @@ -234,11 +235,11 @@ 
def split_trajectory(path: str | Path, nblocks: int, pathOut: str | os.PathLike[ # Path where the splitted xyz files are written prefix = join(pathOut, 'chunk_xyz_') cmd = f'split -a 1 -l {lines_per_block} {path} {prefix}' - output = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True) + output = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True, encoding="utf8") rs = output.communicate() err = rs[1] if err: - raise RuntimeError(f"Submission Errors: {err.decode()}") + raise RuntimeError(f"Submission Errors: {err}") else: return fnmatch.filter(os.listdir(), "chunk_xyz_?") diff --git a/nanoqm/workflows/input_validation.py b/nanoqm/workflows/input_validation.py index dc2e17b7..3ece2d08 100644 --- a/nanoqm/workflows/input_validation.py +++ b/nanoqm/workflows/input_validation.py @@ -94,14 +94,14 @@ def process_input(input_file: PathLike, workflow_name: str) -> _data.GeneralOpti """ schema = schema_workflows[workflow_name] - with open(input_file, 'r') as f: + with open(input_file, 'r', encoding="utf8") as f: dict_input = yaml.load(f.read(), Loader=UniqueSafeLoader) try: d = schema.validate(dict_input) return InputSanitizer(d).sanitize() except SchemaError as e: - msg = f"There was an error in the input yaml provided:\n{e}" + msg = f"There was an error in the provided {workflow_name!r} input yaml:\n{e}" logger.warning(msg) raise @@ -317,5 +317,5 @@ def add_mo_index_range(self) -> None: def print_final_input(self) -> None: """Print the input after post-processing.""" xs = self.user_input.asdict() - with open("input_parameters.yml", "w") as f: + with open("input_parameters.yml", "w", encoding="utf8") as f: yaml.dump(xs, f, indent=4) diff --git a/nanoqm/workflows/run_workflow.py b/nanoqm/workflows/run_workflow.py index 0ef361f8..33b78891 100644 --- a/nanoqm/workflows/run_workflow.py +++ b/nanoqm/workflows/run_workflow.py @@ -49,7 +49,7 @@ def main() -> None: """Parse the command line arguments and run workflow.""" args = parser.parse_args() input_file: str 
= args.i - with open(input_file, 'r') as f: + with open(input_file, 'r', encoding="utf8") as f: dict_input = yaml.load(f, Loader=UniqueSafeLoader) if 'workflow' not in dict_input: raise RuntimeError( diff --git a/nanoqm/workflows/schemas.py b/nanoqm/workflows/schemas.py index 9022eb87..a01ab20d 100644 --- a/nanoqm/workflows/schemas.py +++ b/nanoqm/workflows/schemas.py @@ -132,10 +132,10 @@ def validate(self, data: Any, **kwargs: Any) -> Any: Optional("potential_file_name", default=None): Use(_parse_filenames), # Name(s) of the exchange part of the DFT functional` - Optional("functional_x", default=None): str, + Optional("functional_x", default=None): Or(str, None), # Name(s) of the correlation part of the DFT functional` - Optional("functional_c", default=None): str, + Optional("functional_c", default=None): Or(str, None), # Settings describing the input of the quantum package "cp2k_settings_main": Use(Settings), diff --git a/scripts/pyxaid/iconds_excess_energy.py b/scripts/pyxaid/iconds_excess_energy.py index 91e873ba..2e04aae0 100644 --- a/scripts/pyxaid/iconds_excess_energy.py +++ b/scripts/pyxaid/iconds_excess_energy.py @@ -57,7 +57,7 @@ def main(path_output: str, nstates: int, iconds: list[int], excess: float, delta for i in range(len(iconds)): t += f' {iconds[i]} {indexes[i][0] + 1}\n' - with open('initial_conditions.out', 'w') as f: + with open('initial_conditions.out', 'w', encoding="utf8") as f: f.write(t) diff --git a/scripts/qmflows/coordination_ldos.py b/scripts/qmflows/coordination_ldos.py index 64a8017d..199ddfc4 100644 --- a/scripts/qmflows/coordination_ldos.py +++ b/scripts/qmflows/coordination_ldos.py @@ -78,7 +78,7 @@ def store_optimized_molecule(optimized_geometry: Molecule, name: str, path_resul path_geometry = f"{path_results}/{name}" if not os.path.exists(path_geometry): os.mkdir(path_geometry) - with open(f"{path_geometry}/{name}_OPT.xyz", 'w') as f: + with open(f"{path_geometry}/{name}_OPT.xyz", 'w', encoding="utf8") as f: 
optimized_geometry.writexyz(f) @@ -141,7 +141,7 @@ def store_coordination(coord: NestedDict, name: str, path_results: str) -> None: t += f'{v[0]} {v[1]} "list{i}" {v[2]}\n' path_ldos = f"{path_results}/{name}" - with open(f"{path_ldos}/coord_lists.out", 'w') as f: + with open(f"{path_ldos}/coord_lists.out", 'w', encoding="utf8") as f: f.write(t) diff --git a/scripts/qmflows/mergeHDF5.py b/scripts/qmflows/mergeHDF5.py index 369ea19a..7242998a 100644 --- a/scripts/qmflows/mergeHDF5.py +++ b/scripts/qmflows/mergeHDF5.py @@ -67,7 +67,7 @@ def main() -> None: def touch(fname: str, times: tuple[float, float] | None = None) -> None: """Equivalent to unix touch command""" - with open(fname, 'a'): + with open(fname, 'a', encoding="utf8"): os.utime(fname, times) diff --git a/scripts/qmflows/plot_dos.py b/scripts/qmflows/plot_dos.py index c13d1bee..479fe884 100644 --- a/scripts/qmflows/plot_dos.py +++ b/scripts/qmflows/plot_dos.py @@ -31,7 +31,7 @@ def readatom(filename: str) -> str: # In the first line in column 6, the atom is defined - with open(filename, 'r') as f: + with open(filename, 'r', encoding="utf8") as f: atom = f.readline().split()[6] return atom diff --git a/test/test_cli.py b/test/test_cli.py index 8183a06c..6871d965 100644 --- a/test/test_cli.py +++ b/test/test_cli.py @@ -32,11 +32,13 @@ def test_run_workflow(mocker: MockFixture, tmp_path: Path): def test_run_workflow_no_workflow(mocker: MockFixture, tmp_path: Path): """Check that an error is raised if not workflow is provided.""" # remove workflow keyword - with open(PATH_TEST / "input_fast_test_derivative_couplings.yml", 'r') as handler: + with open( + PATH_TEST / "input_fast_test_derivative_couplings.yml", 'r', encoding="utf8" + ) as handler: input = yaml.load(handler, UniqueSafeLoader) input.pop('workflow') path_input = tmp_path / "wrong_input.yml" - with open(path_input, 'w') as handler: + with open(path_input, 'w', encoding="utf8") as handler: yaml.dump(input, handler) with pytest.raises(RuntimeError) as 
info: diff --git a/test/test_cpk2_error_call.py b/test/test_cpk2_error_call.py index f48f0898..62633a1f 100644 --- a/test/test_cpk2_error_call.py +++ b/test/test_cpk2_error_call.py @@ -10,7 +10,7 @@ def test_cp2k_call_error(tmp_path: Path): """Check cp2k error files.""" path_err = tmp_path / "cp2k.err" - with open(path_err, 'w') as handler: + with open(path_err, 'w', encoding="utf8") as handler: handler.write("Some CP2K error") with pytest.raises(RuntimeError) as info: diff --git a/test/test_distribute.py b/test/test_distribute.py index ddcfb416..760050f5 100644 --- a/test/test_distribute.py +++ b/test/test_distribute.py @@ -1,42 +1,37 @@ """Test the distribution script.""" -from qmflows.type_hints import PathLike -from pathlib import Path -from subprocess import (PIPE, Popen) + import fnmatch import shutil import os +import pytest +from pathlib import Path +from typing import Literal +from nanoqm.workflows.distribute_jobs import distribute_jobs +from nanoqm.workflows.input_validation import process_input -def test_distribute_couplings(tmp_path: PathLike) -> None: - """Check that the scripts to compute a trajectory are generated correctly.""" - call_distribute( - tmp_path, - "distribute_jobs.py -i test/test_files/input_test_distribute_derivative_couplings.yml", - ) - +_WorkflowKind = Literal["derivative_couplings", "absorption_spectrum"] -def test_distribute_absorption(tmp_path: PathLike) -> None: - call_distribute( - tmp_path, - "distribute_jobs.py -i test/test_files/input_test_distribute_absorption_spectrum.yml", - ) +JOBS = { + "derivative_couplings": "test/test_files/input_test_distribute_derivative_couplings.yml", + "absorption_spectrum": "test/test_files/input_test_distribute_absorption_spectrum.yml", +} -def call_distribute(tmp_path: PathLike, cmd: str) -> None: +@pytest.mark.parametrize("workflow,file", JOBS.items(), ids=JOBS) +def test_distribute(workflow: _WorkflowKind, file: str) -> None: """Execute the distribute script and check that if finish 
succesfully.""" try: - p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True, text=True) - _, err = p.communicate() - if err: - raise RuntimeError(err) - check_scripts() + distribute_jobs(file) + check_scripts(workflow) finally: remove_chunk_folder() -def check_scripts() -> None: +def check_scripts(workflow: _WorkflowKind) -> None: """Check that the distribution scripts were created correctly.""" paths = fnmatch.filter(os.listdir('.'), "chunk*") + cwd_old = os.getcwd() # Check that the files are created correctly files = ["launch.sh", "chunk_xyz*", "input.yml"] @@ -48,6 +43,12 @@ def check_scripts() -> None: except StopIteration: msg = f"There is no such file: {f!r}" raise RuntimeError(msg) from None + if f == "input.yml": + os.chdir(p) + try: + process_input(f, workflow) + finally: + os.chdir(cwd_old) def remove_chunk_folder() -> None: diff --git a/test/test_initialization.py b/test/test_initialization.py index 2ab71270..9f05f5e4 100644 --- a/test/test_initialization.py +++ b/test/test_initialization.py @@ -29,7 +29,7 @@ def test_run_workflow(tmp_path: Path) -> None: def create_config(tmp_path: Path, scrath_is_None: bool) -> None: path = PATH_TEST / "input_fast_test_derivative_couplings.yml" - with open(path, 'r') as f: + with open(path, 'r', encoding="utf8") as f: inp = yaml.load(f, UniqueSafeLoader) # change scratch @@ -42,7 +42,7 @@ def create_config(tmp_path: Path, scrath_is_None: bool) -> None: inp["path_hdf5"] = (Path(inp["scratch_path"]) / "test_init.hdf5").as_posix() path_inp = tmp_path / "test_init.yml" - with open(path_inp, 'w') as f: + with open(path_inp, 'w', encoding="utf8") as f: yaml.dump(inp, f) config = process_input(path_inp, 'derivative_couplings') @@ -85,7 +85,7 @@ def get_input( pass # Construct a set with all keys that are supposed to be in the .hdf5 file - with open(PATH_TEST / "test_initialization.yaml", "r") as f: + with open(PATH_TEST / "test_initialization.yaml", "r", encoding="utf8") as f: keys = set(yaml.load(f, 
Loader=yaml.SafeLoader)[name]) return config, keys diff --git a/test/test_input_validation.py b/test/test_input_validation.py index 1aa5c781..cde9dc5c 100644 --- a/test/test_input_validation.py +++ b/test/test_input_validation.py @@ -30,7 +30,7 @@ def test_basic(self) -> None: @pytest.mark.parametrize("key", ["potential_file_name", "basis_set_file_name"]) def test_filename_override(self, key: str) -> None: """Test that filename overrides are respected.""" - with open(PATH_TEST / "input_test_pbe0.yml", "r") as f: + with open(PATH_TEST / "input_test_pbe0.yml", "r", encoding="utf8") as f: s = plams.Settings(yaml.load(f, Loader=yaml.SafeLoader)) dft1 = s.cp2k_general_settings.cp2k_settings_main.specific.cp2k.force_eval.dft dft1[key] = "test" @@ -44,7 +44,7 @@ def test_filename_override(self, key: str) -> None: @pytest.mark.parametrize("is_list", [True, False], ids=["list", "str"]) @pytest.mark.parametrize("key", ["potential_file_name", "basis_file_name"]) def test_basis_filename(self, key: str, is_list: bool) -> None: - with open(PATH_TEST / "input_test_pbe0.yml", "r") as f: + with open(PATH_TEST / "input_test_pbe0.yml", "r", encoding="utf8") as f: s = plams.Settings(yaml.load(f, Loader=yaml.SafeLoader)) s.cp2k_general_settings[key] = ["a", "b", "c"] if is_list else "a" @@ -60,7 +60,7 @@ def test_basis_filename(self, key: str, is_list: bool) -> None: assert dft2["basis_set_file_name" if key == "basis_file_name" else key] == ref def test_basis(self) -> None: - with open(PATH_TEST / "input_test_pbe0.yml", "r") as f: + with open(PATH_TEST / "input_test_pbe0.yml", "r", encoding="utf8") as f: s = plams.Settings(yaml.load(f, Loader=yaml.SafeLoader)) s.cp2k_general_settings.basis = "DZVP-MOLOPT-MGGA-GTH" s.cp2k_general_settings.potential = "GTH-MGGA" @@ -84,7 +84,7 @@ def test_basis(self) -> None: @pytest.mark.parametrize("functional_c", [True, False]) @pytest.mark.parametrize("functional_x", [True, False]) def test_functional(self, functional_c: bool, functional_x: bool) -> 
None: - with open(PATH_TEST / "input_test_pbe0.yml", "r") as f: + with open(PATH_TEST / "input_test_pbe0.yml", "r", encoding="utf8") as f: s = plams.Settings(yaml.load(f, Loader=yaml.SafeLoader)) s.cp2k_general_settings.cp2k_settings_main.specific.template = "main" if functional_c: diff --git a/test/test_read_cp2k_basis.py b/test/test_read_cp2k_basis.py index 88ec7f38..b772c1be 100644 --- a/test/test_read_cp2k_basis.py +++ b/test/test_read_cp2k_basis.py @@ -48,7 +48,7 @@ def test_legacy(self, tmp_path: Path) -> None: shutil.copy2(PATH_TEST / "legacy.hdf5", hdf5_file) store_cp2k_basis(hdf5_file, PATH_TEST / "BASIS_MOLOPT") - with open(PATH_TEST / "test_initialization.yaml", "r") as f1: + with open(PATH_TEST / "test_initialization.yaml", "r", encoding="utf8") as f1: ref = set(yaml.load(f1, Loader=yaml.SafeLoader)["MOLOPT"]) with h5py.File(hdf5_file, "r") as f2: diff --git a/test/test_tools.py b/test/test_tools.py index 21cd778b..8da986e2 100644 --- a/test/test_tools.py +++ b/test/test_tools.py @@ -9,7 +9,7 @@ def test_calc_sphericals(): """Test the calculation of spherical functions.""" - with open(PATH_TEST / 'Cd33Se33.xyz', 'r') as f: + with open(PATH_TEST / 'Cd33Se33.xyz', 'r', encoding="utf8") as f: mol = parse_string_xyz(f.read()) path_hdf5 = PATH_TEST / "Cd33Se33.hdf5" xs = number_spherical_functions_per_atom( diff --git a/test/utilsTest.py b/test/utilsTest.py index 1bff3935..b5fd7c39 100644 --- a/test/utilsTest.py +++ b/test/utilsTest.py @@ -75,7 +75,7 @@ def _read_result_file(result: Result, extension: str, max_line: int = 100) -> "N iterator = (os.path.join(root, i) for i in os.listdir(root) if os.path.splitext(i)[1] == extension) for i in iterator: - with open(i, "r") as f: + with open(i, "r", encoding="utf8") as f: ret_list = f.readlines() ret = "..." if len(ret_list) > max_line else "" ret += "".join(ret_list[-max_line:])