diff --git a/brainglobe_utils/IO/cells.py b/brainglobe_utils/IO/cells.py
index ddb1636..68592f8 100644
--- a/brainglobe_utils/IO/cells.py
+++ b/brainglobe_utils/IO/cells.py
@@ -29,11 +29,9 @@ def get_cells(
     cells_only: bool = False,
     cell_type: Optional[int] = None,
 ):
-    # TODO: implement csv read
     if cells_file_path.endswith(".xml"):
         return get_cells_xml(cells_file_path, cells_only=cells_only)
     elif cells_file_path.endswith(".yml"):
-        # Not general
        return get_cells_yml(cells_file_path, ignore_type=True)
     elif os.path.isdir(cells_file_path):
         try:
diff --git a/brainglobe_utils/general/system.py b/brainglobe_utils/general/system.py
index 48fafc0..d84f56b 100644
--- a/brainglobe_utils/general/system.py
+++ b/brainglobe_utils/general/system.py
@@ -218,8 +218,8 @@ def how_many_cores_with_sufficient_ram(
     this to ensure that the number of processes isn't too high.
     :param float fraction_free_ram: Fraction of the ram to ensure stays free
     regardless of the current program.
-    :param float max_ram_usage: Maximum amount of RAM (in bytes)
-    to use (allthough available may be lower)
+    :param float max_ram_usage: The maximum amount of RAM (in bytes)
+    to use (although available may be lower)
     :return: How many CPU cores could be theoretically used based on the
     amount of free RAM
     """
@@ -271,21 +271,6 @@ def get_free_ram():
     return psutil.virtual_memory().available
 
 
-def sanitize_num_processes(num_processes, min_processes, parallel=False):
-    """
-    Returns False to prevent parallel processing in case more processes have
-    been requested than can be used.
-    :param num_processes: How many processes have been requested
-    :param min_processes: Minimum number of cores to keep free
-    :param parallel: If parallel is requested
-    :return bool: True if num_processes is sensible
-    """
-    if parallel:
-        if num_processes < min_processes:
-            parallel = False
-    return parallel
-
-
 def safe_execute_command(cmd, log_file_path=None, error_file_path=None):
     """
     Executes a command in the terminal, making sure that the output can
diff --git a/tests/tests/test_IO/test_cell_io.py b/tests/tests/test_IO/test_cell_io.py
index 40f9b01..85a5a91 100644
--- a/tests/tests/test_IO/test_cell_io.py
+++ b/tests/tests/test_IO/test_cell_io.py
@@ -1,5 +1,6 @@
 import os
 
+import pandas as pd
 import pytest
 from natsort import natsorted
 
@@ -317,11 +318,11 @@ def test_get_cells():
     assert cell_io.get_cells("misc_format.abc")
 
 
-def test_cells_to_xml(tmpdir):
+def test_cells_to_xml(tmp_path):
     cells = cell_io.get_cells(xml_path)
-    tmp_cells_out_path = os.path.join(str(tmpdir), "cells.xml")
+    tmp_cells_out_path = tmp_path / "cells.xml"
     cell_io.cells_to_xml(cells, tmp_cells_out_path)
-    assert cells == cell_io.get_cells(tmp_cells_out_path)
+    assert cells == cell_io.get_cells(str(tmp_cells_out_path))
 
 
 def test_cells_xml_to_dataframe():
@@ -331,3 +332,15 @@
     assert cells_df.x.tolist() == x_vals
     assert cells_df.y.tolist() == y_vals
     assert cells_df.z.tolist() == z_vals
+
+
+def test_cells_to_csv(tmp_path):
+    cells = cell_io.get_cells(xml_path)
+    tmp_cells_out_path = tmp_path / "cells.csv"
+    cell_io.cells_to_csv(cells, tmp_cells_out_path)
+    cells_df = pd.read_csv(tmp_cells_out_path)
+    assert len(cells_df) == 65
+    assert cells_df.type.tolist() == type_vals
+    assert cells_df.x.tolist() == x_vals
+    assert cells_df.y.tolist() == y_vals
+    assert cells_df.z.tolist() == z_vals
diff --git a/tests/tests/test_general/test_exceptions.py b/tests/tests/test_general/test_exceptions.py
new file mode 100644
index 0000000..0bb050f
--- /dev/null
+++ b/tests/tests/test_general/test_exceptions.py
@@ -0,0 +1,12 @@
+import pytest
+from brainglobe_utils.general.exceptions import CommandLineInputError
+
+
+def raise_CommandLineInputError():
+    raise CommandLineInputError("Error")
+
+
+def test_CommandLineInputError():
+    with pytest.raises(CommandLineInputError) as e:
+        raise_CommandLineInputError()
+    assert str(e.value) == "Error"
diff --git a/tests/tests/test_general/test_pathlib.py b/tests/tests/test_general/test_pathlib.py
new file mode 100644
index 0000000..479b3aa
--- /dev/null
+++ b/tests/tests/test_general/test_pathlib.py
@@ -0,0 +1,9 @@
+from pathlib import Path
+from brainglobe_utils.general.pathlib import append_to_pathlib_stem
+
+
+def test_append_to_pathlib_stem():
+    path = Path("path", "to", "file.txt")
+    appended_path = append_to_pathlib_stem(path, "_appended")
+    assert appended_path == Path("path", "to", "file_appended.txt")
+
diff --git a/tests/tests/test_general/test_system.py b/tests/tests/test_general/test_system.py
index 482761f..34f7167 100644
--- a/tests/tests/test_general/test_system.py
+++ b/tests/tests/test_general/test_system.py
@@ -2,7 +2,7 @@
 import random
 from pathlib import Path
 from random import shuffle
-from unittest.mock import patch
+from unittest.mock import Mock, patch
 
 import pytest
 
@@ -32,6 +32,23 @@
 sorted_cubes_dir = [os.path.join(str(cubes_dir), cube) for cube in cubes]
 
 
+@pytest.fixture
+def mock_disk_usage():
+    """Fixture to mock shutil.disk_usage."""
+    return Mock(
+        return_value=(1000000, 500000, 500000)
+    )  # total, used, free space in bytes
+
+
+@pytest.fixture
+def mock_statvfs():
+    """Fixture to mock os.statvfs."""
+    mock_stats = Mock()
+    mock_stats.f_frsize = 1024  # Fragment size
+    mock_stats.f_bavail = 1000  # Free blocks
+    return mock_stats
+
+
 def test_replace_extension():
     test_file = "test_file.sh"
     test_ext = "txt"
@@ -118,25 +135,157 @@ def test_max_processes_windows_low():
 
 
 def test_max_processes_windows_high():
-    cpu_count = 128
+    mock_cpu_count = 128
     with patch(
         "brainglobe_utils.general.system.platform.system",
         return_value="Windows",
     ):
         with patch(
             "brainglobe_utils.general.system.psutil.cpu_count",
-            return_value=cpu_count,
+            return_value=mock_cpu_count,
         ):
             # 61 is max on Windows
-            assert system.limit_cpus_windows(cpu_count) == 61
+            assert system.limit_cpus_windows(mock_cpu_count) == 61
 
+
+@pytest.mark.parametrize("cores_available", [1, 100, 1000])
+def test_cores_available_in_slurm_environment(cores_available):
+    mock_slurm_parameters = Mock()
+    mock_slurm_parameters.allocated_cores = cores_available
 
-class Paths:
-    def __init__(self, directory):
-        self.one = directory / "one.aaa"
-        self.two = directory / "two.bbb"
-        self.tmp__three = directory / "three.ccc"
-        self.tmp__four = directory / "four.ddd"
+    with patch.dict(
+        "brainglobe_utils.general.system.os.environ", {"SLURM_JOB_ID": "1"}
+    ), patch(
+        "brainglobe_utils.general.system.slurmio.SlurmJobParameters",
+        return_value=mock_slurm_parameters,
+    ):
+        assert system.get_cores_available() == cores_available
+
+
+@pytest.mark.parametrize("cores_available", [1, 100, 1000])
+def test_cores_available(cores_available):
+    with patch(
+        "brainglobe_utils.general.system.psutil.cpu_count",
+        return_value=cores_available,
+    ):
+        assert system.get_cores_available() == cores_available
+
+
+@pytest.mark.parametrize(
+    "ram_needed_per_cpu, fraction_free_ram, max_ram_usage, "
+    "free_system_ram, expected_cores",
+    [
+        (
+            1024**3,
+            0.1,
+            None,
+            16 * 1024**3,
+            14,
+        ),  # 1 GB per core, 0.1 fraction free, no max ram,
+        # 16GB free on the system, expect 14
+        (
+            2 * 1024**3,
+            0.5,
+            None,
+            256 * 1024**3,
+            64,
+        ),  # 2 GB per core, 0.5 fraction free, no max ram,
+        # 256GB free on the system, expect 64
+        (
+            1024**3,
+            0.5,
+            10 * 1024**3,
+            256 * 1024**3,
+            5,
+        ),  # 1 GB per core, 0.5 fraction free, 10GB max ram,
+        # 256GB free on the system, expect 5
+    ],
+)
+def test_how_many_cores_with_sufficient_ram(
+    ram_needed_per_cpu,
+    fraction_free_ram,
+    max_ram_usage,
+    free_system_ram,
+    expected_cores,
+):
+    with patch(
+        "brainglobe_utils.general.system.get_free_ram",
+        return_value=free_system_ram,
+    ):
+        assert (
+            system.how_many_cores_with_sufficient_ram(
+                ram_needed_per_cpu,
+                fraction_free_ram,
+                max_ram_usage=max_ram_usage,
+            )
+            == expected_cores
+        )
+
+
+def test_how_many_cores_with_sufficient_ram_in_slurm_environment():
+    ram_needed_per_cpu = 1024**3  # 1 GB
+    free_system_ram = 16 * 1024**3  # 16 GB
+
+    mock_slurm_parameters = Mock()
+    mock_slurm_parameters.allocated_memory = free_system_ram
+
+    with patch.dict(
+        "brainglobe_utils.general.system.os.environ", {"SLURM_JOB_ID": "1"}
+    ), patch(
+        "brainglobe_utils.general.system.slurmio.SlurmJobParameters",
+        return_value=mock_slurm_parameters,
+    ):
+        assert (
+            system.how_many_cores_with_sufficient_ram(ram_needed_per_cpu) == 14
+        )  # (0.9 * 16) GB / 1 GB per core
+
+
+def test_disk_free_gb_windows(mock_disk_usage):
+    with patch(
+        "brainglobe_utils.general.system.platform.system",
+        return_value="Windows",
+    ), patch(
+        "brainglobe_utils.general.system.os.path.splitdrive",
+        return_value=("C:\\", ""),
+    ), patch(
+        "brainglobe_utils.general.system.shutil.disk_usage", mock_disk_usage
+    ):
+        free_space = system.disk_free_gb("C:\\path\\to\\file")
+        assert free_space == 500000 / 1024**3
+
+
+def test_disk_free_gb_linux(mock_statvfs):
+    with patch(
+        "brainglobe_utils.general.system.platform.system", return_value="Linux"
+    ), patch(
+        "brainglobe_utils.general.system.os.statvfs", return_value=mock_statvfs
+    ):
+        free_space = system.disk_free_gb("/path/to/file")
+        assert free_space == (1024 * 1000) / 1024**3  # Free space in GB
+
+
+def test_disk_free_gb_macos(mock_statvfs):
+    with patch(
+        "brainglobe_utils.general.system.platform.system",
+        return_value="Darwin",
+    ), patch(
+        "brainglobe_utils.general.system.os.statvfs", return_value=mock_statvfs
+    ):
+        free_space = system.disk_free_gb("/path/to/file")
+        assert free_space == (1024 * 1000) / 1024**3  # Free space in GB
+
+
+def test_get_free_ram():
+    mock_free_ram = 1000000001
+
+    mock_virtual_memory = Mock()
+    mock_virtual_memory.available = mock_free_ram
+
+    with patch(
+        "brainglobe_utils.general.system.psutil.virtual_memory",
+        return_value=mock_virtual_memory,
+    ):
+        assert system.get_free_ram() == mock_free_ram
 
 
 def write_n_random_files(n, dir, min_size=32, max_size=2048):
@@ -146,8 +295,8 @@ def write_n_random_files(n, dir, min_size=32, max_size=2048):
             fout.write(os.urandom(size))
 
 
-def test_delete_directory_contents(tmpdir):
-    delete_dir = os.path.join(str(tmpdir), "delete_dir")
+def test_delete_directory_contents_with_progress(tmp_path):
+    delete_dir = tmp_path / "delete_dir"
     os.mkdir(delete_dir)
     write_n_random_files(10, delete_dir)
 
@@ -156,3 +305,15 @@
 
     system.delete_directory_contents(delete_dir, progress=True)
     assert os.listdir(delete_dir) == []
+
+
+def test_delete_directory_contents(tmp_path):
+    delete_dir = tmp_path / "delete_dir"
+    os.mkdir(delete_dir)
+    write_n_random_files(10, delete_dir)
+
+    # check the directory isn't empty first
+    assert not os.listdir(delete_dir) == []
+
+    system.delete_directory_contents(delete_dir, progress=False)
+    assert os.listdir(delete_dir) == []
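
Note on the expected values in test_how_many_cores_with_sufficient_ram: they follow from capping the free system RAM at max_ram_usage (when given), holding back fraction_free_ram of that budget, and dividing the remainder by ram_needed_per_cpu, rounded down. A minimal sketch of that arithmetic, assuming this is the rounding behaviour implied by the parametrized cases; the expected_cores helper below is illustrative only and is not part of brainglobe_utils.general.system:

def expected_cores(ram_per_cpu, fraction_free, free_ram, max_ram=None):
    # Hypothetical helper mirroring the test expectations, not the library code.
    # Cap the RAM budget at max_ram when provided, otherwise use all free RAM.
    budget = min(free_ram, max_ram) if max_ram is not None else free_ram
    # Hold back `fraction_free` of the budget, then count whole cores.
    return int((1 - fraction_free) * budget // ram_per_cpu)


GB = 1024**3
assert expected_cores(1 * GB, 0.1, 16 * GB) == 14               # first parametrized case
assert expected_cores(2 * GB, 0.5, 256 * GB) == 64              # 2 GB per core case
assert expected_cores(1 * GB, 0.5, 256 * GB, max_ram=10 * GB) == 5  # capped at 10 GB

Flooring rather than rounding keeps the estimate conservative, so the resulting process count never exceeds the available RAM budget.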