diff --git a/mapreader/download/sheet_downloader.py b/mapreader/download/sheet_downloader.py
index ab540b4e..e30a3b93 100644
--- a/mapreader/download/sheet_downloader.py
+++ b/mapreader/download/sheet_downloader.py
@@ -533,7 +533,7 @@ def _initialise_merger(self, path_save: str):
"""
self.merger = TileMerger(output_folder=f"{path_save}/")
- def _check_map_sheet_exists(self, feature: dict) -> bool:
+    def _check_map_sheet_exists(self, feature: dict, metadata_fname: str) -> str | bool:
"""
Checks if a map sheet is already saved.
@@ -544,51 +544,87 @@ def _check_map_sheet_exists(self, feature: dict) -> bool:
Returns
-------
-        bool
-            True if file exists, False if not.
+        str or bool
+            The image id if the file exists, False if not.
"""
- map_name = str("map_" + feature["properties"]["IMAGE"])
path_save = self.merger.output_folder
- if os.path.exists(f"{path_save}{map_name}.png"):
+
+ try:
+ # get image id with same coords in metadata
+ existing_metadata_df = pd.read_csv(
+ f"{path_save}{metadata_fname}", sep=",", index_col=0
+ )
+ except FileNotFoundError:
+ return False
+
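+    # rebuild the sheet's polygon from its grid bounding box and look up its bounds in the saved metadata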
+ polygon = get_polygon_from_grid_bb(feature["grid_bb"])
+ if str(polygon.bounds) in existing_metadata_df["coordinates"].values:
+ image_id = existing_metadata_df[
+ existing_metadata_df["coordinates"] == str(polygon.bounds)
+ ].iloc[0]["name"]
+ else:
+ return False # coordinates not in metadata means image doesn't exist
+
+ if os.path.exists(f"{path_save}{image_id}"):
try:
- mpimg.imread(f"{path_save}{map_name}.png")
- print(
- f'[INFO] "{path_save}{map_name}.png" already exists. Skipping download.'
- )
- return True
+ # check image is valid
+ mpimg.imread(f"{path_save}{image_id}")
+ return image_id
except OSError:
return False
return False
- def _download_map(self, feature: dict, download_in_parallel: bool = True) -> bool:
+ def _download_map(
+ self,
+ feature: dict,
+ existing_id: str | bool,
+ download_in_parallel: bool = True,
+ overwrite: bool = False,
+ ) -> str | bool:
"""
-        Downloads a single map sheet and saves as png file.
+        Downloads a single map sheet and saves it as a png file.
Parameters
----------
feature : dict
+ The feature for which to download the map sheet.
+ existing_id : str | bool
+            The existing image id if the map sheet already exists, False if not.
+ download_in_parallel : bool, optional
+ Whether to download tiles in parallel, by default ``True``.
+ overwrite : bool, optional
+ Whether to overwrite existing maps, by default ``False``.
Returns
-------
- bool
- True if map was downloaded successfully, False if not.
+ str or bool
+            The image path if the map was downloaded successfully, False if not.
"""
- map_name = str("map_" + feature["properties"]["IMAGE"])
self.downloader.download_tiles(
feature["grid_bb"], download_in_parallel=download_in_parallel
)
- success = self.merger.merge(feature["grid_bb"], map_name)
- if success:
- print(f'[INFO] Downloaded "{map_name}.png"')
+
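+    # reuse the existing file name when re-downloading so metadata rows stay consistent; otherwise build one from the image ID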
+ if existing_id is False:
+ map_name = f"map_{feature['properties']['IMAGE']}"
+ else:
+        map_name = os.path.splitext(existing_id)[0]  # strip the file extension
+
+ img_path = self.merger.merge(
+ feature["grid_bb"], file_name=map_name, overwrite=overwrite
+ )
+
+ if img_path is not False:
+ print(f'[INFO] Downloaded "{img_path}"')
else:
- print(f'[WARNING] Download of "{map_name}.png" was unsuccessful.')
+        print(f'[WARNING] Download of "{map_name}" was unsuccessful.')
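+    # clean up the temporary tile folder whether or not the merge succeeded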
shutil.rmtree(DEFAULT_TEMP_FOLDER)
- return success
+ return img_path
def _save_metadata(
self,
feature: dict,
out_filepath: str,
+ img_path: str,
metadata_to_save: dict | None = None,
**kwargs: dict | None,
) -> None:
@@ -602,6 +638,8 @@ def _save_metadata(
The feature for which to extract the metadata from
out_filepath : str
The path to save metadata csv.
+ img_path : str
+ The path to the downloaded map sheet.
metadata_to_save : dict, optional
A dictionary containing column names (str) and metadata keys (str or list) to save to metadata csv.
Multilayer keys should be passed as a list, i.e. ["key1","key2"] will search for ``self.features[i]["key1"]["key2"]``.
@@ -631,7 +669,7 @@ def _save_metadata(
metadata_dict = {col: None for col in metadata_cols}
# get default metadata
- metadata_dict["name"] = str("map_" + feature["properties"]["IMAGE"] + ".png")
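+    # record the file name actually saved to disk (it may carry a _1, _2, ... suffix)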
+ metadata_dict["name"] = os.path.basename(img_path)
metadata_dict["url"] = str(feature["properties"]["IMAGEURL"])
if not self.published_dates:
date_col = kwargs.get("date_col", None)
@@ -705,16 +743,25 @@ def _download_map_sheets(
"""
for feature in tqdm(features):
- if not overwrite:
- if self._check_map_sheet_exists(feature):
- continue
- success = self._download_map(
- feature, download_in_parallel=download_in_parallel
+ existing_id = self._check_map_sheet_exists(feature, metadata_fname)
+ if (
+ not overwrite and existing_id is not False
+ ): # if map already exists and overwrite is False then skip
+ print(f'[INFO] "{existing_id}" already exists. Skipping download.')
+ continue
+ img_path = self._download_map(
+ feature,
+ existing_id,
+ download_in_parallel=download_in_parallel,
+ overwrite=overwrite,
)
- if success:
+ if img_path is not False:
metadata_path = f"{path_save}/{metadata_fname}"
self._save_metadata(
- feature=feature, out_filepath=metadata_path, **kwargs
+ feature=feature,
+ out_filepath=metadata_path,
+ img_path=img_path,
+ **kwargs,
)
def download_all_map_sheets(
diff --git a/mapreader/download/tile_merging.py b/mapreader/download/tile_merging.py
index 4fdc110b..461e5b78 100644
--- a/mapreader/download/tile_merging.py
+++ b/mapreader/download/tile_merging.py
@@ -142,7 +142,12 @@ def _load_tile_size(self, grid_bb: GridBoundingBox):
tile_size = img_size[0]
return tile_size
- def merge(self, grid_bb: GridBoundingBox, file_name: str | None = None) -> bool:
+ def merge(
+ self,
+ grid_bb: GridBoundingBox,
+ file_name: str | None = None,
+ overwrite: bool = False,
+ ) -> str | bool:
"""Merges cells contained within GridBoundingBox.
Parameters
@@ -151,11 +156,13 @@ def merge(self, grid_bb: GridBoundingBox, file_name: str | None = None) -> bool:
GridBoundingBox containing tiles to merge
file_name : Union[str, None], optional
Name to use when saving map
-
+            If None, a default name will be used, by default None
+        overwrite : bool, optional
+            Whether to overwrite existing files, by default False
+
Returns
-------
- bool
- True if file has successfully downloaded, False if not.
+ str or bool
+            The output path if the file saved successfully, False if not.
"""
os.makedirs(self.output_folder, exist_ok=True)
@@ -191,11 +198,18 @@ def merge(self, grid_bb: GridBoundingBox, file_name: str | None = None) -> bool:
file_name = self._get_output_name(grid_bb)
out_path = f"{self.output_folder}{file_name}.{self.img_output_format[0]}"
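+    # if not overwriting, suffix the file name with _1, _2, ... until it is unused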
+ if not overwrite:
+ i = 1
+ while os.path.exists(out_path):
+ out_path = (
+ f"{self.output_folder}{file_name}_{i}.{self.img_output_format[0]}"
+ )
+ i += 1
merged_image.save(out_path, self.img_output_format[1])
- success = True if os.path.exists(out_path) else False
- if success:
- logger.info(f"Merge successful! The image has been stored at '{out_path}'")
- else:
+ success = out_path if os.path.exists(out_path) else False
+ if success is False:
logger.warning(f"Merge unsuccessful! '{out_path}' not saved.")
+ else:
+ logger.info(f"Merge successful! The image has been stored at '{out_path}'")
return success
diff --git a/tests/sample_files/test_json.json b/tests/sample_files/test_json.json
index fcc2df72..af910502 100644
--- a/tests/sample_files/test_json.json
+++ b/tests/sample_files/test_json.json
@@ -1,6 +1,6 @@
{
"type": "FeatureCollection",
- "totalFeatures": 4,
+ "totalFeatures": 6,
"features": [
{
"type": "Feature",
@@ -217,6 +217,194 @@
"WFS_TITLE": "London VI.SE, Revised: 1893 to 1894, Published: 1894 to 1896",
"test": "test"
}
+ },
+ {
+ "type": "Feature",
+ "id": "Six_Inch_GB_WFS.107",
+ "test_date": "2021",
+ "geometry": {
+ "type": "MultiPolygon",
+ "coordinates": [
+ [
+ [
+ [
+ -4.7674,
+ 53.2872
+ ],
+ [
+ -4.7682,
+ 53.3161
+ ],
+ [
+ -4.6957,
+ 53.3168
+ ],
+ [
+ -4.695,
+ 53.2879
+ ],
+ [
+ -4.7674,
+ 53.2872
+ ]
+ ]
+ ]
+ ]
+ },
+ "geometry_name": "the_geom",
+ "properties": {
+ "IMAGE": "101603986",
+ "SERIES": "OS 6 Inch to the Mile - England & Wales",
+ "SET": "6Q_A",
+ "COUNTRY": "WAL",
+ "COUNTY": "Anglesey",
+ "SHEET_MAP": "X.NE & XI.NW",
+ "SHEET_NO": "010_NE",
+ "EDITION": "2",
+ "INSET_EXT": "",
+ "RESURVEY": "",
+ "SUR_STA": "1899",
+ "SUR_END": "1899",
+ "PSUR_STA": "",
+ "PSUR_END": "",
+ "ENG_STA": "",
+ "ENG_END": "",
+ "REV_STA": "",
+ "REV_END": "",
+ "PUB_STA": "1901",
+ "PUB_END": "1901",
+ "PPUB_STA": "",
+ "PPUB_END": "",
+ "PRI_STA": "",
+ "PRI_END": "",
+ "RPRI_STA": "",
+ "RPRI_END": "",
+ "MSD_SIG": "",
+ "LEV_STA": "",
+ "LEV_END": "",
+ "BOUND_STA": "",
+ "BOUND_END": "",
+ "SUR_SORT": "1899",
+ "PUB_SORT": "1901",
+ "PARISH": "Holyhead Rural; Holyhead Urban",
+ "RAILWAYS": "",
+ "RECORDSET": "",
+ "BNDRY_INST": "",
+ "YEAR": "1901",
+ "GROUP": "102",
+ "IN_NLS": "Y",
+ "NOTE_MAP": "",
+ "NOTE_LIST": "",
+ "SHEET": "Anglesey X.NE & XI.NW",
+                "DATES": "Series: Ordnance Survey. Six-inch to the mile\nRevised: 1899\nPublished: 1901",
+ "IMAGETHUMB": "https://deriv.nls.uk/dcn4/1016/0398/101603986.4.jpg",
+ "IMAGEURL": "https://maps.nls.uk/view/101603986",
+ "IIIF_JSON": "https://map-view.nls.uk/iiif/2/10160%2F101603986/info.json",
+ "WFS_TITLE": "Anglesey X.NE & XI.NW, Revised: 1899, Published: 1901",
+ "WFS": "Y",
+ "OR_Num": "0",
+ "test": "test"
+ },
+ "bbox": [
+ -4.7682,
+ 53.2872,
+ -4.695,
+ 53.3168
+ ]
+ },
+ {
+ "type": "Feature",
+ "id": "Six_Inch_GB_WFS.116",
+ "test_date": "2021",
+ "geometry": {
+ "type": "MultiPolygon",
+ "coordinates": [
+ [
+ [
+ [
+ -4.695,
+ 53.2879
+ ],
+ [
+ -4.6957,
+ 53.3168
+ ],
+ [
+ -4.6233,
+ 53.3175
+ ],
+ [
+ -4.6226,
+ 53.2885
+ ],
+ [
+ -4.695,
+ 53.2879
+ ]
+ ]
+ ]
+ ]
+ },
+ "geometry_name": "the_geom",
+ "properties": {
+ "IMAGE": "101603986",
+ "SERIES": "OS 6 Inch to the Mile - England & Wales",
+ "SET": "6Q_A",
+ "COUNTRY": "WAL",
+ "COUNTY": "Anglesey",
+ "SHEET_MAP": "X.NE & XI.NW",
+ "SHEET_NO": "011_NW",
+ "EDITION": "2",
+ "INSET_EXT": "",
+ "RESURVEY": "",
+ "SUR_STA": "1899",
+ "SUR_END": "1899",
+ "PSUR_STA": "",
+ "PSUR_END": "",
+ "ENG_STA": "",
+ "ENG_END": "",
+ "REV_STA": "",
+ "REV_END": "",
+ "PUB_STA": "1901",
+ "PUB_END": "1901",
+ "PPUB_STA": "",
+ "PPUB_END": "",
+ "PRI_STA": "",
+ "PRI_END": "",
+ "RPRI_STA": "",
+ "RPRI_END": "",
+ "MSD_SIG": "",
+ "LEV_STA": "",
+ "LEV_END": "",
+ "BOUND_STA": "",
+ "BOUND_END": "",
+ "SUR_SORT": "1899",
+ "PUB_SORT": "1901",
+ "PARISH": "Holyhead Rural; Holyhead Urban",
+ "RAILWAYS": "",
+ "RECORDSET": "",
+ "BNDRY_INST": "",
+ "YEAR": "1901",
+ "GROUP": "102",
+ "IN_NLS": "Y",
+ "NOTE_MAP": "",
+ "NOTE_LIST": "",
+ "SHEET": "Anglesey X.NE & XI.NW",
+                "DATES": "Series: Ordnance Survey. Six-inch to the mile\nRevised: 1899\nPublished: 1901",
+ "IMAGETHUMB": "https://deriv.nls.uk/dcn4/1016/0398/101603986.4.jpg",
+ "IMAGEURL": "https://maps.nls.uk/view/101603986",
+ "IIIF_JSON": "https://map-view.nls.uk/iiif/2/10160%2F101603986/info.json",
+ "WFS_TITLE": "Anglesey X.NE & XI.NW, Revised: 1899, Published: 1901",
+ "WFS": "Y",
+ "OR_Num": "0",
+ "test": "test"
+ },
+ "bbox": [
+ -4.6957,
+ 53.2879,
+ -4.6226,
+ 53.3175
+ ]
}
],
"crs": {
diff --git a/tests/test_sheet_downloader.py b/tests/test_sheet_downloader.py
index 9db67d27..218e2dd6 100644
--- a/tests/test_sheet_downloader.py
+++ b/tests/test_sheet_downloader.py
@@ -6,11 +6,14 @@
import pandas as pd
import pytest
+from PIL import Image
from pytest import approx
from shapely.geometry import LineString, MultiPolygon, Polygon
from mapreader import SheetDownloader
from mapreader.download.data_structures import GridBoundingBox
+from mapreader.download.tile_loading import TileDownloader
+from mapreader.download.tile_merging import TileMerger
@pytest.fixture
@@ -20,14 +23,14 @@ def sample_dir():
@pytest.fixture
def sheet_downloader(sample_dir):
- test_json = f"{sample_dir}/test_json.json" # contains 4 features
+ test_json = f"{sample_dir}/test_json.json" # contains 6 features
download_url = "https://geo.nls.uk/maps/os/1inch_2nd_ed/{z}/{x}/{y}.png"
return SheetDownloader(test_json, download_url)
def test_init(sheet_downloader):
sd = sheet_downloader
- assert len(sd) == 4
+ assert len(sd) == 6
assert sd.crs == "EPSG:4326"
@@ -121,7 +124,7 @@ def test_get_minmax_latlon(sheet_downloader, capfd):
out, _ = capfd.readouterr()
assert (
out
- == "[INFO] Min lat: 51.49344796, max lat: 54.2089733 \n[INFO] Min lon: -2.62837527, max lon: -0.16093917\n"
+ == "[INFO] Min lat: 51.49344796, max lat: 54.2089733 \n[INFO] Min lon: -4.7682, max lon: -0.16093917\n"
)
@@ -280,17 +283,49 @@ def test_query_by_string_key_errors(sheet_downloader):
# download
-def test_download_all(sheet_downloader, tmp_path):
+@pytest.fixture(scope="function")
+def mock_response(monkeypatch):
+ def mock_download_tiles(self, *args, **kwargs):
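+        # stand-in for the real tile download: skip the network and just ensure the temp folder exists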
+ os.makedirs(self.temp_folder, exist_ok=True)
+ return
+
+ monkeypatch.setattr(TileDownloader, "download_tiles", mock_download_tiles)
+
+    def mock_merge(self, grid_bb, file_name=None, overwrite=False, *args, **kwargs):
+ os.makedirs(self.output_folder, exist_ok=True)
+
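+        # a tiny placeholder image stands in for the merged map tiles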
+ merged_image = Image.new("RGBA", (10, 10))
+
+        if file_name is None:
+            file_name = self._get_output_name(grid_bb)
+
+ out_path = f"{self.output_folder}{file_name}.{self.img_output_format[0]}"
+        if not overwrite:
+ i = 1
+ while os.path.exists(out_path):
+ out_path = (
+ f"{self.output_folder}{file_name}_{i}.{self.img_output_format[0]}"
+ )
+ i += 1
+ merged_image.save(out_path, self.img_output_format[1])
+ return out_path
+
+ monkeypatch.setattr(TileMerger, "merge", mock_merge)
+
+
+def test_download_all(sheet_downloader, tmp_path, mock_response):
sd = sheet_downloader
- sd.get_grid_bb(10)
- assert sd.grid_bbs is True
- maps_path = tmp_path / "test_maps/"
+ # zoom level 14
+ sd.get_grid_bb(14)
+ maps_path = tmp_path / "test_maps_14/"
metadata_fname = "test_metadata.csv"
sd.download_all_map_sheets(maps_path, metadata_fname)
assert os.path.exists(f"{maps_path}/map_102352861.png")
assert os.path.exists(f"{maps_path}/{metadata_fname}")
df = pd.read_csv(f"{maps_path}/{metadata_fname}", sep=",", index_col=0)
- assert len(df) == 4
+ assert len(df) == 6
assert list(df.columns) == [
"name",
"url",
@@ -299,25 +334,32 @@ def test_download_all(sheet_downloader, tmp_path):
"published_date",
"grid_bb",
]
- assert list(df["name"]) == [
- "map_101602026.png",
- "map_101602038.png",
- "map_102352861.png",
- "map_91617032.png",
- ]
+ assert all(
+ name in list(df["name"])
+ for name in [
+ "map_101602026.png",
+ "map_101602038.png",
+ "map_102352861.png",
+ "map_91617032.png",
+ "map_101603986.png",
+ "map_101603986_1.png",
+ ]
+ )
# test coords
assert literal_eval(df.loc[0, "coordinates"]) == approx(
- (-1.0546875, 53.33087298301705, -0.703125, 53.54030739150021), rel=1e-6
+ (-0.98876953125, 53.448806835427575, -0.90087890625, 53.48804553605621),
+ rel=1e-6,
)
-
- sd.get_grid_bb(14)
- maps_path = tmp_path / "test_maps_14/"
+ # zoom level 17
+ sd.get_grid_bb(17)
+ assert sd.grid_bbs is True
+ maps_path = tmp_path / "test_maps_17/"
metadata_fname = "test_metadata.csv"
sd.download_all_map_sheets(maps_path, metadata_fname)
assert os.path.exists(f"{maps_path}/map_102352861.png")
assert os.path.exists(f"{maps_path}/{metadata_fname}")
df = pd.read_csv(f"{maps_path}/{metadata_fname}", sep=",", index_col=0)
- assert len(df) == 4
+ assert len(df) == 6
assert list(df.columns) == [
"name",
"url",
@@ -326,32 +368,35 @@ def test_download_all(sheet_downloader, tmp_path):
"published_date",
"grid_bb",
]
- assert list(df["name"]) == [
- "map_101602026.png",
- "map_101602038.png",
- "map_102352861.png",
- "map_91617032.png",
- ]
- # test coords
- assert literal_eval(df.loc[0, "coordinates"]) == approx(
- (-0.98876953125, 53.448806835427575, -0.90087890625, 53.48804553605621),
- rel=1e-6,
+ assert all(
+ name in list(df["name"])
+ for name in [
+ "map_101602026.png",
+ "map_101602038.png",
+ "map_102352861.png",
+ "map_91617032.png",
+ "map_101603986.png",
+ "map_101603986_1.png",
+ ]
)
def test_download_all_kwargs(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
- maps_path = tmp_path / "test_maps/"
+ # zoom level 14
+ sd.get_grid_bb(14)
+ maps_path = tmp_path / "test_maps_14/"
metadata_fname = "test_metadata.csv"
kwargs = {
"metadata_to_save": {"test1": ["properties", "test"], "test2": "id"},
"date_col": "test_date",
}
sd.download_all_map_sheets(maps_path, metadata_fname, **kwargs)
+ assert os.path.exists(f"{maps_path}/map_102352861.png")
assert os.path.exists(f"{maps_path}/{metadata_fname}")
df = pd.read_csv(f"{maps_path}/{metadata_fname}", sep=",", index_col=0)
- assert len(df) == 4
+ assert len(df) == 6
assert list(df.columns) == [
"name",
"url",
@@ -362,12 +407,22 @@ def test_download_all_kwargs(sheet_downloader, tmp_path):
"test1",
"test2",
]
- assert list(df["name"]) == [
- "map_101602026.png",
- "map_101602038.png",
- "map_102352861.png",
- "map_91617032.png",
- ]
+ assert all(
+ name in list(df["name"])
+ for name in [
+ "map_101602026.png",
+ "map_101602038.png",
+ "map_102352861.png",
+ "map_91617032.png",
+ "map_101603986.png",
+ "map_101603986_1.png",
+ ]
+ )
+ # test coords
+ assert literal_eval(df.loc[0, "coordinates"]) == approx(
+ (-0.98876953125, 53.448806835427575, -0.90087890625, 53.48804553605621),
+ rel=1e-6,
+ )
assert df.loc[3, "published_date"] == 2021
assert df.loc[3, "test1"] == "test"
assert df.loc[3, "test2"] == "Six_Inch_GB_WFS.132"
@@ -375,7 +430,7 @@ def test_download_all_kwargs(sheet_downloader, tmp_path):
def test_download_by_wfs_ids(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
+ sd.get_grid_bb(14)
maps_path = tmp_path / "test_maps/"
metadata_fname = "test_metadata.csv"
sd.download_map_sheets_by_wfs_ids(
@@ -411,9 +466,55 @@ def test_download_by_wfs_ids(sheet_downloader, tmp_path):
assert df.loc[1, "name"] == "map_101602038.png"
+def test_download_same_image_names(sheet_downloader, tmp_path, capfd):
+ sd = sheet_downloader
+ sd.get_grid_bb(14)
+ maps_path = tmp_path / "test_maps/"
+ metadata_fname = "test_metadata.csv"
+ sd.download_map_sheets_by_wfs_ids(
+ [107, 116], maps_path, metadata_fname
+ ) # 107 and 116 both refer to https://maps.nls.uk/view/101603986
+ assert os.path.exists(f"{maps_path}/map_101603986.png")
+ assert os.path.exists(f"{maps_path}/map_101603986_1.png")
+ assert os.path.exists(f"{maps_path}/{metadata_fname}")
+ df = pd.read_csv(f"{maps_path}/{metadata_fname}", sep=",", index_col=0)
+ assert len(df) == 2
+ assert list(df.columns) == [
+ "name",
+ "url",
+ "coordinates",
+ "crs",
+ "published_date",
+ "grid_bb",
+ ]
+ assert df.loc[0, "name"] == "map_101603986.png"
+ assert df.loc[1, "name"] == "map_101603986_1.png"
+
+ # run again, nothing should happen
+ sd.download_map_sheets_by_wfs_ids([107, 116], maps_path, metadata_fname)
+ out, _ = capfd.readouterr()
+ assert out.endswith(
+ '[INFO] "map_101603986.png" already exists. Skipping download.\n[INFO] "map_101603986_1.png" already exists. Skipping download.\n'
+ )
+ df = pd.read_csv(f"{maps_path}/{metadata_fname}", sep=",", index_col=0)
+ assert len(df) == 2
+
+ # now overwrite them, but check we don't add new ones (_2, _3 etc.)
+ sd.download_map_sheets_by_wfs_ids(
+ [107, 116], maps_path, metadata_fname, overwrite=True
+ ) # 107 and 116 both refer to https://maps.nls.uk/view/101603986
+ assert os.path.exists(f"{maps_path}/map_101603986.png")
+ assert os.path.exists(f"{maps_path}/map_101603986_1.png")
+ assert os.path.exists(f"{maps_path}/{metadata_fname}")
+ df = pd.read_csv(f"{maps_path}/{metadata_fname}", sep=",", index_col=0)
+ assert len(df) == 2
+ assert df.loc[0, "name"] == "map_101603986.png"
+ assert df.loc[1, "name"] == "map_101603986_1.png"
+
+
def test_download_by_wfs_ids_errors(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
+ sd.get_grid_bb(14)
maps_path = tmp_path / "test_maps/"
metadata_fname = "test_metadata.csv"
with pytest.raises(ValueError, match="as int or list of ints"):
@@ -427,7 +528,7 @@ def test_download_by_wfs_ids_errors(sheet_downloader, tmp_path):
def test_download_by_polygon(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
+ sd.get_grid_bb(14)
polygon = Polygon(
[
[-0.98078243, 53.45664144],
@@ -467,7 +568,7 @@ def test_download_by_polygon(sheet_downloader, tmp_path):
def test_download_by_polygon_errors(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
+ sd.get_grid_bb(14)
polygon = Polygon([[0, 1], [1, 2], [2, 3], [3, 4], [0, 1]])
maps_path = tmp_path / "test_maps/"
metadata_fname = "test_metadata.csv"
@@ -483,7 +584,7 @@ def test_download_by_polygon_errors(sheet_downloader, tmp_path):
def test_download_by_coords(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
+ sd.get_grid_bb(14)
maps_path = tmp_path / "test_maps/"
metadata_fname = "test_metadata.csv"
sd.download_map_sheets_by_coordinates((-0.99, 53.43), maps_path, metadata_fname)
@@ -504,7 +605,7 @@ def test_download_by_coords(sheet_downloader, tmp_path):
def test_download_by_coords_errors(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
+ sd.get_grid_bb(14)
maps_path = tmp_path / "test_maps/"
metadata_fname = "test_metadata.csv"
with pytest.raises(ValueError, match="out of map metadata bounds"):
@@ -513,7 +614,7 @@ def test_download_by_coords_errors(sheet_downloader, tmp_path):
def test_download_by_line(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
+ sd.get_grid_bb(14)
maps_path = tmp_path / "test_maps/"
metadata_fname = "test_metadata.csv"
line = LineString([(-0.99, 53.43), (-0.93, 53.46)])
@@ -535,7 +636,7 @@ def test_download_by_line(sheet_downloader, tmp_path):
def test_download_by_line_errors(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
+ sd.get_grid_bb(14)
maps_path = tmp_path / "test_maps/"
metadata_fname = "test_metadata.csv"
line = LineString([(0, 1), (2, 3)])
@@ -545,7 +646,7 @@ def test_download_by_line_errors(sheet_downloader, tmp_path):
def test_download_by_string(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
+ sd.get_grid_bb(14)
maps_path = tmp_path / "test_maps/"
metadata_fname = "test_metadata.csv"
sd.download_map_sheets_by_string(
@@ -585,7 +686,7 @@ def test_download_by_string(sheet_downloader, tmp_path):
def test_download_by_string_value_errors(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
+ sd.get_grid_bb(14)
maps_path = tmp_path / "test_maps/"
metadata_fname = "test_metadata.csv"
with pytest.raises(ValueError, match="pass ``string`` as a string"):
@@ -598,7 +699,7 @@ def test_download_by_string_value_errors(sheet_downloader, tmp_path):
def test_download_by_string_key_errors(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
+ sd.get_grid_bb(14)
maps_path = tmp_path / "test_maps/"
metadata_fname = "test_metadata.csv"
with pytest.raises(KeyError, match="not found in features dictionary"):
@@ -609,7 +710,7 @@ def test_download_by_string_key_errors(sheet_downloader, tmp_path):
def test_download_by_queries(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
+ sd.get_grid_bb(14)
maps_path = tmp_path / "test_maps/"
metadata_fname = "test_metadata.csv"
sd.query_map_sheets_by_wfs_ids([16320, 132]) # features[0] and [3]
@@ -634,7 +735,7 @@ def test_download_by_queries(sheet_downloader, tmp_path):
def test_download_by_queries_errors(sheet_downloader, tmp_path):
sd = sheet_downloader
- sd.get_grid_bb(10)
+ sd.get_grid_bb(14)
maps_path = tmp_path / "test_maps/"
metadata_fname = "test_metadata.csv"
with pytest.raises(ValueError, match="No query results"):