add cases for links and bd constraint
MartinBelthle committed Jan 25, 2024
1 parent d613fd4 commit ab14f68
Showing 4 changed files with 205 additions and 77 deletions.
80 changes: 79 additions & 1 deletion antarest/study/service.py
@@ -4,6 +4,7 @@
import json
import logging
import os
import re
from datetime import datetime, timedelta
from http import HTTPStatus
from pathlib import Path, PurePosixPath
@@ -12,6 +13,7 @@
from uuid import uuid4

import numpy as np
import pandas as pd
from fastapi import HTTPException, UploadFile
from markupsafe import escape
from starlette.responses import FileResponse, Response
@@ -20,6 +22,7 @@
from antarest.core.exceptions import (
    BadEditInstructionException,
    CommandApplicationError,
    IncorrectPathError,
    NotAManagedStudyException,
    StudyDeletionNotAllowed,
    StudyNotFoundError,
@@ -54,6 +57,7 @@
from antarest.study.business.areas.thermal_management import ThermalManager
from antarest.study.business.binding_constraint_management import BindingConstraintManager
from antarest.study.business.config_management import ConfigManager
from antarest.study.business.correlation_management import CorrelationManager
from antarest.study.business.district_manager import DistrictManager
from antarest.study.business.general_management import GeneralManager
from antarest.study.business.link_management import LinkInfoDTO, LinkManager
@@ -109,7 +113,14 @@
    should_study_be_denormalized,
    upgrade_study,
)
from antarest.study.storage.utils import assert_permission, get_start_date, is_managed, remove_from_cache, study_matcher
from antarest.study.storage.utils import (
    SPECIFIC_MATRICES,
    assert_permission,
    get_start_date,
    is_managed,
    remove_from_cache,
    study_matcher,
)
from antarest.study.storage.variantstudy.model.command.icommand import ICommand
from antarest.study.storage.variantstudy.model.command.replace_matrix import ReplaceMatrix
from antarest.study.storage.variantstudy.model.command.update_comments import UpdateComments
@@ -2383,3 +2394,70 @@ def get_disk_usage(self, uuid: str, params: RequestParameters) -> int:
        assert_permission(params.user, study, StudyPermissionType.READ)
        path = str(self.storage_service.get_storage(study).get_study_path(study))
        return get_disk_usage(path=path)

    def get_matrix_with_index_and_header(self, study_id: str, path: str, parameters: RequestParameters) -> pd.DataFrame:
        matrix_path = Path(path)
        study = self.get_study(study_id)
        for aggregate in ["allocation", "correlation"]:
            if matrix_path == Path("input") / "hydro" / aggregate:
                all_areas = cast(
                    List[AreaInfoDTO],
                    self.get_all_areas(study_id, area_type=AreaType.AREA, ui=False, params=parameters),
                )
                if aggregate == "allocation":
                    hydro_matrix = self.allocation_manager.get_allocation_matrix(study, all_areas)
                else:
                    hydro_matrix = CorrelationManager(self.storage_service).get_correlation_matrix(all_areas, study, [])  # type: ignore
                return pd.DataFrame(data=hydro_matrix.data, columns=hydro_matrix.columns, index=hydro_matrix.index)

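        # Generic case: read the matrix as JSON and rebuild a DataFrame from it.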
        json_matrix = self.get(study_id, path, depth=3, formatted=True, params=parameters)
        expected_keys = ["data", "index", "columns"]
        for key in expected_keys:
            if key not in json_matrix:
                raise IncorrectPathError(f"The provided path does not correspond to a matrix: {path}")
        df_matrix = pd.DataFrame(data=json_matrix["data"], columns=json_matrix["columns"], index=json_matrix["index"])
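        # Paths matching a known pattern get explicit headers; binding-constraint
        # and link matrices pick their columns according to the study version.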
        for specific_matrix, matrix_config in SPECIFIC_MATRICES.items():
            if re.compile(specific_matrix).match(path):
                if matrix_config["alias"] == "bindingconstraints":
                    study_version = int(study.version)
                    if study_version < 870:
                        cols: t.List[str] = matrix_config["cols_with_version"]["before_870"]  # type: ignore
                    else:
                        cols: t.List[str] = matrix_config["cols_with_version"]["after_870"]  # type: ignore
                elif matrix_config["alias"] == "links":
                    study_version = int(study.version)
                    path_parts = matrix_path.parts
                    area_1 = path_parts[3]
                    area_2 = path_parts[4]
                    if study_version < 820:
                        cols: t.List[str] = matrix_config["cols_with_version"]["before_820"]  # type: ignore
                    else:
                        cols: t.List[str] = matrix_config["cols_with_version"]["after_820"]  # type: ignore
                    for k, col in enumerate(cols):
                        if col == "Hurdle costs direct":
                            cols[k] = f"{col} ({area_1}->{area_2})"
                        elif col == "Hurdle costs indirect":
                            cols[k] = f"{col} ({area_2}->{area_1})"
                else:
                    cols: t.List[str] = matrix_config["cols"]  # type: ignore
                rows: t.List[str] = matrix_config["rows"]  # type: ignore
                if cols:
                    df_matrix.columns = pd.Index(cols)
                if rows:
                    df_matrix.index = rows
                else:
                    matrix_index = self.get_input_matrix_startdate(study_id, path, parameters)
                    time_column = pd.date_range(
                        start=matrix_index.start_date,
                        periods=len(df_matrix),
                        freq=matrix_index.level.value[0],
                    )
                    df_matrix.index = time_column
                return df_matrix
        matrix_index = self.get_input_matrix_startdate(study_id, path, parameters)
        time_column = pd.date_range(
            start=matrix_index.start_date, periods=len(df_matrix), freq=matrix_index.level.value[0]
        )
        df_matrix.index = time_column
        return df_matrix
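
For context, the "links" branch above derives directional hurdle-cost headers from the two area names in the path. A minimal standalone sketch of that renaming, assuming the `after_820` column list from SPECIFIC_MATRICES (the area names in the usage comment are hypothetical):

import typing as t

LINK_COLS_AFTER_820 = [
    "Hurdle costs direct",
    "Hurdle costs indirect",
    "Impedances",
    "Loop flow",
    "P.Shift Min",
    "P.Shift Max",
]


def link_headers(cols: t.List[str], area_1: str, area_2: str) -> t.List[str]:
    """Suffix the hurdle-cost columns with the link direction, as in the diff above."""
    headers = list(cols)
    for k, col in enumerate(headers):
        if col == "Hurdle costs direct":
            headers[k] = f"{col} ({area_1}->{area_2})"
        elif col == "Hurdle costs indirect":
            headers[k] = f"{col} ({area_2}->{area_1})"
    return headers


# link_headers(LINK_COLS_AFTER_820, "de", "fr")[:2]
# == ["Hurdle costs direct (de->fr)", "Hurdle costs indirect (fr->de)"]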
119 changes: 119 additions & 0 deletions antarest/study/storage/utils.py
@@ -264,6 +264,125 @@ def assert_permission(
)


def _generate_columns(column_suffix: str) -> t.List[str]:
    return [str(i) + column_suffix for i in range(100)]
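# e.g. _generate_columns("%") == ["0%", "1%", ..., "99%"]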


SPECIFIC_MATRICES = {
    "input/hydro/common/capacity/creditmodulations_*": {
        "alias": "creditmodulations",
        "cols": _generate_columns(""),
        "rows": ["Generating Power", "Pumping Power"],
        "stats": False,
    },
    "input/hydro/common/capacity/maxpower_*": {
        "alias": "maxpower",
        "cols": [
            "Generating Max Power (MW)",
            "Generating Max Energy (Hours at Pmax)",
            "Pumping Max Power (MW)",
            "Pumping Max Energy (Hours at Pmax)",
        ],
        "rows": [],
        "stats": False,
    },
    "input/hydro/common/capacity/reservoir_*": {
        "alias": "reservoir",
        "cols": ["Lev Low (p.u)", "Lev Avg (p.u)", "Lev High (p.u)"],
        "rows": [],
        "stats": False,
    },
    "input/hydro/common/capacity/waterValues_*": {
        "alias": "waterValues",
        "cols": _generate_columns("%"),
        "rows": [],
        "stats": False,
    },
    "input/hydro/series/*/mod": {"alias": "mod", "cols": [], "rows": [], "stats": True},
    "input/hydro/series/*/ror": {"alias": "ror", "cols": [], "rows": [], "stats": True},
    "input/hydro/common/capacity/inflowPattern_*": {
        "alias": "inflowPattern",
        "cols": ["Inflow Pattern (X)"],
        "rows": [],
        "stats": False,
    },
    "input/hydro/prepro/*/energy": {
        "alias": "energy",
        "cols": ["Expectation (MWh)", "Std Deviation (MWh)", "Min. (MWh)", "Max. (MWh)", "ROR Share"],
        "rows": [
            "January",
            "February",
            "March",
            "April",
            "May",
            "June",
            "July",
            "August",
            "September",
            "October",
            "November",
            "December",
        ],
        "stats": False,
    },
    "input/thermal/prepro/*/*/modulation": {
        "alias": "modulation",
        "cols": ["Marginal cost modulation", "Market bid modulation", "Capacity modulation", "Min gen modulation"],
        "rows": [],
        "stats": False,
    },
    "input/thermal/prepro/*/*/data": {
        "alias": "data",
        "cols": ["FO Duration", "PO Duration", "FO Rate", "PO Rate", "NPO Min", "NPO Max"],
        "rows": [],
        "stats": False,
    },
    "input/reserves/*": {
        "alias": "reserves",
        "cols": ["Primary Res. (draft)", "Strategic Res. (draft)", "DSM", "Day Ahead"],
        "rows": [],
        "stats": False,
    },
    "input/misc-gen/miscgen-*": {
        "alias": "miscgen",
        "cols": ["CHP", "Bio Mass", "Bio Gaz", "Waste", "GeoThermal", "Other", "PSP", "ROW Balance"],
        "rows": [],
        "stats": False,
    },
    "input/bindingconstraints/*": {
        "alias": "bindingconstraints",
        "rows": [],
        "stats": False,
        "cols_with_version": {"after_870": ["<", ">", "="], "before_870": []},
    },
    "input/links/*/*": {
        "alias": "links",
        "rows": [],
        "stats": False,
        "cols_with_version": {
            "after_820": [
                "Hurdle costs direct",
                "Hurdle costs indirect",
                "Impedances",
                "Loop flow",
                "P.Shift Min",
                "P.Shift Max",
            ],
            "before_820": [
                "Capacités de transmission directes",
                "Capacités de transmission indirectes",
                "Hurdle costs direct",
                "Hurdle costs indirect",
                "Impedances",
                "Loop flow",
                "P.Shift Min",
                "P.Shift Max",
            ],
        },
    },
}
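
# Note: the glob-looking keys above are matched with re.compile(key).match(path)
# in StudyService.get_matrix_with_index_and_header. re.match only anchors at the
# start of the string, so a trailing "_*" (regex: "_" repeated zero or more
# times) still accepts any area suffix after the underscore. A quick sanity
# check, with a hypothetical area name:
#
#     import re
#     assert re.compile("input/hydro/common/capacity/maxpower_*").match(
#         "input/hydro/common/capacity/maxpower_area1"
#     )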


def get_start_date(
    file_study: FileStudy,
    output_id: t.Optional[str] = None,
81 changes: 6 additions & 75 deletions antarest/study/web/raw_studies_blueprint.py
@@ -268,74 +268,22 @@ def get_matrix(
    index: bool = True,
    current_user: JWTUser = Depends(auth.get_current_user),
) -> StreamingResponse:
    # todo: We should support the txt format when there is no header. How?
    # todo: We should support the txt format when there is no header. Possible with GET raw and formatted=False. To investigate.
    # todo: Question for Alexander: which separator for csv and xlsx?
    # todo: add the allocation and correlation case to the examples so that users understand
    # todo: send an xlsx file to Alexander to check that it opens correctly in Excel on Windows
    # todo: benchmark StreamingResponse vs FileResponse
    # todo: maybe wrap download failures in an HTTP exception (try/catch in the code)
    # todo: add the other cases (binding constraints, links, etc.) alongside hydro
    # todo: stats are not handled yet
    # todo: outputs are not handled yet either
    # todo: maybe change the matrix structure so we do not need the type: ignore comments
    # todo: change the index/header check to speed it up if necessary.
    # todo: how should the language be handled?
    # todo: do all the header work in a dedicated endpoint. That endpoint could take as input a language sent by the front end.
    # for allocation and correlation this requires building the whole matrix, but still.

    parameters = RequestParameters(user=current_user)
    if pathlib.Path(path) == pathlib.Path("input") / "hydro" / "allocation":
        all_areas = t.cast(
            t.List[AreaInfoDTO],
            study_service.get_all_areas(uuid, area_type=AreaType.AREA, ui=False, params=parameters),
        )
        study = study_service.get_study(uuid)
        allocation_matrix = study_service.allocation_manager.get_allocation_matrix(study, all_areas)
        df_matrix = pd.DataFrame(
            data=allocation_matrix.data, columns=allocation_matrix.columns, index=allocation_matrix.index
        )
    elif pathlib.Path(path) == pathlib.Path("input") / "hydro" / "correlation":
        all_areas = t.cast(
            t.List[AreaInfoDTO],
            study_service.get_all_areas(uuid, area_type=AreaType.AREA, ui=False, params=parameters),
        )
        manager = CorrelationManager(study_service.storage_service)
        study = study_service.get_study(uuid)
        correlation_matrix = manager.get_correlation_matrix(all_areas, study, [])
        df_matrix = pd.DataFrame(
            data=correlation_matrix.data, columns=correlation_matrix.columns, index=correlation_matrix.index
        )
    else:
        json_matrix = study_service.get(uuid, path, depth=3, formatted=True, params=parameters)
        expected_keys = ["data", "index", "columns"]
        for key in expected_keys:
            if key not in json_matrix:
                raise IncorrectPathError(f"The provided path does not correspond to a matrix: {path}")
        df_matrix = pd.DataFrame(
            data=json_matrix["data"], columns=json_matrix["columns"], index=json_matrix["index"]
        )
    if index:
        if pathlib.Path(path).parts[:2] == ("input", "hydro"):
            for hydro_name in HYDRO_MATRICES:
                pattern = re.compile(hydro_name)
                if pattern.match(path):
                    hydro_json = HYDRO_MATRICES[hydro_name]
                    cols: t.List[str] = hydro_json["cols"]  # type: ignore
                    rows: t.List[str] = hydro_json["rows"]  # type: ignore
                    if cols:
                        df_matrix.columns = pd.Index(cols)
                    if rows:
                        df_matrix.set_index(rows)
                else:
                    matrix_index = study_service.get_input_matrix_startdate(uuid, path, parameters)
                    time_column = pd.date_range(
                        start=matrix_index.start_date,
                        periods=len(df_matrix),
                        freq=matrix_index.level.value[0],
                    )
                    df_matrix.set_index(time_column)
        else:
            matrix_index = study_service.get_input_matrix_startdate(uuid, path, parameters)
            time_column = pd.date_range(
                start=matrix_index.start_date, periods=len(df_matrix), freq=matrix_index.level.value[0]
            )
            df_matrix.set_index(time_column)
    df_matrix = study_service.get_matrix_with_index_and_header(uuid, path, parameters)

    export_file_download = study_service.file_transfer_manager.request_download(
        f"{pathlib.Path(path).stem}.{format.value}",
@@ -384,20 +332,3 @@ def _create_matrix_files(
        index=index,
        float_format="%.6f",
    )


def _generate_columns(column_suffix: str) -> t.List[str]:
    return [str(i) + column_suffix for i in range(100)]


# fmt: off
HYDRO_MATRICES = {
    "input/hydro/common/capacity/creditmodulations_*": {'cols': _generate_columns(""), 'rows': ["Generating Power", "Pumping Power"], 'stats': False},
    "input/hydro/common/capacity/maxpower_*": {'cols': ["Generating Max Power (MW)", "Generating Max Energy (Hours at Pmax)", "Pumping Max Power (MW)", "Pumping Max Energy (Hours at Pmax)"], 'rows': [], 'stats': False},
    "input/hydro/common/capacity/reservoir_*": {'cols': ["Lev Low (p.u)", "Lev Avg (p.u)", "Lev High (p.u)"], 'rows': [], 'stats': False},
    "input/hydro/common/capacity/waterValues_*": {'cols': _generate_columns("%"), 'rows': [], 'stats': False},
    "input/hydro/series/*/mod": {'cols': [], 'rows': [], 'stats': True},
    "input/hydro/series/*/ror": {'cols': [], 'rows': [], 'stats': True},
    "input/hydro/common/capacity/inflowPattern_*": {'cols': ["Inflow Pattern (X)"], 'rows': [], 'stats': False},
    "input/hydro/prepro/*/energy": {'cols': ["Expectation (MWh)", "Std Deviation (MWh)", "Min. (MWh)", "Max. (MWh)", "ROR Share"], 'rows': ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"], 'stats': False}
}
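
With the header logic centralized in StudyService.get_matrix_with_index_and_header, the endpoint body above reduces to a single service call. A minimal sketch of the equivalent flow, assuming a hypothetical study id and matrix path (RequestParameters is built exactly as in the diff, and the float format matches _create_matrix_files):

parameters = RequestParameters(user=current_user)
df_matrix = study_service.get_matrix_with_index_and_header(
    "8785afaa-a43b-4b50-b1d1-4ad4a26b1f7e",  # hypothetical study id
    "input/links/de/fr",  # hypothetical link matrix path
    parameters,
)
df_matrix.to_csv("de-fr.csv", float_format="%.6f")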
2 changes: 1 addition & 1 deletion tests/integration/test_integration.py
@@ -2167,7 +2167,7 @@ def test_download_matrices(client: TestClient, admin_access_token: str, study_id
    # reformat into a json to help comparison
    new_cols = [int(col) for col in dataframe.columns]
    dataframe.columns = new_cols
    dataframe.set_index(range(len(dataframe)))
    dataframe.index = range(len(dataframe))
    actual_matrix = dataframe.to_dict(orient="split")

    # asserts that the result is the same as the one we get with the classic get /raw endpoint
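
The one-line test change above captures the behavioral point of the set_index fixes in this commit: pandas' DataFrame.set_index returns a new frame rather than mutating the receiver, so calling it without keeping the result is a no-op, whereas assigning to .index mutates in place. A toy illustration:

import pandas as pd

df = pd.DataFrame({"a": [10, 20]}, index=["x", "y"])

df.set_index(pd.Index(range(len(df))))  # returns a new DataFrame; df is unchanged
assert list(df.index) == ["x", "y"]

df.index = range(len(df))  # assignment mutates df in place
assert list(df.index) == [0, 1]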
