Commit d9bac0e (v2.17)

Merge pull request #2028 from AntaresSimulatorTeam/release/2.17.0
laurent-laporte-pro authored May 15, 2024
2 parents: 3663b60 + 4f6da59

Showing 259 changed files with 25,959 additions and 5,741 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -30,6 +30,7 @@ share/python-wheels/
 .installed.cfg
 *.egg
 MANIFEST
+node_modules
 
 # PyInstaller
 # Usually these files are written by a python script from a template
@@ -0,0 +1,51 @@
"""
Add delete cascade constraint to variant study foreign keys
Revision ID: c0c4aaf84861
Revises: fd73601a9075
Create Date: 2024-02-21 17:29:48.736664
"""
from alembic import op # type: ignore

# revision identifiers, used by Alembic.
revision = "c0c4aaf84861"
down_revision = "fd73601a9075"
branch_labels = None
depends_on = None

COMMAND_BLOCK_FK = "commandblock_study_id_fkey"
SNAPSHOT_FK = "variant_study_snapshot_id_fkey"


def upgrade() -> None:
    dialect_name: str = op.get_context().dialect.name

    # SQLite doesn't support dropping foreign keys, so this step is skipped on SQLite
    if dialect_name == "postgresql":
        # ### commands auto generated by Alembic - please adjust! ###
        with op.batch_alter_table("commandblock", schema=None) as batch_op:
            batch_op.drop_constraint(COMMAND_BLOCK_FK, type_="foreignkey")
            batch_op.create_foreign_key(COMMAND_BLOCK_FK, "variantstudy", ["study_id"], ["id"], ondelete="CASCADE")

        with op.batch_alter_table("variant_study_snapshot", schema=None) as batch_op:
            batch_op.drop_constraint(SNAPSHOT_FK, type_="foreignkey")
            batch_op.create_foreign_key(SNAPSHOT_FK, "variantstudy", ["id"], ["id"], ondelete="CASCADE")

        # ### end Alembic commands ###


def downgrade() -> None:
    dialect_name: str = op.get_context().dialect.name

    # SQLite doesn't support dropping foreign keys, so this step is skipped on SQLite
    if dialect_name == "postgresql":
        # ### commands auto generated by Alembic - please adjust! ###
        with op.batch_alter_table("variant_study_snapshot", schema=None) as batch_op:
            batch_op.drop_constraint(SNAPSHOT_FK, type_="foreignkey")
            batch_op.create_foreign_key(SNAPSHOT_FK, "variantstudy", ["id"], ["id"])

        with op.batch_alter_table("commandblock", schema=None) as batch_op:
            batch_op.drop_constraint(COMMAND_BLOCK_FK, type_="foreignkey")
            batch_op.create_foreign_key(COMMAND_BLOCK_FK, "variantstudy", ["study_id"], ["id"])

        # ### end Alembic commands ###
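Note: the "skip on SQLite" guard exists because SQLite cannot alter a table's foreign keys in place. As a point of reference only (not part of this commit), Alembic's batch mode can emulate the change on SQLite by rebuilding the table; a minimal sketch, reusing the constraint name from the migration above:

# Hypothetical SQLite variant: recreate="always" copies the table, so the
# rebuilt "commandblock" is created with the new ON DELETE CASCADE clause.
with op.batch_alter_table("commandblock", recreate="always") as batch_op:
    batch_op.create_foreign_key(
        COMMAND_BLOCK_FK, "variantstudy", ["study_id"], ["id"], ondelete="CASCADE"
    )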
@@ -0,0 +1,123 @@
"""
Populate `tag` and `study_tag` tables from `patch` field in `study_additional_data` table
Revision ID: dae93f1d9110
Revises: 3c70366b10ea
Create Date: 2024-02-08 10:30:20.590919
"""
import collections
import itertools
import json
import secrets
import typing as t

import sqlalchemy as sa # type: ignore
from alembic import op
from sqlalchemy.engine import Connection # type: ignore

from antarest.study.css4_colors import COLOR_NAMES

# revision identifiers, used by Alembic.
revision = "dae93f1d9110"
down_revision = "3c70366b10ea"
branch_labels = None
depends_on = None


def _avoid_duplicates(tags: t.Iterable[str]) -> t.Sequence[str]:
    """Avoid duplicate tags (case-insensitive); the last spelling seen wins."""

    upper_tags = {tag.upper(): tag for tag in tags}
    return list(upper_tags.values())


def _load_patch_obj(patch: t.Optional[str]) -> t.MutableMapping[str, t.Any]:
    """Load the patch object from the `patch` field in the `study_additional_data` table."""

    obj: t.MutableMapping[str, t.Any] = json.loads(patch or "{}")
    obj["study"] = obj.get("study") or {}
    obj["study"]["tags"] = _avoid_duplicates(obj["study"].get("tags") or [])
    return obj


def upgrade() -> None:
    """
    Populate the `tag` and `study_tag` tables from the `patch` field of the `study_additional_data` table.

    The upgrade proceeds in four steps:
    - Retrieve the study/tag pairs from the patches stored in `study_additional_data`.
    - Delete all rows in `tag` and `study_tag`: between revision 3c70366b10ea and this one,
      tag updates modified the patches as well as these two tables, so both must be rebuilt.
    - Populate the `tag` table with the unique tag labels, assigning each a randomly chosen color.
    - Populate the `study_tag` table with the study/tag pairs.
    """

    # create a connection to the database
    connexion: Connection = op.get_bind()

    # retrieve the tags and the study/tag pairs from the database
    study_tags = connexion.execute("SELECT study_id, patch FROM study_additional_data")
    tags_by_ids: t.MutableMapping[str, t.Sequence[str]] = {}
    for study_id, patch in study_tags:
        obj = _load_patch_obj(patch)
        tags_by_ids[study_id] = obj["study"]["tags"]

    # delete all rows in the `study_tag` and `tag` tables
    connexion.execute("DELETE FROM study_tag")
    connexion.execute("DELETE FROM tag")

    # insert the tags in the `tag` table
    all_labels = {lbl.upper(): lbl for lbl in itertools.chain.from_iterable(tags_by_ids.values())}
    bulk_tags = [{"label": label, "color": secrets.choice(COLOR_NAMES)} for label in all_labels.values()]
    if bulk_tags:
        sql = sa.text("INSERT INTO tag (label, color) VALUES (:label, :color)")
        connexion.execute(sql, *bulk_tags)

    # create the relationships between studies and tags in the `study_tag` table
    bulk_study_tags = [
        # fmt: off
        {"study_id": id_, "tag_label": all_labels[lbl.upper()]}
        for id_, tags in tags_by_ids.items()
        for lbl in tags
        # fmt: on
    ]
    if bulk_study_tags:
        sql = sa.text("INSERT INTO study_tag (study_id, tag_label) VALUES (:study_id, :tag_label)")
        connexion.execute(sql, *bulk_study_tags)


def downgrade() -> None:
    """
    Restore the `patch` field of `study_additional_data` from the `tag` and `study_tag` tables.

    The downgrade proceeds in three steps:
    - Retrieve the study/tag pairs from the `study_tag` table.
    - Update the tags stored in each patch of `study_additional_data` using these pairs.
    - Delete all rows from `tag` and `study_tag`.
    """
    # create a connection to the database
    connexion: Connection = op.get_bind()

    # create the `tags_by_ids` mapping from the data in the `study_tag` table
    tags_by_ids: t.MutableMapping[str, t.Set[str]] = collections.defaultdict(set)
    study_tags = connexion.execute("SELECT study_id, tag_label FROM study_tag")
    for study_id, tag_label in study_tags:
        tags_by_ids[study_id].add(tag_label)

    # then, read the patch objects from the `study_additional_data` table
    objects_by_ids = {}
    study_tags = connexion.execute("SELECT study_id, patch FROM study_additional_data")
    for study_id, patch in study_tags:
        obj = _load_patch_obj(patch)
        obj["study"]["tags"] = _avoid_duplicates(tags_by_ids[study_id] | set(obj["study"]["tags"]))
        objects_by_ids[study_id] = obj

    # update the patch objects in the `study_additional_data` table
    bulk_patches = [{"study_id": id_, "patch": json.dumps(obj)} for id_, obj in objects_by_ids.items()]
    if bulk_patches:
        sql = sa.text("UPDATE study_additional_data SET patch = :patch WHERE study_id = :study_id")
        connexion.execute(sql, *bulk_patches)

    # delete all rows from `study_tag` and `tag`
    connexion.execute("DELETE FROM study_tag")
    connexion.execute("DELETE FROM tag")
86 changes: 86 additions & 0 deletions alembic/versions/fd73601a9075_add_delete_cascade_studies.py
@@ -0,0 +1,86 @@
"""
Add delete cascade constraint to study foreign keys
Revision ID: fd73601a9075
Revises: 3c70366b10ea
Create Date: 2024-02-12 17:27:37.314443
"""
import sqlalchemy as sa # type: ignore
from alembic import op

# revision identifiers, used by Alembic.
revision = "fd73601a9075"
down_revision = "dae93f1d9110"
branch_labels = None
depends_on = None

# noinspection SpellCheckingInspection
RAWSTUDY_FK = "rawstudy_id_fkey"

# noinspection SpellCheckingInspection
VARIANTSTUDY_FK = "variantstudy_id_fkey"

# noinspection SpellCheckingInspection
STUDY_ADDITIONAL_DATA_FK = "study_additional_data_study_id_fkey"


def upgrade() -> None:
    dialect_name: str = op.get_context().dialect.name

    # SQLite doesn't support dropping foreign keys, so this step is skipped on SQLite
    if dialect_name == "postgresql":
        with op.batch_alter_table("rawstudy", schema=None) as batch_op:
            batch_op.drop_constraint(RAWSTUDY_FK, type_="foreignkey")
            batch_op.create_foreign_key(RAWSTUDY_FK, "study", ["id"], ["id"], ondelete="CASCADE")

        with op.batch_alter_table("study_additional_data", schema=None) as batch_op:
            batch_op.drop_constraint(STUDY_ADDITIONAL_DATA_FK, type_="foreignkey")
            batch_op.create_foreign_key(STUDY_ADDITIONAL_DATA_FK, "study", ["study_id"], ["id"], ondelete="CASCADE")

        with op.batch_alter_table("variantstudy", schema=None) as batch_op:
            batch_op.drop_constraint(VARIANTSTUDY_FK, type_="foreignkey")
            batch_op.create_foreign_key(VARIANTSTUDY_FK, "study", ["id"], ["id"], ondelete="CASCADE")

    with op.batch_alter_table("group_metadata", schema=None) as batch_op:
        batch_op.alter_column("group_id", existing_type=sa.VARCHAR(length=36), nullable=False)
        batch_op.alter_column("study_id", existing_type=sa.VARCHAR(length=36), nullable=False)
        batch_op.create_index(batch_op.f("ix_group_metadata_group_id"), ["group_id"], unique=False)
        batch_op.create_index(batch_op.f("ix_group_metadata_study_id"), ["study_id"], unique=False)
        if dialect_name == "postgresql":
            batch_op.drop_constraint("group_metadata_group_id_fkey", type_="foreignkey")
            batch_op.drop_constraint("group_metadata_study_id_fkey", type_="foreignkey")
            batch_op.create_foreign_key(
                "group_metadata_group_id_fkey", "groups", ["group_id"], ["id"], ondelete="CASCADE"
            )
            batch_op.create_foreign_key(
                "group_metadata_study_id_fkey", "study", ["study_id"], ["id"], ondelete="CASCADE"
            )


def downgrade() -> None:
    dialect_name: str = op.get_context().dialect.name
    # SQLite doesn't support dropping foreign keys, so this step is skipped on SQLite
    if dialect_name == "postgresql":
        with op.batch_alter_table("rawstudy", schema=None) as batch_op:
            batch_op.drop_constraint(RAWSTUDY_FK, type_="foreignkey")
            batch_op.create_foreign_key(RAWSTUDY_FK, "study", ["id"], ["id"])

        with op.batch_alter_table("study_additional_data", schema=None) as batch_op:
            batch_op.drop_constraint(STUDY_ADDITIONAL_DATA_FK, type_="foreignkey")
            batch_op.create_foreign_key(STUDY_ADDITIONAL_DATA_FK, "study", ["study_id"], ["id"])

        with op.batch_alter_table("variantstudy", schema=None) as batch_op:
            batch_op.drop_constraint(VARIANTSTUDY_FK, type_="foreignkey")
            batch_op.create_foreign_key(VARIANTSTUDY_FK, "study", ["id"], ["id"])

    with op.batch_alter_table("group_metadata", schema=None) as batch_op:
        # SQLite doesn't support dropping foreign keys, so this step is skipped on SQLite
        if dialect_name == "postgresql":
            batch_op.drop_constraint("group_metadata_study_id_fkey", type_="foreignkey")
            batch_op.drop_constraint("group_metadata_group_id_fkey", type_="foreignkey")
            batch_op.create_foreign_key("group_metadata_study_id_fkey", "study", ["study_id"], ["id"])
            batch_op.create_foreign_key("group_metadata_group_id_fkey", "groups", ["group_id"], ["id"])
        batch_op.drop_index(batch_op.f("ix_group_metadata_study_id"))
        batch_op.drop_index(batch_op.f("ix_group_metadata_group_id"))
        batch_op.alter_column("study_id", existing_type=sa.VARCHAR(length=36), nullable=True)
        batch_op.alter_column("group_id", existing_type=sa.VARCHAR(length=36), nullable=True)
4 changes: 2 additions & 2 deletions antarest/__init__.py
@@ -7,9 +7,9 @@

 # Standard project metadata
 
-__version__ = "2.16.8"
+__version__ = "2.17"
 __author__ = "RTE, Antares Web Team"
-__date__ = "2024-04-19"
+__date__ = "2024-05-15"
 # noinspection SpellCheckingInspection
 __credits__ = "(c) Réseau de Transport de l’Électricité (RTE)"

3 changes: 1 addition & 2 deletions antarest/core/configdata/model.py
@@ -2,8 +2,7 @@
 from typing import Any, Optional
 
 from pydantic import BaseModel
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, Sequence, String  # type: ignore
-from sqlalchemy.orm import relationship  # type: ignore
+from sqlalchemy import Column, Integer, String  # type: ignore
 
 from antarest.core.persistence import Base
