Skip to content

Commit

Permalink
Merge branch 'dev' into feat/support-solver-v87
Browse files Browse the repository at this point in the history
  • Loading branch information
MartinBelthle committed Feb 26, 2024
2 parents c2c1bc7 + 84462c7 commit ef0536f
Show file tree
Hide file tree
Showing 298 changed files with 15,086 additions and 21,496 deletions.
13 changes: 13 additions & 0 deletions .github/workflows/commitlint.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# GitHub Actions workflow: lint the commit messages of every pull request
# with commitlint (wagoid/commitlint-github-action).
name: Lint Commit Messages
on: [pull_request]

# Restrict the GITHUB_TOKEN to read-only access.
permissions:
  contents: read
  pull-requests: read

jobs:
  commitlint:
    runs-on: ubuntu-latest
    steps:
      # Check out the repository so the action can inspect the commits.
      - uses: actions/checkout@v3
      # Run commitlint on the PR's commit messages.
      - uses: wagoid/commitlint-github-action@v5
2 changes: 1 addition & 1 deletion .github/workflows/compatibility.yml
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ jobs:
with:
node-version: ${{ matrix.node-version }}
- name: Install dependencies
run: npm install --legacy-peer-deps
run: npm install
working-directory: webapp
- name: Build
run: npm run build
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/deploy.yml
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ jobs:
node-version: 18.16.1

- name: 💚 Install dependencies
run: npm install --legacy-peer-deps
run: npm install
working-directory: webapp

- name: 💚 Build webapp
Expand Down
17 changes: 1 addition & 16 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ jobs:
with:
node-version: 18.16.1
- name: Install dependencies
run: npm install --legacy-peer-deps
run: npm install
working-directory: webapp
- name: Build
run: npm run build
Expand All @@ -89,16 +89,6 @@ jobs:
- name: Lint
run: npm run lint
working-directory: webapp
- name: Run tests
run: npm run test-coverage
working-directory: webapp
env:
CI: true
- name: Archive code coverage results
uses: actions/upload-artifact@v3
with:
name: js-code-coverage-report
path: webapp/coverage/lcov.info

sonarcloud:
runs-on: ubuntu-20.04
Expand All @@ -109,11 +99,6 @@ jobs:
uses: actions/download-artifact@v3
with:
name: python-code-coverage-report
- name: Download js coverage report
uses: actions/download-artifact@v3
with:
name: js-code-coverage-report
path: webapp/coverage
- name: SonarCloud Scan
uses: sonarsource/sonarcloud-github-action@master
env:
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ Install the front-end dependencies:

```shell script
cd webapp
npm install --legacy-peer-deps
npm install
cd ..
```

Expand Down
58 changes: 58 additions & 0 deletions alembic/versions/3c70366b10ea_add_tag_and_study_tag_tables.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
"""Add tag and study_tag tables

Revision ID: 3c70366b10ea
Revises: 1f5db5dfad80
Create Date: 2024-02-02 13:06:47.627554
"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "3c70366b10ea"
down_revision = "1f5db5dfad80"
branch_labels = None
depends_on = None


def upgrade():
    """Create the ``tag`` and ``study_tag`` tables with their indexes.

    ``study_tag`` is an association table between ``study`` and ``tag``;
    its foreign keys cascade on delete.
    """
    op.create_table(
        "tag",
        sa.Column("label", sa.String(length=40), nullable=False),
        sa.Column("color", sa.String(length=20), nullable=True),
        sa.PrimaryKeyConstraint("label"),
    )
    with op.batch_alter_table("tag", schema=None) as batch_op:
        # Index both columns (color first, matching the original DDL order).
        for column in ("color", "label"):
            batch_op.create_index(batch_op.f(f"ix_tag_{column}"), [column], unique=False)

    op.create_table(
        "study_tag",
        sa.Column("study_id", sa.String(length=36), nullable=False),
        sa.Column("tag_label", sa.String(length=40), nullable=False),
        sa.ForeignKeyConstraint(["study_id"], ["study.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["tag_label"], ["tag.label"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("study_id", "tag_label"),
    )
    with op.batch_alter_table("study_tag", schema=None) as batch_op:
        for column in ("study_id", "tag_label"):
            batch_op.create_index(batch_op.f(f"ix_study_tag_{column}"), [column], unique=False)


def downgrade():
    """Drop the ``study_tag`` and ``tag`` tables (indexes first, then tables)."""
    # Drop the association table before the referenced `tag` table.
    with op.batch_alter_table("study_tag", schema=None) as batch_op:
        for index_name in ("ix_study_tag_tag_label", "ix_study_tag_study_id"):
            batch_op.drop_index(batch_op.f(index_name))
    op.drop_table("study_tag")

    with op.batch_alter_table("tag", schema=None) as batch_op:
        for index_name in ("ix_tag_label", "ix_tag_color"):
            batch_op.drop_index(batch_op.f(index_name))
    op.drop_table("tag")
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
"""
Add delete cascade constraint to variant study foreign keys

Revision ID: c0c4aaf84861
Revises: fd73601a9075
Create Date: 2024-02-21 17:29:48.736664
"""
from alembic import op  # type: ignore

# revision identifiers, used by Alembic.
revision = "c0c4aaf84861"
down_revision = "fd73601a9075"
branch_labels = None
depends_on = None

# Names of the foreign-key constraints recreated by this migration;
# only used on PostgreSQL (see upgrade/downgrade below).
COMMAND_BLOCK_FK = "commandblock_study_id_fkey"
SNAPSHOT_FK = "variant_study_snapshot_id_fkey"


def upgrade() -> None:
    """Recreate the variant-study foreign keys with ``ON DELETE CASCADE``.

    SQLite does not support dropping foreign keys, so this migration is a
    no-op on any dialect other than PostgreSQL.
    """
    if op.get_context().dialect.name != "postgresql":
        return

    with op.batch_alter_table("commandblock", schema=None) as batch_op:
        batch_op.drop_constraint(COMMAND_BLOCK_FK, type_="foreignkey")
        batch_op.create_foreign_key(COMMAND_BLOCK_FK, "variantstudy", ["study_id"], ["id"], ondelete="CASCADE")

    with op.batch_alter_table("variant_study_snapshot", schema=None) as batch_op:
        batch_op.drop_constraint(SNAPSHOT_FK, type_="foreignkey")
        batch_op.create_foreign_key(SNAPSHOT_FK, "variantstudy", ["id"], ["id"], ondelete="CASCADE")


def downgrade() -> None:
    """Restore the variant-study foreign keys without ``ON DELETE CASCADE``.

    SQLite does not support dropping foreign keys, so this migration is a
    no-op on any dialect other than PostgreSQL.
    """
    if op.get_context().dialect.name != "postgresql":
        return

    # Undo in reverse order of the upgrade.
    with op.batch_alter_table("variant_study_snapshot", schema=None) as batch_op:
        batch_op.drop_constraint(SNAPSHOT_FK, type_="foreignkey")
        batch_op.create_foreign_key(SNAPSHOT_FK, "variantstudy", ["id"], ["id"])

    with op.batch_alter_table("commandblock", schema=None) as batch_op:
        batch_op.drop_constraint(COMMAND_BLOCK_FK, type_="foreignkey")
        batch_op.create_foreign_key(COMMAND_BLOCK_FK, "variantstudy", ["study_id"], ["id"])
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
"""
Populate `tag` and `study_tag` tables from `patch` field in `study_additional_data` table

Revision ID: dae93f1d9110
Revises: 3c70366b10ea
Create Date: 2024-02-08 10:30:20.590919
"""
import collections
import itertools
import json
import secrets
import typing as t

import sqlalchemy as sa  # type: ignore
from alembic import op
from sqlalchemy.engine import Connection  # type: ignore

# Palette of CSS color names used to pick a random color for each new tag.
from antarest.study.css4_colors import COLOR_NAMES

# revision identifiers, used by Alembic.
revision = "dae93f1d9110"
down_revision = "3c70366b10ea"
branch_labels = None
depends_on = None


def _avoid_duplicates(tags: t.Iterable[str]) -> t.Sequence[str]:
"""Avoid duplicate tags (case insensitive)"""

upper_tags = {tag.upper(): tag for tag in tags}
return list(upper_tags.values())


def _load_patch_obj(patch: t.Optional[str]) -> t.MutableMapping[str, t.Any]:
"""Load the patch object from the `patch` field in the `study_additional_data` table."""

obj: t.MutableMapping[str, t.Any] = json.loads(patch or "{}")
obj["study"] = obj.get("study") or {}
obj["study"]["tags"] = _avoid_duplicates(obj["study"].get("tags") or [])
return obj


def upgrade() -> None:
    """
    Populate the `tag` and `study_tag` tables from the `patch` field
    of the `study_additional_data` table.

    Steps:
    - Read the study/tags pairs from the patches in `study_additional_data`.
    - Clear `tag` and `study_tag`, because tag updates made between revision
      3c70366b10ea and this one modified the patches alongside those tables.
    - Insert each unique tag label into `tag` with a randomly picked color.
    - Insert the study/tag pairs into `study_tag`.
    """
    connection: Connection = op.get_bind()

    # Collect the (deduplicated) tags attached to each study.
    rows = connection.execute("SELECT study_id, patch FROM study_additional_data")
    tags_by_ids: t.MutableMapping[str, t.Set[str]] = {
        study_id: _load_patch_obj(patch)["study"]["tags"] for study_id, patch in rows
    }

    # Start from a clean slate in both tables (association table first).
    connection.execute("DELETE FROM study_tag")
    connection.execute("DELETE FROM tag")

    # One row per unique label (case-insensitive), each with a random color.
    all_labels = {lbl.upper(): lbl for lbl in itertools.chain.from_iterable(tags_by_ids.values())}
    bulk_tags = [{"label": label, "color": secrets.choice(COLOR_NAMES)} for label in all_labels.values()]
    if bulk_tags:
        connection.execute(
            sa.text("INSERT INTO tag (label, color) VALUES (:label, :color)"),
            *bulk_tags,
        )

    # Link each study to the canonical spelling of each of its tags.
    bulk_study_tags = [
        {"study_id": study_id, "tag_label": all_labels[lbl.upper()]}
        for study_id, tags in tags_by_ids.items()
        for lbl in tags
    ]
    if bulk_study_tags:
        connection.execute(
            sa.text("INSERT INTO study_tag (study_id, tag_label) VALUES (:study_id, :tag_label)"),
            *bulk_study_tags,
        )


def downgrade() -> None:
    """
    Restore the `patch` field of `study_additional_data` from the `tag`
    and `study_tag` tables, then empty those two tables.

    Steps:
    - Read the study/tag pairs from `study_tag`.
    - Merge those tags back into each study's JSON patch.
    - Delete all rows from `study_tag` and `tag`.
    """
    connection: Connection = op.get_bind()

    # Gather the set of tag labels attached to each study.
    tags_by_ids: t.MutableMapping[str, t.Set[str]] = collections.defaultdict(set)
    for study_id, tag_label in connection.execute("SELECT study_id, tag_label FROM study_tag"):
        tags_by_ids[study_id].add(tag_label)

    # Merge the table-stored tags into each study's patch object.
    objects_by_ids = {}
    for study_id, patch in connection.execute("SELECT study_id, patch FROM study_additional_data"):
        obj = _load_patch_obj(patch)
        obj["study"]["tags"] = _avoid_duplicates(tags_by_ids[study_id] | set(obj["study"]["tags"]))
        objects_by_ids[study_id] = obj

    # Write the updated patches back to `study_additional_data`.
    bulk_patches = [{"study_id": id_, "patch": json.dumps(obj)} for id_, obj in objects_by_ids.items()]
    if bulk_patches:
        connection.execute(
            sa.text("UPDATE study_additional_data SET patch = :patch WHERE study_id = :study_id"),
            *bulk_patches,
        )

    # Finally, clear both tables (association table first).
    connection.execute("DELETE FROM study_tag")
    connection.execute("DELETE FROM tag")
Loading

0 comments on commit ef0536f

Please sign in to comment.