From bda9632d5cfbc6ae266755e8bbc238e83b92c13f Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Thu, 18 May 2023 13:15:18 +0100
Subject: [PATCH 01/85] Update README.md
Change CAD 1 version
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index c5fa833c1..f1a467840 100644
--- a/README.md
+++ b/README.md
@@ -18,7 +18,7 @@
[![pre-commit.ci status](https://results.pre-commit.ci/badge/github/claritychallenge/clarity/main.svg)](https://results.pre-commit.ci/latest/github/claritychallenge/clarity/main)
[![Downloads](https://pepy.tech/badge/pyclarity)](https://pepy.tech/project/pyclarity)
-[![PyPI](https://img.shields.io/static/v1?label=CAD1%20and%20CPC2%20Challenges%20-%20pypi&message=v0.3.2&color=orange)](https://pypi.org/project/pyclarity/0.3.2/)
+[![PyPI](https://img.shields.io/static/v1?label=CAD1%20and%20CPC2%20Challenges%20-%20pypi&message=v0.3.3&color=orange)](https://pypi.org/project/pyclarity/0.3.3/)
[![PyPI](https://img.shields.io/static/v1?label=ICASSP%202023%20Challenge%20-%20pypi&message=v0.2.1&color=orange)](https://pypi.org/project/pyclarity/0.2.1/)
[![PyPI](https://img.shields.io/static/v1?label=CEC2%20Challenge%20-%20pypi&message=v0.1.1&color=orange)](https://pypi.org/project/pyclarity/0.1.1/)
From abfb956b40f6e44368d0558be0ef1374951aff49 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 30 May 2023 05:04:54 +0000
Subject: [PATCH 02/85] [pre-commit.ci] pre-commit-autoupdate
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
updates:
- [github.com/charliermarsh/ruff-pre-commit: v0.0.267 → v0.0.270](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.267...v0.0.270)
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index fc3a6dc4f..93491bf71 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -73,7 +73,7 @@ repos:
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
- rev: "v0.0.267"
+ rev: "v0.0.270"
hooks:
- id: ruff
From 1a743ad24f25d9dd454c7507c719ca5b34fa179f Mon Sep 17 00:00:00 2001
From: Neil Shephard
Date: Tue, 30 May 2023 13:38:10 +0100
Subject: [PATCH 03/85] Adding ORDA workflow, tweaking README with badge
---
.github/workflows/ORDA.yaml | 28 ++++++++++++++++++++++++++++
README.md | 4 ++--
2 files changed, 30 insertions(+), 2 deletions(-)
create mode 100644 .github/workflows/ORDA.yaml
diff --git a/.github/workflows/ORDA.yaml b/.github/workflows/ORDA.yaml
new file mode 100644
index 000000000..d9313c05a
--- /dev/null
+++ b/.github/workflows/ORDA.yaml
@@ -0,0 +1,28 @@
+name: Release to ORDA
+on:
+ workflow_dispatch:
+ release:
+ types: [published]
+jobs:
+ upload:
+ runs-on: ubuntu-latest
+ env:
+ ARCHIVE_NAME: ${{ github.event.repository.name }}-${{ github.event.release.tag_name }}
+ steps:
+ - name: prepare-data-folder
+        run: mkdir 'data'
+ - name: download-archive
+ run: |
+ curl -sL "${{ github.event.release.zipball_url }}" > "$ARCHIVE_NAME".zip
+ curl -sL "${{ github.event.release.tarball_url }}" > "$ARCHIVE_NAME".tar.gz
+ - name: move-archive
+ run: |
+ mv "$ARCHIVE_NAME".zip data/
+ mv "$ARCHIVE_NAME".tar.gz data/
+ - name: upload-to-figshare
+ uses: figshare/github-upload-action@v1.1
+ with:
+ FIGSHARE_TOKEN: ${{ secrets.FIGSHARE_TOKEN }}
+ FIGSHARE_ENDPOINT: 'https://api.figshare.com/v2'
+ FIGSHARE_ARTICLE_ID: 23230694
+ DATA_DIR: 'data'
diff --git a/README.md b/README.md
index c5fa833c1..2d6eef2ad 100644
--- a/README.md
+++ b/README.md
@@ -18,10 +18,10 @@
[![pre-commit.ci status](https://results.pre-commit.ci/badge/github/claritychallenge/clarity/main.svg)](https://results.pre-commit.ci/latest/github/claritychallenge/clarity/main)
[![Downloads](https://pepy.tech/badge/pyclarity)](https://pepy.tech/project/pyclarity)
-[![PyPI](https://img.shields.io/static/v1?label=CAD1%20and%20CPC2%20Challenges%20-%20pypi&message=v0.3.2&color=orange)](https://pypi.org/project/pyclarity/0.3.2/)
+[![PyPI](https://img.shields.io/static/v1?label=CAD1%20and%20CPC2%20Challenges%20-%20pypi&message=v0.3.3&color=orange)](https://pypi.org/project/pyclarity/0.3.3/)
[![PyPI](https://img.shields.io/static/v1?label=ICASSP%202023%20Challenge%20-%20pypi&message=v0.2.1&color=orange)](https://pypi.org/project/pyclarity/0.2.1/)
[![PyPI](https://img.shields.io/static/v1?label=CEC2%20Challenge%20-%20pypi&message=v0.1.1&color=orange)](https://pypi.org/project/pyclarity/0.1.1/)
-
+[![ORDA](https://img.shields.io/badge/ORDA--DOI-10.15131%2Fshef.data.23230694.v.1-lightgrey)](https://figshare.shef.ac.uk/articles/software/clarity/23230694/1)
From 14a2ec6805dc3912965c7add6553893f780e454b Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 13 Jun 2023 07:09:26 +0000
Subject: [PATCH 04/85] [pre-commit.ci] pre-commit-autoupdate
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
updates:
- [github.com/asottile/pyupgrade: v3.4.0 → v3.6.0](https://github.com/asottile/pyupgrade/compare/v3.4.0...v3.6.0)
- [github.com/DavidAnson/markdownlint-cli2: v0.7.1 → v0.8.1](https://github.com/DavidAnson/markdownlint-cli2/compare/v0.7.1...v0.8.1)
- [github.com/charliermarsh/ruff-pre-commit: v0.0.270 → v0.0.272](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.270...v0.0.272)
---
.pre-commit-config.yaml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 93491bf71..dfffba6e4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,7 +20,7 @@ repos:
- id: check-toml
- repo: https://github.com/asottile/pyupgrade
- rev: v3.4.0
+ rev: v3.6.0
hooks:
- id: pyupgrade
args: [--py38-plus]
@@ -33,7 +33,7 @@ repos:
additional_dependencies: ["click==8.0.4"]
- repo: https://github.com/DavidAnson/markdownlint-cli2
- rev: v0.7.1
+ rev: v0.8.1
hooks:
- id: markdownlint-cli2
@@ -73,7 +73,7 @@ repos:
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
- rev: "v0.0.270"
+ rev: "v0.0.272"
hooks:
- id: ruff
From 34a979794e891ef972a08de0e2dd448ba5def6a4 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 27 Jun 2023 06:57:52 +0000
Subject: [PATCH 05/85] [pre-commit.ci] pre-commit-autoupdate
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
updates:
- [github.com/asottile/pyupgrade: v3.6.0 → v3.7.0](https://github.com/asottile/pyupgrade/compare/v3.6.0...v3.7.0)
- [github.com/pre-commit/mirrors-mypy: v1.3.0 → v1.4.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.3.0...v1.4.1)
- [github.com/charliermarsh/ruff-pre-commit: v0.0.272 → v0.0.275](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.272...v0.0.275)
---
.pre-commit-config.yaml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index dfffba6e4..19bf70fa0 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,7 +20,7 @@ repos:
- id: check-toml
- repo: https://github.com/asottile/pyupgrade
- rev: v3.6.0
+ rev: v3.7.0
hooks:
- id: pyupgrade
args: [--py38-plus]
@@ -65,7 +65,7 @@ repos:
- id: nbstripout
- repo: https://github.com/pre-commit/mirrors-mypy
- rev: v1.3.0
+ rev: v1.4.1
hooks:
- id: mypy
args:
@@ -73,7 +73,7 @@ repos:
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
- rev: "v0.0.272"
+ rev: "v0.0.275"
hooks:
- id: ruff
From 3212ed936ff3b51fce5cca54296b6ecd3f120b71 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 18 Jul 2023 08:18:06 +0000
Subject: [PATCH 06/85] [pre-commit.ci] pre-commit-autoupdate
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
updates:
- [github.com/asottile/pyupgrade: v3.7.0 → v3.9.0](https://github.com/asottile/pyupgrade/compare/v3.7.0...v3.9.0)
- [github.com/psf/black: 23.3.0 → 23.7.0](https://github.com/psf/black/compare/23.3.0...23.7.0)
- https://github.com/charliermarsh/ruff-pre-commit → https://github.com/astral-sh/ruff-pre-commit
- [github.com/astral-sh/ruff-pre-commit: v0.0.275 → v0.0.278](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.275...v0.0.278)
---
.pre-commit-config.yaml | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 19bf70fa0..3670f4fa3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,13 +20,13 @@ repos:
- id: check-toml
- repo: https://github.com/asottile/pyupgrade
- rev: v3.7.0
+ rev: v3.9.0
hooks:
- id: pyupgrade
args: [--py38-plus]
- repo: https://github.com/psf/black
- rev: 23.3.0
+ rev: 23.7.0
hooks:
- id: black
types: [python]
@@ -71,9 +71,9 @@ repos:
args:
- --explicit-package-bases
- - repo: https://github.com/charliermarsh/ruff-pre-commit
+ - repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
- rev: "v0.0.275"
+ rev: "v0.0.278"
hooks:
- id: ruff
From 3d07e1a9b8c2075639d7848aaad4741593244d46 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 25 Jul 2023 07:28:11 +0000
Subject: [PATCH 07/85] [pre-commit.ci] pre-commit-autoupdate
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
updates:
- [github.com/astral-sh/ruff-pre-commit: v0.0.278 → v0.0.280](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.278...v0.0.280)
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 3670f4fa3..7167117a7 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -73,7 +73,7 @@ repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
- rev: "v0.0.278"
+ rev: "v0.0.280"
hooks:
- id: ruff
From 56686d86915f301eae6ad7b07882ea4c29b28280 Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Mon, 11 Sep 2023 17:14:33 +0100
Subject: [PATCH 08/85] Script to generate ICASSP 2024 dataset
Signed-off-by: Gerardo Roa Dabike
---
recipes/cad_icassp_2024/__init__.py | 0
.../generate_dataset/config.yaml | 17 ++
.../generates_at_mic_musdb18.py | 217 ++++++++++++++++++
3 files changed, 234 insertions(+)
create mode 100644 recipes/cad_icassp_2024/__init__.py
create mode 100644 recipes/cad_icassp_2024/generate_dataset/config.yaml
create mode 100644 recipes/cad_icassp_2024/generate_dataset/generates_at_mic_musdb18.py
diff --git a/recipes/cad_icassp_2024/__init__.py b/recipes/cad_icassp_2024/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/recipes/cad_icassp_2024/generate_dataset/config.yaml b/recipes/cad_icassp_2024/generate_dataset/config.yaml
new file mode 100644
index 000000000..70a7ba784
--- /dev/null
+++ b/recipes/cad_icassp_2024/generate_dataset/config.yaml
@@ -0,0 +1,17 @@
+path:
+ root: ???
+ metadata_dir: ${path.root}/metadata
+ music_dir: ${path.root}/audio/music # musdb18 dataset
+ hrtf_dir: ${path.root}/audio/hrtf
+ scene_file: ${path.metadata_dir}/scenes.train.json
+ music_file: ${path.metadata_dir}/musdb18.train.json
+ gain_file: ${path.metadata_dir}/gains.json
+ head_positions_file: ${path.metadata_dir}/head_positions.json
+ output_music_dir: ${path.root}/audio/at_mic_music # at microphone musdb18 dataset
+ output_music_file: ${path.metadata_dir}/at_mic_music.train.json
+
+sample_rate: 44100
+
+hydra:
+ run:
+ dir: .
diff --git a/recipes/cad_icassp_2024/generate_dataset/generates_at_mic_musdb18.py b/recipes/cad_icassp_2024/generate_dataset/generates_at_mic_musdb18.py
new file mode 100644
index 000000000..2b73780ba
--- /dev/null
+++ b/recipes/cad_icassp_2024/generate_dataset/generates_at_mic_musdb18.py
@@ -0,0 +1,217 @@
+"""
+This script creates the dataset for the ICASSP 2024 Grand Challenge.
+
+It takes the music from the MUSDB18 dataset and applies the HRTF signals
+to simulate the music at the microphone position.
+The output is saved in the same format as the MUSDB18 dataset.
+
+The script takes as input:
+ - The metadata of the scenes.
+ - The metadata of the music.
+ - The metadata of the head positions.
+ - The HRTF signals.
+ - The music signals.
+
+The script outputs:
+    - The metadata of the music at the hearing aid microphones.
+    - The music signals at the hearing aid microphones.
+"""
+from __future__ import annotations
+
+# pylint: disable=import-error
+import json
+import logging
+import warnings
+from pathlib import Path
+
+import hydra
+import numpy as np
+import pyloudnorm as pyln
+from numpy import ndarray
+from omegaconf import DictConfig
+from scipy.signal import lfilter
+
+from clarity.utils.file_io import read_signal, write_signal
+
+logger = logging.getLogger(__name__)
+
+
+def apply_hrtf(signal: ndarray, hrtf_left: ndarray, hrtf_right: ndarray) -> ndarray:
+ """Applies the Left and Right HRTF to a signal.
+
+ Args:
+ signal (ndarray): Signal.
+ hrtf_left (ndarray): Left HRTF.
+ hrtf_right (ndarray): Right HRTF.
+
+ Returns:
+ ndarray: Signal with applied HRTF.
+ """
+ output_left_ear = lfilter(hrtf_left[:, 0], 1, signal[:, 0])
+ output_right_ear = lfilter(hrtf_left[:, 1], 1, signal[:, 0])
+
+ output_left_ear += lfilter(hrtf_right[:, 0], 1, signal[:, 1])
+ output_right_ear += lfilter(hrtf_right[:, 1], 1, signal[:, 1])
+
+ return np.stack([output_left_ear, output_right_ear], axis=1)
+
+
+def load_hrtf_signals(hrtf_path: str, hp: dict) -> tuple[ndarray, ndarray]:
+ """Loads the HRTF signals for a given head position.
+
+ Args:
+ hrtf_path (str): Path to the HRTF signals.
+ hp (dict): Head position.
+
+ Returns:
+ tuple(ndarray, ndarray): Left and right HRTF signals.
+ """
+
+ hp_left_path = (
+ Path(hrtf_path) / f"{hp['mic']}-{hp['subject']}-n{abs(hp['left_angle'])}.wav"
+ )
+ hp_right_path = (
+ Path(hrtf_path) / f"{hp['mic']}-{hp['subject']}-p{abs(hp['right_angle'])}.wav"
+ )
+
+ hp_left_signal = read_signal(hp_left_path)
+ hp_right_signal = read_signal(hp_right_path)
+
+ return hp_left_signal, hp_right_signal
+
+
+def normalise_lufs_level(
+ signal: ndarray, reference_signal: ndarray, sample_rate: float
+) -> ndarray:
+ """Normalises the signal to the LUFS level of the reference signal.
+
+ Args:
+ signal (ndarray): Signal to normalise.
+ reference_signal (ndarray): Reference signal.
+ sample_rate (float): Sample rate of the signal.
+
+ Returns:
+ ndarray: Normalised signal.
+ """
+ loudness_meter = pyln.Meter(int(sample_rate))
+
+ signal_lufs = loudness_meter.integrated_loudness(signal)
+ reference_signal_lufs = loudness_meter.integrated_loudness(reference_signal)
+
+ gain = reference_signal_lufs - signal_lufs
+ return pyln.normalize.loudness(signal, signal_lufs, signal_lufs + gain)
+
+
+def find_precreated_samples(source_dir: str | Path) -> list[str]:
+ """Finds music tracks created in a previous run.
+ This avoids reprocessing them.
+
+ Args:
+ source_dir (str| Path): Source directory.
+
+ Returns:
+ list[str]: List of precreated samples.
+ """
+ if isinstance(source_dir, str):
+ source_dir = Path(source_dir)
+
+ return [f.name for f in source_dir.glob("*/*")]
+
+
+@hydra.main(config_path="", config_name="config")
+def run(cfg: DictConfig) -> None:
+ """Main function of the script."""
+
+ logger.info("Generating dataset for the ICASSP 2024 Grand Challenge.\n")
+ logger.info(f"Processing music for scenes: {cfg.path.scene_file}")
+ logger.info(f"Transforming music signals from: {cfg.path.music_dir}")
+    logger.info(f"and saving them to {cfg.path.output_music_dir}")
+
+    # Load precreated samples to avoid reprocessing them
+ precreated_samples = find_precreated_samples(cfg.path.output_music_dir)
+ if len(precreated_samples) > 0:
+ logger.warning(f"Found {len(precreated_samples)} precreated samples.\n")
+
+ # Load the scenes metadata
+ with open(cfg.path.scene_file, encoding="utf-8") as f:
+ scenes_metadata = json.load(f)
+
+ # Load the music metadata
+ with open(cfg.path.music_file, encoding="utf-8") as f:
+ music_metadata = json.load(f)
+ music_metadata = {m["Track Name"]: m for m in music_metadata}
+
+ # Load the head positions metadata
+ with open(cfg.path.head_positions_file, encoding="utf-8") as f:
+ head_positions_metadata = json.load(f)
+
+ # From the scenes, get the samples names and parameters
+ toprocess_samples = {
+ f"{v['music']}-{v['head_position']}": {
+ "music": v["music"],
+ "head_position": v["head_position"],
+ }
+ for _, v in scenes_metadata.items()
+ }
+
+ # create output metadata content
+ out_music = []
+ for idx, sample in enumerate(toprocess_samples.items(), 1):
+ sample_name, sample_detail = sample
+ music = music_metadata[sample_detail["music"]]
+ head_position = sample_detail["head_position"]
+
+ out_music.append(
+ {
+ "Track Name": sample_name,
+ "Split": music["Split"],
+ "Path": f"{music['Split']}/{sample_name}",
+ }
+ )
+
+ if sample_name in precreated_samples:
+ logger.info(
+ f"[{idx}/{len(toprocess_samples)}] Skipping sample: {sample_name}"
+ )
+ continue
+
+ scene_path = Path(cfg.path.output_music_dir) / music["Split"] / sample_name
+ scene_path.mkdir(parents=True, exist_ok=True)
+
+ logger.info(f"[{idx}/{len(toprocess_samples)}] Creating sample: {sample_name}")
+
+        hrtf_left, hrtf_right = load_hrtf_signals(
+ cfg.path.hrtf_dir, head_positions_metadata[head_position]
+ )
+
+ for stem_name in ["mixture", "vocals", "drums", "bass", "other"]:
+ music_signal = read_signal(
+ Path(cfg.path.music_dir) / music["Path"] / f"{stem_name}.wav"
+ )
+
+            at_mic_signal = apply_hrtf(music_signal, hrtf_left, hrtf_right)
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore", message="Possible clipped samples in output"
+ )
+ at_mic_signal = normalise_lufs_level(
+ at_mic_signal, music_signal, cfg.sample_rate
+ )
+
+ # Save the signal
+ save_path = scene_path / f"{stem_name}.wav"
+ write_signal(
+ save_path, at_mic_signal, cfg.sample_rate, floating_point=False
+ )
+
+ precreated_samples.append(sample_name)
+
+ # Save the metadata
+ with open(cfg.path.output_music_file, "w", encoding="utf-8") as f:
+ json.dump(out_music, f, indent=4)
+
+
+# pylint: disable = no-value-for-parameter
+if __name__ == "__main__":
+ run()
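For reference, the convolution performed by `apply_hrtf` in this patch can be reproduced in isolation. This is a minimal sketch with synthetic two-column impulse responses standing in for the real BTE HRTF wav files:

```python
# Minimal sketch of the apply_hrtf convolution, using synthetic
# impulse responses in place of the real BTE HRTF recordings.
import numpy as np
from scipy.signal import lfilter

rng = np.random.default_rng(0)
signal = rng.standard_normal((44100, 2))   # stereo input, [samples, channels]
hrtf_left = rng.standard_normal((512, 2))  # columns: [left ear, right ear]
hrtf_right = rng.standard_normal((512, 2))

# Left source channel reaches both ears ...
left_ear = lfilter(hrtf_left[:, 0], 1, signal[:, 0])
right_ear = lfilter(hrtf_left[:, 1], 1, signal[:, 0])
# ... and the right source channel is added to both ears.
left_ear += lfilter(hrtf_right[:, 0], 1, signal[:, 1])
right_ear += lfilter(hrtf_right[:, 1], 1, signal[:, 1])

at_mic = np.stack([left_ear, right_ear], axis=1)
print(at_mic.shape)  # (44100, 2)
```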
From cbbaac558c5e9d7bfdde0bc8305bc42c2bfc6aaf Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Mon, 11 Sep 2023 17:15:34 +0100
Subject: [PATCH 09/85] recipe files
Signed-off-by: Gerardo Roa Dabike
---
recipes/cad_icassp_2024/baseline/README.md | 189 +++++++++++++++++++
recipes/cad_icassp_2024/baseline/config.yaml | 42 +++++
2 files changed, 231 insertions(+)
create mode 100644 recipes/cad_icassp_2024/baseline/README.md
create mode 100644 recipes/cad_icassp_2024/baseline/config.yaml
diff --git a/recipes/cad_icassp_2024/baseline/README.md b/recipes/cad_icassp_2024/baseline/README.md
new file mode 100644
index 000000000..7732be324
--- /dev/null
+++ b/recipes/cad_icassp_2024/baseline/README.md
@@ -0,0 +1,189 @@
+# The First Cadenza Challenge (CAD1) - Task 1: Listening music via headphones
+
+Cadenza challenge code for the First Cadenza Challenge (CAD1) Task1.
+
+For more information please visit the [challenge website](https://cadenzachallenge.org/docs/cadenza1/cc1_intro).
+
+## 1. Data structure
+
+The First Cadenza Challenge - task 1 is using the MUSDB18-HQ dataset.
+The data is split into train, validation and test following the same split from museval.
+I.e., 86 songs are for training, 16 for validation and 50 for evaluation.
+
+To download the data, please visit [here](https://forms.gle/UQkuCxqQVxZtGggPA). The data is split into `cadenza_cad1_task1_core_musdb18hq.tar.gz` (containing the MUSDB18-HQ dataset) and
+`cadenza_cad1_task1_core_metadata.tar.gz` (containing the list of songs and listeners' characteristics per split).
+Alternatively, you can download the MUSDB18-HQ dataset from the official [SigSep website](https://sigsep.github.io/datasets/musdb.html#musdb18-hq-uncompressed-wav).
+If you opt for this alternative, be sure to download the uncompressed wav version. Note that you will need both packages to run the baseline system.
+
+If you need additional music data for training your model, please restrict to the use of [MedleyDB](https://medleydb.weebly.com/) [4] [5],
+[BACH10](https://labsites.rochester.edu/air/resource.html) [6] and [FMA-small](https://github.com/mdeff/fma) [7].
+These are shared as `cadenza_cad1_task1_augmentation_medleydb.tar.gz`, `cadenza_cad1_task1_augmentation_bach10.tar.gz`
+and `cadenza_cad1_task1_augmentation_fma_small.tar.gz`.
+**Keeping the augmentation data restricted to these datasets will ensure that the evaluation is fair for all participants**.
+
+Unpack packages under the same root directory using
+
+```bash
+tar -xvzf
+```
+
+### 1.1 Necessary data
+
+* **Music** contains the MUSDB18-HQ music dataset for training, validation and evaluation.
+
+```text
+cadenza_data
+└───task1
+ └───audio
+ └───musdb18hq
+ ├───train
+ └───test
+```
+
+* **Metadata** contains the metadata for the systems.
+
+```text
+cadenza_data
+└───task1
+ └───metadata
+ └───musdb18hq
+ ├───listeners.train.json
+ ├───listeners.valid.json
+ ├───musdb18.train.json
+ ├───musdb18.valid.json
+ └───musdb18.test.json
+```
+
+### 1.2 Additional optional data
+
+* **MedleyDB** contains both MedleyDB versions 1 [[4](#references)] and 2 [[5](#references)] datasets.
+
+Tracks from the MedleyDB dataset are not included in the evaluation set.
+However, it is your responsibility to exclude any song that may already be contained in the training set.
+
+```text
+cadenza_data
+└───task1
+ └───audio
+ └───MedleyDB
+ ├───Audio
+ └───Metadata
+```
+
+* **BACH10** contains the BACH10 dataset [[6](#references)].
+
+Tracks from the BACH10 dataset are not included in MUSDB18-HQ and can all be used as training augmentation data.
+
+```text
+cadenza_data
+└───task1
+ └───audio
+ └───fma_small
+ ├───000
+ ├───001
+ ├───...
+```
+
+* **FMA Small** contains the FMA small subset of the FMA dataset [[7](#references)].
+
+Tracks from the FMA small dataset are not included in the MUSDB18-HQ.
+This dataset does not provide independent stems but only the full mix.
+However, it can be used to train an unsupervised model to better initialise a supervised model.
+
+```text
+cadenza_data
+└───task1
+ └───audio
+ └───fma_small
+ ├───000
+ ├───001
+ ├───...
+```
+
+### 1.3 Demo data
+
+To help you to start with the challenge, we provide a small subset of the data.
+The `demo_data` folder contains a single song and two listeners from the validation set.
+
+To use the demo data, simply download the package `cadenza_data_demo.tar.xz`
+from [here](https://drive.google.com/drive/folders/1Yxo_R-yPByEUvX5O5lhsHk3tW1ek5qKW?usp=share_link)
+and unpack it under `recipes/cad1/task1/`, i.e., one level above the baseline directory.
+Note that the `path.root` variable in `config.yaml` is already set to the demo data by default.
+
+To unpack the demo data, run:
+
+```bash
+tar -xvf cadenza_data_demo.tar.xz
+```
+
+## 2. Baseline
+
+In the `baseline/` folder, we provide code for running the baseline enhancement system and performing the objective evaluation.
+Note that we use [hydra](https://hydra.cc/docs/intro/) for config handling.
+
+### 2.1 Enhancement
+
+The baseline enhancement uses the out-of-the-box [Hybrid Demucs](https://github.com/facebookresearch/demucs) [1]
+source separation model distributed on [TorchAudio](https://pytorch.org/audio/main/tutorials/hybrid_demucs_tutorial.html)
+to separate the VDBO (`vocals`, `drums`, `bass` and `others`) stems and applies the scene gains to each stem.
+
+The remixing is performed by summing the gain-adjusted VDBO stems, and a simple NAL-R [2] fitting amplification is then applied to the remix.
+
+The baseline generates one stereo remixed signal per scene-listener pair.
+
+To run the baseline enhancement system, first make sure that `path.root` in `config.yaml` points to
+where you have installed the Cadenza data. This parameter defaults to the working directory.
+You can also define your own `path.exp_folder` to store enhanced
+signals and evaluated results.
+
+Then run:
+
+```bash
+python enhance.py
+```
+
+Alternatively, you can provide the root variable on the command line, e.g.,
+
+```bash
+python enhance.py path.root=/full/path/to/my/cadenza_data
+```
+
+To get a full list of the parameters, run:
+
+```bash
+python enhance.py --help
+```
+
+The folder `enhanced_signals` will appear in the `exp` folder.
+
+### 2.2 Evaluation
+
+The `evaluate.py` script takes the remixed signals stored in `enhanced_signals` and computes the HAAQI [[3](#references)] score
+for the left and right channels of each signal.
+The average of these two scores is reported for each signal.
+
+To run the evaluation stage, make sure that `path.root` is set in the `config.yaml` file and then run
+
+```bash
+python evaluate.py
+```
+
+A csv file containing the left, right and combined HAAQI scores will be generated in the `path.exp_folder`.
+
+To check the HAAQI code, see [here](../../../../clarity/evaluator/haaqi).
+
+Please note: you will not get identical HAAQI scores for the same signals if the random seed is not defined
+(in the given recipe, the random seed for each signal is set as the last eight digits of the song md5).
+This is because random noise is generated within HAAQI, but the differences should be sufficiently small.
+
+The score for the baseline is 0.3608 HAAQI overall.
+
+## References
+
+* [1] Défossez, A. "Hybrid Spectrogram and Waveform Source Separation". Proceedings of the ISMIR 2021 Workshop on Music Source Separation. [doi:10.48550/arXiv.2111.03600](https://arxiv.org/abs/2111.03600)
+* [2] Byrne, Denis, and Harvey Dillon. "The National Acoustic Laboratories'(NAL) new procedure for selecting the gain and frequency response of a hearing aid." Ear and hearing 7.4 (1986): 257-265. [doi:10.1097/00003446-198608000-00007](https://doi.org/10.1097/00003446-198608000-00007)
+* [3] Kates J M, Arehart K H. "The Hearing-Aid Audio Quality Index (HAAQI)". IEEE/ACM transactions on audio, speech, and language processing, 24(2), 354–365. [doi:10.1109/TASLP.2015.2507858](https://doi.org/10.1109%2FTASLP.2015.2507858)
+* [4] R. Bittner, J. Salamon, M. Tierney, M. Mauch, C. Cannam and J. P. Bello, "MedleyDB: A Multitrack Dataset for Annotation-Intensive MIR Research", in 15th International Society for Music Information Retrieval Conference, Taipei, Taiwan, Oct. 2014. [pdf](https://archives.ismir.net/ismir2014/paper/000322.pdf)
+* [5] Rachel M. Bittner, Julia Wilkins, Hanna Yip and Juan P. Bello, "MedleyDB 2.0: New Data and a System for Sustainable Data Collection" Late breaking/demo extended abstract, 17th International Society for Music Information Retrieval (ISMIR) conference, August 2016. [pdf](https://wp.nyu.edu/ismir2016/wp-content/uploads/sites/2294/2016/08/bittner-medleydb.pdf)
+* [6] Zhiyao Duan, Bryan Pardo and Changshui Zhang, "Multiple fundamental frequency estimation by modeling spectral peaks and non-peak regions," IEEE Trans. Audio Speech Language Process., vol. 18, no. 8, pp. 2121-2133, 2010. [doi:10.1109/TASL.2010.2042119](https://doi.org/10.1109/TASL.2010.2042119)
+* [7] Defferrard, M., Benzi, K., Vandergheynst, P., & Bresson, X. (2016). "FMA: A dataset for music analysis". arXiv preprint arXiv:1612.01840. [doi:10.48550/arXiv.1612.01840](https://doi.org/10.48550/arXiv.1612.01840)
diff --git a/recipes/cad_icassp_2024/baseline/config.yaml b/recipes/cad_icassp_2024/baseline/config.yaml
new file mode 100644
index 000000000..caba995a5
--- /dev/null
+++ b/recipes/cad_icassp_2024/baseline/config.yaml
@@ -0,0 +1,42 @@
+path:
+ root: /media/gerardo/Extended_old/cadenza_data/icassp_2024
+ metadata_dir: ${path.root}/metadata
+ music_dir: ${path.root}/audio/at_mic_music
+ gains_file: ${path.metadata_dir}/gains.json
+ head_positions_file: ${path.metadata_dir}/head_positions.json
+ listeners_file: ${path.metadata_dir}/listeners.train.json
+ music_file: ${path.metadata_dir}/at_mic_music.train.json
+ scenes_file: ${path.metadata_dir}/scenes.train.json
+ scene_listeners_file: ${path.metadata_dir}/scene_listeners.train.json
+ exp_folder: ./exp # folder to store enhanced signals and final results
+
+sample_rate: 44100
+
+nalr:
+ nfir: 220
+ sample_rate: ${sample_rate}
+
+apply_compressor: False
+compressor:
+ threshold: 0.35
+ attenuation: 0.1
+ attack: 50
+ release: 1000
+ rms_buffer_size: 0.064
+
+soft_clip: True
+
+separator:
+ model: demucs # demucs or openunmix
+ device: ~
+
+evaluate:
+ set_random_seed: True
+ small_test: True
+ batch_size: 1 # Number of batches
+ batch: 0 # Batch number to evaluate
+
+# hydra config
+hydra:
+ run:
+ dir: ${path.exp_folder}
\ No newline at end of file
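The `${...}` references in the config above are OmegaConf interpolations, which Hydra resolves when the config is loaded. A minimal sketch of how they expand, with a placeholder root path:

```python
# Sketch of how the ${path.root} style interpolations in config.yaml
# resolve; the root value here is a placeholder, not a real path.
from omegaconf import OmegaConf

cfg = OmegaConf.create(
    """
    path:
      root: /data/cadenza
      metadata_dir: ${path.root}/metadata
      gains_file: ${path.metadata_dir}/gains.json
    sample_rate: 44100
    """
)
print(cfg.path.gains_file)  # /data/cadenza/metadata/gains.json
```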
From 84e87525d65ea8f6457728c8a9db47a7d6c9897f Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Mon, 11 Sep 2023 17:16:54 +0100
Subject: [PATCH 10/85] Add source_separation_utils to keep enhance cleaner
Signed-off-by: Gerardo Roa Dabike
---
.../baseline/source_separation_utils.py | 99 +++++++++++++++++++
1 file changed, 99 insertions(+)
create mode 100644 recipes/cad_icassp_2024/baseline/source_separation_utils.py
diff --git a/recipes/cad_icassp_2024/baseline/source_separation_utils.py b/recipes/cad_icassp_2024/baseline/source_separation_utils.py
new file mode 100644
index 000000000..5ecbe2041
--- /dev/null
+++ b/recipes/cad_icassp_2024/baseline/source_separation_utils.py
@@ -0,0 +1,99 @@
+"""Module that contains functions for source separation."""
+from __future__ import annotations
+
+# pylint: disable=import-error
+import torch
+from torchaudio.transforms import Fade
+
+
+def separate_sources(
+ model: torch.nn.Module,
+ mix: torch.Tensor,
+ sample_rate: int,
+ segment: float = 10.0,
+ overlap: float = 0.1,
+ device: torch.device | str | None = None,
+):
+ """
+ Apply model to a given mixture.
+ Use fade, and add segments together in order to add model segment by segment.
+
+ Args:
+ model (torch.nn.Module): model to use for separation
+ mix (torch.Tensor): mixture to separate, shape (batch, channels, time)
+ sample_rate (int): sampling rate of the mixture
+ segment (float): segment length in seconds
+ overlap (float): overlap between segments, between 0 and 1
+ device (torch.device, str, or None): if provided, device on which to
+ execute the computation, otherwise `mix.device` is assumed.
+ When `device` is different from `mix.device`, only local computations will
+ be on `device`, while the entire tracks will be stored on `mix.device`.
+
+ Returns:
+ torch.Tensor: estimated sources
+
+ Based on https://pytorch.org/audio/main/tutorials/hybrid_demucs_tutorial.html
+ """
+ device = mix.device if device is None else torch.device(device)
+ mix = torch.as_tensor(mix, device=device)
+
+    if mix.ndim == 1:
+        # one track and mono audio: add batch and channel dimensions
+        mix = mix.unsqueeze(0).unsqueeze(0)
+    elif mix.ndim == 2:
+        # one track and stereo audio: add a batch dimension
+        mix = mix.unsqueeze(0)
+
+ batch, channels, length = mix.shape
+
+ chunk_len = int(sample_rate * segment * (1 + overlap))
+ start = 0
+ end = chunk_len
+ overlap_frames = overlap * sample_rate
+ fade = Fade(fade_in_len=0, fade_out_len=int(overlap_frames), fade_shape="linear")
+
+ final = torch.zeros(batch, 4, channels, length, device=device)
+
+ while start < length - overlap_frames:
+ chunk = mix[:, :, start:end]
+ with torch.no_grad():
+ out = model.forward(chunk)
+ out = fade(out)
+ final[:, :, :, start:end] += out
+ if start == 0:
+ fade.fade_in_len = int(overlap_frames)
+ start += int(chunk_len - overlap_frames)
+ else:
+ start += chunk_len
+ end += chunk_len
+ if end >= length:
+ fade.fade_out_len = 0
+
+ return final.cpu().detach().numpy()
+
+
+def get_device(device: str | None) -> tuple:
+    """Get the Torch device.
+
+    Args:
+        device (str | None): device type, e.g. "cpu", "gpu0", "gpu1", or None.
+
+    Returns:
+        torch.device: torch.device() appropriate to the hardware available.
+        str: device type selected, e.g. "cpu", "cuda".
+    """
+ if device is None:
+ if torch.cuda.is_available():
+ return torch.device("cuda"), "cuda"
+ return torch.device("cpu"), "cpu"
+
+ if device.startswith("gpu"):
+ device_index = int(device.replace("gpu", ""))
+        if device_index >= torch.cuda.device_count():
+ raise ValueError(f"GPU device index {device_index} is not available.")
+ return torch.device(f"cuda:{device_index}"), "cuda"
+
+ if device == "cpu":
+ return torch.device("cpu"), "cpu"
+
+ raise ValueError(f"Unsupported device type: {device}")
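The chunked inference loop in `separate_sources` can be exercised without a real separation model. A sketch using a stand-in module that maps `(batch, channels, time)` to `(batch, 4, channels, time)`; `DummySeparator` is hypothetical, and the import assumes the recipe module is on the path:

```python
# Sketch: drive separate_sources with a dummy model to illustrate the
# chunk/overlap/fade mechanics; DummySeparator is made up for this demo.
import torch
from source_separation_utils import separate_sources

class DummySeparator(torch.nn.Module):
    def forward(self, mix: torch.Tensor) -> torch.Tensor:
        # Pretend each of the four stems is a quarter of the mixture.
        return torch.stack([mix / 4.0] * 4, dim=1)

mix = torch.randn(1, 2, 44100 * 30)  # 30 s of stereo audio
sources = separate_sources(DummySeparator(), mix, sample_rate=44100, device="cpu")
print(sources.shape)  # (1, 4, 2, 1323000): batch, stems, channels, samples
```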
From e035f0a24dcd564674686b56650c80e08ca079ef Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Mon, 11 Sep 2023 17:19:22 +0100
Subject: [PATCH 11/85] Add enhancer
Signed-off-by: Gerardo Roa Dabike
---
recipes/cad_icassp_2024/baseline/enhance.py | 280 ++++++++++++++++++++
1 file changed, 280 insertions(+)
create mode 100644 recipes/cad_icassp_2024/baseline/enhance.py
diff --git a/recipes/cad_icassp_2024/baseline/enhance.py b/recipes/cad_icassp_2024/baseline/enhance.py
new file mode 100644
index 000000000..f93394bca
--- /dev/null
+++ b/recipes/cad_icassp_2024/baseline/enhance.py
@@ -0,0 +1,280 @@
+""" Run the dummy enhancement. """
+from __future__ import annotations
+
+import json
+import logging
+from pathlib import Path
+
+# pylint: disable=import-error
+import hydra
+import numpy as np
+import torch
+from evaluate import apply_gains, make_scene_listener_list, remix_stems
+from numpy import ndarray
+from omegaconf import DictConfig
+from source_separation_utils import get_device, separate_sources
+from torchaudio.pipelines import HDEMUCS_HIGH_MUSDB
+
+from clarity.enhancer.compressor import Compressor
+from clarity.enhancer.nalr import NALR
+from clarity.utils.audiogram import Audiogram, Listener
+from clarity.utils.file_io import read_signal, write_signal
+from clarity.utils.signal_processing import (
+ denormalize_signals,
+ normalize_signal,
+ resample,
+)
+
+logger = logging.getLogger(__name__)
+
+
+# pylint: disable=unused-argument
+def decompose_signal(
+ model: torch.nn.Module,
+ model_sample_rate: int,
+ signal: ndarray,
+ signal_sample_rate: int,
+ device: torch.device,
+ sources_list: list[str],
+ listener: Listener,
+ normalise: bool = True,
+) -> dict[str, ndarray]:
+ """
+    Decompose the signal into four stereo stems (eight mono signals).
+
+ The listener is ignored by the baseline system as it
+ is not performing personalised decomposition.
+ Instead, it performs a standard music decomposition using a pre-trained
+ model trained on the MUSDB18 dataset.
+
+ Args:
+ model (torch.nn.Module): Torch model.
+ model_sample_rate (int): Sample rate of the model.
+ signal (ndarray): Signal to be decomposed.
+ signal_sample_rate (int): Sample frequency.
+ device (torch.device): Torch device to use for processing.
+ sources_list (list): List of strings used to index dictionary.
+        listener (Listener): Listener object (ignored by the baseline).
+ normalise (bool): Whether to normalise the signal.
+
+ Returns:
+ Dictionary: Indexed by sources with the associated model as values.
+ """
+ if signal.shape[0] > signal.shape[1]:
+ signal = signal.T
+
+ if signal_sample_rate != model_sample_rate:
+ signal = resample(signal, signal_sample_rate, model_sample_rate)
+
+ if normalise:
+ signal, ref = normalize_signal(signal)
+
+ sources = separate_sources(
+ model,
+ torch.from_numpy(signal.astype(np.float32)),
+ model_sample_rate,
+ device=device,
+ )
+
+ # only one element in the batch
+ sources = sources[0]
+ if normalise:
+ sources = denormalize_signals(sources, ref)
+
+ sources = np.transpose(sources, (0, 2, 1))
+ return dict(zip(sources_list, sources))
+
+
+def apply_baseline_ha(
+ enhancer: NALR,
+ compressor: Compressor | None,
+ signal: ndarray,
+ audiogram: Audiogram,
+ apply_compressor: bool = False,
+) -> np.ndarray:
+ """
+ Apply NAL-R prescription hearing aid to a signal.
+
+ Args:
+ enhancer (NALR): A NALR object that enhances the signal.
+ compressor (Compressor | None): A Compressor object that compresses the signal.
+ signal (ndarray): An ndarray representing the audio signal.
+ audiogram (Audiogram): An Audiogram object representing the listener's
+ audiogram.
+ apply_compressor (bool): Whether to apply the compressor.
+
+ Returns:
+ An ndarray representing the processed signal.
+ """
+ nalr_fir, _ = enhancer.build(audiogram)
+ proc_signal = enhancer.apply(nalr_fir, signal)
+ if apply_compressor:
+ if compressor is None:
+ raise ValueError("Compressor must be provided to apply compressor.")
+
+ proc_signal, _, _ = compressor.process(proc_signal)
+ return proc_signal
+
+
+def process_remix_for_listener(
+ signal: ndarray,
+ enhancer: NALR,
+ compressor: Compressor,
+ listener: Listener,
+ apply_compressor: bool = False,
+) -> ndarray:
+    """Process the remixed signal for a listener.
+
+    Args:
+        signal (ndarray) : Stereo signal to process
+        enhancer (NALR) : NAL-R prescription hearing aid
+        compressor (Compressor) : Compressor
+        listener (Listener) : Listener object
+        apply_compressor (bool) : Whether to apply the compressor
+
+    Returns:
+        ndarray: Processed signal.
+    """
+ left_output = apply_baseline_ha(
+ enhancer, compressor, signal[:, 0], listener.audiogram_left, apply_compressor
+ )
+ right_output = apply_baseline_ha(
+ enhancer, compressor, signal[:, 1], listener.audiogram_right, apply_compressor
+ )
+
+ return np.stack([left_output, right_output], axis=1)
+
+
+@hydra.main(config_path="", config_name="config")
+def enhance(config: DictConfig) -> None:
+    """
+    Run the music enhancement.
+    The system decomposes the music into vocal, drums, bass, and other stems,
+    applies the scene gains, and remixes the stems.
+    The NAL-R prescription procedure is then applied to the remixed signal.
+
+    Args:
+        config (dict): Dictionary of configuration options for enhancing music.
+
+    Writes one remixed stereo signal per scene-listener pair.
+    """
+
+ # Set the output directory where processed signals will be saved
+ enhanced_folder = Path("enhanced_signals")
+ enhanced_folder.mkdir(parents=True, exist_ok=True)
+
+ # Loading pretrained source separation model
+ if config.separator.model == "demucs":
+ separation_model = HDEMUCS_HIGH_MUSDB.get_model()
+ model_sample_rate = HDEMUCS_HIGH_MUSDB.sample_rate
+ sources_order = separation_model.sources
+ normalise = True
+ elif config.separator.model == "openunmix":
+ separation_model = torch.hub.load("sigsep/open-unmix-pytorch", "umxhq", niter=0)
+ model_sample_rate = separation_model.sample_rate
+ sources_order = ["vocals", "drums", "bass", "other"]
+ normalise = False
+ else:
+ raise ValueError(f"Separator model {config.separator.model} not supported.")
+
+ device, _ = get_device(config.separator.device)
+ separation_model.to(device)
+
+ # Load listener audiograms and songs
+ listener_dict = Listener.load_listener_dict(config.path.listeners_file)
+
+ with Path(config.path.gains_file).open("r", encoding="utf-8") as file:
+ gains = json.load(file)
+
+ with Path(config.path.scenes_file).open("r", encoding="utf-8") as file:
+ scenes = json.load(file)
+
+ with Path(config.path.scene_listeners_file).open("r", encoding="utf-8") as file:
+ scenes_listeners = json.load(file)
+
+ with Path(config.path.music_file).open("r", encoding="utf-8") as file:
+ songs = json.load(file)
+
+ enhancer = NALR(**config.nalr)
+ compressor = Compressor(**config.compressor)
+
+ # Select a batch to process
+ scene_listener_pairs = make_scene_listener_list(
+ scenes_listeners, config.evaluate.small_test
+ )
+ scene_listener_pairs = scene_listener_pairs[
+ config.evaluate.batch :: config.evaluate.batch_size
+ ]
+
+ # Decompose each song into left and right vocal, drums, bass, and other stems
+ # and process each stem for the listener
+ previous_song = ""
+ num_scenes = len(scene_listener_pairs)
+ for idx, scene_listener_pair in enumerate(scene_listener_pairs):
+ scene_id, listener_id = scene_listener_pair
+
+ scene = scenes[scene_id]
+ song_name = f"{scene['music']}-{scene['head_position']}"
+
+ logger.info(
+ f"[{idx:03d}/{num_scenes:03d}] "
+ f"Processing {song_name} for listener {listener_id}"
+ )
+ # Get the listener's audiogram
+ listener = listener_dict[listener_id]
+
+ # Read the mixture signal
+ # Convert to 32-bit floating point and transpose
+ # from [samples, channels] to [channels, samples]
+ if song_name != previous_song:
+ mixture_signal = read_signal(
+ filename=Path(config.path.music_dir)
+ / songs[song_name]["Path"]
+ / "mixture.wav",
+ sample_rate=config.sample_rate,
+ allow_resample=True,
+ )
+
+ stems: dict[str, ndarray] = decompose_signal(
+ model=separation_model,
+ model_sample_rate=model_sample_rate,
+ signal=mixture_signal,
+ signal_sample_rate=config.sample_rate,
+ device=device,
+ sources_list=sources_order,
+ listener=listener,
+ normalise=normalise,
+ )
+
+ stems = apply_gains(stems, config.sample_rate, gains[scene["gain"]])
+ enhanced_signal = remix_stems(stems, mixture_signal, model_sample_rate)
+
+ enhanced_signal = process_remix_for_listener(
+ signal=enhanced_signal,
+ enhancer=enhancer,
+ compressor=compressor,
+ listener=listener,
+ apply_compressor=config.apply_compressor,
+ )
+
+ filename = Path(
+ enhanced_folder
+ / f"{listener.id}"
+ / f"{song_name}"
+ / f"{scene_id}_{listener.id}_remix.wav"
+ )
+ filename.parent.mkdir(parents=True, exist_ok=True)
+ write_signal(
+ filename=filename,
+ signal=enhanced_signal,
+ sample_rate=config.sample_rate,
+ floating_point=False,
+ strict=False,
+ )
+
+ logger.info("Done!")
+
+
+# pylint: disable = no-value-for-parameter
+if __name__ == "__main__":
+ enhance()
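The per-ear hearing-aid path in `apply_baseline_ha` boils down to building a NAL-R FIR from the ear's audiogram and filtering the signal with it. A sketch on white noise with a synthetic audiogram; the `Audiogram` constructor field names are assumptions inferred from the pyclarity API used above, and the `NALR` settings are taken from this recipe's `config.yaml`:

```python
# Sketch of the NAL-R amplification step; the audiogram values are
# synthetic and the Audiogram field names (levels, frequencies) are assumed.
import numpy as np
from clarity.enhancer.nalr import NALR
from clarity.utils.audiogram import Audiogram

audiogram = Audiogram(
    levels=np.array([30, 35, 40, 45, 50, 55, 60, 65]),
    frequencies=np.array([250, 500, 1000, 2000, 3000, 4000, 6000, 8000]),
)
enhancer = NALR(nfir=220, sample_rate=44100)  # values from config.yaml

signal = np.random.default_rng(0).standard_normal(44100)  # 1 s of noise
nalr_fir, _ = enhancer.build(audiogram)
amplified = enhancer.apply(nalr_fir, signal)
print(amplified.shape)
```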
From 394d7429bf25e1c70877d4685f8b4a5df612ab8a Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Mon, 11 Sep 2023 17:21:37 +0100
Subject: [PATCH 12/85] evaluate
Signed-off-by: Gerardo Roa Dabike
---
recipes/cad_icassp_2024/baseline/evaluate.py | 332 +++++++++++++++++++
1 file changed, 332 insertions(+)
create mode 100644 recipes/cad_icassp_2024/baseline/evaluate.py
diff --git a/recipes/cad_icassp_2024/baseline/evaluate.py b/recipes/cad_icassp_2024/baseline/evaluate.py
new file mode 100644
index 000000000..b8c3a4dfb
--- /dev/null
+++ b/recipes/cad_icassp_2024/baseline/evaluate.py
@@ -0,0 +1,332 @@
+"""Evaluate the enhanced signals using the HAAQI metric."""
+from __future__ import annotations
+
+# pylint: disable=import-error
+import csv
+import hashlib
+import json
+import logging
+import warnings
+from pathlib import Path
+
+import hydra
+import numpy as np
+import pyloudnorm as pyln
+from numpy import ndarray
+from omegaconf import DictConfig
+
+from clarity.enhancer.nalr import NALR
+from clarity.evaluator.haaqi import compute_haaqi
+from clarity.utils.audiogram import Listener
+from clarity.utils.file_io import read_signal
+from clarity.utils.signal_processing import compute_rms
+
+logger = logging.getLogger(__name__)
+
+
+class ResultsFile:
+ """A utility class for writing results to a CSV file.
+
+ Attributes:
+ file_name (str): The name of the file to write results to.
+ """
+
+ def __init__(self, file_name: str):
+ """Initialize the ResultsFile instance.
+
+ Args:
+ file_name (str): The name of the file to write results to.
+ """
+ self.file_name = file_name
+
+ def write_header(self):
+ """Write the header row to the CSV file."""
+ with open(self.file_name, "w", encoding="utf-8", newline="") as csv_file:
+ csv_writer = csv.writer(
+ csv_file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
+ )
+ csv_writer.writerow(
+ [
+ "scene",
+ "song",
+ "listener",
+ "left_score",
+ "right_score",
+ "score",
+ ]
+ )
+
+ def add_result(
+ self,
+ scene: str,
+ song: str,
+ listener_id: str,
+ left_score: float,
+ right_score: float,
+ score: float,
+ ):
+ """Add a result to the CSV file.
+
+ Args:
+ scene (str): The name of the scene that the result is for.
+ song (str): The name of the song that the result is for.
+ listener_id (str): The name of the listener who submitted the result.
+ left_score (float): The score for the left channel.
+ right_score (float): The score for the right channel.
+ score (float): The combined score.
+ """
+ logger.info(f"The combined score is {score}")
+
+ with open(self.file_name, "a", encoding="utf-8", newline="") as csv_file:
+ csv_writer = csv.writer(
+ csv_file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
+ )
+ csv_writer.writerow(
+ [
+ scene,
+ song,
+ listener_id,
+ str(left_score),
+ str(right_score),
+ str(score),
+ ]
+ )
+
+
+def apply_gains(stems: dict, sample_rate: float, gains: dict) -> dict:
+ """Apply gain to the signal by using LUFS.
+
+ Args:
+ stems (dict): Dictionary of stems.
+ sample_rate (float): Sample rate of the signal.
+ gains (dict): Dictionary of gains.
+
+ Returns:
+ dict: Dictionary of stems with applied gains.
+ """
+ meter = pyln.Meter(int(sample_rate))
+ stems_gain = {}
+ for stem_str, stem_signal in stems.items():
+ if stem_signal.shape[0] < stem_signal.shape[1]:
+ stem_signal = stem_signal.T
+
+ stem_lufs = meter.integrated_loudness(stem_signal)
+ if stem_lufs == -np.inf:
+ stem_lufs = -80
+
+ gain = stem_lufs + gains[stem_str]
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore", message="Possible clipped samples in output"
+ )
+ stems_gain[stem_str] = pyln.normalize.loudness(stem_signal, stem_lufs, gain)
+ return stems_gain
+
+
+def level_normalisation(
+ signal: ndarray, reference_signal: ndarray, sample_rate: float
+) -> ndarray:
+ """Normalise the signal to the LUFS level of the reference signal.
+
+ Args:
+ signal (ndarray): Signal to normalise.
+ reference_signal (ndarray): Reference signal.
+ sample_rate (float): Sample rate of the signal.
+
+ Returns:
+ ndarray: Normalised signal.
+ """
+ meter = pyln.Meter(int(sample_rate))
+ signal_lufs = meter.integrated_loudness(signal)
+ reference_signal_lufs = meter.integrated_loudness(reference_signal)
+
+ if signal_lufs == -np.inf:
+ signal_lufs = -80
+
+ if reference_signal_lufs == -np.inf:
+ reference_signal_lufs = -80
+
+ gain = reference_signal_lufs - signal_lufs
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", message="Possible clipped samples in output")
+ normed_signal = pyln.normalize.loudness(signal, signal_lufs, signal_lufs + gain)
+ return normed_signal
+
+
+def remix_stems(stems: dict, reference_signal: ndarray, sample_rate: float) -> ndarray:
+ """Remix the stems into a stereo signal.
+
+ The remixing is done by summing the stems.
+ Then, the signal is normalised to the LUFS level of the reference signal.
+
+ Args:
+ stems (dict): Dictionary of stems.
+ reference_signal (ndarray): Reference signal.
+ sample_rate (float): Sample rate of the signal.
+
+ Returns:
+ ndarray: Stereo signal.
+ """
+ remix_signal = np.zeros(stems["vocals"].shape)
+ for _, stem_signal in stems.items():
+ remix_signal += stem_signal
+ return level_normalisation(remix_signal, reference_signal, sample_rate)
+
+
+def make_scene_listener_list(scenes_listeners, small_test=False):
+ """Make the list of scene-listener pairing to process"""
+ scene_listener_pairs = [
+ (scene, listener)
+ for scene in scenes_listeners
+ for listener in scenes_listeners[scene]
+ ]
+
+ # Can define a standard 'small_test' with just 1/50 of the data
+ if small_test:
+ scene_listener_pairs = scene_listener_pairs[::50]
+
+ return scene_listener_pairs
+
+
+def set_song_seed(song: str) -> None:
+ """Set a seed that is unique for the given song"""
+ song_encoded = hashlib.md5(song.encode("utf-8")).hexdigest()
+ song_md5 = int(song_encoded, 16) % (10**8)
+ np.random.seed(song_md5)
+
+
+def load_reference_stems(music_dir: str | Path) -> tuple[dict[str, ndarray], ndarray]:
+ """Load the reference stems for a given scene.
+
+    Args:
+        music_dir (str | Path): The path to the music directory.
+
+    Returns:
+ reference_stems (dict): A dictionary of reference stems.
+ original_mixture (ndarray): The original mixture.
+ """
+ reference_stems = {}
+ for instrument in ["drums", "bass", "other", "vocals"]:
+ stem = read_signal(Path(music_dir) / f"{instrument}.wav")
+ reference_stems[instrument] = stem
+
+ return reference_stems, read_signal(Path(music_dir) / "mixture.wav")
+
+
+@hydra.main(config_path="", config_name="config")
+def run_calculate_aq(config: DictConfig) -> None:
+ """Evaluate the enhanced signals using the HAAQI metric."""
+
+ enhanced_folder = Path("enhanced_signals")
+ logger.info(f"Evaluating from {enhanced_folder} directory")
+
+ # Load listener audiograms and songs
+ listener_dict = Listener.load_listener_dict(config.path.listeners_file)
+
+ with Path(config.path.gains_file).open("r", encoding="utf-8") as file:
+ gains = json.load(file)
+
+ with Path(config.path.scenes_file).open("r", encoding="utf-8") as file:
+ scenes = json.load(file)
+
+ with Path(config.path.scene_listeners_file).open("r", encoding="utf-8") as file:
+ scenes_listeners = json.load(file)
+
+ with Path(config.path.music_file).open("r", encoding="utf-8") as file:
+ songs = json.load(file)
+
+ enhancer = NALR(**config.nalr)
+
+ if config.evaluate.batch_size == 1:
+ results_file = ResultsFile("scores.csv")
+ else:
+ results_file = ResultsFile(
+ f"scores_{config.evaluate.batch + 1}-{config.evaluate.batch_size}.csv"
+ )
+ results_file.write_header()
+
+ scene_listener_pairs = make_scene_listener_list(
+ scenes_listeners, config.evaluate.small_test
+ )
+ scene_listener_pairs = scene_listener_pairs[
+ config.evaluate.batch :: config.evaluate.batch_size
+ ]
+ num_scenes = len(scene_listener_pairs)
+ for idx, scene_listener_pair in enumerate(scene_listener_pairs):
+ scene_id, listener_id = scene_listener_pair
+
+ scene = scenes[scene_id]
+ song_name = f"{scene['music']}-{scene['head_position']}"
+
+ logger.info(
+ f"[{idx:03d}/{num_scenes:03d}] "
+ f"Processing {song_name} for listener {listener_id}"
+ )
+
+ # Load reference signals
+ reference_stems, original_mixture = load_reference_stems(
+ Path(config.path.music_dir) / songs[song_name]["Path"]
+ )
+ reference_stems = apply_gains(
+ reference_stems, config.sample_rate, gains[scene["gain"]]
+ )
+ reference_mixture = remix_stems(
+ reference_stems, original_mixture, config.sample_rate
+ )
+
+ # Set the random seed for the scene
+ if config.evaluate.set_random_seed:
+ set_song_seed(scene_id)
+
+ # Evaluate listener
+ listener = listener_dict[listener_id]
+
+ # Load enhanced signal
+ enhanced_signal = read_signal(
+ Path(
+ enhanced_folder
+ / f"{listener.id}"
+ / f"{song_name}"
+ / f"{scene_id}_{listener.id}_remix.wav"
+ )
+ )
+
+ # Compute the score for left channel
+ nalr_fir, _ = enhancer.build(listener.audiogram_left)
+ left_reference = enhancer.apply(nalr_fir, reference_mixture[:, 0])
+ left_score = compute_haaqi(
+ enhanced_signal[:, 0],
+ left_reference,
+ listener.audiogram_left,
+ config.sample_rate,
+ 65 - 20 * np.log10(compute_rms(left_reference)),
+ )
+
+ # Compute score for right channel
+ nalr_fir, _ = enhancer.build(listener.audiogram_right)
+ right_reference = enhancer.apply(nalr_fir, reference_mixture[:, 1])
+ right_score = compute_haaqi(
+ enhanced_signal[:, 1],
+ right_reference,
+            listener.audiogram_right,
+ config.sample_rate,
+            65 - 20 * np.log10(compute_rms(right_reference)),
+ )
+
+ # Save scores
+ results_file.add_result(
+ scene=scene_id,
+ song=song_name,
+ listener_id=listener.id,
+ left_score=left_score,
+ right_score=right_score,
+            score=float(np.mean([left_score, right_score])),
+ )
+
+ logger.info("Done!")
+
+
+# pylint: disable = no-value-for-parameter
+if __name__ == "__main__":
+ run_calculate_aq()
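The per-song seeding used by `set_song_seed` above makes the HAAQI noise realisation reproducible: the seed is the last eight decimal digits of the md5 of the song (here, scene) name. A small sketch of the idea:

```python
# Sketch of set_song_seed: a deterministic seed derived from the name,
# so repeated evaluations of the same scene draw identical noise.
import hashlib
import numpy as np

def song_seed(song: str) -> int:
    return int(hashlib.md5(song.encode("utf-8")).hexdigest(), 16) % (10**8)

np.random.seed(song_seed("scene_0001"))
first = np.random.rand(3)
np.random.seed(song_seed("scene_0001"))
assert np.allclose(first, np.random.rand(3))  # same seed, same draws
```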
From 9b1bf5a05bf0c214ca645bbe87f22ecf22bceac3 Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Mon, 11 Sep 2023 17:22:26 +0100
Subject: [PATCH 13/85] Add merge batches
Signed-off-by: Gerardo Roa Dabike
---
.../baseline/merge_batches_results.py | 31 +++++++++++++++++++
1 file changed, 31 insertions(+)
create mode 100644 recipes/cad_icassp_2024/baseline/merge_batches_results.py
diff --git a/recipes/cad_icassp_2024/baseline/merge_batches_results.py b/recipes/cad_icassp_2024/baseline/merge_batches_results.py
new file mode 100644
index 000000000..ff29762da
--- /dev/null
+++ b/recipes/cad_icassp_2024/baseline/merge_batches_results.py
@@ -0,0 +1,31 @@
+"""Join batches scores into a single file."""
+
+# pylint: disable=import-error
+import hydra
+import pandas as pd
+from omegaconf import DictConfig
+
+
+@hydra.main(config_path="", config_name="config")
+def join_batches(config: DictConfig) -> None:
+ """
+ Join batches scores into a single file.
+
+ Args:
+ config (DictConfig): Dictionary of configuration options.
+ The `.evaluate.batch_size` is extracted to determine how many
+ batches there are to combine.
+
+ """
+ batches_results = []
+ for batch in range(config.evaluate.batch_size):
+ batches_results.append(
+ pd.read_csv(f"scores_{batch + 1}-{config.evaluate.batch_size}.csv")
+ )
+ df_res = pd.concat(batches_results, ignore_index=True)
+ df_res.to_csv("scores.csv", index=False)
+
+
+# pylint: disable=no-value-for-parameter
+if __name__ == "__main__":
+ join_batches()
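`merge_batches_results.py` reassembles work that `enhance.py` and `evaluate.py` deal out by stride slicing: batch `b` of `B` takes `pairs[b::B]`, so the batches are disjoint and together cover every scene-listener pair. A sketch with made-up pair names:

```python
# Sketch of the batch slicing used in enhance.py / evaluate.py; the
# pair names here are invented for illustration.
pairs = [(f"S{i:04d}", f"L{i % 3}") for i in range(10)]
batch_size = 3
shares = [pairs[batch::batch_size] for batch in range(batch_size)]
assert sorted(p for share in shares for p in share) == sorted(pairs)
print(shares[0])  # every third pair, starting at index 0
```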
From 95f4ff3f4d17b70c6ab8a0a2e521e26c60af84ca Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Mon, 11 Sep 2023 18:18:59 +0100
Subject: [PATCH 14/85] equalization to 2 in HAAQI
Signed-off-by: Gerardo Roa Dabike
---
recipes/cad_icassp_2024/baseline/evaluate.py | 22 +++++++++++---------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/recipes/cad_icassp_2024/baseline/evaluate.py b/recipes/cad_icassp_2024/baseline/evaluate.py
index b8c3a4dfb..e728af1a0 100644
--- a/recipes/cad_icassp_2024/baseline/evaluate.py
+++ b/recipes/cad_icassp_2024/baseline/evaluate.py
@@ -296,22 +296,24 @@ def run_calculate_aq(config: DictConfig) -> None:
nalr_fir, _ = enhancer.build(listener.audiogram_left)
left_reference = enhancer.apply(nalr_fir, reference_mixture[:, 0])
left_score = compute_haaqi(
- enhanced_signal[:, 0],
- left_reference,
- listener.audiogram_left,
- config.sample_rate,
- 65 - 20 * np.log10(compute_rms(left_reference)),
+ processed_signal=enhanced_signal[:, 0],
+ reference_signal=left_reference,
+ audiogram=listener.audiogram_left,
+ sample_rate=config.sample_rate,
+ equalisation=2,
+ level1=65 - 20 * np.log10(compute_rms(left_reference)),
)
# Compute score for right channel
nalr_fir, _ = enhancer.build(listener.audiogram_right)
right_reference = enhancer.apply(nalr_fir, reference_mixture[:, 1])
right_score = compute_haaqi(
- enhanced_signal[:, 1],
- right_reference,
-            listener.audiogram_right,
- config.sample_rate,
-            65 - 20 * np.log10(compute_rms(right_reference)),
+ processed_signal=enhanced_signal[:, 1],
+ reference_signal=right_reference,
+            audiogram=listener.audiogram_right,
+ sample_rate=config.sample_rate,
+ equalisation=2,
+            level1=65 - 20 * np.log10(compute_rms(right_reference)),
)
# Save scores
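The `level1` argument introduced in this patch appears to calibrate HAAQI's playback level: `65 - 20 * log10(rms)` maps the reference, at its measured RMS, onto 65 dB SPL. A small check of the arithmetic, assuming `compute_rms` is the plain root-mean-square:

```python
# Sketch of the level1 calibration term passed to compute_haaqi.
import numpy as np

def level1(reference: np.ndarray) -> float:
    rms = np.sqrt(np.mean(reference**2))  # assumed equivalent to compute_rms
    return 65 - 20 * np.log10(rms)

print(level1(np.full(44100, 0.1)))  # 85.0: an RMS of 0.1 sits 20 dB below 1.0
```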
From da4874b3e6989b1538e889e4740ea6c6e8cdd95e Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Mon, 11 Sep 2023 18:20:04 +0100
Subject: [PATCH 15/85] readme
Signed-off-by: Gerardo Roa Dabike
---
recipes/cad_icassp_2024/baseline/README.md | 170 ++++++++++-----------
1 file changed, 78 insertions(+), 92 deletions(-)
diff --git a/recipes/cad_icassp_2024/baseline/README.md b/recipes/cad_icassp_2024/baseline/README.md
index 7732be324..704086ff3 100644
--- a/recipes/cad_icassp_2024/baseline/README.md
+++ b/recipes/cad_icassp_2024/baseline/README.md
@@ -1,25 +1,18 @@
-# The First Cadenza Challenge (CAD1) - Task 1: Listening music via headphones
+# The ICASSP 2024 Cadenza Challenge (CAD_ICASSP_2024)
-Cadenza challenge code for the First Cadenza Challenge (CAD1) Task1.
+Cadenza challenge code for the ICASSP 2024 Cadenza Challenge.
-For more information please visit the [challenge website](https://cadenzachallenge.org/docs/cadenza1/cc1_intro).
+For more information please visit the [challenge website](https://cadenzachallenge.org/docs/icassp_2024/intro).
## 1. Data structure
-The First Cadenza Challenge - task 1 is using the MUSDB18-HQ dataset.
-The data is split into train, validation and test following the same split from museval.
-I.e., 86 songs are for training, 16 for validation and 50 for evaluation.
+The ICASSP 2024 Cadenza Challenge dataset is based on the MUSDB18-HQ dataset.
+To download the data, please visit the [Download data and software](https://cadenzachallenge.org/docs/icassp_2024/take_part/download)
+webpage.
-To download the data, please visit [here](https://forms.gle/UQkuCxqQVxZtGggPA). The data is split into `cadenza_cad1_task1_core_musdb18hq.tar.gz` (containing the MUSDB18-HQ dataset) and
-`cadenza_cad1_task1_core_metadata.tar.gz` (containing the list of songs and listeners' characteristics per split).
-Alternatively, you can download the MUSDB18-HQ dataset from the official [SigSep website](https://sigsep.github.io/datasets/musdb.html#musdb18-hq-uncompressed-wav).
-If you opt for this alternative, be sure to download the uncompressed wav version. Note that you will need both packages to run the baseline system.
-
-If you need additional music data for training your model, please restrict to the use of [MedleyDB](https://medleydb.weebly.com/) [4] [5],
-[BACH10](https://labsites.rochester.edu/air/resource.html) [6] and [FMA-small](https://github.com/mdeff/fma) [7].
-Theses are shared as `cadenza_cad1_task1_augmentation_medleydb.tar.gz`, `cadenza_cad1_task1_augmentation_bach10.tar.gz`
-and `cadenza_cad1_task1_augmentation_fma_small.tar.gz`.
-**Keeping the augmentation data restricted to these datasets will ensure that the evaluation is fair for all participants**.
+The data is split into four packages: `cadenza_icassp2024_core.v1_0.tgz`,
+`cadenza_icassp2024_augmentation_medleydb.tar.gz`, `cadenza_icassp2024_augmentation_bach10.tar.gz`
+and `cadenza_icassp2024_augmentation_fma_small.tar.gz`.
Unpack packages under the same root directory using
@@ -29,62 +22,70 @@ tar -xvzf
### 1.1 Necessary data
-* **Music** contains the MUSDB18-HQ music dataset for training, validation and evaluation.
+* **Core** contains the metadata and audio signal to generate the ICASSP 2024 dataset.
```text
cadenza_data
-└───task1
- └───audio
- └───musdb18hq
- ├───train
- └───test
-```
-
-* **Metadata** contains the metadata for the systems.
+├───audio
+| ├───hrtf (336 kB)
+| | | BTE_fr-VP_E1-n22.5.wav
+| | | BTE_fr-VP_E1-n30.0.wav
+| | | ...
+| |
+| └───music
+| └───train (20.2 GB)
+| ├───A Classic Education - NightOwl
+| | | bass.wav
+| | | drums.wav
+| | | other.wav
+| | | vocals.wav
+| | | mixture.wav
+| |
+| ├───...
+|
+└───metadata (328 kB)
+ | gains.json
+ | head_positions.json
+ | listeners.train.json
+ | listeners.valid.json
+ | musdb18.train.json
+ | musdb18.valid.json
+ | scene_listeners.train.json
+ | scenes.train.json
+ | ...
-```text
-cadenza_data
-└───task1
- └───metadata
- └───musdb18hq
- ├───listeners.train.json
- ├───listeners.valid.json
- ├───musdb18.train.json
- ├───musdb18.valid.json
- └───musdb18.test.json
```
### 1.2 Additional optional data
-* **MedleyDB** contains both MedleyDB versions 1 [[4](#references)] and 2 [[5](#references)] datasets.
+If you need additional music data for training your model, please restrict yourself to [MedleyDB](https://medleydb.weebly.com/) [[5](#references)] [[6](#references)],
+[BACH10](https://labsites.rochester.edu/air/resource.html) [[7](#references)] and [FMA-small](https://github.com/mdeff/fma) [[8](#references)].
+
+**Keeping the augmentation data restricted to these datasets will ensure that the evaluation is fair for all participants**.
-Tracks from the MedleyDB dataset are not included in the evaluation set.
-However, is your responsibility to exclude any song that may be already contained in the training set.
+* **MedleyDB** contains both MedleyDB versions 1 [[5](#references)] and 2 [[6](#references)] datasets.
```text
cadenza_data
-└───task1
- └───audio
- └───MedleyDB
- ├───Audio
- └───Metadata
+└───audio
+ └───MedleyDB (164 GB)
+ ├───Audio
+ └───Metadata
```
-* **BACH10** contains the BACH10 dataset [[6](#references)].
+* **BACH10** contains the BACH10 dataset [[7](#references)].
Tracks from the BACH10 dataset are not included in MUSDB18-HQ and can all be used as training augmentation data.
```text
cadenza_data
-└───task1
- └───audio
- └───fma_small
- ├───000
- ├───001
- ├───...
+└───audio
+ └───Bach10 (150 MB)
+ ├───01-AchGottundHerr
+ ├───...
```
-* **FMA Small** contains the FMA small subset of the FMA dataset [[7](references)].
+* **FMA Small** contains the FMA small subset of the FMA dataset [[8](#references)].
Tracks from the FMA small dataset are not included in the MUSDB18-HQ.
This dataset does not provide independent stems but only the full mix.
@@ -92,28 +93,11 @@ However, it can be used to train an unsupervised model to better initialise a su
```text
cadenza_data
-└───task1
- └───audio
- └───fma_small
- ├───000
- ├───001
- ├───...
-```
-
-### 1.3 Demo data
-
-To help you to start with the challenge, we provide a small subset of the data.
-The `demo_data` folder contains a single song and two listeners from the validation set.
-
-To use the demo data, simply download the package `cadenza_data_demo.tar.xz`
-from [here](https://drive.google.com/drive/folders/1Yxo_R-yPByEUvX5O5lhsHk3tW1ek5qKW?usp=share_link)
-and unpack it under `recipes/cad1/task1/`, i.e., one level above the baseline directory.
-Note that the `root.path` variable in `config.yaml` is already set to the demo data by default.
-
-To unpack the demo data, run:
-
-```bash
-tar -xvf cadenza_data_demo.tar.xz
+└───audio
+ └───fma_small (8 GB)
+ ├───000
+ ├───001
+ ├───...
```
## 2. Baseline
@@ -123,16 +107,19 @@ Note that we use [hydra](https://hydra.cc/docs/intro/) for config handling.
### 2.1 Enhancement
-The baseline enhance simply takes the out-of-the-box [Hybrid Demucs](https://github.com/facebookresearch/demucs) [1]
-source separation model distributed on [TorchAudio](https://pytorch.org/audio/main/tutorials/hybrid_demucs_tutorial.html)
-and applies a simple NAL-R [2] fitting amplification to each VDBO (`vocals`, `drums`, `bass` and `others`) stem.
+The baseline enhancement takes an out-of-the-box source separation model and estimates
+the VDBO (vocals, drums, bass and others) stems for each song-listener pair.
+
+For each estimated stem, the baseline applies the gains and remixes the signal.
+A simple NAL-R [[3](#references)] fitting amplification is applied to the final remix.
-The remixing is performed by summing the amplified VDBO stems.
+The baseline offers two source separation options:
-The baseline generates a left and right signal for each VDBO stem and a remixed signal, totalling 9 signals per song-listener.
+1. [Hybrid Demucs](https://github.com/facebookresearch/demucs) [[1](#references)] distributed on [TorchAudio](https://pytorch.org/audio/main/tutorials/hybrid_demucs_tutorial.html)
+2. [Open-Unmix](https://github.com/sigsep/open-unmix-pytorch) [[2](#references)] distributed through PyTorch Hub.
To run the baseline enhancement system, first make sure that `path.root` in `config.yaml` points to
-where you have installed the Cadenza data. This parameter defaults to the working directory.
+where you have installed the Cadenza data.
You can also define your own `path.exp_folder` to store enhanced
signals and evaluated results.
@@ -145,7 +132,7 @@ python enhance.py
Alternatively, you can provide the root variable on the command line, e.g.,
```bash
-python enhance.py path.root=/full/path/to/my/cadenza_data
+python enhance.py path.root=/Volumes/data/cadenza_data
```
To get a full list of the parameters, run:
@@ -158,9 +145,7 @@ The folder `enhanced_signals` will appear in the `exp` folder.
### 2.2 Evaluation
-The `evaluate.py` simply takes the signals stored in `enhanced_signals` and computes the HAAQI [[3](#references)] score
-for each of the eight left and right VDBO stems.
-The average of these eight scores is computed and returned for each signal.
+The `evaluate.py` script takes the signals stored in `enhanced_signals` and computes the HAAQI [[4](#references)] scores.
To run the evaluation stage, make sure that `path.root` is set in the `config.yaml` file and then run
@@ -168,22 +153,23 @@ To run the evaluation stage, make sure that `path.root` is set in the `config.ya
python evaluate.py
```
-A csv file containing the eight HAAQI scores and the combined score will be generated in the `path.exp_folder`.
+A CSV file containing the left- and right-channel HAAQI scores and the mean of both will be generated in the `path.exp_folder`.
-To check the HAAQI code, see [here](../../../../clarity/evaluator/haaqi).
+To check the HAAQI code, see [here](https://github.com/claritychallenge/clarity/blob/main/clarity/evaluator/haaqi/haaqi.py).
Please note: you will not get identical HAAQI scores for the same signals if the random seed is not defined
(in the given recipe, the random seed for each signal is set as the last eight digits of the song md5).
This is because random noise is generated within HAAQI, but the differences should be sufficiently small.
-The score for the baseline is 0.3608 HAAQI overall.
+The score for the baseline is XXXX HAAQI overall.
## References
* [1] Défossez, A. "Hybrid Spectrogram and Waveform Source Separation". Proceedings of the ISMIR 2021 Workshop on Music Source Separation. [doi:10.48550/arXiv.2111.03600](https://arxiv.org/abs/2111.03600)
-* [2] Byrne, Denis, and Harvey Dillon. "The National Acoustic Laboratories'(NAL) new procedure for selecting the gain and frequency response of a hearing aid." Ear and hearing 7.4 (1986): 257-265. [doi:10.1097/00003446-198608000-00007](https://doi.org/10.1097/00003446-198608000-00007)
-* [3] Kates J M, Arehart K H. "The Hearing-Aid Audio Quality Index (HAAQI)". IEEE/ACM transactions on audio, speech, and language processing, 24(2), 354–365. [doi:10.1109/TASLP.2015.2507858](https://doi.org/10.1109%2FTASLP.2015.2507858)
-* [4] R. Bittner, J. Salamon, M. Tierney, M. Mauch, C. Cannam and J. P. Bello, "MedleyDB: A Multitrack Dataset for Annotation-Intensive MIR Research", in 15th International Society for Music Information Retrieval Conference, Taipei, Taiwan, Oct. 2014. [pdf](https://archives.ismir.net/ismir2014/paper/000322.pdf)
-* [5] Rachel M. Bittner, Julia Wilkins, Hanna Yip and Juan P. Bello, "MedleyDB 2.0: New Data and a System for Sustainable Data Collection" Late breaking/demo extended abstract, 17th International Society for Music Information Retrieval (ISMIR) conference, August 2016. [pdf](https://wp.nyu.edu/ismir2016/wp-content/uploads/sites/2294/2016/08/bittner-medleydb.pdf)
-* [6] Zhiyao Duan, Bryan Pardo and Changshui Zhang, "Multiple fundamental frequency estimation by modeling spectral peaks and non-peak regions," IEEE Trans. Audio Speech Language Process., vol. 18, no. 8, pp. 2121-2133, 2010. [doi:10.1109/TASL.2010.2042119](https://doi.org/10.1109/TASL.2010.2042119)
-* [7] Defferrard, M., Benzi, K., Vandergheynst, P., & Bresson, X. (2016). "FMA: A dataset for music analysis". arXiv preprint arXiv:1612.01840. [doi:10.48550/arXiv.1612.01840](https://doi.org/10.48550/arXiv.1612.01840)
+* [2] Stöter, F. R., Liutkus, A., Ito, N., Nakashika, T., Ono, N., & Mitsufuji, Y. (2019). "Open-Unmix: A Reference Implementation for Music Source Separation". Journal of Open Source Software, 4(41), 1667. [doi:10.21105/joss.01667](https://doi.org/10.21105/joss.01667)
+* [3] Byrne, Denis, and Harvey Dillon. "The National Acoustic Laboratories'(NAL) new procedure for selecting the gain and frequency response of a hearing aid." Ear and hearing 7.4 (1986): 257-265. [doi:10.1097/00003446-198608000-00007](https://doi.org/10.1097/00003446-198608000-00007)
+* [4] Kates J M, Arehart K H. "The Hearing-Aid Audio Quality Index (HAAQI)". IEEE/ACM transactions on audio, speech, and language processing, 24(2), 354–365. [doi:10.1109/TASLP.2015.2507858](https://doi.org/10.1109%2FTASLP.2015.2507858)
+* [5] R. Bittner, J. Salamon, M. Tierney, M. Mauch, C. Cannam and J. P. Bello, "MedleyDB: A Multitrack Dataset for Annotation-Intensive MIR Research", in 15th International Society for Music Information Retrieval Conference, Taipei, Taiwan, Oct. 2014. [pdf](https://archives.ismir.net/ismir2014/paper/000322.pdf)
+* [6] Rachel M. Bittner, Julia Wilkins, Hanna Yip and Juan P. Bello, "MedleyDB 2.0: New Data and a System for Sustainable Data Collection" Late breaking/demo extended abstract, 17th International Society for Music Information Retrieval (ISMIR) conference, August 2016. [pdf](https://wp.nyu.edu/ismir2016/wp-content/uploads/sites/2294/2016/08/bittner-medleydb.pdf)
+* [7] Zhiyao Duan, Bryan Pardo and Changshui Zhang, "Multiple fundamental frequency estimation by modeling spectral peaks and non-peak regions," IEEE Trans. Audio Speech Language Process., vol. 18, no. 8, pp. 2121-2133, 2010. [doi:10.1109/TASL.2010.2042119](https://doi.org/10.1109/TASL.2010.2042119)
+* [8] Defferrard, M., Benzi, K., Vandergheynst, P., & Bresson, X. (2016). "FMA: A dataset for music analysis". arXiv preprint arXiv:1612.01840. [doi:10.48550/arXiv.1612.01840](https://doi.org/10.48550/arXiv.1612.01840)
From 333f13b592574268007aa689aeaa2f393c192925 Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Mon, 11 Sep 2023 19:25:03 +0100
Subject: [PATCH 16/85] readme gen dataset
Signed-off-by: Gerardo Roa Dabike
---
.../generate_dataset/README.md | 67 +++++++++++++++++++
1 file changed, 67 insertions(+)
create mode 100644 recipes/cad_icassp_2024/generate_dataset/README.md
diff --git a/recipes/cad_icassp_2024/generate_dataset/README.md b/recipes/cad_icassp_2024/generate_dataset/README.md
new file mode 100644
index 000000000..5c3556a05
--- /dev/null
+++ b/recipes/cad_icassp_2024/generate_dataset/README.md
@@ -0,0 +1,67 @@
+# Generate music dataset for the ICASSP 2024 Cadenza Challenge
+
+The ICASSP 2024 Cadenza Challenge music dataset is based on the MUSDB18-HQ dataset.
+
+Steps:
+
+1. Download `cadenza_cad1_task1_core_musdb18hq.tar.gz` and `cadenza_cad1_task1_core_metadata.tar.gz`
+packages from the [Cadenza Challenge website](https://cadenza-challenge.github.io/).
+2. Unpack packages under the same root directory.
+3. Run the script.
+
+## Unpack the data
+
+To unpack the data run:
+
+```bash
+tar -xvzf <package_name>.tar.gz
+```
+
+## Generate the dataset
+
+To generate the dataset, set the `path.root` parameter in the `generate_dataset/config.yaml`
+to where you unpacked the data. Then run:
+
+```bash
+python generates_at_mic_musdb18.py
+```
+
+or, run the script with the `path.root` parameter:
+
+```bash
+python generates_at_mic_musdb18.py path.root=/path/to/cadenza_data
+```
+
+The script will generate the dataset in the `path.root` directory.
+
+The script should create the `at_mic_music` directory, where all music samples
+picked up by the microphones (at the mic) are saved.
+
+In the next example, `A Classic Education - NightOwl-hp_0103` corresponds to the
+song `A Classic Education - NightOwl` with the `hp_0103` head position.
+
+```text
+cadenza_data
+├───audio
+| ├───at_mic_music
+| | └───train (20.2 GB)
+| | ├───A Classic Education - NightOwl-hp_0103
+| | | | bass.wav
+| | | | drums.wav
+| | | | other.wav
+| | | | vocals.wav
+| | | | mixture.wav
+| | |
+| | ├───A Classic Education - NightOwl-hp_0138
+| | | ....
+| |
+| ├───hrtf (336 kB)
+| |
+| └───music
+| └───train (20.2 GB)
+|
+└───metadata (328 kB)
+ |  gains.json
+ |  at_mic_music.train.json
+ |  ...
+```
From d5db7710615d271fb974d8563921e106f2c7b075 Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Tue, 12 Sep 2023 12:32:23 +0100
Subject: [PATCH 17/85] readme gen dataset
Signed-off-by: Gerardo Roa Dabike
---
recipes/cad_icassp_2024/generate_dataset/README.md | 2 +-
recipes/cad_icassp_2024/generate_dataset/config.yaml | 1 -
2 files changed, 1 insertion(+), 2 deletions(-)
diff --git a/recipes/cad_icassp_2024/generate_dataset/README.md b/recipes/cad_icassp_2024/generate_dataset/README.md
index 5c3556a05..2d4236d0f 100644
--- a/recipes/cad_icassp_2024/generate_dataset/README.md
+++ b/recipes/cad_icassp_2024/generate_dataset/README.md
@@ -44,7 +44,7 @@ song `A Classic Education - NightOwl` with the `hp_0103` head position.
cadenza_data
├───audio
| ├───at_mic_music
-| | └───train (20.2 GB)
+| | └───train (80.8 GB)
| | ├───A Classic Education - NightOwl-hp_0103
| | | | bass.wav
| | | | drums.wav
diff --git a/recipes/cad_icassp_2024/generate_dataset/config.yaml b/recipes/cad_icassp_2024/generate_dataset/config.yaml
index 70a7ba784..0e1f04ff0 100644
--- a/recipes/cad_icassp_2024/generate_dataset/config.yaml
+++ b/recipes/cad_icassp_2024/generate_dataset/config.yaml
@@ -5,7 +5,6 @@ path:
hrtf_dir: ${path.root}/audio/hrtf
scene_file: ${path.metadata_dir}/scenes.train.json
music_file: ${path.metadata_dir}/musdb18.train.json
- gain_file: ${path.metadata_dir}/gains.json
head_positions_file: ${path.metadata_dir}/head_positions.json
output_music_dir: ${path.root}/audio/at_mic_music # at microphone musdb18 dataset
output_music_file: ${path.metadata_dir}/at_mic_music.train.json
From dd8aa75e8343cd9a2748f0a41e954c41db9fda77 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 12 Sep 2023 12:11:11 +0000
Subject: [PATCH 18/85] [pre-commit.ci] pre-commit-autoupdate
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
updates:
- [github.com/asottile/pyupgrade: v3.9.0 → v3.10.1](https://github.com/asottile/pyupgrade/compare/v3.9.0...v3.10.1)
- [github.com/psf/black: 23.7.0 → 23.9.1](https://github.com/psf/black/compare/23.7.0...23.9.1)
- [github.com/DavidAnson/markdownlint-cli2: v0.8.1 → v0.9.2](https://github.com/DavidAnson/markdownlint-cli2/compare/v0.8.1...v0.9.2)
- [github.com/pycqa/flake8.git: 6.0.0 → 6.1.0](https://github.com/pycqa/flake8.git/compare/6.0.0...6.1.0)
- [github.com/pre-commit/mirrors-mypy: v1.4.1 → v1.5.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.4.1...v1.5.1)
- [github.com/astral-sh/ruff-pre-commit: v0.0.280 → v0.0.288](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.280...v0.0.288)
- [github.com/pycqa/pylint: v3.0.0a6 → v3.0.0a7](https://github.com/pycqa/pylint/compare/v3.0.0a6...v3.0.0a7)
---
.pre-commit-config.yaml | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 7167117a7..04742edae 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,25 +20,25 @@ repos:
- id: check-toml
- repo: https://github.com/asottile/pyupgrade
- rev: v3.9.0
+ rev: v3.10.1
hooks:
- id: pyupgrade
args: [--py38-plus]
- repo: https://github.com/psf/black
- rev: 23.7.0
+ rev: 23.9.1
hooks:
- id: black
types: [python]
additional_dependencies: ["click==8.0.4"]
- repo: https://github.com/DavidAnson/markdownlint-cli2
- rev: v0.8.1
+ rev: v0.9.2
hooks:
- id: markdownlint-cli2
- repo: https://github.com/pycqa/flake8.git
- rev: 6.0.0
+ rev: 6.1.0
hooks:
- id: flake8
additional_dependencies: [flake8-print, Flake8-pyproject]
@@ -65,7 +65,7 @@ repos:
- id: nbstripout
- repo: https://github.com/pre-commit/mirrors-mypy
- rev: v1.4.1
+ rev: v1.5.1
hooks:
- id: mypy
args:
@@ -73,13 +73,13 @@ repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
- rev: "v0.0.280"
+ rev: "v0.0.288"
hooks:
- id: ruff
# Serious pylint errors that will be enforced by CI
- repo: https://github.com/pycqa/pylint
- rev: v3.0.0a6
+ rev: v3.0.0a7
hooks:
- id: pylint
args:
From ee4c61cabe7284bc30770d9cd1fa35fbad1f4244 Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Tue, 12 Sep 2023 14:56:37 +0100
Subject: [PATCH 19/85] fix output from gen dataset
Signed-off-by: Gerardo Roa Dabike
---
.../generate_dataset/generates_at_mic_musdb18.py | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/recipes/cad_icassp_2024/generate_dataset/generates_at_mic_musdb18.py b/recipes/cad_icassp_2024/generate_dataset/generates_at_mic_musdb18.py
index 2b73780ba..571a77903 100644
--- a/recipes/cad_icassp_2024/generate_dataset/generates_at_mic_musdb18.py
+++ b/recipes/cad_icassp_2024/generate_dataset/generates_at_mic_musdb18.py
@@ -155,19 +155,17 @@ def run(cfg: DictConfig) -> None:
}
# create output metadata content
- out_music = []
+ out_music = {}
for idx, sample in enumerate(toprocess_samples.items(), 1):
sample_name, sample_detail = sample
music = music_metadata[sample_detail["music"]]
head_position = sample_detail["head_position"]
- out_music.append(
- {
- "Track Name": sample_name,
- "Split": music["Split"],
- "Path": f"{music['Split']}/{sample_name}",
- }
- )
+ out_music[sample_name] = {
+ "Track Name": sample_name,
+ "Split": music["Split"],
+ "Path": f"{music['Split']}/{sample_name}",
+ }
if sample_name in precreated_samples:
logger.info(
From 27b9d515a8a9b738a2d3499c13c319b84e542e40 Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Tue, 12 Sep 2023 15:03:45 +0100
Subject: [PATCH 20/85] better meta at_mic_music
Signed-off-by: Gerardo Roa Dabike
---
recipes/cad_icassp_2024/baseline/enhance.py | 1 +
.../generate_dataset/generates_at_mic_musdb18.py | 5 +++++
2 files changed, 6 insertions(+)
diff --git a/recipes/cad_icassp_2024/baseline/enhance.py b/recipes/cad_icassp_2024/baseline/enhance.py
index f93394bca..aef38e1e9 100644
--- a/recipes/cad_icassp_2024/baseline/enhance.py
+++ b/recipes/cad_icassp_2024/baseline/enhance.py
@@ -202,6 +202,7 @@ def enhance(config: DictConfig) -> None:
scene_listener_pairs = make_scene_listener_list(
scenes_listeners, config.evaluate.small_test
)
+
scene_listener_pairs = scene_listener_pairs[
config.evaluate.batch :: config.evaluate.batch_size
]
diff --git a/recipes/cad_icassp_2024/generate_dataset/generates_at_mic_musdb18.py b/recipes/cad_icassp_2024/generate_dataset/generates_at_mic_musdb18.py
index 571a77903..4cbc488e9 100644
--- a/recipes/cad_icassp_2024/generate_dataset/generates_at_mic_musdb18.py
+++ b/recipes/cad_icassp_2024/generate_dataset/generates_at_mic_musdb18.py
@@ -165,6 +165,8 @@ def run(cfg: DictConfig) -> None:
"Track Name": sample_name,
"Split": music["Split"],
"Path": f"{music['Split']}/{sample_name}",
+ "Original Track Name": music["Track Name"],
+ "Head Position": head_position,
}
if sample_name in precreated_samples:
@@ -209,6 +211,9 @@ def run(cfg: DictConfig) -> None:
with open(cfg.path.output_music_file, "w", encoding="utf-8") as f:
json.dump(out_music, f, indent=4)
+ logger.info(f"Saved metadata to: {cfg.path.output_music_file}")
+ logger.info("Done.")
+
# pylint: disable = no-value-for-parameter
if __name__ == "__main__":
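With this change (plus the extra fields above), each entry in `at_mic_music.train.json` is keyed by the at-mic sample name. A sketch of one resulting entry, using the illustrative song and head position from the README:

```python
# Shape of one entry in at_mic_music.train.json after these two patches.
# The song name and head position are illustrative (from the README example).
out_music = {
    "A Classic Education - NightOwl-hp_0103": {
        "Track Name": "A Classic Education - NightOwl-hp_0103",
        "Split": "train",
        "Path": "train/A Classic Education - NightOwl-hp_0103",
        "Original Track Name": "A Classic Education - NightOwl",
        "Head Position": "hp_0103",
    }
}
```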
From 73a07e0635a45c424deabfc0e16d57898c32513d Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Tue, 12 Sep 2023 16:24:10 +0100
Subject: [PATCH 21/85] Utilities from v0.3 not in v0.4
Signed-off-by: Gerardo Roa Dabike
---
clarity/utils/flac_encoder.py | 262 +++++++++++++++++++++++++++++
clarity/utils/signal_processing.py | 34 ++++
2 files changed, 296 insertions(+)
create mode 100644 clarity/utils/flac_encoder.py
diff --git a/clarity/utils/flac_encoder.py b/clarity/utils/flac_encoder.py
new file mode 100644
index 000000000..55718a98c
--- /dev/null
+++ b/clarity/utils/flac_encoder.py
@@ -0,0 +1,262 @@
+"""
+Class for encoding and decoding audio signals
+using FLAC compression.
+"""
+from __future__ import annotations
+
+import logging
+import tempfile
+
+# pylint: disable=import-error, protected-access
+from pathlib import Path
+
+import numpy as np
+import pyflac as pf
+import soundfile as sf
+
+logger = logging.getLogger(__name__)
+
+
+class WavEncoder(pf.encoder._Encoder):
+ """
+ Class offers an adaptation of the pyflac.encoder.FileEncoder
+ to work directly with WAV signals as input.
+
+ """
+
+ def __init__(
+ self,
+ signal: np.ndarray,
+ sample_rate: int,
+ output_file: str | Path | None = None,
+ compression_level: int = 5,
+ blocksize: int = 0,
+ streamable_subset: bool = True,
+ verify: bool = False,
+ ) -> None:
+ """
+ Initialise the encoder.
+
+ Args:
+ signal (np.ndarray): The raw audio data to be encoded.
+ sample_rate (int): The sample rate of the audio data.
+ output_file (str | Path | None): Path to the output FLAC file,
+ a temporary file will be created if unspecified.
+ compression_level (int): The compression level parameter that
+ varies from 0 (fastest) to 8 (slowest). The default setting
+ is 5, see https://en.wikipedia.org/wiki/FLAC for more details.
+ blocksize (int): The size of the block to be returned in the
+ callback. The default is 0 which allows libFLAC to determine
+ the best block size.
+ streamable_subset (bool): Whether to use the streamable subset for encoding.
+ If true the encoder will check settings for compatibility. If false, the
+ settings may take advantage of the full range that the format allows.
+            verify (bool): If `True`, the encoder will verify its own
+ encoded output by feeding it through an internal decoder and
+ comparing the original signal against the decoded signal.
+ If a mismatch occurs, the `process` method will raise a
+ `EncoderProcessException`. Note that this will slow the
+ encoding process by the extra time required for decoding and comparison.
+ """
+ super().__init__()
+
+ self.__raw_audio = signal
+ self._sample_rate = sample_rate
+
+ if output_file:
+ self.__output_file = (
+ Path(output_file) if isinstance(output_file, str) else output_file
+ )
+ else:
+ with tempfile.NamedTemporaryFile(suffix=".flac") as ofile:
+ self.__output_file = Path(ofile.name)
+
+ self._blocksize = blocksize
+ self._compression_level = compression_level
+ self._streamable_subset = streamable_subset
+ self._verify = verify
+ self._initialised = False
+
+ def _init(self):
+ """
+ Initialise the encoder to write to a file.
+
+ Raises:
+ EncoderInitException: if initialisation fails.
+ """
+ c_output_filename = pf.encoder._ffi.new(
+ "char[]", str(self.__output_file).encode("utf-8")
+ )
+ rc = pf.encoder._lib.FLAC__stream_encoder_init_file(
+ self._encoder,
+ c_output_filename,
+ pf.encoder._lib._progress_callback,
+ self._encoder_handle,
+ )
+ pf.encoder._ffi.release(c_output_filename)
+ if rc != pf.encoder._lib.FLAC__STREAM_ENCODER_INIT_STATUS_OK:
+ raise pf.EncoderInitException(rc)
+
+ self._initialised = True
+
+ def process(self) -> bytes:
+ """
+ Process the audio data from the WAV file.
+
+ Returns:
+ (bytes): The FLAC encoded bytes.
+
+ Raises:
+ EncoderProcessException: if an error occurs when processing the samples
+ """
+ super().process(self.__raw_audio)
+ self.finish()
+ with open(self.__output_file, "rb") as f:
+ return f.read()
+
+
+class FileDecoder(pf.decoder.FileDecoder):
+ def process(self) -> tuple[np.ndarray, int]:
+ """
+ Overwritten version of the process method from the pyflac decoder.
+ Original process returns stereo signals in float64 format.
+
+ In this version, the data is returned using the original number
+        of channels and in int16 format.
+
+ Returns:
+ (tuple): A tuple of the decoded numpy audio array, and the sample rate
+ of the audio data.
+
+ Raises:
+ DecoderProcessException: if any fatal read, write, or memory allocation
+ error occurred (meaning decoding must stop)
+ """
+ result = pf.decoder._lib.FLAC__stream_decoder_process_until_end_of_stream(
+ self._decoder
+ )
+ if self.state != pf.decoder.DecoderState.END_OF_STREAM and not result:
+ raise pf.DecoderProcessException(str(self.state))
+
+ self.finish()
+ self.__output.close()
+ return sf.read(str(self.__output_file), always_2d=False, dtype="int16")
+
+
+class FlacEncoder:
+ """
+ Class for encoding and decoding audio signals using FLAC
+
+ It uses the pyflac library to encode and decode the audio data.
+ And offers convenient methods for encoding and decoding audio data.
+ """
+
+ def __init__(self, compression_level: int = 5) -> None:
+ """
+ Initialise the compressor.
+
+ Args:
+ compression_level (int): The compression level parameter that
+ varies from 0 (fastest) to 8 (slowest). The default setting
+ is 5, see https://en.wikipedia.org/wiki/FLAC for more details.
+ """
+ self.compression_level = compression_level
+
+ def encode(
+ self,
+ signal: np.ndarray,
+ sample_rate: int,
+ output_file: str | Path | None = None,
+ ) -> bytes:
+ """
+ Method to encode the audio data using FLAC compressor.
+
+ It creates a WavEncoder object and uses it to encode the audio data.
+
+ Args:
+ signal (np.ndarray): The raw audio data to be compressed.
+ sample_rate (int): The sample rate of the audio data.
+ output_file (str | Path): Path to where to
+ save the output FLAC file. If not specified, a temporary file
+ will be created.
+
+ Returns:
+ (bytes): The FLAC encoded audio signal.
+
+ Raises:
+ ValueError: If the audio signal is not in `np.int16` format.
+ """
+ if signal.dtype != np.int16:
+ logger.error(
+ f"FLAC encoder only supports 16-bit integer signals, "
+ f"but got {signal.dtype}"
+ )
+ raise ValueError(
+ f"FLAC encoder only supports 16-bit integer signals, "
+ f"but got {signal.dtype}"
+ )
+
+ wav_encoder = WavEncoder(
+ signal=signal,
+ sample_rate=sample_rate,
+ compression_level=self.compression_level,
+ output_file=output_file,
+ )
+ return wav_encoder.process()
+
+ @staticmethod
+ def decode(input_filename: Path | str) -> tuple[np.ndarray, float]:
+ """
+ Method to decode a flac file to wav audio data.
+
+ It uses the pyflac library to decode the flac file.
+
+ Args:
+ input_filename (pathlib.Path | str): Path to the input FLAC file.
+
+ Returns:
+            (tuple): The raw audio data and its sample rate as a float.
+
+ Raises:
+ FileNotFoundError: If the flac file to decode does not exist.
+ """
+ input_filename = (
+ Path(input_filename) if isinstance(input_filename, str) else input_filename
+ )
+
+ if not input_filename.exists():
+ logger.error(f"File {input_filename} not found.")
+ raise FileNotFoundError(f"File {input_filename} not found.")
+
+ decoder = FileDecoder(input_filename)
+ signal, sample_rate = decoder.process()
+
+ return signal, float(sample_rate)
+
+
+def read_flac_signal(filename: Path) -> tuple[np.ndarray, float]:
+ """Read a FLAC signal and return it as a numpy array
+
+ Args:
+ filename (Path): The path to the FLAC file to read.
+
+ Returns:
+ signal (np.ndarray): The decoded signal.
+ sample_rate (float): The sample rate of the signal.
+ """
+ # Create encoder object
+ flac_encoder = FlacEncoder()
+
+ # Decode FLAC file
+ signal, sample_rate = flac_encoder.decode(
+ filename,
+ )
+ signal = (signal / 32768.0).astype(np.float32)
+
+ # Load scale factor
+ if filename.with_suffix(".txt").exists():
+ with open(filename.with_suffix(".txt"), encoding="utf-8") as fp:
+ max_value = float(fp.read())
+ # Scale signal
+ signal *= max_value
+ return signal, sample_rate
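A minimal round-trip sketch for the encoder utilities above; the tone, path, and parameters are illustrative only:

```python
# Sketch: encode an int16 signal to FLAC, then read it back as float32.
from pathlib import Path

import numpy as np

from clarity.utils.flac_encoder import FlacEncoder, read_flac_signal

sample_rate = 44100
# FlacEncoder.encode raises ValueError unless the signal is np.int16
tone = np.sin(2 * np.pi * 440 * np.arange(sample_rate) / sample_rate)
signal = (tone * 32767).astype(np.int16)

encoder = FlacEncoder(compression_level=5)
flac_bytes = encoder.encode(signal, sample_rate, output_file=Path("tone.flac"))

# read_flac_signal rescales the decoded int16 data to float32 in [-1, 1]
decoded, decoded_rate = read_flac_signal(Path("tone.flac"))
print(len(flac_bytes), decoded.dtype, decoded_rate)
```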
diff --git a/clarity/utils/signal_processing.py b/clarity/utils/signal_processing.py
index 0e9078ee6..fb7fd391f 100644
--- a/clarity/utils/signal_processing.py
+++ b/clarity/utils/signal_processing.py
@@ -1,12 +1,32 @@
"""Signal processing utilities."""
from __future__ import annotations
+# pylint: disable=import-error
import numpy as np
import scipy
import soxr
from numpy import ndarray
+def clip_signal(signal: np.ndarray, soft_clip: bool = False) -> tuple[np.ndarray, int]:
+ """Clip the signal.
+
+ Args:
+ signal (np.ndarray): Signal to be clipped and saved.
+ soft_clip (bool): Whether to use soft clipping.
+
+ Returns:
+ signal (np.ndarray): Clipped signal.
+ n_clipped (int): Number of samples clipped.
+ """
+
+ if soft_clip:
+ signal = np.tanh(signal)
+ n_clipped = np.sum(np.abs(signal) > 1.0)
+ signal = np.clip(signal, -1.0, 1.0)
+ return signal, int(n_clipped)
+
+
def compute_rms(signal: ndarray) -> float:
"""Compute RMS of signal
@@ -86,3 +106,17 @@ def resample(
)
raise ValueError(f"Unknown resampling method: {method}")
+
+
+def to_16bit(signal: np.ndarray) -> np.ndarray:
+ """Convert the signal to 16 bit.
+
+ Args:
+ signal (np.ndarray): Signal to be converted.
+
+ Returns:
+ signal (np.ndarray): Converted signal.
+ """
+ signal = signal * 32768.0
+ signal = np.clip(signal, -32768.0, 32767.0)
+ return signal.astype(np.dtype("int16"))
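A short sketch of how `clip_signal` and `to_16bit` combine before FLAC encoding; the sample values are illustrative:

```python
# Sketch: hard-clip an over-range float signal, then convert it to int16.
import numpy as np

from clarity.utils.signal_processing import clip_signal, to_16bit

signal = np.array([-1.5, -0.5, 0.0, 0.5, 1.5])

# Hard clipping counts and clamps the samples outside [-1, 1]
clipped, n_clipped = clip_signal(signal, soft_clip=False)
print(n_clipped)  # 2

# Soft clipping squashes the whole signal through tanh first
soft, _ = clip_signal(signal, soft_clip=True)

# Convert to 16-bit integers, ready for the FLAC encoder
print(to_16bit(clipped))  # [-32768 -16384      0  16384  32767]
```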
From 4ebc0c7fc3f0f8a1a906fcf246595d3480dc381b Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Tue, 12 Sep 2023 16:29:47 +0100
Subject: [PATCH 22/85] Add apply HA
Signed-off-by: Gerardo Roa Dabike
---
recipes/cad_icassp_2024/baseline/evaluate.py | 60 +++++++++++++++++---
1 file changed, 52 insertions(+), 8 deletions(-)
diff --git a/recipes/cad_icassp_2024/baseline/evaluate.py b/recipes/cad_icassp_2024/baseline/evaluate.py
index e728af1a0..1adcd3ae2 100644
--- a/recipes/cad_icassp_2024/baseline/evaluate.py
+++ b/recipes/cad_icassp_2024/baseline/evaluate.py
@@ -15,9 +15,10 @@
from numpy import ndarray
from omegaconf import DictConfig
+from clarity.enhancer.compressor import Compressor
from clarity.enhancer.nalr import NALR
from clarity.evaluator.haaqi import compute_haaqi
-from clarity.utils.audiogram import Listener
+from clarity.utils.audiogram import Audiogram, Listener
from clarity.utils.file_io import read_signal
from clarity.utils.signal_processing import compute_rms
@@ -93,6 +94,37 @@ def add_result(
)
+def apply_ha(
+ enhancer: NALR,
+ compressor: Compressor | None,
+ signal: ndarray,
+ audiogram: Audiogram,
+ apply_compressor: bool = False,
+) -> np.ndarray:
+ """
+ Apply NAL-R prescription hearing aid to a signal.
+
+ Args:
+ enhancer (NALR): A NALR object that enhances the signal.
+ compressor (Compressor | None): A Compressor object that compresses the signal.
+ signal (ndarray): An ndarray representing the audio signal.
+ audiogram (Audiogram): An Audiogram object representing the listener's
+ audiogram.
+ apply_compressor (bool): Whether to apply the compressor.
+
+ Returns:
+ An ndarray representing the processed signal.
+ """
+ nalr_fir, _ = enhancer.build(audiogram)
+ proc_signal = enhancer.apply(nalr_fir, signal)
+ if apply_compressor:
+ if compressor is None:
+ raise ValueError("Compressor must be provided to apply compressor.")
+
+ proc_signal, _, _ = compressor.process(proc_signal)
+ return proc_signal
+
+
def apply_gains(stems: dict, sample_rate: float, gains: dict) -> dict:
"""Apply gain to the signal by using LUFS.
@@ -292,28 +324,40 @@ def run_calculate_aq(config: DictConfig) -> None:
)
)
+ # Compute the scores
+ # First, we apply NAL-R to the reference signal
# Compute the score for left channel
- nalr_fir, _ = enhancer.build(listener.audiogram_left)
- left_reference = enhancer.apply(nalr_fir, reference_mixture[:, 0])
+ left_reference = apply_ha(
+ enhancer=enhancer,
+ compressor=None,
+ signal=reference_mixture[:, 0],
+ audiogram=listener.audiogram_left,
+ apply_compressor=False,
+ )
left_score = compute_haaqi(
processed_signal=enhanced_signal[:, 0],
reference_signal=left_reference,
audiogram=listener.audiogram_left,
sample_rate=config.sample_rate,
equalisation=2,
- level1=65 - 20 * np.log10(compute_rms(left_reference)),
+ level1=65 - 20 * np.log10(compute_rms(reference_mixture[:, 0])),
)
# Compute score for right channel
- nalr_fir, _ = enhancer.build(listener.audiogram_right)
- right_reference = enhancer.apply(nalr_fir, reference_mixture[:, 1])
+ right_reference = apply_ha(
+ enhancer=enhancer,
+ compressor=None,
+ signal=reference_mixture[:, 1],
+ audiogram=listener.audiogram_right,
+ apply_compressor=False,
+ )
right_score = compute_haaqi(
processed_signal=enhanced_signal[:, 1],
reference_signal=right_reference,
- audiogram=listener.audiogram_left,
+ audiogram=listener.audiogram_right,
sample_rate=config.sample_rate,
equalisation=2,
- level1=65 - 20 * np.log10(compute_rms(left_reference)),
+ level1=65 - 20 * np.log10(compute_rms(right_reference)),
)
# Save scores
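A minimal sketch of the new `apply_ha` helper in isolation, assuming an illustrative audiogram and a random stand-in signal:

```python
# Sketch: build a NAL-R-amplified reference channel with apply_ha.
import numpy as np

from clarity.enhancer.nalr import NALR
from clarity.utils.audiogram import Audiogram
from recipes.cad_icassp_2024.baseline.evaluate import apply_ha

audiogram = Audiogram(
    levels=np.array([30, 35, 40, 45, 50, 55, 60, 65]),
    frequencies=np.array([250, 500, 1000, 2000, 3000, 4000, 6000, 8000]),
)
signal = np.random.default_rng(0).uniform(-1.0, 1.0, 44100)

enhancer = NALR(nfir=220, sample_rate=44100)
# compressor=None is fine while apply_compressor is False; passing
# apply_compressor=True without a compressor raises ValueError.
reference = apply_ha(
    enhancer=enhancer,
    compressor=None,
    signal=signal,
    audiogram=audiogram,
    apply_compressor=False,
)
```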
From d45a3acf8dac18700f818e7d9d9b14ca59628f2d Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Tue, 12 Sep 2023 16:31:57 +0100
Subject: [PATCH 23/85] output sample rate in config
Signed-off-by: Gerardo Roa Dabike
---
recipes/cad_icassp_2024/baseline/config.yaml | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/recipes/cad_icassp_2024/baseline/config.yaml b/recipes/cad_icassp_2024/baseline/config.yaml
index caba995a5..615d4c12c 100644
--- a/recipes/cad_icassp_2024/baseline/config.yaml
+++ b/recipes/cad_icassp_2024/baseline/config.yaml
@@ -1,5 +1,5 @@
path:
- root: /media/gerardo/Extended_old/cadenza_data/icassp_2024
+ root: ???
metadata_dir: ${path.root}/metadata
music_dir: ${path.root}/audio/at_mic_music
gains_file: ${path.metadata_dir}/gains.json
@@ -11,6 +11,7 @@ path:
exp_folder: ./exp # folder to store enhanced signals and final results
sample_rate: 44100
+remix_sample_rate: 32000
nalr:
nfir: 220
@@ -32,7 +33,7 @@ separator:
evaluate:
set_random_seed: True
- small_test: True
+ small_test: False
batch_size: 1 # Number of batches
batch: 0 # Batch number to evaluate
From 0cffe9a9d1a0175c10ee470dc45b03561aa3ba8f Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Tue, 12 Sep 2023 16:32:15 +0100
Subject: [PATCH 24/85] save flac and 32 kHz
Signed-off-by: Gerardo Roa Dabike
---
recipes/cad_icassp_2024/baseline/enhance.py | 118 +++++++++++++-------
1 file changed, 77 insertions(+), 41 deletions(-)
diff --git a/recipes/cad_icassp_2024/baseline/enhance.py b/recipes/cad_icassp_2024/baseline/enhance.py
index aef38e1e9..0f8f49bad 100644
--- a/recipes/cad_icassp_2024/baseline/enhance.py
+++ b/recipes/cad_icassp_2024/baseline/enhance.py
@@ -9,7 +9,6 @@
import hydra
import numpy as np
import torch
-from evaluate import apply_gains, make_scene_listener_list, remix_stems
from numpy import ndarray
from omegaconf import DictConfig
from source_separation_utils import get_device, separate_sources
@@ -17,17 +16,83 @@
from clarity.enhancer.compressor import Compressor
from clarity.enhancer.nalr import NALR
-from clarity.utils.audiogram import Audiogram, Listener
-from clarity.utils.file_io import read_signal, write_signal
+from clarity.utils.audiogram import Listener
+from clarity.utils.file_io import read_signal
+from clarity.utils.flac_encoder import FlacEncoder
from clarity.utils.signal_processing import (
+ clip_signal,
denormalize_signals,
normalize_signal,
resample,
+ to_16bit,
+)
+from recipes.cad_icassp_2024.baseline.evaluate import (
+ apply_gains,
+ apply_ha,
+ make_scene_listener_list,
+ remix_stems,
)
logger = logging.getLogger(__name__)
+def save_flac_signal(
+ signal: np.ndarray,
+ filename: Path,
+ signal_sample_rate,
+ output_sample_rate,
+ do_clip_signal: bool = False,
+ do_soft_clip: bool = False,
+ do_scale_signal: bool = False,
+) -> None:
+ """
+ Function to save output signals.
+
+    - The output signal will be resampled to ``output_sample_rate``.
+    - The output signal will be clipped to [-1, 1] if ``do_clip_signal`` is True,
+      using soft clipping if ``do_soft_clip`` is True. Note that if
+      ``do_clip_signal`` is False, ``do_soft_clip`` will be ignored.
+    - The output signal will be scaled to [-1, 1] if ``do_scale_signal`` is True.
+      If the signal is scaled, the scale factor will be saved in a TXT file.
+      Note that scaling takes precedence: if ``do_scale_signal`` is True,
+      ``do_clip_signal`` will be ignored.
+    - The output signal will be saved as a FLAC file.
+
+ Args:
+ signal (np.ndarray) : Signal to save
+ filename (Path) : Path to save signal
+ signal_sample_rate (int) : Sample rate of the input signal
+ output_sample_rate (int) : Sample rate of the output signal
+ do_clip_signal (bool) : Whether to clip signal
+ do_soft_clip (bool) : Whether to apply soft clipping
+ do_scale_signal (bool) : Whether to scale signal
+ """
+ # Resample signal to expected output sample rate
+ if signal_sample_rate != output_sample_rate:
+ signal = resample(signal, signal_sample_rate, output_sample_rate)
+
+ if do_scale_signal:
+ # Scale stem signal
+ max_value = np.max(np.abs(signal))
+ signal = signal / max_value
+
+ # Save scale factor
+ with open(filename.with_suffix(".txt"), "w", encoding="utf-8") as file:
+ file.write(f"{max_value}")
+
+ elif do_clip_signal:
+ # Clip the signal
+ signal, n_clipped = clip_signal(signal, do_soft_clip)
+ if n_clipped > 0:
+ logger.warning(f"Writing {filename}: {n_clipped} samples clipped")
+
+ # Convert signal to 16-bit integer
+ signal = to_16bit(signal)
+
+ # Create flac encoder object to compress and save the signal
+ FlacEncoder().encode(signal, output_sample_rate, filename)
+
+
# pylint: disable=unused-argument
def decompose_signal(
model: torch.nn.Module,
@@ -85,37 +150,6 @@ def decompose_signal(
return dict(zip(sources_list, sources))
-def apply_baseline_ha(
- enhancer: NALR,
- compressor: Compressor | None,
- signal: ndarray,
- audiogram: Audiogram,
- apply_compressor: bool = False,
-) -> np.ndarray:
- """
- Apply NAL-R prescription hearing aid to a signal.
-
- Args:
- enhancer (NALR): A NALR object that enhances the signal.
- compressor (Compressor | None): A Compressor object that compresses the signal.
- signal (ndarray): An ndarray representing the audio signal.
- audiogram (Audiogram): An Audiogram object representing the listener's
- audiogram.
- apply_compressor (bool): Whether to apply the compressor.
-
- Returns:
- An ndarray representing the processed signal.
- """
- nalr_fir, _ = enhancer.build(audiogram)
- proc_signal = enhancer.apply(nalr_fir, signal)
- if apply_compressor:
- if compressor is None:
- raise ValueError("Compressor must be provided to apply compressor.")
-
- proc_signal, _, _ = compressor.process(proc_signal)
- return proc_signal
-
-
def process_remix_for_listener(
signal: ndarray,
enhancer: NALR,
@@ -135,10 +169,10 @@ def process_remix_for_listener(
Returns:
ndarray: Processed signal.
"""
- left_output = apply_baseline_ha(
+ left_output = apply_ha(
enhancer, compressor, signal[:, 0], listener.audiogram_left, apply_compressor
)
- right_output = apply_baseline_ha(
+ right_output = apply_ha(
enhancer, compressor, signal[:, 1], listener.audiogram_right, apply_compressor
)
@@ -264,13 +298,15 @@ def enhance(config: DictConfig) -> None:
/ f"{song_name}"
/ f"{scene_id}_{listener.id}_remix.wav"
)
+
filename.parent.mkdir(parents=True, exist_ok=True)
- write_signal(
- filename=filename,
+ save_flac_signal(
signal=enhanced_signal,
- sample_rate=config.sample_rate,
- floating_point=False,
- strict=False,
+ filename=filename,
+ signal_sample_rate=config.sample_rate,
+ output_sample_rate=config.remix_sample_rate,
+ do_clip_signal=True,
+ do_soft_clip=config.soft_clip,
)
logger.info("Done!")
From 4c510c891eea0a3f39b31196730644cd3186a2f6 Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Tue, 12 Sep 2023 16:34:07 +0100
Subject: [PATCH 25/85] save flac and 32 kHz
Signed-off-by: Gerardo Roa Dabike
---
recipes/cad_icassp_2024/baseline/enhance.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/recipes/cad_icassp_2024/baseline/enhance.py b/recipes/cad_icassp_2024/baseline/enhance.py
index 0f8f49bad..eec12d75f 100644
--- a/recipes/cad_icassp_2024/baseline/enhance.py
+++ b/recipes/cad_icassp_2024/baseline/enhance.py
@@ -296,7 +296,7 @@ def enhance(config: DictConfig) -> None:
enhanced_folder
/ f"{listener.id}"
/ f"{song_name}"
- / f"{scene_id}_{listener.id}_remix.wav"
+ / f"{scene_id}_{listener.id}_remix.flac"
)
filename.parent.mkdir(parents=True, exist_ok=True)
From 32504bdeb723eb356dc7f9d2acf93f4e9b2a3af6 Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Tue, 12 Sep 2023 16:37:50 +0100
Subject: [PATCH 26/85] allows processed and reference different sample rate
Signed-off-by: Gerardo Roa Dabike
---
clarity/evaluator/haaqi/haaqi.py | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/clarity/evaluator/haaqi/haaqi.py b/clarity/evaluator/haaqi/haaqi.py
index f4d19662c..b3ba3b035 100644
--- a/clarity/evaluator/haaqi/haaqi.py
+++ b/clarity/evaluator/haaqi/haaqi.py
@@ -178,8 +178,9 @@ def haaqi_v1(
def compute_haaqi(
processed_signal: ndarray,
reference_signal: ndarray,
+ processed_sample_rate: float,
+ reference_sample_rate: float,
audiogram: Audiogram,
- sample_rate: float,
equalisation: int = 1,
level1: float = 65.0,
) -> float:
@@ -191,8 +192,9 @@ def compute_haaqi(
reference_signal (np.ndarray): Input reference speech signal with no noise
or distortion. If a hearing loss is specified, NAL-R equalization
is optional
+ processed_sample_rate (float): Sampling rate in Hz for processed signal.
+ reference_sample_rate (float): Sampling rate in Hz for reference signal.
audiogram (Audiogram): Audiogram object.
- sample_rate (int): Sample rate in Hz.
equalisation (int): hearing loss equalization mode for reference signal:
1 = no EQ has been provided, the function will add NAL-R
2 = NAL-R EQ has already been added to the reference signal
@@ -211,9 +213,9 @@ def compute_haaqi(
score, _, _, _ = haaqi_v1(
reference=reference_signal,
- reference_freq=sample_rate,
+ reference_freq=reference_sample_rate,
processed=processed_signal,
- processed_freq=sample_rate,
+ processed_freq=processed_sample_rate,
audiogram=audiogram,
equalisation=equalisation,
level1=level1,
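A sketch of the updated `compute_haaqi` call, with the 32 kHz remix evaluated against the 44.1 kHz reference; the signals and audiogram are illustrative stand-ins:

```python
# Sketch: compute_haaqi with different processed/reference sample rates.
import numpy as np

from clarity.evaluator.haaqi import compute_haaqi
from clarity.utils.audiogram import Audiogram
from clarity.utils.signal_processing import compute_rms

audiogram = Audiogram(
    levels=np.array([20, 20, 30, 40, 50, 60, 70, 80]),
    frequencies=np.array([250, 500, 1000, 2000, 3000, 4000, 6000, 8000]),
)
rng = np.random.default_rng(0)
reference = rng.uniform(-1.0, 1.0, 44100)  # 1 s at 44.1 kHz
processed = rng.uniform(-1.0, 1.0, 32000)  # 1 s at 32 kHz

score = compute_haaqi(
    processed_signal=processed,
    reference_signal=reference,
    processed_sample_rate=32000,
    reference_sample_rate=44100,
    audiogram=audiogram,
    equalisation=2,  # 2 = NAL-R EQ already applied to the reference
    level1=65 - 20 * np.log10(compute_rms(reference)),
)
print(score)
```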
From 19180e33bdb7de2d4e27fb992978656b6882be0a Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Tue, 12 Sep 2023 16:39:01 +0100
Subject: [PATCH 27/85] allows processed and reference different sample rate
Signed-off-by: Gerardo Roa Dabike
---
tests/evaluator/haaqi/test_haaqi.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/tests/evaluator/haaqi/test_haaqi.py b/tests/evaluator/haaqi/test_haaqi.py
index c38ec5a60..bf29363e0 100644
--- a/tests/evaluator/haaqi/test_haaqi.py
+++ b/tests/evaluator/haaqi/test_haaqi.py
@@ -1,4 +1,5 @@
"""Tests for haaqi module"""
+# pylint: disable=import-error
import numpy as np
import pytest
@@ -57,8 +58,9 @@ def test_compute_haaqi(levels, freqs, expected_result):
score = compute_haaqi(
processed_signal=enh_signal,
reference_signal=ref_signal,
+ processed_sample_rate=sample_rate,
+ reference_sample_rate=sample_rate,
audiogram=audiogram,
- sample_rate=sample_rate,
)
# Check that the score is a float between 0 and 1
From e6912d787451ede14bce0089cb984b166dd184c6 Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Tue, 12 Sep 2023 16:51:50 +0100
Subject: [PATCH 28/85] reading flac and evaluate
Signed-off-by: Gerardo Roa Dabike
---
recipes/cad_icassp_2024/baseline/evaluate.py | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/recipes/cad_icassp_2024/baseline/evaluate.py b/recipes/cad_icassp_2024/baseline/evaluate.py
index 1adcd3ae2..eae37963f 100644
--- a/recipes/cad_icassp_2024/baseline/evaluate.py
+++ b/recipes/cad_icassp_2024/baseline/evaluate.py
@@ -20,6 +20,7 @@
from clarity.evaluator.haaqi import compute_haaqi
from clarity.utils.audiogram import Audiogram, Listener
from clarity.utils.file_io import read_signal
+from clarity.utils.flac_encoder import read_flac_signal
from clarity.utils.signal_processing import compute_rms
logger = logging.getLogger(__name__)
@@ -293,7 +294,7 @@ def run_calculate_aq(config: DictConfig) -> None:
logger.info(
f"[{idx:03d}/{num_scenes:03d}] "
- f"Processing {song_name} for listener {listener_id}"
+ f"Evaluating {song_name} for listener {listener_id}"
)
# Load reference signals
@@ -315,12 +316,12 @@ def run_calculate_aq(config: DictConfig) -> None:
listener = listener_dict[listener_id]
# Load enhanced signal
- enhanced_signal = read_signal(
+ enhanced_signal, _ = read_flac_signal(
Path(
enhanced_folder
/ f"{listener.id}"
/ f"{song_name}"
- / f"{scene_id}_{listener.id}_remix.wav"
+ / f"{scene_id}_{listener.id}_remix.flac"
)
)
@@ -337,8 +338,9 @@ def run_calculate_aq(config: DictConfig) -> None:
left_score = compute_haaqi(
processed_signal=enhanced_signal[:, 0],
reference_signal=left_reference,
+ processed_sample_rate=config.remix_sample_rate,
+ reference_sample_rate=config.sample_rate,
audiogram=listener.audiogram_left,
- sample_rate=config.sample_rate,
equalisation=2,
level1=65 - 20 * np.log10(compute_rms(reference_mixture[:, 0])),
)
@@ -354,8 +356,9 @@ def run_calculate_aq(config: DictConfig) -> None:
right_score = compute_haaqi(
processed_signal=enhanced_signal[:, 1],
reference_signal=right_reference,
+ processed_sample_rate=config.remix_sample_rate,
+ reference_sample_rate=config.sample_rate,
audiogram=listener.audiogram_right,
- sample_rate=config.sample_rate,
equalisation=2,
level1=65 - 20 * np.log10(compute_rms(right_reference)),
)
From 6b1db3ce9a3686fb9f3d4b6ea11700fa3f43ea91 Mon Sep 17 00:00:00 2001
From: Gerardo Roa Dabike
Date: Tue, 12 Sep 2023 18:04:24 +0100
Subject: [PATCH 29/85] test enhance
Signed-off-by: Gerardo Roa Dabike
---
tests/recipes/cad_icassp_2024/__init__.py | 0
.../cad_icassp_2024/baseline/__init__.py | 0
.../cad_icassp_2024/baseline/test_enhance.py | 134 ++++++++++++++++++
...t_enhance.test_decompose_signal_demucs.npy | Bin 0 -> 706145 bytes
...nhance.test_decompose_signal_openunmix.npy | Bin 0 -> 706145 bytes
...rocess_remix_for_listener_w_compressor.npy | Bin 0 -> 3680 bytes
...ocess_remix_for_listener_wo_compressor.npy | Bin 0 -> 3680 bytes
7 files changed, 134 insertions(+)
create mode 100644 tests/recipes/cad_icassp_2024/__init__.py
create mode 100644 tests/recipes/cad_icassp_2024/baseline/__init__.py
create mode 100644 tests/recipes/cad_icassp_2024/baseline/test_enhance.py
create mode 100644 tests/resources/recipes/cad_icassp_2024/test_enhance.test_decompose_signal_demucs.npy
create mode 100644 tests/resources/recipes/cad_icassp_2024/test_enhance.test_decompose_signal_openunmix.npy
create mode 100644 tests/resources/recipes/cad_icassp_2024/test_enhance.test_process_remix_for_listener_w_compressor.npy
create mode 100644 tests/resources/recipes/cad_icassp_2024/test_enhance.test_process_remix_for_listener_wo_compressor.npy
diff --git a/tests/recipes/cad_icassp_2024/__init__.py b/tests/recipes/cad_icassp_2024/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/recipes/cad_icassp_2024/baseline/__init__.py b/tests/recipes/cad_icassp_2024/baseline/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/recipes/cad_icassp_2024/baseline/test_enhance.py b/tests/recipes/cad_icassp_2024/baseline/test_enhance.py
new file mode 100644
index 000000000..36fac0a43
--- /dev/null
+++ b/tests/recipes/cad_icassp_2024/baseline/test_enhance.py
@@ -0,0 +1,134 @@
+"""Tests for the enhance module"""
+# pylint: disable=import-error
+from pathlib import Path
+
+import numpy as np
+import pytest
+import torch
+from torchaudio.pipelines import HDEMUCS_HIGH_MUSDB
+
+from clarity.enhancer.compressor import Compressor
+from clarity.enhancer.nalr import NALR
+from clarity.utils.audiogram import Audiogram, Listener
+from clarity.utils.flac_encoder import read_flac_signal
+from recipes.cad_icassp_2024.baseline.enhance import (
+ decompose_signal,
+ process_remix_for_listener,
+ save_flac_signal,
+)
+
+BASE_DIR = Path.cwd()
+RESOURCES = BASE_DIR / "tests" / "resources" / "recipes" / "cad_icassp_2024"
+
+
+def test_save_flac_signal(tmp_path):
+ """Test save flac signal"""
+ np.random.seed(2024)
+ sample_rate = 44100
+ duration = 0.5
+ signal = np.random.rand(int(sample_rate * duration))
+
+ filename = Path(tmp_path) / "signal.flac"
+ save_flac_signal(signal, filename, sample_rate, sample_rate)
+
+ signal_out, sample_rate_out = read_flac_signal(filename)
+ assert np.sum(signal) == pytest.approx(11040.050741283)
+ assert np.sum(signal_out) == pytest.approx(11039.716)
+ assert sample_rate_out == sample_rate
+
+
+@pytest.mark.parametrize(
+ "separation_model",
+ [
+ pytest.param("demucs"),
+ pytest.param("openunmix", marks=pytest.mark.slow),
+ ],
+)
+def test_decompose_signal(separation_model):
+ """Takes a signal and decomposes it into
+ VDBO sources using the HDEMUCS model"""
+ np.random.seed(2024)
+ # Load Separation Model
+ if separation_model == "demucs":
+ model = HDEMUCS_HIGH_MUSDB.get_model()
+ model_sample_rate = HDEMUCS_HIGH_MUSDB.sample_rate
+ sources_order = model.sources
+
+ elif separation_model == "openunmix":
+ model = torch.hub.load("sigsep/open-unmix-pytorch", "umxhq")
+ model_sample_rate = model.sample_rate
+ sources_order = ["vocals", "drums", "bass", "other"]
+
+ device = torch.device("cpu")
+ model.to(device)
+
+ # Create a mock signal to decompose
+ sample_rate = 44100
+ duration = 0.5
+ signal = np.random.uniform(size=(1, 2, int(sample_rate * duration))).astype(
+ np.float32
+ )
+
+ # Call the decompose_signal function and check that the output has the expected keys
+ cfs = np.array([250, 500, 1000, 2000, 4000, 6000, 8000, 9000, 10000])
+ audiogram = Audiogram(levels=np.ones(9), frequencies=cfs)
+ listener = Listener(audiogram, audiogram)
+ output = decompose_signal(
+ model,
+ model_sample_rate,
+ signal,
+ sample_rate,
+ device,
+ sources_order,
+ listener,
+ )
+ expected_results = np.load(
+ RESOURCES / f"test_enhance.test_decompose_signal_{separation_model}.npy",
+ allow_pickle=True,
+ )[()]
+
+ for key, item in output.items():
+ np.testing.assert_array_almost_equal(item, expected_results[key])
+
+
+@pytest.mark.parametrize(
+ "apply_compressor",
+ [
+ True,
+ False,
+ ],
+)
+def test_process_remix_for_listener(apply_compressor):
+ """Test the process remix for listener"""
+ np.random.seed(2024)
+ sample_rate = 44100
+ duration = 0.5
+ signal = np.random.uniform(size=(2, int(duration * sample_rate)))
+
+ audiogram = Audiogram(
+ levels=np.ones(9),
+ frequencies=np.array([250, 500, 1000, 2000, 4000, 6000, 8000, 9000, 10000]),
+ )
+ listener = Listener(audiogram_left=audiogram, audiogram_right=audiogram)
+ enhancer = NALR(nfir=220, sample_rate=16000)
+ compressor = Compressor(
+ threshold=0.35, attenuation=0.1, attack=50, release=1000, rms_buffer_size=0.064
+ )
+
+ output = process_remix_for_listener(
+ signal, enhancer, compressor, listener, apply_compressor=apply_compressor
+ )
+
+ if apply_compressor:
+ expected_results = np.load(
+ RESOURCES / "test_enhance.test_process_remix_for_listener_w_compressor.npy",
+ allow_pickle=True,
+ )[()]
+ else:
+ expected_results = np.load(
+ RESOURCES
+ / "test_enhance.test_process_remix_for_listener_wo_compressor.npy",
+ allow_pickle=True,
+ )[()]
+
+ assert np.sum(output) == pytest.approx(np.sum(expected_results))
diff --git a/tests/resources/recipes/cad_icassp_2024/test_enhance.test_decompose_signal_demucs.npy b/tests/resources/recipes/cad_icassp_2024/test_enhance.test_decompose_signal_demucs.npy
new file mode 100644
index 0000000000000000000000000000000000000000..6c0577ac8c21b23400db9cbc8edcdc66f518b20b
GIT binary patch
literal 706145
z%Z@)^Fv~&s8ms@IN74;EEBB$JqI(B&So|fp
zjs1%GLMN+-KgZ@S^6cMLif!43Y)L&M>X#pN=YGe5>Od@tkfc_x0gO{_#yze5C>dnS
zHCJE2P0o(%l1^Y<$BRUuwe>u!(i%pA6twDF=^as
zj2ZkL31zlCy!SX#n>D#GPKn);ROqazLxZ{uTp0TT72oaoZleSxir=GVM=6X)Ji*aV
z;r!M8A&yyzdfQilx9yL?DGF?h?uQ7~zMSAL!>cEZu-sR8Ylh-A{V?PNvF_I|kYll*
zI*awDAV%bB<6jHr@Wu=1-xllo&VIPkg{RKx)8vj|p+vSaKjJ-#1y6*bwb&M_&6R_M
zpO8TOzdSw%?L9quyhmxi#1CyoTN`sVFJ2L%H!Vh^hi34a3e)}{0b31^JplEb!9Wnf^%-J#D{^`q145TLx=0o
zY*_11(v<}3yTTa2>)fxm7QAL_U$=QLX~jU#+c^6%9ovPTU4B8887Hhbd{YJ1
z3U7Srr879WR^-J6Zfv@H1{;ol#n%IJ{5i~+4zb74OHG>Br)%(>zV^kSb9>#_@7tpO?0c5}Sr;5;LI@5Mw(p5*U73(Esca88y
zOc)faC^G6ioEfIc&P;J1Esvw^uO%BCa^PcHiPUl3d9}I%2KVlvDy6&N@ddjVUWJGA
z!Orr{aK2H4p$mdJ@bE^wT=*DG>cePtZ5fKSl=!!gV5krNM*kmg(Plmob@5#&(}Pqh
zUxf0b`|-xvhkm;1{Fi1#n_W`{cPc@>?h#!4JQ_te+VJDO;3wK;dG3oJM;}gt%R7BW
zO5cIo*Si>N8cel}V<>HIgi^UPrBvnl;_*7@{p&@mu&1bRug6L23d}tq&!HbI*>{c5
zRf5-0csmMB&p%^Yr8_5{k)ikAI;^bLV(c<${#<$)SEnArvz1|-o7#w;`md2wJdBTR
zmC(2QJvNpPeHe>BIV}4P&g~sw=EcFd#!6?B(7mBl|%9JruUxF5g
z+0jGjM}r&kG&h%_`(95u
z&T6zL2e7AhAWM|b!?EQAq@G2v$H3RPVC8|zP5M0C)tZmPteH9UCZb+CQ=@+}WQ>mD
zjL;o_3=FucU9dfqWU1NBmRsg5MYTyOe3I-rweMM62ziWRub~W6+KYz#R=9~d2e*?e
zQS$yBhOTIb{|ZSSzvaW0*mk_U^#~s(-NL|N;nB_R!|
zH^Il#gLt|h2SVJTVi?WA3Koo(nv51rk=Y~{VpIMfIBhWI;MwVrmD8rnc@rL7U5+V&
zacpwgf;+mpob|?A@NMOAdzFRgMxnmZ;3c_kNMG}
zCIj2;z$0&Tc(qg?Kc`vK>6P$oH`sGabSaXyC_{N?2nWlFUft2DLN%stu_H{XdkB&j!NT+2!Y@sRY>nW*
z4*x`NtqX{55PYngn3s4a!%p+A9I)PyQ4Sx2yxyv^_VoaM=~EQ+F4lp*g9JMmoq*mQ
zE$I2%g}ZKiMM#P~`PK8mBxS~K|7aReMn6ngpxPH#72prznhj%8uaH65-L8A{{H9x%V{%6h?7
ze9OCo4XO%^`|mnhC-vi;n0t7au^Gq0f|z*bIu>nIq09qWMhtj@YkjoI#oFvGX1l(L
zUd`=}8i!tAjRj&(b9a|c_&Hke*#HUlm$TvXox5>t%t6!?`?2}#Q_RXyqincfeP@Z>
zX^|I=QnJwULWxddw#cqojT18+cslq)&|hyax<6H9L!;p8ZmEbYMUqKFOjvO!8wX>W
zF=nFcv2r{)O*#ipnnZRYsYK<28nhH#
zk7}tbqpuCc1<^0oju*A{)dIA+%w&<3m@8@9fL?EWSm~Pux7SJ>D|*fs1JvneX~E99
zt8lnO%$G|1#k>ddoLXzb%!cFWxkHD#%cOYywy`z+9H^r7``y0}=
zIsyMe-$2&426rWO7&+L1rPEYs>fVN^v_)u;)8{{39~xC0L7ufTyIm_mKe1nux7bpu
zSel`42B71)1N(J&aIuCC6oocA&}PDvNp;Y<+ytqZ2YB3DaFONK>~VM%5@ZB#KKU1t
z`k#Q+2tT%$w&1lmKa9kD$i0&X@OG{et!IRDe|Rbm=9qBOj_v`=%QgUyFEpDqFfqP0P_9*n@YMbdWxUS8)n^pPu(@vNf`4i3OkRaHrI|cS^^mJps
z(sC5-Uxoao{pfMgmc!3ogKDovF@Gq>*ZUvhh4AoQch&~NwT-f~Tj9j(Np5hpQh<7-G~$a18IEtjomLA$|0EU|K7V)$+B
z5e#~P;Pq$4&&1;ABPk=h3WI0mp>a-M{+)6V=1Gc6OEa7Fv(nx&i@`_{X}n?y?%ktb(y#!Yfo>%m)*@&X6MowSaAL?
ztjb(y*rCk3p^8kfl;O{!-7vY5f%`5I)M@kP@QhR#ooGVXuNJ(yD9^03Ce*snh=-rr
z5v8Ti+UUhNQ0&B_Oj(94twZw<4^|J_3YV|ed|B`%+J*~
zF08LukAyZ|HZB#c)
z-3Q-YyK!%TAq%^&5qwB3Je8d2_DzkizaN0aNe2e!h`Km02iKeXamvp}SSTm7|J8>m
z5wioDq2&mkF#=iovuG#oRYvhiTq{v!zjyU8a`=klP5M-8QfKYZS6Ccp!-Aaia5a65
z98nKff8LCfuZ;!c?8{KWdMfo)WbD^K&N`Tkh6yRKC=KD|ui`xV`5cLJli{eR$fch=
zcS(7o(b)3R0zu{b8wie;;yV2P%icY_FU>Gc!IY9J?Dd&*eKaxiu9mT?*
z!k;wNV@%o?g!UHs(bnEb7VFu=#g&i!FTwFl6RID5M#&ddmYfnCdZDG5S6GXXDPpcH
z{2$hcEa$>;ZT<_;=kbicaEO%P`y*#iCA{>o=^|H(%SNQO@I`Y(Za>nE^}0C-bl4{5
zmj-f_nmR9JpF*cpHXi(PWciT?2#hhO-L*RS{?+6TnKG39`G!L`MsfJ=ZJ0Nu3WXl4
z@T_}ZhBfQ8JNv~g^9uo
z)h~CXPI(jNPyPc-nFG?*Dxvuw2tn8WxtIJJU|4E4*oXzPnOgIXX
z>+Q2p&OP-eOxa}sLk7@Vp0z_qgknfHb9x1PaZ=a_Xr|gTA<2zH`>&GRUlW!;5dA>%
z9WGni@TO}r3Zt|cX88~`f5Q;s?#Y4goj6mi3HLat@>Jn_tnPIKlcP)F)h3kFjz?kB
z-!eSE+>)Ay$}s+!c*2~NX*{F^ws+FuJV2HDKYds%?@6}q9E=eCw#&K?aB1}cM$vBE
zUC@+6$LMgd&m8<#u0plF4-@*SGit{>cr1>=nkGXTw&WVBwRXYaEf|tB2|*)=aL8R@
zSfouxcY|=Qy0sB^Wry=GpaJLal#3Q;$rX)N&{Pw>u}ZSsBeXefr8-+=+H+oeeL9&w
z#SWiLSe{yi6zeWLlyA#}!S`V%}-5(DZ#lthKH>2;W(Q-`|
zT-uyM>Y6ruEdGxvSxa$Bm;@(I4Wap*Zm`n7f!15x*i_S)6YjXuW6%@?+;`=poBQB&
z;RH4{4CN6e4S7dxp%m-GQLWv0=k{iVx*JpB_XEV`sVy8%{T|;tRia3}2On
z=o{nF>5D#395ds>u>H6i9l+Md>o6uY2ao^L<{)D&E|eWkcDd-F;^Xv`=lr%;=kPnu
zg3~Uzaf;+sESG*j`9^=PD?flY#b0o8f-Wm
z8IEP?wzSG`#a%aaS?0J5&FpWX{!e>OzW*G{Z)c;X-2iqD`-2ai=HSwUWaNGfV9c+k
zye#L*i?`!=IZjw8?KdKNzc%$xzQl|6M!cjiIf8BuEZncfAcI7#==cefKdP{a>~WW#
zyNQl2?RcfMDW|X1MCJea68FUeXfYc}(URXk*@&;gBT4t_$MWAN(D$7MO(U~m@nIjz
z-UM@i_d|ro6rrRiskKOab;Xb2P^a`yrv*56(
z8sBz_?o
zYGW$)ist8A>dsR^f3cS{8?~i$h
z(+Rpv9-u7G>LBPU%RORMTMq7?4Q*4&9QNyhj*lGKc}OQJWrZSSd~;s8EgGc$1Ng6g
zBwkV#D(cqZ)Lsi3k9`cC7589&N-{mOwqx&X$xbNuV>|I|D46C#F;n;-mXERef(Gvh
zV<6>~A?pq~@}TlXSc>j`^{dR2lKYu#l#jNt_Wa(=on}!Vku&`STyhS}N&*45euqK#aE*!$Lb3U9{<;&V(F@c9`<+X4KB0sPr
z75|!ZU|BDmb&&nH`*RHLZiNez1~9elBkc0MhkEfv*NK07dGreu1j$)*dl+x;ufUF>
z!t%LSgFNs_7B0}bU?Trf^@I>P92ns*V=
z|4A-xr33fc%3iQ}2HGjuGP<-s3a&~{TylTm+qQ~!I*>QFO+w?ea*TBA$b?J-Jelc0
zz59V2JA4O*E>L8~Tu;eN9)y+TuROEE81llDmY*(S&eld~x$83TZZ!_hEy4M5w=hJw
zH1XSPxnTTbe0Xch>n)q`>#EnNYPB6f!jGU`b86VWfo|}AjG5nzkEe8@p87=GdHMyA
zeE#;`XOS7|#*%~bnF^eVpNsBcigNex4Zf5NCT8RysC$M$5_&a@$Vx8J*+(>KB
zd7Iwj@2FRZQnhAxYVy{~2$YO=))TzH1{(Gwr|bb#U~IcyQX}}
zzKt*S6z|F>QSU!PYx{3x$WDLdq$cz!x(wfOj%+p5gf~-k`9}8owIlE2wxcmSOc#&W
zID2YeAB`$cGu|5IPqir#h&^mahoC!X*~*89;-V3D`5VHQ4rcg6H*D^tM!yJcMx?yN
zF@JaVe^Q9Q#sBbiqAKs|zrm)A7EEZkk6tw&@KNnA#uo)}y4f+L?3<4>v7PvIj(7&I
zPR7h7ZFqEkHp+*`qe$|&2jz28>|)6LN@IF$cHvYr4XCcJz(osrj*E70^Qj(n-}P8E
zGZ+6}ZbNNGcP_q`gsEqJIRDgNBsI#e?zwPg)@H$OT@LzomY+G@lM%0OV|YX!)>A8vZ`C!O-pvirgt!0SSTwx-N(z&y+Elizn1w^by7Bx
zn)QW3dNanZw4>@46@JoiWBQq?sOVRS>F(lTS2yJG8S>qHZjUuxI&rYG1<`3EmcFy%
zcRTTvZFFGdGA+*Tcpl+nEjagwKKl*4hhVi7yt-k*OH*B`pj(5wX-hEtxDEZX4EgA3
zEq;AWN6}aDtF8tsj9*~pcSR1nZ^jRUFJggLCiWb#=ZAGo_+YmpCk>bXpKVjNt8`(*
z;5bC8m!bWBu%u@$R(453_aYO{FjQgw(&k)S9f_rK&vq~|TI)3cx@d^Fs8&n_Hh@{JLf45>)7(bwqpGJXG-J2b{w@n1Fc0n
zDD04f|1PzqZ>39-sMnl_DoZnc3GCzH4zRH7_k_|6iFF{RdJ%*>Nu%)|r0?ak2^zItQ8hO%i
zLIno<&cW)rHk@4U#LC)gSX!rJ*>x|vHCN!w35W1=p%<-WF)}9Z6`DT}q>0sQxR_qS
z{G+ZsI-(rK`EPJWX4j+_rI-`y&9c;d^c3HF#Bbr6B>B)-etycxix}|9h%=uxf!q0R
zOm}l)`ECc4L>Y3ta6D!Q7<2G~D1=w&GWDA#x13O*){|yjnVEtmh2m@5^#wCEjv`p*
zonE4!KdRQ_w-wi6GpYe;-Bsw)cPX7X8nY3u~$o-R~JJx;E!7;cIoh_XI)moYFlYOo_Uc_!8+Wnu-fA
z+|;L4&k49VN|}%4?q7A?O?Hca&{FPlrGrl+N;3E}*L*?vGApL1ycv`ywWB&V%pA%n5E>?rpSMS2{;u~~vYtMZaSJC@fD!N??
zW9wh?SsPl21%7vt9*_++*|8b5Q|EVU1CB^gV#w69D9tT^(vJ>2Zk`1zn|dryY|HGF
zg%}gmgfk?s_R%2@$8NZBRh~J^hAZ=->1E8-eG8ABD!j2SS^SXhT==^alguY0qAUfv
zW1V?Go>7C{@?_WY7XEX5Xz~4t+&!9enT6!MCxBthIlhLS=S$@;qN?_7C?7Lyc
z0j@2%d!#w*bob%Epii(nFWjl)LA<{y4(?4Jpg?wSPRGqyc*KL9s&^t=m<(NJ%lUjn
zn^}GKBCpbxvtBepcb^yQY+qw|gF3z~?7&Cuof&!W02Xd;rtpA3BA}H
zoY*cN*fl1sNIithwzrQIFDJqg@38>4U2Z$a$^1uG?bh|Kwsf9#m~UlCf)cc
zrwQF6yWy-Qc;&NrjqKzuI$|Gwd6=;9l6b#GE1hslkpur&@zW#|j`hx#ov;x{<()>h
z+Y!{R>%eAXJ_|chc$N1IXyq4y;O8=@UjB>IGvL7?F<+26^4iyfTvvL#+
zlY7bB>&XlH3Bp3GgWjYP#EJ)_rpk;-Keu37p*2l+dUIv(C2-&R6FONPsebYyhRa#H
ze82qnDYD1es?Yvn=$x4s3%#`aC|E0AbjgKmiU)^}SBAI$eweHf-LJL=`qLhxeR6yD
z7hllUXR=ou*-JiKRWMagL#=|a{TANC**H^XI3}X4-1Us6euHL23H~kV#fl?}xVK1|
zLEI{f>$ltQe%f6wR0|Gyh%5zsm8;tVn2GQInmtHlxdG8
zhj-*0BK<>I;&%geug+l3KvVIQXCP7Z%8nsgY<1e0R~1@t%NI`=`F%t7%E8obDl^{X
zD43@C^Ua7KSQsY^p_1O*sdfRX;!EsV2W}epMtBtJRCd^b%qn$u8v+h^DE`&R^Vl#c
z85RCRm=d)Kg`4&v&$lDT&2eO2>SI{#F2cU3V)PUJr_T^IE;_XzGel}HTwu&kV-n!8
ztkWZCBt-aE3Qv}kKmwZa39t}d{ia!
z%oSg}nlk^&-|NMFBP!S*Lhzh2x!+2b#p)G)Ie4&hz6Niu8i!_ge&NzEGg`}eF>#s^
z6V&G7{k%_D_NqINEA@eo`2MabcHnHK9q9PA6pD`&*`-Q$o4OzI*I2wL=U(9UTFC&H
zIu|#LGo)bgTS@`wO#S
zdHebvlLUtd1;IgVzQQTY16h!5qpFpRl_ZOgjRW7=LcU9lTI-i7kJks@;5cB9I-
z7A%TghoQ%P`NpCvF6i6x{MJyGZ8r_9IpV}2k6r2Xb3fc=H;}Y&J0@*@h|wqd&`^A8
zR!fdbX6PIS+*K0hjSl0YY~{XhLzA>zSSE*KuFSrZ_9;;P^AcqB_TMR6IjpFxI*ahm9}9w-_xpy!(zF7LTBJ
zL5aP+?bz2_kquWRGw1Xbn|*Ji%TOm;JQK~ayE(_szk|sZO&Dm}k=K+1ab&9ax;OeT
z*~5{Z?arcy?Pn~$(3K-)pUxXa!u&I1mkW>3(xMhtBRqN8UX@pb$+cd*NdwwF#`|nN
z+GhO0^gVhUuYC)aK}BfSQy6&@nz3?NDO%UbPN_vGWBU{!*E|t-7k1*9umfmz>jv%~
zvSf_hO_nG$5r%9rl16IsdH8LpwbEqJzGtWy(v&+dYcTO|Qw~1x0MFW}QOWWHQa|f+
z;RVqtOu{gHcL4VsI*G#($B`4?pCdQQ?><79!^8JrglYxCdv)UMkVv>45$EO9Dimy3
ziyedVv7)FeLp_A4@KrQrg%!x@_Z)2ly6|zmF)uIrE}D@up1kwm`L&{_iyqs-TZeag
zR^!=T*}t8Vy+NH55A{8bncY%h{$9QZ3lm@@%oA_%kh!Joz_&&fo>uvc%&Zio4sxKz
z&^An+EB=zR&Do=IHeQV?#90L&J{N{?!p1h7y?g>32W-StuU70}XviV*XCFVk7m6Xm
zEi!WANuNTz$o>w6gJ-a9(r|V#Er83lL#R5|h9O5bg9zk&OUcyA?qIp(aZZX(J>sGQ
z`@e3(i=#waeANl_e~K5O;yps9>9b8UVFS1Nf^}y`VC@H8I_c@NMEr^onKCnPU5V%&
zEvc2D!QrzU=`<}Ac7gW741JGb5lSqX`5he;beJMu!zJsZu*;zXM@AmQa0Lh9F*e~M
zyC+b{s=(`GU*I&R9euh9dm?xh%D245r`O&1I4lz}k2Ki#l@(2WM&qru4WEc#e)J^^
zn7-Cvm{u=dG{{5T;HPNY&@my2M;Fz?+o&Ig;Ga!(|GrQqW*(b!x
zbL8S93(gOW7mr#SmPr;dtz;v1k9XkkZhB0W+<4sLLPXB`h6d4w=B$!u$cJYLTvC96
zckf~I!!nE#hQW#+HXL6$3OR+#6H!(w0K@nSvhStR!{(GLEVXP|A29?zUi
zM8*|&&M=bvZO4Ofa5Uv8pIFRWVaK16m6~jqk8*7X2KSqWgkp0R=u18^<{H*mRbl-1
z3+P_ajJaWLc!K2!IJ_Sjt<`A1Pm|$}FW~;efp3TCbH(jfq9Wrf9e`oYEmRB_&-6dx;`&^G%F{M9?%j$*8-`%>KH*p$e-DT0H((Xti-#kVuv-|*
z*_l;ne({HJgSzq1qO~v(fAv1$#kRL~qF0_hU+s|{(f&C67+Hbg+TMKnx)j?xe?anP
zFAksF4WGq(oh?7x<81+AElwc8N%A_voVeos9P6~5xnYC`8~-Katg#st_W#84eK}|%
zGs>jf!k;s8rPq#Vw3}qZ+LZm6CZ5e1vChoO-;dg;u5?*+4V_x3a7{xhYK80Me%p$5
z4cXYH@5*l{aI*iQ?Vq-;E`nYtX{vD0=M`{H=BiHay#lDd87Tyt5@S&5AaDicHZ-
z$E46!?6dtj>H~%QTB^XcGlMwuL@KKK3j4&wlh>EK(R8@*;e@rCWn7NK_bqw6R-R39
zTM**cja6Nb;3o8%VFf~`7oolr?F8mvM
zH&bKhr>_GK8U`}Fhcn~Scj8(@Kh_$Lf==BT;jRl;^|b{Dev6TO!&`iN1S;+;#){&d
zcqPo|kEcyIA}$T*@-5hIj}AxX2IGrD03UT1zE7pFlw)i;-N2iZLM8Lw>M5!ulajcm
zGk(^6!oj{)>~QBgKHT$W7?hc9Ci?unvv^Vb3HLV_Bl77z%*=41sj&D|UR
zrith94eWn)q4$N0h<}!a0l6*sXv+%>_|X8jwT+nGR-YS7+&L@w9(*=@4NR2Gu1kGi
zW^bK~M_F3Tn(rh#j-hzDLfdMYq+_j@$&Zp>xtt{}0)dW1ggK!r`>jlHrHZjT;d
zn*LARH@0W{|LVloQjVoMzC5s4c6bx~p^pZ%nP<&vIjb(ulU?J|BM46T2ZJ8st#MDm
z@E;l+Myo=hHl2WRrHqWxZbsz$eEMd#`85G|>;XMaX`Zbi@k6d03gNT->)ynAC8oL37s
z?13h$1`fsKjlJfUa-RBJ>j)WNt~TGv+^CcUmVK
zG5ox+Vl=qljHH-p%53p=m<-I
z40$G738UaY;rNfdfM;`W;NM>-c9r|RZ|mEb8>C6cJr0~JpTTXBHxW3f6;-Fq#
z_FSGCmy1xhQ;T19g}-#;JtEfn(e||}x5?+-f9GlO$^_AC%WpV4KSO|>FDGZc$M>RH
z*c5gjOQXAR$aEdvpZFdN4&H-*&ql=EGGTg+_)traVSthkcmM3dblowSckcxrJ+cyi
z)K(O)lb(RJZO8}S%utL+#(&Z&kXwnm+tE<(s>U2oPv%IbzQSw+Y{H%+d}RZM9adrR
zIAxkxH)2eREi-rC!2YY}P^Varrr&KDo@B>|;g3=CcnuEUX~$8ih1hZC1uSN{h*wyN
z#@0<(DjDFT(piuj7K`$gro6Ir5TEpx?90wbmiw75~u{x-CX@;DxmYjM#A*p_cU+
zdA%1RRJwD22Y0z24MsEB9V>`;D)rk3Y%XlXNegGrJbn#*N^YQXnB>~S%CS4~8jNaw
zer53*rg5M`<#dW**I*{QD*n~-8s-A5ysyh;_#F9
zT$6Q9GGf)3Av$xkFcxBl7a>~wjAq#t$g39ywx9G#d=gfXWHsl{xC(n0P0k()CK=wq
zjG|+h^;mK`PF}JHvF7e_RgV696J}uvSS);@;FY(bq12SY+z&fXAD*nQheOweSfe5S
z-8ji6#N8LB>r<@A8A#umtC6j;S@Nm=^c6jJMUyvJGdW#!Vr^l_*s|4mIZwWvLTrH%
zFQ-}YQ2Zmf`Ka?lnhs4%vhhpK8ogbqc>Y$6`C8sUPKXToEG3SV3o23MjY
z4^EcekfG;=@hp1z!B&i^zksEcu?Tehi@uU3J3Heq7Vh4M)xquzH!|c2_q~|*#E+52
z{bb)}AzGs~-=ynMaZMtQXjP$KcpLuf8i*FTZuIE;8*Y+A`!&pkJ;tiAcemg8l{*ES
z?2TynT#-@5DpWZt{H&C%*c1g$3RK~*v@{HStik!h__4TGh1o65STiFJ^L-7e)m4=%
z0@JX5o3rene_)I7BU?(IMt651Oou8nQrC>zT4-^vZ!M-zNW{WX!oC=mhhq&Y+)!Z6
z?d98X&9OJ#gm<(?We$wQ>lv2%0qa~{Wk*wr`@$Z%CfeDsh4q-X#(;$@^F
z=ZJS~hslpU_$*q54^;IzX`CCI{E0!vgeP$9Y|p&i&+$}Mo>?tRp{k?D$VuI3owx>P
zk0fE>xVF@iS^urP&)EYHBiYHA^Owl%R4RQfH~blS=Nozy#NbOuxre;T!m?Qvu=~-T
zwQVDi7v@4=(NP;rqM;@XGSB2t4qRM^0;7S*o!gHUjneax+8vhSeGhMKLvy>QSm%(4
zmPebjd`l?x?Ki+JAR39Pl3l5FFPr+?BRrWmb
z8;2wt{6!e@gZfq@K>9qY=iNlslIar*Mj=y2c!R>b=8Oq3<(
z2`e|;c`80#&4Sty73!+Fu&VY65;HF1k5Vhn*{I6Bc2>-GEQ4oW2o%h#k#Wt7A?jM(
zb5ohRt0mj=XFMiKj`mxeGUwZfAAX-9Q{-OfFPZV-e}-b?72(tGIe@c{?p%3;TpyDS
z^Png!n_rJO1@Z9CeTs;{ay)KpFTDyD^lp6y?KP^PuvVKUhHs#!ZpR$AJLuX;g~L*e
zI4OG*uE^a%%S?C&vuqd=cnoLf?m@|Z9YMf9H}*GBWbR!#56aT9w97TwZ3lAZ
zz%OW!U1Fc+^7lNiPFrPj#{DYB?=GU}H+uoQ{?=SPNO#I({ZJYA&5W2tKN?{-%FDeAmrY9dUPI=JYH-)p&e
zsMp=Z;h%{}@9WM5o12UGG9LXhd(zOf27N4IQTti^T1Wmt(}5+6r@@gshpi^JCE(CJzL-7TNs<5N?1?E4rOUzzc+cP*~eo`IgudpOJP
z)kpkW&Z1ZK8ZLLi?jzt?o{aD;3pIvOGn4P22A%9mi`d=T;G!pAD=|bRro32FX`6s7e@Y3naO)wbHD9Zj9(mq
zPgAX!@c5q0rw(-NQZ2iqatxoLPThS5nn1b0We3$xWzHG5jc8XZdnfns6OQMB2GOq2I**z86OIQ9xNu?AUL;X19mp0ua
zlJKc_8)nPSt8j5Yj9oT?O|E}OL*;RV$iFif-I{jet-0f}1x(CZ@!7>c*fh5e9Sig%
zllKQ2k^%WOW*<%u)n{MzPdK9V6gziW(RQo}l{a2Qa4#eNnIXB)l^3!1xGoEv3vjv7
zk-ulZKuoe|@!7iEv1Ttu8!IzF!-I$C+`!^zC3w_OkJd@*Ty88ou+GWo7h%X2eU3{`
zE*}@W_hpSqE&4t6f%PZ{Ht*eKU8^G$%X!(ZXx7A35>VPcOlh`!O^==
zZZ3Y%DZ;o{(xb^LPg=Krifto{k#Xi2E)Va+_L*n#Yvw+5Za##
zNPl@93i~9(F~o~Ul@ejo@}aq^DQ9nJ&Jh|qT+n+HzUupM
z$3s;n_{8Ibmjk~{egaqL8id5(!>~7=Tp?P+!5CqiH#6s`gV~5_ZN*-_x1#qxf0o$V
za6{CAz=Co)TMKpQ)Abiz+bJ;0OvcRkUFH`Yv?7+$0
z;#E}W!Os250u75Dd2zKO*XLHjCfS_!vM;LGYD1qh@ko&8$H03PXjmB!rEej^0!e^=
zMN1Yse1pg39SD2Zla>`<(X;3tg2pLu%9qc`&bp7u4O)!MA0S-_N%&Qeg+t2ocX{7~c5H3cZ#_#U%uy*bxlj~mU!C-_>2ORt~A{b}7fb5%0Ni>JTk0ACI@^2XQh7Q81u
z>ht0yOBIjk^#JMWyD1*oC)Y4h`q~yPs}&Y>JqlD^IkZ(JwnvHo!`6;(Jf7q8UPCUK
zaS>geU&GSNlT)AVgGX2}J8jEE*GZr7@tiJe`iw?ot_O`otNC)oo>tPMHUD%HdJJEW
zaYLNAw2M10Cv8TQ#N@uOWUg`KD;x2kw|I#MqMOz{(B&Yb=G5L`
zLz|fgVJ`aH$TnK+=qtU29`$ILd>F24ISyxZkXX3TrtpmISxq+=j=vyjzpyvlQ90NjdUf3%jP=iVrWmMxjzRULBG?p(UQo
za@>W;2vhb}zKIjxBv12Jho?7I;=b%@H@=kJLsL@@?ks%~`FiwA$p!X2MPYk8PLn<4
zQ^}B7m#T81bknRF;=y9cLzV1#j$^wmIpC5h6YYhMB%gMD@mN+JxPutUxdjI5F>{kP
zuPE!WygUtybw5C5ugnMerabqysdT=4#M#<3g!U4TfT|j&{@jK3t^=u_lZ5NiM{set
zoA78
zwxRWq6!_k?oSuT5$;RyAVa^!w4Bd5Y%9))GVw(8K
zUd$A}{S_^S&r|2pxi`_q+K0;d>yfup_}ezNRH-!QquM{1xu*?A3WwwI3YkHzs}XD{
zoT0`TDENtwQLzl#uT!zSts(WFYBE*LS8}ON_;t^k>BnpEqW1yuTJhi(Ghia3Ok5zm4RrZ_!b)6|c!o$X9n9Cj2tzkJ-Y)4Sb
zeKq-0^bkw+^H}3)%qy}ND2<9hOl%e6=j+kvp&fT!EJseSwb*vRm&rjNaMEA2VU4bg
z7tVo4?=P6TT8%CPv(QD$p6OkMr#9A@?#8WXoSO#y$I?q=TL;}gUy-|AhsMi%qyuXc
z4y;pS{Xb8Zmkz@~SFoT_Rajn8;xAI*MafJ(nOX)PliL`U(T=x-$CDxXysaw~*|n_#
z`~LcX-SIZuF}M`-&%ML@dQHZhkh`+d9w>=t?!#^)?tfM!tY1quY_wyDZ4#FE7>&8+
zA?y|=pXsDHL_4{$$H3djc_H_WXeAc>N|QUb6{{XOvo!iXEKco#@fv^r_1%YH*+I7!
z-&s*cBra+MvUZSgu*{4(>DUaM$d~=3c;k;wXovsqOa5t@Dlg2D&)e7ic$h35XU#uj
zP!k0fIQ0--{#sbJy^95R#BcWPFMfw>)6GzL_s(imxH%8a7yZE_XCHdCmvbR>4?YcR
z$;c`T(Qq{QFmnwC%WQkuQ=k53b1`O^1uH#e7E4g)DU&34P5z3XkDY0sE9|Xf^8W6(
zVaaL_ghg1fFvFgauP>u=voVXTg>4WXgTynJ;qajuZFS7qt+x}ur+eju}jYXrrLRY-Vch9dy-}(C7-}5BKs5WAQ
zRyfsNj$n9d74o_~!dS=M7@^me$ErF~Fa8+T1nh(M-kz+9OvYcs7(9H{lMZ&(P?{z8
z!~|jQ9kSzc;dXa_Z_i&z53$2>B-Wkt~~
ztR3%ywo0ve*x`$CwbtXROJBA=rNP#sFXf*%;3erS80`58V=k!hVA?Ir?sOivOgiwx
zXBTQ-y#$qE;-ktXG*#+w6(j_;$aSYZGMDNzYRHjgXI3llVyjWqI+=>4EF`{
z`oK%5nxBTTb0rtk%!zh!=0yK1*k*eZ=jyE3cb)?)&Tqyo(e`F{)2EN;LCmsl#tt&~
zXvE9+VNe?M-d%xOO$|yebf&3_Gq>mz1YS-wrtNAkE}Sd;iFHL-*P7ILCcf*k`8e>v
zk1cZ>U}?AlMy|v2%PZZiv@;PyJ^UPuHg9#aeVbpO5`(eHoS|48k*-JaDcQ
zJ*79Xxl1#yZ&D8{>mqz!WJV+_GVazTjBhQwEXj$^oPHBE(;Yc|q#i%VuR!AQ&(Mi$
zL(><d_4;%Un#i|VQdXWNuuP|{%%p0fsYzWO!%Zh)yFHb-N>H*?)MIkJ_mWsaKcd*@1I3Ib^12}v)Mz@mh#*qN}zpKWU
zm!E+yDv~i#rs3W^G@ED6yREf2-8>G};(1y-N{x-FB>H8eG+f;Iy
z+8bd$@hoO^_hb`anFZIWa(AvK7oOM(^=vox%94(&Jqb|n7tBKQeAG~j_NSj<{ucut
zdm`Q1F(V|?(vEY+2#?`h3Zi7TGyfu;+VVUYzSNR;Qe{6BYRKym${ad54#l5~rH|T)
z6My96dz~$J4cBCl#XhtaLJE
z9mLmf+4y!&MZ7?^3{{p+8LKE*mj_TiI2OCc3kN+dfTMTGxjD<3ufICCidq@O;J#EERuVz>{~#EN{x^QElk{G7Xc$_u;}SeQp+h
zq1h=D`m625I$>c<-(tzJV_#yH`X$({e1+;A?Wv?zi5J@k;6t~*eDhF`=^w5jq~b3^
z9^A(g$s6nmh2_%SU+D@i}sRJgL2IHKv{uZ}u={j_kJp
zrjFg%Qnat7%~rv!QxGq=DMG+;ZOQXgW6iM-X#dBRYL82WUwZ%>)U?^*s~+2nU$U|q}D;n{a#%Bz@
zb{SP#P3VtWga*~)=h?T&oYj^$gfq8))Joi(Sc%{WU5-k3qjskvMEFYPx#26cV-I4k
zX*;$X8IPmA4SCJgg$XX(;Op6w+E*51gT)ajZ|+Aw;XQm8Kd0|V;dB@Wa;fuVOh0@c
zyX5{|)y$EdcE84pUx^qdd&UgWOdO6r!tJLmxhQl8&aSiPh&|E=Iz^Lz-EwfZF&Ya8
zX!H9lKUP=^za&?YCRYNfdZiFudK|>fkY03Oq%Le-Gw3Zd;Sia{|J;^b|EMzQeG+ff
zQgi8;mp!(4wv3QB5?Hw$15mdkTZQ3gfUyT{=A4aPZbzEN&@U%d(#suPf}zBoij@
zH{@ygJAQ1l4GZozVaRlAdW#RG=i(n|IJg6<&c@Q~-h&N8lF;t;S=1SuQt@c9Xv5|-
z{aA%-(`;CsBAuLmeek8xg&EG(=-_%07s9O>D7s$3^=8}^yIQ!A3S2IX%MOtVl7F^g
z`?t5CA*@`zX5Zocs0|N@$JelLcUVRBW*=)c9_*Bkv#vMb6(pT{{e=N7^X;XPFEPdW
z23Ak9r{xgo;5?_w>^nusnCrs5+umS{y$&DfYw^w15lHAE9o~tei664&-$kNr43gic
zMQbj+S{vAHrw$jtwdadGf9~0M4Grb9(Wiw2-<*Gge&SPK;Z_G9nKkF`QRMYeKVWcP
zi_a$s*RjP>w3~4amj6cbldEI_yXN8cHaWistw5?}3-%c!3^D0tNEsv=cQXTSnRG%t
zxew*Lti#MECOnm4z%=m#TKO)5uH1d=L-!-e;}ovXZO3^bmXakh;0y14ST7v-4?nG#
z{8R2?;vwzqV#c5|`b@P`rIAWHR2pk>XO#z4+iNg-=pAGk*CV%ubo*+4#X`H+D7kCF
zb?aUE>xB-(OJ-rz-Y1wLd9up)jZl`E;$i6r9Lexum)jNayuS@DX8+*dy(wdRYO%GV
zGdoNFjH=8tAp`TJ$LTm++6Hm?DSa-G`|HJ@PjN{2Bxd7239q*rPLYy9sY$|5MQhd?
z`mue3uo5D)Xm}zMBjlO0rlbRJu9ZxsL7MD3Um@d=3*QCZk~u<~on0L{6xXo7Wg8fpyicI55W0-M4Tt?kR50
z3E;ZX`Esw7u0hGw#CVA>H&~d$p^}-5Qe}{7JVqW>WvX!tep~$ww)IN9f=~Fg*o$`2
z5!fnTg7inyd%Q)J3xs#*(e)mza?H4MeFStnN-i!*iPMkS(A|AB)$c+kzr=8rhb|
z)jnaHyX3#O7_;*84QRh}WT~qr<0g*4p}lQ6c}{bNn;%8*0CQ%f%lq>9B%CD|qi|4W
zss=xf{n`;1TaQLkpFSKkLzi0lw-7W~opGLN@afQwcS?+9mP-sg{>F_@ceUZDi)V17
zZMkrX^y#{x4$B`M!{A$vT+~N_4Ih8WXZSPjO|)g?zIqf@xx&HQo6TaRhp14CT@MOh
zbA@Ptr^R#ckbuAeCFp54fN2K@;le;IIllrq`p76a^%L$R|Djc`FoaIoF?!xfD1KDn
zz$@bWscXQX`ipYM=+61lA)vkS6ijUb_}w@jbNlw9PxeBnZn%e#l3=b7Z;WS;U_Nud
zB09)PYzVrFp-DfmbWx~uZz*#^d?DUkFye%86*gU%ihK5Vuuw8R>bB;5Q}7*gnJ?W`
zHniLE4RN0n5TwzW)-8XEW_bh2dD-YP#GH+9gm<3v7-xmgovka~)??>kxo%HBAMpxX
zO#WfqyPwiaa}N%}m`QZcL093sE$ktkX2L4CSrH4{=94i%(~I{cn_Mu}olh&0(R%O+
z2o!*`?(anFS^B&n-uH()-CyD&HR>I$!4H{^m3YIEUel9r!h54mzs-Lbd3)SHfGMeeXa{7}uVQh1oo^
zSazL_l{hLMs^;4tV#9w+F;RSuUX|i+C|U}&C3E_{}Q*2;e|;BN(v
z?|h4+w;fneYsFiID^Pmy56Xq5H$}G<{`iT0^Td-ui3c%9^M!OHJi=YGp7fO7l`X%c
zkn=H^mt?0mQ98*+>b*f$;R8IE9gF6$EbJ3bLxv{G(Kf8aX!|o{xv|0#gR89m#m#
z|1eEv$B!+9Sr%f-u|M_r@U8}@&q_khrQ5KKZ^x2dMy$Is5Ie33(EVQfLY982aYik>;?JBk}q@K^m0&JA_tj>eaW
z+^a$Tia+pMZ_bu^3hZ-!0&0?USociwPz5bH(Oj1cH*P>;H${%Cdyf4h#h3W4qx4n?
zE3t`EBS%H^1N3X)r^@w;)57r!%z$9?Q3p9BR|O%2%Bl(
z+O~XDZo|LEaq!vn34uA?Y5XSz=2u2xt$d#L*%{LJmJ(I?665dxLy*TW`8=zNZ|ow@
zPcx)WA8Yo@%!9%hbFTJ!hq?+qEv5-GUa$!PJ#=!O8vt
z+Dy}Af@ok>!Zy)N5)NAIHO$TXj9=ci%t=f`Gau=VN6RxZ%LqTW`OY8MbYD0`gPx<
zLiUGl?0o7l&ibAaFMuz9TC38;OnR(K7sJC-&Vr97ENb|Ga}l1rc;XCniuNLDLXh-T
z>hiYUYjn>Nj&T7
z9}d9bLwj1k%!Z?R8uUKoV_HXLE?qf*@9&7a?33&S{_67F)aG2bR*~ucnK+1AI0>)$
zT-RXC8ubIN`Q2H4rvtY4lMd!CTN*pO#gxpiuc@tX=g!|b2Ix0;x#e)@!b!~)cw5aPn-@nB)jkvL
zyLV=nHNr_4xCen@&RjRS1WCo(JSmyAwJS@|bEb6dDuq!nSeN@%o)W
z4Ha&6f_t?aFXy>1EF>OP9ZtZhxdr=XdUBKA0W1$|CcO(sCXU^Tecv;nKGKK3&)!GM
z0ZX2kro$iqsxg0U9n97pMQ)-l52o6&y-Pakl(SJKU6bf&;rZP)V(0eB2r%4@wZbs<
z>nMGR4jV9ec@q|ov|-imENDnZe#V+=I2T$9yEPBVNiM>b)1*TeKh%rM$V1HD~6
zxK#Yv7fPp!o3#={R>^GZVj|p^sBqr$pTaBjB3svDr`}Td-08~nJ7ripY7WdliI;G>
zI~Vl5gtu?L!$G)?E2&H^VQKohq`|mb8eadD9hZqemBz__>ezLpOYZcD^J`o`wih>R
zoH(=2hn{klA33-gU4E`WQIEgK6y8O;|-)jc^{Y0Cy
zk9sn7|3w6y`6wJ#4|<-v5Bnu1G&2|HQpgFE$<8r%k|GC}sZqb123=R*LEg`&IKHwA
zw=HhZte>+`d{%ng(jDt%X~>UXWggNqA4fmi&{-HfcZDOre`v7uQ{`ND*^V6(t$44E
z*Ca%YA@bn#oS!{9G;J$)shL%4v_qMF;apzVV{N-duw}B&sqsdOU28%q%{}k
zn4|4Wcdi!C;LI(d_+_9@W9agA!yzn{{9BtBo~&0;=S&@C&XrDntn5M-kNpM}@k~97
zab(lZk2o73jiO~nj5>W1r}gX@C0tHN4PoU^P~+%*_KXz>OSgQNH6}BFukSEvGI%|jVJxU-|zK!cO6mN4?dr<-d%NZ5E&DNH`wtgy@76)G?{9q{W6EA3#!1{)%Y+we=*XMpmV7kK
zkgqpaqc7s{X0JT=TF6;IIUR|b{n$0A1coVJus_g=-XX{EVxticw*G@_-`}BBo;
zCATxglm8U#g@Tg}+Y4_yLGq~4+8yYr))yUQo}QK8m;L+f$AUQ;+-xgz9y1djkZj%S
zpDIk+nS{AJg?YEfjT`R{gn8LtOo;R5Kaxc|=am7N{fGW}>Cn8|g2w57>~~Vw)PKS;
z{Odb(dgDN~Zr$0pt7OtA8**l_5zlT3$8ke%=Cu}f^N=3A*g~JyqskC38J8m8I(%vB
z%_w$<`5ECu`nmF)i838L#Hp}F`i>87W9D69eEo7~`REc%d~gxbJF5{8>dRQ^hJ;vt
z!>P1!HOKXayBY#jhNHQ
zo8R}ia8|cG+&Q})0j;Z$Xxg5&**lO2Ynsarpl7ZV3e^Vk$DmJ`-K8@cg+t{Krow`$
zf%Lp}8r{#|#klQ)7|xNb;gMU|Ij;#@YCW0%s{s);51=*e{r`}J3|pYb=+QAyeD)Pd
zyRX4zny`Ov=<#6WLuecSLG<c)K7^^mUa-fs2
zn`-tTq-z2i20JiV?k*)swyY69kA9=PqfdT^mw4gM{@H{0X6`Jkv}C`k8`%G#E5lkD
z%e+u{lH$P#HqoX-Nfx$9$7o!b6YEb$3u9N8F{#>I7bF?sH=l&{mxJV$AK@-L=Yz73
zS{Yb_xoX1mkpDiLHQ^}Fy@~+Y10BBBiD~bDBIJfJK>odptag%{o%9p-P+-^ht}NIQ
ziQ@zBLC>Wfb0yQO7c&g&HObwNn{ji-6%4!d60whcnYuLxw`S~uxl%U{wManMCR1T#
z3Rm9nEIjtB^YA-G-Wz%YFASxVB;KYgn;xNZYy_^zy*p3Wle43}I4ti0c7Lrwv*B%d
z<=!$VZB}K6zWMkzPCD%G{21Ex07AsyqGztg!P4~!n|cA`#_h()*2C#zSBpLC<7A#=
z$Tp88Q!}d^i_((eS>?l0|D8Az){Y*svmL7=-OG;6X&}$5O}{cQRqp8-L+dfw^CC7&
z7I}?ysEvg6o%gK_Mq?jf!*H1edYE$CKqV%4debJR0<+X(@vP@(m9uXgg)L$+#mnEx6P*6LI1JSss30J`Zyaxg|O9uaeuexPhuu2eDy$NA_$g5o4-ofty#;T`zUWfRZ>V~+1VIDzIJk@OQum}oA<#^`zT%&*-y@E5N3MP+oM@BV
zu+VhmNjbwjFg<}zlHnQl(UO7Q;+~yd1#i_)cqQG@GaoY%KJzrxWmn$juXF?b51_}?
z4ooZ-r%FR_sP2D>!CQ6d_f2}Jo-#|h-Is@T&)^=OqgO{qCP#`tTyh_^>nmZ>tvUZx
ziZ62OK1@kgrIxVHXIZ;(@vLE(FSCNT(qZYZeiCbik@`%p5yejn@Ou4ay!PwB-HI1+
zdtP&fdb=>w(h1Jp-5LEr9G;6`W5(lZSacVM*yLZ(w`fb3Ejci7k{sY@@q%i#6n{-C
zPFH-0Ppy?0?U9Vx>Q=lGB{}79g=jBshv=2MZ1JHKYlW+~{joEri4*0Vtp>N=leyTi
zNYoFQkET=Pyv!y%>-_^MSM<3<|23Z73*?tL=>@%8gENhlIC(69b!PhPCak=Xweje_
zR{T_(#a%Nv647NwG?a{M@XAPpo=-$O6;HmDtjb~af3Zf@idxxisC7%7^%K02_tu!*
zw~u4u;U%zpmJYWKmB`ijft!nZ^Rz`J{`T1o<#yG`nm>e*@w>1vB^>Ua1DLK-imAfI
zHk(|Ae~)X@bgu(P`>W9SuLd6vI|7v{NeCX~N^|9JSR8B4k!9)P3)ki5KaOl!wI2~i
zl0&a-BN-7F>e?5fpDi3U7q9j@zE=#?6xz{0tTu(&r
z$98-t9isEgUZP+`9WMH5bBjEu9_;;$Lj7_q9U~d3>aFPQ@fmS_{pjv|LHck;bZ+6p
zjXSQx+1HL5x_5EhGZEEi1KDm%681j%ihVm2=z3g(3N@nrH5M5^N`wuY@
z{%kWh8xK;g*(pXorI`IT%1}HigO1P*x6aKZ<617d-uM0DVx(o
zcKjV4N1}hC+!el}VSywRf=y
z4qaGsTe?eQq|f794BvbAu~xcnOC8PB+=t&sq(vRVor|H>xs`mE8j!NB810wY(7kL2mdtYHvHOy{{=ES!
zv~;PY(wA>}BXAw}Ik0bDUE$
zZjA{+uVR^VM>OMUyXWXQOLAGlSNastif^Rjbnn4ZC`pfGe~dkM&+m->n>^^0-JTUo
z79n2oBsBb{aO&O|REvM&=CS~8&FI7}Z!cqD`Z~Dv@nQOSXF7jAEPTuyWFG8Ald?9L
z`j7M=He1p+VHG07g^?n?$N~K;q3tf7fwMK3ve1zK3`~P{fjRB0KBMG=9+&9lA$ZJn
zBpzu&J?Rvr1m73e=`=*tb)a5w3XavuyfUC(c3dhvW+&{+WvZO9doQeyrs81F4%E84
zTR1iyIdX{13DdSALU?)JuO-tJB>o}cU8PS`=k!zswvs$hXzm?kO^8F2p>S2DgS*lv
zh`+T?V0y?c^fHp{Tu~g%fBcW#=uH2tXk2`|4VIHyaQ7LRNsRj`Tw)WdjX#4ReQ`M6
z{tk^IT|TmSkCt+8GP>EF@zQIbWPAmhF~Yb#uFX}e-I$S5f~ke)ac9>^NsjCm8WmaeT{cl9{uz9%Z;>sgeOEFsB9jAs$ry}S%l=haQ+aepTv=p{aP)C+T
zT5#@yqr#?EZMcioN)E}p`wJ|+x#t4usl=Fx$&kJf+FF7Cr}JN}WLVe>2EY-pQ}->2QEH*qg|Ps>1%VRtI8E5)T~
zV|vYhi=9T|^4vLut%~*KjB*l6ZKU@w-i^KnCgLuzk=}|j!p978?bKw{xn&sF
zAs-K(s&)0QZa>ZmbM1_dDWi`+L~(%@=N{H0y*0Sv
zp*9UAr{H!b2EKoE_^-GZLL{fWFHLx5%0JMe%9@(HjCuIZCG`2F#4B^ZVf?W&}w`
z>~A`18oXF9=lON*9q6kgeA?CCIB?UASsTrmXY0oBJ`WKhE^nPl3iL}Xf#R9YRN43(
zrZVT-EIp9Faa?x{FYU#aR_)neJW~6FJ?#0{mfnY2F|^rPGz8}2!cYU+Ipx7E
zIfzM{x1qpESRX6J8R&T#%OxxNO}>AbG6zlcScn@F4XL}lBfE~s$A^!JcyZN^Ry{VM
zcSj*rqWJcy&;?=QCeoi>KtUrpeA|<`X0g%7ChTpM!(5
zpj_$Ad&e?xyr&vtM~RDTU}yA<>?|{+D*Wki9BWd|_^-Yhqc1jRmHL0!R`?X34m&b*
zXep9ECnH2yW!i-Xe12~q)>PYbXuS?Ew>yF7FWgx_KN7df-=k(`ci}r(3yb+U&b?4(
z-qjnz9IwQ+zhl_#-woLIMLbAjJ=uTZ0IZOl>JzzJo-&?|qGN4o?qpaHi
z2hwSU60fy*hL)SM@bQ%79(x#avhZ16pAcVNn9OGKn{Y_D5Z0L$vcG+eeHq$JoKXk0
zIe$5B0USvx_w!ayd6*ONl$owwQ$YEP5I4^la_r%
zgVAL?%r)TfNH-?@vlFvlwB;imb++z*6-R|x&`#T3@;#y0Bz^RnNOk%pw?#mkLlgUZ&TP$po$A-M?7zNuKro6RLkHZw7LnBa~2g;TCd~hw2
zHYqank0$T^{eUe_t{gB!x+p3S(L!?Nfv#^5vq0{OUz9n1{wcU_*XO_`ZJF)eDBi5&
z(2Y>#uqt^s3^Zq3nLBR{y@?U>Y)buChu6a2Vqcyeuc~+C1!c+Dt-OHR${K8u{BBr)
zC1(jwEvTa@=iF>U`l1qqo(jcc&qiD;?!s4F58&5bb5>7j#oyhx;^UlLj6W*p%vm1H
zY`q!3H-1LBxE#C(*TGI023t}hVcAocGY94(O}e=`JzI0d$^rz1R3TXAkv93lsucfV
z?eZ+)(wcHZO)56q=yPwi4+H(S!$>k7%MS@FrME7Fua)6oO`&jl+VIJy>)5QBf*aZ`
z>G4jwtM`6kWM?ZbRQ!Nm2fxDIwH?PVx`bBT#BAJ~m@xHa@xm#fu7@
zaY6VWK?k6J$dIk#@8M#A1#{Zivgbefl0o{7K*`9Q&5J?!i?^tey=NsWQL@{Ootw#A
zC0=qR()(7Z?9K~8&OGM)7@Ot2u=?{WOxX7XC9AdBOI*5ceOqyrx8nyI(BY)`b6TB6vuGcdA9a;ybs;p2j=`!=
z1x&^{^Vb#?YCrsnUvI5h|2h**#W(OE-Ilw1sWUEDooyQxctg&Bi@QqKY@|B(xP3*M
zqTKU}Ix|6fN^#L6(c`JSBX)kpV%g_Ux~j}yQ%zZMz9ZMpxQb-)%BCNPmN|*IIgA5o
zR{k5^A6rwYARpcIFXF`vE!Grja8o~h-WyyBg}K&Tyg34yHra?z5KhH?L+-u*5T~Z(
zVAl+BCB+#t!r}nxg6(-j_Wg5R6Y%sw7A%WCV_Ss}gM>4+x-c45HZ$-yG68`*y_mM%
zi`qtyFk@;0Zak2@^5|?RKbQW;sbow|{@*>b6UXk9ey5`*)So%COL%Lx874e6aU4$V
z+YRNiufM&_mD^7&funs8_uEBb@?8gBh)P7NWUrV0$c3KUZA@Muj0P8B